problem_id (string, 18-21 chars) | source (string, 1 distinct value) | task_type (string, 1 distinct value) | in_source_id (string, 13-54 chars) | prompt (string, 1.28k-64.2k chars) | golden_diff (string, 166-811 chars) | verification_info (string, 604-118k chars) |
---|---|---|---|---|---|---|
gh_patches_debug_1200 | rasdani/github-patches | git_diff | Cog-Creators__Red-DiscordBot-2889 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Removing commands on bot load
# Other bugs
Loading a cog that calls `bot.remove_command` in its `__init__` causes it to error out and fail to load.
#### What were you trying to do?
I have a cog with statements like this in its `__init__`:
```py
bot.remove_command("invite")
bot.remove_command("ping")
bot.remove_command("pong")
bot.remove_command("uptime")
```
#### What were you expecting to happen?
The listed commands to be removed and my own command to load.
#### What actually happened?
The cog does not load.
```py
Traceback (most recent call last):
File "/data/venv/lib/python3.7/site-packages/redbot/core/events.py", line 71, in on_ready
await bot.load_extension(spec)
File "/data/venv/lib/python3.7/site-packages/redbot/core/bot.py", line 304, in load_extension
lib.setup(self)
File "/data/cogs/CogManager/cogs/general/__init__.py", line 5, in setup
bot.add_cog(General(bot))
File "/data/cogs/CogManager/cogs/general/general.py", line 39, in __init__
bot.remove_command("pong")
File "/data/venv/lib/python3.7/site-packages/redbot/core/bot.py", line 466, in remove_command
command.requires.reset()
AttributeError: 'NoneType' object has no attribute 'requires'
```
#### How can we reproduce this issue?
Make a simple cog with this in its `__init__`:
`bot.remove_command("ping")`
--- END ISSUE ---
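For reference, a minimal cog along the following lines triggers the same traceback whenever one of the named commands is not currently registered (`remove_command` then returns `None`). The class and command names below are illustrative assumptions, not the reporter's actual code:
```py
# Minimal reproduction sketch (hypothetical names, not taken from the reporter's cog).
from redbot.core import commands


class Example(commands.Cog):
    def __init__(self, bot):
        self.bot = bot
        # If "pong" is not a registered command, the base remove_command() returns None,
        # and Red's override then calls command.requires.reset() on that None value,
        # raising AttributeError and aborting the cog load.
        bot.remove_command("pong")


def setup(bot):
    bot.add_cog(Example(bot))
```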
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redbot/core/bot.py`
Content:
```
1 import asyncio
2 import inspect
3 import os
4 import logging
5 from collections import Counter
6 from enum import Enum
7 from importlib.machinery import ModuleSpec
8 from pathlib import Path
9 from typing import Optional, Union, List
10
11 import discord
12 import sys
13 from discord.ext.commands import when_mentioned_or
14
15 from . import Config, i18n, commands, errors
16 from .cog_manager import CogManager
17
18 from .rpc import RPCMixin
19 from .utils import common_filters
20
21 CUSTOM_GROUPS = "CUSTOM_GROUPS"
22
23 log = logging.getLogger("redbot")
24
25
26 def _is_submodule(parent, child):
27 return parent == child or child.startswith(parent + ".")
28
29
30 # barely spurious warning caused by our intentional shadowing
31 class RedBase(commands.GroupMixin, commands.bot.BotBase, RPCMixin): # pylint: disable=no-member
32 """Mixin for the main bot class.
33
34 This exists because `Red` inherits from `discord.AutoShardedClient`, which
35 is something other bot classes may not want to have as a parent class.
36 """
37
38 def __init__(self, *args, cli_flags=None, bot_dir: Path = Path.cwd(), **kwargs):
39 self._shutdown_mode = ExitCodes.CRITICAL
40 self.db = Config.get_core_conf(force_registration=True)
41 self._co_owners = cli_flags.co_owner
42 self.rpc_enabled = cli_flags.rpc
43 self._last_exception = None
44 self.db.register_global(
45 token=None,
46 prefix=[],
47 packages=[],
48 owner=None,
49 whitelist=[],
50 blacklist=[],
51 locale="en-US",
52 embeds=True,
53 color=15158332,
54 fuzzy=False,
55 custom_info=None,
56 help__page_char_limit=1000,
57 help__max_pages_in_guild=2,
58 help__use_menus=False,
59 help__show_hidden=False,
60 help__verify_checks=True,
61 help__verify_exists=False,
62 help__tagline="",
63 invite_public=False,
64 invite_perm=0,
65 disabled_commands=[],
66 disabled_command_msg="That command is disabled.",
67 api_tokens={},
68 extra_owner_destinations=[],
69 owner_opt_out_list=[],
70 schema_version=0,
71 )
72
73 self.db.register_guild(
74 prefix=[],
75 whitelist=[],
76 blacklist=[],
77 admin_role=[],
78 mod_role=[],
79 embeds=None,
80 use_bot_color=False,
81 fuzzy=False,
82 disabled_commands=[],
83 autoimmune_ids=[],
84 )
85
86 self.db.register_user(embeds=None)
87
88 self.db.init_custom(CUSTOM_GROUPS, 2)
89 self.db.register_custom(CUSTOM_GROUPS)
90
91 async def prefix_manager(bot, message):
92 if not cli_flags.prefix:
93 global_prefix = await bot.db.prefix()
94 else:
95 global_prefix = cli_flags.prefix
96 if message.guild is None:
97 return global_prefix
98 server_prefix = await bot.db.guild(message.guild).prefix()
99 if cli_flags.mentionable:
100 return (
101 when_mentioned_or(*server_prefix)(bot, message)
102 if server_prefix
103 else when_mentioned_or(*global_prefix)(bot, message)
104 )
105 else:
106 return server_prefix if server_prefix else global_prefix
107
108 if "command_prefix" not in kwargs:
109 kwargs["command_prefix"] = prefix_manager
110
111 if cli_flags.owner and "owner_id" not in kwargs:
112 kwargs["owner_id"] = cli_flags.owner
113
114 if "owner_id" not in kwargs:
115 loop = asyncio.get_event_loop()
116 loop.run_until_complete(self._dict_abuse(kwargs))
117
118 if "command_not_found" not in kwargs:
119 kwargs["command_not_found"] = "Command {} not found.\n{}"
120
121 self.counter = Counter()
122 self.uptime = None
123 self.checked_time_accuracy = None
124 self.color = discord.Embed.Empty # This is needed or color ends up 0x000000
125
126 self.main_dir = bot_dir
127
128 self.cog_mgr = CogManager()
129
130 super().__init__(*args, help_command=None, **kwargs)
131 # Do not manually use the help formatter attribute here, see `send_help_for`,
132 # for a documented API. The internals of this object are still subject to change.
133 self._help_formatter = commands.help.RedHelpFormatter()
134 self.add_command(commands.help.red_help)
135
136 self._permissions_hooks: List[commands.CheckPredicate] = []
137
138 async def maybe_update_config(self):
139 """
140 This should be run prior to loading cogs or connecting to discord.
141 """
142 schema_version = await self.db.schema_version()
143
144 if schema_version == 0:
145 await self._schema_0_to_1()
146 schema_version += 1
147 await self.db.schema_version.set(schema_version)
148
149 async def _schema_0_to_1(self):
150 """
151 This contains the migration to allow multiple mod and multiple admin roles.
152 """
153
154 log.info("Begin updating guild configs to support multiple mod/admin roles")
155 all_guild_data = await self.db.all_guilds()
156 for guild_id, guild_data in all_guild_data.items():
157 guild_obj = discord.Object(id=guild_id)
158 mod_roles, admin_roles = [], []
159 maybe_mod_role_id = guild_data["mod_role"]
160 maybe_admin_role_id = guild_data["admin_role"]
161
162 if maybe_mod_role_id:
163 mod_roles.append(maybe_mod_role_id)
164 await self.db.guild(guild_obj).mod_role.set(mod_roles)
165 if maybe_admin_role_id:
166 admin_roles.append(maybe_admin_role_id)
167 await self.db.guild(guild_obj).admin_role.set(admin_roles)
168 log.info("Done updating guild configs to support multiple mod/admin roles")
169
170 async def send_help_for(
171 self, ctx: commands.Context, help_for: Union[commands.Command, commands.GroupMixin, str]
172 ):
173 """
174 Invokes Red's helpformatter for a given context and object.
175 """
176 return await self._help_formatter.send_help(ctx, help_for)
177
178 async def _dict_abuse(self, indict):
179 """
180 Please blame <@269933075037814786> for this.
181
182 :param indict:
183 :return:
184 """
185
186 indict["owner_id"] = await self.db.owner()
187 i18n.set_locale(await self.db.locale())
188
189 async def embed_requested(self, channel, user, command=None) -> bool:
190 """
191 Determine if an embed is requested for a response.
192
193 Parameters
194 ----------
195 channel : `discord.abc.GuildChannel` or `discord.abc.PrivateChannel`
196 The channel to check embed settings for.
197 user : `discord.abc.User`
198 The user to check embed settings for.
199 command
200 (Optional) the command ran.
201
202 Returns
203 -------
204 bool
205 :code:`True` if an embed is requested
206 """
207 if isinstance(channel, discord.abc.PrivateChannel) or (
208 command and command == self.get_command("help")
209 ):
210 user_setting = await self.db.user(user).embeds()
211 if user_setting is not None:
212 return user_setting
213 else:
214 guild_setting = await self.db.guild(channel.guild).embeds()
215 if guild_setting is not None:
216 return guild_setting
217 global_setting = await self.db.embeds()
218 return global_setting
219
220 async def is_owner(self, user):
221 if user.id in self._co_owners:
222 return True
223 return await super().is_owner(user)
224
225 async def is_admin(self, member: discord.Member):
226 """Checks if a member is an admin of their guild."""
227 try:
228 member_snowflakes = member._roles # DEP-WARN
229 for snowflake in await self.db.guild(member.guild).admin_role():
230 if member_snowflakes.has(snowflake): # Dep-WARN
231 return True
232 except AttributeError: # someone passed a webhook to this
233 pass
234 return False
235
236 async def is_mod(self, member: discord.Member):
237 """Checks if a member is a mod or admin of their guild."""
238 try:
239 member_snowflakes = member._roles # DEP-WARN
240 for snowflake in await self.db.guild(member.guild).admin_role():
241 if member_snowflakes.has(snowflake): # DEP-WARN
242 return True
243 for snowflake in await self.db.guild(member.guild).mod_role():
244 if member_snowflakes.has(snowflake): # DEP-WARN
245 return True
246 except AttributeError: # someone passed a webhook to this
247 pass
248 return False
249
250 async def get_context(self, message, *, cls=commands.Context):
251 return await super().get_context(message, cls=cls)
252
253 async def process_commands(self, message: discord.Message):
254 """
255 Same as base method, but dispatches an additional event for cogs
256 which want to handle normal messages differently to command
257 messages, without the overhead of additional get_context calls
258 per cog.
259 """
260 if not message.author.bot:
261 ctx = await self.get_context(message)
262 await self.invoke(ctx)
263 else:
264 ctx = None
265
266 if ctx is None or ctx.valid is False:
267 self.dispatch("message_without_command", message)
268
269 @staticmethod
270 def list_packages():
271 """Lists packages present in the cogs the folder"""
272 return os.listdir("cogs")
273
274 async def save_packages_status(self, packages):
275 await self.db.packages.set(packages)
276
277 async def add_loaded_package(self, pkg_name: str):
278 async with self.db.packages() as curr_pkgs:
279 if pkg_name not in curr_pkgs:
280 curr_pkgs.append(pkg_name)
281
282 async def remove_loaded_package(self, pkg_name: str):
283 async with self.db.packages() as curr_pkgs:
284 while pkg_name in curr_pkgs:
285 curr_pkgs.remove(pkg_name)
286
287 async def load_extension(self, spec: ModuleSpec):
288 # NB: this completely bypasses `discord.ext.commands.Bot._load_from_module_spec`
289 name = spec.name.split(".")[-1]
290 if name in self.extensions:
291 raise errors.PackageAlreadyLoaded(spec)
292
293 lib = spec.loader.load_module()
294 if not hasattr(lib, "setup"):
295 del lib
296 raise discord.ClientException(f"extension {name} does not have a setup function")
297
298 try:
299 if asyncio.iscoroutinefunction(lib.setup):
300 await lib.setup(self)
301 else:
302 lib.setup(self)
303 except Exception as e:
304 self._remove_module_references(lib.__name__)
305 self._call_module_finalizers(lib, name)
306 raise
307 else:
308 self._BotBase__extensions[name] = lib
309
310 def remove_cog(self, cogname: str):
311 cog = self.get_cog(cogname)
312 if cog is None:
313 return
314
315 for cls in inspect.getmro(cog.__class__):
316 try:
317 hook = getattr(cog, f"_{cls.__name__}__permissions_hook")
318 except AttributeError:
319 pass
320 else:
321 self.remove_permissions_hook(hook)
322
323 super().remove_cog(cogname)
324
325 cog.requires.reset()
326
327 for meth in self.rpc_handlers.pop(cogname.upper(), ()):
328 self.unregister_rpc_handler(meth)
329
330 async def is_automod_immune(
331 self, to_check: Union[discord.Message, commands.Context, discord.abc.User, discord.Role]
332 ) -> bool:
333 """
334 Checks if the user, message, context, or role should be considered immune from automated
335 moderation actions.
336
337 This will return ``False`` in direct messages.
338
339 Parameters
340 ----------
341 to_check : `discord.Message` or `commands.Context` or `discord.abc.User` or `discord.Role`
342 Something to check if it would be immune
343
344 Returns
345 -------
346 bool
347 ``True`` if immune
348
349 """
350 guild = to_check.guild
351 if not guild:
352 return False
353
354 if isinstance(to_check, discord.Role):
355 ids_to_check = [to_check.id]
356 else:
357 author = getattr(to_check, "author", to_check)
358 try:
359 ids_to_check = [r.id for r in author.roles]
360 except AttributeError:
361 # webhook messages are a user not member,
362 # cheaper than isinstance
363 return True # webhooks require significant permissions to enable.
364 else:
365 ids_to_check.append(author.id)
366
367 immune_ids = await self.db.guild(guild).autoimmune_ids()
368
369 return any(i in immune_ids for i in ids_to_check)
370
371 @staticmethod
372 async def send_filtered(
373 destination: discord.abc.Messageable,
374 filter_mass_mentions=True,
375 filter_invite_links=True,
376 filter_all_links=False,
377 **kwargs,
378 ):
379 """
380 This is a convienience wrapper around
381
382 discord.abc.Messageable.send
383
384 It takes the destination you'd like to send to, which filters to apply
385 (defaults on mass mentions, and invite links) and any other parameters
386 normally accepted by destination.send
387
388 This should realistically only be used for responding using user provided
389 input. (unfortunately, including usernames)
390 Manually crafted messages which dont take any user input have no need of this
391 """
392
393 content = kwargs.pop("content", None)
394
395 if content:
396 if filter_mass_mentions:
397 content = common_filters.filter_mass_mentions(content)
398 if filter_invite_links:
399 content = common_filters.filter_invites(content)
400 if filter_all_links:
401 content = common_filters.filter_urls(content)
402
403 await destination.send(content=content, **kwargs)
404
405 def add_cog(self, cog: commands.Cog):
406 if not isinstance(cog, commands.Cog):
407 raise RuntimeError(
408 f"The {cog.__class__.__name__} cog in the {cog.__module__} package does "
409 f"not inherit from the commands.Cog base class. The cog author must update "
410 f"the cog to adhere to this requirement."
411 )
412 if cog.__cog_name__ in self.cogs:
413 raise RuntimeError(f"There is already a cog named {cog.__cog_name__} loaded.")
414 if not hasattr(cog, "requires"):
415 commands.Cog.__init__(cog)
416
417 added_hooks = []
418
419 try:
420 for cls in inspect.getmro(cog.__class__):
421 try:
422 hook = getattr(cog, f"_{cls.__name__}__permissions_hook")
423 except AttributeError:
424 pass
425 else:
426 self.add_permissions_hook(hook)
427 added_hooks.append(hook)
428
429 super().add_cog(cog)
430 self.dispatch("cog_add", cog)
431 if "permissions" not in self.extensions:
432 cog.requires.ready_event.set()
433 except Exception:
434 for hook in added_hooks:
435 try:
436 self.remove_permissions_hook(hook)
437 except Exception:
438 # This shouldn't be possible
439 log.exception(
440 "A hook got extremely screwed up, "
441 "and could not be removed properly during another error in cog load."
442 )
443 del cog
444 raise
445
446 def add_command(self, command: commands.Command) -> None:
447 if not isinstance(command, commands.Command):
448 raise RuntimeError("Commands must be instances of `redbot.core.commands.Command`")
449
450 super().add_command(command)
451
452 permissions_not_loaded = "permissions" not in self.extensions
453 self.dispatch("command_add", command)
454 if permissions_not_loaded:
455 command.requires.ready_event.set()
456 if isinstance(command, commands.Group):
457 for subcommand in set(command.walk_commands()):
458 self.dispatch("command_add", subcommand)
459 if permissions_not_loaded:
460 subcommand.requires.ready_event.set()
461
462 def remove_command(self, name: str) -> None:
463 command = super().remove_command(name)
464 command.requires.reset()
465 if isinstance(command, commands.Group):
466 for subcommand in set(command.walk_commands()):
467 subcommand.requires.reset()
468
469 def clear_permission_rules(self, guild_id: Optional[int]) -> None:
470 """Clear all permission overrides in a scope.
471
472 Parameters
473 ----------
474 guild_id : Optional[int]
475 The guild ID to wipe permission overrides for. If
476 ``None``, this will clear all global rules and leave all
477 guild rules untouched.
478
479 """
480 for cog in self.cogs.values():
481 cog.requires.clear_all_rules(guild_id)
482 for command in self.walk_commands():
483 command.requires.clear_all_rules(guild_id)
484
485 def add_permissions_hook(self, hook: commands.CheckPredicate) -> None:
486 """Add a permissions hook.
487
488 Permissions hooks are check predicates which are called before
489 calling `Requires.verify`, and they can optionally return an
490 override: ``True`` to allow, ``False`` to deny, and ``None`` to
491 default to normal behaviour.
492
493 Parameters
494 ----------
495 hook
496 A command check predicate which returns ``True``, ``False``
497 or ``None``.
498
499 """
500 self._permissions_hooks.append(hook)
501
502 def remove_permissions_hook(self, hook: commands.CheckPredicate) -> None:
503 """Remove a permissions hook.
504
505 Parameters are the same as those in `add_permissions_hook`.
506
507 Raises
508 ------
509 ValueError
510 If the permissions hook has not been added.
511
512 """
513 self._permissions_hooks.remove(hook)
514
515 async def verify_permissions_hooks(self, ctx: commands.Context) -> Optional[bool]:
516 """Run permissions hooks.
517
518 Parameters
519 ----------
520 ctx : commands.Context
521 The context for the command being invoked.
522
523 Returns
524 -------
525 Optional[bool]
526 ``False`` if any hooks returned ``False``, ``True`` if any
527 hooks return ``True`` and none returned ``False``, ``None``
528 otherwise.
529
530 """
531 hook_results = []
532 for hook in self._permissions_hooks:
533 result = await discord.utils.maybe_coroutine(hook, ctx)
534 if result is not None:
535 hook_results.append(result)
536 if hook_results:
537 if all(hook_results):
538 ctx.permission_state = commands.PermState.ALLOWED_BY_HOOK
539 return True
540 else:
541 ctx.permission_state = commands.PermState.DENIED_BY_HOOK
542 return False
543
544 async def get_owner_notification_destinations(self) -> List[discord.abc.Messageable]:
545 """
546 Gets the users and channels to send to
547 """
548 destinations = []
549 opt_outs = await self.db.owner_opt_out_list()
550 for user_id in (self.owner_id, *self._co_owners):
551 if user_id not in opt_outs:
552 user = self.get_user(user_id)
553 if user:
554 destinations.append(user)
555
556 channel_ids = await self.db.extra_owner_destinations()
557 for channel_id in channel_ids:
558 channel = self.get_channel(channel_id)
559 if channel:
560 destinations.append(channel)
561
562 return destinations
563
564 async def send_to_owners(self, content=None, **kwargs):
565 """
566 This sends something to all owners and their configured extra destinations.
567
568 This takes the same arguments as discord.abc.Messageable.send
569
570 This logs failing sends
571 """
572 destinations = await self.get_owner_notification_destinations()
573
574 async def wrapped_send(location, content=None, **kwargs):
575 try:
576 await location.send(content, **kwargs)
577 except Exception as _exc:
578 log.exception(
579 f"I could not send an owner notification to ({location.id}){location}"
580 )
581
582 sends = [wrapped_send(d, content, **kwargs) for d in destinations]
583 await asyncio.gather(*sends)
584
585
586 class Red(RedBase, discord.AutoShardedClient):
587 """
588 You're welcome Caleb.
589 """
590
591 async def logout(self):
592 """Logs out of Discord and closes all connections."""
593
594 await super().logout()
595
596 async def shutdown(self, *, restart: bool = False):
597 """Gracefully quit Red.
598
599 The program will exit with code :code:`0` by default.
600
601 Parameters
602 ----------
603 restart : bool
604 If :code:`True`, the program will exit with code :code:`26`. If the
605 launcher sees this, it will attempt to restart the bot.
606
607 """
608 if not restart:
609 self._shutdown_mode = ExitCodes.SHUTDOWN
610 else:
611 self._shutdown_mode = ExitCodes.RESTART
612
613 await self.logout()
614
615
616 class ExitCodes(Enum):
617 CRITICAL = 1
618 SHUTDOWN = 0
619 RESTART = 26
620
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/redbot/core/bot.py b/redbot/core/bot.py
--- a/redbot/core/bot.py
+++ b/redbot/core/bot.py
@@ -461,6 +461,8 @@
def remove_command(self, name: str) -> None:
command = super().remove_command(name)
+ if not command:
+ return
command.requires.reset()
if isinstance(command, commands.Group):
for subcommand in set(command.walk_commands()):
| {"golden_diff": "diff --git a/redbot/core/bot.py b/redbot/core/bot.py\n--- a/redbot/core/bot.py\n+++ b/redbot/core/bot.py\n@@ -461,6 +461,8 @@\n \n def remove_command(self, name: str) -> None:\n command = super().remove_command(name)\n+ if not command:\n+ return\n command.requires.reset()\n if isinstance(command, commands.Group):\n for subcommand in set(command.walk_commands()):\n", "issue": "Removing commands on bot load\n# Other bugs\r\n\r\nLoading a cog with bot.remove_command statements in init causes it to error out and not load.\r\n\r\n\r\n\r\n#### What were you trying to do?\r\n\r\nI have a cog that has statements in __init__ like this\r\n```py\r\n bot.remove_command(\"invite\")\r\n bot.remove_command(\"ping\")\r\n bot.remove_command(\"pong\")\r\n bot.remove_command(\"uptime\")\r\n```\r\n\r\n#### What were you expecting to happen?\r\n\r\nThe commands to be unloaded and my command loaded.\r\n\r\n#### What actually happened?\r\n\r\nThe cog does not load. \r\n\r\n```py\r\nTraceback (most recent call last):\r\nFile \"/data/venv/lib/python3.7/site-packages/redbot/core/events.py\", line 71, in on_ready\r\nawait bot.load_extension(spec)\r\nFile \"/data/venv/lib/python3.7/site-packages/redbot/core/bot.py\", line 304, in load_extension\r\nlib.setup(self)\r\nFile \"/data/cogs/CogManager/cogs/general/__init__.py\", line 5, in setup\r\nbot.add_cog(General(bot))\r\nFile \"/data/cogs/CogManager/cogs/general/general.py\", line 39, in __init__\r\nbot.remove_command(\"pong\")\r\nFile \"/data/venv/lib/python3.7/site-packages/redbot/core/bot.py\", line 466, in remove_command\r\ncommand.requires.reset()\r\nAttributeError: 'NoneType' object has no attribute 'requires'\r\n``` \r\n#### How can we reproduce this issue?\r\n\r\nMake a simple cog with this in init\r\n`bot.remove_command(\"ping\")`\r\n\n", "before_files": [{"content": "import asyncio\nimport inspect\nimport os\nimport logging\nfrom collections import Counter\nfrom enum import Enum\nfrom importlib.machinery import ModuleSpec\nfrom pathlib import Path\nfrom typing import Optional, Union, List\n\nimport discord\nimport sys\nfrom discord.ext.commands import when_mentioned_or\n\nfrom . 
import Config, i18n, commands, errors\nfrom .cog_manager import CogManager\n\nfrom .rpc import RPCMixin\nfrom .utils import common_filters\n\nCUSTOM_GROUPS = \"CUSTOM_GROUPS\"\n\nlog = logging.getLogger(\"redbot\")\n\n\ndef _is_submodule(parent, child):\n return parent == child or child.startswith(parent + \".\")\n\n\n# barely spurious warning caused by our intentional shadowing\nclass RedBase(commands.GroupMixin, commands.bot.BotBase, RPCMixin): # pylint: disable=no-member\n \"\"\"Mixin for the main bot class.\n\n This exists because `Red` inherits from `discord.AutoShardedClient`, which\n is something other bot classes may not want to have as a parent class.\n \"\"\"\n\n def __init__(self, *args, cli_flags=None, bot_dir: Path = Path.cwd(), **kwargs):\n self._shutdown_mode = ExitCodes.CRITICAL\n self.db = Config.get_core_conf(force_registration=True)\n self._co_owners = cli_flags.co_owner\n self.rpc_enabled = cli_flags.rpc\n self._last_exception = None\n self.db.register_global(\n token=None,\n prefix=[],\n packages=[],\n owner=None,\n whitelist=[],\n blacklist=[],\n locale=\"en-US\",\n embeds=True,\n color=15158332,\n fuzzy=False,\n custom_info=None,\n help__page_char_limit=1000,\n help__max_pages_in_guild=2,\n help__use_menus=False,\n help__show_hidden=False,\n help__verify_checks=True,\n help__verify_exists=False,\n help__tagline=\"\",\n invite_public=False,\n invite_perm=0,\n disabled_commands=[],\n disabled_command_msg=\"That command is disabled.\",\n api_tokens={},\n extra_owner_destinations=[],\n owner_opt_out_list=[],\n schema_version=0,\n )\n\n self.db.register_guild(\n prefix=[],\n whitelist=[],\n blacklist=[],\n admin_role=[],\n mod_role=[],\n embeds=None,\n use_bot_color=False,\n fuzzy=False,\n disabled_commands=[],\n autoimmune_ids=[],\n )\n\n self.db.register_user(embeds=None)\n\n self.db.init_custom(CUSTOM_GROUPS, 2)\n self.db.register_custom(CUSTOM_GROUPS)\n\n async def prefix_manager(bot, message):\n if not cli_flags.prefix:\n global_prefix = await bot.db.prefix()\n else:\n global_prefix = cli_flags.prefix\n if message.guild is None:\n return global_prefix\n server_prefix = await bot.db.guild(message.guild).prefix()\n if cli_flags.mentionable:\n return (\n when_mentioned_or(*server_prefix)(bot, message)\n if server_prefix\n else when_mentioned_or(*global_prefix)(bot, message)\n )\n else:\n return server_prefix if server_prefix else global_prefix\n\n if \"command_prefix\" not in kwargs:\n kwargs[\"command_prefix\"] = prefix_manager\n\n if cli_flags.owner and \"owner_id\" not in kwargs:\n kwargs[\"owner_id\"] = cli_flags.owner\n\n if \"owner_id\" not in kwargs:\n loop = asyncio.get_event_loop()\n loop.run_until_complete(self._dict_abuse(kwargs))\n\n if \"command_not_found\" not in kwargs:\n kwargs[\"command_not_found\"] = \"Command {} not found.\\n{}\"\n\n self.counter = Counter()\n self.uptime = None\n self.checked_time_accuracy = None\n self.color = discord.Embed.Empty # This is needed or color ends up 0x000000\n\n self.main_dir = bot_dir\n\n self.cog_mgr = CogManager()\n\n super().__init__(*args, help_command=None, **kwargs)\n # Do not manually use the help formatter attribute here, see `send_help_for`,\n # for a documented API. 
The internals of this object are still subject to change.\n self._help_formatter = commands.help.RedHelpFormatter()\n self.add_command(commands.help.red_help)\n\n self._permissions_hooks: List[commands.CheckPredicate] = []\n\n async def maybe_update_config(self):\n \"\"\"\n This should be run prior to loading cogs or connecting to discord.\n \"\"\"\n schema_version = await self.db.schema_version()\n\n if schema_version == 0:\n await self._schema_0_to_1()\n schema_version += 1\n await self.db.schema_version.set(schema_version)\n\n async def _schema_0_to_1(self):\n \"\"\"\n This contains the migration to allow multiple mod and multiple admin roles.\n \"\"\"\n\n log.info(\"Begin updating guild configs to support multiple mod/admin roles\")\n all_guild_data = await self.db.all_guilds()\n for guild_id, guild_data in all_guild_data.items():\n guild_obj = discord.Object(id=guild_id)\n mod_roles, admin_roles = [], []\n maybe_mod_role_id = guild_data[\"mod_role\"]\n maybe_admin_role_id = guild_data[\"admin_role\"]\n\n if maybe_mod_role_id:\n mod_roles.append(maybe_mod_role_id)\n await self.db.guild(guild_obj).mod_role.set(mod_roles)\n if maybe_admin_role_id:\n admin_roles.append(maybe_admin_role_id)\n await self.db.guild(guild_obj).admin_role.set(admin_roles)\n log.info(\"Done updating guild configs to support multiple mod/admin roles\")\n\n async def send_help_for(\n self, ctx: commands.Context, help_for: Union[commands.Command, commands.GroupMixin, str]\n ):\n \"\"\"\n Invokes Red's helpformatter for a given context and object.\n \"\"\"\n return await self._help_formatter.send_help(ctx, help_for)\n\n async def _dict_abuse(self, indict):\n \"\"\"\n Please blame <@269933075037814786> for this.\n\n :param indict:\n :return:\n \"\"\"\n\n indict[\"owner_id\"] = await self.db.owner()\n i18n.set_locale(await self.db.locale())\n\n async def embed_requested(self, channel, user, command=None) -> bool:\n \"\"\"\n Determine if an embed is requested for a response.\n\n Parameters\n ----------\n channel : `discord.abc.GuildChannel` or `discord.abc.PrivateChannel`\n The channel to check embed settings for.\n user : `discord.abc.User`\n The user to check embed settings for.\n command\n (Optional) the command ran.\n\n Returns\n -------\n bool\n :code:`True` if an embed is requested\n \"\"\"\n if isinstance(channel, discord.abc.PrivateChannel) or (\n command and command == self.get_command(\"help\")\n ):\n user_setting = await self.db.user(user).embeds()\n if user_setting is not None:\n return user_setting\n else:\n guild_setting = await self.db.guild(channel.guild).embeds()\n if guild_setting is not None:\n return guild_setting\n global_setting = await self.db.embeds()\n return global_setting\n\n async def is_owner(self, user):\n if user.id in self._co_owners:\n return True\n return await super().is_owner(user)\n\n async def is_admin(self, member: discord.Member):\n \"\"\"Checks if a member is an admin of their guild.\"\"\"\n try:\n member_snowflakes = member._roles # DEP-WARN\n for snowflake in await self.db.guild(member.guild).admin_role():\n if member_snowflakes.has(snowflake): # Dep-WARN\n return True\n except AttributeError: # someone passed a webhook to this\n pass\n return False\n\n async def is_mod(self, member: discord.Member):\n \"\"\"Checks if a member is a mod or admin of their guild.\"\"\"\n try:\n member_snowflakes = member._roles # DEP-WARN\n for snowflake in await self.db.guild(member.guild).admin_role():\n if member_snowflakes.has(snowflake): # DEP-WARN\n return True\n for snowflake in await 
self.db.guild(member.guild).mod_role():\n if member_snowflakes.has(snowflake): # DEP-WARN\n return True\n except AttributeError: # someone passed a webhook to this\n pass\n return False\n\n async def get_context(self, message, *, cls=commands.Context):\n return await super().get_context(message, cls=cls)\n\n async def process_commands(self, message: discord.Message):\n \"\"\"\n Same as base method, but dispatches an additional event for cogs\n which want to handle normal messages differently to command\n messages, without the overhead of additional get_context calls\n per cog.\n \"\"\"\n if not message.author.bot:\n ctx = await self.get_context(message)\n await self.invoke(ctx)\n else:\n ctx = None\n\n if ctx is None or ctx.valid is False:\n self.dispatch(\"message_without_command\", message)\n\n @staticmethod\n def list_packages():\n \"\"\"Lists packages present in the cogs the folder\"\"\"\n return os.listdir(\"cogs\")\n\n async def save_packages_status(self, packages):\n await self.db.packages.set(packages)\n\n async def add_loaded_package(self, pkg_name: str):\n async with self.db.packages() as curr_pkgs:\n if pkg_name not in curr_pkgs:\n curr_pkgs.append(pkg_name)\n\n async def remove_loaded_package(self, pkg_name: str):\n async with self.db.packages() as curr_pkgs:\n while pkg_name in curr_pkgs:\n curr_pkgs.remove(pkg_name)\n\n async def load_extension(self, spec: ModuleSpec):\n # NB: this completely bypasses `discord.ext.commands.Bot._load_from_module_spec`\n name = spec.name.split(\".\")[-1]\n if name in self.extensions:\n raise errors.PackageAlreadyLoaded(spec)\n\n lib = spec.loader.load_module()\n if not hasattr(lib, \"setup\"):\n del lib\n raise discord.ClientException(f\"extension {name} does not have a setup function\")\n\n try:\n if asyncio.iscoroutinefunction(lib.setup):\n await lib.setup(self)\n else:\n lib.setup(self)\n except Exception as e:\n self._remove_module_references(lib.__name__)\n self._call_module_finalizers(lib, name)\n raise\n else:\n self._BotBase__extensions[name] = lib\n\n def remove_cog(self, cogname: str):\n cog = self.get_cog(cogname)\n if cog is None:\n return\n\n for cls in inspect.getmro(cog.__class__):\n try:\n hook = getattr(cog, f\"_{cls.__name__}__permissions_hook\")\n except AttributeError:\n pass\n else:\n self.remove_permissions_hook(hook)\n\n super().remove_cog(cogname)\n\n cog.requires.reset()\n\n for meth in self.rpc_handlers.pop(cogname.upper(), ()):\n self.unregister_rpc_handler(meth)\n\n async def is_automod_immune(\n self, to_check: Union[discord.Message, commands.Context, discord.abc.User, discord.Role]\n ) -> bool:\n \"\"\"\n Checks if the user, message, context, or role should be considered immune from automated\n moderation actions.\n\n This will return ``False`` in direct messages.\n\n Parameters\n ----------\n to_check : `discord.Message` or `commands.Context` or `discord.abc.User` or `discord.Role`\n Something to check if it would be immune\n\n Returns\n -------\n bool\n ``True`` if immune\n\n \"\"\"\n guild = to_check.guild\n if not guild:\n return False\n\n if isinstance(to_check, discord.Role):\n ids_to_check = [to_check.id]\n else:\n author = getattr(to_check, \"author\", to_check)\n try:\n ids_to_check = [r.id for r in author.roles]\n except AttributeError:\n # webhook messages are a user not member,\n # cheaper than isinstance\n return True # webhooks require significant permissions to enable.\n else:\n ids_to_check.append(author.id)\n\n immune_ids = await self.db.guild(guild).autoimmune_ids()\n\n return any(i in immune_ids 
for i in ids_to_check)\n\n @staticmethod\n async def send_filtered(\n destination: discord.abc.Messageable,\n filter_mass_mentions=True,\n filter_invite_links=True,\n filter_all_links=False,\n **kwargs,\n ):\n \"\"\"\n This is a convienience wrapper around\n\n discord.abc.Messageable.send\n\n It takes the destination you'd like to send to, which filters to apply\n (defaults on mass mentions, and invite links) and any other parameters\n normally accepted by destination.send\n\n This should realistically only be used for responding using user provided\n input. (unfortunately, including usernames)\n Manually crafted messages which dont take any user input have no need of this\n \"\"\"\n\n content = kwargs.pop(\"content\", None)\n\n if content:\n if filter_mass_mentions:\n content = common_filters.filter_mass_mentions(content)\n if filter_invite_links:\n content = common_filters.filter_invites(content)\n if filter_all_links:\n content = common_filters.filter_urls(content)\n\n await destination.send(content=content, **kwargs)\n\n def add_cog(self, cog: commands.Cog):\n if not isinstance(cog, commands.Cog):\n raise RuntimeError(\n f\"The {cog.__class__.__name__} cog in the {cog.__module__} package does \"\n f\"not inherit from the commands.Cog base class. The cog author must update \"\n f\"the cog to adhere to this requirement.\"\n )\n if cog.__cog_name__ in self.cogs:\n raise RuntimeError(f\"There is already a cog named {cog.__cog_name__} loaded.\")\n if not hasattr(cog, \"requires\"):\n commands.Cog.__init__(cog)\n\n added_hooks = []\n\n try:\n for cls in inspect.getmro(cog.__class__):\n try:\n hook = getattr(cog, f\"_{cls.__name__}__permissions_hook\")\n except AttributeError:\n pass\n else:\n self.add_permissions_hook(hook)\n added_hooks.append(hook)\n\n super().add_cog(cog)\n self.dispatch(\"cog_add\", cog)\n if \"permissions\" not in self.extensions:\n cog.requires.ready_event.set()\n except Exception:\n for hook in added_hooks:\n try:\n self.remove_permissions_hook(hook)\n except Exception:\n # This shouldn't be possible\n log.exception(\n \"A hook got extremely screwed up, \"\n \"and could not be removed properly during another error in cog load.\"\n )\n del cog\n raise\n\n def add_command(self, command: commands.Command) -> None:\n if not isinstance(command, commands.Command):\n raise RuntimeError(\"Commands must be instances of `redbot.core.commands.Command`\")\n\n super().add_command(command)\n\n permissions_not_loaded = \"permissions\" not in self.extensions\n self.dispatch(\"command_add\", command)\n if permissions_not_loaded:\n command.requires.ready_event.set()\n if isinstance(command, commands.Group):\n for subcommand in set(command.walk_commands()):\n self.dispatch(\"command_add\", subcommand)\n if permissions_not_loaded:\n subcommand.requires.ready_event.set()\n\n def remove_command(self, name: str) -> None:\n command = super().remove_command(name)\n command.requires.reset()\n if isinstance(command, commands.Group):\n for subcommand in set(command.walk_commands()):\n subcommand.requires.reset()\n\n def clear_permission_rules(self, guild_id: Optional[int]) -> None:\n \"\"\"Clear all permission overrides in a scope.\n\n Parameters\n ----------\n guild_id : Optional[int]\n The guild ID to wipe permission overrides for. 
If\n ``None``, this will clear all global rules and leave all\n guild rules untouched.\n\n \"\"\"\n for cog in self.cogs.values():\n cog.requires.clear_all_rules(guild_id)\n for command in self.walk_commands():\n command.requires.clear_all_rules(guild_id)\n\n def add_permissions_hook(self, hook: commands.CheckPredicate) -> None:\n \"\"\"Add a permissions hook.\n\n Permissions hooks are check predicates which are called before\n calling `Requires.verify`, and they can optionally return an\n override: ``True`` to allow, ``False`` to deny, and ``None`` to\n default to normal behaviour.\n\n Parameters\n ----------\n hook\n A command check predicate which returns ``True``, ``False``\n or ``None``.\n\n \"\"\"\n self._permissions_hooks.append(hook)\n\n def remove_permissions_hook(self, hook: commands.CheckPredicate) -> None:\n \"\"\"Remove a permissions hook.\n\n Parameters are the same as those in `add_permissions_hook`.\n\n Raises\n ------\n ValueError\n If the permissions hook has not been added.\n\n \"\"\"\n self._permissions_hooks.remove(hook)\n\n async def verify_permissions_hooks(self, ctx: commands.Context) -> Optional[bool]:\n \"\"\"Run permissions hooks.\n\n Parameters\n ----------\n ctx : commands.Context\n The context for the command being invoked.\n\n Returns\n -------\n Optional[bool]\n ``False`` if any hooks returned ``False``, ``True`` if any\n hooks return ``True`` and none returned ``False``, ``None``\n otherwise.\n\n \"\"\"\n hook_results = []\n for hook in self._permissions_hooks:\n result = await discord.utils.maybe_coroutine(hook, ctx)\n if result is not None:\n hook_results.append(result)\n if hook_results:\n if all(hook_results):\n ctx.permission_state = commands.PermState.ALLOWED_BY_HOOK\n return True\n else:\n ctx.permission_state = commands.PermState.DENIED_BY_HOOK\n return False\n\n async def get_owner_notification_destinations(self) -> List[discord.abc.Messageable]:\n \"\"\"\n Gets the users and channels to send to\n \"\"\"\n destinations = []\n opt_outs = await self.db.owner_opt_out_list()\n for user_id in (self.owner_id, *self._co_owners):\n if user_id not in opt_outs:\n user = self.get_user(user_id)\n if user:\n destinations.append(user)\n\n channel_ids = await self.db.extra_owner_destinations()\n for channel_id in channel_ids:\n channel = self.get_channel(channel_id)\n if channel:\n destinations.append(channel)\n\n return destinations\n\n async def send_to_owners(self, content=None, **kwargs):\n \"\"\"\n This sends something to all owners and their configured extra destinations.\n\n This takes the same arguments as discord.abc.Messageable.send\n\n This logs failing sends\n \"\"\"\n destinations = await self.get_owner_notification_destinations()\n\n async def wrapped_send(location, content=None, **kwargs):\n try:\n await location.send(content, **kwargs)\n except Exception as _exc:\n log.exception(\n f\"I could not send an owner notification to ({location.id}){location}\"\n )\n\n sends = [wrapped_send(d, content, **kwargs) for d in destinations]\n await asyncio.gather(*sends)\n\n\nclass Red(RedBase, discord.AutoShardedClient):\n \"\"\"\n You're welcome Caleb.\n \"\"\"\n\n async def logout(self):\n \"\"\"Logs out of Discord and closes all connections.\"\"\"\n\n await super().logout()\n\n async def shutdown(self, *, restart: bool = False):\n \"\"\"Gracefully quit Red.\n\n The program will exit with code :code:`0` by default.\n\n Parameters\n ----------\n restart : bool\n If :code:`True`, the program will exit with code :code:`26`. 
If the\n launcher sees this, it will attempt to restart the bot.\n\n \"\"\"\n if not restart:\n self._shutdown_mode = ExitCodes.SHUTDOWN\n else:\n self._shutdown_mode = ExitCodes.RESTART\n\n await self.logout()\n\n\nclass ExitCodes(Enum):\n CRITICAL = 1\n SHUTDOWN = 0\n RESTART = 26\n", "path": "redbot/core/bot.py"}], "after_files": [{"content": "import asyncio\nimport inspect\nimport os\nimport logging\nfrom collections import Counter\nfrom enum import Enum\nfrom importlib.machinery import ModuleSpec\nfrom pathlib import Path\nfrom typing import Optional, Union, List\n\nimport discord\nimport sys\nfrom discord.ext.commands import when_mentioned_or\n\nfrom . import Config, i18n, commands, errors\nfrom .cog_manager import CogManager\n\nfrom .rpc import RPCMixin\nfrom .utils import common_filters\n\nCUSTOM_GROUPS = \"CUSTOM_GROUPS\"\n\nlog = logging.getLogger(\"redbot\")\n\n\ndef _is_submodule(parent, child):\n return parent == child or child.startswith(parent + \".\")\n\n\n# barely spurious warning caused by our intentional shadowing\nclass RedBase(commands.GroupMixin, commands.bot.BotBase, RPCMixin): # pylint: disable=no-member\n \"\"\"Mixin for the main bot class.\n\n This exists because `Red` inherits from `discord.AutoShardedClient`, which\n is something other bot classes may not want to have as a parent class.\n \"\"\"\n\n def __init__(self, *args, cli_flags=None, bot_dir: Path = Path.cwd(), **kwargs):\n self._shutdown_mode = ExitCodes.CRITICAL\n self.db = Config.get_core_conf(force_registration=True)\n self._co_owners = cli_flags.co_owner\n self.rpc_enabled = cli_flags.rpc\n self._last_exception = None\n self.db.register_global(\n token=None,\n prefix=[],\n packages=[],\n owner=None,\n whitelist=[],\n blacklist=[],\n locale=\"en-US\",\n embeds=True,\n color=15158332,\n fuzzy=False,\n custom_info=None,\n help__page_char_limit=1000,\n help__max_pages_in_guild=2,\n help__use_menus=False,\n help__show_hidden=False,\n help__verify_checks=True,\n help__verify_exists=False,\n help__tagline=\"\",\n invite_public=False,\n invite_perm=0,\n disabled_commands=[],\n disabled_command_msg=\"That command is disabled.\",\n api_tokens={},\n extra_owner_destinations=[],\n owner_opt_out_list=[],\n schema_version=0,\n )\n\n self.db.register_guild(\n prefix=[],\n whitelist=[],\n blacklist=[],\n admin_role=[],\n mod_role=[],\n embeds=None,\n use_bot_color=False,\n fuzzy=False,\n disabled_commands=[],\n autoimmune_ids=[],\n )\n\n self.db.register_user(embeds=None)\n\n self.db.init_custom(CUSTOM_GROUPS, 2)\n self.db.register_custom(CUSTOM_GROUPS)\n\n async def prefix_manager(bot, message):\n if not cli_flags.prefix:\n global_prefix = await bot.db.prefix()\n else:\n global_prefix = cli_flags.prefix\n if message.guild is None:\n return global_prefix\n server_prefix = await bot.db.guild(message.guild).prefix()\n if cli_flags.mentionable:\n return (\n when_mentioned_or(*server_prefix)(bot, message)\n if server_prefix\n else when_mentioned_or(*global_prefix)(bot, message)\n )\n else:\n return server_prefix if server_prefix else global_prefix\n\n if \"command_prefix\" not in kwargs:\n kwargs[\"command_prefix\"] = prefix_manager\n\n if cli_flags.owner and \"owner_id\" not in kwargs:\n kwargs[\"owner_id\"] = cli_flags.owner\n\n if \"owner_id\" not in kwargs:\n loop = asyncio.get_event_loop()\n loop.run_until_complete(self._dict_abuse(kwargs))\n\n if \"command_not_found\" not in kwargs:\n kwargs[\"command_not_found\"] = \"Command {} not found.\\n{}\"\n\n self.counter = Counter()\n self.uptime = None\n 
self.checked_time_accuracy = None\n self.color = discord.Embed.Empty # This is needed or color ends up 0x000000\n\n self.main_dir = bot_dir\n\n self.cog_mgr = CogManager()\n\n super().__init__(*args, help_command=None, **kwargs)\n # Do not manually use the help formatter attribute here, see `send_help_for`,\n # for a documented API. The internals of this object are still subject to change.\n self._help_formatter = commands.help.RedHelpFormatter()\n self.add_command(commands.help.red_help)\n\n self._permissions_hooks: List[commands.CheckPredicate] = []\n\n async def maybe_update_config(self):\n \"\"\"\n This should be run prior to loading cogs or connecting to discord.\n \"\"\"\n schema_version = await self.db.schema_version()\n\n if schema_version == 0:\n await self._schema_0_to_1()\n schema_version += 1\n await self.db.schema_version.set(schema_version)\n\n async def _schema_0_to_1(self):\n \"\"\"\n This contains the migration to allow multiple mod and multiple admin roles.\n \"\"\"\n\n log.info(\"Begin updating guild configs to support multiple mod/admin roles\")\n all_guild_data = await self.db.all_guilds()\n for guild_id, guild_data in all_guild_data.items():\n guild_obj = discord.Object(id=guild_id)\n mod_roles, admin_roles = [], []\n maybe_mod_role_id = guild_data[\"mod_role\"]\n maybe_admin_role_id = guild_data[\"admin_role\"]\n\n if maybe_mod_role_id:\n mod_roles.append(maybe_mod_role_id)\n await self.db.guild(guild_obj).mod_role.set(mod_roles)\n if maybe_admin_role_id:\n admin_roles.append(maybe_admin_role_id)\n await self.db.guild(guild_obj).admin_role.set(admin_roles)\n log.info(\"Done updating guild configs to support multiple mod/admin roles\")\n\n async def send_help_for(\n self, ctx: commands.Context, help_for: Union[commands.Command, commands.GroupMixin, str]\n ):\n \"\"\"\n Invokes Red's helpformatter for a given context and object.\n \"\"\"\n return await self._help_formatter.send_help(ctx, help_for)\n\n async def _dict_abuse(self, indict):\n \"\"\"\n Please blame <@269933075037814786> for this.\n\n :param indict:\n :return:\n \"\"\"\n\n indict[\"owner_id\"] = await self.db.owner()\n i18n.set_locale(await self.db.locale())\n\n async def embed_requested(self, channel, user, command=None) -> bool:\n \"\"\"\n Determine if an embed is requested for a response.\n\n Parameters\n ----------\n channel : `discord.abc.GuildChannel` or `discord.abc.PrivateChannel`\n The channel to check embed settings for.\n user : `discord.abc.User`\n The user to check embed settings for.\n command\n (Optional) the command ran.\n\n Returns\n -------\n bool\n :code:`True` if an embed is requested\n \"\"\"\n if isinstance(channel, discord.abc.PrivateChannel) or (\n command and command == self.get_command(\"help\")\n ):\n user_setting = await self.db.user(user).embeds()\n if user_setting is not None:\n return user_setting\n else:\n guild_setting = await self.db.guild(channel.guild).embeds()\n if guild_setting is not None:\n return guild_setting\n global_setting = await self.db.embeds()\n return global_setting\n\n async def is_owner(self, user):\n if user.id in self._co_owners:\n return True\n return await super().is_owner(user)\n\n async def is_admin(self, member: discord.Member):\n \"\"\"Checks if a member is an admin of their guild.\"\"\"\n try:\n member_snowflakes = member._roles # DEP-WARN\n for snowflake in await self.db.guild(member.guild).admin_role():\n if member_snowflakes.has(snowflake): # Dep-WARN\n return True\n except AttributeError: # someone passed a webhook to this\n pass\n return 
False\n\n async def is_mod(self, member: discord.Member):\n \"\"\"Checks if a member is a mod or admin of their guild.\"\"\"\n try:\n member_snowflakes = member._roles # DEP-WARN\n for snowflake in await self.db.guild(member.guild).admin_role():\n if member_snowflakes.has(snowflake): # DEP-WARN\n return True\n for snowflake in await self.db.guild(member.guild).mod_role():\n if member_snowflakes.has(snowflake): # DEP-WARN\n return True\n except AttributeError: # someone passed a webhook to this\n pass\n return False\n\n async def get_context(self, message, *, cls=commands.Context):\n return await super().get_context(message, cls=cls)\n\n async def process_commands(self, message: discord.Message):\n \"\"\"\n Same as base method, but dispatches an additional event for cogs\n which want to handle normal messages differently to command\n messages, without the overhead of additional get_context calls\n per cog.\n \"\"\"\n if not message.author.bot:\n ctx = await self.get_context(message)\n await self.invoke(ctx)\n else:\n ctx = None\n\n if ctx is None or ctx.valid is False:\n self.dispatch(\"message_without_command\", message)\n\n @staticmethod\n def list_packages():\n \"\"\"Lists packages present in the cogs the folder\"\"\"\n return os.listdir(\"cogs\")\n\n async def save_packages_status(self, packages):\n await self.db.packages.set(packages)\n\n async def add_loaded_package(self, pkg_name: str):\n async with self.db.packages() as curr_pkgs:\n if pkg_name not in curr_pkgs:\n curr_pkgs.append(pkg_name)\n\n async def remove_loaded_package(self, pkg_name: str):\n async with self.db.packages() as curr_pkgs:\n while pkg_name in curr_pkgs:\n curr_pkgs.remove(pkg_name)\n\n async def load_extension(self, spec: ModuleSpec):\n # NB: this completely bypasses `discord.ext.commands.Bot._load_from_module_spec`\n name = spec.name.split(\".\")[-1]\n if name in self.extensions:\n raise errors.PackageAlreadyLoaded(spec)\n\n lib = spec.loader.load_module()\n if not hasattr(lib, \"setup\"):\n del lib\n raise discord.ClientException(f\"extension {name} does not have a setup function\")\n\n try:\n if asyncio.iscoroutinefunction(lib.setup):\n await lib.setup(self)\n else:\n lib.setup(self)\n except Exception as e:\n self._remove_module_references(lib.__name__)\n self._call_module_finalizers(lib, name)\n raise\n else:\n self._BotBase__extensions[name] = lib\n\n def remove_cog(self, cogname: str):\n cog = self.get_cog(cogname)\n if cog is None:\n return\n\n for cls in inspect.getmro(cog.__class__):\n try:\n hook = getattr(cog, f\"_{cls.__name__}__permissions_hook\")\n except AttributeError:\n pass\n else:\n self.remove_permissions_hook(hook)\n\n super().remove_cog(cogname)\n\n cog.requires.reset()\n\n for meth in self.rpc_handlers.pop(cogname.upper(), ()):\n self.unregister_rpc_handler(meth)\n\n async def is_automod_immune(\n self, to_check: Union[discord.Message, commands.Context, discord.abc.User, discord.Role]\n ) -> bool:\n \"\"\"\n Checks if the user, message, context, or role should be considered immune from automated\n moderation actions.\n\n This will return ``False`` in direct messages.\n\n Parameters\n ----------\n to_check : `discord.Message` or `commands.Context` or `discord.abc.User` or `discord.Role`\n Something to check if it would be immune\n\n Returns\n -------\n bool\n ``True`` if immune\n\n \"\"\"\n guild = to_check.guild\n if not guild:\n return False\n\n if isinstance(to_check, discord.Role):\n ids_to_check = [to_check.id]\n else:\n author = getattr(to_check, \"author\", to_check)\n try:\n 
ids_to_check = [r.id for r in author.roles]\n except AttributeError:\n # webhook messages are a user not member,\n # cheaper than isinstance\n return True # webhooks require significant permissions to enable.\n else:\n ids_to_check.append(author.id)\n\n immune_ids = await self.db.guild(guild).autoimmune_ids()\n\n return any(i in immune_ids for i in ids_to_check)\n\n @staticmethod\n async def send_filtered(\n destination: discord.abc.Messageable,\n filter_mass_mentions=True,\n filter_invite_links=True,\n filter_all_links=False,\n **kwargs,\n ):\n \"\"\"\n This is a convienience wrapper around\n\n discord.abc.Messageable.send\n\n It takes the destination you'd like to send to, which filters to apply\n (defaults on mass mentions, and invite links) and any other parameters\n normally accepted by destination.send\n\n This should realistically only be used for responding using user provided\n input. (unfortunately, including usernames)\n Manually crafted messages which dont take any user input have no need of this\n \"\"\"\n\n content = kwargs.pop(\"content\", None)\n\n if content:\n if filter_mass_mentions:\n content = common_filters.filter_mass_mentions(content)\n if filter_invite_links:\n content = common_filters.filter_invites(content)\n if filter_all_links:\n content = common_filters.filter_urls(content)\n\n await destination.send(content=content, **kwargs)\n\n def add_cog(self, cog: commands.Cog):\n if not isinstance(cog, commands.Cog):\n raise RuntimeError(\n f\"The {cog.__class__.__name__} cog in the {cog.__module__} package does \"\n f\"not inherit from the commands.Cog base class. The cog author must update \"\n f\"the cog to adhere to this requirement.\"\n )\n if cog.__cog_name__ in self.cogs:\n raise RuntimeError(f\"There is already a cog named {cog.__cog_name__} loaded.\")\n if not hasattr(cog, \"requires\"):\n commands.Cog.__init__(cog)\n\n added_hooks = []\n\n try:\n for cls in inspect.getmro(cog.__class__):\n try:\n hook = getattr(cog, f\"_{cls.__name__}__permissions_hook\")\n except AttributeError:\n pass\n else:\n self.add_permissions_hook(hook)\n added_hooks.append(hook)\n\n super().add_cog(cog)\n self.dispatch(\"cog_add\", cog)\n if \"permissions\" not in self.extensions:\n cog.requires.ready_event.set()\n except Exception:\n for hook in added_hooks:\n try:\n self.remove_permissions_hook(hook)\n except Exception:\n # This shouldn't be possible\n log.exception(\n \"A hook got extremely screwed up, \"\n \"and could not be removed properly during another error in cog load.\"\n )\n del cog\n raise\n\n def add_command(self, command: commands.Command) -> None:\n if not isinstance(command, commands.Command):\n raise RuntimeError(\"Commands must be instances of `redbot.core.commands.Command`\")\n\n super().add_command(command)\n\n permissions_not_loaded = \"permissions\" not in self.extensions\n self.dispatch(\"command_add\", command)\n if permissions_not_loaded:\n command.requires.ready_event.set()\n if isinstance(command, commands.Group):\n for subcommand in set(command.walk_commands()):\n self.dispatch(\"command_add\", subcommand)\n if permissions_not_loaded:\n subcommand.requires.ready_event.set()\n\n def remove_command(self, name: str) -> None:\n command = super().remove_command(name)\n if not command:\n return\n command.requires.reset()\n if isinstance(command, commands.Group):\n for subcommand in set(command.walk_commands()):\n subcommand.requires.reset()\n\n def clear_permission_rules(self, guild_id: Optional[int]) -> None:\n \"\"\"Clear all permission overrides in a 
scope.\n\n Parameters\n ----------\n guild_id : Optional[int]\n The guild ID to wipe permission overrides for. If\n ``None``, this will clear all global rules and leave all\n guild rules untouched.\n\n \"\"\"\n for cog in self.cogs.values():\n cog.requires.clear_all_rules(guild_id)\n for command in self.walk_commands():\n command.requires.clear_all_rules(guild_id)\n\n def add_permissions_hook(self, hook: commands.CheckPredicate) -> None:\n \"\"\"Add a permissions hook.\n\n Permissions hooks are check predicates which are called before\n calling `Requires.verify`, and they can optionally return an\n override: ``True`` to allow, ``False`` to deny, and ``None`` to\n default to normal behaviour.\n\n Parameters\n ----------\n hook\n A command check predicate which returns ``True``, ``False``\n or ``None``.\n\n \"\"\"\n self._permissions_hooks.append(hook)\n\n def remove_permissions_hook(self, hook: commands.CheckPredicate) -> None:\n \"\"\"Remove a permissions hook.\n\n Parameters are the same as those in `add_permissions_hook`.\n\n Raises\n ------\n ValueError\n If the permissions hook has not been added.\n\n \"\"\"\n self._permissions_hooks.remove(hook)\n\n async def verify_permissions_hooks(self, ctx: commands.Context) -> Optional[bool]:\n \"\"\"Run permissions hooks.\n\n Parameters\n ----------\n ctx : commands.Context\n The context for the command being invoked.\n\n Returns\n -------\n Optional[bool]\n ``False`` if any hooks returned ``False``, ``True`` if any\n hooks return ``True`` and none returned ``False``, ``None``\n otherwise.\n\n \"\"\"\n hook_results = []\n for hook in self._permissions_hooks:\n result = await discord.utils.maybe_coroutine(hook, ctx)\n if result is not None:\n hook_results.append(result)\n if hook_results:\n if all(hook_results):\n ctx.permission_state = commands.PermState.ALLOWED_BY_HOOK\n return True\n else:\n ctx.permission_state = commands.PermState.DENIED_BY_HOOK\n return False\n\n async def get_owner_notification_destinations(self) -> List[discord.abc.Messageable]:\n \"\"\"\n Gets the users and channels to send to\n \"\"\"\n destinations = []\n opt_outs = await self.db.owner_opt_out_list()\n for user_id in (self.owner_id, *self._co_owners):\n if user_id not in opt_outs:\n user = self.get_user(user_id)\n if user:\n destinations.append(user)\n\n channel_ids = await self.db.extra_owner_destinations()\n for channel_id in channel_ids:\n channel = self.get_channel(channel_id)\n if channel:\n destinations.append(channel)\n\n return destinations\n\n async def send_to_owners(self, content=None, **kwargs):\n \"\"\"\n This sends something to all owners and their configured extra destinations.\n\n This takes the same arguments as discord.abc.Messageable.send\n\n This logs failing sends\n \"\"\"\n destinations = await self.get_owner_notification_destinations()\n\n async def wrapped_send(location, content=None, **kwargs):\n try:\n await location.send(content, **kwargs)\n except Exception as _exc:\n log.exception(\n f\"I could not send an owner notification to ({location.id}){location}\"\n )\n\n sends = [wrapped_send(d, content, **kwargs) for d in destinations]\n await asyncio.gather(*sends)\n\n\nclass Red(RedBase, discord.AutoShardedClient):\n \"\"\"\n You're welcome Caleb.\n \"\"\"\n\n async def logout(self):\n \"\"\"Logs out of Discord and closes all connections.\"\"\"\n\n await super().logout()\n\n async def shutdown(self, *, restart: bool = False):\n \"\"\"Gracefully quit Red.\n\n The program will exit with code :code:`0` by default.\n\n Parameters\n ----------\n 
restart : bool\n If :code:`True`, the program will exit with code :code:`26`. If the\n launcher sees this, it will attempt to restart the bot.\n\n \"\"\"\n if not restart:\n self._shutdown_mode = ExitCodes.SHUTDOWN\n else:\n self._shutdown_mode = ExitCodes.RESTART\n\n await self.logout()\n\n\nclass ExitCodes(Enum):\n CRITICAL = 1\n SHUTDOWN = 0\n RESTART = 26\n", "path": "redbot/core/bot.py"}]} |
gh_patches_debug_1201 | rasdani/github-patches | git_diff | openedx__ecommerce-348 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Running migrations for Travis builds
We run migrations to ensure no migrations are missing, and they work on fresh installs.
--- END ISSUE ---
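The check described above is usually automated in CI. A minimal sketch of such a check, assuming a standard Django project with `DJANGO_SETTINGS_MODULE` configured (these are general Django management commands, not code from this repository):

```python
# Hedged sketch: verify that no migrations are missing and that they apply
# cleanly on a fresh database. Assumes DJANGO_SETTINGS_MODULE is set.
import django
from django.core.management import call_command

django.setup()

# Fails if any model change has no corresponding migration file.
call_command("makemigrations", check=True, dry_run=True)

# Applies every migration from scratch, as on a fresh install.
call_command("migrate", interactive=False)
```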
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ecommerce/extensions/order/migrations/0003_auto_20150224_1520.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from __future__ import unicode_literals
3
4 from django.db import models, migrations
5
6
7 def create_shipping_event(apps, schema_editor):
8 """
9
10 Create a single new shipping event type that can be applied to an order. This will allow us to initiate order
11 shipment.
12
13 """
14 # Create all our Product Types.
15 ShippingEventType = apps.get_model("order", "ShippingEventType")
16 shipped_event = ShippingEventType(code="shipped", name="Shipped")
17 shipped_event.save()
18
19
20 class Migration(migrations.Migration):
21
22 dependencies = [
23 ('order', '0002_auto_20141007_2032'),
24 ]
25
26 operations = [
27 migrations.RunPython(create_shipping_event),
28 ]
29
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ecommerce/extensions/order/migrations/0003_auto_20150224_1520.py b/ecommerce/extensions/order/migrations/0003_auto_20150224_1520.py
--- a/ecommerce/extensions/order/migrations/0003_auto_20150224_1520.py
+++ b/ecommerce/extensions/order/migrations/0003_auto_20150224_1520.py
@@ -13,8 +13,7 @@
"""
# Create all our Product Types.
ShippingEventType = apps.get_model("order", "ShippingEventType")
- shipped_event = ShippingEventType(code="shipped", name="Shipped")
- shipped_event.save()
+ ShippingEventType.objects.create(code="shipped", name="Shipped")
class Migration(migrations.Migration):
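The patch above replaces the instantiate-then-save pair with a single `objects.create(...)` call on the historical model returned by `apps.get_model`. As a general pattern, the same data migration can also be written reversibly; in the sketch below only the forward function mirrors the patched code, and the reverse function is an illustrative assumption rather than repository code:

```python
# Sketch of a reversible version of the data migration.
from django.db import migrations


def create_shipping_event(apps, schema_editor):
    # apps.get_model returns a "historical" model, so stick to manager and
    # field access; custom model methods are not available here.
    ShippingEventType = apps.get_model("order", "ShippingEventType")
    ShippingEventType.objects.create(code="shipped", name="Shipped")


def remove_shipping_event(apps, schema_editor):
    # Hypothetical reverse step, not part of the original change.
    ShippingEventType = apps.get_model("order", "ShippingEventType")
    ShippingEventType.objects.filter(code="shipped").delete()


class Migration(migrations.Migration):

    dependencies = [
        ('order', '0002_auto_20141007_2032'),
    ]

    operations = [
        migrations.RunPython(create_shipping_event, remove_shipping_event),
    ]
```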
| {"golden_diff": "diff --git a/ecommerce/extensions/order/migrations/0003_auto_20150224_1520.py b/ecommerce/extensions/order/migrations/0003_auto_20150224_1520.py\n--- a/ecommerce/extensions/order/migrations/0003_auto_20150224_1520.py\n+++ b/ecommerce/extensions/order/migrations/0003_auto_20150224_1520.py\n@@ -13,8 +13,7 @@\n \"\"\"\n # Create all our Product Types.\n ShippingEventType = apps.get_model(\"order\", \"ShippingEventType\")\n- shipped_event = ShippingEventType(code=\"shipped\", name=\"Shipped\")\n- shipped_event.save()\n+ ShippingEventType.objects.create(code=\"shipped\", name=\"Shipped\")\n \n \n class Migration(migrations.Migration):\n", "issue": "Running migrations for Travis builds\nWe run migrations to ensure no migrations are missing, and they work on fresh installs.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\ndef create_shipping_event(apps, schema_editor):\n \"\"\"\n\n Create a single new shipping event type that can be applied to an order. This will allow us to initiate order\n shipment.\n\n \"\"\"\n # Create all our Product Types.\n ShippingEventType = apps.get_model(\"order\", \"ShippingEventType\")\n shipped_event = ShippingEventType(code=\"shipped\", name=\"Shipped\")\n shipped_event.save()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('order', '0002_auto_20141007_2032'),\n ]\n\n operations = [\n migrations.RunPython(create_shipping_event),\n ]\n", "path": "ecommerce/extensions/order/migrations/0003_auto_20150224_1520.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\ndef create_shipping_event(apps, schema_editor):\n \"\"\"\n\n Create a single new shipping event type that can be applied to an order. This will allow us to initiate order\n shipment.\n\n \"\"\"\n # Create all our Product Types.\n ShippingEventType = apps.get_model(\"order\", \"ShippingEventType\")\n ShippingEventType.objects.create(code=\"shipped\", name=\"Shipped\")\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('order', '0002_auto_20141007_2032'),\n ]\n\n operations = [\n migrations.RunPython(create_shipping_event),\n ]\n", "path": "ecommerce/extensions/order/migrations/0003_auto_20150224_1520.py"}]} |
gh_patches_debug_1202 | rasdani/github-patches | git_diff | apluslms__a-plus-964 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove the HTML attribute id="exercise" when chapters and exercises are rendered in the A+ frontend
HTML id attributes must be unique. However, currently, the `id="exercise"` is used multiple times. It seems that the chapter content itself has one `<div id="exercise">` and each embedded exercise also has one `<div id="exercise">`.
One approach would be to remove the attribute `id="exercise"` when A-plus retrieves chapter and exercise HTML from the backend. A-plus already manipulates the HTML a bit before rendering it to the web browser.
Another approach is to change the templates in the MOOC-Grader so that the `id="exercise"` is never even included anywhere, but I think it is usually used to pick the correct part of the HTML document retrieved from the backend (Git manager or MOOC-Grader).
Very related to one subtask in https://github.com/apluslms/a-plus/issues/667.
A little related to https://github.com/apluslms/a-plus/issues/593.
Id attribute issue in active elements: https://github.com/apluslms/a-plus/issues/823.
Related code:
* https://github.com/apluslms/a-plus/blob/660f98d0906b05a925fca64c6f2c3906497fc8f6/exercise/protocol/aplus.py#L157-L164
* https://github.com/apluslms/mooc-grader/blob/af39777890fc62805d12963abae994d85e11525b/access/templates/access/exercise_frame.html#L9
* https://github.com/apluslms/a-plus/blob/660f98d0906b05a925fca64c6f2c3906497fc8f6/exercise/templates/exercise/exercise_plain.html#L193
--- END ISSUE ---
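For the first approach described in the issue, stripping the attribute is a small operation in BeautifulSoup, which `lib/remote_page.py` already uses. A minimal sketch (not the project's actual fix, and the helper name is made up):

```python
# Hedged sketch: drop any id="exercise" from HTML retrieved from the backend.
from bs4 import BeautifulSoup


def strip_exercise_ids(html: str) -> str:
    soup = BeautifulSoup(html, "html5lib")
    for tag in soup.find_all(id="exercise"):
        del tag["id"]  # removes only the attribute, the element itself stays
    return str(soup)
```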
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/remote_page.py`
Content:
```
1 import logging
2 import posixpath
3 import re
4 import time
5 from typing import Optional
6 from urllib.parse import urlparse, urljoin
7
8 from bs4 import BeautifulSoup
9 import requests
10 from requests.models import Response
11
12 from aplus_auth.payload import Permission, Permissions
13 from django.conf import settings
14 from django.utils.http import parse_http_date_safe
15 from django.utils.text import format_lazy
16 from django.utils.translation import gettext_lazy as _
17
18 from aplus_auth.requests import post as aplus_post, get as aplus_get
19
20
21 logger = logging.getLogger('aplus.remote_page')
22
23
24 class RemotePageException(Exception):
25 def __init__(self, message, code=500):
26 self.message = message
27 self.code = code
28
29
30 class RemotePageNotFound(RemotePageException):
31 def __init__(self, message):
32 super().__init__(message, 404)
33
34
35 class RemotePageNotModified(Exception):
36
37 def __init__(self, expires=None):
38 self.expires = expires
39
40
41 def parse_expires(response):
42 return parse_http_date_safe(response.headers.get("Expires", "")) or 0
43
44
45 def request_for_response(url,
46 post=False,
47 data=None,
48 files=None,
49 stamp=None,
50 instance_id: Optional[int]=None
51 ) -> Response:
52 permissions = Permissions()
53 if instance_id is not None:
54 if post:
55 permissions.instances.add(Permission.WRITE, id=instance_id)
56 else:
57 permissions.instances.add(Permission.READ, id=instance_id)
58
59 try:
60 last_retry = len(settings.EXERCISE_HTTP_RETRIES) - 1
61 n = 0
62 while n <= last_retry:
63 try:
64 request_time = time.time()
65 if post:
66 logger.info("POST %s", url)
67 response = aplus_post(
68 url,
69 permissions=permissions,
70 data=data,
71 files=files,
72 timeout=settings.EXERCISE_HTTP_TIMEOUT
73 )
74 else:
75 logger.info("GET %s", url)
76 headers = {}
77 if stamp:
78 headers['If-Modified-Since'] = stamp
79 response = aplus_get(
80 url,
81 permissions=permissions,
82 timeout=settings.EXERCISE_HTTP_TIMEOUT,
83 headers=headers
84 )
85 request_time = time.time() - request_time
86 logger.info("Response %d (%d sec) %s",
87 response.status_code, request_time, url)
88 if response.status_code == 200:
89 return response
90 elif response.status_code == 304:
91 raise RemotePageNotModified(parse_expires(response))
92 if response.status_code < 500 or n >= last_retry:
93 response.raise_for_status()
94 except requests.exceptions.ConnectionError as e:
95 logger.warning("ConnectionError %s", url);
96 if n >= last_retry:
97 raise e
98 logger.info("Sleep %d sec before retry",
99 settings.EXERCISE_HTTP_RETRIES[n])
100 time.sleep(settings.EXERCISE_HTTP_RETRIES[n])
101 n += 1
102 logger.error("HTTP request loop ended in unexpected state")
103 raise RuntimeError("HTTP request loop ended in unexpected state")
104 except requests.exceptions.RequestException as e:
105 if e.response is not None and e.response.status_code == 404:
106 raise RemotePageNotFound(_('REQUESTED_RESOURCE_NOT_FOUND_FROM_COURSE_SERVICE'))
107 raise RemotePageException(format_lazy(
108 _('CONNECTING_TO_COURSE_SERVICE_FAILED -- {code}'),
109 code=e.response.status_code if e.response is not None else '-1',
110 )) from e
111
112
113 class RemotePage:
114 """
115 Represents a page that can be loaded over HTTP for further processing.
116 """
117 def __init__(self,
118 url,
119 post=False,
120 data=None,
121 files=None,
122 stamp=None,
123 instance_id: Optional[int] = None,
124 ) -> None:
125 self.url = urlparse(url)
126 self.response = request_for_response(url, post, data, files, stamp, instance_id)
127 self.response.encoding = "utf-8"
128 self.soup = BeautifulSoup(self.response.text, 'html5lib')
129
130 def base_address(self):
131 path = posixpath.dirname(self.url.path).rstrip('/') + '/'
132 url = self.url._replace(path=path, params='', query='', fragment='')
133 if settings.REMOTE_PAGE_HOSTS_MAP:
134 auth, sep, domain = url.netloc.rpartition('@')
135 domain = settings.REMOTE_PAGE_HOSTS_MAP.get(domain, domain)
136 url = url._replace(netloc=auth+sep+domain)
137 return url
138
139 def meta(self, name):
140 if self.soup:
141 element = self.soup.find("meta", {"name": name})
142 if element:
143 return element.get("value",
144 default=element.get("content", default=None))
145 return None
146
147 def header(self, name):
148 return self.response.headers.get(name, "")
149
150 def last_modified(self):
151 return self.header('Last-Modified')
152
153 def expires(self):
154 return parse_expires(self.response)
155
156 def title(self):
157 if self.soup and self.soup.title:
158 return self.soup.title.contents
159 return ""
160
161 def head(self, search_attribute):
162 if self.soup and self.soup.head:
163 return "\n".join(str(tag) for tag in
164 self.soup.head.find_all(True, search_attribute))
165 return ""
166
167 def select_element_or_body(self, search_attributes):
168 if self.soup:
169 for attr in search_attributes:
170 element = self.soup.find(**attr)
171 if element:
172 return element
173 return self.soup.body
174 return None
175
176 def element_or_body(self, search_attributes):
177 element = self.select_element_or_body(search_attributes)
178 return str(element) if element else ""
179
180 def clean_element_or_body(self, search_attributes):
181 element = self.select_element_or_body(search_attributes)
182 if element:
183 for once in element.find_all(True, {'data-aplus-once':True}):
184 once.extract()
185 return str(element) if element else ""
186
187 def body(self):
188 return self.element_or_body([])
189
190 def fix_relative_urls(self):
191 url = self.base_address()
192 for tag,attr in [
193 ("img","src"),
194 ("script","src"),
195 ("iframe","src"),
196 ("link","href"),
197 ("a","href"),
198 ("video","poster"),
199 ("source","src"),
200 ]:
201 self._fix_relative_urls(url, tag, attr)
202
203 def _fix_relative_urls(self, url, tag_name, attr_name):
204 # Starts with "#", "//" or "https:".
205 test = re.compile('^(#|\/\/|\w+:)', re.IGNORECASE)
206 # Ends with filename extension ".html" and possibly "#anchor".
207 chapter = re.compile('.*\.html(#.+)?$', re.IGNORECASE)
208 # Starts with at least one "../".
209 start_dotdot_path = re.compile(r"^(../)+")
210 # May end with the language suffix _en or _en/#anchor or _en#anchor.
211 lang_suffix = re.compile(r'(?P<lang>_[a-z]{2})?(?P<slash>/)?(?P<anchor>#.+)?$')
212 # Detect certain A+ exercise info URLs so that they are not broken by
213 # the transformations: "../../module1/chapter/module1_chapter_exercise/info/model/".
214 # URLs /plain, /info, /info/model, /info/template.
215 exercise_info = re.compile(r'/((plain)|(info(/model|/template)?))/?(#.+)?$')
216
217 for element in self.soup.find_all(tag_name, {attr_name:True}):
218 value = element[attr_name]
219 if not value:
220 continue
221
222 # Custom transform for RST chapter to chapter links.
223 if element.has_attr('data-aplus-chapter'):
224 m = chapter.match(value)
225 if m:
226 i = m.start(1)
227 if i > 0:
228 without_html_suffix = value[:i-5] + value[i:] # Keep #anchor in the end.
229 else:
230 without_html_suffix = value[:-5]
231 elif not value.startswith('/'):
232 without_html_suffix = value
233 else:
234 continue
235 # Remove all ../ from the start and prepend exactly "../../".
236 # a-plus-rst-tools modifies chapter links so that the URL path
237 # begins from the html build root directory (_build/html).
238 # The path starts with "../" to match the directory depth and
239 # there are as many "../" as needed to reach the root.
240 # Chapter html files are located under module directories in
241 # the _build/html directory and some courses use subdirectories
242 # under the module directories too.
243 # In A+, the URL path must start with "../../" so that it
244 # removes the current chapter and module from the A+ chapter
245 # page URL: /course/course_instance/module/chapter/
246 # (A+ URLs do not have the same "subdirectories" as
247 # the real subdirectories in the course git repo.)
248 new_val = '../../' + start_dotdot_path.sub("", without_html_suffix)
249
250 split_path = new_val.split('/')
251 if len(split_path) > 4 and not exercise_info.search(new_val):
252 # If the module directory has subdirectories in the course
253 # git repo, the subdirectory must be modified in the A+ URL.
254 # The subdirectory slash / is converted to underscore _.
255 # Convert "../../module1/subdir/chapter2_en" into "../../module1/subdir_chapter2_en".
256 # Do not convert if the URL points to an A+ page such as
257 # "../../module1/chapter2/info/model/".
258 chapter_key = '_'.join(split_path[3:])
259 new_val = '/'.join(split_path[:3]) + '/' + chapter_key
260
261 # Remove lang suffix in chapter2_en#anchor without modifying the #anchor.
262 # Add slash / to the end before the #anchor.
263 m = lang_suffix.search(new_val)
264 if m:
265 anchor = m.group('anchor')
266 if anchor is None:
267 anchor = ''
268 new_val = new_val[:m.start()] + '/' + anchor
269
270 element[attr_name] = new_val
271
272 elif value and not test.match(value):
273
274 # Custom transform for RST generated exercises.
275 if element.has_attr('data-aplus-path'):
276 # If the exercise description HTML has links to static files such as images,
277 # their links can be fixed with the data-aplus-path="/static/{course}" attribute.
278 # A+ converts "{course}" into the course key used by the backend based on
279 # the exercise service URL. For example, in the MOOC-Grader, exercise service URLs
280 # follow this scheme: "http://grader.local/coursekey/exercisekey".
281 # In the exercise HTML, image <img data-aplus-path="/static/{course}" src="../_images/image.png">
282 # gets the correct URL "http://grader.local/static/coursekey/_images/image.png".
283 fix_path = element['data-aplus-path'].replace(
284 '{course}',
285 url.path.split('/', 2)[1]
286 )
287 fix_value = start_dotdot_path.sub("/", value)
288 value = fix_path + fix_value
289
290 # url points to the exercise service, e.g., MOOC-Grader.
291 # This fixes links to static files (such as images) in RST chapters.
292 # The image URL must be absolute and refer to the grader server
293 # instead of the A+ server. A relative URL with only path
294 # "/static/course/image.png" would target the A+ server when
295 # it is included in the A+ page. The value should be a relative
296 # path in the course build directory so that it becomes the full
297 # correct URL to the target file.
298 # E.g., urljoin('http://localhost:8080/static/default/module1/chapter.html', "../_images/image.png")
299 # -> 'http://localhost:8080/static/default/_images/image.png'
300 element[attr_name] = urljoin(url.geturl(), value)
301
302 def find_and_replace(self, attr_name, list_of_attributes):
303 l = len(list_of_attributes)
304 if l == 0:
305 return
306 i = 0
307 for element in self.soup.find_all(True, {attr_name:True}):
308 for name,value in list_of_attributes[i].items():
309 if name.startswith('?'):
310 if name[1:] in element:
311 element[name[1:]] = value
312 else:
313 element[name] = value
314 i += 1
315 if i >= l:
316 return
317
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/remote_page.py b/lib/remote_page.py
--- a/lib/remote_page.py
+++ b/lib/remote_page.py
@@ -175,6 +175,8 @@
def element_or_body(self, search_attributes):
element = self.select_element_or_body(search_attributes)
+ if element.get('id') == 'exercise':
+ del element['id']
return str(element) if element else ""
def clean_element_or_body(self, search_attributes):
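A small standalone illustration of the BeautifulSoup attribute handling the patch relies on (the input HTML below is made up):

```python
from bs4 import BeautifulSoup

soup = BeautifulSoup('<div id="exercise"><p>content</p></div>', "html5lib")
element = soup.find("div")
if element.get("id") == "exercise":  # .get returns None when the attribute is absent
    del element["id"]                # in-place removal of the attribute
print(element)                       # -> <div><p>content</p></div>
```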
| {"golden_diff": "diff --git a/lib/remote_page.py b/lib/remote_page.py\n--- a/lib/remote_page.py\n+++ b/lib/remote_page.py\n@@ -175,6 +175,8 @@\n \n def element_or_body(self, search_attributes):\n element = self.select_element_or_body(search_attributes)\n+ if element.get('id') == 'exercise':\n+ del element['id']\n return str(element) if element else \"\"\n \n def clean_element_or_body(self, search_attributes):\n", "issue": "Remove the HTML attribute id=\"exercise\" when chapters and exercises are rendered in the A+ frontend\nHTML id attributes must be unique. However, currently, the `id=\"exercise\"` is used multiple times. It seems that the chapter content itself has one `<div id=\"exercise\">` and each embedded exercise also has one `<div id=\"exercise\">`.\r\n\r\nOne approach would be to remove the attribute `id=\"exercise\"` when A-plus retrieves chapter and exercise HTML from the backend. A-plus already manipulates the HTML a bit before rendering it to the web browser.\r\n\r\nAnother approach is to change the templates in the MOOC-Grader so that the `id=\"exercise\"` is never even included anywhere, but I think it is usually used to pick the correct part of the HTML document retrieved from the backend (Git manager or MOOC-Grader).\r\n\r\nVery related to one subtask in https://github.com/apluslms/a-plus/issues/667.\r\nA little related to https://github.com/apluslms/a-plus/issues/593.\r\nId attribute issue in active elements: https://github.com/apluslms/a-plus/issues/823.\r\n\r\nRelated code:\r\n* https://github.com/apluslms/a-plus/blob/660f98d0906b05a925fca64c6f2c3906497fc8f6/exercise/protocol/aplus.py#L157-L164\r\n* https://github.com/apluslms/mooc-grader/blob/af39777890fc62805d12963abae994d85e11525b/access/templates/access/exercise_frame.html#L9\r\n* https://github.com/apluslms/a-plus/blob/660f98d0906b05a925fca64c6f2c3906497fc8f6/exercise/templates/exercise/exercise_plain.html#L193\n", "before_files": [{"content": "import logging\nimport posixpath\nimport re\nimport time\nfrom typing import Optional\nfrom urllib.parse import urlparse, urljoin\n\nfrom bs4 import BeautifulSoup\nimport requests\nfrom requests.models import Response\n\nfrom aplus_auth.payload import Permission, Permissions\nfrom django.conf import settings\nfrom django.utils.http import parse_http_date_safe\nfrom django.utils.text import format_lazy\nfrom django.utils.translation import gettext_lazy as _\n\nfrom aplus_auth.requests import post as aplus_post, get as aplus_get\n\n\nlogger = logging.getLogger('aplus.remote_page')\n\n\nclass RemotePageException(Exception):\n def __init__(self, message, code=500):\n self.message = message\n self.code = code\n\n\nclass RemotePageNotFound(RemotePageException):\n def __init__(self, message):\n super().__init__(message, 404)\n\n\nclass RemotePageNotModified(Exception):\n\n def __init__(self, expires=None):\n self.expires = expires\n\n\ndef parse_expires(response):\n return parse_http_date_safe(response.headers.get(\"Expires\", \"\")) or 0\n\n\ndef request_for_response(url,\n post=False,\n data=None,\n files=None,\n stamp=None,\n instance_id: Optional[int]=None\n ) -> Response:\n permissions = Permissions()\n if instance_id is not None:\n if post:\n permissions.instances.add(Permission.WRITE, id=instance_id)\n else:\n permissions.instances.add(Permission.READ, id=instance_id)\n\n try:\n last_retry = len(settings.EXERCISE_HTTP_RETRIES) - 1\n n = 0\n while n <= last_retry:\n try:\n request_time = time.time()\n if post:\n logger.info(\"POST %s\", url)\n response = aplus_post(\n 
url,\n permissions=permissions,\n data=data,\n files=files,\n timeout=settings.EXERCISE_HTTP_TIMEOUT\n )\n else:\n logger.info(\"GET %s\", url)\n headers = {}\n if stamp:\n headers['If-Modified-Since'] = stamp\n response = aplus_get(\n url,\n permissions=permissions,\n timeout=settings.EXERCISE_HTTP_TIMEOUT,\n headers=headers\n )\n request_time = time.time() - request_time\n logger.info(\"Response %d (%d sec) %s\",\n response.status_code, request_time, url)\n if response.status_code == 200:\n return response\n elif response.status_code == 304:\n raise RemotePageNotModified(parse_expires(response))\n if response.status_code < 500 or n >= last_retry:\n response.raise_for_status()\n except requests.exceptions.ConnectionError as e:\n logger.warning(\"ConnectionError %s\", url);\n if n >= last_retry:\n raise e\n logger.info(\"Sleep %d sec before retry\",\n settings.EXERCISE_HTTP_RETRIES[n])\n time.sleep(settings.EXERCISE_HTTP_RETRIES[n])\n n += 1\n logger.error(\"HTTP request loop ended in unexpected state\")\n raise RuntimeError(\"HTTP request loop ended in unexpected state\")\n except requests.exceptions.RequestException as e:\n if e.response is not None and e.response.status_code == 404:\n raise RemotePageNotFound(_('REQUESTED_RESOURCE_NOT_FOUND_FROM_COURSE_SERVICE'))\n raise RemotePageException(format_lazy(\n _('CONNECTING_TO_COURSE_SERVICE_FAILED -- {code}'),\n code=e.response.status_code if e.response is not None else '-1',\n )) from e\n\n\nclass RemotePage:\n \"\"\"\n Represents a page that can be loaded over HTTP for further processing.\n \"\"\"\n def __init__(self,\n url,\n post=False,\n data=None,\n files=None,\n stamp=None,\n instance_id: Optional[int] = None,\n ) -> None:\n self.url = urlparse(url)\n self.response = request_for_response(url, post, data, files, stamp, instance_id)\n self.response.encoding = \"utf-8\"\n self.soup = BeautifulSoup(self.response.text, 'html5lib')\n\n def base_address(self):\n path = posixpath.dirname(self.url.path).rstrip('/') + '/'\n url = self.url._replace(path=path, params='', query='', fragment='')\n if settings.REMOTE_PAGE_HOSTS_MAP:\n auth, sep, domain = url.netloc.rpartition('@')\n domain = settings.REMOTE_PAGE_HOSTS_MAP.get(domain, domain)\n url = url._replace(netloc=auth+sep+domain)\n return url\n\n def meta(self, name):\n if self.soup:\n element = self.soup.find(\"meta\", {\"name\": name})\n if element:\n return element.get(\"value\",\n default=element.get(\"content\", default=None))\n return None\n\n def header(self, name):\n return self.response.headers.get(name, \"\")\n\n def last_modified(self):\n return self.header('Last-Modified')\n\n def expires(self):\n return parse_expires(self.response)\n\n def title(self):\n if self.soup and self.soup.title:\n return self.soup.title.contents\n return \"\"\n\n def head(self, search_attribute):\n if self.soup and self.soup.head:\n return \"\\n\".join(str(tag) for tag in\n self.soup.head.find_all(True, search_attribute))\n return \"\"\n\n def select_element_or_body(self, search_attributes):\n if self.soup:\n for attr in search_attributes:\n element = self.soup.find(**attr)\n if element:\n return element\n return self.soup.body\n return None\n\n def element_or_body(self, search_attributes):\n element = self.select_element_or_body(search_attributes)\n return str(element) if element else \"\"\n\n def clean_element_or_body(self, search_attributes):\n element = self.select_element_or_body(search_attributes)\n if element:\n for once in element.find_all(True, {'data-aplus-once':True}):\n once.extract()\n 
return str(element) if element else \"\"\n\n def body(self):\n return self.element_or_body([])\n\n def fix_relative_urls(self):\n url = self.base_address()\n for tag,attr in [\n (\"img\",\"src\"),\n (\"script\",\"src\"),\n (\"iframe\",\"src\"),\n (\"link\",\"href\"),\n (\"a\",\"href\"),\n (\"video\",\"poster\"),\n (\"source\",\"src\"),\n ]:\n self._fix_relative_urls(url, tag, attr)\n\n def _fix_relative_urls(self, url, tag_name, attr_name):\n # Starts with \"#\", \"//\" or \"https:\".\n test = re.compile('^(#|\\/\\/|\\w+:)', re.IGNORECASE)\n # Ends with filename extension \".html\" and possibly \"#anchor\".\n chapter = re.compile('.*\\.html(#.+)?$', re.IGNORECASE)\n # Starts with at least one \"../\".\n start_dotdot_path = re.compile(r\"^(../)+\")\n # May end with the language suffix _en or _en/#anchor or _en#anchor.\n lang_suffix = re.compile(r'(?P<lang>_[a-z]{2})?(?P<slash>/)?(?P<anchor>#.+)?$')\n # Detect certain A+ exercise info URLs so that they are not broken by\n # the transformations: \"../../module1/chapter/module1_chapter_exercise/info/model/\".\n # URLs /plain, /info, /info/model, /info/template.\n exercise_info = re.compile(r'/((plain)|(info(/model|/template)?))/?(#.+)?$')\n\n for element in self.soup.find_all(tag_name, {attr_name:True}):\n value = element[attr_name]\n if not value:\n continue\n\n # Custom transform for RST chapter to chapter links.\n if element.has_attr('data-aplus-chapter'):\n m = chapter.match(value)\n if m:\n i = m.start(1)\n if i > 0:\n without_html_suffix = value[:i-5] + value[i:] # Keep #anchor in the end.\n else:\n without_html_suffix = value[:-5]\n elif not value.startswith('/'):\n without_html_suffix = value\n else:\n continue\n # Remove all ../ from the start and prepend exactly \"../../\".\n # a-plus-rst-tools modifies chapter links so that the URL path\n # begins from the html build root directory (_build/html).\n # The path starts with \"../\" to match the directory depth and\n # there are as many \"../\" as needed to reach the root.\n # Chapter html files are located under module directories in\n # the _build/html directory and some courses use subdirectories\n # under the module directories too.\n # In A+, the URL path must start with \"../../\" so that it\n # removes the current chapter and module from the A+ chapter\n # page URL: /course/course_instance/module/chapter/\n # (A+ URLs do not have the same \"subdirectories\" as\n # the real subdirectories in the course git repo.)\n new_val = '../../' + start_dotdot_path.sub(\"\", without_html_suffix)\n\n split_path = new_val.split('/')\n if len(split_path) > 4 and not exercise_info.search(new_val):\n # If the module directory has subdirectories in the course\n # git repo, the subdirectory must be modified in the A+ URL.\n # The subdirectory slash / is converted to underscore _.\n # Convert \"../../module1/subdir/chapter2_en\" into \"../../module1/subdir_chapter2_en\".\n # Do not convert if the URL points to an A+ page such as\n # \"../../module1/chapter2/info/model/\".\n chapter_key = '_'.join(split_path[3:])\n new_val = '/'.join(split_path[:3]) + '/' + chapter_key\n\n # Remove lang suffix in chapter2_en#anchor without modifying the #anchor.\n # Add slash / to the end before the #anchor.\n m = lang_suffix.search(new_val)\n if m:\n anchor = m.group('anchor')\n if anchor is None:\n anchor = ''\n new_val = new_val[:m.start()] + '/' + anchor\n\n element[attr_name] = new_val\n\n elif value and not test.match(value):\n\n # Custom transform for RST generated exercises.\n if 
element.has_attr('data-aplus-path'):\n # If the exercise description HTML has links to static files such as images,\n # their links can be fixed with the data-aplus-path=\"/static/{course}\" attribute.\n # A+ converts \"{course}\" into the course key used by the backend based on\n # the exercise service URL. For example, in the MOOC-Grader, exercise service URLs\n # follow this scheme: \"http://grader.local/coursekey/exercisekey\".\n # In the exercise HTML, image <img data-aplus-path=\"/static/{course}\" src=\"../_images/image.png\">\n # gets the correct URL \"http://grader.local/static/coursekey/_images/image.png\".\n fix_path = element['data-aplus-path'].replace(\n '{course}',\n url.path.split('/', 2)[1]\n )\n fix_value = start_dotdot_path.sub(\"/\", value)\n value = fix_path + fix_value\n\n # url points to the exercise service, e.g., MOOC-Grader.\n # This fixes links to static files (such as images) in RST chapters.\n # The image URL must be absolute and refer to the grader server\n # instead of the A+ server. A relative URL with only path\n # \"/static/course/image.png\" would target the A+ server when\n # it is included in the A+ page. The value should be a relative\n # path in the course build directory so that it becomes the full\n # correct URL to the target file.\n # E.g., urljoin('http://localhost:8080/static/default/module1/chapter.html', \"../_images/image.png\")\n # -> 'http://localhost:8080/static/default/_images/image.png'\n element[attr_name] = urljoin(url.geturl(), value)\n\n def find_and_replace(self, attr_name, list_of_attributes):\n l = len(list_of_attributes)\n if l == 0:\n return\n i = 0\n for element in self.soup.find_all(True, {attr_name:True}):\n for name,value in list_of_attributes[i].items():\n if name.startswith('?'):\n if name[1:] in element:\n element[name[1:]] = value\n else:\n element[name] = value\n i += 1\n if i >= l:\n return\n", "path": "lib/remote_page.py"}], "after_files": [{"content": "import logging\nimport posixpath\nimport re\nimport time\nfrom typing import Optional\nfrom urllib.parse import urlparse, urljoin\n\nfrom bs4 import BeautifulSoup\nimport requests\nfrom requests.models import Response\n\nfrom aplus_auth.payload import Permission, Permissions\nfrom django.conf import settings\nfrom django.utils.http import parse_http_date_safe\nfrom django.utils.text import format_lazy\nfrom django.utils.translation import gettext_lazy as _\n\nfrom aplus_auth.requests import post as aplus_post, get as aplus_get\n\n\nlogger = logging.getLogger('aplus.remote_page')\n\n\nclass RemotePageException(Exception):\n def __init__(self, message, code=500):\n self.message = message\n self.code = code\n\n\nclass RemotePageNotFound(RemotePageException):\n def __init__(self, message):\n super().__init__(message, 404)\n\n\nclass RemotePageNotModified(Exception):\n\n def __init__(self, expires=None):\n self.expires = expires\n\n\ndef parse_expires(response):\n return parse_http_date_safe(response.headers.get(\"Expires\", \"\")) or 0\n\n\ndef request_for_response(url,\n post=False,\n data=None,\n files=None,\n stamp=None,\n instance_id: Optional[int]=None\n ) -> Response:\n permissions = Permissions()\n if instance_id is not None:\n if post:\n permissions.instances.add(Permission.WRITE, id=instance_id)\n else:\n permissions.instances.add(Permission.READ, id=instance_id)\n\n try:\n last_retry = len(settings.EXERCISE_HTTP_RETRIES) - 1\n n = 0\n while n <= last_retry:\n try:\n request_time = time.time()\n if post:\n logger.info(\"POST %s\", url)\n response = aplus_post(\n 
url,\n permissions=permissions,\n data=data,\n files=files,\n timeout=settings.EXERCISE_HTTP_TIMEOUT\n )\n else:\n logger.info(\"GET %s\", url)\n headers = {}\n if stamp:\n headers['If-Modified-Since'] = stamp\n response = aplus_get(\n url,\n permissions=permissions,\n timeout=settings.EXERCISE_HTTP_TIMEOUT,\n headers=headers\n )\n request_time = time.time() - request_time\n logger.info(\"Response %d (%d sec) %s\",\n response.status_code, request_time, url)\n if response.status_code == 200:\n return response\n elif response.status_code == 304:\n raise RemotePageNotModified(parse_expires(response))\n if response.status_code < 500 or n >= last_retry:\n response.raise_for_status()\n except requests.exceptions.ConnectionError as e:\n logger.warning(\"ConnectionError %s\", url);\n if n >= last_retry:\n raise e\n logger.info(\"Sleep %d sec before retry\",\n settings.EXERCISE_HTTP_RETRIES[n])\n time.sleep(settings.EXERCISE_HTTP_RETRIES[n])\n n += 1\n logger.error(\"HTTP request loop ended in unexpected state\")\n raise RuntimeError(\"HTTP request loop ended in unexpected state\")\n except requests.exceptions.RequestException as e:\n if e.response is not None and e.response.status_code == 404:\n raise RemotePageNotFound(_('REQUESTED_RESOURCE_NOT_FOUND_FROM_COURSE_SERVICE'))\n raise RemotePageException(format_lazy(\n _('CONNECTING_TO_COURSE_SERVICE_FAILED -- {code}'),\n code=e.response.status_code if e.response is not None else '-1',\n )) from e\n\n\nclass RemotePage:\n \"\"\"\n Represents a page that can be loaded over HTTP for further processing.\n \"\"\"\n def __init__(self,\n url,\n post=False,\n data=None,\n files=None,\n stamp=None,\n instance_id: Optional[int] = None,\n ) -> None:\n self.url = urlparse(url)\n self.response = request_for_response(url, post, data, files, stamp, instance_id)\n self.response.encoding = \"utf-8\"\n self.soup = BeautifulSoup(self.response.text, 'html5lib')\n\n def base_address(self):\n path = posixpath.dirname(self.url.path).rstrip('/') + '/'\n url = self.url._replace(path=path, params='', query='', fragment='')\n if settings.REMOTE_PAGE_HOSTS_MAP:\n auth, sep, domain = url.netloc.rpartition('@')\n domain = settings.REMOTE_PAGE_HOSTS_MAP.get(domain, domain)\n url = url._replace(netloc=auth+sep+domain)\n return url\n\n def meta(self, name):\n if self.soup:\n element = self.soup.find(\"meta\", {\"name\": name})\n if element:\n return element.get(\"value\",\n default=element.get(\"content\", default=None))\n return None\n\n def header(self, name):\n return self.response.headers.get(name, \"\")\n\n def last_modified(self):\n return self.header('Last-Modified')\n\n def expires(self):\n return parse_expires(self.response)\n\n def title(self):\n if self.soup and self.soup.title:\n return self.soup.title.contents\n return \"\"\n\n def head(self, search_attribute):\n if self.soup and self.soup.head:\n return \"\\n\".join(str(tag) for tag in\n self.soup.head.find_all(True, search_attribute))\n return \"\"\n\n def select_element_or_body(self, search_attributes):\n if self.soup:\n for attr in search_attributes:\n element = self.soup.find(**attr)\n if element:\n return element\n return self.soup.body\n return None\n\n def element_or_body(self, search_attributes):\n element = self.select_element_or_body(search_attributes)\n if element.get('id') == 'exercise':\n del element['id']\n return str(element) if element else \"\"\n\n def clean_element_or_body(self, search_attributes):\n element = self.select_element_or_body(search_attributes)\n if element:\n for once in 
element.find_all(True, {'data-aplus-once':True}):\n once.extract()\n return str(element) if element else \"\"\n\n def body(self):\n return self.element_or_body([])\n\n def fix_relative_urls(self):\n url = self.base_address()\n for tag,attr in [\n (\"img\",\"src\"),\n (\"script\",\"src\"),\n (\"iframe\",\"src\"),\n (\"link\",\"href\"),\n (\"a\",\"href\"),\n (\"video\",\"poster\"),\n (\"source\",\"src\"),\n ]:\n self._fix_relative_urls(url, tag, attr)\n\n def _fix_relative_urls(self, url, tag_name, attr_name):\n # Starts with \"#\", \"//\" or \"https:\".\n test = re.compile('^(#|\\/\\/|\\w+:)', re.IGNORECASE)\n # Ends with filename extension \".html\" and possibly \"#anchor\".\n chapter = re.compile('.*\\.html(#.+)?$', re.IGNORECASE)\n # Starts with at least one \"../\".\n start_dotdot_path = re.compile(r\"^(../)+\")\n # May end with the language suffix _en or _en/#anchor or _en#anchor.\n lang_suffix = re.compile(r'(?P<lang>_[a-z]{2})?(?P<slash>/)?(?P<anchor>#.+)?$')\n # Detect certain A+ exercise info URLs so that they are not broken by\n # the transformations: \"../../module1/chapter/module1_chapter_exercise/info/model/\".\n # URLs /plain, /info, /info/model, /info/template.\n exercise_info = re.compile(r'/((plain)|(info(/model|/template)?))/?(#.+)?$')\n\n for element in self.soup.find_all(tag_name, {attr_name:True}):\n value = element[attr_name]\n if not value:\n continue\n\n # Custom transform for RST chapter to chapter links.\n if element.has_attr('data-aplus-chapter'):\n m = chapter.match(value)\n if m:\n i = m.start(1)\n if i > 0:\n without_html_suffix = value[:i-5] + value[i:] # Keep #anchor in the end.\n else:\n without_html_suffix = value[:-5]\n elif not value.startswith('/'):\n without_html_suffix = value\n else:\n continue\n # Remove all ../ from the start and prepend exactly \"../../\".\n # a-plus-rst-tools modifies chapter links so that the URL path\n # begins from the html build root directory (_build/html).\n # The path starts with \"../\" to match the directory depth and\n # there are as many \"../\" as needed to reach the root.\n # Chapter html files are located under module directories in\n # the _build/html directory and some courses use subdirectories\n # under the module directories too.\n # In A+, the URL path must start with \"../../\" so that it\n # removes the current chapter and module from the A+ chapter\n # page URL: /course/course_instance/module/chapter/\n # (A+ URLs do not have the same \"subdirectories\" as\n # the real subdirectories in the course git repo.)\n new_val = '../../' + start_dotdot_path.sub(\"\", without_html_suffix)\n\n split_path = new_val.split('/')\n if len(split_path) > 4 and not exercise_info.search(new_val):\n # If the module directory has subdirectories in the course\n # git repo, the subdirectory must be modified in the A+ URL.\n # The subdirectory slash / is converted to underscore _.\n # Convert \"../../module1/subdir/chapter2_en\" into \"../../module1/subdir_chapter2_en\".\n # Do not convert if the URL points to an A+ page such as\n # \"../../module1/chapter2/info/model/\".\n chapter_key = '_'.join(split_path[3:])\n new_val = '/'.join(split_path[:3]) + '/' + chapter_key\n\n # Remove lang suffix in chapter2_en#anchor without modifying the #anchor.\n # Add slash / to the end before the #anchor.\n m = lang_suffix.search(new_val)\n if m:\n anchor = m.group('anchor')\n if anchor is None:\n anchor = ''\n new_val = new_val[:m.start()] + '/' + anchor\n\n element[attr_name] = new_val\n\n elif value and not test.match(value):\n\n # Custom 
transform for RST generated exercises.\n if element.has_attr('data-aplus-path'):\n # If the exercise description HTML has links to static files such as images,\n # their links can be fixed with the data-aplus-path=\"/static/{course}\" attribute.\n # A+ converts \"{course}\" into the course key used by the backend based on\n # the exercise service URL. For example, in the MOOC-Grader, exercise service URLs\n # follow this scheme: \"http://grader.local/coursekey/exercisekey\".\n # In the exercise HTML, image <img data-aplus-path=\"/static/{course}\" src=\"../_images/image.png\">\n # gets the correct URL \"http://grader.local/static/coursekey/_images/image.png\".\n fix_path = element['data-aplus-path'].replace(\n '{course}',\n url.path.split('/', 2)[1]\n )\n fix_value = start_dotdot_path.sub(\"/\", value)\n value = fix_path + fix_value\n\n # url points to the exercise service, e.g., MOOC-Grader.\n # This fixes links to static files (such as images) in RST chapters.\n # The image URL must be absolute and refer to the grader server\n # instead of the A+ server. A relative URL with only path\n # \"/static/course/image.png\" would target the A+ server when\n # it is included in the A+ page. The value should be a relative\n # path in the course build directory so that it becomes the full\n # correct URL to the target file.\n # E.g., urljoin('http://localhost:8080/static/default/module1/chapter.html', \"../_images/image.png\")\n # -> 'http://localhost:8080/static/default/_images/image.png'\n element[attr_name] = urljoin(url.geturl(), value)\n\n def find_and_replace(self, attr_name, list_of_attributes):\n l = len(list_of_attributes)\n if l == 0:\n return\n i = 0\n for element in self.soup.find_all(True, {attr_name:True}):\n for name,value in list_of_attributes[i].items():\n if name.startswith('?'):\n if name[1:] in element:\n element[name[1:]] = value\n else:\n element[name] = value\n i += 1\n if i >= l:\n return\n", "path": "lib/remote_page.py"}]} |
gh_patches_debug_1203 | rasdani/github-patches | git_diff | qutip__qutip-2183 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cannot import qutip after latest scipy release
### Bug Description
I've encountered this bug when creating a clean environment, and installing `qutip` (and IPython as console) through mamba.
Next, I opened an IPython console and ran `import qutip`, which resulted in the exception printed below, stating that the `format` attribute of the `fast_csr_matrix` cannot be set.
I believe the latest `scipy` [release](https://github.com/scipy/scipy/releases) is the culprit (released 14 hrs ago at the time of writing): downgrading from `scipy==1.11.0` to `scipy==1.10.1` resolves the issue for me.
### Code to Reproduce the Bug
```shell
import qutip
```
### Code Output
```shell
Cell In[1], line 1
----> 1 import qutip
File ~\mambaforge\envs\test-env-scipy-qutip\Lib\site-packages\qutip\__init__.py:106
98 del matplotlib
101 # -----------------------------------------------------------------------------
102 # Load modules
103 #
104
105 # core
--> 106 from qutip.qobj import *
107 from qutip.qobjevo import *
108 from qutip.states import *
File ~\mambaforge\envs\test-env-scipy-qutip\Lib\site-packages\qutip\qobj.py:2526
2523 # TRAILING IMPORTS
2524 # We do a few imports here to avoid circular dependencies.
2525 from qutip.eseries import eseries
-> 2526 import qutip.superop_reps as sr
2527 import qutip.tensor as tensor
2528 import qutip.operators as ops
File ~\mambaforge\envs\test-env-scipy-qutip\Lib\site-packages\qutip\superop_reps.py:74
61 return Qobj(dims=[[[2], [2]], [[2], [2]]],
62 inpt=array([[1. - pe / 2., 0., 0., 1. - pe],
63 [0., pe / 2., 0., 0.],
64 [0., 0., pe / 2., 0.],
65 [1. - pe, 0., 0., 1. - pe / 2.]]),
66 superrep='choi')
69 # CHANGE OF BASIS FUNCTIONS ---------------------------------------------------
70 # These functions find change of basis matrices, and are useful in converting
71 # between (for instance) Choi and chi matrices. At some point, these should
72 # probably be moved out to another module.
---> 74 _SINGLE_QUBIT_PAULI_BASIS = (identity(2), sigmax(), sigmay(), sigmaz())
77 def _pauli_basis(nq=1):
78 # NOTE: This is slow as can be.
79 # TODO: Make this sparse. CSR format was causing problems for the [idx, :]
80 # slicing below.
81 B = zeros((4 ** nq, 4 ** nq), dtype=complex)
File ~\mambaforge\envs\test-env-scipy-qutip\Lib\site-packages\qutip\operators.py:508, in identity(dims)
492 def identity(dims):
493 """Identity operator. Alternative name to :func:`qeye`.
494
495 Parameters
(...)
506 Identity operator Qobj.
507 """
--> 508 return qeye(dims)
File ~\mambaforge\envs\test-env-scipy-qutip\Lib\site-packages\qutip\operators.py:488, in qeye(dimensions)
452 """
453 Identity operator.
454
(...)
485
486 """
487 size, dimensions = _implicit_tensor_dimensions(dimensions)
--> 488 return Qobj(fast_identity(size),
489 dims=dimensions, isherm=True, isunitary=True)
File ~\mambaforge\envs\test-env-scipy-qutip\Lib\site-packages\qutip\fastsparse.py:389, in fast_identity(N)
387 ptr = np.arange(N+1, dtype=np.int32)
388 ptr[-1] = N
--> 389 return fast_csr_matrix((data,ind,ptr),shape=(N,N))
File ~\mambaforge\envs\test-env-scipy-qutip\Lib\site-packages\qutip\fastsparse.py:55, in fast_csr_matrix.__init__(self, args, shape, dtype, copy)
53 self.dtype = complex
54 self.maxprint = 50
---> 55 self.format = 'csr'
AttributeError: property 'format' of 'fast_csr_matrix' object has no setter
```
### Expected Behaviour
I expected to be able to import qutip :)
### Your Environment
```shell
QuTiP Version: 4.7.1
Numpy Version: 1.25.0
Scipy Version: 1.11.0
Cython Version: None
Matplotlib Version: None
Python Version: 3.11.4
Number of CPUs: 8
BLAS Info: Generic
OPENMP Installed: False
INTEL MKL Ext: False
Platform Info: Windows (AMD64)
```
### Additional Context
The environment above was generated with `scipy==1.10.1` (which still worked); I've changed the Scipy version to `1.11.0` manually in that list.
Forcing `scipy==1.10.1` resolves the problem. Hence, I'm under the impression that the recent bump of `scipy` to `1.11.0` is causing the issue.
--- END ISSUE ---
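The traceback points at the `self.format = 'csr'` assignment in `fast_csr_matrix.__init__`; in SciPy 1.11 `format` became a read-only property on the sparse matrix classes, so that assignment now raises. A minimal illustration of the behaviour change and one possible guard (an assumption about a workaround, not necessarily the fix the project adopted):

```python
import scipy.sparse as sp


class MyCSR(sp.csr_matrix):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        try:
            self.format = "csr"  # allowed on SciPy <= 1.10.x
        except AttributeError:
            pass                 # SciPy >= 1.11: 'format' is a read-only property


m = MyCSR((3, 3))
print(m.format)  # 'csr' either way; the base class already reports its format
```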
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qutip/fastsparse.py`
Content:
```
1 from warnings import warn
2 import operator
3
4 import numpy as np
5 from scipy.sparse import (
6 csr_matrix, dia_matrix, isspmatrix, SparseEfficiencyWarning,
7 )
8
9 # fast_csr_matrix extends the internals of csr_matrix, and we need to
10 # import parts of the internals of scipy.sparse to do that:
11 import scipy.sparse
12 import scipy.sparse._sparsetools as _sparsetools
13 if hasattr(scipy.sparse, "_sputils"):
14 # SciPy 1.8.0 deprecated the public scipy.sparse.sputils interface and
15 # moved it to _sputils
16 from scipy.sparse._sputils import (
17 isdense, isscalarlike, upcast, get_index_dtype,
18 )
19 else:
20 from scipy.sparse.sputils import (
21 isdense, isscalarlike, upcast, get_index_dtype,
22 )
23
24
25 class fast_csr_matrix(csr_matrix):
26 """
27 A subclass of scipy.sparse.csr_matrix that skips the data format
28 checks that are run everytime a new csr_matrix is created.
29 """
30 def __init__(self, args=None, shape=None, dtype=None, copy=False):
31 if args is None: #Build zero matrix
32 if shape is None:
33 raise Exception('Shape must be given when building zero matrix.')
34 self.data = np.array([], dtype=complex)
35 self.indices = np.array([], dtype=np.int32)
36 self.indptr = np.zeros(shape[0]+1, dtype=np.int32)
37 self._shape = tuple(int(s) for s in shape)
38
39 else:
40 if args[0].shape[0] and args[0].dtype != complex:
41 raise TypeError('fast_csr_matrix allows only complex data.')
42 if args[1].shape[0] and args[1].dtype != np.int32:
43 raise TypeError('fast_csr_matrix allows only int32 indices.')
44 if args[2].shape[0] and args[1].dtype != np.int32:
45 raise TypeError('fast_csr_matrix allows only int32 indptr.')
46 self.data = np.array(args[0], dtype=complex, copy=copy)
47 self.indices = np.array(args[1], dtype=np.int32, copy=copy)
48 self.indptr = np.array(args[2], dtype=np.int32, copy=copy)
49 if shape is None:
50 self._shape = tuple([len(self.indptr)-1]*2)
51 else:
52 self._shape = tuple(int(s) for s in shape)
53 self.dtype = complex
54 self.maxprint = 50
55 self.format = 'csr'
56
57 def _binopt(self, other, op):
58 """
59 Do the binary operation fn to two sparse matrices using
60 fast_csr_matrix only when other is also a fast_csr_matrix.
61 """
62 # e.g. csr_plus_csr, csr_minus_csr, etc.
63 if not isinstance(other, fast_csr_matrix):
64 other = csr_matrix(other)
65 # e.g. csr_plus_csr, csr_minus_csr, etc.
66 fn = getattr(_sparsetools, self.format + op + self.format)
67
68 maxnnz = self.nnz + other.nnz
69 idx_dtype = get_index_dtype((self.indptr, self.indices,
70 other.indptr, other.indices),
71 maxval=maxnnz)
72 indptr = np.empty(self.indptr.shape, dtype=idx_dtype)
73 indices = np.empty(maxnnz, dtype=idx_dtype)
74
75 bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_']
76 if op in bool_ops:
77 data = np.empty(maxnnz, dtype=np.bool_)
78 else:
79 data = np.empty(maxnnz, dtype=upcast(self.dtype, other.dtype))
80
81 fn(self.shape[0], self.shape[1],
82 np.asarray(self.indptr, dtype=idx_dtype),
83 np.asarray(self.indices, dtype=idx_dtype),
84 self.data,
85 np.asarray(other.indptr, dtype=idx_dtype),
86 np.asarray(other.indices, dtype=idx_dtype),
87 other.data,
88 indptr, indices, data)
89
90 actual_nnz = indptr[-1]
91 indices = indices[:actual_nnz]
92 data = data[:actual_nnz]
93 if actual_nnz < maxnnz // 2:
94 # too much waste, trim arrays
95 indices = indices.copy()
96 data = data.copy()
97 if isinstance(other, fast_csr_matrix) and (not op in bool_ops):
98 A = fast_csr_matrix((data, indices, indptr), dtype=data.dtype, shape=self.shape)
99 else:
100 A = csr_matrix((data, indices, indptr), dtype=data.dtype, shape=self.shape)
101 return A
102
103 def multiply(self, other):
104 """Point-wise multiplication by another matrix, vector, or
105 scalar.
106 """
107 # Scalar multiplication.
108 if isscalarlike(other):
109 return self._mul_scalar(other)
110 # Sparse matrix or vector.
111 if isspmatrix(other):
112 if self.shape == other.shape:
113 if not isinstance(other, fast_csr_matrix):
114 other = csr_matrix(other)
115 return self._binopt(other, '_elmul_')
116 # Single element.
117 elif other.shape == (1,1):
118 return self._mul_scalar(other.toarray()[0, 0])
119 elif self.shape == (1,1):
120 return other._mul_scalar(self.toarray()[0, 0])
121 # A row times a column.
122 elif self.shape[1] == other.shape[0] and self.shape[1] == 1:
123 return self._mul_sparse_matrix(other.tocsc())
124 elif self.shape[0] == other.shape[1] and self.shape[0] == 1:
125 return other._mul_sparse_matrix(self.tocsc())
126 # Row vector times matrix. other is a row.
127 elif other.shape[0] == 1 and self.shape[1] == other.shape[1]:
128 other = dia_matrix((other.toarray().ravel(), [0]),
129 shape=(other.shape[1], other.shape[1]))
130 return self._mul_sparse_matrix(other)
131 # self is a row.
132 elif self.shape[0] == 1 and self.shape[1] == other.shape[1]:
133 copy = dia_matrix((self.toarray().ravel(), [0]),
134 shape=(self.shape[1], self.shape[1]))
135 return other._mul_sparse_matrix(copy)
136 # Column vector times matrix. other is a column.
137 elif other.shape[1] == 1 and self.shape[0] == other.shape[0]:
138 other = dia_matrix((other.toarray().ravel(), [0]),
139 shape=(other.shape[0], other.shape[0]))
140 return other._mul_sparse_matrix(self)
141 # self is a column.
142 elif self.shape[1] == 1 and self.shape[0] == other.shape[0]:
143 copy = dia_matrix((self.toarray().ravel(), [0]),
144 shape=(self.shape[0], self.shape[0]))
145 return copy._mul_sparse_matrix(other)
146 else:
147 raise ValueError("inconsistent shapes")
148 # Dense matrix.
149 if isdense(other):
150 if self.shape == other.shape:
151 ret = self.tocoo()
152 ret.data = np.multiply(ret.data, other[ret.row, ret.col]
153 ).view(np.ndarray).ravel()
154 return ret
155 # Single element.
156 elif other.size == 1:
157 return self._mul_scalar(other.flat[0])
158 # Anything else.
159 return np.multiply(self.toarray(), other)
160
161 def _mul_sparse_matrix(self, other):
162 """
163 Do the sparse matrix mult returning fast_csr_matrix only
164 when other is also fast_csr_matrix.
165 """
166 M, _ = self.shape
167 _, N = other.shape
168
169 major_axis = self._swap((M, N))[0]
170 if isinstance(other, fast_csr_matrix):
171 A = zcsr_mult(self, other, sorted=1)
172 return A
173
174 other = csr_matrix(other) # convert to this format
175 idx_dtype = get_index_dtype((self.indptr, self.indices,
176 other.indptr, other.indices),
177 maxval=M*N)
178
179 # scipy 1.5 renamed the older csr_matmat_pass1 to the much more
180 # descriptive csr_matmat_maxnnz, but also changed the call and logic
181 # structure of constructing the indices.
182 try:
183 fn = getattr(_sparsetools, self.format + '_matmat_maxnnz')
184 nnz = fn(M, N,
185 np.asarray(self.indptr, dtype=idx_dtype),
186 np.asarray(self.indices, dtype=idx_dtype),
187 np.asarray(other.indptr, dtype=idx_dtype),
188 np.asarray(other.indices, dtype=idx_dtype))
189 idx_dtype = get_index_dtype((self.indptr, self.indices,
190 other.indptr, other.indices),
191 maxval=nnz)
192 indptr = np.empty(major_axis + 1, dtype=idx_dtype)
193 except AttributeError:
194 indptr = np.empty(major_axis + 1, dtype=idx_dtype)
195 fn = getattr(_sparsetools, self.format + '_matmat_pass1')
196 fn(M, N,
197 np.asarray(self.indptr, dtype=idx_dtype),
198 np.asarray(self.indices, dtype=idx_dtype),
199 np.asarray(other.indptr, dtype=idx_dtype),
200 np.asarray(other.indices, dtype=idx_dtype),
201 indptr)
202 nnz = indptr[-1]
203 idx_dtype = get_index_dtype((self.indptr, self.indices,
204 other.indptr, other.indices),
205 maxval=nnz)
206
207 indices = np.empty(nnz, dtype=idx_dtype)
208 data = np.empty(nnz, dtype=upcast(self.dtype, other.dtype))
209
210 try:
211 fn = getattr(_sparsetools, self.format + '_matmat')
212 except AttributeError:
213 fn = getattr(_sparsetools, self.format + '_matmat_pass2')
214 fn(M, N, np.asarray(self.indptr, dtype=idx_dtype),
215 np.asarray(self.indices, dtype=idx_dtype),
216 self.data,
217 np.asarray(other.indptr, dtype=idx_dtype),
218 np.asarray(other.indices, dtype=idx_dtype),
219 other.data,
220 indptr, indices, data)
221 A = csr_matrix((data, indices, indptr), shape=(M, N))
222 return A
223
224 def _scalar_binopt(self, other, op):
225 """Scalar version of self._binopt, for cases in which no new nonzeros
226 are added. Produces a new spmatrix in canonical form.
227 """
228 self.sum_duplicates()
229 res = self._with_data(op(self.data, other), copy=True)
230 res.eliminate_zeros()
231 return res
232
233 def __eq__(self, other):
234 # Scalar other.
235 if isscalarlike(other):
236 if np.isnan(other):
237 return csr_matrix(self.shape, dtype=np.bool_)
238
239 if other == 0:
240 warn("Comparing a sparse matrix with 0 using == is inefficient"
241 ", try using != instead.", SparseEfficiencyWarning)
242 all_true = _all_true(self.shape)
243 inv = self._scalar_binopt(other, operator.ne)
244 return all_true - inv
245 else:
246 return self._scalar_binopt(other, operator.eq)
247 # Dense other.
248 elif isdense(other):
249 return self.toarray() == other
250 # Sparse other.
251 elif isspmatrix(other):
252 warn("Comparing sparse matrices using == is inefficient, try using"
253 " != instead.", SparseEfficiencyWarning)
254 #TODO sparse broadcasting
255 if self.shape != other.shape:
256 return False
257 elif self.format != other.format:
258 other = other.asformat(self.format)
259 res = self._binopt(other,'_ne_')
260 all_true = _all_true(self.shape)
261 return all_true - res
262 else:
263 return False
264
265 def __ne__(self, other):
266 # Scalar other.
267 if isscalarlike(other):
268 if np.isnan(other):
269 warn("Comparing a sparse matrix with nan using != is inefficient",
270 SparseEfficiencyWarning)
271 all_true = _all_true(self.shape)
272 return all_true
273 elif other != 0:
274 warn("Comparing a sparse matrix with a nonzero scalar using !="
275 " is inefficient, try using == instead.", SparseEfficiencyWarning)
276 all_true = _all_true(self.shape)
277 inv = self._scalar_binopt(other, operator.eq)
278 return all_true - inv
279 else:
280 return self._scalar_binopt(other, operator.ne)
281 # Dense other.
282 elif isdense(other):
283 return self.toarray() != other
284 # Sparse other.
285 elif isspmatrix(other):
286 #TODO sparse broadcasting
287 if self.shape != other.shape:
288 return True
289 elif self.format != other.format:
290 other = other.asformat(self.format)
291 return self._binopt(other,'_ne_')
292 else:
293 return True
294
295 def _inequality(self, other, op, op_name, bad_scalar_msg):
296 # Scalar other.
297 if isscalarlike(other):
298 if 0 == other and op_name in ('_le_', '_ge_'):
299 raise NotImplementedError(" >= and <= don't work with 0.")
300 elif op(0, other):
301 warn(bad_scalar_msg, SparseEfficiencyWarning)
302 other_arr = np.empty(self.shape, dtype=np.result_type(other))
303 other_arr.fill(other)
304 other_arr = csr_matrix(other_arr)
305 return self._binopt(other_arr, op_name)
306 else:
307 return self._scalar_binopt(other, op)
308 # Dense other.
309 elif isdense(other):
310 return op(self.toarray(), other)
311 # Sparse other.
312 elif isspmatrix(other):
313 #TODO sparse broadcasting
314 if self.shape != other.shape:
315 raise ValueError("inconsistent shapes")
316 elif self.format != other.format:
317 other = other.asformat(self.format)
318 if op_name not in ('_ge_', '_le_'):
319 return self._binopt(other, op_name)
320
321 warn("Comparing sparse matrices using >= and <= is inefficient, "
322 "using <, >, or !=, instead.", SparseEfficiencyWarning)
323 all_true = _all_true(self.shape)
324 res = self._binopt(other, '_gt_' if op_name == '_le_' else '_lt_')
325 return all_true - res
326 else:
327 raise ValueError("Operands could not be compared.")
328
329 def _with_data(self,data,copy=True):
330 """Returns a matrix with the same sparsity structure as self,
331 but with different data. By default the structure arrays
332 (i.e. .indptr and .indices) are copied.
333 """
334 # We need this just in case something like abs(data) gets called
335 # does nothing if data.dtype is complex.
336 data = np.asarray(data, dtype=complex)
337 if copy:
338 return fast_csr_matrix((data,self.indices.copy(),self.indptr.copy()),
339 shape=self.shape,dtype=data.dtype)
340 else:
341 return fast_csr_matrix((data,self.indices,self.indptr),
342 shape=self.shape,dtype=data.dtype)
343
344 def transpose(self):
345 """
346 Returns the transpose of the matrix, keeping
347 it in fast_csr format.
348 """
349 return zcsr_transpose(self)
350
351 def trans(self):
352 """
353 Same as transpose
354 """
355 return zcsr_transpose(self)
356
357 def getH(self):
358 """
359 Returns the conjugate-transpose of the matrix, keeping
360 it in fast_csr format.
361 """
362 return zcsr_adjoint(self)
363
364 def adjoint(self):
365 """
366 Same as getH
367 """
368 return zcsr_adjoint(self)
369
370
371 def csr2fast(A, copy=False):
372 if (not isinstance(A, fast_csr_matrix)) or copy:
373 # Do not need to do any type checking here
374 # since fast_csr_matrix does that.
375 return fast_csr_matrix((A.data,A.indices,A.indptr),
376 shape=A.shape,copy=copy)
377 else:
378 return A
379
380
381 def fast_identity(N):
382 """Generates a sparse identity matrix in
383 fast_csr format.
384 """
385 data = np.ones(N, dtype=complex)
386 ind = np.arange(N, dtype=np.int32)
387 ptr = np.arange(N+1, dtype=np.int32)
388 ptr[-1] = N
389 return fast_csr_matrix((data,ind,ptr),shape=(N,N))
390
391
392
393 #Convenience functions
394 #--------------------
395 def _all_true(shape):
396 A = csr_matrix((np.ones(np.prod(shape), dtype=np.bool_),
397 np.tile(np.arange(shape[1],dtype=np.int32),shape[0]),
398 np.arange(0,np.prod(shape)+1,shape[1],dtype=np.int32)),
399 shape=shape)
400 return A
401
402
403
404 #Need to do some trailing imports here
405 #-------------------------------------
406 from qutip.cy.spmath import (zcsr_transpose, zcsr_adjoint, zcsr_mult)
407
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/qutip/fastsparse.py b/qutip/fastsparse.py
--- a/qutip/fastsparse.py
+++ b/qutip/fastsparse.py
@@ -52,7 +52,11 @@
self._shape = tuple(int(s) for s in shape)
self.dtype = complex
self.maxprint = 50
- self.format = 'csr'
+ if hasattr(self, "_format"):
+ # format is readonly since 1.11
+ self._format = 'csr'
+ else:
+ self.format = 'csr'
def _binopt(self, other, op):
"""
| {"golden_diff": "diff --git a/qutip/fastsparse.py b/qutip/fastsparse.py\n--- a/qutip/fastsparse.py\n+++ b/qutip/fastsparse.py\n@@ -52,7 +52,11 @@\n self._shape = tuple(int(s) for s in shape)\n self.dtype = complex\n self.maxprint = 50\n- self.format = 'csr'\n+ if hasattr(self, \"_format\"):\n+ # format is readonly since 1.11\n+ self._format = 'csr'\n+ else:\n+ self.format = 'csr'\n \n def _binopt(self, other, op):\n \"\"\"\n", "issue": "Cannot import qutip after latest scipy release\n### Bug Description\n\nI've encountered this bug when creating a clean environment, and installing `qutip` (and IPython as console) through mamba.\r\nNext, I've opened an IPython console and ran `import qutip`, which resulted in the exception printed below - stating that the `format` attribute of the `fast_csr_matrix` cannot be set.\r\n\r\nI believe the latest `scipy` [release](https://github.com/scipy/scipy/releases) is the culprit (released 14 hrs ago at the time of writing) - reducing from `scipy==1.11.0` to `scipy==1.10.1` resolves the issue for me.\n\n### Code to Reproduce the Bug\n\n```shell\nimport qutip\n```\n\n\n### Code Output\n\n```shell\nCell In[1], line 1\r\n----> 1 import qutip\r\n\r\nFile ~\\mambaforge\\envs\\test-env-scipy-qutip\\Lib\\site-packages\\qutip\\__init__.py:106\r\n 98 del matplotlib\r\n 101 # -----------------------------------------------------------------------------\r\n 102 # Load modules\r\n 103 #\r\n 104\r\n 105 # core\r\n--> 106 from qutip.qobj import *\r\n 107 from qutip.qobjevo import *\r\n 108 from qutip.states import *\r\n\r\nFile ~\\mambaforge\\envs\\test-env-scipy-qutip\\Lib\\site-packages\\qutip\\qobj.py:2526\r\n 2523 # TRAILING IMPORTS\r\n 2524 # We do a few imports here to avoid circular dependencies.\r\n 2525 from qutip.eseries import eseries\r\n-> 2526 import qutip.superop_reps as sr\r\n 2527 import qutip.tensor as tensor\r\n 2528 import qutip.operators as ops\r\n\r\nFile ~\\mambaforge\\envs\\test-env-scipy-qutip\\Lib\\site-packages\\qutip\\superop_reps.py:74\r\n 61 return Qobj(dims=[[[2], [2]], [[2], [2]]],\r\n 62 inpt=array([[1. - pe / 2., 0., 0., 1. - pe],\r\n 63 [0., pe / 2., 0., 0.],\r\n 64 [0., 0., pe / 2., 0.],\r\n 65 [1. - pe, 0., 0., 1. - pe / 2.]]),\r\n 66 superrep='choi')\r\n 69 # CHANGE OF BASIS FUNCTIONS ---------------------------------------------------\r\n 70 # These functions find change of basis matrices, and are useful in converting\r\n 71 # between (for instance) Choi and chi matrices. At some point, these should\r\n 72 # probably be moved out to another module.\r\n---> 74 _SINGLE_QUBIT_PAULI_BASIS = (identity(2), sigmax(), sigmay(), sigmaz())\r\n 77 def _pauli_basis(nq=1):\r\n 78 # NOTE: This is slow as can be.\r\n 79 # TODO: Make this sparse. CSR format was causing problems for the [idx, :]\r\n 80 # slicing below.\r\n 81 B = zeros((4 ** nq, 4 ** nq), dtype=complex)\r\n\r\nFile ~\\mambaforge\\envs\\test-env-scipy-qutip\\Lib\\site-packages\\qutip\\operators.py:508, in identity(dims)\r\n 492 def identity(dims):\r\n 493 \"\"\"Identity operator. 
Alternative name to :func:`qeye`.\r\n 494\r\n 495 Parameters\r\n (...)\r\n 506 Identity operator Qobj.\r\n 507 \"\"\"\r\n--> 508 return qeye(dims)\r\n\r\nFile ~\\mambaforge\\envs\\test-env-scipy-qutip\\Lib\\site-packages\\qutip\\operators.py:488, in qeye(dimensions)\r\n 452 \"\"\"\r\n 453 Identity operator.\r\n 454\r\n (...)\r\n 485\r\n 486 \"\"\"\r\n 487 size, dimensions = _implicit_tensor_dimensions(dimensions)\r\n--> 488 return Qobj(fast_identity(size),\r\n 489 dims=dimensions, isherm=True, isunitary=True)\r\n\r\nFile ~\\mambaforge\\envs\\test-env-scipy-qutip\\Lib\\site-packages\\qutip\\fastsparse.py:389, in fast_identity(N)\r\n 387 ptr = np.arange(N+1, dtype=np.int32)\r\n 388 ptr[-1] = N\r\n--> 389 return fast_csr_matrix((data,ind,ptr),shape=(N,N))\r\n\r\nFile ~\\mambaforge\\envs\\test-env-scipy-qutip\\Lib\\site-packages\\qutip\\fastsparse.py:55, in fast_csr_matrix.__init__(self, args, shape, dtype, copy)\r\n 53 self.dtype = complex\r\n 54 self.maxprint = 50\r\n---> 55 self.format = 'csr'\r\n\r\nAttributeError: property 'format' of 'fast_csr_matrix' object has no setter\n```\n\n\n### Expected Behaviour\n\nI expected to be able to import qutip :) \n\n### Your Environment\n\n```shell\nQuTiP Version: 4.7.1\r\nNumpy Version: 1.25.0\r\nScipy Version: 1.11.0\r\nCython Version: None\r\nMatplotlib Version: None\r\nPython Version: 3.11.4\r\nNumber of CPUs: 8\r\nBLAS Info: Generic\r\nOPENMP Installed: False\r\nINTEL MKL Ext: False\r\nPlatform Info: Windows (AMD64)\n```\n\n\n### Additional Context\n\nThe environment above was generated with `scipy==1.10.1` (which still worked); I've changed the Scipy version to `1.11.0` manually in that list.\r\n\r\nForcing `scipy==1.10.1`, resolves the problem. Hence, I'm under the impression that the recent bump of `scipy` to `1.11.0` is causing the issue.\n", "before_files": [{"content": "from warnings import warn\nimport operator\n\nimport numpy as np\nfrom scipy.sparse import (\n csr_matrix, dia_matrix, isspmatrix, SparseEfficiencyWarning,\n)\n\n# fast_csr_matrix extends the internals of csr_matrix, and we need to\n# import parts of the internals of scipy.sparse to do that:\nimport scipy.sparse\nimport scipy.sparse._sparsetools as _sparsetools\nif hasattr(scipy.sparse, \"_sputils\"):\n # SciPy 1.8.0 deprecated the public scipy.sparse.sputils interface and\n # moved it to _sputils\n from scipy.sparse._sputils import (\n isdense, isscalarlike, upcast, get_index_dtype,\n )\nelse:\n from scipy.sparse.sputils import (\n isdense, isscalarlike, upcast, get_index_dtype,\n )\n\n\nclass fast_csr_matrix(csr_matrix):\n \"\"\"\n A subclass of scipy.sparse.csr_matrix that skips the data format\n checks that are run everytime a new csr_matrix is created.\n \"\"\"\n def __init__(self, args=None, shape=None, dtype=None, copy=False):\n if args is None: #Build zero matrix\n if shape is None:\n raise Exception('Shape must be given when building zero matrix.')\n self.data = np.array([], dtype=complex)\n self.indices = np.array([], dtype=np.int32)\n self.indptr = np.zeros(shape[0]+1, dtype=np.int32)\n self._shape = tuple(int(s) for s in shape)\n\n else:\n if args[0].shape[0] and args[0].dtype != complex:\n raise TypeError('fast_csr_matrix allows only complex data.')\n if args[1].shape[0] and args[1].dtype != np.int32:\n raise TypeError('fast_csr_matrix allows only int32 indices.')\n if args[2].shape[0] and args[1].dtype != np.int32:\n raise TypeError('fast_csr_matrix allows only int32 indptr.')\n self.data = np.array(args[0], dtype=complex, copy=copy)\n self.indices = 
np.array(args[1], dtype=np.int32, copy=copy)\n self.indptr = np.array(args[2], dtype=np.int32, copy=copy)\n if shape is None:\n self._shape = tuple([len(self.indptr)-1]*2)\n else:\n self._shape = tuple(int(s) for s in shape)\n self.dtype = complex\n self.maxprint = 50\n self.format = 'csr'\n\n def _binopt(self, other, op):\n \"\"\"\n Do the binary operation fn to two sparse matrices using\n fast_csr_matrix only when other is also a fast_csr_matrix.\n \"\"\"\n # e.g. csr_plus_csr, csr_minus_csr, etc.\n if not isinstance(other, fast_csr_matrix):\n other = csr_matrix(other)\n # e.g. csr_plus_csr, csr_minus_csr, etc.\n fn = getattr(_sparsetools, self.format + op + self.format)\n\n maxnnz = self.nnz + other.nnz\n idx_dtype = get_index_dtype((self.indptr, self.indices,\n other.indptr, other.indices),\n maxval=maxnnz)\n indptr = np.empty(self.indptr.shape, dtype=idx_dtype)\n indices = np.empty(maxnnz, dtype=idx_dtype)\n\n bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_']\n if op in bool_ops:\n data = np.empty(maxnnz, dtype=np.bool_)\n else:\n data = np.empty(maxnnz, dtype=upcast(self.dtype, other.dtype))\n\n fn(self.shape[0], self.shape[1],\n np.asarray(self.indptr, dtype=idx_dtype),\n np.asarray(self.indices, dtype=idx_dtype),\n self.data,\n np.asarray(other.indptr, dtype=idx_dtype),\n np.asarray(other.indices, dtype=idx_dtype),\n other.data,\n indptr, indices, data)\n\n actual_nnz = indptr[-1]\n indices = indices[:actual_nnz]\n data = data[:actual_nnz]\n if actual_nnz < maxnnz // 2:\n # too much waste, trim arrays\n indices = indices.copy()\n data = data.copy()\n if isinstance(other, fast_csr_matrix) and (not op in bool_ops):\n A = fast_csr_matrix((data, indices, indptr), dtype=data.dtype, shape=self.shape)\n else:\n A = csr_matrix((data, indices, indptr), dtype=data.dtype, shape=self.shape)\n return A\n\n def multiply(self, other):\n \"\"\"Point-wise multiplication by another matrix, vector, or\n scalar.\n \"\"\"\n # Scalar multiplication.\n if isscalarlike(other):\n return self._mul_scalar(other)\n # Sparse matrix or vector.\n if isspmatrix(other):\n if self.shape == other.shape:\n if not isinstance(other, fast_csr_matrix):\n other = csr_matrix(other)\n return self._binopt(other, '_elmul_')\n # Single element.\n elif other.shape == (1,1):\n return self._mul_scalar(other.toarray()[0, 0])\n elif self.shape == (1,1):\n return other._mul_scalar(self.toarray()[0, 0])\n # A row times a column.\n elif self.shape[1] == other.shape[0] and self.shape[1] == 1:\n return self._mul_sparse_matrix(other.tocsc())\n elif self.shape[0] == other.shape[1] and self.shape[0] == 1:\n return other._mul_sparse_matrix(self.tocsc())\n # Row vector times matrix. other is a row.\n elif other.shape[0] == 1 and self.shape[1] == other.shape[1]:\n other = dia_matrix((other.toarray().ravel(), [0]),\n shape=(other.shape[1], other.shape[1]))\n return self._mul_sparse_matrix(other)\n # self is a row.\n elif self.shape[0] == 1 and self.shape[1] == other.shape[1]:\n copy = dia_matrix((self.toarray().ravel(), [0]),\n shape=(self.shape[1], self.shape[1]))\n return other._mul_sparse_matrix(copy)\n # Column vector times matrix. 
other is a column.\n elif other.shape[1] == 1 and self.shape[0] == other.shape[0]:\n other = dia_matrix((other.toarray().ravel(), [0]),\n shape=(other.shape[0], other.shape[0]))\n return other._mul_sparse_matrix(self)\n # self is a column.\n elif self.shape[1] == 1 and self.shape[0] == other.shape[0]:\n copy = dia_matrix((self.toarray().ravel(), [0]),\n shape=(self.shape[0], self.shape[0]))\n return copy._mul_sparse_matrix(other)\n else:\n raise ValueError(\"inconsistent shapes\")\n # Dense matrix.\n if isdense(other):\n if self.shape == other.shape:\n ret = self.tocoo()\n ret.data = np.multiply(ret.data, other[ret.row, ret.col]\n ).view(np.ndarray).ravel()\n return ret\n # Single element.\n elif other.size == 1:\n return self._mul_scalar(other.flat[0])\n # Anything else.\n return np.multiply(self.toarray(), other)\n\n def _mul_sparse_matrix(self, other):\n \"\"\"\n Do the sparse matrix mult returning fast_csr_matrix only\n when other is also fast_csr_matrix.\n \"\"\"\n M, _ = self.shape\n _, N = other.shape\n\n major_axis = self._swap((M, N))[0]\n if isinstance(other, fast_csr_matrix):\n A = zcsr_mult(self, other, sorted=1)\n return A\n\n other = csr_matrix(other) # convert to this format\n idx_dtype = get_index_dtype((self.indptr, self.indices,\n other.indptr, other.indices),\n maxval=M*N)\n\n # scipy 1.5 renamed the older csr_matmat_pass1 to the much more\n # descriptive csr_matmat_maxnnz, but also changed the call and logic\n # structure of constructing the indices.\n try:\n fn = getattr(_sparsetools, self.format + '_matmat_maxnnz')\n nnz = fn(M, N,\n np.asarray(self.indptr, dtype=idx_dtype),\n np.asarray(self.indices, dtype=idx_dtype),\n np.asarray(other.indptr, dtype=idx_dtype),\n np.asarray(other.indices, dtype=idx_dtype))\n idx_dtype = get_index_dtype((self.indptr, self.indices,\n other.indptr, other.indices),\n maxval=nnz)\n indptr = np.empty(major_axis + 1, dtype=idx_dtype)\n except AttributeError:\n indptr = np.empty(major_axis + 1, dtype=idx_dtype)\n fn = getattr(_sparsetools, self.format + '_matmat_pass1')\n fn(M, N,\n np.asarray(self.indptr, dtype=idx_dtype),\n np.asarray(self.indices, dtype=idx_dtype),\n np.asarray(other.indptr, dtype=idx_dtype),\n np.asarray(other.indices, dtype=idx_dtype),\n indptr)\n nnz = indptr[-1]\n idx_dtype = get_index_dtype((self.indptr, self.indices,\n other.indptr, other.indices),\n maxval=nnz)\n\n indices = np.empty(nnz, dtype=idx_dtype)\n data = np.empty(nnz, dtype=upcast(self.dtype, other.dtype))\n\n try:\n fn = getattr(_sparsetools, self.format + '_matmat')\n except AttributeError:\n fn = getattr(_sparsetools, self.format + '_matmat_pass2')\n fn(M, N, np.asarray(self.indptr, dtype=idx_dtype),\n np.asarray(self.indices, dtype=idx_dtype),\n self.data,\n np.asarray(other.indptr, dtype=idx_dtype),\n np.asarray(other.indices, dtype=idx_dtype),\n other.data,\n indptr, indices, data)\n A = csr_matrix((data, indices, indptr), shape=(M, N))\n return A\n\n def _scalar_binopt(self, other, op):\n \"\"\"Scalar version of self._binopt, for cases in which no new nonzeros\n are added. 
Produces a new spmatrix in canonical form.\n \"\"\"\n self.sum_duplicates()\n res = self._with_data(op(self.data, other), copy=True)\n res.eliminate_zeros()\n return res\n\n def __eq__(self, other):\n # Scalar other.\n if isscalarlike(other):\n if np.isnan(other):\n return csr_matrix(self.shape, dtype=np.bool_)\n\n if other == 0:\n warn(\"Comparing a sparse matrix with 0 using == is inefficient\"\n \", try using != instead.\", SparseEfficiencyWarning)\n all_true = _all_true(self.shape)\n inv = self._scalar_binopt(other, operator.ne)\n return all_true - inv\n else:\n return self._scalar_binopt(other, operator.eq)\n # Dense other.\n elif isdense(other):\n return self.toarray() == other\n # Sparse other.\n elif isspmatrix(other):\n warn(\"Comparing sparse matrices using == is inefficient, try using\"\n \" != instead.\", SparseEfficiencyWarning)\n #TODO sparse broadcasting\n if self.shape != other.shape:\n return False\n elif self.format != other.format:\n other = other.asformat(self.format)\n res = self._binopt(other,'_ne_')\n all_true = _all_true(self.shape)\n return all_true - res\n else:\n return False\n\n def __ne__(self, other):\n # Scalar other.\n if isscalarlike(other):\n if np.isnan(other):\n warn(\"Comparing a sparse matrix with nan using != is inefficient\",\n SparseEfficiencyWarning)\n all_true = _all_true(self.shape)\n return all_true\n elif other != 0:\n warn(\"Comparing a sparse matrix with a nonzero scalar using !=\"\n \" is inefficient, try using == instead.\", SparseEfficiencyWarning)\n all_true = _all_true(self.shape)\n inv = self._scalar_binopt(other, operator.eq)\n return all_true - inv\n else:\n return self._scalar_binopt(other, operator.ne)\n # Dense other.\n elif isdense(other):\n return self.toarray() != other\n # Sparse other.\n elif isspmatrix(other):\n #TODO sparse broadcasting\n if self.shape != other.shape:\n return True\n elif self.format != other.format:\n other = other.asformat(self.format)\n return self._binopt(other,'_ne_')\n else:\n return True\n\n def _inequality(self, other, op, op_name, bad_scalar_msg):\n # Scalar other.\n if isscalarlike(other):\n if 0 == other and op_name in ('_le_', '_ge_'):\n raise NotImplementedError(\" >= and <= don't work with 0.\")\n elif op(0, other):\n warn(bad_scalar_msg, SparseEfficiencyWarning)\n other_arr = np.empty(self.shape, dtype=np.result_type(other))\n other_arr.fill(other)\n other_arr = csr_matrix(other_arr)\n return self._binopt(other_arr, op_name)\n else:\n return self._scalar_binopt(other, op)\n # Dense other.\n elif isdense(other):\n return op(self.toarray(), other)\n # Sparse other.\n elif isspmatrix(other):\n #TODO sparse broadcasting\n if self.shape != other.shape:\n raise ValueError(\"inconsistent shapes\")\n elif self.format != other.format:\n other = other.asformat(self.format)\n if op_name not in ('_ge_', '_le_'):\n return self._binopt(other, op_name)\n\n warn(\"Comparing sparse matrices using >= and <= is inefficient, \"\n \"using <, >, or !=, instead.\", SparseEfficiencyWarning)\n all_true = _all_true(self.shape)\n res = self._binopt(other, '_gt_' if op_name == '_le_' else '_lt_')\n return all_true - res\n else:\n raise ValueError(\"Operands could not be compared.\")\n\n def _with_data(self,data,copy=True):\n \"\"\"Returns a matrix with the same sparsity structure as self,\n but with different data. By default the structure arrays\n (i.e. 
.indptr and .indices) are copied.\n \"\"\"\n # We need this just in case something like abs(data) gets called\n # does nothing if data.dtype is complex.\n data = np.asarray(data, dtype=complex)\n if copy:\n return fast_csr_matrix((data,self.indices.copy(),self.indptr.copy()),\n shape=self.shape,dtype=data.dtype)\n else:\n return fast_csr_matrix((data,self.indices,self.indptr),\n shape=self.shape,dtype=data.dtype)\n\n def transpose(self):\n \"\"\"\n Returns the transpose of the matrix, keeping\n it in fast_csr format.\n \"\"\"\n return zcsr_transpose(self)\n\n def trans(self):\n \"\"\"\n Same as transpose\n \"\"\"\n return zcsr_transpose(self)\n\n def getH(self):\n \"\"\"\n Returns the conjugate-transpose of the matrix, keeping\n it in fast_csr format.\n \"\"\"\n return zcsr_adjoint(self)\n\n def adjoint(self):\n \"\"\"\n Same as getH\n \"\"\"\n return zcsr_adjoint(self)\n\n\ndef csr2fast(A, copy=False):\n if (not isinstance(A, fast_csr_matrix)) or copy:\n # Do not need to do any type checking here\n # since fast_csr_matrix does that.\n return fast_csr_matrix((A.data,A.indices,A.indptr),\n shape=A.shape,copy=copy)\n else:\n return A\n\n\ndef fast_identity(N):\n \"\"\"Generates a sparse identity matrix in\n fast_csr format.\n \"\"\"\n data = np.ones(N, dtype=complex)\n ind = np.arange(N, dtype=np.int32)\n ptr = np.arange(N+1, dtype=np.int32)\n ptr[-1] = N\n return fast_csr_matrix((data,ind,ptr),shape=(N,N))\n\n\n\n#Convenience functions\n#--------------------\ndef _all_true(shape):\n A = csr_matrix((np.ones(np.prod(shape), dtype=np.bool_),\n np.tile(np.arange(shape[1],dtype=np.int32),shape[0]),\n np.arange(0,np.prod(shape)+1,shape[1],dtype=np.int32)),\n shape=shape)\n return A\n\n\n\n#Need to do some trailing imports here\n#-------------------------------------\nfrom qutip.cy.spmath import (zcsr_transpose, zcsr_adjoint, zcsr_mult)\n", "path": "qutip/fastsparse.py"}], "after_files": [{"content": "from warnings import warn\nimport operator\n\nimport numpy as np\nfrom scipy.sparse import (\n csr_matrix, dia_matrix, isspmatrix, SparseEfficiencyWarning,\n)\n\n# fast_csr_matrix extends the internals of csr_matrix, and we need to\n# import parts of the internals of scipy.sparse to do that:\nimport scipy.sparse\nimport scipy.sparse._sparsetools as _sparsetools\nif hasattr(scipy.sparse, \"_sputils\"):\n # SciPy 1.8.0 deprecated the public scipy.sparse.sputils interface and\n # moved it to _sputils\n from scipy.sparse._sputils import (\n isdense, isscalarlike, upcast, get_index_dtype,\n )\nelse:\n from scipy.sparse.sputils import (\n isdense, isscalarlike, upcast, get_index_dtype,\n )\n\n\nclass fast_csr_matrix(csr_matrix):\n \"\"\"\n A subclass of scipy.sparse.csr_matrix that skips the data format\n checks that are run everytime a new csr_matrix is created.\n \"\"\"\n def __init__(self, args=None, shape=None, dtype=None, copy=False):\n if args is None: #Build zero matrix\n if shape is None:\n raise Exception('Shape must be given when building zero matrix.')\n self.data = np.array([], dtype=complex)\n self.indices = np.array([], dtype=np.int32)\n self.indptr = np.zeros(shape[0]+1, dtype=np.int32)\n self._shape = tuple(int(s) for s in shape)\n\n else:\n if args[0].shape[0] and args[0].dtype != complex:\n raise TypeError('fast_csr_matrix allows only complex data.')\n if args[1].shape[0] and args[1].dtype != np.int32:\n raise TypeError('fast_csr_matrix allows only int32 indices.')\n if args[2].shape[0] and args[1].dtype != np.int32:\n raise TypeError('fast_csr_matrix allows only int32 indptr.')\n 
self.data = np.array(args[0], dtype=complex, copy=copy)\n self.indices = np.array(args[1], dtype=np.int32, copy=copy)\n self.indptr = np.array(args[2], dtype=np.int32, copy=copy)\n if shape is None:\n self._shape = tuple([len(self.indptr)-1]*2)\n else:\n self._shape = tuple(int(s) for s in shape)\n self.dtype = complex\n self.maxprint = 50\n if hasattr(self, \"_format\"):\n # format is readonly since 1.11\n self._format = 'csr'\n else:\n self.format = 'csr'\n\n def _binopt(self, other, op):\n \"\"\"\n Do the binary operation fn to two sparse matrices using\n fast_csr_matrix only when other is also a fast_csr_matrix.\n \"\"\"\n # e.g. csr_plus_csr, csr_minus_csr, etc.\n if not isinstance(other, fast_csr_matrix):\n other = csr_matrix(other)\n # e.g. csr_plus_csr, csr_minus_csr, etc.\n fn = getattr(_sparsetools, self.format + op + self.format)\n\n maxnnz = self.nnz + other.nnz\n idx_dtype = get_index_dtype((self.indptr, self.indices,\n other.indptr, other.indices),\n maxval=maxnnz)\n indptr = np.empty(self.indptr.shape, dtype=idx_dtype)\n indices = np.empty(maxnnz, dtype=idx_dtype)\n\n bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_']\n if op in bool_ops:\n data = np.empty(maxnnz, dtype=np.bool_)\n else:\n data = np.empty(maxnnz, dtype=upcast(self.dtype, other.dtype))\n\n fn(self.shape[0], self.shape[1],\n np.asarray(self.indptr, dtype=idx_dtype),\n np.asarray(self.indices, dtype=idx_dtype),\n self.data,\n np.asarray(other.indptr, dtype=idx_dtype),\n np.asarray(other.indices, dtype=idx_dtype),\n other.data,\n indptr, indices, data)\n\n actual_nnz = indptr[-1]\n indices = indices[:actual_nnz]\n data = data[:actual_nnz]\n if actual_nnz < maxnnz // 2:\n # too much waste, trim arrays\n indices = indices.copy()\n data = data.copy()\n if isinstance(other, fast_csr_matrix) and (not op in bool_ops):\n A = fast_csr_matrix((data, indices, indptr), dtype=data.dtype, shape=self.shape)\n else:\n A = csr_matrix((data, indices, indptr), dtype=data.dtype, shape=self.shape)\n return A\n\n def multiply(self, other):\n \"\"\"Point-wise multiplication by another matrix, vector, or\n scalar.\n \"\"\"\n # Scalar multiplication.\n if isscalarlike(other):\n return self._mul_scalar(other)\n # Sparse matrix or vector.\n if isspmatrix(other):\n if self.shape == other.shape:\n if not isinstance(other, fast_csr_matrix):\n other = csr_matrix(other)\n return self._binopt(other, '_elmul_')\n # Single element.\n elif other.shape == (1,1):\n return self._mul_scalar(other.toarray()[0, 0])\n elif self.shape == (1,1):\n return other._mul_scalar(self.toarray()[0, 0])\n # A row times a column.\n elif self.shape[1] == other.shape[0] and self.shape[1] == 1:\n return self._mul_sparse_matrix(other.tocsc())\n elif self.shape[0] == other.shape[1] and self.shape[0] == 1:\n return other._mul_sparse_matrix(self.tocsc())\n # Row vector times matrix. other is a row.\n elif other.shape[0] == 1 and self.shape[1] == other.shape[1]:\n other = dia_matrix((other.toarray().ravel(), [0]),\n shape=(other.shape[1], other.shape[1]))\n return self._mul_sparse_matrix(other)\n # self is a row.\n elif self.shape[0] == 1 and self.shape[1] == other.shape[1]:\n copy = dia_matrix((self.toarray().ravel(), [0]),\n shape=(self.shape[1], self.shape[1]))\n return other._mul_sparse_matrix(copy)\n # Column vector times matrix. 
other is a column.\n elif other.shape[1] == 1 and self.shape[0] == other.shape[0]:\n other = dia_matrix((other.toarray().ravel(), [0]),\n shape=(other.shape[0], other.shape[0]))\n return other._mul_sparse_matrix(self)\n # self is a column.\n elif self.shape[1] == 1 and self.shape[0] == other.shape[0]:\n copy = dia_matrix((self.toarray().ravel(), [0]),\n shape=(self.shape[0], self.shape[0]))\n return copy._mul_sparse_matrix(other)\n else:\n raise ValueError(\"inconsistent shapes\")\n # Dense matrix.\n if isdense(other):\n if self.shape == other.shape:\n ret = self.tocoo()\n ret.data = np.multiply(ret.data, other[ret.row, ret.col]\n ).view(np.ndarray).ravel()\n return ret\n # Single element.\n elif other.size == 1:\n return self._mul_scalar(other.flat[0])\n # Anything else.\n return np.multiply(self.toarray(), other)\n\n def _mul_sparse_matrix(self, other):\n \"\"\"\n Do the sparse matrix mult returning fast_csr_matrix only\n when other is also fast_csr_matrix.\n \"\"\"\n M, _ = self.shape\n _, N = other.shape\n\n major_axis = self._swap((M, N))[0]\n if isinstance(other, fast_csr_matrix):\n A = zcsr_mult(self, other, sorted=1)\n return A\n\n other = csr_matrix(other) # convert to this format\n idx_dtype = get_index_dtype((self.indptr, self.indices,\n other.indptr, other.indices),\n maxval=M*N)\n\n # scipy 1.5 renamed the older csr_matmat_pass1 to the much more\n # descriptive csr_matmat_maxnnz, but also changed the call and logic\n # structure of constructing the indices.\n try:\n fn = getattr(_sparsetools, self.format + '_matmat_maxnnz')\n nnz = fn(M, N,\n np.asarray(self.indptr, dtype=idx_dtype),\n np.asarray(self.indices, dtype=idx_dtype),\n np.asarray(other.indptr, dtype=idx_dtype),\n np.asarray(other.indices, dtype=idx_dtype))\n idx_dtype = get_index_dtype((self.indptr, self.indices,\n other.indptr, other.indices),\n maxval=nnz)\n indptr = np.empty(major_axis + 1, dtype=idx_dtype)\n except AttributeError:\n indptr = np.empty(major_axis + 1, dtype=idx_dtype)\n fn = getattr(_sparsetools, self.format + '_matmat_pass1')\n fn(M, N,\n np.asarray(self.indptr, dtype=idx_dtype),\n np.asarray(self.indices, dtype=idx_dtype),\n np.asarray(other.indptr, dtype=idx_dtype),\n np.asarray(other.indices, dtype=idx_dtype),\n indptr)\n nnz = indptr[-1]\n idx_dtype = get_index_dtype((self.indptr, self.indices,\n other.indptr, other.indices),\n maxval=nnz)\n\n indices = np.empty(nnz, dtype=idx_dtype)\n data = np.empty(nnz, dtype=upcast(self.dtype, other.dtype))\n\n try:\n fn = getattr(_sparsetools, self.format + '_matmat')\n except AttributeError:\n fn = getattr(_sparsetools, self.format + '_matmat_pass2')\n fn(M, N, np.asarray(self.indptr, dtype=idx_dtype),\n np.asarray(self.indices, dtype=idx_dtype),\n self.data,\n np.asarray(other.indptr, dtype=idx_dtype),\n np.asarray(other.indices, dtype=idx_dtype),\n other.data,\n indptr, indices, data)\n A = csr_matrix((data, indices, indptr), shape=(M, N))\n return A\n\n def _scalar_binopt(self, other, op):\n \"\"\"Scalar version of self._binopt, for cases in which no new nonzeros\n are added. 
Produces a new spmatrix in canonical form.\n \"\"\"\n self.sum_duplicates()\n res = self._with_data(op(self.data, other), copy=True)\n res.eliminate_zeros()\n return res\n\n def __eq__(self, other):\n # Scalar other.\n if isscalarlike(other):\n if np.isnan(other):\n return csr_matrix(self.shape, dtype=np.bool_)\n\n if other == 0:\n warn(\"Comparing a sparse matrix with 0 using == is inefficient\"\n \", try using != instead.\", SparseEfficiencyWarning)\n all_true = _all_true(self.shape)\n inv = self._scalar_binopt(other, operator.ne)\n return all_true - inv\n else:\n return self._scalar_binopt(other, operator.eq)\n # Dense other.\n elif isdense(other):\n return self.toarray() == other\n # Sparse other.\n elif isspmatrix(other):\n warn(\"Comparing sparse matrices using == is inefficient, try using\"\n \" != instead.\", SparseEfficiencyWarning)\n #TODO sparse broadcasting\n if self.shape != other.shape:\n return False\n elif self.format != other.format:\n other = other.asformat(self.format)\n res = self._binopt(other,'_ne_')\n all_true = _all_true(self.shape)\n return all_true - res\n else:\n return False\n\n def __ne__(self, other):\n # Scalar other.\n if isscalarlike(other):\n if np.isnan(other):\n warn(\"Comparing a sparse matrix with nan using != is inefficient\",\n SparseEfficiencyWarning)\n all_true = _all_true(self.shape)\n return all_true\n elif other != 0:\n warn(\"Comparing a sparse matrix with a nonzero scalar using !=\"\n \" is inefficient, try using == instead.\", SparseEfficiencyWarning)\n all_true = _all_true(self.shape)\n inv = self._scalar_binopt(other, operator.eq)\n return all_true - inv\n else:\n return self._scalar_binopt(other, operator.ne)\n # Dense other.\n elif isdense(other):\n return self.toarray() != other\n # Sparse other.\n elif isspmatrix(other):\n #TODO sparse broadcasting\n if self.shape != other.shape:\n return True\n elif self.format != other.format:\n other = other.asformat(self.format)\n return self._binopt(other,'_ne_')\n else:\n return True\n\n def _inequality(self, other, op, op_name, bad_scalar_msg):\n # Scalar other.\n if isscalarlike(other):\n if 0 == other and op_name in ('_le_', '_ge_'):\n raise NotImplementedError(\" >= and <= don't work with 0.\")\n elif op(0, other):\n warn(bad_scalar_msg, SparseEfficiencyWarning)\n other_arr = np.empty(self.shape, dtype=np.result_type(other))\n other_arr.fill(other)\n other_arr = csr_matrix(other_arr)\n return self._binopt(other_arr, op_name)\n else:\n return self._scalar_binopt(other, op)\n # Dense other.\n elif isdense(other):\n return op(self.toarray(), other)\n # Sparse other.\n elif isspmatrix(other):\n #TODO sparse broadcasting\n if self.shape != other.shape:\n raise ValueError(\"inconsistent shapes\")\n elif self.format != other.format:\n other = other.asformat(self.format)\n if op_name not in ('_ge_', '_le_'):\n return self._binopt(other, op_name)\n\n warn(\"Comparing sparse matrices using >= and <= is inefficient, \"\n \"using <, >, or !=, instead.\", SparseEfficiencyWarning)\n all_true = _all_true(self.shape)\n res = self._binopt(other, '_gt_' if op_name == '_le_' else '_lt_')\n return all_true - res\n else:\n raise ValueError(\"Operands could not be compared.\")\n\n def _with_data(self,data,copy=True):\n \"\"\"Returns a matrix with the same sparsity structure as self,\n but with different data. By default the structure arrays\n (i.e. 
.indptr and .indices) are copied.\n \"\"\"\n # We need this just in case something like abs(data) gets called\n # does nothing if data.dtype is complex.\n data = np.asarray(data, dtype=complex)\n if copy:\n return fast_csr_matrix((data,self.indices.copy(),self.indptr.copy()),\n shape=self.shape,dtype=data.dtype)\n else:\n return fast_csr_matrix((data,self.indices,self.indptr),\n shape=self.shape,dtype=data.dtype)\n\n def transpose(self):\n \"\"\"\n Returns the transpose of the matrix, keeping\n it in fast_csr format.\n \"\"\"\n return zcsr_transpose(self)\n\n def trans(self):\n \"\"\"\n Same as transpose\n \"\"\"\n return zcsr_transpose(self)\n\n def getH(self):\n \"\"\"\n Returns the conjugate-transpose of the matrix, keeping\n it in fast_csr format.\n \"\"\"\n return zcsr_adjoint(self)\n\n def adjoint(self):\n \"\"\"\n Same as getH\n \"\"\"\n return zcsr_adjoint(self)\n\n\ndef csr2fast(A, copy=False):\n if (not isinstance(A, fast_csr_matrix)) or copy:\n # Do not need to do any type checking here\n # since fast_csr_matrix does that.\n return fast_csr_matrix((A.data,A.indices,A.indptr),\n shape=A.shape,copy=copy)\n else:\n return A\n\n\ndef fast_identity(N):\n \"\"\"Generates a sparse identity matrix in\n fast_csr format.\n \"\"\"\n data = np.ones(N, dtype=complex)\n ind = np.arange(N, dtype=np.int32)\n ptr = np.arange(N+1, dtype=np.int32)\n ptr[-1] = N\n return fast_csr_matrix((data,ind,ptr),shape=(N,N))\n\n\n\n#Convenience functions\n#--------------------\ndef _all_true(shape):\n A = csr_matrix((np.ones(np.prod(shape), dtype=np.bool_),\n np.tile(np.arange(shape[1],dtype=np.int32),shape[0]),\n np.arange(0,np.prod(shape)+1,shape[1],dtype=np.int32)),\n shape=shape)\n return A\n\n\n\n#Need to do some trailing imports here\n#-------------------------------------\nfrom qutip.cy.spmath import (zcsr_transpose, zcsr_adjoint, zcsr_mult)\n", "path": "qutip/fastsparse.py"}]} |
gh_patches_debug_1204 | rasdani/github-patches | git_diff | qtile__qtile-2641 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bar moves when screen wakes up
<!--
Please do not ask general questions here! There are [community
contact](https://github.com/qtile/qtile#community) options for that.
-->
# Issue description
<!--
A brief discussion of what failed and how it failed. A description of
what you tried is helpful, e.g. "When I use lazy.kill() on a window I get
the following stack trace" instead of "Closing windows doesn't work".
-->
When my screen goes to sleep (screen blank) and I wake it up by moving the mouse, my bar has moved up a few pixels.
This only happens when the bar is configured to have a gap, e.g. `margin=[15, 0, 0, 0]`.
IMPORTANT: The screen has to actually go to sleep for this to happen (give it some time after the screen blanks).
# Qtile version
<!--
Please include the exact commit hash of the version of Qtile that failed.
-->
0.18.1.dev0+g8e7ecc0a.d20210719
# Stack traces
<!--
Please attach any stack traces found in:
* `~/.xsession-errors`
* `~/.local/share/qtile/qtile.log`
-->
Nothing is written to `qtile.log` or `.xsession-errors`.
# Configuration
<!--
Please include a link or attach your configuration to the issue.
-->
This can be reproduced with a minor change to the default config.
Just add some margin to the default bottom bar, e.g. `margin=[15, 0, 0, 0]`; a minimal sketch is shown after the steps below.

1. Reload the configuration.
2. To force a screen blank, run `xset dpms force off`.
3. Wait until the screen actually goes to sleep.
4. Move the mouse to wake it up again.
The bar should now have moved up a few pixels.
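
For reference, a minimal sketch of the change described above applied to the default config (the widget list and bar size are illustrative placeholders taken from the default config, not an exact setup):

```python
# Sketch: default-style bottom bar with a 15 px top gap (margin order is [N, E, S, W]).
from libqtile import bar, widget
from libqtile.config import Screen

screens = [
    Screen(
        bottom=bar.Bar(
            [
                widget.GroupBox(),
                widget.WindowName(),
                widget.Clock(),
            ],
            24,                    # bar thickness
            margin=[15, 0, 0, 0],  # 15 px top gap; this is what triggers the move
        ),
    ),
]
```

Without the `margin` argument (or with `margin=0`) the bar stays in place after the screen wakes up.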
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libqtile/bar.py`
Content:
```
1 # Copyright (c) 2008, Aldo Cortesi. All rights reserved.
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a copy
4 # of this software and associated documentation files (the "Software"), to deal
5 # in the Software without restriction, including without limitation the rights
6 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 # copies of the Software, and to permit persons to whom the Software is
8 # furnished to do so, subject to the following conditions:
9 #
10 # The above copyright notice and this permission notice shall be included in
11 # all copies or substantial portions of the Software.
12 #
13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19 # SOFTWARE.
20
21 from __future__ import annotations
22
23 import typing
24
25 from libqtile import configurable
26 from libqtile.command.base import CommandObject, ItemT
27 from libqtile.log_utils import logger
28 from libqtile.utils import has_transparency
29
30 if typing.TYPE_CHECKING:
31 from libqtile.widget.base import _Widget
32
33
34 class Gap(CommandObject):
35 """A gap placed along one of the edges of the screen
36
37 If a gap has been defined, Qtile will avoid covering it with windows. The
38 most probable reason for configuring a gap is to make space for a
39 third-party bar or other static window.
40
41 Parameters
42 ==========
43 size :
44 The "thickness" of the gap, i.e. the height of a horizontal gap, or the
45 width of a vertical gap.
46 """
47 def __init__(self, size):
48 """
49 """
50 # 'size' corresponds to the height of a horizontal gap, or the width
51 # of a vertical gap
52 self.size = size
53 self.initial_size = size
54 # 'length' corresponds to the width of a horizontal gap, or the height
55 # of a vertical gap
56 self.length = None
57 self.qtile = None
58 self.screen = None
59 self.x = None
60 self.y = None
61 self.width = None
62 self.height = None
63 self.horizontal = None
64
65 def _configure(self, qtile, screen):
66 self.qtile = qtile
67 self.screen = screen
68 # If both horizontal and vertical gaps are present, screen corners are
69 # given to the horizontal ones
70 if screen.top is self:
71 self.x = screen.x
72 self.y = screen.y
73 self.length = screen.width
74 self.width = self.length
75 self.height = self.initial_size
76 self.horizontal = True
77 elif screen.bottom is self:
78 self.x = screen.x
79 self.y = screen.dy + screen.dheight
80 self.length = screen.width
81 self.width = self.length
82 self.height = self.initial_size
83 self.horizontal = True
84 elif screen.left is self:
85 self.x = screen.x
86 self.y = screen.dy
87 self.length = screen.dheight
88 self.width = self.initial_size
89 self.height = self.length
90 self.horizontal = False
91 else: # right
92 self.x = screen.dx + screen.dwidth
93 self.y = screen.dy
94 self.length = screen.dheight
95 self.width = self.initial_size
96 self.height = self.length
97 self.horizontal = False
98
99 def draw(self):
100 pass
101
102 def finalize(self):
103 pass
104
105 def geometry(self):
106 return (self.x, self.y, self.width, self.height)
107
108 def _items(self, name: str) -> ItemT:
109 if name == "screen" and self.screen is not None:
110 return True, []
111 return None
112
113 def _select(self, name, sel):
114 if name == "screen":
115 return self.screen
116
117 @property
118 def position(self):
119 for i in ["top", "bottom", "left", "right"]:
120 if getattr(self.screen, i) is self:
121 return i
122
123 def info(self):
124 return dict(position=self.position)
125
126 def cmd_info(self):
127 """
128 Info for this object.
129 """
130 return self.info()
131
132
133 class Obj:
134 def __init__(self, name):
135 self.name = name
136
137 def __str__(self):
138 return self.name
139
140 def __repr__(self):
141 return self.name
142
143
144 STRETCH = Obj("STRETCH")
145 CALCULATED = Obj("CALCULATED")
146 STATIC = Obj("STATIC")
147
148
149 class Bar(Gap, configurable.Configurable):
150 """A bar, which can contain widgets
151
152 Parameters
153 ==========
154 widgets :
155 A list of widget objects.
156 size :
157 The "thickness" of the bar, i.e. the height of a horizontal bar, or the
158 width of a vertical bar.
159 """
160 defaults = [
161 ("background", "#000000", "Background colour."),
162 ("opacity", 1, "Bar window opacity."),
163 ("margin", 0, "Space around bar as int or list of ints [N E S W]."),
164 ]
165
166 def __init__(self, widgets, size, **config):
167 Gap.__init__(self, size)
168 configurable.Configurable.__init__(self, **config)
169 self.add_defaults(Bar.defaults)
170 self.widgets = widgets
171 self.saved_focus = None
172 self.cursor_in = None
173 self.window = None
174 self.size_calculated = 0
175 self._configured = False
176
177 self.queued_draws = 0
178
179 def _configure(self, qtile, screen):
180 Gap._configure(self, qtile, screen)
181
182 if self.margin:
183 if isinstance(self.margin, int):
184 self.margin = [self.margin] * 4
185 if self.horizontal:
186 self.x += self.margin[3]
187 self.width -= self.margin[1] + self.margin[3]
188 self.length = self.width
189 if self.size == self.initial_size:
190 self.size += self.margin[0] + self.margin[2]
191 if self.screen.top is self:
192 self.y += self.margin[0]
193 else:
194 self.y -= self.margin[2]
195 else:
196 self.y += self.margin[0]
197 self.height -= self.margin[0] + self.margin[2]
198 self.length = self.height
199 self.size += self.margin[1] + self.margin[3]
200 if self.screen.left is self:
201 self.x += self.margin[3]
202 else:
203 self.x -= self.margin[1]
204
205 for w in self.widgets:
206 # Executing _test_orientation_compatibility later, for example in
207 # the _configure() method of each widget, would still pass
208 # test/test_bar.py but a segfault would be raised when nosetests is
209 # about to exit
210 w._test_orientation_compatibility(self.horizontal)
211
212 if self.window:
213 # We get _configure()-ed with an existing window when screens are getting
214 # reconfigured but this screen is present both before and after
215 self.window.place(self.x, self.y, self.width, self.height, 0, None)
216 else:
217 # Whereas we won't have a window if we're startup up for the first time or
218 # the window has been killed by us no longer using the bar's screen
219
220 # X11 only:
221 # To preserve correct display of SysTray widget, we need a 24-bit
222 # window where the user requests an opaque bar.
223 if self.qtile.core.name == "x11":
224 depth = 32 if has_transparency(self.background) else self.qtile.core.conn.default_screen.root_depth
225
226 self.window = self.qtile.core.create_internal(
227 self.x, self.y, self.width, self.height, depth
228 )
229
230 else:
231 self.window = self.qtile.core.create_internal(
232 self.x, self.y, self.width, self.height
233 )
234
235 self.window.opacity = self.opacity
236 self.window.unhide()
237
238 self.drawer = self.window.create_drawer(self.width, self.height)
239 self.drawer.clear(self.background)
240
241 self.window.process_window_expose = self.draw
242 self.window.process_button_click = self.process_button_click
243 self.window.process_button_release = self.process_button_release
244 self.window.process_pointer_enter = self.process_pointer_enter
245 self.window.process_pointer_leave = self.process_pointer_leave
246 self.window.process_pointer_motion = self.process_pointer_motion
247 self.window.process_key_press = self.process_key_press
248
249 self.crashed_widgets = []
250 if self._configured:
251 for i in self.widgets:
252 self._configure_widget(i)
253 else:
254 for idx, i in enumerate(self.widgets):
255 if i.configured:
256 i = i.create_mirror()
257 self.widgets[idx] = i
258 success = self._configure_widget(i)
259 if success:
260 qtile.register_widget(i)
261
262 self._remove_crashed_widgets()
263 self.draw()
264 self._resize(self.length, self.widgets)
265 self._configured = True
266
267 def _configure_widget(self, widget):
268 configured = True
269 try:
270 widget._configure(self.qtile, self)
271 widget.configured = True
272 except Exception as e:
273 logger.error(
274 "{} widget crashed during _configure with "
275 "error: {}".format(widget.__class__.__name__, repr(e))
276 )
277 self.crashed_widgets.append(widget)
278 configured = False
279
280 return configured
281
282 def _remove_crashed_widgets(self):
283 if self.crashed_widgets:
284 from libqtile.widget.config_error import ConfigErrorWidget
285
286 for i in self.crashed_widgets:
287 index = self.widgets.index(i)
288 crash = ConfigErrorWidget(widget=i)
289 crash._configure(self.qtile, self)
290 self.widgets.insert(index, crash)
291 self.widgets.remove(i)
292
293 def finalize(self):
294 self.drawer.finalize()
295
296 def kill_window(self):
297 """Kill the window when the bar's screen is no longer being used."""
298 self.drawer.finalize()
299 self.window.kill()
300 self.window = None
301
302 def _resize(self, length, widgets):
303 stretches = [i for i in widgets if i.length_type == STRETCH]
304 if stretches:
305 stretchspace = length - sum(
306 [i.length for i in widgets if i.length_type != STRETCH]
307 )
308 stretchspace = max(stretchspace, 0)
309 num_stretches = len(stretches)
310 if num_stretches == 1:
311 stretches[0].length = stretchspace
312 else:
313 block = 0
314 blocks = []
315 for i in widgets:
316 if i.length_type != STRETCH:
317 block += i.length
318 else:
319 blocks.append(block)
320 block = 0
321 if block:
322 blocks.append(block)
323 interval = length // num_stretches
324 for idx, i in enumerate(stretches):
325 if idx == 0:
326 i.length = interval - blocks[0] - blocks[1] // 2
327 elif idx == num_stretches - 1:
328 i.length = interval - blocks[-1] - blocks[-2] // 2
329 else:
330 i.length = int(interval - blocks[idx] / 2 - blocks[idx + 1] / 2)
331 stretchspace -= i.length
332 stretches[0].length += stretchspace // 2
333 stretches[-1].length += stretchspace - stretchspace // 2
334
335 offset = 0
336 if self.horizontal:
337 for i in widgets:
338 i.offsetx = offset
339 i.offsety = 0
340 offset += i.length
341 else:
342 for i in widgets:
343 i.offsetx = 0
344 i.offsety = offset
345 offset += i.length
346
347 def get_widget_in_position(self, x: int, y: int) -> typing.Optional[_Widget]:
348 if self.horizontal:
349 for i in self.widgets:
350 if x < i.offsetx + i.length:
351 return i
352 else:
353 for i in self.widgets:
354 if y < i.offsety + i.length:
355 return i
356 return None
357
358 def process_button_click(self, x: int, y: int, button: int) -> None:
359 widget = self.get_widget_in_position(x, y)
360 if widget:
361 widget.button_press(
362 x - widget.offsetx,
363 y - widget.offsety,
364 button,
365 )
366
367 def process_button_release(self, x: int, y: int, button: int) -> None:
368 widget = self.get_widget_in_position(x, y)
369 if widget:
370 widget.button_release(
371 x - widget.offsetx,
372 y - widget.offsety,
373 button,
374 )
375
376 def process_pointer_enter(self, x: int, y: int) -> None:
377 widget = self.get_widget_in_position(x, y)
378 if widget:
379 widget.mouse_enter(
380 x - widget.offsetx,
381 y - widget.offsety,
382 )
383 self.cursor_in = widget
384
385 def process_pointer_leave(self, x: int, y: int) -> None:
386 if self.cursor_in:
387 self.cursor_in.mouse_leave(
388 x - self.cursor_in.offsetx,
389 y - self.cursor_in.offsety,
390 )
391 self.cursor_in = None
392
393 def process_pointer_motion(self, x: int, y: int) -> None:
394 widget = self.get_widget_in_position(x, y)
395 if widget and self.cursor_in and widget is not self.cursor_in:
396 self.cursor_in.mouse_leave(
397 x - self.cursor_in.offsetx,
398 y - self.cursor_in.offsety,
399 )
400 widget.mouse_enter(
401 x - widget.offsetx,
402 y - widget.offsety,
403 )
404 self.cursor_in = widget
405
406 def process_key_press(self, keycode: int) -> None:
407 if self.has_keyboard:
408 self.has_keyboard.process_key_press(keycode)
409
410 def widget_grab_keyboard(self, widget):
411 """
412 A widget can call this method to grab the keyboard focus
413 and receive keyboard messages. When done,
414 widget_ungrab_keyboard() must be called.
415 """
416 self.has_keyboard = widget
417 self.saved_focus = self.qtile.current_window
418 self.window.focus(False)
419
420 def widget_ungrab_keyboard(self):
421 """
422 Removes keyboard focus from the widget.
423 """
424 if self.saved_focus is not None:
425 self.saved_focus.focus(False)
426 self.has_keyboard = None
427
428 def draw(self):
429 if not self.widgets:
430 return # calling self._actual_draw in this case would cause a NameError.
431 if self.queued_draws == 0:
432 self.qtile.call_soon(self._actual_draw)
433 self.queued_draws += 1
434
435 def _actual_draw(self):
436 self.queued_draws = 0
437 self._resize(self.length, self.widgets)
438 for i in self.widgets:
439 i.draw()
440 end = i.offset + i.length # pylint: disable=undefined-loop-variable
441 # we verified that self.widgets is not empty in self.draw(), see above.
442 if end < self.length:
443 if self.horizontal:
444 self.drawer.draw(offsetx=end, width=self.length - end)
445 else:
446 self.drawer.draw(offsety=end, height=self.length - end)
447
448 def info(self):
449 return dict(
450 size=self.size,
451 length=self.length,
452 width=self.width,
453 height=self.height,
454 position=self.position,
455 widgets=[i.info() for i in self.widgets],
456 window=self.window.wid
457 )
458
459 def is_show(self):
460 return self.size != 0
461
462 def show(self, is_show=True):
463 if is_show != self.is_show():
464 if is_show:
465 self.size = self.size_calculated
466 self.window.unhide()
467 else:
468 self.size_calculated = self.size
469 self.size = 0
470 self.window.hide()
471 self.screen.group.layout_all()
472
473 def adjust_for_strut(self, size):
474 if self.size:
475 self.size = self.initial_size
476 if not self.margin:
477 self.margin = [0, 0, 0, 0]
478 if self.screen.top is self:
479 self.margin[0] += size
480 elif self.screen.bottom is self:
481 self.margin[2] += size
482 elif self.screen.left is self:
483 self.margin[3] += size
484 else: # right
485 self.margin[1] += size
486
487 def cmd_fake_button_press(self, screen, position, x, y, button=1):
488 """
489 Fake a mouse-button-press on the bar. Co-ordinates are relative
490 to the top-left corner of the bar.
491
492 :screen The integer screen offset
493 :position One of "top", "bottom", "left", or "right"
494 """
495 self.process_button_click(x, y, button)
496
497
498 BarType = typing.Union[Bar, Gap]
499
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/libqtile/bar.py b/libqtile/bar.py
--- a/libqtile/bar.py
+++ b/libqtile/bar.py
@@ -65,6 +65,7 @@
def _configure(self, qtile, screen):
self.qtile = qtile
self.screen = screen
+ self.size = self.initial_size
# If both horizontal and vertical gaps are present, screen corners are
# given to the horizontal ones
if screen.top is self:
| {"golden_diff": "diff --git a/libqtile/bar.py b/libqtile/bar.py\n--- a/libqtile/bar.py\n+++ b/libqtile/bar.py\n@@ -65,6 +65,7 @@\n def _configure(self, qtile, screen):\n self.qtile = qtile\n self.screen = screen\n+ self.size = self.initial_size\n # If both horizontal and vertical gaps are present, screen corners are\n # given to the horizontal ones\n if screen.top is self:\n", "issue": "Bar moves when screen wakes up\n<!--\r\nPlease do not ask general questions here! There are [community\r\ncontact](https://github.com/qtile/qtile#community) options for that.\r\n-->\r\n\r\n# Issue description\r\n\r\n<!--\r\nA brief discussion of what failed and how it failed. A description of\r\nwhat you tried is helpful, i.e. \"When I use lazy.kill() on a window I get\r\nthe following stack trace\" instead of \"Closing windows doesn't work\".\r\n-->\r\n\r\nWhen my screen goes to sleep (screen blank) and I wake it up by moving the mouse, my bar has moved up a few pixels.\r\nThis only happens when the bar is configured to have a gap `margin=[15, 0, 0, 0]`\r\n\r\nIMPORTANT: The screen has to actually go to sleep for this to happen. (give it some time after screen blank)\r\n\r\n# Qtile version\r\n\r\n<!--\r\nPlease include the exact commit hash of the version of Qtile that failed.\r\n-->\r\n\r\n0.18.1.dev0+g8e7ecc0a.d20210719\r\n\r\n# Stack traces\r\n\r\n<!--\r\nPlease attach any stack traces found in:\r\n\r\n* `~/.xsession-errors`\r\n* `~/.local/share/qtile/qtile.log`\r\n-->\r\n\r\nNothing is written to `qtile.log` or `.xsession-errors`.\r\n\r\n# Configuration\r\n\r\n<!--\r\nPlease include a link or attach your configuration to the issue.\r\n-->\r\n\r\nThis can be reproduced with a minor change to the default config.\r\nJust add some margin to the default bottom bar `margin=[15, 0, 0, 0]`\r\n\r\nReload the configuration.\r\nTo force a screen blank run `xset dpms force off`.\r\nWait until the screen actually goes to sleep.\r\nMove the mouse to wake it up again.\r\n\r\nThe bar should now have moved up a few pixels.\r\n\n", "before_files": [{"content": "# Copyright (c) 2008, Aldo Cortesi. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom __future__ import annotations\n\nimport typing\n\nfrom libqtile import configurable\nfrom libqtile.command.base import CommandObject, ItemT\nfrom libqtile.log_utils import logger\nfrom libqtile.utils import has_transparency\n\nif typing.TYPE_CHECKING:\n from libqtile.widget.base import _Widget\n\n\nclass Gap(CommandObject):\n \"\"\"A gap placed along one of the edges of the screen\n\n If a gap has been defined, Qtile will avoid covering it with windows. The\n most probable reason for configuring a gap is to make space for a\n third-party bar or other static window.\n\n Parameters\n ==========\n size :\n The \"thickness\" of the gap, i.e. the height of a horizontal gap, or the\n width of a vertical gap.\n \"\"\"\n def __init__(self, size):\n \"\"\"\n \"\"\"\n # 'size' corresponds to the height of a horizontal gap, or the width\n # of a vertical gap\n self.size = size\n self.initial_size = size\n # 'length' corresponds to the width of a horizontal gap, or the height\n # of a vertical gap\n self.length = None\n self.qtile = None\n self.screen = None\n self.x = None\n self.y = None\n self.width = None\n self.height = None\n self.horizontal = None\n\n def _configure(self, qtile, screen):\n self.qtile = qtile\n self.screen = screen\n # If both horizontal and vertical gaps are present, screen corners are\n # given to the horizontal ones\n if screen.top is self:\n self.x = screen.x\n self.y = screen.y\n self.length = screen.width\n self.width = self.length\n self.height = self.initial_size\n self.horizontal = True\n elif screen.bottom is self:\n self.x = screen.x\n self.y = screen.dy + screen.dheight\n self.length = screen.width\n self.width = self.length\n self.height = self.initial_size\n self.horizontal = True\n elif screen.left is self:\n self.x = screen.x\n self.y = screen.dy\n self.length = screen.dheight\n self.width = self.initial_size\n self.height = self.length\n self.horizontal = False\n else: # right\n self.x = screen.dx + screen.dwidth\n self.y = screen.dy\n self.length = screen.dheight\n self.width = self.initial_size\n self.height = self.length\n self.horizontal = False\n\n def draw(self):\n pass\n\n def finalize(self):\n pass\n\n def geometry(self):\n return (self.x, self.y, self.width, self.height)\n\n def _items(self, name: str) -> ItemT:\n if name == \"screen\" and self.screen is not None:\n return True, []\n return None\n\n def _select(self, name, sel):\n if name == \"screen\":\n return self.screen\n\n @property\n def position(self):\n for i in [\"top\", \"bottom\", \"left\", \"right\"]:\n if getattr(self.screen, i) is self:\n return i\n\n def info(self):\n return dict(position=self.position)\n\n def cmd_info(self):\n \"\"\"\n Info for this object.\n \"\"\"\n return self.info()\n\n\nclass Obj:\n def __init__(self, name):\n self.name = name\n\n def __str__(self):\n return self.name\n\n def __repr__(self):\n return self.name\n\n\nSTRETCH = Obj(\"STRETCH\")\nCALCULATED = Obj(\"CALCULATED\")\nSTATIC = Obj(\"STATIC\")\n\n\nclass Bar(Gap, configurable.Configurable):\n \"\"\"A bar, which can contain widgets\n\n Parameters\n ==========\n widgets :\n A list of widget objects.\n size :\n The \"thickness\" of the bar, i.e. 
the height of a horizontal bar, or the\n width of a vertical bar.\n \"\"\"\n defaults = [\n (\"background\", \"#000000\", \"Background colour.\"),\n (\"opacity\", 1, \"Bar window opacity.\"),\n (\"margin\", 0, \"Space around bar as int or list of ints [N E S W].\"),\n ]\n\n def __init__(self, widgets, size, **config):\n Gap.__init__(self, size)\n configurable.Configurable.__init__(self, **config)\n self.add_defaults(Bar.defaults)\n self.widgets = widgets\n self.saved_focus = None\n self.cursor_in = None\n self.window = None\n self.size_calculated = 0\n self._configured = False\n\n self.queued_draws = 0\n\n def _configure(self, qtile, screen):\n Gap._configure(self, qtile, screen)\n\n if self.margin:\n if isinstance(self.margin, int):\n self.margin = [self.margin] * 4\n if self.horizontal:\n self.x += self.margin[3]\n self.width -= self.margin[1] + self.margin[3]\n self.length = self.width\n if self.size == self.initial_size:\n self.size += self.margin[0] + self.margin[2]\n if self.screen.top is self:\n self.y += self.margin[0]\n else:\n self.y -= self.margin[2]\n else:\n self.y += self.margin[0]\n self.height -= self.margin[0] + self.margin[2]\n self.length = self.height\n self.size += self.margin[1] + self.margin[3]\n if self.screen.left is self:\n self.x += self.margin[3]\n else:\n self.x -= self.margin[1]\n\n for w in self.widgets:\n # Executing _test_orientation_compatibility later, for example in\n # the _configure() method of each widget, would still pass\n # test/test_bar.py but a segfault would be raised when nosetests is\n # about to exit\n w._test_orientation_compatibility(self.horizontal)\n\n if self.window:\n # We get _configure()-ed with an existing window when screens are getting\n # reconfigured but this screen is present both before and after\n self.window.place(self.x, self.y, self.width, self.height, 0, None)\n else:\n # Whereas we won't have a window if we're startup up for the first time or\n # the window has been killed by us no longer using the bar's screen\n\n # X11 only:\n # To preserve correct display of SysTray widget, we need a 24-bit\n # window where the user requests an opaque bar.\n if self.qtile.core.name == \"x11\":\n depth = 32 if has_transparency(self.background) else self.qtile.core.conn.default_screen.root_depth\n\n self.window = self.qtile.core.create_internal(\n self.x, self.y, self.width, self.height, depth\n )\n\n else:\n self.window = self.qtile.core.create_internal(\n self.x, self.y, self.width, self.height\n )\n\n self.window.opacity = self.opacity\n self.window.unhide()\n\n self.drawer = self.window.create_drawer(self.width, self.height)\n self.drawer.clear(self.background)\n\n self.window.process_window_expose = self.draw\n self.window.process_button_click = self.process_button_click\n self.window.process_button_release = self.process_button_release\n self.window.process_pointer_enter = self.process_pointer_enter\n self.window.process_pointer_leave = self.process_pointer_leave\n self.window.process_pointer_motion = self.process_pointer_motion\n self.window.process_key_press = self.process_key_press\n\n self.crashed_widgets = []\n if self._configured:\n for i in self.widgets:\n self._configure_widget(i)\n else:\n for idx, i in enumerate(self.widgets):\n if i.configured:\n i = i.create_mirror()\n self.widgets[idx] = i\n success = self._configure_widget(i)\n if success:\n qtile.register_widget(i)\n\n self._remove_crashed_widgets()\n self.draw()\n self._resize(self.length, self.widgets)\n self._configured = True\n\n def _configure_widget(self, 
widget):\n configured = True\n try:\n widget._configure(self.qtile, self)\n widget.configured = True\n except Exception as e:\n logger.error(\n \"{} widget crashed during _configure with \"\n \"error: {}\".format(widget.__class__.__name__, repr(e))\n )\n self.crashed_widgets.append(widget)\n configured = False\n\n return configured\n\n def _remove_crashed_widgets(self):\n if self.crashed_widgets:\n from libqtile.widget.config_error import ConfigErrorWidget\n\n for i in self.crashed_widgets:\n index = self.widgets.index(i)\n crash = ConfigErrorWidget(widget=i)\n crash._configure(self.qtile, self)\n self.widgets.insert(index, crash)\n self.widgets.remove(i)\n\n def finalize(self):\n self.drawer.finalize()\n\n def kill_window(self):\n \"\"\"Kill the window when the bar's screen is no longer being used.\"\"\"\n self.drawer.finalize()\n self.window.kill()\n self.window = None\n\n def _resize(self, length, widgets):\n stretches = [i for i in widgets if i.length_type == STRETCH]\n if stretches:\n stretchspace = length - sum(\n [i.length for i in widgets if i.length_type != STRETCH]\n )\n stretchspace = max(stretchspace, 0)\n num_stretches = len(stretches)\n if num_stretches == 1:\n stretches[0].length = stretchspace\n else:\n block = 0\n blocks = []\n for i in widgets:\n if i.length_type != STRETCH:\n block += i.length\n else:\n blocks.append(block)\n block = 0\n if block:\n blocks.append(block)\n interval = length // num_stretches\n for idx, i in enumerate(stretches):\n if idx == 0:\n i.length = interval - blocks[0] - blocks[1] // 2\n elif idx == num_stretches - 1:\n i.length = interval - blocks[-1] - blocks[-2] // 2\n else:\n i.length = int(interval - blocks[idx] / 2 - blocks[idx + 1] / 2)\n stretchspace -= i.length\n stretches[0].length += stretchspace // 2\n stretches[-1].length += stretchspace - stretchspace // 2\n\n offset = 0\n if self.horizontal:\n for i in widgets:\n i.offsetx = offset\n i.offsety = 0\n offset += i.length\n else:\n for i in widgets:\n i.offsetx = 0\n i.offsety = offset\n offset += i.length\n\n def get_widget_in_position(self, x: int, y: int) -> typing.Optional[_Widget]:\n if self.horizontal:\n for i in self.widgets:\n if x < i.offsetx + i.length:\n return i\n else:\n for i in self.widgets:\n if y < i.offsety + i.length:\n return i\n return None\n\n def process_button_click(self, x: int, y: int, button: int) -> None:\n widget = self.get_widget_in_position(x, y)\n if widget:\n widget.button_press(\n x - widget.offsetx,\n y - widget.offsety,\n button,\n )\n\n def process_button_release(self, x: int, y: int, button: int) -> None:\n widget = self.get_widget_in_position(x, y)\n if widget:\n widget.button_release(\n x - widget.offsetx,\n y - widget.offsety,\n button,\n )\n\n def process_pointer_enter(self, x: int, y: int) -> None:\n widget = self.get_widget_in_position(x, y)\n if widget:\n widget.mouse_enter(\n x - widget.offsetx,\n y - widget.offsety,\n )\n self.cursor_in = widget\n\n def process_pointer_leave(self, x: int, y: int) -> None:\n if self.cursor_in:\n self.cursor_in.mouse_leave(\n x - self.cursor_in.offsetx,\n y - self.cursor_in.offsety,\n )\n self.cursor_in = None\n\n def process_pointer_motion(self, x: int, y: int) -> None:\n widget = self.get_widget_in_position(x, y)\n if widget and self.cursor_in and widget is not self.cursor_in:\n self.cursor_in.mouse_leave(\n x - self.cursor_in.offsetx,\n y - self.cursor_in.offsety,\n )\n widget.mouse_enter(\n x - widget.offsetx,\n y - widget.offsety,\n )\n self.cursor_in = widget\n\n def process_key_press(self, keycode: int) 
-> None:\n if self.has_keyboard:\n self.has_keyboard.process_key_press(keycode)\n\n def widget_grab_keyboard(self, widget):\n \"\"\"\n A widget can call this method to grab the keyboard focus\n and receive keyboard messages. When done,\n widget_ungrab_keyboard() must be called.\n \"\"\"\n self.has_keyboard = widget\n self.saved_focus = self.qtile.current_window\n self.window.focus(False)\n\n def widget_ungrab_keyboard(self):\n \"\"\"\n Removes keyboard focus from the widget.\n \"\"\"\n if self.saved_focus is not None:\n self.saved_focus.focus(False)\n self.has_keyboard = None\n\n def draw(self):\n if not self.widgets:\n return # calling self._actual_draw in this case would cause a NameError.\n if self.queued_draws == 0:\n self.qtile.call_soon(self._actual_draw)\n self.queued_draws += 1\n\n def _actual_draw(self):\n self.queued_draws = 0\n self._resize(self.length, self.widgets)\n for i in self.widgets:\n i.draw()\n end = i.offset + i.length # pylint: disable=undefined-loop-variable\n # we verified that self.widgets is not empty in self.draw(), see above.\n if end < self.length:\n if self.horizontal:\n self.drawer.draw(offsetx=end, width=self.length - end)\n else:\n self.drawer.draw(offsety=end, height=self.length - end)\n\n def info(self):\n return dict(\n size=self.size,\n length=self.length,\n width=self.width,\n height=self.height,\n position=self.position,\n widgets=[i.info() for i in self.widgets],\n window=self.window.wid\n )\n\n def is_show(self):\n return self.size != 0\n\n def show(self, is_show=True):\n if is_show != self.is_show():\n if is_show:\n self.size = self.size_calculated\n self.window.unhide()\n else:\n self.size_calculated = self.size\n self.size = 0\n self.window.hide()\n self.screen.group.layout_all()\n\n def adjust_for_strut(self, size):\n if self.size:\n self.size = self.initial_size\n if not self.margin:\n self.margin = [0, 0, 0, 0]\n if self.screen.top is self:\n self.margin[0] += size\n elif self.screen.bottom is self:\n self.margin[2] += size\n elif self.screen.left is self:\n self.margin[3] += size\n else: # right\n self.margin[1] += size\n\n def cmd_fake_button_press(self, screen, position, x, y, button=1):\n \"\"\"\n Fake a mouse-button-press on the bar. Co-ordinates are relative\n to the top-left corner of the bar.\n\n :screen The integer screen offset\n :position One of \"top\", \"bottom\", \"left\", or \"right\"\n \"\"\"\n self.process_button_click(x, y, button)\n\n\nBarType = typing.Union[Bar, Gap]\n", "path": "libqtile/bar.py"}], "after_files": [{"content": "# Copyright (c) 2008, Aldo Cortesi. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom __future__ import annotations\n\nimport typing\n\nfrom libqtile import configurable\nfrom libqtile.command.base import CommandObject, ItemT\nfrom libqtile.log_utils import logger\nfrom libqtile.utils import has_transparency\n\nif typing.TYPE_CHECKING:\n from libqtile.widget.base import _Widget\n\n\nclass Gap(CommandObject):\n \"\"\"A gap placed along one of the edges of the screen\n\n If a gap has been defined, Qtile will avoid covering it with windows. The\n most probable reason for configuring a gap is to make space for a\n third-party bar or other static window.\n\n Parameters\n ==========\n size :\n The \"thickness\" of the gap, i.e. the height of a horizontal gap, or the\n width of a vertical gap.\n \"\"\"\n def __init__(self, size):\n \"\"\"\n \"\"\"\n # 'size' corresponds to the height of a horizontal gap, or the width\n # of a vertical gap\n self.size = size\n self.initial_size = size\n # 'length' corresponds to the width of a horizontal gap, or the height\n # of a vertical gap\n self.length = None\n self.qtile = None\n self.screen = None\n self.x = None\n self.y = None\n self.width = None\n self.height = None\n self.horizontal = None\n\n def _configure(self, qtile, screen):\n self.qtile = qtile\n self.screen = screen\n self.size = self.initial_size\n # If both horizontal and vertical gaps are present, screen corners are\n # given to the horizontal ones\n if screen.top is self:\n self.x = screen.x\n self.y = screen.y\n self.length = screen.width\n self.width = self.length\n self.height = self.initial_size\n self.horizontal = True\n elif screen.bottom is self:\n self.x = screen.x\n self.y = screen.dy + screen.dheight\n self.length = screen.width\n self.width = self.length\n self.height = self.initial_size\n self.horizontal = True\n elif screen.left is self:\n self.x = screen.x\n self.y = screen.dy\n self.length = screen.dheight\n self.width = self.initial_size\n self.height = self.length\n self.horizontal = False\n else: # right\n self.x = screen.dx + screen.dwidth\n self.y = screen.dy\n self.length = screen.dheight\n self.width = self.initial_size\n self.height = self.length\n self.horizontal = False\n\n def draw(self):\n pass\n\n def finalize(self):\n pass\n\n def geometry(self):\n return (self.x, self.y, self.width, self.height)\n\n def _items(self, name: str) -> ItemT:\n if name == \"screen\" and self.screen is not None:\n return True, []\n return None\n\n def _select(self, name, sel):\n if name == \"screen\":\n return self.screen\n\n @property\n def position(self):\n for i in [\"top\", \"bottom\", \"left\", \"right\"]:\n if getattr(self.screen, i) is self:\n return i\n\n def info(self):\n return dict(position=self.position)\n\n def cmd_info(self):\n \"\"\"\n Info for this object.\n \"\"\"\n return self.info()\n\n\nclass Obj:\n def __init__(self, name):\n self.name = name\n\n def __str__(self):\n return self.name\n\n def __repr__(self):\n return self.name\n\n\nSTRETCH = Obj(\"STRETCH\")\nCALCULATED = Obj(\"CALCULATED\")\nSTATIC = Obj(\"STATIC\")\n\n\nclass Bar(Gap, configurable.Configurable):\n \"\"\"A bar, which can contain widgets\n\n Parameters\n ==========\n widgets :\n A list of widget objects.\n size :\n The \"thickness\" of the bar, i.e. 
the height of a horizontal bar, or the\n width of a vertical bar.\n \"\"\"\n defaults = [\n (\"background\", \"#000000\", \"Background colour.\"),\n (\"opacity\", 1, \"Bar window opacity.\"),\n (\"margin\", 0, \"Space around bar as int or list of ints [N E S W].\"),\n ]\n\n def __init__(self, widgets, size, **config):\n Gap.__init__(self, size)\n configurable.Configurable.__init__(self, **config)\n self.add_defaults(Bar.defaults)\n self.widgets = widgets\n self.saved_focus = None\n self.cursor_in = None\n self.window = None\n self.size_calculated = 0\n self._configured = False\n\n self.queued_draws = 0\n\n def _configure(self, qtile, screen):\n Gap._configure(self, qtile, screen)\n\n if self.margin:\n if isinstance(self.margin, int):\n self.margin = [self.margin] * 4\n if self.horizontal:\n self.x += self.margin[3]\n self.width -= self.margin[1] + self.margin[3]\n self.length = self.width\n if self.size == self.initial_size:\n self.size += self.margin[0] + self.margin[2]\n if self.screen.top is self:\n self.y += self.margin[0]\n else:\n self.y -= self.margin[2]\n else:\n self.y += self.margin[0]\n self.height -= self.margin[0] + self.margin[2]\n self.length = self.height\n self.size += self.margin[1] + self.margin[3]\n if self.screen.left is self:\n self.x += self.margin[3]\n else:\n self.x -= self.margin[1]\n\n for w in self.widgets:\n # Executing _test_orientation_compatibility later, for example in\n # the _configure() method of each widget, would still pass\n # test/test_bar.py but a segfault would be raised when nosetests is\n # about to exit\n w._test_orientation_compatibility(self.horizontal)\n\n if self.window:\n # We get _configure()-ed with an existing window when screens are getting\n # reconfigured but this screen is present both before and after\n self.window.place(self.x, self.y, self.width, self.height, 0, None)\n else:\n # Whereas we won't have a window if we're startup up for the first time or\n # the window has been killed by us no longer using the bar's screen\n\n # X11 only:\n # To preserve correct display of SysTray widget, we need a 24-bit\n # window where the user requests an opaque bar.\n if self.qtile.core.name == \"x11\":\n depth = 32 if has_transparency(self.background) else self.qtile.core.conn.default_screen.root_depth\n\n self.window = self.qtile.core.create_internal(\n self.x, self.y, self.width, self.height, depth\n )\n\n else:\n self.window = self.qtile.core.create_internal(\n self.x, self.y, self.width, self.height\n )\n\n self.window.opacity = self.opacity\n self.window.unhide()\n\n self.drawer = self.window.create_drawer(self.width, self.height)\n self.drawer.clear(self.background)\n\n self.window.process_window_expose = self.draw\n self.window.process_button_click = self.process_button_click\n self.window.process_button_release = self.process_button_release\n self.window.process_pointer_enter = self.process_pointer_enter\n self.window.process_pointer_leave = self.process_pointer_leave\n self.window.process_pointer_motion = self.process_pointer_motion\n self.window.process_key_press = self.process_key_press\n\n self.crashed_widgets = []\n if self._configured:\n for i in self.widgets:\n self._configure_widget(i)\n else:\n for idx, i in enumerate(self.widgets):\n if i.configured:\n i = i.create_mirror()\n self.widgets[idx] = i\n success = self._configure_widget(i)\n if success:\n qtile.register_widget(i)\n\n self._remove_crashed_widgets()\n self.draw()\n self._resize(self.length, self.widgets)\n self._configured = True\n\n def _configure_widget(self, 
widget):\n configured = True\n try:\n widget._configure(self.qtile, self)\n widget.configured = True\n except Exception as e:\n logger.error(\n \"{} widget crashed during _configure with \"\n \"error: {}\".format(widget.__class__.__name__, repr(e))\n )\n self.crashed_widgets.append(widget)\n configured = False\n\n return configured\n\n def _remove_crashed_widgets(self):\n if self.crashed_widgets:\n from libqtile.widget.config_error import ConfigErrorWidget\n\n for i in self.crashed_widgets:\n index = self.widgets.index(i)\n crash = ConfigErrorWidget(widget=i)\n crash._configure(self.qtile, self)\n self.widgets.insert(index, crash)\n self.widgets.remove(i)\n\n def finalize(self):\n self.drawer.finalize()\n\n def kill_window(self):\n \"\"\"Kill the window when the bar's screen is no longer being used.\"\"\"\n self.drawer.finalize()\n self.window.kill()\n self.window = None\n\n def _resize(self, length, widgets):\n stretches = [i for i in widgets if i.length_type == STRETCH]\n if stretches:\n stretchspace = length - sum(\n [i.length for i in widgets if i.length_type != STRETCH]\n )\n stretchspace = max(stretchspace, 0)\n num_stretches = len(stretches)\n if num_stretches == 1:\n stretches[0].length = stretchspace\n else:\n block = 0\n blocks = []\n for i in widgets:\n if i.length_type != STRETCH:\n block += i.length\n else:\n blocks.append(block)\n block = 0\n if block:\n blocks.append(block)\n interval = length // num_stretches\n for idx, i in enumerate(stretches):\n if idx == 0:\n i.length = interval - blocks[0] - blocks[1] // 2\n elif idx == num_stretches - 1:\n i.length = interval - blocks[-1] - blocks[-2] // 2\n else:\n i.length = int(interval - blocks[idx] / 2 - blocks[idx + 1] / 2)\n stretchspace -= i.length\n stretches[0].length += stretchspace // 2\n stretches[-1].length += stretchspace - stretchspace // 2\n\n offset = 0\n if self.horizontal:\n for i in widgets:\n i.offsetx = offset\n i.offsety = 0\n offset += i.length\n else:\n for i in widgets:\n i.offsetx = 0\n i.offsety = offset\n offset += i.length\n\n def get_widget_in_position(self, x: int, y: int) -> typing.Optional[_Widget]:\n if self.horizontal:\n for i in self.widgets:\n if x < i.offsetx + i.length:\n return i\n else:\n for i in self.widgets:\n if y < i.offsety + i.length:\n return i\n return None\n\n def process_button_click(self, x: int, y: int, button: int) -> None:\n widget = self.get_widget_in_position(x, y)\n if widget:\n widget.button_press(\n x - widget.offsetx,\n y - widget.offsety,\n button,\n )\n\n def process_button_release(self, x: int, y: int, button: int) -> None:\n widget = self.get_widget_in_position(x, y)\n if widget:\n widget.button_release(\n x - widget.offsetx,\n y - widget.offsety,\n button,\n )\n\n def process_pointer_enter(self, x: int, y: int) -> None:\n widget = self.get_widget_in_position(x, y)\n if widget:\n widget.mouse_enter(\n x - widget.offsetx,\n y - widget.offsety,\n )\n self.cursor_in = widget\n\n def process_pointer_leave(self, x: int, y: int) -> None:\n if self.cursor_in:\n self.cursor_in.mouse_leave(\n x - self.cursor_in.offsetx,\n y - self.cursor_in.offsety,\n )\n self.cursor_in = None\n\n def process_pointer_motion(self, x: int, y: int) -> None:\n widget = self.get_widget_in_position(x, y)\n if widget and self.cursor_in and widget is not self.cursor_in:\n self.cursor_in.mouse_leave(\n x - self.cursor_in.offsetx,\n y - self.cursor_in.offsety,\n )\n widget.mouse_enter(\n x - widget.offsetx,\n y - widget.offsety,\n )\n self.cursor_in = widget\n\n def process_key_press(self, keycode: int) 
-> None:\n if self.has_keyboard:\n self.has_keyboard.process_key_press(keycode)\n\n def widget_grab_keyboard(self, widget):\n \"\"\"\n A widget can call this method to grab the keyboard focus\n and receive keyboard messages. When done,\n widget_ungrab_keyboard() must be called.\n \"\"\"\n self.has_keyboard = widget\n self.saved_focus = self.qtile.current_window\n self.window.focus(False)\n\n def widget_ungrab_keyboard(self):\n \"\"\"\n Removes keyboard focus from the widget.\n \"\"\"\n if self.saved_focus is not None:\n self.saved_focus.focus(False)\n self.has_keyboard = None\n\n def draw(self):\n if not self.widgets:\n return # calling self._actual_draw in this case would cause a NameError.\n if self.queued_draws == 0:\n self.qtile.call_soon(self._actual_draw)\n self.queued_draws += 1\n\n def _actual_draw(self):\n self.queued_draws = 0\n self._resize(self.length, self.widgets)\n for i in self.widgets:\n i.draw()\n end = i.offset + i.length # pylint: disable=undefined-loop-variable\n # we verified that self.widgets is not empty in self.draw(), see above.\n if end < self.length:\n if self.horizontal:\n self.drawer.draw(offsetx=end, width=self.length - end)\n else:\n self.drawer.draw(offsety=end, height=self.length - end)\n\n def info(self):\n return dict(\n size=self.size,\n length=self.length,\n width=self.width,\n height=self.height,\n position=self.position,\n widgets=[i.info() for i in self.widgets],\n window=self.window.wid\n )\n\n def is_show(self):\n return self.size != 0\n\n def show(self, is_show=True):\n if is_show != self.is_show():\n if is_show:\n self.size = self.size_calculated\n self.window.unhide()\n else:\n self.size_calculated = self.size\n self.size = 0\n self.window.hide()\n self.screen.group.layout_all()\n\n def adjust_for_strut(self, size):\n if self.size:\n self.size = self.initial_size\n if not self.margin:\n self.margin = [0, 0, 0, 0]\n if self.screen.top is self:\n self.margin[0] += size\n elif self.screen.bottom is self:\n self.margin[2] += size\n elif self.screen.left is self:\n self.margin[3] += size\n else: # right\n self.margin[1] += size\n\n def cmd_fake_button_press(self, screen, position, x, y, button=1):\n \"\"\"\n Fake a mouse-button-press on the bar. Co-ordinates are relative\n to the top-left corner of the bar.\n\n :screen The integer screen offset\n :position One of \"top\", \"bottom\", \"left\", or \"right\"\n \"\"\"\n self.process_button_click(x, y, button)\n\n\nBarType = typing.Union[Bar, Gap]\n", "path": "libqtile/bar.py"}]} |
gh_patches_debug_1205 | rasdani/github-patches | git_diff | sublimelsp__LSP-555 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TypeError: expected string or buffer
https://github.com/tomv564/LSP/blob/e37c4e6d7d959890c465cada35dff7fef22feb6e/plugin/core/types.py#L50-L54
It happened only once so far, when `plugin_loaded` was called on startup. After restarting Sublime Text right away, it did not happen again.
```
Traceback (most recent call last):
File "F:\SublimeText\sublime_plugin.py", line 298, in on_api_ready
plc()
File "F:\SublimeText\Data\Packages\LSP\boot.py", line 30, in plugin_loaded
startup()
File "F:\SublimeText\Data\Packages\LSP\plugin\core\main.py", line 25, in startup
start_active_window()
File "F:\SublimeText\Data\Packages\LSP\plugin\core\main.py", line 44, in start_active_window
windows.lookup(window).start_active_views()
File "F:\SublimeText\Data\Packages\LSP\plugin\core\windows.py", line 336, in start_active_views
self._initialize_on_open(view)
File "F:\SublimeText\Data\Packages\LSP\plugin\core\windows.py", line 348, in _initialize_on_open
self._configs.syntax_configs(view))
File "F:\SublimeText\Data\Packages\LSP\plugin\core\configurations.py", line 120, in syntax_configs
return list(filter(lambda c: config_supports_syntax(c, syntax) and c.enabled, self.all))
File "F:\SublimeText\Data\Packages\LSP\plugin\core\configurations.py", line 120, in <lambda>
return list(filter(lambda c: config_supports_syntax(c, syntax) and c.enabled, self.all))
File "F:\SublimeText\Data\Packages\LSP\plugin\core\types.py", line 81, in config_supports_syntax
if re.search(r'|'.join(r'\b%s\b' % re.escape(s) for s in language.syntaxes), syntax, re.IGNORECASE):
File "./python3.3/re.py", line 161, in search
TypeError: expected string or buffer
```
--- END ISSUE ---
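The traceback above bottoms out in `re.search`, which on Python 3.3 accepts only a string (or buffer); the view's `syntax` setting can still be `None` while views are being initialized at startup, which is what triggers the error. A minimal sketch of the failure mode and the usual `None` guard (the function and argument names below are illustrative stand-ins, not the plugin's actual code):

```python
import re

def supports_syntax(syntaxes, syntax):
    # Guard first: `syntax` may be None for a freshly created view.
    if not syntax:
        return False
    pattern = r'|'.join(r'\b%s\b' % re.escape(s) for s in syntaxes)
    # Without the guard above, passing syntax=None here raises
    # "TypeError: expected string or buffer" on Python 3.3.
    return bool(re.search(pattern, syntax, re.IGNORECASE))

print(supports_syntax(['Python.sublime-syntax'], None))   # False instead of a crash
print(supports_syntax(['Python.sublime-syntax'],
                      'Packages/Python/Python.sublime-syntax'))  # True
```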
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugin/completion.py`
Content:
```
1 import sublime
2 import sublime_plugin
3
4 try:
5 from typing import Any, List, Dict, Tuple, Callable, Optional
6 assert Any and List and Dict and Tuple and Callable and Optional
7 except ImportError:
8 pass
9
10 from .core.protocol import Request
11 from .core.events import global_events
12 from .core.settings import settings, client_configs
13 from .core.logging import debug
14 from .core.completion import parse_completion_response
15 from .core.registry import session_for_view, client_for_view
16 from .core.configurations import is_supported_syntax
17 from .core.documents import get_document_position
18 from .core.sessions import Session
19
20 NO_COMPLETION_SCOPES = 'comment, string'
21
22
23 class CompletionState(object):
24 IDLE = 0
25 REQUESTING = 1
26 APPLYING = 2
27 CANCELLING = 3
28
29
30 last_text_command = None
31
32
33 class CompletionHelper(sublime_plugin.EventListener):
34 def on_text_command(self, view, command_name, args):
35 global last_text_command
36 last_text_command = command_name
37
38
39 class CompletionHandler(sublime_plugin.ViewEventListener):
40 def __init__(self, view):
41 self.view = view
42 self.initialized = False
43 self.enabled = False
44 self.trigger_chars = [] # type: List[str]
45 self.state = CompletionState.IDLE
46 self.completions = [] # type: List[Any]
47 self.next_request = None # type: Optional[Tuple[str, List[int]]]
48 self.last_prefix = ""
49 self.last_location = 0
50
51 @classmethod
52 def is_applicable(cls, settings):
53 syntax = settings.get('syntax')
54 if syntax is not None:
55 return is_supported_syntax(syntax, client_configs.all)
56 else:
57 return False
58
59 def initialize(self):
60 self.initialized = True
61 session = session_for_view(self.view)
62 if session:
63 completionProvider = session.get_capability(
64 'completionProvider')
65 if completionProvider:
66 self.enabled = True
67 self.trigger_chars = completionProvider.get(
68 'triggerCharacters') or []
69 if self.trigger_chars:
70 self.register_trigger_chars(session)
71
72 def _view_language(self, config_name: str) -> 'Optional[str]':
73 languages = self.view.settings().get('lsp_language')
74 return languages.get(config_name) if languages else None
75
76 def register_trigger_chars(self, session: Session) -> None:
77 completion_triggers = self.view.settings().get('auto_complete_triggers', [])
78 view_language = self._view_language(session.config.name)
79 if view_language:
80 for language in session.config.languages:
81 if language.id == view_language:
82 for scope in language.scopes:
83 # debug("registering", self.trigger_chars, "for", scope)
84 scope_trigger = next(
85 (trigger for trigger in completion_triggers if trigger.get('selector', None) == scope),
86 None
87 )
88 if scope_trigger:
89 scope_trigger['characters'] = "".join(self.trigger_chars)
90 else:
91 completion_triggers.append({
92 'characters': "".join(self.trigger_chars),
93 'selector': scope
94 })
95
96 self.view.settings().set('auto_complete_triggers', completion_triggers)
97
98 def is_after_trigger_character(self, location):
99 if location > 0:
100 prev_char = self.view.substr(location - 1)
101 return prev_char in self.trigger_chars
102
103 def is_same_completion(self, prefix, locations):
104 # completion requests from the same location with the same prefix are cached.
105 current_start = locations[0] - len(prefix)
106 last_start = self.last_location - len(self.last_prefix)
107 return prefix.startswith(self.last_prefix) and current_start == last_start
108
109 def on_modified(self):
110 # hide completion when backspacing past last completion.
111 if self.view.sel()[0].begin() < self.last_location:
112 self.last_location = 0
113 self.view.run_command("hide_auto_complete")
114 # cancel current completion if the previous input is an space
115 prev_char = self.view.substr(self.view.sel()[0].begin() - 1)
116 if self.state == CompletionState.REQUESTING and prev_char.isspace():
117 self.state = CompletionState.CANCELLING
118
119 def on_query_completions(self, prefix, locations):
120 if prefix != "" and self.view.match_selector(locations[0], NO_COMPLETION_SCOPES):
121 # debug('discarding completion because no completion scope with prefix {}'.format(prefix))
122 return (
123 [],
124 sublime.INHIBIT_WORD_COMPLETIONS | sublime.INHIBIT_EXPLICIT_COMPLETIONS
125 )
126
127 if not self.initialized:
128 self.initialize()
129
130 if self.enabled:
131 reuse_completion = self.is_same_completion(prefix, locations)
132 if self.state == CompletionState.IDLE:
133 if not reuse_completion:
134 self.last_prefix = prefix
135 self.last_location = locations[0]
136 self.do_request(prefix, locations)
137 self.completions = []
138
139 elif self.state in (CompletionState.REQUESTING, CompletionState.CANCELLING):
140 self.next_request = (prefix, locations)
141 self.state = CompletionState.CANCELLING
142
143 elif self.state == CompletionState.APPLYING:
144 self.state = CompletionState.IDLE
145
146 return (
147 self.completions,
148 0 if not settings.only_show_lsp_completions
149 else sublime.INHIBIT_WORD_COMPLETIONS | sublime.INHIBIT_EXPLICIT_COMPLETIONS
150 )
151
152 def do_request(self, prefix: str, locations: 'List[int]'):
153 self.next_request = None
154 view = self.view
155
156 # don't store client so we can handle restarts
157 client = client_for_view(view)
158 if not client:
159 return
160
161 if settings.complete_all_chars or self.is_after_trigger_character(locations[0]):
162 global_events.publish("view.on_purge_changes", self.view)
163 document_position = get_document_position(view, locations[0])
164 if document_position:
165 client.send_request(
166 Request.complete(document_position),
167 self.handle_response,
168 self.handle_error)
169 self.state = CompletionState.REQUESTING
170
171 def handle_response(self, response: 'Optional[Dict]'):
172
173 if self.state == CompletionState.REQUESTING:
174 last_start = self.last_location - len(self.last_prefix)
175 last_row, last_col = self.view.rowcol(last_start)
176 self.completions = parse_completion_response(response, last_col, settings)
177
178 # if insert_best_completion was just ran, undo it before presenting new completions.
179 prev_char = self.view.substr(self.view.sel()[0].begin() - 1)
180 if prev_char.isspace():
181 if last_text_command == "insert_best_completion":
182 self.view.run_command("undo")
183
184 self.state = CompletionState.APPLYING
185 self.view.run_command("hide_auto_complete")
186 self.run_auto_complete()
187 elif self.state == CompletionState.CANCELLING:
188 self.state = CompletionState.IDLE
189 if self.next_request:
190 prefix, locations = self.next_request
191 self.do_request(prefix, locations)
192 else:
193 debug('Got unexpected response while in state {}'.format(self.state))
194
195 def handle_error(self, error: dict):
196 sublime.status_message('Completion error: ' + str(error.get('message')))
197 self.state = CompletionState.IDLE
198
199 def run_auto_complete(self):
200 self.view.run_command(
201 "auto_complete", {
202 'disable_auto_insert': True,
203 'api_completions_only': settings.only_show_lsp_completions,
204 'next_completion_if_showing': False
205 })
206
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugin/completion.py b/plugin/completion.py
--- a/plugin/completion.py
+++ b/plugin/completion.py
@@ -51,10 +51,7 @@
@classmethod
def is_applicable(cls, settings):
syntax = settings.get('syntax')
- if syntax is not None:
- return is_supported_syntax(syntax, client_configs.all)
- else:
- return False
+ return is_supported_syntax(syntax) if syntax else False
def initialize(self):
self.initialized = True
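The replacement line keeps the behaviour for a real syntax string but short-circuits to `False` when the setting is `None` (or empty). A quick sanity check of the new expression, with a plain dict standing in for the view settings and a stub in place of `is_supported_syntax` (both are illustrative assumptions; the real predicate is imported from `core.configurations`):

```python
def is_supported_syntax(syntax):
    # Stub predicate for illustration only.
    return syntax.endswith('.sublime-syntax')

def is_applicable(settings):
    syntax = settings.get('syntax')
    return is_supported_syntax(syntax) if syntax else False

print(is_applicable({}))                                                    # False, no TypeError
print(is_applicable({'syntax': 'Packages/Python/Python.sublime-syntax'}))  # True
```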
| {"golden_diff": "diff --git a/plugin/completion.py b/plugin/completion.py\n--- a/plugin/completion.py\n+++ b/plugin/completion.py\n@@ -51,10 +51,7 @@\n @classmethod\n def is_applicable(cls, settings):\n syntax = settings.get('syntax')\n- if syntax is not None:\n- return is_supported_syntax(syntax, client_configs.all)\n- else:\n- return False\n+ return is_supported_syntax(syntax) if syntax else False\n \n def initialize(self):\n self.initialized = True\n", "issue": "TypeError: expected string or buffer\nhttps://github.com/tomv564/LSP/blob/e37c4e6d7d959890c465cada35dff7fef22feb6e/plugin/core/types.py#L50-L54\r\n\r\nIt happened only once so far, when `plugin_loaded` was called on start up. After restarting Sublime Text right away, it did not happened again.\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"F:\\SublimeText\\sublime_plugin.py\", line 298, in on_api_ready\r\n plc()\r\n File \"F:\\SublimeText\\Data\\Packages\\LSP\\boot.py\", line 30, in plugin_loaded\r\n startup()\r\n File \"F:\\SublimeText\\Data\\Packages\\LSP\\plugin\\core\\main.py\", line 25, in startup\r\n start_active_window()\r\n File \"F:\\SublimeText\\Data\\Packages\\LSP\\plugin\\core\\main.py\", line 44, in start_active_window\r\n windows.lookup(window).start_active_views()\r\n File \"F:\\SublimeText\\Data\\Packages\\LSP\\plugin\\core\\windows.py\", line 336, in start_active_views\r\n self._initialize_on_open(view)\r\n File \"F:\\SublimeText\\Data\\Packages\\LSP\\plugin\\core\\windows.py\", line 348, in _initialize_on_open\r\n self._configs.syntax_configs(view))\r\n File \"F:\\SublimeText\\Data\\Packages\\LSP\\plugin\\core\\configurations.py\", line 120, in syntax_configs\r\n return list(filter(lambda c: config_supports_syntax(c, syntax) and c.enabled, self.all))\r\n File \"F:\\SublimeText\\Data\\Packages\\LSP\\plugin\\core\\configurations.py\", line 120, in <lambda>\r\n return list(filter(lambda c: config_supports_syntax(c, syntax) and c.enabled, self.all))\r\n File \"F:\\SublimeText\\Data\\Packages\\LSP\\plugin\\core\\types.py\", line 81, in config_supports_syntax\r\n if re.search(r'|'.join(r'\\b%s\\b' % re.escape(s) for s in language.syntaxes), syntax, re.IGNORECASE):\r\n File \"./python3.3/re.py\", line 161, in search\r\nTypeError: expected string or buffer\r\n```\n", "before_files": [{"content": "import sublime\nimport sublime_plugin\n\ntry:\n from typing import Any, List, Dict, Tuple, Callable, Optional\n assert Any and List and Dict and Tuple and Callable and Optional\nexcept ImportError:\n pass\n\nfrom .core.protocol import Request\nfrom .core.events import global_events\nfrom .core.settings import settings, client_configs\nfrom .core.logging import debug\nfrom .core.completion import parse_completion_response\nfrom .core.registry import session_for_view, client_for_view\nfrom .core.configurations import is_supported_syntax\nfrom .core.documents import get_document_position\nfrom .core.sessions import Session\n\nNO_COMPLETION_SCOPES = 'comment, string'\n\n\nclass CompletionState(object):\n IDLE = 0\n REQUESTING = 1\n APPLYING = 2\n CANCELLING = 3\n\n\nlast_text_command = None\n\n\nclass CompletionHelper(sublime_plugin.EventListener):\n def on_text_command(self, view, command_name, args):\n global last_text_command\n last_text_command = command_name\n\n\nclass CompletionHandler(sublime_plugin.ViewEventListener):\n def __init__(self, view):\n self.view = view\n self.initialized = False\n self.enabled = False\n self.trigger_chars = [] # type: List[str]\n self.state = CompletionState.IDLE\n 
self.completions = [] # type: List[Any]\n self.next_request = None # type: Optional[Tuple[str, List[int]]]\n self.last_prefix = \"\"\n self.last_location = 0\n\n @classmethod\n def is_applicable(cls, settings):\n syntax = settings.get('syntax')\n if syntax is not None:\n return is_supported_syntax(syntax, client_configs.all)\n else:\n return False\n\n def initialize(self):\n self.initialized = True\n session = session_for_view(self.view)\n if session:\n completionProvider = session.get_capability(\n 'completionProvider')\n if completionProvider:\n self.enabled = True\n self.trigger_chars = completionProvider.get(\n 'triggerCharacters') or []\n if self.trigger_chars:\n self.register_trigger_chars(session)\n\n def _view_language(self, config_name: str) -> 'Optional[str]':\n languages = self.view.settings().get('lsp_language')\n return languages.get(config_name) if languages else None\n\n def register_trigger_chars(self, session: Session) -> None:\n completion_triggers = self.view.settings().get('auto_complete_triggers', [])\n view_language = self._view_language(session.config.name)\n if view_language:\n for language in session.config.languages:\n if language.id == view_language:\n for scope in language.scopes:\n # debug(\"registering\", self.trigger_chars, \"for\", scope)\n scope_trigger = next(\n (trigger for trigger in completion_triggers if trigger.get('selector', None) == scope),\n None\n )\n if scope_trigger:\n scope_trigger['characters'] = \"\".join(self.trigger_chars)\n else:\n completion_triggers.append({\n 'characters': \"\".join(self.trigger_chars),\n 'selector': scope\n })\n\n self.view.settings().set('auto_complete_triggers', completion_triggers)\n\n def is_after_trigger_character(self, location):\n if location > 0:\n prev_char = self.view.substr(location - 1)\n return prev_char in self.trigger_chars\n\n def is_same_completion(self, prefix, locations):\n # completion requests from the same location with the same prefix are cached.\n current_start = locations[0] - len(prefix)\n last_start = self.last_location - len(self.last_prefix)\n return prefix.startswith(self.last_prefix) and current_start == last_start\n\n def on_modified(self):\n # hide completion when backspacing past last completion.\n if self.view.sel()[0].begin() < self.last_location:\n self.last_location = 0\n self.view.run_command(\"hide_auto_complete\")\n # cancel current completion if the previous input is an space\n prev_char = self.view.substr(self.view.sel()[0].begin() - 1)\n if self.state == CompletionState.REQUESTING and prev_char.isspace():\n self.state = CompletionState.CANCELLING\n\n def on_query_completions(self, prefix, locations):\n if prefix != \"\" and self.view.match_selector(locations[0], NO_COMPLETION_SCOPES):\n # debug('discarding completion because no completion scope with prefix {}'.format(prefix))\n return (\n [],\n sublime.INHIBIT_WORD_COMPLETIONS | sublime.INHIBIT_EXPLICIT_COMPLETIONS\n )\n\n if not self.initialized:\n self.initialize()\n\n if self.enabled:\n reuse_completion = self.is_same_completion(prefix, locations)\n if self.state == CompletionState.IDLE:\n if not reuse_completion:\n self.last_prefix = prefix\n self.last_location = locations[0]\n self.do_request(prefix, locations)\n self.completions = []\n\n elif self.state in (CompletionState.REQUESTING, CompletionState.CANCELLING):\n self.next_request = (prefix, locations)\n self.state = CompletionState.CANCELLING\n\n elif self.state == CompletionState.APPLYING:\n self.state = CompletionState.IDLE\n\n return (\n self.completions,\n 0 if 
not settings.only_show_lsp_completions\n else sublime.INHIBIT_WORD_COMPLETIONS | sublime.INHIBIT_EXPLICIT_COMPLETIONS\n )\n\n def do_request(self, prefix: str, locations: 'List[int]'):\n self.next_request = None\n view = self.view\n\n # don't store client so we can handle restarts\n client = client_for_view(view)\n if not client:\n return\n\n if settings.complete_all_chars or self.is_after_trigger_character(locations[0]):\n global_events.publish(\"view.on_purge_changes\", self.view)\n document_position = get_document_position(view, locations[0])\n if document_position:\n client.send_request(\n Request.complete(document_position),\n self.handle_response,\n self.handle_error)\n self.state = CompletionState.REQUESTING\n\n def handle_response(self, response: 'Optional[Dict]'):\n\n if self.state == CompletionState.REQUESTING:\n last_start = self.last_location - len(self.last_prefix)\n last_row, last_col = self.view.rowcol(last_start)\n self.completions = parse_completion_response(response, last_col, settings)\n\n # if insert_best_completion was just ran, undo it before presenting new completions.\n prev_char = self.view.substr(self.view.sel()[0].begin() - 1)\n if prev_char.isspace():\n if last_text_command == \"insert_best_completion\":\n self.view.run_command(\"undo\")\n\n self.state = CompletionState.APPLYING\n self.view.run_command(\"hide_auto_complete\")\n self.run_auto_complete()\n elif self.state == CompletionState.CANCELLING:\n self.state = CompletionState.IDLE\n if self.next_request:\n prefix, locations = self.next_request\n self.do_request(prefix, locations)\n else:\n debug('Got unexpected response while in state {}'.format(self.state))\n\n def handle_error(self, error: dict):\n sublime.status_message('Completion error: ' + str(error.get('message')))\n self.state = CompletionState.IDLE\n\n def run_auto_complete(self):\n self.view.run_command(\n \"auto_complete\", {\n 'disable_auto_insert': True,\n 'api_completions_only': settings.only_show_lsp_completions,\n 'next_completion_if_showing': False\n })\n", "path": "plugin/completion.py"}], "after_files": [{"content": "import sublime\nimport sublime_plugin\n\ntry:\n from typing import Any, List, Dict, Tuple, Callable, Optional\n assert Any and List and Dict and Tuple and Callable and Optional\nexcept ImportError:\n pass\n\nfrom .core.protocol import Request\nfrom .core.events import global_events\nfrom .core.settings import settings, client_configs\nfrom .core.logging import debug\nfrom .core.completion import parse_completion_response\nfrom .core.registry import session_for_view, client_for_view\nfrom .core.configurations import is_supported_syntax\nfrom .core.documents import get_document_position\nfrom .core.sessions import Session\n\nNO_COMPLETION_SCOPES = 'comment, string'\n\n\nclass CompletionState(object):\n IDLE = 0\n REQUESTING = 1\n APPLYING = 2\n CANCELLING = 3\n\n\nlast_text_command = None\n\n\nclass CompletionHelper(sublime_plugin.EventListener):\n def on_text_command(self, view, command_name, args):\n global last_text_command\n last_text_command = command_name\n\n\nclass CompletionHandler(sublime_plugin.ViewEventListener):\n def __init__(self, view):\n self.view = view\n self.initialized = False\n self.enabled = False\n self.trigger_chars = [] # type: List[str]\n self.state = CompletionState.IDLE\n self.completions = [] # type: List[Any]\n self.next_request = None # type: Optional[Tuple[str, List[int]]]\n self.last_prefix = \"\"\n self.last_location = 0\n\n @classmethod\n def is_applicable(cls, settings):\n syntax = 
settings.get('syntax')\n return is_supported_syntax(syntax) if syntax else False\n\n def initialize(self):\n self.initialized = True\n session = session_for_view(self.view)\n if session:\n completionProvider = session.get_capability(\n 'completionProvider')\n if completionProvider:\n self.enabled = True\n self.trigger_chars = completionProvider.get(\n 'triggerCharacters') or []\n if self.trigger_chars:\n self.register_trigger_chars(session)\n\n def _view_language(self, config_name: str) -> 'Optional[str]':\n languages = self.view.settings().get('lsp_language')\n return languages.get(config_name) if languages else None\n\n def register_trigger_chars(self, session: Session) -> None:\n completion_triggers = self.view.settings().get('auto_complete_triggers', [])\n view_language = self._view_language(session.config.name)\n if view_language:\n for language in session.config.languages:\n if language.id == view_language:\n for scope in language.scopes:\n # debug(\"registering\", self.trigger_chars, \"for\", scope)\n scope_trigger = next(\n (trigger for trigger in completion_triggers if trigger.get('selector', None) == scope),\n None\n )\n if scope_trigger:\n scope_trigger['characters'] = \"\".join(self.trigger_chars)\n else:\n completion_triggers.append({\n 'characters': \"\".join(self.trigger_chars),\n 'selector': scope\n })\n\n self.view.settings().set('auto_complete_triggers', completion_triggers)\n\n def is_after_trigger_character(self, location):\n if location > 0:\n prev_char = self.view.substr(location - 1)\n return prev_char in self.trigger_chars\n\n def is_same_completion(self, prefix, locations):\n # completion requests from the same location with the same prefix are cached.\n current_start = locations[0] - len(prefix)\n last_start = self.last_location - len(self.last_prefix)\n return prefix.startswith(self.last_prefix) and current_start == last_start\n\n def on_modified(self):\n # hide completion when backspacing past last completion.\n if self.view.sel()[0].begin() < self.last_location:\n self.last_location = 0\n self.view.run_command(\"hide_auto_complete\")\n # cancel current completion if the previous input is an space\n prev_char = self.view.substr(self.view.sel()[0].begin() - 1)\n if self.state == CompletionState.REQUESTING and prev_char.isspace():\n self.state = CompletionState.CANCELLING\n\n def on_query_completions(self, prefix, locations):\n if prefix != \"\" and self.view.match_selector(locations[0], NO_COMPLETION_SCOPES):\n # debug('discarding completion because no completion scope with prefix {}'.format(prefix))\n return (\n [],\n sublime.INHIBIT_WORD_COMPLETIONS | sublime.INHIBIT_EXPLICIT_COMPLETIONS\n )\n\n if not self.initialized:\n self.initialize()\n\n if self.enabled:\n reuse_completion = self.is_same_completion(prefix, locations)\n if self.state == CompletionState.IDLE:\n if not reuse_completion:\n self.last_prefix = prefix\n self.last_location = locations[0]\n self.do_request(prefix, locations)\n self.completions = []\n\n elif self.state in (CompletionState.REQUESTING, CompletionState.CANCELLING):\n self.next_request = (prefix, locations)\n self.state = CompletionState.CANCELLING\n\n elif self.state == CompletionState.APPLYING:\n self.state = CompletionState.IDLE\n\n return (\n self.completions,\n 0 if not settings.only_show_lsp_completions\n else sublime.INHIBIT_WORD_COMPLETIONS | sublime.INHIBIT_EXPLICIT_COMPLETIONS\n )\n\n def do_request(self, prefix: str, locations: 'List[int]'):\n self.next_request = None\n view = self.view\n\n # don't store client so we can 
handle restarts\n client = client_for_view(view)\n if not client:\n return\n\n if settings.complete_all_chars or self.is_after_trigger_character(locations[0]):\n global_events.publish(\"view.on_purge_changes\", self.view)\n document_position = get_document_position(view, locations[0])\n if document_position:\n client.send_request(\n Request.complete(document_position),\n self.handle_response,\n self.handle_error)\n self.state = CompletionState.REQUESTING\n\n def handle_response(self, response: 'Optional[Dict]'):\n\n if self.state == CompletionState.REQUESTING:\n last_start = self.last_location - len(self.last_prefix)\n last_row, last_col = self.view.rowcol(last_start)\n self.completions = parse_completion_response(response, last_col, settings)\n\n # if insert_best_completion was just ran, undo it before presenting new completions.\n prev_char = self.view.substr(self.view.sel()[0].begin() - 1)\n if prev_char.isspace():\n if last_text_command == \"insert_best_completion\":\n self.view.run_command(\"undo\")\n\n self.state = CompletionState.APPLYING\n self.view.run_command(\"hide_auto_complete\")\n self.run_auto_complete()\n elif self.state == CompletionState.CANCELLING:\n self.state = CompletionState.IDLE\n if self.next_request:\n prefix, locations = self.next_request\n self.do_request(prefix, locations)\n else:\n debug('Got unexpected response while in state {}'.format(self.state))\n\n def handle_error(self, error: dict):\n sublime.status_message('Completion error: ' + str(error.get('message')))\n self.state = CompletionState.IDLE\n\n def run_auto_complete(self):\n self.view.run_command(\n \"auto_complete\", {\n 'disable_auto_insert': True,\n 'api_completions_only': settings.only_show_lsp_completions,\n 'next_completion_if_showing': False\n })\n", "path": "plugin/completion.py"}]} |
gh_patches_debug_1206 | rasdani/github-patches | git_diff | facebookresearch__ParlAI-1428 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pytorch Data Teacher seems to hang
I observe a weird hang when using the pytorch data teacher:
```
# doesn't hang
PYTHONPATH=. python examples/display_data.py -t wizard_of_wikipedia:WizardDialogKnowledgeTeacher
# seems to hang indefinitely
PYTHONPATH=. python examples/display_data.py -pyt wizard_of_wikipedia:WizardDialogKnowledgeTeacher
```
--- END ISSUE ---
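Since the pytorch data teacher path below coordinates a loader thread and the teacher through `Condition` variables, a hang like this usually means one side is waiting on a condition that is never notified. One way to see where the process is stuck (a generic diagnostic aid, not part of ParlAI) is to dump every thread's stack after a timeout:

```python
import faulthandler
import sys

# Print the traceback of every thread to stderr if the process is still
# running after 30 seconds; repeat=True keeps dumping while the hang persists.
faulthandler.dump_traceback_later(30, repeat=True, file=sys.stderr)
```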
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parlai/core/pytorch_data_teacher.py`
Content:
```
1 #!/usr/bin/env python3
2
3 # Copyright (c) Facebook, Inc. and its affiliates.
4 # This source code is licensed under the MIT license found in the
5 # LICENSE file in the root directory of this source tree.
6 """
7 (NOTE: To use this class, please follow the tutorial here:
8 http://parl.ai/static/docs/tutorial_worlds.html#multiprocessed-pytorch-dataloader)
9
10 """
11 from .teachers import FixedDialogTeacher
12 from parlai.scripts.build_pytorch_data import build_data
13 from .agents import get_agent_module
14 import json
15 import math
16 import collections
17 import random
18 import os
19 from functools import wraps
20 import importlib
21 from functools import lru_cache
22 try:
23 import torch # noqa: F401
24 except ImportError:
25 raise ImportError('Need to install Pytorch: go to pytorch.org')
26 from torch.utils.data import ConcatDataset, Dataset, DataLoader, sampler
27 from torch.multiprocessing import Lock, Value
28 import ctypes
29 from threading import Thread, Condition, RLock
30
31
32 class BatchSortCache(object):
33 """
34 Object that encapsulates the functionality of the batch sort cache.
35
36 Maps episode length to dictionary with following keys:
37 current_idx: which episode in the list are we at (if simply indexing
38 into list)
39 ep_list: list of episodes of the length of the key
40 bucket_complete: if there are no more episodes left to consider in
41 the bucket
42 """
43 @classmethod
44 def create(cls):
45 if not hasattr(cls, 'length_to_eps'):
46 # Maps episode length to list of episodes
47 cls.length_to_eps = {}
48 if not hasattr(cls, 'ep_indices'):
49 # Set of episode indices already in the cache
50 cls.ep_indices = set()
51 if not hasattr(cls, 'batches'):
52 # List of batches if popping batches
53 cls.batches = []
54 if not hasattr(cls, 'load_complete'):
55 # If all episodes have been loaded into memory
56 cls.load_complete = Value(ctypes.c_bool, False)
57 if not hasattr(cls, 'batches_lock'):
58 # Lock to access batches
59 cls.batches_lock = Lock()
60 if not hasattr(cls, 'cache_lock'):
61 # Lock to access length_to_eps
62 cls.cache_lock = Lock()
63 if not hasattr(cls, 'fill_cache_lock'):
64 # Lock for condition variables
65 cls.fill_cache_lock = RLock()
66 if not hasattr(cls, 'add_to_cache_cv'):
67 # Condition notifying Loader to add to cache
68 cls.add_to_cache_cv = Condition(lock=cls.fill_cache_lock)
69 if not hasattr(cls, 'cache_filled_cv'):
70 # Condition notifying teacher that cache has episodes
71 cls.cache_filled_cv = Condition(lock=cls.fill_cache_lock)
72
73 @classmethod
74 def batch_cache(cls, function):
75 max_cache_size = 10000 # Max unseen eps
76 min_cache_size = 1000 # Min unseen eps
77
78 def get_cache_size():
79 '''Returns number of available episodes '''
80 return sum(
81 len(v['ep_list']) - v['current_idx']
82 for k, v in cls.length_to_eps.items()
83 )
84
85 def get_available_buckets(bsz):
86 '''Returns buckets where there are enough episodes for a batch'''
87 if cls.load_complete.value:
88 return {
89 k: v
90 for k, v in cls.length_to_eps.items()
91 if not v['bucket_complete']
92 or len(v['ep_list']) - v['current_idx'] > 0
93 }
94 else:
95 return {
96 k: v
97 for k, v in cls.length_to_eps.items()
98 if len(v['ep_list']) - v['current_idx'] >= bsz
99 }
100
101 def reset():
102 '''Resets the indices into the buckets'''
103 with cls.cache_lock:
104 for idx in cls.length_to_eps:
105 cls.length_to_eps[idx]['current_idx'] = 0
106 cls.length_to_eps[idx]['bucket_complete'] = False
107
108 def consolidate(caller):
109 '''Consolidate remaining episodes into batches'''
110 cls.load_complete.value = True
111 bsz = caller.bsz
112 batch = []
113 sorted_lengths = sorted(cls.length_to_eps.keys())
114 with cls.cache_lock:
115 if caller.batch_cache_type == 'index':
116 for length in sorted_lengths:
117 current_idx = cls.length_to_eps[length]['current_idx']
118 ep_list = cls.length_to_eps[length]['ep_list']
119 unseen_eps = ep_list[current_idx:]
120 cls.length_to_eps[length]['ep_list'] = ep_list[:current_idx]
121 batch = unseen_eps + batch
122 while len(batch) >= bsz:
123 cls.length_to_eps[length]['ep_list'] += batch[:bsz]
124 batch = batch[bsz:]
125 if len(batch) > 0:
126 cls.length_to_eps[-1] = {
127 'current_idx': 0,
128 'ep_list': batch,
129 'bucket_complete': False
130 }
131 elif caller.batch_cache_type == 'pop':
132 for length in sorted_lengths:
133 batch += cls.length_to_eps[length]['ep_list']
134 with cls.batches_lock:
135 while len(batch) >= bsz:
136 cls.batches.append(batch[:bsz])
137 batch = batch[bsz:]
138 if len(batch) > 0:
139 with cls.batches_lock:
140 cls.batches.append(batch)
141
142 def flatten(l):
143 '''Helper function for flattening a list'''
144 return [item for sublist in l for item in sublist]
145
146 def put_in_cache(ep_idx, episode, caller):
147 '''Put episode `ep_idx` into cache'''
148 length = ep_length(episode[caller.batch_sort_field])
149 lengths = [length] + flatten([
150 [length + i, length + (i * -1)]
151 for i in range(1, caller.batch_length_range)
152 ])
153 lengths = [max(i, 1) for i in lengths]
154 in_cache = ep_idx in cls.ep_indices
155 # first check if episode can go in existing bucket
156 if not in_cache:
157 for l in lengths:
158 if l in cls.length_to_eps:
159 with cls.cache_lock:
160 cls.length_to_eps[l]['ep_list'] += [(ep_idx, episode)]
161 cls.ep_indices.add(ep_idx)
162 in_cache = True
163 break
164 # otherwise, make a new bucket
165 if not in_cache:
166 with cls.cache_lock:
167 cls.length_to_eps[length] = {
168 'current_idx': 0,
169 'ep_list': [(ep_idx, episode)],
170 'bucket_complete': False
171 }
172 cls.ep_indices.add(ep_idx)
173 if ep_idx == caller.dataset.num_episodes() - 1:
174 consolidate(caller)
175 with cls.add_to_cache_cv:
176 cls.cache_filled_cv.notify_all()
177
178 @wraps(function)
179 def wrapper(*args):
180 caller = args[0]
181 batch_sort = caller.batch_sort
182 batch_cache_type = caller.batch_cache_type
183 bsz = caller.bsz
184 if not batch_sort or not caller.datatype.startswith('train'):
185 return function(*args)
186 # If Loader, put episodes in cache
187 if isinstance(caller, LoaderProcess):
188 with cls.add_to_cache_cv:
189 while (get_cache_size() >= max_cache_size and
190 len(get_available_buckets(bsz)) > 0):
191 cls.cache_filled_cv.notify_all()
192 cls.add_to_cache_cv.wait()
193 idx_and_batch = function(*args)
194 if idx_and_batch is None:
195 return None
196 for ep_index, ep in idx_and_batch[1]:
197 put_in_cache(ep_index, ep, caller)
198 return idx_and_batch
199 # If teacher, return batch of episodes
200 else:
201 teacher = caller
202 num_batches = teacher.num_batches
203 while True:
204 with cls.cache_filled_cv:
205 while (not cls.load_complete.value and
206 (get_cache_size() <= min_cache_size or
207 len(get_available_buckets(bsz)) == 0)):
208 cls.add_to_cache_cv.notify()
209 cls.cache_filled_cv.wait()
210 available_buckets = get_available_buckets(bsz)
211 if cls.load_complete.value and batch_cache_type == 'pop':
212 return teacher.batch_idx + 1, random.choice(cls.batches)
213 batch = None
214 available_buckets = get_available_buckets(bsz)
215 if len(available_buckets) != 0:
216 # Pick length index at random
217 length = random.choice(list(available_buckets.keys()))
218 with cls.cache_lock:
219 current_idx = cls.length_to_eps[length]['current_idx']
220 ep_list = cls.length_to_eps[length]['ep_list']
221 num_eps = len(ep_list)
222 if num_eps - current_idx >= bsz:
223 if batch_cache_type == 'pop':
224 batch = ep_list[:bsz]
225 cls.length_to_eps[length]['ep_list'] = ep_list[bsz:]
226 else:
227 batch = ep_list[current_idx: current_idx + bsz]
228 cls.length_to_eps[length]['current_idx'] = (
229 current_idx + bsz
230 )
231 elif cls.load_complete.value and num_eps > 0:
232 if batch_cache_type == 'pop':
233 batch = ep_list
234 elif num_eps - current_idx > 0:
235 batch = ep_list[current_idx:]
236 cls.length_to_eps[length]['current_idx'] = \
237 num_eps - 1
238 cls.length_to_eps[length]['bucket_complete'] = True
239
240 if batch is not None:
241 if batch_cache_type == 'pop':
242 with cls.batches_lock:
243 cls.batches.append(batch)
244 elif teacher.batch_idx + 1 >= num_batches:
245 reset()
246 return teacher.batch_idx + 1, batch
247
248 return wrapper
249
250
251 def ep_length(val):
252 '''Determines the length of an episode, given the specified value'''
253 if isinstance(val, (int, bytes, bool)):
254 return 1
255 if isinstance(val, str):
256 return len(val.replace('\n', ' ').split(' '))
257 if isinstance(val, (collections.Mapping,
258 collections.Sequence,
259 torch.Tensor)):
260 if (isinstance(val, collections.Mapping) and
261 val.get('deserialized_tensor', False)):
262 return len(val['value'])
263 return len(val)
264
265
266 # Get Datasets from the options
267 def get_dataset_classes(opt):
268 """ To use a custom dataset (as opposed to the StreamDataset or ParlAIDataset),
269 you can subclass the pytorch Dataset class and specify its
270 location on the command line.
271
272 For example, the VQA v1 task provides a custom dataset, which can
273 be specified on the command line as follows:
274 ``-pytd vqa_v1:VQADataset``
275
276 Note that if the dataset is named ``DefaultDataset``, then you do
277 not need to specify its name following the colon; e.g., it
278 would just be:
279 ``-pytd vqa_v1``
280 """
281 if 'stream' in opt.get('datatype'):
282 default_dataset = StreamDataset
283 else:
284 default_dataset = ParlAIDataset
285 dataset_name = opt.get('pytorch_teacher_dataset')
286 task_name = opt.get('pytorch_teacher_task')
287 datasets = []
288 if task_name is not None:
289 datasets += [
290 (default_dataset, default_collate, task)
291 for task in task_name.split(',')
292 ]
293 if not dataset_name:
294 return datasets
295 sps = [d.strip() for d in dataset_name.split(',')]
296 for sp in sps:
297 full_task_name = sp
298 repo = 'parlai'
299 if sp.startswith('internal:'):
300 # To switch to local repo, useful for non-public projects
301 # (make a directory called 'parlai_internal' with your private agents)
302 repo = 'parlai_internal'
303 sp = sp[9:]
304 sp = sp.split(':')
305 if '.' in sp[0]:
306 module_name = sp[0]
307 else:
308 dataset = sp[0].lower()
309 module_name = '{}.tasks.{}.agents'.format(repo, dataset)
310 if len(sp) > 1:
311 sp[1] = sp[1][0].upper() + sp[1][1:]
312 dataset = sp[1]
313 if '.' not in sp[0] and 'Dataset' not in dataset:
314 # Reformat from underscore to CamelCase and append "Dataset" to
315 # class name by default if a complete path is not given.
316 words = dataset.split('_')
317 teacher_name = ''
318 for w in words:
319 teacher_name += (w[0].upper() + w[1:])
320 dataset = teacher_name + 'Dataset'
321 else:
322 dataset = 'DefaultDataset'
323 my_module = importlib.import_module(module_name)
324 dataset_class = getattr(my_module, dataset)
325
326 collate = default_collate
327 if hasattr(dataset_class, 'collate'):
328 collate = dataset_class.collate
329 elif opt.get('model', False):
330 agent_class = get_agent_module(opt.get('model'))
331 if hasattr(agent_class, 'collate'):
332 collate = agent_class.collate
333 datasets.append((dataset_class, collate, full_task_name))
334 return datasets
335
336
337 class LoaderProcess(Thread):
338 """A background process that submits jobs to the DataLoader
339 to load examples into cache
340 """
341 def __init__(self, opt):
342 super().__init__(daemon=True)
343 dataset_classes = get_dataset_classes(opt)
344 if len(dataset_classes) > 1:
345 datasets = []
346 for class_name, collate_fn, task_name in dataset_classes:
347 opt['pytorch_teacher_task'] = task_name
348 opt['task'] = task_name
349 datasets.append(class_name(opt))
350 self.collate = collate_fn
351 self.dataset = ParlAIConcatDataset(datasets)
352 else:
353 class_name, self.collate, task_name = dataset_classes[0]
354 self.dataset = class_name(opt)
355 self.bsz = opt.get('batchsize', 1)
356 self.num_workers = opt.get('num_workers', 4)
357 self.dataloader = DataLoader(
358 self.dataset,
359 batch_size=self.bsz,
360 shuffle=False,
361 sampler=sampler.SequentialSampler(self.dataset),
362 num_workers=self.num_workers,
363 collate_fn=self.collate,
364 pin_memory=False,
365 drop_last=False,
366 )
367 self.datatype = opt.get('datatype')
368 self.data = enumerate(self.dataloader)
369 self.batch_sort = opt.get('pytorch_teacher_batch_sort')
370 self.batch_cache_type = opt.get('batch_sort_cache_type')
371 self.batch_length_range = opt.get('batch_length_range')
372 self.batch_sort_field = opt.get('batch_sort_field')
373
374 def run(self):
375 while True:
376 idx_and_batch = self.load_next()
377 if idx_and_batch is None:
378 return
379
380 @BatchSortCache.batch_cache
381 def load_next(self):
382 try:
383 return next(self.data)
384 except StopIteration:
385 return None
386
387
388 """
389 Collating, deserializing, processing batches
390 """
391 TORCH_DTYPES = [torch.float32, torch.float64, torch.float16, torch.uint8,
392 torch.int8, torch.int16, torch.int32, torch.int64]
393 STR_TO_TORCH_DTYPE = {str(d): d for d in TORCH_DTYPES}
394
395
396 def default_collate(batch):
397 """
398 Default collate function, used for ParlAIDataset and StreamDataset
399 """
400 new_batch = []
401 for b in batch:
402 idx = b[0]
403 if type(b[1]) is list:
404 ep = b[1][0]
405 else:
406 ep = b[1]
407 new_batch.append((idx, ep))
408 return new_batch
409
410
411 def deserialize(obj):
412 """
413 Deserializes lists into Tensors
414 """
415 for key in obj:
416 if type(obj[key]) is dict and obj[key].get('deserialized_tensor', False):
417 dtype = STR_TO_TORCH_DTYPE[obj[key]['type']]
418 val = obj[key]['value']
419 del obj[key]
420 obj[key] = torch.as_tensor(val, dtype=dtype)
421 return obj
422
423
424 def process(ex_or_batch):
425 """
426 Process examples/batches, i.e. deserialize if necessary
427 """
428 if type(ex_or_batch) is list:
429 if all([ep.get('preprocessed') for ep in ex_or_batch]):
430 ex_or_batch = [deserialize(ep) for ep in ex_or_batch]
431 else:
432 if ex_or_batch.get('preprocessed'):
433 ex_or_batch = deserialize(ex_or_batch)
434 return ex_or_batch
435
436
437 """
438 ParlAI Implementations of Pytorch Datasets
439 """
440
441
442 class StreamDataset(Dataset):
443 """A Pytorch Dataset utilizing streaming"""
444 def __init__(self, opt):
445 self.opt = opt
446 self.datatype = opt.get('datatype')
447 self.datapath = build_data(self.opt)
448 self.length_datafile = os.path.join(self.datapath, 'data_length')
449 self.char_index_file = os.path.join(self.datapath, 'char_index')
450 self.datafile = os.path.join(self.datapath, 'data')
451 self.training = self.datatype.startswith('train')
452 self.ordered = ('ordered' in self.datatype or
453 ('stream' in self.datatype and not opt.get('shuffle')))
454 self._load_lens()
455
456 def __getitem__(self, index):
457 if self.ordered or not self.training:
458 if not hasattr(self, 'data_gen'):
459 self.data_gen = self._read_episode()
460 while True:
461 idx, ep = next(self.data_gen)
462 if idx == index:
463 return (index, ep)
464 else:
465 episode = []
466 episode_done = False
467 with open(self.datafile) as f:
468 ex_offset = self.char_index[index]
469 f.seek(ex_offset)
470 while not episode_done:
471 example = json.loads(f.readline())
472 episode.append(example)
473 episode_done = example['episode_done']
474 return (index, episode)
475
476 def __len__(self):
477 return self.num_episodes()
478
479 def _load_lens(self):
480 with open(self.length_datafile) as length:
481 lengths = json.load(length)
482 self.num_eps = lengths['num_eps']
483 self.num_exs = lengths['num_exs']
484 with open(self.char_index_file) as char:
485 self.char_index = json.load(char)
486
487 def _data_generator(self):
488 while True:
489 for idx, episode in self._read_episode():
490 yield idx, episode
491
492 def _read_episode(self):
493 read = open(self.datafile)
494 episode = []
495 for idx, line in enumerate(read):
496 example = json.loads(line)
497 episode.append(example)
498 if example['episode_done']:
499 yield idx, episode
500 episode = []
501 read.close()
502
503 def num_episodes(self):
504 return self.num_eps
505
506 def num_examples(self):
507 return self.num_exs
508
509
510 class ParlAIDataset(Dataset):
511 """A Pytorch Dataset, for random sampling"""
512 def __init__(self, opt):
513 self.opt = opt
514 self.datatype = opt.get('datatype')
515 self.datapath = build_data(self.opt)
516 self.length_datafile = os.path.join(self.datapath, 'data_length')
517 self.datafile = os.path.join(self.datapath, 'data')
518 self.training = self.datatype.startswith('train')
519 self._load_lens()
520 self._setup_data()
521
522 def __getitem__(self, index):
523 return index, self.data[index]
524
525 def __len__(self):
526 return self.num_episodes()
527
528 def _load_lens(self):
529 with open(self.length_datafile) as length:
530 lengths = json.load(length)
531 self.num_eps = lengths['num_eps']
532 self.num_exs = lengths['num_exs']
533
534 def _setup_data(self):
535 self.data = []
536 with open(self.datafile) as f:
537 for line in f:
538 self.data.append(json.loads(line))
539
540 def num_episodes(self):
541 return self.num_eps
542
543 def num_examples(self):
544 return self.num_exs
545
546
547 class ParlAIConcatDataset(ConcatDataset):
548 """Override to set num_eps and num_exs"""
549
550 @lru_cache(maxsize=1)
551 def num_episodes(self):
552 return sum(d.num_episodes() for d in self.datasets)
553
554 @lru_cache(maxsize=1)
555 def num_examples(self):
556 return sum(d.num_examples() for d in self.datasets)
557
558
559 class PytorchDataTeacher(FixedDialogTeacher):
560 """
561 A teacher that loads data using Pytorch Datasets. For details on how
562 to use, please follow the tutorial here:
563 http://parl.ai/static/docs/tutorial_worlds.html#multiprocessed-pytorch-dataloader
564 """
565 def __init__(self, opt, shared=None):
566 opt['batch_sort'] = False
567 super().__init__(opt, shared)
568 self.use_batch_act = self.bsz > 1
569 self.num_workers = opt['numworkers']
570 self.batch_sort = opt.get('pytorch_teacher_batch_sort') and \
571 'train' in self.datatype
572 self.batch_cache_type = opt.get('batch_sort_cache_type')
573 self.batch_sort_field = opt.get('batch_sort_field')
574 # One can specify a collate function to use for preparing a batch
575 self.opt = opt.copy()
576 self.is_shared = shared is not None
577 dataset_classes = self.get_dataset_class(opt)
578 self.ordered = ('ordered' in self.datatype or
579 ('stream' in self.datatype and not opt.get('shuffle')))
580 if self.ordered:
581 # force index for ordered, so that we see every example
582 self.batch_cache_type = 'index'
583
584 if not shared:
585 BatchSortCache.create()
586 if len(dataset_classes) > 1:
587 datasets = []
588 for class_name, collate_fn, task_name in dataset_classes:
589 dataset_opt = opt.copy()
590 dataset_opt['pytorch_teacher_task'] = task_name
591 dataset_opt['task'] = task_name
592 datasets.append(class_name(dataset_opt))
593 self.collate_fn = collate_fn
594 self.id = ','.join([d[2] for d in dataset_classes])
595 self.dataset = ParlAIConcatDataset(datasets)
596 else:
597 class_name, self.collate_fn, task_name = dataset_classes[0]
598 self.id = task_name
599 self.dataset = class_name(opt)
600 if self.ordered or not self.training:
601 data_sampler = sampler.SequentialSampler(self.dataset)
602 else:
603 data_sampler = sampler.RandomSampler(self.dataset)
604
605 self.pytorch_dataloader = DataLoader(
606 self.dataset,
607 batch_size=self.bsz,
608 sampler=data_sampler,
609 num_workers=self.num_workers,
610 collate_fn=self.collate_fn,
611 pin_memory=False,
612 drop_last=False,
613 )
614
615 self.lastYs = [None] * self.bsz
616 if self.batch_sort:
617 self.loader_process = LoaderProcess(opt)
618 self.loader_process.start()
619 self.data = enumerate(self.pytorch_dataloader)
620 else:
621 self.dataset = shared['dataset']
622 self.pytorch_dataloader = shared['pytorch_dataloader']
623 self.lastYs = shared['lastYs']
624 self.data = shared['data']
625 self.id = shared['id']
626
627 self.num_batches = math.ceil(self.dataset.num_episodes() / self.bsz)
628 self.reset()
629
630 def get_dataset_class(self, opt):
631 return get_dataset_classes(opt)
632
633 def reset(self):
634 """Reset the dialog so that it is at the start of the epoch,
635 and all metrics are reset.
636 """
637 super().reset()
638 self.reset_data()
639
640 def reset_data(self):
641 if not self.is_shared:
642 self.data = enumerate(self.pytorch_dataloader)
643 self.lastY = None
644 self.epochDone = False
645 self.episode = None
646 self.episode_done = True
647 self.episode_idx = 0
648 self.batch_idx = 0
649
650 def share(self):
651 shared = super().share()
652 shared['pytorch_dataloader'] = self.pytorch_dataloader
653 shared['dataset'] = self.dataset
654 shared['data'] = self.data
655 shared['id'] = self.id
656 return shared
657
658 def next_example(self):
659 if self.epochDone:
660 if not self.training:
661 return {'episode_done': True, 'id': self.getID()}, True
662 else:
663 # Reset the data because it is streaming data
664 self.reset_data()
665 if self.episode_done:
666 try:
667 self.episode_idx, episode = next(self.data)
668 if self.collate_fn == default_collate:
669 episode = [ex[1] for ex in episode]
670 self.episode = process(episode)
671 self.entry_idx = 0
672 epoch_done = False
673 except StopIteration:
674 ex = {'episode_done': True, 'id': self.getID()}
675 epoch_done = True
676 else:
677 self.entry_idx += 1
678
679 if not epoch_done:
680 ex = self.episode[self.entry_idx]
681 self.episode_done = ex['episode_done']
682 if (self.episode_done and
683 self.episode_idx + self.bsz >= self.num_episodes()):
684 epoch_done = True
685 return ex, epoch_done
686
687 @BatchSortCache.batch_cache
688 def get_next_batch(self):
689 # employs a cache to see if there is a batch of equal size ready
690 batch = next(self.data)
691 return batch
692
693 def next_batch(self):
694 if self.epochDone:
695 if not self.training:
696 return [{'episode_done': True, 'id': self.getID()}] * self.bsz
697 else:
698 # Reset the data because it is streaming data
699 self.reset_data()
700 try:
701 self.batch_idx, batch = self.get_next_batch()
702 if self.collate_fn == default_collate:
703 batch = [b[1] for b in batch]
704 batch = process(batch)
705 epoch_done = False
706 except StopIteration:
707 batch = [{'episode_done': True, 'id': self.getID()}] * self.bsz
708 epoch_done = True
709 if not epoch_done and self.batch_idx == self.num_batches:
710 epoch_done = True
711 self.epochDone = epoch_done
712 return batch
713
714 def num_episodes(self):
715 """Get the number of episodes in this dataset."""
716 return self.dataset.num_episodes()
717
718 def num_examples(self):
719 """Get the total number of examples in this dataset."""
720 return self.dataset.num_examples()
721
722 def act(self):
723 """Send new dialog message."""
724 action = super().act()
725 self.lastY = action.get('labels', action.get('eval_labels', None))
726 return action
727
728
729 class DefaultTeacher(PytorchDataTeacher):
730 pass
731
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parlai/core/pytorch_data_teacher.py b/parlai/core/pytorch_data_teacher.py
--- a/parlai/core/pytorch_data_teacher.py
+++ b/parlai/core/pytorch_data_teacher.py
@@ -29,6 +29,13 @@
 from threading import Thread, Condition, RLock
 
 
+if torch.version.__version__.startswith('0.'):
+    raise ImportError(
+        "Please upgrade to PyTorch >=1.0; "
+        "visit https://pytorch.org for instructions."
+    )
+
+
 class BatchSortCache(object):
     """
     Object that encapsulates the functionality of the batch sort cache.
| {"golden_diff": "diff --git a/parlai/core/pytorch_data_teacher.py b/parlai/core/pytorch_data_teacher.py\n--- a/parlai/core/pytorch_data_teacher.py\n+++ b/parlai/core/pytorch_data_teacher.py\n@@ -29,6 +29,13 @@\n from threading import Thread, Condition, RLock\n \n \n+if torch.version.__version__.startswith('0.'):\n+ raise ImportError(\n+ \"Please upgrade to PyTorch >=1.0; \"\n+ \"visit https://pytorch.org for instructions.\"\n+ )\n+\n+\n class BatchSortCache(object):\n \"\"\"\n Object that encapsulates the functionality of the batch sort cache.\n", "issue": "Pytorch Data Teacher seems to hang\nI observe a weird hang when using the pytorch data teacher:\r\n\r\n```\r\ndoesn't hang\r\nPYTHONPATH=. python examples/display_data.py -t wizard_of_wikipedia:WizardDialogKnowledgeTeacher\r\n\r\nseems to hang indefinitely\r\nPYTHONPATH=. python examples/display_data.py -pyt wizard_of_wikipedia:WizardDialogKnowledgeTeacher\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"\n (NOTE: To use this class, please follow the tutorial here:\n http://parl.ai/static/docs/tutorial_worlds.html#multiprocessed-pytorch-dataloader)\n\n\"\"\"\nfrom .teachers import FixedDialogTeacher\nfrom parlai.scripts.build_pytorch_data import build_data\nfrom .agents import get_agent_module\nimport json\nimport math\nimport collections\nimport random\nimport os\nfrom functools import wraps\nimport importlib\nfrom functools import lru_cache\ntry:\n import torch # noqa: F401\nexcept ImportError:\n raise ImportError('Need to install Pytorch: go to pytorch.org')\nfrom torch.utils.data import ConcatDataset, Dataset, DataLoader, sampler\nfrom torch.multiprocessing import Lock, Value\nimport ctypes\nfrom threading import Thread, Condition, RLock\n\n\nclass BatchSortCache(object):\n \"\"\"\n Object that encapsulates the functionality of the batch sort cache.\n\n Maps episode length to dictionary with following keys:\n current_idx: which episode in the list are we at (if simply indexing\n into list)\n ep_list: list of episodes of the length of the key\n bucket_complete: if there are no more episodes left to consider in\n the bucket\n \"\"\"\n @classmethod\n def create(cls):\n if not hasattr(cls, 'length_to_eps'):\n # Maps episode length to list of episodes\n cls.length_to_eps = {}\n if not hasattr(cls, 'ep_indices'):\n # Set of episode indices already in the cache\n cls.ep_indices = set()\n if not hasattr(cls, 'batches'):\n # List of batches if popping batches\n cls.batches = []\n if not hasattr(cls, 'load_complete'):\n # If all episodes have been loaded into memory\n cls.load_complete = Value(ctypes.c_bool, False)\n if not hasattr(cls, 'batches_lock'):\n # Lock to access batches\n cls.batches_lock = Lock()\n if not hasattr(cls, 'cache_lock'):\n # Lock to access length_to_eps\n cls.cache_lock = Lock()\n if not hasattr(cls, 'fill_cache_lock'):\n # Lock for condition variables\n cls.fill_cache_lock = RLock()\n if not hasattr(cls, 'add_to_cache_cv'):\n # Condition notifying Loader to add to cache\n cls.add_to_cache_cv = Condition(lock=cls.fill_cache_lock)\n if not hasattr(cls, 'cache_filled_cv'):\n # Condition notifying teacher that cache has episodes\n cls.cache_filled_cv = Condition(lock=cls.fill_cache_lock)\n\n @classmethod\n def batch_cache(cls, function):\n max_cache_size = 10000 # Max unseen eps\n min_cache_size = 1000 # Min unseen eps\n\n 
def get_cache_size():\n '''Returns number of available episodes '''\n return sum(\n len(v['ep_list']) - v['current_idx']\n for k, v in cls.length_to_eps.items()\n )\n\n def get_available_buckets(bsz):\n '''Returns buckets where there are enough episodes for a batch'''\n if cls.load_complete.value:\n return {\n k: v\n for k, v in cls.length_to_eps.items()\n if not v['bucket_complete']\n or len(v['ep_list']) - v['current_idx'] > 0\n }\n else:\n return {\n k: v\n for k, v in cls.length_to_eps.items()\n if len(v['ep_list']) - v['current_idx'] >= bsz\n }\n\n def reset():\n '''Resets the indices into the buckets'''\n with cls.cache_lock:\n for idx in cls.length_to_eps:\n cls.length_to_eps[idx]['current_idx'] = 0\n cls.length_to_eps[idx]['bucket_complete'] = False\n\n def consolidate(caller):\n '''Consolidate remaining episodes into batches'''\n cls.load_complete.value = True\n bsz = caller.bsz\n batch = []\n sorted_lengths = sorted(cls.length_to_eps.keys())\n with cls.cache_lock:\n if caller.batch_cache_type == 'index':\n for length in sorted_lengths:\n current_idx = cls.length_to_eps[length]['current_idx']\n ep_list = cls.length_to_eps[length]['ep_list']\n unseen_eps = ep_list[current_idx:]\n cls.length_to_eps[length]['ep_list'] = ep_list[:current_idx]\n batch = unseen_eps + batch\n while len(batch) >= bsz:\n cls.length_to_eps[length]['ep_list'] += batch[:bsz]\n batch = batch[bsz:]\n if len(batch) > 0:\n cls.length_to_eps[-1] = {\n 'current_idx': 0,\n 'ep_list': batch,\n 'bucket_complete': False\n }\n elif caller.batch_cache_type == 'pop':\n for length in sorted_lengths:\n batch += cls.length_to_eps[length]['ep_list']\n with cls.batches_lock:\n while len(batch) >= bsz:\n cls.batches.append(batch[:bsz])\n batch = batch[bsz:]\n if len(batch) > 0:\n with cls.batches_lock:\n cls.batches.append(batch)\n\n def flatten(l):\n '''Helper function for flattening a list'''\n return [item for sublist in l for item in sublist]\n\n def put_in_cache(ep_idx, episode, caller):\n '''Put episode `ep_idx` into cache'''\n length = ep_length(episode[caller.batch_sort_field])\n lengths = [length] + flatten([\n [length + i, length + (i * -1)]\n for i in range(1, caller.batch_length_range)\n ])\n lengths = [max(i, 1) for i in lengths]\n in_cache = ep_idx in cls.ep_indices\n # first check if episode can go in existing bucket\n if not in_cache:\n for l in lengths:\n if l in cls.length_to_eps:\n with cls.cache_lock:\n cls.length_to_eps[l]['ep_list'] += [(ep_idx, episode)]\n cls.ep_indices.add(ep_idx)\n in_cache = True\n break\n # otherwise, make a new bucket\n if not in_cache:\n with cls.cache_lock:\n cls.length_to_eps[length] = {\n 'current_idx': 0,\n 'ep_list': [(ep_idx, episode)],\n 'bucket_complete': False\n }\n cls.ep_indices.add(ep_idx)\n if ep_idx == caller.dataset.num_episodes() - 1:\n consolidate(caller)\n with cls.add_to_cache_cv:\n cls.cache_filled_cv.notify_all()\n\n @wraps(function)\n def wrapper(*args):\n caller = args[0]\n batch_sort = caller.batch_sort\n batch_cache_type = caller.batch_cache_type\n bsz = caller.bsz\n if not batch_sort or not caller.datatype.startswith('train'):\n return function(*args)\n # If Loader, put episodes in cache\n if isinstance(caller, LoaderProcess):\n with cls.add_to_cache_cv:\n while (get_cache_size() >= max_cache_size and\n len(get_available_buckets(bsz)) > 0):\n cls.cache_filled_cv.notify_all()\n cls.add_to_cache_cv.wait()\n idx_and_batch = function(*args)\n if idx_and_batch is None:\n return None\n for ep_index, ep in idx_and_batch[1]:\n put_in_cache(ep_index, ep, caller)\n 
return idx_and_batch\n # If teacher, return batch of episodes\n else:\n teacher = caller\n num_batches = teacher.num_batches\n while True:\n with cls.cache_filled_cv:\n while (not cls.load_complete.value and\n (get_cache_size() <= min_cache_size or\n len(get_available_buckets(bsz)) == 0)):\n cls.add_to_cache_cv.notify()\n cls.cache_filled_cv.wait()\n available_buckets = get_available_buckets(bsz)\n if cls.load_complete.value and batch_cache_type == 'pop':\n return teacher.batch_idx + 1, random.choice(cls.batches)\n batch = None\n available_buckets = get_available_buckets(bsz)\n if len(available_buckets) != 0:\n # Pick length index at random\n length = random.choice(list(available_buckets.keys()))\n with cls.cache_lock:\n current_idx = cls.length_to_eps[length]['current_idx']\n ep_list = cls.length_to_eps[length]['ep_list']\n num_eps = len(ep_list)\n if num_eps - current_idx >= bsz:\n if batch_cache_type == 'pop':\n batch = ep_list[:bsz]\n cls.length_to_eps[length]['ep_list'] = ep_list[bsz:]\n else:\n batch = ep_list[current_idx: current_idx + bsz]\n cls.length_to_eps[length]['current_idx'] = (\n current_idx + bsz\n )\n elif cls.load_complete.value and num_eps > 0:\n if batch_cache_type == 'pop':\n batch = ep_list\n elif num_eps - current_idx > 0:\n batch = ep_list[current_idx:]\n cls.length_to_eps[length]['current_idx'] = \\\n num_eps - 1\n cls.length_to_eps[length]['bucket_complete'] = True\n\n if batch is not None:\n if batch_cache_type == 'pop':\n with cls.batches_lock:\n cls.batches.append(batch)\n elif teacher.batch_idx + 1 >= num_batches:\n reset()\n return teacher.batch_idx + 1, batch\n\n return wrapper\n\n\ndef ep_length(val):\n '''Determines the length of an episode, given the specified value'''\n if isinstance(val, (int, bytes, bool)):\n return 1\n if isinstance(val, str):\n return len(val.replace('\\n', ' ').split(' '))\n if isinstance(val, (collections.Mapping,\n collections.Sequence,\n torch.Tensor)):\n if (isinstance(val, collections.Mapping) and\n val.get('deserialized_tensor', False)):\n return len(val['value'])\n return len(val)\n\n\n# Get Datasets from the options\ndef get_dataset_classes(opt):\n \"\"\" To use a custom dataset (as opposed to the StreamDataset or ParlAIDataset),\n you can subclass the pytorch Dataset class and specify its\n location on the command line.\n\n For example, the VQA v1 task provides a custom dataset, which can\n be specified on the command line as follows:\n ``-pytd vqa_v1:VQADataset``\n\n Note that if the dataset is named ``DefaultDataset``, then you do\n not need to specify its name following the colon; e.g., it\n would just be:\n ``-pytd vqa_v1``\n \"\"\"\n if 'stream' in opt.get('datatype'):\n default_dataset = StreamDataset\n else:\n default_dataset = ParlAIDataset\n dataset_name = opt.get('pytorch_teacher_dataset')\n task_name = opt.get('pytorch_teacher_task')\n datasets = []\n if task_name is not None:\n datasets += [\n (default_dataset, default_collate, task)\n for task in task_name.split(',')\n ]\n if not dataset_name:\n return datasets\n sps = [d.strip() for d in dataset_name.split(',')]\n for sp in sps:\n full_task_name = sp\n repo = 'parlai'\n if sp.startswith('internal:'):\n # To switch to local repo, useful for non-public projects\n # (make a directory called 'parlai_internal' with your private agents)\n repo = 'parlai_internal'\n sp = sp[9:]\n sp = sp.split(':')\n if '.' 
in sp[0]:\n module_name = sp[0]\n else:\n dataset = sp[0].lower()\n module_name = '{}.tasks.{}.agents'.format(repo, dataset)\n if len(sp) > 1:\n sp[1] = sp[1][0].upper() + sp[1][1:]\n dataset = sp[1]\n if '.' not in sp[0] and 'Dataset' not in dataset:\n # Reformat from underscore to CamelCase and append \"Dataset\" to\n # class name by default if a complete path is not given.\n words = dataset.split('_')\n teacher_name = ''\n for w in words:\n teacher_name += (w[0].upper() + w[1:])\n dataset = teacher_name + 'Dataset'\n else:\n dataset = 'DefaultDataset'\n my_module = importlib.import_module(module_name)\n dataset_class = getattr(my_module, dataset)\n\n collate = default_collate\n if hasattr(dataset_class, 'collate'):\n collate = dataset_class.collate\n elif opt.get('model', False):\n agent_class = get_agent_module(opt.get('model'))\n if hasattr(agent_class, 'collate'):\n collate = agent_class.collate\n datasets.append((dataset_class, collate, full_task_name))\n return datasets\n\n\nclass LoaderProcess(Thread):\n \"\"\"A background process that submits jobs to the DataLoader\n to load examples into cache\n \"\"\"\n def __init__(self, opt):\n super().__init__(daemon=True)\n dataset_classes = get_dataset_classes(opt)\n if len(dataset_classes) > 1:\n datasets = []\n for class_name, collate_fn, task_name in dataset_classes:\n opt['pytorch_teacher_task'] = task_name\n opt['task'] = task_name\n datasets.append(class_name(opt))\n self.collate = collate_fn\n self.dataset = ParlAIConcatDataset(datasets)\n else:\n class_name, self.collate, task_name = dataset_classes[0]\n self.dataset = class_name(opt)\n self.bsz = opt.get('batchsize', 1)\n self.num_workers = opt.get('num_workers', 4)\n self.dataloader = DataLoader(\n self.dataset,\n batch_size=self.bsz,\n shuffle=False,\n sampler=sampler.SequentialSampler(self.dataset),\n num_workers=self.num_workers,\n collate_fn=self.collate,\n pin_memory=False,\n drop_last=False,\n )\n self.datatype = opt.get('datatype')\n self.data = enumerate(self.dataloader)\n self.batch_sort = opt.get('pytorch_teacher_batch_sort')\n self.batch_cache_type = opt.get('batch_sort_cache_type')\n self.batch_length_range = opt.get('batch_length_range')\n self.batch_sort_field = opt.get('batch_sort_field')\n\n def run(self):\n while True:\n idx_and_batch = self.load_next()\n if idx_and_batch is None:\n return\n\n @BatchSortCache.batch_cache\n def load_next(self):\n try:\n return next(self.data)\n except StopIteration:\n return None\n\n\n\"\"\"\n Collating, deserializing, processing batches\n\"\"\"\nTORCH_DTYPES = [torch.float32, torch.float64, torch.float16, torch.uint8,\n torch.int8, torch.int16, torch.int32, torch.int64]\nSTR_TO_TORCH_DTYPE = {str(d): d for d in TORCH_DTYPES}\n\n\ndef default_collate(batch):\n \"\"\"\n Default collate function, used for ParlAIDataset and StreamDataset\n \"\"\"\n new_batch = []\n for b in batch:\n idx = b[0]\n if type(b[1]) is list:\n ep = b[1][0]\n else:\n ep = b[1]\n new_batch.append((idx, ep))\n return new_batch\n\n\ndef deserialize(obj):\n \"\"\"\n Deserializes lists into Tensors\n \"\"\"\n for key in obj:\n if type(obj[key]) is dict and obj[key].get('deserialized_tensor', False):\n dtype = STR_TO_TORCH_DTYPE[obj[key]['type']]\n val = obj[key]['value']\n del obj[key]\n obj[key] = torch.as_tensor(val, dtype=dtype)\n return obj\n\n\ndef process(ex_or_batch):\n \"\"\"\n Process examples/batches, i.e. 
deserialize if necessary\n \"\"\"\n if type(ex_or_batch) is list:\n if all([ep.get('preprocessed') for ep in ex_or_batch]):\n ex_or_batch = [deserialize(ep) for ep in ex_or_batch]\n else:\n if ex_or_batch.get('preprocessed'):\n ex_or_batch = deserialize(ex_or_batch)\n return ex_or_batch\n\n\n\"\"\"\n ParlAI Implementations of Pytorch Datasets\n\"\"\"\n\n\nclass StreamDataset(Dataset):\n \"\"\"A Pytorch Dataset utilizing streaming\"\"\"\n def __init__(self, opt):\n self.opt = opt\n self.datatype = opt.get('datatype')\n self.datapath = build_data(self.opt)\n self.length_datafile = os.path.join(self.datapath, 'data_length')\n self.char_index_file = os.path.join(self.datapath, 'char_index')\n self.datafile = os.path.join(self.datapath, 'data')\n self.training = self.datatype.startswith('train')\n self.ordered = ('ordered' in self.datatype or\n ('stream' in self.datatype and not opt.get('shuffle')))\n self._load_lens()\n\n def __getitem__(self, index):\n if self.ordered or not self.training:\n if not hasattr(self, 'data_gen'):\n self.data_gen = self._read_episode()\n while True:\n idx, ep = next(self.data_gen)\n if idx == index:\n return (index, ep)\n else:\n episode = []\n episode_done = False\n with open(self.datafile) as f:\n ex_offset = self.char_index[index]\n f.seek(ex_offset)\n while not episode_done:\n example = json.loads(f.readline())\n episode.append(example)\n episode_done = example['episode_done']\n return (index, episode)\n\n def __len__(self):\n return self.num_episodes()\n\n def _load_lens(self):\n with open(self.length_datafile) as length:\n lengths = json.load(length)\n self.num_eps = lengths['num_eps']\n self.num_exs = lengths['num_exs']\n with open(self.char_index_file) as char:\n self.char_index = json.load(char)\n\n def _data_generator(self):\n while True:\n for idx, episode in self._read_episode():\n yield idx, episode\n\n def _read_episode(self):\n read = open(self.datafile)\n episode = []\n for idx, line in enumerate(read):\n example = json.loads(line)\n episode.append(example)\n if example['episode_done']:\n yield idx, episode\n episode = []\n read.close()\n\n def num_episodes(self):\n return self.num_eps\n\n def num_examples(self):\n return self.num_exs\n\n\nclass ParlAIDataset(Dataset):\n \"\"\"A Pytorch Dataset, for random sampling\"\"\"\n def __init__(self, opt):\n self.opt = opt\n self.datatype = opt.get('datatype')\n self.datapath = build_data(self.opt)\n self.length_datafile = os.path.join(self.datapath, 'data_length')\n self.datafile = os.path.join(self.datapath, 'data')\n self.training = self.datatype.startswith('train')\n self._load_lens()\n self._setup_data()\n\n def __getitem__(self, index):\n return index, self.data[index]\n\n def __len__(self):\n return self.num_episodes()\n\n def _load_lens(self):\n with open(self.length_datafile) as length:\n lengths = json.load(length)\n self.num_eps = lengths['num_eps']\n self.num_exs = lengths['num_exs']\n\n def _setup_data(self):\n self.data = []\n with open(self.datafile) as f:\n for line in f:\n self.data.append(json.loads(line))\n\n def num_episodes(self):\n return self.num_eps\n\n def num_examples(self):\n return self.num_exs\n\n\nclass ParlAIConcatDataset(ConcatDataset):\n \"\"\"Override to set num_eps and num_exs\"\"\"\n\n @lru_cache(maxsize=1)\n def num_episodes(self):\n return sum(d.num_episodes() for d in self.datasets)\n\n @lru_cache(maxsize=1)\n def num_examples(self):\n return sum(d.num_examples() for d in self.datasets)\n\n\nclass PytorchDataTeacher(FixedDialogTeacher):\n \"\"\"\n A teacher that loads 
data using Pytorch Datasets. For details on how\n to use, please follow the tutorial here:\n http://parl.ai/static/docs/tutorial_worlds.html#multiprocessed-pytorch-dataloader\n \"\"\"\n def __init__(self, opt, shared=None):\n opt['batch_sort'] = False\n super().__init__(opt, shared)\n self.use_batch_act = self.bsz > 1\n self.num_workers = opt['numworkers']\n self.batch_sort = opt.get('pytorch_teacher_batch_sort') and \\\n 'train' in self.datatype\n self.batch_cache_type = opt.get('batch_sort_cache_type')\n self.batch_sort_field = opt.get('batch_sort_field')\n # One can specify a collate function to use for preparing a batch\n self.opt = opt.copy()\n self.is_shared = shared is not None\n dataset_classes = self.get_dataset_class(opt)\n self.ordered = ('ordered' in self.datatype or\n ('stream' in self.datatype and not opt.get('shuffle')))\n if self.ordered:\n # force index for ordered, so that we see every example\n self.batch_cache_type = 'index'\n\n if not shared:\n BatchSortCache.create()\n if len(dataset_classes) > 1:\n datasets = []\n for class_name, collate_fn, task_name in dataset_classes:\n dataset_opt = opt.copy()\n dataset_opt['pytorch_teacher_task'] = task_name\n dataset_opt['task'] = task_name\n datasets.append(class_name(dataset_opt))\n self.collate_fn = collate_fn\n self.id = ','.join([d[2] for d in dataset_classes])\n self.dataset = ParlAIConcatDataset(datasets)\n else:\n class_name, self.collate_fn, task_name = dataset_classes[0]\n self.id = task_name\n self.dataset = class_name(opt)\n if self.ordered or not self.training:\n data_sampler = sampler.SequentialSampler(self.dataset)\n else:\n data_sampler = sampler.RandomSampler(self.dataset)\n\n self.pytorch_dataloader = DataLoader(\n self.dataset,\n batch_size=self.bsz,\n sampler=data_sampler,\n num_workers=self.num_workers,\n collate_fn=self.collate_fn,\n pin_memory=False,\n drop_last=False,\n )\n\n self.lastYs = [None] * self.bsz\n if self.batch_sort:\n self.loader_process = LoaderProcess(opt)\n self.loader_process.start()\n self.data = enumerate(self.pytorch_dataloader)\n else:\n self.dataset = shared['dataset']\n self.pytorch_dataloader = shared['pytorch_dataloader']\n self.lastYs = shared['lastYs']\n self.data = shared['data']\n self.id = shared['id']\n\n self.num_batches = math.ceil(self.dataset.num_episodes() / self.bsz)\n self.reset()\n\n def get_dataset_class(self, opt):\n return get_dataset_classes(opt)\n\n def reset(self):\n \"\"\"Reset the dialog so that it is at the start of the epoch,\n and all metrics are reset.\n \"\"\"\n super().reset()\n self.reset_data()\n\n def reset_data(self):\n if not self.is_shared:\n self.data = enumerate(self.pytorch_dataloader)\n self.lastY = None\n self.epochDone = False\n self.episode = None\n self.episode_done = True\n self.episode_idx = 0\n self.batch_idx = 0\n\n def share(self):\n shared = super().share()\n shared['pytorch_dataloader'] = self.pytorch_dataloader\n shared['dataset'] = self.dataset\n shared['data'] = self.data\n shared['id'] = self.id\n return shared\n\n def next_example(self):\n if self.epochDone:\n if not self.training:\n return {'episode_done': True, 'id': self.getID()}, True\n else:\n # Reset the data because it is streaming data\n self.reset_data()\n if self.episode_done:\n try:\n self.episode_idx, episode = next(self.data)\n if self.collate_fn == default_collate:\n episode = [ex[1] for ex in episode]\n self.episode = process(episode)\n self.entry_idx = 0\n epoch_done = False\n except StopIteration:\n ex = {'episode_done': True, 'id': self.getID()}\n epoch_done 
= True\n else:\n self.entry_idx += 1\n\n if not epoch_done:\n ex = self.episode[self.entry_idx]\n self.episode_done = ex['episode_done']\n if (self.episode_done and\n self.episode_idx + self.bsz >= self.num_episodes()):\n epoch_done = True\n return ex, epoch_done\n\n @BatchSortCache.batch_cache\n def get_next_batch(self):\n # employs a cache to see if there is a batch of equal size ready\n batch = next(self.data)\n return batch\n\n def next_batch(self):\n if self.epochDone:\n if not self.training:\n return [{'episode_done': True, 'id': self.getID()}] * self.bsz\n else:\n # Reset the data because it is streaming data\n self.reset_data()\n try:\n self.batch_idx, batch = self.get_next_batch()\n if self.collate_fn == default_collate:\n batch = [b[1] for b in batch]\n batch = process(batch)\n epoch_done = False\n except StopIteration:\n batch = [{'episode_done': True, 'id': self.getID()}] * self.bsz\n epoch_done = True\n if not epoch_done and self.batch_idx == self.num_batches:\n epoch_done = True\n self.epochDone = epoch_done\n return batch\n\n def num_episodes(self):\n \"\"\"Get the number of episodes in this dataset.\"\"\"\n return self.dataset.num_episodes()\n\n def num_examples(self):\n \"\"\"Get the total number of examples in this dataset.\"\"\"\n return self.dataset.num_examples()\n\n def act(self):\n \"\"\"Send new dialog message.\"\"\"\n action = super().act()\n self.lastY = action.get('labels', action.get('eval_labels', None))\n return action\n\n\nclass DefaultTeacher(PytorchDataTeacher):\n pass\n", "path": "parlai/core/pytorch_data_teacher.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"\n (NOTE: To use this class, please follow the tutorial here:\n http://parl.ai/static/docs/tutorial_worlds.html#multiprocessed-pytorch-dataloader)\n\n\"\"\"\nfrom .teachers import FixedDialogTeacher\nfrom parlai.scripts.build_pytorch_data import build_data\nfrom .agents import get_agent_module\nimport json\nimport math\nimport collections\nimport random\nimport os\nfrom functools import wraps\nimport importlib\nfrom functools import lru_cache\ntry:\n import torch # noqa: F401\nexcept ImportError:\n raise ImportError('Need to install Pytorch: go to pytorch.org')\nfrom torch.utils.data import ConcatDataset, Dataset, DataLoader, sampler\nfrom torch.multiprocessing import Lock, Value\nimport ctypes\nfrom threading import Thread, Condition, RLock\n\n\nif torch.version.__version__.startswith('0.'):\n raise ImportError(\n \"Please upgrade to PyTorch >=1.0; \"\n \"visit https://pytorch.org for instructions.\"\n )\n\n\nclass BatchSortCache(object):\n \"\"\"\n Object that encapsulates the functionality of the batch sort cache.\n\n Maps episode length to dictionary with following keys:\n current_idx: which episode in the list are we at (if simply indexing\n into list)\n ep_list: list of episodes of the length of the key\n bucket_complete: if there are no more episodes left to consider in\n the bucket\n \"\"\"\n @classmethod\n def create(cls):\n if not hasattr(cls, 'length_to_eps'):\n # Maps episode length to list of episodes\n cls.length_to_eps = {}\n if not hasattr(cls, 'ep_indices'):\n # Set of episode indices already in the cache\n cls.ep_indices = set()\n if not hasattr(cls, 'batches'):\n # List of batches if popping batches\n cls.batches = []\n if not hasattr(cls, 'load_complete'):\n # If all episodes have been 
loaded into memory\n cls.load_complete = Value(ctypes.c_bool, False)\n if not hasattr(cls, 'batches_lock'):\n # Lock to access batches\n cls.batches_lock = Lock()\n if not hasattr(cls, 'cache_lock'):\n # Lock to access length_to_eps\n cls.cache_lock = Lock()\n if not hasattr(cls, 'fill_cache_lock'):\n # Lock for condition variables\n cls.fill_cache_lock = RLock()\n if not hasattr(cls, 'add_to_cache_cv'):\n # Condition notifying Loader to add to cache\n cls.add_to_cache_cv = Condition(lock=cls.fill_cache_lock)\n if not hasattr(cls, 'cache_filled_cv'):\n # Condition notifying teacher that cache has episodes\n cls.cache_filled_cv = Condition(lock=cls.fill_cache_lock)\n\n @classmethod\n def batch_cache(cls, function):\n max_cache_size = 10000 # Max unseen eps\n min_cache_size = 1000 # Min unseen eps\n\n def get_cache_size():\n '''Returns number of available episodes '''\n return sum(\n len(v['ep_list']) - v['current_idx']\n for k, v in cls.length_to_eps.items()\n )\n\n def get_available_buckets(bsz):\n '''Returns buckets where there are enough episodes for a batch'''\n if cls.load_complete.value:\n return {\n k: v\n for k, v in cls.length_to_eps.items()\n if not v['bucket_complete']\n or len(v['ep_list']) - v['current_idx'] > 0\n }\n else:\n return {\n k: v\n for k, v in cls.length_to_eps.items()\n if len(v['ep_list']) - v['current_idx'] >= bsz\n }\n\n def reset():\n '''Resets the indices into the buckets'''\n with cls.cache_lock:\n for idx in cls.length_to_eps:\n cls.length_to_eps[idx]['current_idx'] = 0\n cls.length_to_eps[idx]['bucket_complete'] = False\n\n def consolidate(caller):\n '''Consolidate remaining episodes into batches'''\n cls.load_complete.value = True\n bsz = caller.bsz\n batch = []\n sorted_lengths = sorted(cls.length_to_eps.keys())\n with cls.cache_lock:\n if caller.batch_cache_type == 'index':\n for length in sorted_lengths:\n current_idx = cls.length_to_eps[length]['current_idx']\n ep_list = cls.length_to_eps[length]['ep_list']\n unseen_eps = ep_list[current_idx:]\n cls.length_to_eps[length]['ep_list'] = ep_list[:current_idx]\n batch = unseen_eps + batch\n while len(batch) >= bsz:\n cls.length_to_eps[length]['ep_list'] += batch[:bsz]\n batch = batch[bsz:]\n if len(batch) > 0:\n cls.length_to_eps[-1] = {\n 'current_idx': 0,\n 'ep_list': batch,\n 'bucket_complete': False\n }\n elif caller.batch_cache_type == 'pop':\n for length in sorted_lengths:\n batch += cls.length_to_eps[length]['ep_list']\n with cls.batches_lock:\n while len(batch) >= bsz:\n cls.batches.append(batch[:bsz])\n batch = batch[bsz:]\n if len(batch) > 0:\n with cls.batches_lock:\n cls.batches.append(batch)\n\n def flatten(l):\n '''Helper function for flattening a list'''\n return [item for sublist in l for item in sublist]\n\n def put_in_cache(ep_idx, episode, caller):\n '''Put episode `ep_idx` into cache'''\n length = ep_length(episode[caller.batch_sort_field])\n lengths = [length] + flatten([\n [length + i, length + (i * -1)]\n for i in range(1, caller.batch_length_range)\n ])\n lengths = [max(i, 1) for i in lengths]\n in_cache = ep_idx in cls.ep_indices\n # first check if episode can go in existing bucket\n if not in_cache:\n for l in lengths:\n if l in cls.length_to_eps:\n with cls.cache_lock:\n cls.length_to_eps[l]['ep_list'] += [(ep_idx, episode)]\n cls.ep_indices.add(ep_idx)\n in_cache = True\n break\n # otherwise, make a new bucket\n if not in_cache:\n with cls.cache_lock:\n cls.length_to_eps[length] = {\n 'current_idx': 0,\n 'ep_list': [(ep_idx, episode)],\n 'bucket_complete': False\n }\n 
cls.ep_indices.add(ep_idx)\n if ep_idx == caller.dataset.num_episodes() - 1:\n consolidate(caller)\n with cls.add_to_cache_cv:\n cls.cache_filled_cv.notify_all()\n\n @wraps(function)\n def wrapper(*args):\n caller = args[0]\n batch_sort = caller.batch_sort\n batch_cache_type = caller.batch_cache_type\n bsz = caller.bsz\n if not batch_sort or not caller.datatype.startswith('train'):\n return function(*args)\n # If Loader, put episodes in cache\n if isinstance(caller, LoaderProcess):\n with cls.add_to_cache_cv:\n while (get_cache_size() >= max_cache_size and\n len(get_available_buckets(bsz)) > 0):\n cls.cache_filled_cv.notify_all()\n cls.add_to_cache_cv.wait()\n idx_and_batch = function(*args)\n if idx_and_batch is None:\n return None\n for ep_index, ep in idx_and_batch[1]:\n put_in_cache(ep_index, ep, caller)\n return idx_and_batch\n # If teacher, return batch of episodes\n else:\n teacher = caller\n num_batches = teacher.num_batches\n while True:\n with cls.cache_filled_cv:\n while (not cls.load_complete.value and\n (get_cache_size() <= min_cache_size or\n len(get_available_buckets(bsz)) == 0)):\n cls.add_to_cache_cv.notify()\n cls.cache_filled_cv.wait()\n available_buckets = get_available_buckets(bsz)\n if cls.load_complete.value and batch_cache_type == 'pop':\n return teacher.batch_idx + 1, random.choice(cls.batches)\n batch = None\n available_buckets = get_available_buckets(bsz)\n if len(available_buckets) != 0:\n # Pick length index at random\n length = random.choice(list(available_buckets.keys()))\n with cls.cache_lock:\n current_idx = cls.length_to_eps[length]['current_idx']\n ep_list = cls.length_to_eps[length]['ep_list']\n num_eps = len(ep_list)\n if num_eps - current_idx >= bsz:\n if batch_cache_type == 'pop':\n batch = ep_list[:bsz]\n cls.length_to_eps[length]['ep_list'] = ep_list[bsz:]\n else:\n batch = ep_list[current_idx: current_idx + bsz]\n cls.length_to_eps[length]['current_idx'] = (\n current_idx + bsz\n )\n elif cls.load_complete.value and num_eps > 0:\n if batch_cache_type == 'pop':\n batch = ep_list\n elif num_eps - current_idx > 0:\n batch = ep_list[current_idx:]\n cls.length_to_eps[length]['current_idx'] = \\\n num_eps - 1\n cls.length_to_eps[length]['bucket_complete'] = True\n\n if batch is not None:\n if batch_cache_type == 'pop':\n with cls.batches_lock:\n cls.batches.append(batch)\n elif teacher.batch_idx + 1 >= num_batches:\n reset()\n return teacher.batch_idx + 1, batch\n\n return wrapper\n\n\ndef ep_length(val):\n '''Determines the length of an episode, given the specified value'''\n if isinstance(val, (int, bytes, bool)):\n return 1\n if isinstance(val, str):\n return len(val.replace('\\n', ' ').split(' '))\n if isinstance(val, (collections.Mapping,\n collections.Sequence,\n torch.Tensor)):\n if (isinstance(val, collections.Mapping) and\n val.get('deserialized_tensor', False)):\n return len(val['value'])\n return len(val)\n\n\n# Get Datasets from the options\ndef get_dataset_classes(opt):\n \"\"\" To use a custom dataset (as opposed to the StreamDataset or ParlAIDataset),\n you can subclass the pytorch Dataset class and specify its\n location on the command line.\n\n For example, the VQA v1 task provides a custom dataset, which can\n be specified on the command line as follows:\n ``-pytd vqa_v1:VQADataset``\n\n Note that if the dataset is named ``DefaultDataset``, then you do\n not need to specify its name following the colon; e.g., it\n would just be:\n ``-pytd vqa_v1``\n \"\"\"\n if 'stream' in opt.get('datatype'):\n default_dataset = StreamDataset\n else:\n 
default_dataset = ParlAIDataset\n dataset_name = opt.get('pytorch_teacher_dataset')\n task_name = opt.get('pytorch_teacher_task')\n datasets = []\n if task_name is not None:\n datasets += [\n (default_dataset, default_collate, task)\n for task in task_name.split(',')\n ]\n if not dataset_name:\n return datasets\n sps = [d.strip() for d in dataset_name.split(',')]\n for sp in sps:\n full_task_name = sp\n repo = 'parlai'\n if sp.startswith('internal:'):\n # To switch to local repo, useful for non-public projects\n # (make a directory called 'parlai_internal' with your private agents)\n repo = 'parlai_internal'\n sp = sp[9:]\n sp = sp.split(':')\n if '.' in sp[0]:\n module_name = sp[0]\n else:\n dataset = sp[0].lower()\n module_name = '{}.tasks.{}.agents'.format(repo, dataset)\n if len(sp) > 1:\n sp[1] = sp[1][0].upper() + sp[1][1:]\n dataset = sp[1]\n if '.' not in sp[0] and 'Dataset' not in dataset:\n # Reformat from underscore to CamelCase and append \"Dataset\" to\n # class name by default if a complete path is not given.\n words = dataset.split('_')\n teacher_name = ''\n for w in words:\n teacher_name += (w[0].upper() + w[1:])\n dataset = teacher_name + 'Dataset'\n else:\n dataset = 'DefaultDataset'\n my_module = importlib.import_module(module_name)\n dataset_class = getattr(my_module, dataset)\n\n collate = default_collate\n if hasattr(dataset_class, 'collate'):\n collate = dataset_class.collate\n elif opt.get('model', False):\n agent_class = get_agent_module(opt.get('model'))\n if hasattr(agent_class, 'collate'):\n collate = agent_class.collate\n datasets.append((dataset_class, collate, full_task_name))\n return datasets\n\n\nclass LoaderProcess(Thread):\n \"\"\"A background process that submits jobs to the DataLoader\n to load examples into cache\n \"\"\"\n def __init__(self, opt):\n super().__init__(daemon=True)\n dataset_classes = get_dataset_classes(opt)\n if len(dataset_classes) > 1:\n datasets = []\n for class_name, collate_fn, task_name in dataset_classes:\n opt['pytorch_teacher_task'] = task_name\n opt['task'] = task_name\n datasets.append(class_name(opt))\n self.collate = collate_fn\n self.dataset = ParlAIConcatDataset(datasets)\n else:\n class_name, self.collate, task_name = dataset_classes[0]\n self.dataset = class_name(opt)\n self.bsz = opt.get('batchsize', 1)\n self.num_workers = opt.get('num_workers', 4)\n self.dataloader = DataLoader(\n self.dataset,\n batch_size=self.bsz,\n shuffle=False,\n sampler=sampler.SequentialSampler(self.dataset),\n num_workers=self.num_workers,\n collate_fn=self.collate,\n pin_memory=False,\n drop_last=False,\n )\n self.datatype = opt.get('datatype')\n self.data = enumerate(self.dataloader)\n self.batch_sort = opt.get('pytorch_teacher_batch_sort')\n self.batch_cache_type = opt.get('batch_sort_cache_type')\n self.batch_length_range = opt.get('batch_length_range')\n self.batch_sort_field = opt.get('batch_sort_field')\n\n def run(self):\n while True:\n idx_and_batch = self.load_next()\n if idx_and_batch is None:\n return\n\n @BatchSortCache.batch_cache\n def load_next(self):\n try:\n return next(self.data)\n except StopIteration:\n return None\n\n\n\"\"\"\n Collating, deserializing, processing batches\n\"\"\"\nTORCH_DTYPES = [torch.float32, torch.float64, torch.float16, torch.uint8,\n torch.int8, torch.int16, torch.int32, torch.int64]\nSTR_TO_TORCH_DTYPE = {str(d): d for d in TORCH_DTYPES}\n\n\ndef default_collate(batch):\n \"\"\"\n Default collate function, used for ParlAIDataset and StreamDataset\n \"\"\"\n new_batch = []\n for b in batch:\n idx 
= b[0]\n if type(b[1]) is list:\n ep = b[1][0]\n else:\n ep = b[1]\n new_batch.append((idx, ep))\n return new_batch\n\n\ndef deserialize(obj):\n \"\"\"\n Deserializes lists into Tensors\n \"\"\"\n for key in obj:\n if type(obj[key]) is dict and obj[key].get('deserialized_tensor', False):\n dtype = STR_TO_TORCH_DTYPE[obj[key]['type']]\n val = obj[key]['value']\n del obj[key]\n obj[key] = torch.as_tensor(val, dtype=dtype)\n return obj\n\n\ndef process(ex_or_batch):\n \"\"\"\n Process examples/batches, i.e. deserialize if necessary\n \"\"\"\n if type(ex_or_batch) is list:\n if all([ep.get('preprocessed') for ep in ex_or_batch]):\n ex_or_batch = [deserialize(ep) for ep in ex_or_batch]\n else:\n if ex_or_batch.get('preprocessed'):\n ex_or_batch = deserialize(ex_or_batch)\n return ex_or_batch\n\n\n\"\"\"\n ParlAI Implementations of Pytorch Datasets\n\"\"\"\n\n\nclass StreamDataset(Dataset):\n \"\"\"A Pytorch Dataset utilizing streaming\"\"\"\n def __init__(self, opt):\n self.opt = opt\n self.datatype = opt.get('datatype')\n self.datapath = build_data(self.opt)\n self.length_datafile = os.path.join(self.datapath, 'data_length')\n self.char_index_file = os.path.join(self.datapath, 'char_index')\n self.datafile = os.path.join(self.datapath, 'data')\n self.training = self.datatype.startswith('train')\n self.ordered = ('ordered' in self.datatype or\n ('stream' in self.datatype and not opt.get('shuffle')))\n self._load_lens()\n\n def __getitem__(self, index):\n if self.ordered or not self.training:\n if not hasattr(self, 'data_gen'):\n self.data_gen = self._read_episode()\n while True:\n idx, ep = next(self.data_gen)\n if idx == index:\n return (index, ep)\n else:\n episode = []\n episode_done = False\n with open(self.datafile) as f:\n ex_offset = self.char_index[index]\n f.seek(ex_offset)\n while not episode_done:\n example = json.loads(f.readline())\n episode.append(example)\n episode_done = example['episode_done']\n return (index, episode)\n\n def __len__(self):\n return self.num_episodes()\n\n def _load_lens(self):\n with open(self.length_datafile) as length:\n lengths = json.load(length)\n self.num_eps = lengths['num_eps']\n self.num_exs = lengths['num_exs']\n with open(self.char_index_file) as char:\n self.char_index = json.load(char)\n\n def _data_generator(self):\n while True:\n for idx, episode in self._read_episode():\n yield idx, episode\n\n def _read_episode(self):\n read = open(self.datafile)\n episode = []\n for idx, line in enumerate(read):\n example = json.loads(line)\n episode.append(example)\n if example['episode_done']:\n yield idx, episode\n episode = []\n read.close()\n\n def num_episodes(self):\n return self.num_eps\n\n def num_examples(self):\n return self.num_exs\n\n\nclass ParlAIDataset(Dataset):\n \"\"\"A Pytorch Dataset, for random sampling\"\"\"\n def __init__(self, opt):\n self.opt = opt\n self.datatype = opt.get('datatype')\n self.datapath = build_data(self.opt)\n self.length_datafile = os.path.join(self.datapath, 'data_length')\n self.datafile = os.path.join(self.datapath, 'data')\n self.training = self.datatype.startswith('train')\n self._load_lens()\n self._setup_data()\n\n def __getitem__(self, index):\n return index, self.data[index]\n\n def __len__(self):\n return self.num_episodes()\n\n def _load_lens(self):\n with open(self.length_datafile) as length:\n lengths = json.load(length)\n self.num_eps = lengths['num_eps']\n self.num_exs = lengths['num_exs']\n\n def _setup_data(self):\n self.data = []\n with open(self.datafile) as f:\n for line in f:\n 
self.data.append(json.loads(line))\n\n def num_episodes(self):\n return self.num_eps\n\n def num_examples(self):\n return self.num_exs\n\n\nclass ParlAIConcatDataset(ConcatDataset):\n \"\"\"Override to set num_eps and num_exs\"\"\"\n\n @lru_cache(maxsize=1)\n def num_episodes(self):\n return sum(d.num_episodes() for d in self.datasets)\n\n @lru_cache(maxsize=1)\n def num_examples(self):\n return sum(d.num_examples() for d in self.datasets)\n\n\nclass PytorchDataTeacher(FixedDialogTeacher):\n \"\"\"\n A teacher that loads data using Pytorch Datasets. For details on how\n to use, please follow the tutorial here:\n http://parl.ai/static/docs/tutorial_worlds.html#multiprocessed-pytorch-dataloader\n \"\"\"\n def __init__(self, opt, shared=None):\n opt['batch_sort'] = False\n super().__init__(opt, shared)\n self.use_batch_act = self.bsz > 1\n self.num_workers = opt['numworkers']\n self.batch_sort = opt.get('pytorch_teacher_batch_sort') and \\\n 'train' in self.datatype\n self.batch_cache_type = opt.get('batch_sort_cache_type')\n self.batch_sort_field = opt.get('batch_sort_field')\n # One can specify a collate function to use for preparing a batch\n self.opt = opt.copy()\n self.is_shared = shared is not None\n dataset_classes = self.get_dataset_class(opt)\n self.ordered = ('ordered' in self.datatype or\n ('stream' in self.datatype and not opt.get('shuffle')))\n if self.ordered:\n # force index for ordered, so that we see every example\n self.batch_cache_type = 'index'\n\n if not shared:\n BatchSortCache.create()\n if len(dataset_classes) > 1:\n datasets = []\n for class_name, collate_fn, task_name in dataset_classes:\n dataset_opt = opt.copy()\n dataset_opt['pytorch_teacher_task'] = task_name\n dataset_opt['task'] = task_name\n datasets.append(class_name(dataset_opt))\n self.collate_fn = collate_fn\n self.id = ','.join([d[2] for d in dataset_classes])\n self.dataset = ParlAIConcatDataset(datasets)\n else:\n class_name, self.collate_fn, task_name = dataset_classes[0]\n self.id = task_name\n self.dataset = class_name(opt)\n if self.ordered or not self.training:\n data_sampler = sampler.SequentialSampler(self.dataset)\n else:\n data_sampler = sampler.RandomSampler(self.dataset)\n\n self.pytorch_dataloader = DataLoader(\n self.dataset,\n batch_size=self.bsz,\n sampler=data_sampler,\n num_workers=self.num_workers,\n collate_fn=self.collate_fn,\n pin_memory=False,\n drop_last=False,\n )\n\n self.lastYs = [None] * self.bsz\n if self.batch_sort:\n self.loader_process = LoaderProcess(opt)\n self.loader_process.start()\n self.data = enumerate(self.pytorch_dataloader)\n else:\n self.dataset = shared['dataset']\n self.pytorch_dataloader = shared['pytorch_dataloader']\n self.lastYs = shared['lastYs']\n self.data = shared['data']\n self.id = shared['id']\n\n self.num_batches = math.ceil(self.dataset.num_episodes() / self.bsz)\n self.reset()\n\n def get_dataset_class(self, opt):\n return get_dataset_classes(opt)\n\n def reset(self):\n \"\"\"Reset the dialog so that it is at the start of the epoch,\n and all metrics are reset.\n \"\"\"\n super().reset()\n self.reset_data()\n\n def reset_data(self):\n if not self.is_shared:\n self.data = enumerate(self.pytorch_dataloader)\n self.lastY = None\n self.epochDone = False\n self.episode = None\n self.episode_done = True\n self.episode_idx = 0\n self.batch_idx = 0\n\n def share(self):\n shared = super().share()\n shared['pytorch_dataloader'] = self.pytorch_dataloader\n shared['dataset'] = self.dataset\n shared['data'] = self.data\n shared['id'] = self.id\n return 
shared\n\n def next_example(self):\n if self.epochDone:\n if not self.training:\n return {'episode_done': True, 'id': self.getID()}, True\n else:\n # Reset the data because it is streaming data\n self.reset_data()\n if self.episode_done:\n try:\n self.episode_idx, episode = next(self.data)\n if self.collate_fn == default_collate:\n episode = [ex[1] for ex in episode]\n self.episode = process(episode)\n self.entry_idx = 0\n epoch_done = False\n except StopIteration:\n ex = {'episode_done': True, 'id': self.getID()}\n epoch_done = True\n else:\n self.entry_idx += 1\n\n if not epoch_done:\n ex = self.episode[self.entry_idx]\n self.episode_done = ex['episode_done']\n if (self.episode_done and\n self.episode_idx + self.bsz >= self.num_episodes()):\n epoch_done = True\n return ex, epoch_done\n\n @BatchSortCache.batch_cache\n def get_next_batch(self):\n # employs a cache to see if there is a batch of equal size ready\n batch = next(self.data)\n return batch\n\n def next_batch(self):\n if self.epochDone:\n if not self.training:\n return [{'episode_done': True, 'id': self.getID()}] * self.bsz\n else:\n # Reset the data because it is streaming data\n self.reset_data()\n try:\n self.batch_idx, batch = self.get_next_batch()\n if self.collate_fn == default_collate:\n batch = [b[1] for b in batch]\n batch = process(batch)\n epoch_done = False\n except StopIteration:\n batch = [{'episode_done': True, 'id': self.getID()}] * self.bsz\n epoch_done = True\n if not epoch_done and self.batch_idx == self.num_batches:\n epoch_done = True\n self.epochDone = epoch_done\n return batch\n\n def num_episodes(self):\n \"\"\"Get the number of episodes in this dataset.\"\"\"\n return self.dataset.num_episodes()\n\n def num_examples(self):\n \"\"\"Get the total number of examples in this dataset.\"\"\"\n return self.dataset.num_examples()\n\n def act(self):\n \"\"\"Send new dialog message.\"\"\"\n action = super().act()\n self.lastY = action.get('labels', action.get('eval_labels', None))\n return action\n\n\nclass DefaultTeacher(PytorchDataTeacher):\n pass\n", "path": "parlai/core/pytorch_data_teacher.py"}]} |
gh_patches_debug_1207 | rasdani/github-patches | git_diff | litestar-org__litestar-1327 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Rule exceptions using noqa should be very specific
I think this would ignore all linting here instead of maybe the one we want. May be best to be specific?
https://beta.ruff.rs/docs/configuration/#error-suppression
_Originally posted by @JacobCoffee in https://github.com/starlite-api/starlite/pull/1323#discussion_r1135989720_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `starlite/contrib/sqlalchemy_1/config.py`
Content:
```
1 from __future__ import annotations
2
3 from dataclasses import asdict, dataclass, field
4 from typing import TYPE_CHECKING, Any, Callable, Literal, cast
5
6 from starlite.exceptions import ImproperlyConfiguredException, MissingDependencyException
7 from starlite.logging.config import BaseLoggingConfig, LoggingConfig
8 from starlite.serialization import decode_json, encode_json
9 from starlite.utils import AsyncCallable
10
11 try:
12 import sqlalchemy # noqa: F401
13 except ImportError as e:
14 raise MissingDependencyException("sqlalchemy is not installed") from e
15
16 from sqlalchemy import create_engine
17 from sqlalchemy.ext.asyncio import AsyncEngine, AsyncSession, create_async_engine
18 from sqlalchemy.orm import Query, Session, sessionmaker
19
20 __all__ = ("SQLAlchemyConfig", "SQLAlchemyEngineConfig", "SQLAlchemySessionConfig")
21
22
23 if TYPE_CHECKING:
24 from sqlalchemy.engine import Engine
25 from sqlalchemy.future import Engine as FutureEngine
26 from sqlalchemy.pool import Pool
27
28 from starlite.datastructures.state import State
29 from starlite.types import BeforeMessageSendHookHandler, Message, Scope
30
31 from .types import SessionMakerInstanceProtocol, SessionMakerTypeProtocol
32
33 IsolationLevel = Literal["AUTOCOMMIT", "READ COMMITTED", "READ UNCOMMITTED", "REPEATABLE READ", "SERIALIZABLE"]
34
35 SESSION_SCOPE_KEY = "_sql_alchemy_db_session"
36 SESSION_TERMINUS_ASGI_EVENTS = {"http.response.start", "http.disconnect", "websocket.disconnect", "websocket.close"}
37
38
39 def serializer(value: Any) -> str:
40 """Serialize JSON field values.
41
42 Args:
43 value: Any json serializable value.
44
45 Returns:
46 JSON string.
47 """
48 return encode_json(value).decode("utf-8")
49
50
51 async def default_before_send_handler(message: "Message", _: "State", scope: "Scope") -> None:
52 """Handle closing and cleaning up sessions before sending.
53
54 Args:
55 message: ASGI-``Message``
56 _: A ``State`` (not used)
57 scope: An ASGI-``Scope``
58
59 Returns:
60 None
61 """
62 session = cast("Session | AsyncSession | None", scope.get(SESSION_SCOPE_KEY))
63 if session and message["type"] in SESSION_TERMINUS_ASGI_EVENTS:
64 if isinstance(session, AsyncSession):
65 await session.close()
66 else:
67 session.close()
68 del scope[SESSION_SCOPE_KEY] # type: ignore
69
70
71 @dataclass
72 class SQLAlchemySessionConfig:
73 """Configuration for a SQLAlchemy-Session."""
74
75 autocommit: bool | None = field(default=None)
76 autoflush: bool | None = field(default=None)
77 bind: Any | None = field(default=None)
78 binds: Any | None = field(default=None)
79 enable_baked_queries: bool | None = field(default=None)
80 expire_on_commit: bool = field(default=False)
81 future: bool | None = field(default=None)
82 info: dict[str, Any] | None = field(default=None)
83 query_cls: type[Query] | None = field(default=None)
84 twophase: bool | None = field(default=None)
85
86
87 @dataclass
88 class SQLAlchemyEngineConfig:
89 """Configuration for SQLAlchemy's :class`Engine <sqlalchemy.engine.Engine>`.
90
91 For details see: https://docs.sqlalchemy.org/en/14/core/engines.html
92 """
93
94 connect_args: dict[str, Any] | None = field(default=None)
95 echo: bool | None = field(default=None)
96 echo_pool: bool | None = field(default=None)
97 enable_from_linting: bool | None = field(default=None)
98 future: bool = field(default=True)
99 hide_parameters: bool | None = field(default=None)
100 isolation_level: IsolationLevel | None = field(default=None)
101 json_deserializer: Callable[[str], Any] = field(default=decode_json)
102 json_serializer: Callable[[Any], str] = field(default=serializer)
103 label_length: int | None = field(default=None)
104 listeners: Any = field(default=None)
105 logging_level: int | str | None = field(default=None)
106 logging_name: str | None = field(default=None)
107 max_identifier_length: int | None = field(default=None)
108 max_overflow: int | None = field(default=None)
109 module: Any = field(default=None)
110 paramstyle: Literal["qmark", "numeric", "named", "format", "pyformat"] | None = field(default=None)
111 plugins: list[str] | None = field(default=None)
112 pool: Pool | None = field(default=None)
113 pool_logging_name: str | None = field(default=None)
114 pool_pre_ping: bool | None = field(default=None)
115 pool_recycle: int | None = field(default=None)
116 pool_reset_on_return: Literal["rollback", "commit"] | None = field(default=None)
117 pool_size: int | None = field(default=None)
118 pool_timeout: int | None = field(default=None)
119 pool_use_lifo: bool | None = field(default=None)
120 poolclass: type[Pool] | None = field(default=None)
121 query_cache_size: int | None = field(default=None)
122 strategy: str | None = field(default=None)
123
124
125 @dataclass
126 class SQLAlchemyConfig:
127 """Configuration for SQLAlchemy's :class:`sessionmaker <sqlalchemy.orm.sessionmaker>`.
128
129 For details see: https://docs.sqlalchemy.org/en/14/orm/session_api.html
130 """
131
132 connection_string: str | None = field(default=None)
133 """Database connection string in one of the formats supported by SQLAlchemy.
134
135 Notes:
136 - For async connections, the connection string must include the correct async prefix.
137 e.g. ``'postgresql+asyncpg://...'`` instead of ``'postgresql://'``, and for sync connections its the opposite.
138
139 """
140 use_async_engine: bool = field(default=True)
141 """Dictate whether the engine created is an async connection or not.
142
143 Notes:
144 - This option must correlate to the type of ``connection_string``. That is, an async connection string required an
145 async connection and vice versa.
146
147 """
148 create_async_engine_callable: Callable[[str], AsyncEngine] = field(default=create_async_engine)
149 """Callable that creates an :class:`AsyncEngine <sqlalchemy.ext.asyncio.AsyncEngine>` instance or instance of its
150 subclass.
151 """
152 create_engine_callable: Callable[[str], Engine | FutureEngine] = field(default=create_engine)
153 """Callable that creates an :class:`Engine <sqlalchemy.engine.Engine>` or ``FutureEngine`` instance or instance of its
154 subclass."""
155 dependency_key: str = field(default="db_session")
156 """Key to use for the dependency injection of database sessions."""
157 engine_app_state_key: str = field(default="db_engine")
158 """Key under which to store the SQLAlchemy engine in the application :class:`State <.datastructures.State>`
159 instance.
160 """
161 engine_config: SQLAlchemyEngineConfig = field(default_factory=SQLAlchemyEngineConfig)
162 """Configuration for the SQLAlchemy engine.
163
164 The configuration options are documented in the SQLAlchemy documentation.
165 """
166 set_json_serializers: bool = field(default=True)
167 """A boolean flag dictating whether to set ``msgspec`` based serializer/deserializer functions.
168
169 Notes:
170 - Some databases or some versions of some databases do not have a JSON column type. E.g. some older versions of
171 SQLite for example. In this case this flag should be false or an error will be raised by SQLAlchemy.
172
173 """
174 session_class: type[Session] | type[AsyncSession] | None = field(default=None)
175 """The session class to use.
176
177 If not set, the session class will default to :class:`sqlalchemy.orm.Session` for sync connections and
178 :class:`sqlalchemy.ext.asyncio.AsyncSession` for async ones.
179 """
180 session_config: SQLAlchemySessionConfig = field(default_factory=SQLAlchemySessionConfig)
181 """Configuration options for the ``sessionmaker``.
182
183 The configuration options are documented in the SQLAlchemy documentation.
184 """
185 session_maker_class: type[SessionMakerTypeProtocol] = field(default=sessionmaker)
186 """Sessionmaker class to use."""
187 session_maker_app_state_key: str = field(default="session_maker_class")
188 """Key under which to store the SQLAlchemy ``sessionmaker`` in the application
189 :class:`State <.datastructures.State>` instance.
190 """
191 session_maker_instance: SessionMakerInstanceProtocol | None = field(default=None)
192 """Optional sessionmaker to use.
193
194 If set, the plugin will use the provided instance rather than instantiate a sessionmaker.
195 """
196 engine_instance: Engine | FutureEngine | AsyncEngine | None = field(default=None)
197 """Optional engine to use.
198
199 If set, the plugin will use the provided instance rather than instantiate an engine.
200 """
201 before_send_handler: BeforeMessageSendHookHandler = field(default=default_before_send_handler)
202 """Handler to call before the ASGI message is sent.
203
204 The handler should handle closing the session stored in the ASGI scope, if its still open, and committing and
205 uncommitted data.
206 """
207
208 def __post_init__(self) -> None:
209 if self.connection_string is None and self.engine_instance is None:
210 raise ImproperlyConfiguredException("One of 'connection_string' or 'engine_instance' must be provided.")
211
212 if self.connection_string is not None and self.engine_instance is not None:
213 raise ImproperlyConfiguredException("Only one of 'connection_string' or 'engine_instance' can be provided.")
214
215 self.before_send_handler = AsyncCallable(self.before_send_handler) # type: ignore
216
217 @property
218 def engine_config_dict(self) -> dict[str, Any]:
219 """Return the engine configuration as a dict.
220
221 Returns:
222 A string keyed dict of config kwargs for the SQLAlchemy ``create_engine`` function.
223 """
224 engine_excluded_fields: set[str] = {"future", "logging_level"} if self.use_async_engine else {"logging_level"}
225
226 if not self.set_json_serializers:
227 engine_excluded_fields.update({"json_deserializer", "json_serializer"})
228
229 return {
230 k: v for k, v in asdict(self.engine_config).items() if v is not None and k not in engine_excluded_fields
231 }
232
233 @property
234 def engine(self) -> Engine | FutureEngine | AsyncEngine:
235 """Return an engine. If none exists yet, create one.
236
237 Returns:
238 Getter that returns the engine instance used by the plugin.
239 """
240 if not self.engine_instance:
241 create_engine_callable = (
242 self.create_async_engine_callable if self.use_async_engine else self.create_engine_callable
243 )
244 self.engine_instance = create_engine_callable(
245 self.connection_string, **self.engine_config_dict # type:ignore[arg-type]
246 )
247 return cast("Engine | FutureEngine | AsyncEngine", self.engine_instance)
248
249 @property
250 def session_maker(self) -> sessionmaker:
251 """Get a sessionmaker. If none exists yet, create one.
252
253 Returns:
254 Getter that returns the session_maker instance used by the plugin.
255 """
256 if not self.session_maker_instance:
257 session_maker_kwargs = {
258 k: v
259 for k, v in asdict(self.session_config).items()
260 if v is not None and ((self.use_async_engine and k != "future") or not self.use_async_engine)
261 }
262 session_class = self.session_class or (AsyncSession if self.use_async_engine else Session)
263 self.session_maker_instance = self.session_maker_class(
264 self.engine, class_=session_class, **session_maker_kwargs
265 )
266 return cast("sessionmaker", self.session_maker_instance)
267
268 def create_db_session_dependency(self, state: State, scope: Scope) -> Union[Session, AsyncSession]: # noqa
269 """Create a session instance.
270
271 Args:
272 state: The ``Starlite.state`` instance.
273 scope: The current connection's scope.
274
275 Returns:
276 A session instance T.
277 """
278 session = scope.get(SESSION_SCOPE_KEY)
279 if not session:
280 session_maker = cast("sessionmaker", state[self.session_maker_app_state_key])
281 session = scope[SESSION_SCOPE_KEY] = session_maker() # type: ignore
282 return cast("Session | AsyncSession", session)
283
284 def update_app_state(self, state: State) -> None:
285 """Create a DB engine and stores it in the application state.
286
287 Args:
288 state: The ``Starlite.state`` instance.
289
290 Returns:
291 None
292 """
293
294 state[self.engine_app_state_key] = self.engine
295 state[self.session_maker_app_state_key] = self.session_maker
296
297 async def on_shutdown(self, state: State) -> None:
298 """Disposes of the SQLAlchemy engine.
299
300 Args:
301 state: The ``Starlite.state`` instance.
302
303 Returns:
304 None
305 """
306 engine = cast("Engine | AsyncEngine", state[self.engine_app_state_key])
307 if isinstance(engine, AsyncEngine):
308 await engine.dispose()
309 else:
310 engine.dispose()
311 del state[self.engine_app_state_key]
312
313 def config_sql_alchemy_logging(self, logging_config: BaseLoggingConfig | None) -> None:
314 """Add the SQLAlchemy loggers to the logging config.
315
316 Notes:
317 - Currently only works with :class:`LoggingConfig <.logging.config.LoggingConfig>`.
318
319 Args:
320 logging_config: Logging config.
321
322 Returns:
323 None.
324 """
325 if isinstance(logging_config, LoggingConfig):
326 logger_settings = {
327 "level": self.engine_config.logging_level or "WARNING",
328 "handlers": logging_config.loggers["starlite"]["handlers"],
329 }
330 for logger in (
331 "sqlalchemy",
332 self.engine_config.logging_name or "sqlalchemy.engine",
333 self.engine_config.pool_logging_name or "sqlalchemy.pool",
334 ):
335 if logger not in logging_config.loggers:
336 logging_config.loggers[logger] = logger_settings
337
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/starlite/contrib/sqlalchemy_1/config.py b/starlite/contrib/sqlalchemy_1/config.py
--- a/starlite/contrib/sqlalchemy_1/config.py
+++ b/starlite/contrib/sqlalchemy_1/config.py
@@ -265,7 +265,7 @@
)
return cast("sessionmaker", self.session_maker_instance)
- def create_db_session_dependency(self, state: State, scope: Scope) -> Union[Session, AsyncSession]: # noqa
+ def create_db_session_dependency(self, state: State, scope: Scope) -> Union[Session, AsyncSession]: # noqa: F821
"""Create a session instance.
Args:
| {"golden_diff": "diff --git a/starlite/contrib/sqlalchemy_1/config.py b/starlite/contrib/sqlalchemy_1/config.py\n--- a/starlite/contrib/sqlalchemy_1/config.py\n+++ b/starlite/contrib/sqlalchemy_1/config.py\n@@ -265,7 +265,7 @@\n )\n return cast(\"sessionmaker\", self.session_maker_instance)\n \n- def create_db_session_dependency(self, state: State, scope: Scope) -> Union[Session, AsyncSession]: # noqa\n+ def create_db_session_dependency(self, state: State, scope: Scope) -> Union[Session, AsyncSession]: # noqa: F821\n \"\"\"Create a session instance.\n \n Args:\n", "issue": "Rule exceptions using noqa should be very specific \nI think this would ignore all linting here instead of maybe the one we want. May be best to be specific?\r\n\r\nhttps://beta.ruff.rs/docs/configuration/#error-suppression\r\n\r\n_Originally posted by @JacobCoffee in https://github.com/starlite-api/starlite/pull/1323#discussion_r1135989720_\r\n \n", "before_files": [{"content": "from __future__ import annotations\n\nfrom dataclasses import asdict, dataclass, field\nfrom typing import TYPE_CHECKING, Any, Callable, Literal, cast\n\nfrom starlite.exceptions import ImproperlyConfiguredException, MissingDependencyException\nfrom starlite.logging.config import BaseLoggingConfig, LoggingConfig\nfrom starlite.serialization import decode_json, encode_json\nfrom starlite.utils import AsyncCallable\n\ntry:\n import sqlalchemy # noqa: F401\nexcept ImportError as e:\n raise MissingDependencyException(\"sqlalchemy is not installed\") from e\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.ext.asyncio import AsyncEngine, AsyncSession, create_async_engine\nfrom sqlalchemy.orm import Query, Session, sessionmaker\n\n__all__ = (\"SQLAlchemyConfig\", \"SQLAlchemyEngineConfig\", \"SQLAlchemySessionConfig\")\n\n\nif TYPE_CHECKING:\n from sqlalchemy.engine import Engine\n from sqlalchemy.future import Engine as FutureEngine\n from sqlalchemy.pool import Pool\n\n from starlite.datastructures.state import State\n from starlite.types import BeforeMessageSendHookHandler, Message, Scope\n\n from .types import SessionMakerInstanceProtocol, SessionMakerTypeProtocol\n\nIsolationLevel = Literal[\"AUTOCOMMIT\", \"READ COMMITTED\", \"READ UNCOMMITTED\", \"REPEATABLE READ\", \"SERIALIZABLE\"]\n\nSESSION_SCOPE_KEY = \"_sql_alchemy_db_session\"\nSESSION_TERMINUS_ASGI_EVENTS = {\"http.response.start\", \"http.disconnect\", \"websocket.disconnect\", \"websocket.close\"}\n\n\ndef serializer(value: Any) -> str:\n \"\"\"Serialize JSON field values.\n\n Args:\n value: Any json serializable value.\n\n Returns:\n JSON string.\n \"\"\"\n return encode_json(value).decode(\"utf-8\")\n\n\nasync def default_before_send_handler(message: \"Message\", _: \"State\", scope: \"Scope\") -> None:\n \"\"\"Handle closing and cleaning up sessions before sending.\n\n Args:\n message: ASGI-``Message``\n _: A ``State`` (not used)\n scope: An ASGI-``Scope``\n\n Returns:\n None\n \"\"\"\n session = cast(\"Session | AsyncSession | None\", scope.get(SESSION_SCOPE_KEY))\n if session and message[\"type\"] in SESSION_TERMINUS_ASGI_EVENTS:\n if isinstance(session, AsyncSession):\n await session.close()\n else:\n session.close()\n del scope[SESSION_SCOPE_KEY] # type: ignore\n\n\n@dataclass\nclass SQLAlchemySessionConfig:\n \"\"\"Configuration for a SQLAlchemy-Session.\"\"\"\n\n autocommit: bool | None = field(default=None)\n autoflush: bool | None = field(default=None)\n bind: Any | None = field(default=None)\n binds: Any | None = field(default=None)\n enable_baked_queries: bool | 
None = field(default=None)\n expire_on_commit: bool = field(default=False)\n future: bool | None = field(default=None)\n info: dict[str, Any] | None = field(default=None)\n query_cls: type[Query] | None = field(default=None)\n twophase: bool | None = field(default=None)\n\n\n@dataclass\nclass SQLAlchemyEngineConfig:\n \"\"\"Configuration for SQLAlchemy's :class`Engine <sqlalchemy.engine.Engine>`.\n\n For details see: https://docs.sqlalchemy.org/en/14/core/engines.html\n \"\"\"\n\n connect_args: dict[str, Any] | None = field(default=None)\n echo: bool | None = field(default=None)\n echo_pool: bool | None = field(default=None)\n enable_from_linting: bool | None = field(default=None)\n future: bool = field(default=True)\n hide_parameters: bool | None = field(default=None)\n isolation_level: IsolationLevel | None = field(default=None)\n json_deserializer: Callable[[str], Any] = field(default=decode_json)\n json_serializer: Callable[[Any], str] = field(default=serializer)\n label_length: int | None = field(default=None)\n listeners: Any = field(default=None)\n logging_level: int | str | None = field(default=None)\n logging_name: str | None = field(default=None)\n max_identifier_length: int | None = field(default=None)\n max_overflow: int | None = field(default=None)\n module: Any = field(default=None)\n paramstyle: Literal[\"qmark\", \"numeric\", \"named\", \"format\", \"pyformat\"] | None = field(default=None)\n plugins: list[str] | None = field(default=None)\n pool: Pool | None = field(default=None)\n pool_logging_name: str | None = field(default=None)\n pool_pre_ping: bool | None = field(default=None)\n pool_recycle: int | None = field(default=None)\n pool_reset_on_return: Literal[\"rollback\", \"commit\"] | None = field(default=None)\n pool_size: int | None = field(default=None)\n pool_timeout: int | None = field(default=None)\n pool_use_lifo: bool | None = field(default=None)\n poolclass: type[Pool] | None = field(default=None)\n query_cache_size: int | None = field(default=None)\n strategy: str | None = field(default=None)\n\n\n@dataclass\nclass SQLAlchemyConfig:\n \"\"\"Configuration for SQLAlchemy's :class:`sessionmaker <sqlalchemy.orm.sessionmaker>`.\n\n For details see: https://docs.sqlalchemy.org/en/14/orm/session_api.html\n \"\"\"\n\n connection_string: str | None = field(default=None)\n \"\"\"Database connection string in one of the formats supported by SQLAlchemy.\n\n Notes:\n - For async connections, the connection string must include the correct async prefix.\n e.g. ``'postgresql+asyncpg://...'`` instead of ``'postgresql://'``, and for sync connections its the opposite.\n\n \"\"\"\n use_async_engine: bool = field(default=True)\n \"\"\"Dictate whether the engine created is an async connection or not.\n\n Notes:\n - This option must correlate to the type of ``connection_string``. 
That is, an async connection string required an\n async connection and vice versa.\n\n \"\"\"\n create_async_engine_callable: Callable[[str], AsyncEngine] = field(default=create_async_engine)\n \"\"\"Callable that creates an :class:`AsyncEngine <sqlalchemy.ext.asyncio.AsyncEngine>` instance or instance of its\n subclass.\n \"\"\"\n create_engine_callable: Callable[[str], Engine | FutureEngine] = field(default=create_engine)\n \"\"\"Callable that creates an :class:`Engine <sqlalchemy.engine.Engine>` or ``FutureEngine`` instance or instance of its\n subclass.\"\"\"\n dependency_key: str = field(default=\"db_session\")\n \"\"\"Key to use for the dependency injection of database sessions.\"\"\"\n engine_app_state_key: str = field(default=\"db_engine\")\n \"\"\"Key under which to store the SQLAlchemy engine in the application :class:`State <.datastructures.State>`\n instance.\n \"\"\"\n engine_config: SQLAlchemyEngineConfig = field(default_factory=SQLAlchemyEngineConfig)\n \"\"\"Configuration for the SQLAlchemy engine.\n\n The configuration options are documented in the SQLAlchemy documentation.\n \"\"\"\n set_json_serializers: bool = field(default=True)\n \"\"\"A boolean flag dictating whether to set ``msgspec`` based serializer/deserializer functions.\n\n Notes:\n - Some databases or some versions of some databases do not have a JSON column type. E.g. some older versions of\n SQLite for example. In this case this flag should be false or an error will be raised by SQLAlchemy.\n\n \"\"\"\n session_class: type[Session] | type[AsyncSession] | None = field(default=None)\n \"\"\"The session class to use.\n\n If not set, the session class will default to :class:`sqlalchemy.orm.Session` for sync connections and\n :class:`sqlalchemy.ext.asyncio.AsyncSession` for async ones.\n \"\"\"\n session_config: SQLAlchemySessionConfig = field(default_factory=SQLAlchemySessionConfig)\n \"\"\"Configuration options for the ``sessionmaker``.\n\n The configuration options are documented in the SQLAlchemy documentation.\n \"\"\"\n session_maker_class: type[SessionMakerTypeProtocol] = field(default=sessionmaker)\n \"\"\"Sessionmaker class to use.\"\"\"\n session_maker_app_state_key: str = field(default=\"session_maker_class\")\n \"\"\"Key under which to store the SQLAlchemy ``sessionmaker`` in the application\n :class:`State <.datastructures.State>` instance.\n \"\"\"\n session_maker_instance: SessionMakerInstanceProtocol | None = field(default=None)\n \"\"\"Optional sessionmaker to use.\n\n If set, the plugin will use the provided instance rather than instantiate a sessionmaker.\n \"\"\"\n engine_instance: Engine | FutureEngine | AsyncEngine | None = field(default=None)\n \"\"\"Optional engine to use.\n\n If set, the plugin will use the provided instance rather than instantiate an engine.\n \"\"\"\n before_send_handler: BeforeMessageSendHookHandler = field(default=default_before_send_handler)\n \"\"\"Handler to call before the ASGI message is sent.\n\n The handler should handle closing the session stored in the ASGI scope, if its still open, and committing and\n uncommitted data.\n \"\"\"\n\n def __post_init__(self) -> None:\n if self.connection_string is None and self.engine_instance is None:\n raise ImproperlyConfiguredException(\"One of 'connection_string' or 'engine_instance' must be provided.\")\n\n if self.connection_string is not None and self.engine_instance is not None:\n raise ImproperlyConfiguredException(\"Only one of 'connection_string' or 'engine_instance' can be provided.\")\n\n self.before_send_handler 
= AsyncCallable(self.before_send_handler) # type: ignore\n\n @property\n def engine_config_dict(self) -> dict[str, Any]:\n \"\"\"Return the engine configuration as a dict.\n\n Returns:\n A string keyed dict of config kwargs for the SQLAlchemy ``create_engine`` function.\n \"\"\"\n engine_excluded_fields: set[str] = {\"future\", \"logging_level\"} if self.use_async_engine else {\"logging_level\"}\n\n if not self.set_json_serializers:\n engine_excluded_fields.update({\"json_deserializer\", \"json_serializer\"})\n\n return {\n k: v for k, v in asdict(self.engine_config).items() if v is not None and k not in engine_excluded_fields\n }\n\n @property\n def engine(self) -> Engine | FutureEngine | AsyncEngine:\n \"\"\"Return an engine. If none exists yet, create one.\n\n Returns:\n Getter that returns the engine instance used by the plugin.\n \"\"\"\n if not self.engine_instance:\n create_engine_callable = (\n self.create_async_engine_callable if self.use_async_engine else self.create_engine_callable\n )\n self.engine_instance = create_engine_callable(\n self.connection_string, **self.engine_config_dict # type:ignore[arg-type]\n )\n return cast(\"Engine | FutureEngine | AsyncEngine\", self.engine_instance)\n\n @property\n def session_maker(self) -> sessionmaker:\n \"\"\"Get a sessionmaker. If none exists yet, create one.\n\n Returns:\n Getter that returns the session_maker instance used by the plugin.\n \"\"\"\n if not self.session_maker_instance:\n session_maker_kwargs = {\n k: v\n for k, v in asdict(self.session_config).items()\n if v is not None and ((self.use_async_engine and k != \"future\") or not self.use_async_engine)\n }\n session_class = self.session_class or (AsyncSession if self.use_async_engine else Session)\n self.session_maker_instance = self.session_maker_class(\n self.engine, class_=session_class, **session_maker_kwargs\n )\n return cast(\"sessionmaker\", self.session_maker_instance)\n\n def create_db_session_dependency(self, state: State, scope: Scope) -> Union[Session, AsyncSession]: # noqa\n \"\"\"Create a session instance.\n\n Args:\n state: The ``Starlite.state`` instance.\n scope: The current connection's scope.\n\n Returns:\n A session instance T.\n \"\"\"\n session = scope.get(SESSION_SCOPE_KEY)\n if not session:\n session_maker = cast(\"sessionmaker\", state[self.session_maker_app_state_key])\n session = scope[SESSION_SCOPE_KEY] = session_maker() # type: ignore\n return cast(\"Session | AsyncSession\", session)\n\n def update_app_state(self, state: State) -> None:\n \"\"\"Create a DB engine and stores it in the application state.\n\n Args:\n state: The ``Starlite.state`` instance.\n\n Returns:\n None\n \"\"\"\n\n state[self.engine_app_state_key] = self.engine\n state[self.session_maker_app_state_key] = self.session_maker\n\n async def on_shutdown(self, state: State) -> None:\n \"\"\"Disposes of the SQLAlchemy engine.\n\n Args:\n state: The ``Starlite.state`` instance.\n\n Returns:\n None\n \"\"\"\n engine = cast(\"Engine | AsyncEngine\", state[self.engine_app_state_key])\n if isinstance(engine, AsyncEngine):\n await engine.dispose()\n else:\n engine.dispose()\n del state[self.engine_app_state_key]\n\n def config_sql_alchemy_logging(self, logging_config: BaseLoggingConfig | None) -> None:\n \"\"\"Add the SQLAlchemy loggers to the logging config.\n\n Notes:\n - Currently only works with :class:`LoggingConfig <.logging.config.LoggingConfig>`.\n\n Args:\n logging_config: Logging config.\n\n Returns:\n None.\n \"\"\"\n if isinstance(logging_config, LoggingConfig):\n 
logger_settings = {\n \"level\": self.engine_config.logging_level or \"WARNING\",\n \"handlers\": logging_config.loggers[\"starlite\"][\"handlers\"],\n }\n for logger in (\n \"sqlalchemy\",\n self.engine_config.logging_name or \"sqlalchemy.engine\",\n self.engine_config.pool_logging_name or \"sqlalchemy.pool\",\n ):\n if logger not in logging_config.loggers:\n logging_config.loggers[logger] = logger_settings\n", "path": "starlite/contrib/sqlalchemy_1/config.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom dataclasses import asdict, dataclass, field\nfrom typing import TYPE_CHECKING, Any, Callable, Literal, cast\n\nfrom starlite.exceptions import ImproperlyConfiguredException, MissingDependencyException\nfrom starlite.logging.config import BaseLoggingConfig, LoggingConfig\nfrom starlite.serialization import decode_json, encode_json\nfrom starlite.utils import AsyncCallable\n\ntry:\n import sqlalchemy # noqa: F401\nexcept ImportError as e:\n raise MissingDependencyException(\"sqlalchemy is not installed\") from e\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.ext.asyncio import AsyncEngine, AsyncSession, create_async_engine\nfrom sqlalchemy.orm import Query, Session, sessionmaker\n\n__all__ = (\"SQLAlchemyConfig\", \"SQLAlchemyEngineConfig\", \"SQLAlchemySessionConfig\")\n\n\nif TYPE_CHECKING:\n from sqlalchemy.engine import Engine\n from sqlalchemy.future import Engine as FutureEngine\n from sqlalchemy.pool import Pool\n\n from starlite.datastructures.state import State\n from starlite.types import BeforeMessageSendHookHandler, Message, Scope\n\n from .types import SessionMakerInstanceProtocol, SessionMakerTypeProtocol\n\nIsolationLevel = Literal[\"AUTOCOMMIT\", \"READ COMMITTED\", \"READ UNCOMMITTED\", \"REPEATABLE READ\", \"SERIALIZABLE\"]\n\nSESSION_SCOPE_KEY = \"_sql_alchemy_db_session\"\nSESSION_TERMINUS_ASGI_EVENTS = {\"http.response.start\", \"http.disconnect\", \"websocket.disconnect\", \"websocket.close\"}\n\n\ndef serializer(value: Any) -> str:\n \"\"\"Serialize JSON field values.\n\n Args:\n value: Any json serializable value.\n\n Returns:\n JSON string.\n \"\"\"\n return encode_json(value).decode(\"utf-8\")\n\n\nasync def default_before_send_handler(message: \"Message\", _: \"State\", scope: \"Scope\") -> None:\n \"\"\"Handle closing and cleaning up sessions before sending.\n\n Args:\n message: ASGI-``Message``\n _: A ``State`` (not used)\n scope: An ASGI-``Scope``\n\n Returns:\n None\n \"\"\"\n session = cast(\"Session | AsyncSession | None\", scope.get(SESSION_SCOPE_KEY))\n if session and message[\"type\"] in SESSION_TERMINUS_ASGI_EVENTS:\n if isinstance(session, AsyncSession):\n await session.close()\n else:\n session.close()\n del scope[SESSION_SCOPE_KEY] # type: ignore\n\n\n@dataclass\nclass SQLAlchemySessionConfig:\n \"\"\"Configuration for a SQLAlchemy-Session.\"\"\"\n\n autocommit: bool | None = field(default=None)\n autoflush: bool | None = field(default=None)\n bind: Any | None = field(default=None)\n binds: Any | None = field(default=None)\n enable_baked_queries: bool | None = field(default=None)\n expire_on_commit: bool = field(default=False)\n future: bool | None = field(default=None)\n info: dict[str, Any] | None = field(default=None)\n query_cls: type[Query] | None = field(default=None)\n twophase: bool | None = field(default=None)\n\n\n@dataclass\nclass SQLAlchemyEngineConfig:\n \"\"\"Configuration for SQLAlchemy's :class`Engine <sqlalchemy.engine.Engine>`.\n\n For details see: 
https://docs.sqlalchemy.org/en/14/core/engines.html\n \"\"\"\n\n connect_args: dict[str, Any] | None = field(default=None)\n echo: bool | None = field(default=None)\n echo_pool: bool | None = field(default=None)\n enable_from_linting: bool | None = field(default=None)\n future: bool = field(default=True)\n hide_parameters: bool | None = field(default=None)\n isolation_level: IsolationLevel | None = field(default=None)\n json_deserializer: Callable[[str], Any] = field(default=decode_json)\n json_serializer: Callable[[Any], str] = field(default=serializer)\n label_length: int | None = field(default=None)\n listeners: Any = field(default=None)\n logging_level: int | str | None = field(default=None)\n logging_name: str | None = field(default=None)\n max_identifier_length: int | None = field(default=None)\n max_overflow: int | None = field(default=None)\n module: Any = field(default=None)\n paramstyle: Literal[\"qmark\", \"numeric\", \"named\", \"format\", \"pyformat\"] | None = field(default=None)\n plugins: list[str] | None = field(default=None)\n pool: Pool | None = field(default=None)\n pool_logging_name: str | None = field(default=None)\n pool_pre_ping: bool | None = field(default=None)\n pool_recycle: int | None = field(default=None)\n pool_reset_on_return: Literal[\"rollback\", \"commit\"] | None = field(default=None)\n pool_size: int | None = field(default=None)\n pool_timeout: int | None = field(default=None)\n pool_use_lifo: bool | None = field(default=None)\n poolclass: type[Pool] | None = field(default=None)\n query_cache_size: int | None = field(default=None)\n strategy: str | None = field(default=None)\n\n\n@dataclass\nclass SQLAlchemyConfig:\n \"\"\"Configuration for SQLAlchemy's :class:`sessionmaker <sqlalchemy.orm.sessionmaker>`.\n\n For details see: https://docs.sqlalchemy.org/en/14/orm/session_api.html\n \"\"\"\n\n connection_string: str | None = field(default=None)\n \"\"\"Database connection string in one of the formats supported by SQLAlchemy.\n\n Notes:\n - For async connections, the connection string must include the correct async prefix.\n e.g. ``'postgresql+asyncpg://...'`` instead of ``'postgresql://'``, and for sync connections its the opposite.\n\n \"\"\"\n use_async_engine: bool = field(default=True)\n \"\"\"Dictate whether the engine created is an async connection or not.\n\n Notes:\n - This option must correlate to the type of ``connection_string``. 
That is, an async connection string required an\n async connection and vice versa.\n\n \"\"\"\n create_async_engine_callable: Callable[[str], AsyncEngine] = field(default=create_async_engine)\n \"\"\"Callable that creates an :class:`AsyncEngine <sqlalchemy.ext.asyncio.AsyncEngine>` instance or instance of its\n subclass.\n \"\"\"\n create_engine_callable: Callable[[str], Engine | FutureEngine] = field(default=create_engine)\n \"\"\"Callable that creates an :class:`Engine <sqlalchemy.engine.Engine>` or ``FutureEngine`` instance or instance of its\n subclass.\"\"\"\n dependency_key: str = field(default=\"db_session\")\n \"\"\"Key to use for the dependency injection of database sessions.\"\"\"\n engine_app_state_key: str = field(default=\"db_engine\")\n \"\"\"Key under which to store the SQLAlchemy engine in the application :class:`State <.datastructures.State>`\n instance.\n \"\"\"\n engine_config: SQLAlchemyEngineConfig = field(default_factory=SQLAlchemyEngineConfig)\n \"\"\"Configuration for the SQLAlchemy engine.\n\n The configuration options are documented in the SQLAlchemy documentation.\n \"\"\"\n set_json_serializers: bool = field(default=True)\n \"\"\"A boolean flag dictating whether to set ``msgspec`` based serializer/deserializer functions.\n\n Notes:\n - Some databases or some versions of some databases do not have a JSON column type. E.g. some older versions of\n SQLite for example. In this case this flag should be false or an error will be raised by SQLAlchemy.\n\n \"\"\"\n session_class: type[Session] | type[AsyncSession] | None = field(default=None)\n \"\"\"The session class to use.\n\n If not set, the session class will default to :class:`sqlalchemy.orm.Session` for sync connections and\n :class:`sqlalchemy.ext.asyncio.AsyncSession` for async ones.\n \"\"\"\n session_config: SQLAlchemySessionConfig = field(default_factory=SQLAlchemySessionConfig)\n \"\"\"Configuration options for the ``sessionmaker``.\n\n The configuration options are documented in the SQLAlchemy documentation.\n \"\"\"\n session_maker_class: type[SessionMakerTypeProtocol] = field(default=sessionmaker)\n \"\"\"Sessionmaker class to use.\"\"\"\n session_maker_app_state_key: str = field(default=\"session_maker_class\")\n \"\"\"Key under which to store the SQLAlchemy ``sessionmaker`` in the application\n :class:`State <.datastructures.State>` instance.\n \"\"\"\n session_maker_instance: SessionMakerInstanceProtocol | None = field(default=None)\n \"\"\"Optional sessionmaker to use.\n\n If set, the plugin will use the provided instance rather than instantiate a sessionmaker.\n \"\"\"\n engine_instance: Engine | FutureEngine | AsyncEngine | None = field(default=None)\n \"\"\"Optional engine to use.\n\n If set, the plugin will use the provided instance rather than instantiate an engine.\n \"\"\"\n before_send_handler: BeforeMessageSendHookHandler = field(default=default_before_send_handler)\n \"\"\"Handler to call before the ASGI message is sent.\n\n The handler should handle closing the session stored in the ASGI scope, if its still open, and committing and\n uncommitted data.\n \"\"\"\n\n def __post_init__(self) -> None:\n if self.connection_string is None and self.engine_instance is None:\n raise ImproperlyConfiguredException(\"One of 'connection_string' or 'engine_instance' must be provided.\")\n\n if self.connection_string is not None and self.engine_instance is not None:\n raise ImproperlyConfiguredException(\"Only one of 'connection_string' or 'engine_instance' can be provided.\")\n\n self.before_send_handler 
= AsyncCallable(self.before_send_handler) # type: ignore\n\n @property\n def engine_config_dict(self) -> dict[str, Any]:\n \"\"\"Return the engine configuration as a dict.\n\n Returns:\n A string keyed dict of config kwargs for the SQLAlchemy ``create_engine`` function.\n \"\"\"\n engine_excluded_fields: set[str] = {\"future\", \"logging_level\"} if self.use_async_engine else {\"logging_level\"}\n\n if not self.set_json_serializers:\n engine_excluded_fields.update({\"json_deserializer\", \"json_serializer\"})\n\n return {\n k: v for k, v in asdict(self.engine_config).items() if v is not None and k not in engine_excluded_fields\n }\n\n @property\n def engine(self) -> Engine | FutureEngine | AsyncEngine:\n \"\"\"Return an engine. If none exists yet, create one.\n\n Returns:\n Getter that returns the engine instance used by the plugin.\n \"\"\"\n if not self.engine_instance:\n create_engine_callable = (\n self.create_async_engine_callable if self.use_async_engine else self.create_engine_callable\n )\n self.engine_instance = create_engine_callable(\n self.connection_string, **self.engine_config_dict # type:ignore[arg-type]\n )\n return cast(\"Engine | FutureEngine | AsyncEngine\", self.engine_instance)\n\n @property\n def session_maker(self) -> sessionmaker:\n \"\"\"Get a sessionmaker. If none exists yet, create one.\n\n Returns:\n Getter that returns the session_maker instance used by the plugin.\n \"\"\"\n if not self.session_maker_instance:\n session_maker_kwargs = {\n k: v\n for k, v in asdict(self.session_config).items()\n if v is not None and ((self.use_async_engine and k != \"future\") or not self.use_async_engine)\n }\n session_class = self.session_class or (AsyncSession if self.use_async_engine else Session)\n self.session_maker_instance = self.session_maker_class(\n self.engine, class_=session_class, **session_maker_kwargs\n )\n return cast(\"sessionmaker\", self.session_maker_instance)\n\n def create_db_session_dependency(self, state: State, scope: Scope) -> Union[Session, AsyncSession]: # noqa: F821\n \"\"\"Create a session instance.\n\n Args:\n state: The ``Starlite.state`` instance.\n scope: The current connection's scope.\n\n Returns:\n A session instance T.\n \"\"\"\n session = scope.get(SESSION_SCOPE_KEY)\n if not session:\n session_maker = cast(\"sessionmaker\", state[self.session_maker_app_state_key])\n session = scope[SESSION_SCOPE_KEY] = session_maker() # type: ignore\n return cast(\"Session | AsyncSession\", session)\n\n def update_app_state(self, state: State) -> None:\n \"\"\"Create a DB engine and stores it in the application state.\n\n Args:\n state: The ``Starlite.state`` instance.\n\n Returns:\n None\n \"\"\"\n\n state[self.engine_app_state_key] = self.engine\n state[self.session_maker_app_state_key] = self.session_maker\n\n async def on_shutdown(self, state: State) -> None:\n \"\"\"Disposes of the SQLAlchemy engine.\n\n Args:\n state: The ``Starlite.state`` instance.\n\n Returns:\n None\n \"\"\"\n engine = cast(\"Engine | AsyncEngine\", state[self.engine_app_state_key])\n if isinstance(engine, AsyncEngine):\n await engine.dispose()\n else:\n engine.dispose()\n del state[self.engine_app_state_key]\n\n def config_sql_alchemy_logging(self, logging_config: BaseLoggingConfig | None) -> None:\n \"\"\"Add the SQLAlchemy loggers to the logging config.\n\n Notes:\n - Currently only works with :class:`LoggingConfig <.logging.config.LoggingConfig>`.\n\n Args:\n logging_config: Logging config.\n\n Returns:\n None.\n \"\"\"\n if isinstance(logging_config, LoggingConfig):\n 
logger_settings = {\n \"level\": self.engine_config.logging_level or \"WARNING\",\n \"handlers\": logging_config.loggers[\"starlite\"][\"handlers\"],\n }\n for logger in (\n \"sqlalchemy\",\n self.engine_config.logging_name or \"sqlalchemy.engine\",\n self.engine_config.pool_logging_name or \"sqlalchemy.pool\",\n ):\n if logger not in logging_config.loggers:\n logging_config.loggers[logger] = logger_settings\n", "path": "starlite/contrib/sqlalchemy_1/config.py"}]} |
gh_patches_debug_1208 | rasdani/github-patches | git_diff | docker__docker-py-2213 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
swarm.init() does not return a value
No return value, unlike what is specified in the documentation.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docker/models/swarm.py`
Content:
```
1 from docker.api import APIClient
2 from docker.errors import APIError
3 from .resource import Model
4
5
6 class Swarm(Model):
7 """
8 The server's Swarm state. This a singleton that must be reloaded to get
9 the current state of the Swarm.
10 """
11 id_attribute = 'ID'
12
13 def __init__(self, *args, **kwargs):
14 super(Swarm, self).__init__(*args, **kwargs)
15 if self.client:
16 try:
17 self.reload()
18 except APIError as e:
19 # FIXME: https://github.com/docker/docker/issues/29192
20 if e.response.status_code not in (406, 503):
21 raise
22
23 @property
24 def version(self):
25 """
26 The version number of the swarm. If this is not the same as the
27 server, the :py:meth:`update` function will not work and you will
28 need to call :py:meth:`reload` before calling it again.
29 """
30 return self.attrs.get('Version').get('Index')
31
32 def get_unlock_key(self):
33 return self.client.api.get_unlock_key()
34 get_unlock_key.__doc__ = APIClient.get_unlock_key.__doc__
35
36 def init(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
37 force_new_cluster=False, **kwargs):
38 """
39 Initialize a new swarm on this Engine.
40
41 Args:
42 advertise_addr (str): Externally reachable address advertised to
43 other nodes. This can either be an address/port combination in
44 the form ``192.168.1.1:4567``, or an interface followed by a
45 port number, like ``eth0:4567``. If the port number is omitted,
46 the port number from the listen address is used.
47
48 If not specified, it will be automatically detected when
49 possible.
50 listen_addr (str): Listen address used for inter-manager
51 communication, as well as determining the networking interface
52 used for the VXLAN Tunnel Endpoint (VTEP). This can either be
53 an address/port combination in the form ``192.168.1.1:4567``,
54 or an interface followed by a port number, like ``eth0:4567``.
55 If the port number is omitted, the default swarm listening port
56 is used. Default: ``0.0.0.0:2377``
57 force_new_cluster (bool): Force creating a new Swarm, even if
58 already part of one. Default: False
59 task_history_retention_limit (int): Maximum number of tasks
60 history stored.
61 snapshot_interval (int): Number of logs entries between snapshot.
62 keep_old_snapshots (int): Number of snapshots to keep beyond the
63 current snapshot.
64 log_entries_for_slow_followers (int): Number of log entries to
65 keep around to sync up slow followers after a snapshot is
66 created.
67 heartbeat_tick (int): Amount of ticks (in seconds) between each
68 heartbeat.
69 election_tick (int): Amount of ticks (in seconds) needed without a
70 leader to trigger a new election.
71 dispatcher_heartbeat_period (int): The delay for an agent to send
72 a heartbeat to the dispatcher.
73 node_cert_expiry (int): Automatic expiry for nodes certificates.
74 external_ca (dict): Configuration for forwarding signing requests
75 to an external certificate authority. Use
76 ``docker.types.SwarmExternalCA``.
77 name (string): Swarm's name
78 labels (dict): User-defined key/value metadata.
79 signing_ca_cert (str): The desired signing CA certificate for all
80 swarm node TLS leaf certificates, in PEM format.
81 signing_ca_key (str): The desired signing CA key for all swarm
82 node TLS leaf certificates, in PEM format.
83 ca_force_rotate (int): An integer whose purpose is to force swarm
84 to generate a new signing CA certificate and key, if none have
85 been specified.
86 autolock_managers (boolean): If set, generate a key and use it to
87 lock data stored on the managers.
88 log_driver (DriverConfig): The default log driver to use for tasks
89 created in the orchestrator.
90
91 Returns:
92 ``True`` if the request went through.
93
94 Raises:
95 :py:class:`docker.errors.APIError`
96 If the server returns an error.
97
98 Example:
99
100 >>> client.swarm.init(
101 advertise_addr='eth0', listen_addr='0.0.0.0:5000',
102 force_new_cluster=False, snapshot_interval=5000,
103 log_entries_for_slow_followers=1200
104 )
105
106 """
107 init_kwargs = {
108 'advertise_addr': advertise_addr,
109 'listen_addr': listen_addr,
110 'force_new_cluster': force_new_cluster
111 }
112 init_kwargs['swarm_spec'] = self.client.api.create_swarm_spec(**kwargs)
113 self.client.api.init_swarm(**init_kwargs)
114 self.reload()
115
116 def join(self, *args, **kwargs):
117 return self.client.api.join_swarm(*args, **kwargs)
118 join.__doc__ = APIClient.join_swarm.__doc__
119
120 def leave(self, *args, **kwargs):
121 return self.client.api.leave_swarm(*args, **kwargs)
122 leave.__doc__ = APIClient.leave_swarm.__doc__
123
124 def reload(self):
125 """
126 Inspect the swarm on the server and store the response in
127 :py:attr:`attrs`.
128
129 Raises:
130 :py:class:`docker.errors.APIError`
131 If the server returns an error.
132 """
133 self.attrs = self.client.api.inspect_swarm()
134
135 def unlock(self, key):
136 return self.client.api.unlock_swarm(key)
137 unlock.__doc__ = APIClient.unlock_swarm.__doc__
138
139 def update(self, rotate_worker_token=False, rotate_manager_token=False,
140 **kwargs):
141 """
142 Update the swarm's configuration.
143
144 It takes the same arguments as :py:meth:`init`, except
145 ``advertise_addr``, ``listen_addr``, and ``force_new_cluster``. In
146 addition, it takes these arguments:
147
148 Args:
149 rotate_worker_token (bool): Rotate the worker join token. Default:
150 ``False``.
151 rotate_manager_token (bool): Rotate the manager join token.
152 Default: ``False``.
153
154 Raises:
155 :py:class:`docker.errors.APIError`
156 If the server returns an error.
157
158 """
159 # this seems to have to be set
160 if kwargs.get('node_cert_expiry') is None:
161 kwargs['node_cert_expiry'] = 7776000000000000
162
163 return self.client.api.update_swarm(
164 version=self.version,
165 swarm_spec=self.client.api.create_swarm_spec(**kwargs),
166 rotate_worker_token=rotate_worker_token,
167 rotate_manager_token=rotate_manager_token
168 )
169
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docker/models/swarm.py b/docker/models/swarm.py
--- a/docker/models/swarm.py
+++ b/docker/models/swarm.py
@@ -112,6 +112,7 @@
init_kwargs['swarm_spec'] = self.client.api.create_swarm_spec(**kwargs)
self.client.api.init_swarm(**init_kwargs)
self.reload()
+ return True
def join(self, *args, **kwargs):
return self.client.api.join_swarm(*args, **kwargs)
| {"golden_diff": "diff --git a/docker/models/swarm.py b/docker/models/swarm.py\n--- a/docker/models/swarm.py\n+++ b/docker/models/swarm.py\n@@ -112,6 +112,7 @@\n init_kwargs['swarm_spec'] = self.client.api.create_swarm_spec(**kwargs)\n self.client.api.init_swarm(**init_kwargs)\n self.reload()\n+ return True\n \n def join(self, *args, **kwargs):\n return self.client.api.join_swarm(*args, **kwargs)\n", "issue": "swarm.init() does not return a value\nNo return value unlike as specified in documentation. \n", "before_files": [{"content": "from docker.api import APIClient\nfrom docker.errors import APIError\nfrom .resource import Model\n\n\nclass Swarm(Model):\n \"\"\"\n The server's Swarm state. This a singleton that must be reloaded to get\n the current state of the Swarm.\n \"\"\"\n id_attribute = 'ID'\n\n def __init__(self, *args, **kwargs):\n super(Swarm, self).__init__(*args, **kwargs)\n if self.client:\n try:\n self.reload()\n except APIError as e:\n # FIXME: https://github.com/docker/docker/issues/29192\n if e.response.status_code not in (406, 503):\n raise\n\n @property\n def version(self):\n \"\"\"\n The version number of the swarm. If this is not the same as the\n server, the :py:meth:`update` function will not work and you will\n need to call :py:meth:`reload` before calling it again.\n \"\"\"\n return self.attrs.get('Version').get('Index')\n\n def get_unlock_key(self):\n return self.client.api.get_unlock_key()\n get_unlock_key.__doc__ = APIClient.get_unlock_key.__doc__\n\n def init(self, advertise_addr=None, listen_addr='0.0.0.0:2377',\n force_new_cluster=False, **kwargs):\n \"\"\"\n Initialize a new swarm on this Engine.\n\n Args:\n advertise_addr (str): Externally reachable address advertised to\n other nodes. This can either be an address/port combination in\n the form ``192.168.1.1:4567``, or an interface followed by a\n port number, like ``eth0:4567``. If the port number is omitted,\n the port number from the listen address is used.\n\n If not specified, it will be automatically detected when\n possible.\n listen_addr (str): Listen address used for inter-manager\n communication, as well as determining the networking interface\n used for the VXLAN Tunnel Endpoint (VTEP). This can either be\n an address/port combination in the form ``192.168.1.1:4567``,\n or an interface followed by a port number, like ``eth0:4567``.\n If the port number is omitted, the default swarm listening port\n is used. Default: ``0.0.0.0:2377``\n force_new_cluster (bool): Force creating a new Swarm, even if\n already part of one. Default: False\n task_history_retention_limit (int): Maximum number of tasks\n history stored.\n snapshot_interval (int): Number of logs entries between snapshot.\n keep_old_snapshots (int): Number of snapshots to keep beyond the\n current snapshot.\n log_entries_for_slow_followers (int): Number of log entries to\n keep around to sync up slow followers after a snapshot is\n created.\n heartbeat_tick (int): Amount of ticks (in seconds) between each\n heartbeat.\n election_tick (int): Amount of ticks (in seconds) needed without a\n leader to trigger a new election.\n dispatcher_heartbeat_period (int): The delay for an agent to send\n a heartbeat to the dispatcher.\n node_cert_expiry (int): Automatic expiry for nodes certificates.\n external_ca (dict): Configuration for forwarding signing requests\n to an external certificate authority. 
Use\n ``docker.types.SwarmExternalCA``.\n name (string): Swarm's name\n labels (dict): User-defined key/value metadata.\n signing_ca_cert (str): The desired signing CA certificate for all\n swarm node TLS leaf certificates, in PEM format.\n signing_ca_key (str): The desired signing CA key for all swarm\n node TLS leaf certificates, in PEM format.\n ca_force_rotate (int): An integer whose purpose is to force swarm\n to generate a new signing CA certificate and key, if none have\n been specified.\n autolock_managers (boolean): If set, generate a key and use it to\n lock data stored on the managers.\n log_driver (DriverConfig): The default log driver to use for tasks\n created in the orchestrator.\n\n Returns:\n ``True`` if the request went through.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\n Example:\n\n >>> client.swarm.init(\n advertise_addr='eth0', listen_addr='0.0.0.0:5000',\n force_new_cluster=False, snapshot_interval=5000,\n log_entries_for_slow_followers=1200\n )\n\n \"\"\"\n init_kwargs = {\n 'advertise_addr': advertise_addr,\n 'listen_addr': listen_addr,\n 'force_new_cluster': force_new_cluster\n }\n init_kwargs['swarm_spec'] = self.client.api.create_swarm_spec(**kwargs)\n self.client.api.init_swarm(**init_kwargs)\n self.reload()\n\n def join(self, *args, **kwargs):\n return self.client.api.join_swarm(*args, **kwargs)\n join.__doc__ = APIClient.join_swarm.__doc__\n\n def leave(self, *args, **kwargs):\n return self.client.api.leave_swarm(*args, **kwargs)\n leave.__doc__ = APIClient.leave_swarm.__doc__\n\n def reload(self):\n \"\"\"\n Inspect the swarm on the server and store the response in\n :py:attr:`attrs`.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n self.attrs = self.client.api.inspect_swarm()\n\n def unlock(self, key):\n return self.client.api.unlock_swarm(key)\n unlock.__doc__ = APIClient.unlock_swarm.__doc__\n\n def update(self, rotate_worker_token=False, rotate_manager_token=False,\n **kwargs):\n \"\"\"\n Update the swarm's configuration.\n\n It takes the same arguments as :py:meth:`init`, except\n ``advertise_addr``, ``listen_addr``, and ``force_new_cluster``. In\n addition, it takes these arguments:\n\n Args:\n rotate_worker_token (bool): Rotate the worker join token. Default:\n ``False``.\n rotate_manager_token (bool): Rotate the manager join token.\n Default: ``False``.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\n \"\"\"\n # this seems to have to be set\n if kwargs.get('node_cert_expiry') is None:\n kwargs['node_cert_expiry'] = 7776000000000000\n\n return self.client.api.update_swarm(\n version=self.version,\n swarm_spec=self.client.api.create_swarm_spec(**kwargs),\n rotate_worker_token=rotate_worker_token,\n rotate_manager_token=rotate_manager_token\n )\n", "path": "docker/models/swarm.py"}], "after_files": [{"content": "from docker.api import APIClient\nfrom docker.errors import APIError\nfrom .resource import Model\n\n\nclass Swarm(Model):\n \"\"\"\n The server's Swarm state. This a singleton that must be reloaded to get\n the current state of the Swarm.\n \"\"\"\n id_attribute = 'ID'\n\n def __init__(self, *args, **kwargs):\n super(Swarm, self).__init__(*args, **kwargs)\n if self.client:\n try:\n self.reload()\n except APIError as e:\n # FIXME: https://github.com/docker/docker/issues/29192\n if e.response.status_code not in (406, 503):\n raise\n\n @property\n def version(self):\n \"\"\"\n The version number of the swarm. 
If this is not the same as the\n server, the :py:meth:`update` function will not work and you will\n need to call :py:meth:`reload` before calling it again.\n \"\"\"\n return self.attrs.get('Version').get('Index')\n\n def get_unlock_key(self):\n return self.client.api.get_unlock_key()\n get_unlock_key.__doc__ = APIClient.get_unlock_key.__doc__\n\n def init(self, advertise_addr=None, listen_addr='0.0.0.0:2377',\n force_new_cluster=False, **kwargs):\n \"\"\"\n Initialize a new swarm on this Engine.\n\n Args:\n advertise_addr (str): Externally reachable address advertised to\n other nodes. This can either be an address/port combination in\n the form ``192.168.1.1:4567``, or an interface followed by a\n port number, like ``eth0:4567``. If the port number is omitted,\n the port number from the listen address is used.\n\n If not specified, it will be automatically detected when\n possible.\n listen_addr (str): Listen address used for inter-manager\n communication, as well as determining the networking interface\n used for the VXLAN Tunnel Endpoint (VTEP). This can either be\n an address/port combination in the form ``192.168.1.1:4567``,\n or an interface followed by a port number, like ``eth0:4567``.\n If the port number is omitted, the default swarm listening port\n is used. Default: ``0.0.0.0:2377``\n force_new_cluster (bool): Force creating a new Swarm, even if\n already part of one. Default: False\n task_history_retention_limit (int): Maximum number of tasks\n history stored.\n snapshot_interval (int): Number of logs entries between snapshot.\n keep_old_snapshots (int): Number of snapshots to keep beyond the\n current snapshot.\n log_entries_for_slow_followers (int): Number of log entries to\n keep around to sync up slow followers after a snapshot is\n created.\n heartbeat_tick (int): Amount of ticks (in seconds) between each\n heartbeat.\n election_tick (int): Amount of ticks (in seconds) needed without a\n leader to trigger a new election.\n dispatcher_heartbeat_period (int): The delay for an agent to send\n a heartbeat to the dispatcher.\n node_cert_expiry (int): Automatic expiry for nodes certificates.\n external_ca (dict): Configuration for forwarding signing requests\n to an external certificate authority. 
Use\n ``docker.types.SwarmExternalCA``.\n name (string): Swarm's name\n labels (dict): User-defined key/value metadata.\n signing_ca_cert (str): The desired signing CA certificate for all\n swarm node TLS leaf certificates, in PEM format.\n signing_ca_key (str): The desired signing CA key for all swarm\n node TLS leaf certificates, in PEM format.\n ca_force_rotate (int): An integer whose purpose is to force swarm\n to generate a new signing CA certificate and key, if none have\n been specified.\n autolock_managers (boolean): If set, generate a key and use it to\n lock data stored on the managers.\n log_driver (DriverConfig): The default log driver to use for tasks\n created in the orchestrator.\n\n Returns:\n ``True`` if the request went through.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\n Example:\n\n >>> client.swarm.init(\n advertise_addr='eth0', listen_addr='0.0.0.0:5000',\n force_new_cluster=False, snapshot_interval=5000,\n log_entries_for_slow_followers=1200\n )\n\n \"\"\"\n init_kwargs = {\n 'advertise_addr': advertise_addr,\n 'listen_addr': listen_addr,\n 'force_new_cluster': force_new_cluster\n }\n init_kwargs['swarm_spec'] = self.client.api.create_swarm_spec(**kwargs)\n self.client.api.init_swarm(**init_kwargs)\n self.reload()\n return True\n\n def join(self, *args, **kwargs):\n return self.client.api.join_swarm(*args, **kwargs)\n join.__doc__ = APIClient.join_swarm.__doc__\n\n def leave(self, *args, **kwargs):\n return self.client.api.leave_swarm(*args, **kwargs)\n leave.__doc__ = APIClient.leave_swarm.__doc__\n\n def reload(self):\n \"\"\"\n Inspect the swarm on the server and store the response in\n :py:attr:`attrs`.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n self.attrs = self.client.api.inspect_swarm()\n\n def unlock(self, key):\n return self.client.api.unlock_swarm(key)\n unlock.__doc__ = APIClient.unlock_swarm.__doc__\n\n def update(self, rotate_worker_token=False, rotate_manager_token=False,\n **kwargs):\n \"\"\"\n Update the swarm's configuration.\n\n It takes the same arguments as :py:meth:`init`, except\n ``advertise_addr``, ``listen_addr``, and ``force_new_cluster``. In\n addition, it takes these arguments:\n\n Args:\n rotate_worker_token (bool): Rotate the worker join token. Default:\n ``False``.\n rotate_manager_token (bool): Rotate the manager join token.\n Default: ``False``.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\n \"\"\"\n # this seems to have to be set\n if kwargs.get('node_cert_expiry') is None:\n kwargs['node_cert_expiry'] = 7776000000000000\n\n return self.client.api.update_swarm(\n version=self.version,\n swarm_spec=self.client.api.create_swarm_spec(**kwargs),\n rotate_worker_token=rotate_worker_token,\n rotate_manager_token=rotate_manager_token\n )\n", "path": "docker/models/swarm.py"}]} |
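Before the next record, a quick illustration of the behaviour the swarm patch above restores; the addresses are placeholders and this assumes a reachable Docker daemon, so treat it as a sketch rather than an official SDK example:
```python
import docker

client = docker.from_env()

# With the patch applied, init() returns True on success instead of falling
# through with None, matching the documented "True if the request went
# through" behaviour.
ok = client.swarm.init(advertise_addr="eth0", listen_addr="0.0.0.0:2377")
assert ok is True
```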
gh_patches_debug_1209 | rasdani/github-patches | git_diff | open-mmlab__mmcv-474 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug feedback.
Thanks for your code.
In [mmcv/runner/base_runner](https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/base_runner.py#L385):
```py
def register_logger_hooks(self, log_config):
    log_interval = log_config['interval']
    for info in log_config['hooks']:
        logger_hook = mmcv.build_from_cfg(
            info, HOOKS, default_args=dict(interval=log_interval))
        self.register_hook(logger_hook, priority='VERY_LOW')
```
Only the argument 'interval' from log_config is passed to each logger hook; the argument 'by_epoch' is ignored and stays at its default of True, so when the iter-based runner is used the logger behaves unexpectedly.
--- END ISSUE ---
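As an aside, a minimal sketch of how the ignored option plays out in practice; the hook type and values below are illustrative, not taken from the report:
```python
# Hypothetical log_config a user of the iter-based runner might pass.
log_config = dict(
    interval=50,
    by_epoch=False,  # intended: log per iteration rather than per epoch
    hooks=[dict(type='TextLoggerHook')],
)

# register_logger_hooks only forwards `interval` as a default argument, so the
# hook is effectively built as TextLoggerHook(interval=50) and `by_epoch`
# silently falls back to its default of True instead of the requested False.
```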
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mmcv/runner/iter_based_runner.py`
Content:
```
1 # Copyright (c) Open-MMLab. All rights reserved.
2 import os.path as osp
3 import time
4
5 import torch
6 from torch.optim import Optimizer
7
8 import mmcv
9 from .base_runner import BaseRunner
10 from .checkpoint import save_checkpoint
11 from .hooks import IterTimerHook
12 from .utils import get_host_info
13
14
15 class IterLoader:
16
17 def __init__(self, dataloader):
18 self._dataloader = dataloader
19 self.iter_loader = iter(self._dataloader)
20 self._epoch = 0
21
22 @property
23 def epoch(self):
24 return self._epoch
25
26 def __next__(self):
27 try:
28 data = next(self.iter_loader)
29 except StopIteration:
30 self._epoch += 1
31 if hasattr(self._dataloader.sampler, 'set_epoch'):
32 self._dataloader.sampler.set_epoch(self._epoch)
33 self.iter_loader = iter(self._dataloader)
34 data = next(self.iter_loader)
35
36 return data
37
38 def __len__(self):
39 return len(self._dataloader)
40
41
42 class IterBasedRunner(BaseRunner):
43 """Iteration-based Runner.
44
45 This runner train models iteration by iteration.
46 """
47
48 def train(self, data_loader, **kwargs):
49 self.model.train()
50 self.mode = 'train'
51 self.data_loader = data_loader
52 self._epoch = data_loader.epoch
53 self.call_hook('before_train_iter')
54 data_batch = next(data_loader)
55 outputs = self.model.train_step(data_batch, self.optimizer, **kwargs)
56 if not isinstance(outputs, dict):
57 raise TypeError('model.train_step() must return a dict')
58 if 'log_vars' in outputs:
59 self.log_buffer.update(outputs['log_vars'], outputs['num_samples'])
60 self.outputs = outputs
61 self.call_hook('after_train_iter')
62 self._inner_iter += 1
63 self._iter += 1
64
65 def val(self, data_loader, **kwargs):
66 self.model.eval()
67 self.mode = 'val'
68 self.data_loader = data_loader
69 self.call_hook('before_val_iter')
70 data_batch = next(data_loader)
71 outputs = self.model.val_step(data_batch, **kwargs)
72 if not isinstance(outputs, dict):
73 raise TypeError('model.val_step() must return a dict')
74 if 'log_vars' in outputs:
75 self.log_buffer.update(outputs['log_vars'], outputs['num_samples'])
76 self.outputs = outputs
77 self.call_hook('after_val_iter')
78 self._inner_iter += 1
79
80 def run(self, data_loaders, workflow, max_iters, **kwargs):
81 """Start running.
82
83 Args:
84 data_loaders (list[:obj:`DataLoader`]): Dataloaders for training
85 and validation.
86 workflow (list[tuple]): A list of (phase, iters) to specify the
87 running order and iterations. E.g, [('train', 10000),
88 ('val', 1000)] means running 10000 iterations for training and
89 1000 iterations for validation, iteratively.
90 max_iters (int): Total training iterations.
91 """
92 assert isinstance(data_loaders, list)
93 assert mmcv.is_list_of(workflow, tuple)
94 assert len(data_loaders) == len(workflow)
95
96 self._max_iters = max_iters
97 work_dir = self.work_dir if self.work_dir is not None else 'NONE'
98 self.logger.info('Start running, host: %s, work_dir: %s',
99 get_host_info(), work_dir)
100 self.logger.info('workflow: %s, max: %d iters', workflow, max_iters)
101 self.call_hook('before_run')
102
103 iter_loaders = [IterLoader(x) for x in data_loaders]
104
105 self.call_hook('before_epoch')
106
107 while self.iter < max_iters:
108 for i, flow in enumerate(workflow):
109 self._inner_iter = 0
110 mode, iters = flow
111 if not isinstance(mode, str) or not hasattr(self, mode):
112 raise ValueError(
113 'runner has no method named "{}" to run a workflow'.
114 format(mode))
115 iter_runner = getattr(self, mode)
116 for _ in range(iters):
117 if mode == 'train' and self.iter >= max_iters:
118 break
119 iter_runner(iter_loaders[i], **kwargs)
120
121 time.sleep(1) # wait for some hooks like loggers to finish
122 self.call_hook('after_epoch')
123 self.call_hook('after_run')
124
125 def resume(self,
126 checkpoint,
127 resume_optimizer=True,
128 map_location='default'):
129 """Resume model from checkpoint.
130
131 Args:
132 checkpoint (str): Checkpoint to resume from.
133 resume_optimizer (bool, optional): Whether resume the optimizer(s)
134 if the checkpoint file includes optimizer(s). Default to True.
135 map_location (str, optional): Same as :func:`torch.load`.
136 Default to 'default'.
137 """
138 if map_location == 'default':
139 device_id = torch.cuda.current_device()
140 checkpoint = self.load_checkpoint(
141 checkpoint,
142 map_location=lambda storage, loc: storage.cuda(device_id))
143 else:
144 checkpoint = self.load_checkpoint(
145 checkpoint, map_location=map_location)
146
147 self._epoch = checkpoint['meta']['epoch']
148 self._iter = checkpoint['meta']['iter']
149 self._inner_iter = checkpoint['meta']['iter']
150 if 'optimizer' in checkpoint and resume_optimizer:
151 if isinstance(self.optimizer, Optimizer):
152 self.optimizer.load_state_dict(checkpoint['optimizer'])
153 elif isinstance(self.optimizer, dict):
154 for k in self.optimizer.keys():
155 self.optimizer[k].load_state_dict(
156 checkpoint['optimizer'][k])
157
158 self.logger.info(f'resumed from epoch: {self.epoch}, iter {self.iter}')
159
160 def save_checkpoint(self,
161 out_dir,
162 filename_tmpl='iter_{}.pth',
163 meta=None,
164 save_optimizer=True,
165 create_symlink=True):
166 """Save checkpoint to file.
167
168 Args:
169 out_dir (str): Directory to save checkpoint files.
170 filename_tmpl (str, optional): Checkpoint file template.
171 Defaults to 'iter_{}.pth'.
172 meta (dict, optional): Metadata to be saved in checkpoint.
173 Defaults to None.
174 save_optimizer (bool, optional): Whether save optimizer.
175 Defaults to True.
176 create_symlink (bool, optional): Whether create symlink to the
177 latest checkpoint file. Defaults to True.
178 """
179 if meta is None:
180 meta = dict(iter=self.iter + 1, epoch=self.epoch + 1)
181 elif isinstance(meta, dict):
182 meta.update(iter=self.iter + 1, epoch=self.epoch + 1)
183 else:
184 raise TypeError(
185 f'meta should be a dict or None, but got {type(meta)}')
186 if self.meta is not None:
187 meta.update(self.meta)
188
189 filename = filename_tmpl.format(self.iter + 1)
190 filepath = osp.join(out_dir, filename)
191 optimizer = self.optimizer if save_optimizer else None
192 save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta)
193 # in some environments, `os.symlink` is not supported, you may need to
194 # set `create_symlink` to False
195 if create_symlink:
196 mmcv.symlink(filename, osp.join(out_dir, 'latest.pth'))
197
198 def register_training_hooks(self,
199 lr_config,
200 optimizer_config=None,
201 checkpoint_config=None,
202 log_config=None,
203 momentum_config=None):
204 """Register default hooks for iter-based training.
205
206 Default hooks include:
207
208 - LrUpdaterHook
209 - MomentumUpdaterHook
210 - OptimizerStepperHook
211 - CheckpointSaverHook
212 - IterTimerHook
213 - LoggerHook(s)
214 """
215 if checkpoint_config is not None:
216 checkpoint_config.setdefault('by_epoch', False)
217 if lr_config is not None:
218 lr_config.setdefault('by_epoch', False)
219 self.register_lr_hook(lr_config)
220 self.register_momentum_hook(momentum_config)
221 self.register_optimizer_hook(optimizer_config)
222 self.register_checkpoint_hook(checkpoint_config)
223 self.register_hook(IterTimerHook())
224 if log_config is not None:
225 log_config.setdefault('by_epoch', False)
226 self.register_logger_hooks(log_config)
227
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mmcv/runner/iter_based_runner.py b/mmcv/runner/iter_based_runner.py
--- a/mmcv/runner/iter_based_runner.py
+++ b/mmcv/runner/iter_based_runner.py
@@ -222,5 +222,6 @@
self.register_checkpoint_hook(checkpoint_config)
self.register_hook(IterTimerHook())
if log_config is not None:
- log_config.setdefault('by_epoch', False)
+ for info in log_config['hooks']:
+ info.setdefault('by_epoch', False)
self.register_logger_hooks(log_config)
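A quick sanity check of what the patch changes, with illustrative values only:
```python
# After the patch, by_epoch is defaulted on each hook dict rather than on the
# top-level log_config, so it actually reaches the constructed logger hooks.
log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')])

for info in log_config['hooks']:
    info.setdefault('by_epoch', False)

assert log_config['hooks'][0]['by_epoch'] is False  # iteration-based logging
```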
| {"golden_diff": "diff --git a/mmcv/runner/iter_based_runner.py b/mmcv/runner/iter_based_runner.py\n--- a/mmcv/runner/iter_based_runner.py\n+++ b/mmcv/runner/iter_based_runner.py\n@@ -222,5 +222,6 @@\n self.register_checkpoint_hook(checkpoint_config)\n self.register_hook(IterTimerHook())\n if log_config is not None:\n- log_config.setdefault('by_epoch', False)\n+ for info in log_config['hooks']:\n+ info.setdefault('by_epoch', False)\n self.register_logger_hooks(log_config)\n", "issue": "Bug feedback.\nThanks for your codes. \r\nIn [mmcv/runner/base_runner](https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/base_runner.py#L385), \r\n` def register_logger_hooks(self, log_config):\r\n log_interval = log_config['interval']\r\n for info in log_config['hooks']:\r\n logger_hook = mmcv.build_from_cfg(\r\n info, HOOKS, default_args=dict(interval=log_interval))\r\n self.register_hook(logger_hook, priority='VERY_LOW')\r\n`\r\n.\r\nOnly the argument 'interval' in log_config is fed into logger_hook. However, the argument 'by_epoch' is ignored, always true, so that if iter_based_runner is used, the logger will work unexpectedly. \n", "before_files": [{"content": "# Copyright (c) Open-MMLab. All rights reserved.\nimport os.path as osp\nimport time\n\nimport torch\nfrom torch.optim import Optimizer\n\nimport mmcv\nfrom .base_runner import BaseRunner\nfrom .checkpoint import save_checkpoint\nfrom .hooks import IterTimerHook\nfrom .utils import get_host_info\n\n\nclass IterLoader:\n\n def __init__(self, dataloader):\n self._dataloader = dataloader\n self.iter_loader = iter(self._dataloader)\n self._epoch = 0\n\n @property\n def epoch(self):\n return self._epoch\n\n def __next__(self):\n try:\n data = next(self.iter_loader)\n except StopIteration:\n self._epoch += 1\n if hasattr(self._dataloader.sampler, 'set_epoch'):\n self._dataloader.sampler.set_epoch(self._epoch)\n self.iter_loader = iter(self._dataloader)\n data = next(self.iter_loader)\n\n return data\n\n def __len__(self):\n return len(self._dataloader)\n\n\nclass IterBasedRunner(BaseRunner):\n \"\"\"Iteration-based Runner.\n\n This runner train models iteration by iteration.\n \"\"\"\n\n def train(self, data_loader, **kwargs):\n self.model.train()\n self.mode = 'train'\n self.data_loader = data_loader\n self._epoch = data_loader.epoch\n self.call_hook('before_train_iter')\n data_batch = next(data_loader)\n outputs = self.model.train_step(data_batch, self.optimizer, **kwargs)\n if not isinstance(outputs, dict):\n raise TypeError('model.train_step() must return a dict')\n if 'log_vars' in outputs:\n self.log_buffer.update(outputs['log_vars'], outputs['num_samples'])\n self.outputs = outputs\n self.call_hook('after_train_iter')\n self._inner_iter += 1\n self._iter += 1\n\n def val(self, data_loader, **kwargs):\n self.model.eval()\n self.mode = 'val'\n self.data_loader = data_loader\n self.call_hook('before_val_iter')\n data_batch = next(data_loader)\n outputs = self.model.val_step(data_batch, **kwargs)\n if not isinstance(outputs, dict):\n raise TypeError('model.val_step() must return a dict')\n if 'log_vars' in outputs:\n self.log_buffer.update(outputs['log_vars'], outputs['num_samples'])\n self.outputs = outputs\n self.call_hook('after_val_iter')\n self._inner_iter += 1\n\n def run(self, data_loaders, workflow, max_iters, **kwargs):\n \"\"\"Start running.\n\n Args:\n data_loaders (list[:obj:`DataLoader`]): Dataloaders for training\n and validation.\n workflow (list[tuple]): A list of (phase, iters) to specify the\n running order and 
iterations. E.g, [('train', 10000),\n ('val', 1000)] means running 10000 iterations for training and\n 1000 iterations for validation, iteratively.\n max_iters (int): Total training iterations.\n \"\"\"\n assert isinstance(data_loaders, list)\n assert mmcv.is_list_of(workflow, tuple)\n assert len(data_loaders) == len(workflow)\n\n self._max_iters = max_iters\n work_dir = self.work_dir if self.work_dir is not None else 'NONE'\n self.logger.info('Start running, host: %s, work_dir: %s',\n get_host_info(), work_dir)\n self.logger.info('workflow: %s, max: %d iters', workflow, max_iters)\n self.call_hook('before_run')\n\n iter_loaders = [IterLoader(x) for x in data_loaders]\n\n self.call_hook('before_epoch')\n\n while self.iter < max_iters:\n for i, flow in enumerate(workflow):\n self._inner_iter = 0\n mode, iters = flow\n if not isinstance(mode, str) or not hasattr(self, mode):\n raise ValueError(\n 'runner has no method named \"{}\" to run a workflow'.\n format(mode))\n iter_runner = getattr(self, mode)\n for _ in range(iters):\n if mode == 'train' and self.iter >= max_iters:\n break\n iter_runner(iter_loaders[i], **kwargs)\n\n time.sleep(1) # wait for some hooks like loggers to finish\n self.call_hook('after_epoch')\n self.call_hook('after_run')\n\n def resume(self,\n checkpoint,\n resume_optimizer=True,\n map_location='default'):\n \"\"\"Resume model from checkpoint.\n\n Args:\n checkpoint (str): Checkpoint to resume from.\n resume_optimizer (bool, optional): Whether resume the optimizer(s)\n if the checkpoint file includes optimizer(s). Default to True.\n map_location (str, optional): Same as :func:`torch.load`.\n Default to 'default'.\n \"\"\"\n if map_location == 'default':\n device_id = torch.cuda.current_device()\n checkpoint = self.load_checkpoint(\n checkpoint,\n map_location=lambda storage, loc: storage.cuda(device_id))\n else:\n checkpoint = self.load_checkpoint(\n checkpoint, map_location=map_location)\n\n self._epoch = checkpoint['meta']['epoch']\n self._iter = checkpoint['meta']['iter']\n self._inner_iter = checkpoint['meta']['iter']\n if 'optimizer' in checkpoint and resume_optimizer:\n if isinstance(self.optimizer, Optimizer):\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n elif isinstance(self.optimizer, dict):\n for k in self.optimizer.keys():\n self.optimizer[k].load_state_dict(\n checkpoint['optimizer'][k])\n\n self.logger.info(f'resumed from epoch: {self.epoch}, iter {self.iter}')\n\n def save_checkpoint(self,\n out_dir,\n filename_tmpl='iter_{}.pth',\n meta=None,\n save_optimizer=True,\n create_symlink=True):\n \"\"\"Save checkpoint to file.\n\n Args:\n out_dir (str): Directory to save checkpoint files.\n filename_tmpl (str, optional): Checkpoint file template.\n Defaults to 'iter_{}.pth'.\n meta (dict, optional): Metadata to be saved in checkpoint.\n Defaults to None.\n save_optimizer (bool, optional): Whether save optimizer.\n Defaults to True.\n create_symlink (bool, optional): Whether create symlink to the\n latest checkpoint file. 
Defaults to True.\n \"\"\"\n if meta is None:\n meta = dict(iter=self.iter + 1, epoch=self.epoch + 1)\n elif isinstance(meta, dict):\n meta.update(iter=self.iter + 1, epoch=self.epoch + 1)\n else:\n raise TypeError(\n f'meta should be a dict or None, but got {type(meta)}')\n if self.meta is not None:\n meta.update(self.meta)\n\n filename = filename_tmpl.format(self.iter + 1)\n filepath = osp.join(out_dir, filename)\n optimizer = self.optimizer if save_optimizer else None\n save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta)\n # in some environments, `os.symlink` is not supported, you may need to\n # set `create_symlink` to False\n if create_symlink:\n mmcv.symlink(filename, osp.join(out_dir, 'latest.pth'))\n\n def register_training_hooks(self,\n lr_config,\n optimizer_config=None,\n checkpoint_config=None,\n log_config=None,\n momentum_config=None):\n \"\"\"Register default hooks for iter-based training.\n\n Default hooks include:\n\n - LrUpdaterHook\n - MomentumUpdaterHook\n - OptimizerStepperHook\n - CheckpointSaverHook\n - IterTimerHook\n - LoggerHook(s)\n \"\"\"\n if checkpoint_config is not None:\n checkpoint_config.setdefault('by_epoch', False)\n if lr_config is not None:\n lr_config.setdefault('by_epoch', False)\n self.register_lr_hook(lr_config)\n self.register_momentum_hook(momentum_config)\n self.register_optimizer_hook(optimizer_config)\n self.register_checkpoint_hook(checkpoint_config)\n self.register_hook(IterTimerHook())\n if log_config is not None:\n log_config.setdefault('by_epoch', False)\n self.register_logger_hooks(log_config)\n", "path": "mmcv/runner/iter_based_runner.py"}], "after_files": [{"content": "# Copyright (c) Open-MMLab. All rights reserved.\nimport os.path as osp\nimport time\n\nimport torch\nfrom torch.optim import Optimizer\n\nimport mmcv\nfrom .base_runner import BaseRunner\nfrom .checkpoint import save_checkpoint\nfrom .hooks import IterTimerHook\nfrom .utils import get_host_info\n\n\nclass IterLoader:\n\n def __init__(self, dataloader):\n self._dataloader = dataloader\n self.iter_loader = iter(self._dataloader)\n self._epoch = 0\n\n @property\n def epoch(self):\n return self._epoch\n\n def __next__(self):\n try:\n data = next(self.iter_loader)\n except StopIteration:\n self._epoch += 1\n if hasattr(self._dataloader.sampler, 'set_epoch'):\n self._dataloader.sampler.set_epoch(self._epoch)\n self.iter_loader = iter(self._dataloader)\n data = next(self.iter_loader)\n\n return data\n\n def __len__(self):\n return len(self._dataloader)\n\n\nclass IterBasedRunner(BaseRunner):\n \"\"\"Iteration-based Runner.\n\n This runner train models iteration by iteration.\n \"\"\"\n\n def train(self, data_loader, **kwargs):\n self.model.train()\n self.mode = 'train'\n self.data_loader = data_loader\n self._epoch = data_loader.epoch\n self.call_hook('before_train_iter')\n data_batch = next(data_loader)\n outputs = self.model.train_step(data_batch, self.optimizer, **kwargs)\n if not isinstance(outputs, dict):\n raise TypeError('model.train_step() must return a dict')\n if 'log_vars' in outputs:\n self.log_buffer.update(outputs['log_vars'], outputs['num_samples'])\n self.outputs = outputs\n self.call_hook('after_train_iter')\n self._inner_iter += 1\n self._iter += 1\n\n def val(self, data_loader, **kwargs):\n self.model.eval()\n self.mode = 'val'\n self.data_loader = data_loader\n self.call_hook('before_val_iter')\n data_batch = next(data_loader)\n outputs = self.model.val_step(data_batch, **kwargs)\n if not isinstance(outputs, dict):\n raise 
TypeError('model.val_step() must return a dict')\n if 'log_vars' in outputs:\n self.log_buffer.update(outputs['log_vars'], outputs['num_samples'])\n self.outputs = outputs\n self.call_hook('after_val_iter')\n self._inner_iter += 1\n\n def run(self, data_loaders, workflow, max_iters, **kwargs):\n \"\"\"Start running.\n\n Args:\n data_loaders (list[:obj:`DataLoader`]): Dataloaders for training\n and validation.\n workflow (list[tuple]): A list of (phase, iters) to specify the\n running order and iterations. E.g, [('train', 10000),\n ('val', 1000)] means running 10000 iterations for training and\n 1000 iterations for validation, iteratively.\n max_iters (int): Total training iterations.\n \"\"\"\n assert isinstance(data_loaders, list)\n assert mmcv.is_list_of(workflow, tuple)\n assert len(data_loaders) == len(workflow)\n\n self._max_iters = max_iters\n work_dir = self.work_dir if self.work_dir is not None else 'NONE'\n self.logger.info('Start running, host: %s, work_dir: %s',\n get_host_info(), work_dir)\n self.logger.info('workflow: %s, max: %d iters', workflow, max_iters)\n self.call_hook('before_run')\n\n iter_loaders = [IterLoader(x) for x in data_loaders]\n\n self.call_hook('before_epoch')\n\n while self.iter < max_iters:\n for i, flow in enumerate(workflow):\n self._inner_iter = 0\n mode, iters = flow\n if not isinstance(mode, str) or not hasattr(self, mode):\n raise ValueError(\n 'runner has no method named \"{}\" to run a workflow'.\n format(mode))\n iter_runner = getattr(self, mode)\n for _ in range(iters):\n if mode == 'train' and self.iter >= max_iters:\n break\n iter_runner(iter_loaders[i], **kwargs)\n\n time.sleep(1) # wait for some hooks like loggers to finish\n self.call_hook('after_epoch')\n self.call_hook('after_run')\n\n def resume(self,\n checkpoint,\n resume_optimizer=True,\n map_location='default'):\n \"\"\"Resume model from checkpoint.\n\n Args:\n checkpoint (str): Checkpoint to resume from.\n resume_optimizer (bool, optional): Whether resume the optimizer(s)\n if the checkpoint file includes optimizer(s). Default to True.\n map_location (str, optional): Same as :func:`torch.load`.\n Default to 'default'.\n \"\"\"\n if map_location == 'default':\n device_id = torch.cuda.current_device()\n checkpoint = self.load_checkpoint(\n checkpoint,\n map_location=lambda storage, loc: storage.cuda(device_id))\n else:\n checkpoint = self.load_checkpoint(\n checkpoint, map_location=map_location)\n\n self._epoch = checkpoint['meta']['epoch']\n self._iter = checkpoint['meta']['iter']\n self._inner_iter = checkpoint['meta']['iter']\n if 'optimizer' in checkpoint and resume_optimizer:\n if isinstance(self.optimizer, Optimizer):\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n elif isinstance(self.optimizer, dict):\n for k in self.optimizer.keys():\n self.optimizer[k].load_state_dict(\n checkpoint['optimizer'][k])\n\n self.logger.info(f'resumed from epoch: {self.epoch}, iter {self.iter}')\n\n def save_checkpoint(self,\n out_dir,\n filename_tmpl='iter_{}.pth',\n meta=None,\n save_optimizer=True,\n create_symlink=True):\n \"\"\"Save checkpoint to file.\n\n Args:\n out_dir (str): Directory to save checkpoint files.\n filename_tmpl (str, optional): Checkpoint file template.\n Defaults to 'iter_{}.pth'.\n meta (dict, optional): Metadata to be saved in checkpoint.\n Defaults to None.\n save_optimizer (bool, optional): Whether save optimizer.\n Defaults to True.\n create_symlink (bool, optional): Whether create symlink to the\n latest checkpoint file. 
Defaults to True.\n \"\"\"\n if meta is None:\n meta = dict(iter=self.iter + 1, epoch=self.epoch + 1)\n elif isinstance(meta, dict):\n meta.update(iter=self.iter + 1, epoch=self.epoch + 1)\n else:\n raise TypeError(\n f'meta should be a dict or None, but got {type(meta)}')\n if self.meta is not None:\n meta.update(self.meta)\n\n filename = filename_tmpl.format(self.iter + 1)\n filepath = osp.join(out_dir, filename)\n optimizer = self.optimizer if save_optimizer else None\n save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta)\n # in some environments, `os.symlink` is not supported, you may need to\n # set `create_symlink` to False\n if create_symlink:\n mmcv.symlink(filename, osp.join(out_dir, 'latest.pth'))\n\n def register_training_hooks(self,\n lr_config,\n optimizer_config=None,\n checkpoint_config=None,\n log_config=None,\n momentum_config=None):\n \"\"\"Register default hooks for iter-based training.\n\n Default hooks include:\n\n - LrUpdaterHook\n - MomentumUpdaterHook\n - OptimizerStepperHook\n - CheckpointSaverHook\n - IterTimerHook\n - LoggerHook(s)\n \"\"\"\n if checkpoint_config is not None:\n checkpoint_config.setdefault('by_epoch', False)\n if lr_config is not None:\n lr_config.setdefault('by_epoch', False)\n self.register_lr_hook(lr_config)\n self.register_momentum_hook(momentum_config)\n self.register_optimizer_hook(optimizer_config)\n self.register_checkpoint_hook(checkpoint_config)\n self.register_hook(IterTimerHook())\n if log_config is not None:\n for info in log_config['hooks']:\n info.setdefault('by_epoch', False)\n self.register_logger_hooks(log_config)\n", "path": "mmcv/runner/iter_based_runner.py"}]} |
gh_patches_debug_1210 | rasdani/github-patches | git_diff | kserve__kserve-3005 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Load kubeconfig from dict
/kind feature
**Allow loading of kubeconfig from dictionary in KServe python client**
The Kubernetes python client has a function to load a kubeconfig from a dict, which is quite handy in some cases compared to the kubeconfig file. Currently the `KServeClient` supports only loading from a kubeconfig file, i.e., `config.load_kube_config()`.
I have verified that the [KServeClient](https://github.com/kserve/kserve/blob/master/python/kserve/kserve/api/kserve_client.py#L40) in the master branch does not have it.
--- END ISSUE ---
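For reference, a rough sketch of the usage this request is after; the kubeconfig entries are placeholders, not real cluster data:
```python
from kubernetes import config
from kserve import KServeClient

# A kubeconfig expressed as a plain dict, e.g. rendered from a template or
# fetched from a secret store instead of being read from ~/.kube/config.
kubeconfig_dict = {
    "apiVersion": "v1",
    "kind": "Config",
    "current-context": "example",
    "clusters": [{"name": "example",
                  "cluster": {"server": "https://127.0.0.1:6443"}}],
    "contexts": [{"name": "example",
                  "context": {"cluster": "example", "user": "example-user"}}],
    "users": [{"name": "example-user", "user": {"token": "placeholder-token"}}],
}

# The upstream Kubernetes client already supports this directly:
config.load_kube_config_from_dict(config_dict=kubeconfig_dict)

# The requested ergonomics on the KServe side would look roughly like this:
kserve_client = KServeClient(config_dict=kubeconfig_dict)
```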
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/kserve/kserve/api/kserve_client.py`
Content:
```
1 # Copyright 2021 The KServe Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import time
16 from urllib.parse import urlparse
17
18 import requests
19 from kubernetes import client, config
20
21 from .creds_utils import set_gcs_credentials, set_s3_credentials, set_azure_credentials
22 from .watch import isvc_watch
23 from ..constants import constants
24 from ..models import V1alpha1InferenceGraph
25 from ..utils import utils
26
27
28 class KServeClient(object):
29
30 def __init__(self, config_file=None, config_dict=None, context=None, # pylint: disable=too-many-arguments
31 client_configuration=None, persist_config=True):
32 """
33 KServe client constructor
34 :param config_file: kubeconfig file, defaults to ~/.kube/config
35 :param config_dict: Takes the config file as a dict.
36 :param context: kubernetes context
37 :param client_configuration: kubernetes configuration object
38 :param persist_config:
39 """
40 if config_file or config_dict or not utils.is_running_in_k8s():
41 if config_dict:
42 config.load_kube_config_from_dict(
43 config_dict=config_dict,
44 context=context,
45 client_configuration=None,
46 persist_config=True
47 )
48 else:
49 config.load_kube_config(
50 config_file=config_file,
51 context=context,
52 client_configuration=client_configuration,
53 persist_config=persist_config
54 )
55 else:
56 config.load_incluster_config()
57 self.core_api = client.CoreV1Api()
58 self.app_api = client.AppsV1Api()
59 self.api_instance = client.CustomObjectsApi()
60
61 def set_credentials(self, storage_type, namespace=None, credentials_file=None,
62 service_account=constants.DEFAULT_SA_NAME, **kwargs):
63 """
64 Setup credentials for KServe.
65
66 :param storage_type: Valid value: GCS or S3 (required)
67 :param namespace: inference service deployment namespace
68 :param credentials_file: the path for the credentials file.
69 :param service_account: the name of service account.
70 :param kwargs: Others parameters for each storage_type
71 :return:
72 """
73
74 if namespace is None:
75 namespace = utils.get_default_target_namespace()
76
77 if storage_type.lower() == 'gcs':
78 if credentials_file is None:
79 credentials_file = constants.GCS_DEFAULT_CREDS_FILE
80 set_gcs_credentials(namespace=namespace,
81 credentials_file=credentials_file,
82 service_account=service_account)
83 elif storage_type.lower() == 's3':
84 if credentials_file is None:
85 credentials_file = constants.S3_DEFAULT_CREDS_FILE
86 set_s3_credentials(namespace=namespace,
87 credentials_file=credentials_file,
88 service_account=service_account,
89 **kwargs)
90 elif storage_type.lower() == 'azure':
91 if credentials_file is None:
92 credentials_file = constants.AZ_DEFAULT_CREDS_FILE
93 set_azure_credentials(namespace=namespace,
94 credentials_file=credentials_file,
95 service_account=service_account)
96 else:
97 raise RuntimeError("Invalid storage_type: %s, only support GCS, S3 and Azure\
98 currently.\n" % storage_type)
99
100 def create(self, inferenceservice, namespace=None, watch=False,
101 timeout_seconds=600): # pylint:disable=inconsistent-return-statements
102 """
103 Create the inference service
104 :param inferenceservice: inference service object
105 :param namespace: defaults to current or default namespace
106 :param watch: True to watch the created service until timeout elapsed or status is ready
107 :param timeout_seconds: timeout seconds for watch, default to 600s
108 :return: created inference service
109 """
110
111 version = inferenceservice.api_version.split("/")[1]
112
113 if namespace is None:
114 namespace = utils.get_isvc_namespace(inferenceservice)
115
116 try:
117 outputs = self.api_instance.create_namespaced_custom_object(
118 constants.KSERVE_GROUP,
119 version,
120 namespace,
121 constants.KSERVE_PLURAL,
122 inferenceservice)
123 except client.rest.ApiException as e:
124 raise RuntimeError(
125 "Exception when calling CustomObjectsApi->create_namespaced_custom_object:\
126 %s\n" % e)
127
128 if watch:
129 isvc_watch(
130 name=outputs['metadata']['name'],
131 namespace=namespace,
132 timeout_seconds=timeout_seconds)
133 else:
134 return outputs
135
136 def get(self, name=None, namespace=None, watch=False, timeout_seconds=600,
137 version=constants.KSERVE_V1BETA1_VERSION): # pylint:disable=inconsistent-return-statements
138 """
139 Get the inference service
140 :param name: existing inference service name
141 :param namespace: defaults to current or default namespace
142 :param watch: True to watch the service until timeout elapsed or status is ready
143 :param timeout_seconds: timeout seconds for watch, default to 600s
144 :param version: api group version
145 :return: inference service
146 """
147
148 if namespace is None:
149 namespace = utils.get_default_target_namespace()
150
151 if name:
152 if watch:
153 isvc_watch(
154 name=name,
155 namespace=namespace,
156 timeout_seconds=timeout_seconds)
157 else:
158 try:
159 return self.api_instance.get_namespaced_custom_object(
160 constants.KSERVE_GROUP,
161 version,
162 namespace,
163 constants.KSERVE_PLURAL,
164 name)
165 except client.rest.ApiException as e:
166 raise RuntimeError(
167 "Exception when calling CustomObjectsApi->get_namespaced_custom_object:\
168 %s\n" % e)
169 else:
170 if watch:
171 isvc_watch(
172 namespace=namespace,
173 timeout_seconds=timeout_seconds)
174 else:
175 try:
176 return self.api_instance.list_namespaced_custom_object(
177 constants.KSERVE_GROUP,
178 version,
179 namespace,
180 constants.KSERVE_PLURAL)
181 except client.rest.ApiException as e:
182 raise RuntimeError(
183 "Exception when calling CustomObjectsApi->list_namespaced_custom_object:\
184 %s\n" % e)
185
186 def patch(self, name, inferenceservice, namespace=None, watch=False,
187 timeout_seconds=600): # pylint:disable=too-many-arguments,inconsistent-return-statements
188 """
189 Patch existing inference service
190 :param name: existing inference service name
191 :param inferenceservice: patched inference service
192 :param namespace: defaults to current or default namespace
193 :param watch: True to watch the patched service until timeout elapsed or status is ready
194 :param timeout_seconds: timeout seconds for watch, default to 600s
195 :return: patched inference service
196 """
197
198 version = inferenceservice.api_version.split("/")[1]
199 if namespace is None:
200 namespace = utils.get_isvc_namespace(inferenceservice)
201
202 try:
203 outputs = self.api_instance.patch_namespaced_custom_object(
204 constants.KSERVE_GROUP,
205 version,
206 namespace,
207 constants.KSERVE_PLURAL,
208 name,
209 inferenceservice)
210 except client.rest.ApiException as e:
211 raise RuntimeError(
212 "Exception when calling CustomObjectsApi->patch_namespaced_custom_object:\
213 %s\n" % e)
214
215 if watch:
216 # Sleep 3 to avoid status still be True within a very short time.
217 time.sleep(3)
218 isvc_watch(
219 name=outputs['metadata']['name'],
220 namespace=namespace,
221 timeout_seconds=timeout_seconds)
222 else:
223 return outputs
224
225 def replace(self, name, inferenceservice, namespace=None, watch=False,
226 timeout_seconds=600): # pylint:disable=too-many-arguments,inconsistent-return-statements
227 """
228 Replace the existing inference service
229 :param name: existing inference service name
230 :param inferenceservice: replacing inference service
231 :param namespace: defaults to current or default namespace
232 :param watch: True to watch the replaced service until timeout elapsed or status is ready
233 :param timeout_seconds: timeout seconds for watch, default to 600s
234 :return: replaced inference service
235 """
236
237 version = inferenceservice.api_version.split("/")[1]
238
239 if namespace is None:
240 namespace = utils.get_isvc_namespace(inferenceservice)
241
242 if inferenceservice.metadata.resource_version is None:
243 current_isvc = self.get(name, namespace=namespace)
244 current_resource_version = current_isvc['metadata']['resourceVersion']
245 inferenceservice.metadata.resource_version = current_resource_version
246
247 try:
248 outputs = self.api_instance.replace_namespaced_custom_object(
249 constants.KSERVE_GROUP,
250 version,
251 namespace,
252 constants.KSERVE_PLURAL,
253 name,
254 inferenceservice)
255 except client.rest.ApiException as e:
256 raise RuntimeError(
257 "Exception when calling CustomObjectsApi->replace_namespaced_custom_object:\
258 %s\n" % e)
259
260 if watch:
261 isvc_watch(
262 name=outputs['metadata']['name'],
263 namespace=namespace,
264 timeout_seconds=timeout_seconds,
265 generation=outputs['metadata']['generation'])
266 else:
267 return outputs
268
269 def delete(self, name, namespace=None, version=constants.KSERVE_V1BETA1_VERSION):
270 """
271 Delete the inference service
272 :param name: inference service name
273 :param namespace: defaults to current or default namespace
274 :param version: api group version
275 :return:
276 """
277 if namespace is None:
278 namespace = utils.get_default_target_namespace()
279
280 try:
281 return self.api_instance.delete_namespaced_custom_object(
282 constants.KSERVE_GROUP,
283 version,
284 namespace,
285 constants.KSERVE_PLURAL,
286 name)
287 except client.rest.ApiException as e:
288 raise RuntimeError(
289 "Exception when calling CustomObjectsApi->delete_namespaced_custom_object:\
290 %s\n" % e)
291
292 def is_isvc_ready(self, name, namespace=None,
293 version=constants.KSERVE_V1BETA1_VERSION): # pylint:disable=inconsistent-return-statements
294 """
295 Check if the inference service is ready.
296 :param version:
297 :param name: inference service name
298 :param namespace: defaults to current or default namespace
299 :return:
300 """
301 kfsvc_status = self.get(name, namespace=namespace,
302 version=version)
303 if 'status' not in kfsvc_status:
304 return False
305 status = 'Unknown'
306 for condition in kfsvc_status['status'].get('conditions', {}):
307 if condition.get('type', '') == 'Ready':
308 status = condition.get('status', 'Unknown')
309 return status.lower() == "true"
310 return False
311
312 def wait_isvc_ready(self, name, namespace=None, # pylint:disable=too-many-arguments
313 watch=False,
314 timeout_seconds=600,
315 polling_interval=10,
316 version=constants.KSERVE_V1BETA1_VERSION):
317 """
318 Waiting for inference service ready, print out the inference service if timeout.
319 :param name: inference service name
320 :param namespace: defaults to current or default namespace
321 :param watch: True to watch the service until timeout elapsed or status is ready
322 :param timeout_seconds: timeout seconds for waiting, default to 600s.
323 Print out the InferenceService if timeout.
324 :param polling_interval: The time interval to poll status
325 :param version: api group version
326 :return:
327 """
328 if watch:
329 isvc_watch(
330 name=name,
331 namespace=namespace,
332 timeout_seconds=timeout_seconds)
333 else:
334 for _ in range(round(timeout_seconds / polling_interval)):
335 time.sleep(polling_interval)
336 if self.is_isvc_ready(name, namespace=namespace, version=version):
337 return
338
339 current_isvc = self.get(name, namespace=namespace, version=version)
340 raise RuntimeError("Timeout to start the InferenceService {}. \
341 The InferenceService is as following: {}".format(name, current_isvc))
342
343 def create_trained_model(self, trainedmodel, namespace):
344 """
345 Create a trained model
346 :param trainedmodel: trainedmodel object
347 :param namespace: defaults to current or default namespace
348 :return:
349 """
350 version = trainedmodel.api_version.split("/")[1]
351
352 try:
353 self.api_instance.create_namespaced_custom_object(
354 constants.KSERVE_GROUP,
355 version,
356 namespace,
357 constants.KSERVE_PLURAL_TRAINEDMODEL,
358 trainedmodel)
359 except client.rest.ApiException as e:
360 raise RuntimeError(
361 "Exception when calling CustomObjectsApi->create_namespaced_custom_object:\
362 %s\n" % e)
363
364 def delete_trained_model(
365 self, name, namespace=None, version=constants.KSERVE_V1ALPHA1_VERSION
366 ):
367 """
368 Delete the trained model
369 :param name: trained model name
370 :param namespace: defaults to current or default namespace
371 :param version: api group version
372 :return:
373 """
374 if namespace is None:
375 namespace = utils.get_default_target_namespace()
376
377 try:
378 return self.api_instance.delete_namespaced_custom_object(
379 constants.KSERVE_GROUP,
380 version,
381 namespace,
382 constants.KSERVE_PLURAL_TRAINEDMODEL,
383 name,
384 )
385 except client.rest.ApiException as e:
386 raise RuntimeError(
387 "Exception when calling CustomObjectsApi->delete_namespaced_custom_object:\
388 %s\n"
389 % e
390 )
391
392 def wait_model_ready(self, service_name, model_name, isvc_namespace=None, # pylint:disable=too-many-arguments
393 isvc_version=constants.KSERVE_V1BETA1_VERSION,
394 cluster_ip=None,
395 protocol_version="v1",
396 timeout_seconds=600,
397 polling_interval=10):
398 """
399 Waiting for model to be ready to service, print out trained model if timeout.
400 :param service_name: inference service name
401 :param model_name: trained model name
402 :param isvc_namespace: defaults to current or default namespace of inference service
403 :param isvc_version: api group version of inference service
404 :param protocol_version: version of the dataplane protocol
405 :param cluster_ip: ip of the kubernetes cluster
406 :param timeout_seconds: timeout seconds for waiting, default to 600s.
407 Print out the InferenceService if timeout.
408 :param polling_interval: The time interval to poll status
409 :return:
410 """
411 isvc = self.get(
412 service_name,
413 namespace=isvc_namespace,
414 version=isvc_version,
415 )
416
417 host = urlparse(isvc["status"]["url"]).netloc
418 headers = {"Host": host}
419
420 for _ in range(round(timeout_seconds / polling_interval)):
421 time.sleep(polling_interval)
422 # Check model health API
423 url = f"http://{cluster_ip}/{protocol_version}/models/{model_name}"
424 response = requests.get(url, headers=headers).status_code
425 if response == 200:
426 return
427
428 raise RuntimeError(f"InferenceService ({service_name}) has not loaded the \
429 model ({model_name}) before the timeout.")
430
431 def create_inference_graph(self, inferencegraph: V1alpha1InferenceGraph, namespace: str = None) -> object:
432 """
433 create a inference graph
434
435 :param inferencegraph: inference graph object
436 :param namespace: defaults to current or default namespace
437 :return: created inference graph
438 """
439 version = inferencegraph.api_version.split("/")[1]
440 if namespace is None:
441 namespace = utils.get_ig_namespace(inferencegraph)
442
443 try:
444 outputs = self.api_instance.create_namespaced_custom_object(
445 constants.KSERVE_GROUP,
446 version,
447 namespace,
448 constants.KSERVE_PLURAL_INFERENCEGRAPH,
449 inferencegraph
450 )
451 except client.rest.ApiException as e:
452 raise RuntimeError(
453 "Exception when calling CustomObjectsApi->create_namespaced_custom_object:\
454 %s\n"
455 % e
456 )
457 return outputs
458
459 def delete_inference_graph(self, name: str, namespace: str = None,
460 version: str = constants.KSERVE_V1ALPHA1_VERSION):
461 """
462 Delete the inference graph
463
464 :param name: inference graph name
465 :param namespace: defaults to current or default namespace
466 :param version: api group version
467 """
468 if namespace is None:
469 namespace = utils.get_default_target_namespace()
470
471 try:
472 self.api_instance.delete_namespaced_custom_object(
473 constants.KSERVE_GROUP,
474 version,
475 namespace,
476 constants.KSERVE_PLURAL_INFERENCEGRAPH,
477 name,
478 )
479 except client.rest.ApiException as e:
480 raise RuntimeError(
481 "Exception when calling CustomObjectsApi->create_namespaced_custom_object:\
482 %s\n"
483 % e
484 )
485
486 def get_inference_graph(self, name: str, namespace: str = None,
487 version: str = constants.KSERVE_V1ALPHA1_VERSION) -> object:
488 """
489 Get the inference graph
490
491 :param name: existing inference graph name
492 :param namespace: defaults to current or default namespace
493 :param version: api group version
494 :return: inference graph
495 """
496
497 if namespace is None:
498 namespace = utils.get_default_target_namespace()
499
500 try:
501 return self.api_instance.get_namespaced_custom_object(
502 constants.KSERVE_GROUP,
503 version,
504 namespace,
505 constants.KSERVE_PLURAL_INFERENCEGRAPH,
506 name)
507 except client.rest.ApiException as e:
508 raise RuntimeError(
509 "Exception when calling CustomObjectsApi->get_namespaced_custom_object:\
510 %s\n" % e)
511
512 def is_ig_ready(self, name: str, namespace: str = None, version: str = constants.KSERVE_V1ALPHA1_VERSION) -> bool:
513 """
514 Check if the inference graph is ready.
515
516 :param name: inference graph name
517 :param namespace: defaults to current or default namespace
518 :param version: api group version
519 :return: true if inference graph is ready, else false.
520 """
521 if namespace is None:
522 namespace = utils.get_default_target_namespace()
523
524 ig: dict = self.get_inference_graph(name, namespace=namespace, version=version)
525 for condition in ig.get('status', {}).get('conditions', {}):
526 if condition.get('type', '') == 'Ready':
527 status = condition.get('status', 'Unknown')
528 return status.lower() == "true"
529 return False
530
531 def wait_ig_ready(self, name: str, namespace: str = None, version: str = constants.KSERVE_V1ALPHA1_VERSION,
532 timeout_seconds: int = 600,
533 polling_interval: int = 10):
534 """
535 Wait for inference graph to be ready until timeout. Print out the inference graph if timeout.
536
537 :param name: inference graph name
538 :param namespace: defaults to current or default namespace
539 :param version: api group version
540 :param timeout_seconds: timeout seconds for waiting, default to 600s.
541 :param polling_interval: The time interval to poll status
542 :return:
543 """
544 for _ in range(round(timeout_seconds / polling_interval)):
545 time.sleep(polling_interval)
546 if self.is_ig_ready(name, namespace, version):
547 return
548
549 current_ig = self.get_inference_graph(name, namespace=namespace, version=version)
550 raise RuntimeError("Timeout to start the InferenceGraph {}. \
551 The InferenceGraph is as following: {}".format(name, current_ig))
552
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/python/kserve/kserve/api/kserve_client.py b/python/kserve/kserve/api/kserve_client.py
--- a/python/kserve/kserve/api/kserve_client.py
+++ b/python/kserve/kserve/api/kserve_client.py
@@ -43,7 +43,7 @@
config_dict=config_dict,
context=context,
client_configuration=None,
- persist_config=True
+ persist_config=persist_config
)
else:
config.load_kube_config(
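A small usage note on why forwarding `persist_config` matters, assuming the `kubeconfig_dict` from the sketch above; the flag value is only an example:
```python
from kserve import KServeClient

# With the patch, the caller's choice of persist_config is honoured on the
# dict branch too; previously that branch hardcoded persist_config=True.
client = KServeClient(
    config_dict=kubeconfig_dict,
    context="example",
    persist_config=False,
)
```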
| {"golden_diff": "diff --git a/python/kserve/kserve/api/kserve_client.py b/python/kserve/kserve/api/kserve_client.py\n--- a/python/kserve/kserve/api/kserve_client.py\n+++ b/python/kserve/kserve/api/kserve_client.py\n@@ -43,7 +43,7 @@\n config_dict=config_dict,\n context=context,\n client_configuration=None,\n- persist_config=True\n+ persist_config=persist_config\n )\n else:\n config.load_kube_config(\n", "issue": "Load kubeconfig from dict\n/kind feature\r\n\r\n**Allow loading of kubeconfig from dictionary in KServe python client**\r\nThe Kubernetes python client has a function to load kubeconfigs from dict which is quite handy in somecases compared to the kubecfg file. Currently the `KServeClient` supports only loading from kubeconfig i.e, `config.load_kube_config()`.\r\n\r\nI have verified that the [KServeClient](https://github.com/kserve/kserve/blob/master/python/kserve/kserve/api/kserve_client.py#L40) in the master branch does not have it.\r\n\r\n\n", "before_files": [{"content": "# Copyright 2021 The KServe Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport time\nfrom urllib.parse import urlparse\n\nimport requests\nfrom kubernetes import client, config\n\nfrom .creds_utils import set_gcs_credentials, set_s3_credentials, set_azure_credentials\nfrom .watch import isvc_watch\nfrom ..constants import constants\nfrom ..models import V1alpha1InferenceGraph\nfrom ..utils import utils\n\n\nclass KServeClient(object):\n\n def __init__(self, config_file=None, config_dict=None, context=None, # pylint: disable=too-many-arguments\n client_configuration=None, persist_config=True):\n \"\"\"\n KServe client constructor\n :param config_file: kubeconfig file, defaults to ~/.kube/config\n :param config_dict: Takes the config file as a dict.\n :param context: kubernetes context\n :param client_configuration: kubernetes configuration object\n :param persist_config:\n \"\"\"\n if config_file or config_dict or not utils.is_running_in_k8s():\n if config_dict:\n config.load_kube_config_from_dict(\n config_dict=config_dict,\n context=context,\n client_configuration=None,\n persist_config=True\n )\n else:\n config.load_kube_config(\n config_file=config_file,\n context=context,\n client_configuration=client_configuration,\n persist_config=persist_config\n )\n else:\n config.load_incluster_config()\n self.core_api = client.CoreV1Api()\n self.app_api = client.AppsV1Api()\n self.api_instance = client.CustomObjectsApi()\n\n def set_credentials(self, storage_type, namespace=None, credentials_file=None,\n service_account=constants.DEFAULT_SA_NAME, **kwargs):\n \"\"\"\n Setup credentials for KServe.\n\n :param storage_type: Valid value: GCS or S3 (required)\n :param namespace: inference service deployment namespace\n :param credentials_file: the path for the credentials file.\n :param service_account: the name of service account.\n :param kwargs: Others parameters for each storage_type\n :return:\n \"\"\"\n\n if namespace is None:\n namespace = utils.get_default_target_namespace()\n\n if 
storage_type.lower() == 'gcs':\n if credentials_file is None:\n credentials_file = constants.GCS_DEFAULT_CREDS_FILE\n set_gcs_credentials(namespace=namespace,\n credentials_file=credentials_file,\n service_account=service_account)\n elif storage_type.lower() == 's3':\n if credentials_file is None:\n credentials_file = constants.S3_DEFAULT_CREDS_FILE\n set_s3_credentials(namespace=namespace,\n credentials_file=credentials_file,\n service_account=service_account,\n **kwargs)\n elif storage_type.lower() == 'azure':\n if credentials_file is None:\n credentials_file = constants.AZ_DEFAULT_CREDS_FILE\n set_azure_credentials(namespace=namespace,\n credentials_file=credentials_file,\n service_account=service_account)\n else:\n raise RuntimeError(\"Invalid storage_type: %s, only support GCS, S3 and Azure\\\n currently.\\n\" % storage_type)\n\n def create(self, inferenceservice, namespace=None, watch=False,\n timeout_seconds=600): # pylint:disable=inconsistent-return-statements\n \"\"\"\n Create the inference service\n :param inferenceservice: inference service object\n :param namespace: defaults to current or default namespace\n :param watch: True to watch the created service until timeout elapsed or status is ready\n :param timeout_seconds: timeout seconds for watch, default to 600s\n :return: created inference service\n \"\"\"\n\n version = inferenceservice.api_version.split(\"/\")[1]\n\n if namespace is None:\n namespace = utils.get_isvc_namespace(inferenceservice)\n\n try:\n outputs = self.api_instance.create_namespaced_custom_object(\n constants.KSERVE_GROUP,\n version,\n namespace,\n constants.KSERVE_PLURAL,\n inferenceservice)\n except client.rest.ApiException as e:\n raise RuntimeError(\n \"Exception when calling CustomObjectsApi->create_namespaced_custom_object:\\\n %s\\n\" % e)\n\n if watch:\n isvc_watch(\n name=outputs['metadata']['name'],\n namespace=namespace,\n timeout_seconds=timeout_seconds)\n else:\n return outputs\n\n def get(self, name=None, namespace=None, watch=False, timeout_seconds=600,\n version=constants.KSERVE_V1BETA1_VERSION): # pylint:disable=inconsistent-return-statements\n \"\"\"\n Get the inference service\n :param name: existing inference service name\n :param namespace: defaults to current or default namespace\n :param watch: True to watch the service until timeout elapsed or status is ready\n :param timeout_seconds: timeout seconds for watch, default to 600s\n :param version: api group version\n :return: inference service\n \"\"\"\n\n if namespace is None:\n namespace = utils.get_default_target_namespace()\n\n if name:\n if watch:\n isvc_watch(\n name=name,\n namespace=namespace,\n timeout_seconds=timeout_seconds)\n else:\n try:\n return self.api_instance.get_namespaced_custom_object(\n constants.KSERVE_GROUP,\n version,\n namespace,\n constants.KSERVE_PLURAL,\n name)\n except client.rest.ApiException as e:\n raise RuntimeError(\n \"Exception when calling CustomObjectsApi->get_namespaced_custom_object:\\\n %s\\n\" % e)\n else:\n if watch:\n isvc_watch(\n namespace=namespace,\n timeout_seconds=timeout_seconds)\n else:\n try:\n return self.api_instance.list_namespaced_custom_object(\n constants.KSERVE_GROUP,\n version,\n namespace,\n constants.KSERVE_PLURAL)\n except client.rest.ApiException as e:\n raise RuntimeError(\n \"Exception when calling CustomObjectsApi->list_namespaced_custom_object:\\\n %s\\n\" % e)\n\n def patch(self, name, inferenceservice, namespace=None, watch=False,\n timeout_seconds=600): # 
pylint:disable=too-many-arguments,inconsistent-return-statements\n \"\"\"\n Patch existing inference service\n :param name: existing inference service name\n :param inferenceservice: patched inference service\n :param namespace: defaults to current or default namespace\n :param watch: True to watch the patched service until timeout elapsed or status is ready\n :param timeout_seconds: timeout seconds for watch, default to 600s\n :return: patched inference service\n \"\"\"\n\n version = inferenceservice.api_version.split(\"/\")[1]\n if namespace is None:\n namespace = utils.get_isvc_namespace(inferenceservice)\n\n try:\n outputs = self.api_instance.patch_namespaced_custom_object(\n constants.KSERVE_GROUP,\n version,\n namespace,\n constants.KSERVE_PLURAL,\n name,\n inferenceservice)\n except client.rest.ApiException as e:\n raise RuntimeError(\n \"Exception when calling CustomObjectsApi->patch_namespaced_custom_object:\\\n %s\\n\" % e)\n\n if watch:\n # Sleep 3 to avoid status still be True within a very short time.\n time.sleep(3)\n isvc_watch(\n name=outputs['metadata']['name'],\n namespace=namespace,\n timeout_seconds=timeout_seconds)\n else:\n return outputs\n\n def replace(self, name, inferenceservice, namespace=None, watch=False,\n timeout_seconds=600): # pylint:disable=too-many-arguments,inconsistent-return-statements\n \"\"\"\n Replace the existing inference service\n :param name: existing inference service name\n :param inferenceservice: replacing inference service\n :param namespace: defaults to current or default namespace\n :param watch: True to watch the replaced service until timeout elapsed or status is ready\n :param timeout_seconds: timeout seconds for watch, default to 600s\n :return: replaced inference service\n \"\"\"\n\n version = inferenceservice.api_version.split(\"/\")[1]\n\n if namespace is None:\n namespace = utils.get_isvc_namespace(inferenceservice)\n\n if inferenceservice.metadata.resource_version is None:\n current_isvc = self.get(name, namespace=namespace)\n current_resource_version = current_isvc['metadata']['resourceVersion']\n inferenceservice.metadata.resource_version = current_resource_version\n\n try:\n outputs = self.api_instance.replace_namespaced_custom_object(\n constants.KSERVE_GROUP,\n version,\n namespace,\n constants.KSERVE_PLURAL,\n name,\n inferenceservice)\n except client.rest.ApiException as e:\n raise RuntimeError(\n \"Exception when calling CustomObjectsApi->replace_namespaced_custom_object:\\\n %s\\n\" % e)\n\n if watch:\n isvc_watch(\n name=outputs['metadata']['name'],\n namespace=namespace,\n timeout_seconds=timeout_seconds,\n generation=outputs['metadata']['generation'])\n else:\n return outputs\n\n def delete(self, name, namespace=None, version=constants.KSERVE_V1BETA1_VERSION):\n \"\"\"\n Delete the inference service\n :param name: inference service name\n :param namespace: defaults to current or default namespace\n :param version: api group version\n :return:\n \"\"\"\n if namespace is None:\n namespace = utils.get_default_target_namespace()\n\n try:\n return self.api_instance.delete_namespaced_custom_object(\n constants.KSERVE_GROUP,\n version,\n namespace,\n constants.KSERVE_PLURAL,\n name)\n except client.rest.ApiException as e:\n raise RuntimeError(\n \"Exception when calling CustomObjectsApi->delete_namespaced_custom_object:\\\n %s\\n\" % e)\n\n def is_isvc_ready(self, name, namespace=None,\n version=constants.KSERVE_V1BETA1_VERSION): # pylint:disable=inconsistent-return-statements\n \"\"\"\n Check if the inference service is 
ready.\n :param version:\n :param name: inference service name\n :param namespace: defaults to current or default namespace\n :return:\n \"\"\"\n kfsvc_status = self.get(name, namespace=namespace,\n version=version)\n if 'status' not in kfsvc_status:\n return False\n status = 'Unknown'\n for condition in kfsvc_status['status'].get('conditions', {}):\n if condition.get('type', '') == 'Ready':\n status = condition.get('status', 'Unknown')\n return status.lower() == \"true\"\n return False\n\n def wait_isvc_ready(self, name, namespace=None, # pylint:disable=too-many-arguments\n watch=False,\n timeout_seconds=600,\n polling_interval=10,\n version=constants.KSERVE_V1BETA1_VERSION):\n \"\"\"\n Waiting for inference service ready, print out the inference service if timeout.\n :param name: inference service name\n :param namespace: defaults to current or default namespace\n :param watch: True to watch the service until timeout elapsed or status is ready\n :param timeout_seconds: timeout seconds for waiting, default to 600s.\n Print out the InferenceService if timeout.\n :param polling_interval: The time interval to poll status\n :param version: api group version\n :return:\n \"\"\"\n if watch:\n isvc_watch(\n name=name,\n namespace=namespace,\n timeout_seconds=timeout_seconds)\n else:\n for _ in range(round(timeout_seconds / polling_interval)):\n time.sleep(polling_interval)\n if self.is_isvc_ready(name, namespace=namespace, version=version):\n return\n\n current_isvc = self.get(name, namespace=namespace, version=version)\n raise RuntimeError(\"Timeout to start the InferenceService {}. \\\n The InferenceService is as following: {}\".format(name, current_isvc))\n\n def create_trained_model(self, trainedmodel, namespace):\n \"\"\"\n Create a trained model\n :param trainedmodel: trainedmodel object\n :param namespace: defaults to current or default namespace\n :return:\n \"\"\"\n version = trainedmodel.api_version.split(\"/\")[1]\n\n try:\n self.api_instance.create_namespaced_custom_object(\n constants.KSERVE_GROUP,\n version,\n namespace,\n constants.KSERVE_PLURAL_TRAINEDMODEL,\n trainedmodel)\n except client.rest.ApiException as e:\n raise RuntimeError(\n \"Exception when calling CustomObjectsApi->create_namespaced_custom_object:\\\n %s\\n\" % e)\n\n def delete_trained_model(\n self, name, namespace=None, version=constants.KSERVE_V1ALPHA1_VERSION\n ):\n \"\"\"\n Delete the trained model\n :param name: trained model name\n :param namespace: defaults to current or default namespace\n :param version: api group version\n :return:\n \"\"\"\n if namespace is None:\n namespace = utils.get_default_target_namespace()\n\n try:\n return self.api_instance.delete_namespaced_custom_object(\n constants.KSERVE_GROUP,\n version,\n namespace,\n constants.KSERVE_PLURAL_TRAINEDMODEL,\n name,\n )\n except client.rest.ApiException as e:\n raise RuntimeError(\n \"Exception when calling CustomObjectsApi->delete_namespaced_custom_object:\\\n %s\\n\"\n % e\n )\n\n def wait_model_ready(self, service_name, model_name, isvc_namespace=None, # pylint:disable=too-many-arguments\n isvc_version=constants.KSERVE_V1BETA1_VERSION,\n cluster_ip=None,\n protocol_version=\"v1\",\n timeout_seconds=600,\n polling_interval=10):\n \"\"\"\n Waiting for model to be ready to service, print out trained model if timeout.\n :param service_name: inference service name\n :param model_name: trained model name\n :param isvc_namespace: defaults to current or default namespace of inference service\n :param isvc_version: api group version of inference 
service\n :param protocol_version: version of the dataplane protocol\n :param cluster_ip: ip of the kubernetes cluster\n :param timeout_seconds: timeout seconds for waiting, default to 600s.\n Print out the InferenceService if timeout.\n :param polling_interval: The time interval to poll status\n :return:\n \"\"\"\n isvc = self.get(\n service_name,\n namespace=isvc_namespace,\n version=isvc_version,\n )\n\n host = urlparse(isvc[\"status\"][\"url\"]).netloc\n headers = {\"Host\": host}\n\n for _ in range(round(timeout_seconds / polling_interval)):\n time.sleep(polling_interval)\n # Check model health API\n url = f\"http://{cluster_ip}/{protocol_version}/models/{model_name}\"\n response = requests.get(url, headers=headers).status_code\n if response == 200:\n return\n\n raise RuntimeError(f\"InferenceService ({service_name}) has not loaded the \\\n model ({model_name}) before the timeout.\")\n\n def create_inference_graph(self, inferencegraph: V1alpha1InferenceGraph, namespace: str = None) -> object:\n \"\"\"\n create a inference graph\n\n :param inferencegraph: inference graph object\n :param namespace: defaults to current or default namespace\n :return: created inference graph\n \"\"\"\n version = inferencegraph.api_version.split(\"/\")[1]\n if namespace is None:\n namespace = utils.get_ig_namespace(inferencegraph)\n\n try:\n outputs = self.api_instance.create_namespaced_custom_object(\n constants.KSERVE_GROUP,\n version,\n namespace,\n constants.KSERVE_PLURAL_INFERENCEGRAPH,\n inferencegraph\n )\n except client.rest.ApiException as e:\n raise RuntimeError(\n \"Exception when calling CustomObjectsApi->create_namespaced_custom_object:\\\n %s\\n\"\n % e\n )\n return outputs\n\n def delete_inference_graph(self, name: str, namespace: str = None,\n version: str = constants.KSERVE_V1ALPHA1_VERSION):\n \"\"\"\n Delete the inference graph\n\n :param name: inference graph name\n :param namespace: defaults to current or default namespace\n :param version: api group version\n \"\"\"\n if namespace is None:\n namespace = utils.get_default_target_namespace()\n\n try:\n self.api_instance.delete_namespaced_custom_object(\n constants.KSERVE_GROUP,\n version,\n namespace,\n constants.KSERVE_PLURAL_INFERENCEGRAPH,\n name,\n )\n except client.rest.ApiException as e:\n raise RuntimeError(\n \"Exception when calling CustomObjectsApi->create_namespaced_custom_object:\\\n %s\\n\"\n % e\n )\n\n def get_inference_graph(self, name: str, namespace: str = None,\n version: str = constants.KSERVE_V1ALPHA1_VERSION) -> object:\n \"\"\"\n Get the inference graph\n\n :param name: existing inference graph name\n :param namespace: defaults to current or default namespace\n :param version: api group version\n :return: inference graph\n \"\"\"\n\n if namespace is None:\n namespace = utils.get_default_target_namespace()\n\n try:\n return self.api_instance.get_namespaced_custom_object(\n constants.KSERVE_GROUP,\n version,\n namespace,\n constants.KSERVE_PLURAL_INFERENCEGRAPH,\n name)\n except client.rest.ApiException as e:\n raise RuntimeError(\n \"Exception when calling CustomObjectsApi->get_namespaced_custom_object:\\\n %s\\n\" % e)\n\n def is_ig_ready(self, name: str, namespace: str = None, version: str = constants.KSERVE_V1ALPHA1_VERSION) -> bool:\n \"\"\"\n Check if the inference graph is ready.\n\n :param name: inference graph name\n :param namespace: defaults to current or default namespace\n :param version: api group version\n :return: true if inference graph is ready, else false.\n \"\"\"\n if namespace is None:\n 
namespace = utils.get_default_target_namespace()\n\n ig: dict = self.get_inference_graph(name, namespace=namespace, version=version)\n for condition in ig.get('status', {}).get('conditions', {}):\n if condition.get('type', '') == 'Ready':\n status = condition.get('status', 'Unknown')\n return status.lower() == \"true\"\n return False\n\n def wait_ig_ready(self, name: str, namespace: str = None, version: str = constants.KSERVE_V1ALPHA1_VERSION,\n timeout_seconds: int = 600,\n polling_interval: int = 10):\n \"\"\"\n Wait for inference graph to be ready until timeout. Print out the inference graph if timeout.\n\n :param name: inference graph name\n :param namespace: defaults to current or default namespace\n :param version: api group version\n :param timeout_seconds: timeout seconds for waiting, default to 600s.\n :param polling_interval: The time interval to poll status\n :return:\n \"\"\"\n for _ in range(round(timeout_seconds / polling_interval)):\n time.sleep(polling_interval)\n if self.is_ig_ready(name, namespace, version):\n return\n\n current_ig = self.get_inference_graph(name, namespace=namespace, version=version)\n raise RuntimeError(\"Timeout to start the InferenceGraph {}. \\\n The InferenceGraph is as following: {}\".format(name, current_ig))\n", "path": "python/kserve/kserve/api/kserve_client.py"}], "after_files": [{"content": "# Copyright 2021 The KServe Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport time\nfrom urllib.parse import urlparse\n\nimport requests\nfrom kubernetes import client, config\n\nfrom .creds_utils import set_gcs_credentials, set_s3_credentials, set_azure_credentials\nfrom .watch import isvc_watch\nfrom ..constants import constants\nfrom ..models import V1alpha1InferenceGraph\nfrom ..utils import utils\n\n\nclass KServeClient(object):\n\n def __init__(self, config_file=None, config_dict=None, context=None, # pylint: disable=too-many-arguments\n client_configuration=None, persist_config=True):\n \"\"\"\n KServe client constructor\n :param config_file: kubeconfig file, defaults to ~/.kube/config\n :param config_dict: Takes the config file as a dict.\n :param context: kubernetes context\n :param client_configuration: kubernetes configuration object\n :param persist_config:\n \"\"\"\n if config_file or config_dict or not utils.is_running_in_k8s():\n if config_dict:\n config.load_kube_config_from_dict(\n config_dict=config_dict,\n context=context,\n client_configuration=None,\n persist_config=persist_config\n )\n else:\n config.load_kube_config(\n config_file=config_file,\n context=context,\n client_configuration=client_configuration,\n persist_config=persist_config\n )\n else:\n config.load_incluster_config()\n self.core_api = client.CoreV1Api()\n self.app_api = client.AppsV1Api()\n self.api_instance = client.CustomObjectsApi()\n\n def set_credentials(self, storage_type, namespace=None, credentials_file=None,\n service_account=constants.DEFAULT_SA_NAME, **kwargs):\n \"\"\"\n Setup credentials for KServe.\n\n :param storage_type: Valid value: 
GCS or S3 (required)\n :param namespace: inference service deployment namespace\n :param credentials_file: the path for the credentials file.\n :param service_account: the name of service account.\n :param kwargs: Others parameters for each storage_type\n :return:\n \"\"\"\n\n if namespace is None:\n namespace = utils.get_default_target_namespace()\n\n if storage_type.lower() == 'gcs':\n if credentials_file is None:\n credentials_file = constants.GCS_DEFAULT_CREDS_FILE\n set_gcs_credentials(namespace=namespace,\n credentials_file=credentials_file,\n service_account=service_account)\n elif storage_type.lower() == 's3':\n if credentials_file is None:\n credentials_file = constants.S3_DEFAULT_CREDS_FILE\n set_s3_credentials(namespace=namespace,\n credentials_file=credentials_file,\n service_account=service_account,\n **kwargs)\n elif storage_type.lower() == 'azure':\n if credentials_file is None:\n credentials_file = constants.AZ_DEFAULT_CREDS_FILE\n set_azure_credentials(namespace=namespace,\n credentials_file=credentials_file,\n service_account=service_account)\n else:\n raise RuntimeError(\"Invalid storage_type: %s, only support GCS, S3 and Azure\\\n currently.\\n\" % storage_type)\n\n def create(self, inferenceservice, namespace=None, watch=False,\n timeout_seconds=600): # pylint:disable=inconsistent-return-statements\n \"\"\"\n Create the inference service\n :param inferenceservice: inference service object\n :param namespace: defaults to current or default namespace\n :param watch: True to watch the created service until timeout elapsed or status is ready\n :param timeout_seconds: timeout seconds for watch, default to 600s\n :return: created inference service\n \"\"\"\n\n version = inferenceservice.api_version.split(\"/\")[1]\n\n if namespace is None:\n namespace = utils.get_isvc_namespace(inferenceservice)\n\n try:\n outputs = self.api_instance.create_namespaced_custom_object(\n constants.KSERVE_GROUP,\n version,\n namespace,\n constants.KSERVE_PLURAL,\n inferenceservice)\n except client.rest.ApiException as e:\n raise RuntimeError(\n \"Exception when calling CustomObjectsApi->create_namespaced_custom_object:\\\n %s\\n\" % e)\n\n if watch:\n isvc_watch(\n name=outputs['metadata']['name'],\n namespace=namespace,\n timeout_seconds=timeout_seconds)\n else:\n return outputs\n\n def get(self, name=None, namespace=None, watch=False, timeout_seconds=600,\n version=constants.KSERVE_V1BETA1_VERSION): # pylint:disable=inconsistent-return-statements\n \"\"\"\n Get the inference service\n :param name: existing inference service name\n :param namespace: defaults to current or default namespace\n :param watch: True to watch the service until timeout elapsed or status is ready\n :param timeout_seconds: timeout seconds for watch, default to 600s\n :param version: api group version\n :return: inference service\n \"\"\"\n\n if namespace is None:\n namespace = utils.get_default_target_namespace()\n\n if name:\n if watch:\n isvc_watch(\n name=name,\n namespace=namespace,\n timeout_seconds=timeout_seconds)\n else:\n try:\n return self.api_instance.get_namespaced_custom_object(\n constants.KSERVE_GROUP,\n version,\n namespace,\n constants.KSERVE_PLURAL,\n name)\n except client.rest.ApiException as e:\n raise RuntimeError(\n \"Exception when calling CustomObjectsApi->get_namespaced_custom_object:\\\n %s\\n\" % e)\n else:\n if watch:\n isvc_watch(\n namespace=namespace,\n timeout_seconds=timeout_seconds)\n else:\n try:\n return self.api_instance.list_namespaced_custom_object(\n constants.KSERVE_GROUP,\n 
version,\n namespace,\n constants.KSERVE_PLURAL)\n except client.rest.ApiException as e:\n raise RuntimeError(\n \"Exception when calling CustomObjectsApi->list_namespaced_custom_object:\\\n %s\\n\" % e)\n\n def patch(self, name, inferenceservice, namespace=None, watch=False,\n timeout_seconds=600): # pylint:disable=too-many-arguments,inconsistent-return-statements\n \"\"\"\n Patch existing inference service\n :param name: existing inference service name\n :param inferenceservice: patched inference service\n :param namespace: defaults to current or default namespace\n :param watch: True to watch the patched service until timeout elapsed or status is ready\n :param timeout_seconds: timeout seconds for watch, default to 600s\n :return: patched inference service\n \"\"\"\n\n version = inferenceservice.api_version.split(\"/\")[1]\n if namespace is None:\n namespace = utils.get_isvc_namespace(inferenceservice)\n\n try:\n outputs = self.api_instance.patch_namespaced_custom_object(\n constants.KSERVE_GROUP,\n version,\n namespace,\n constants.KSERVE_PLURAL,\n name,\n inferenceservice)\n except client.rest.ApiException as e:\n raise RuntimeError(\n \"Exception when calling CustomObjectsApi->patch_namespaced_custom_object:\\\n %s\\n\" % e)\n\n if watch:\n # Sleep 3 to avoid status still be True within a very short time.\n time.sleep(3)\n isvc_watch(\n name=outputs['metadata']['name'],\n namespace=namespace,\n timeout_seconds=timeout_seconds)\n else:\n return outputs\n\n def replace(self, name, inferenceservice, namespace=None, watch=False,\n timeout_seconds=600): # pylint:disable=too-many-arguments,inconsistent-return-statements\n \"\"\"\n Replace the existing inference service\n :param name: existing inference service name\n :param inferenceservice: replacing inference service\n :param namespace: defaults to current or default namespace\n :param watch: True to watch the replaced service until timeout elapsed or status is ready\n :param timeout_seconds: timeout seconds for watch, default to 600s\n :return: replaced inference service\n \"\"\"\n\n version = inferenceservice.api_version.split(\"/\")[1]\n\n if namespace is None:\n namespace = utils.get_isvc_namespace(inferenceservice)\n\n if inferenceservice.metadata.resource_version is None:\n current_isvc = self.get(name, namespace=namespace)\n current_resource_version = current_isvc['metadata']['resourceVersion']\n inferenceservice.metadata.resource_version = current_resource_version\n\n try:\n outputs = self.api_instance.replace_namespaced_custom_object(\n constants.KSERVE_GROUP,\n version,\n namespace,\n constants.KSERVE_PLURAL,\n name,\n inferenceservice)\n except client.rest.ApiException as e:\n raise RuntimeError(\n \"Exception when calling CustomObjectsApi->replace_namespaced_custom_object:\\\n %s\\n\" % e)\n\n if watch:\n isvc_watch(\n name=outputs['metadata']['name'],\n namespace=namespace,\n timeout_seconds=timeout_seconds,\n generation=outputs['metadata']['generation'])\n else:\n return outputs\n\n def delete(self, name, namespace=None, version=constants.KSERVE_V1BETA1_VERSION):\n \"\"\"\n Delete the inference service\n :param name: inference service name\n :param namespace: defaults to current or default namespace\n :param version: api group version\n :return:\n \"\"\"\n if namespace is None:\n namespace = utils.get_default_target_namespace()\n\n try:\n return self.api_instance.delete_namespaced_custom_object(\n constants.KSERVE_GROUP,\n version,\n namespace,\n constants.KSERVE_PLURAL,\n name)\n except client.rest.ApiException as e:\n 
raise RuntimeError(\n \"Exception when calling CustomObjectsApi->delete_namespaced_custom_object:\\\n %s\\n\" % e)\n\n def is_isvc_ready(self, name, namespace=None,\n version=constants.KSERVE_V1BETA1_VERSION): # pylint:disable=inconsistent-return-statements\n \"\"\"\n Check if the inference service is ready.\n :param version:\n :param name: inference service name\n :param namespace: defaults to current or default namespace\n :return:\n \"\"\"\n kfsvc_status = self.get(name, namespace=namespace,\n version=version)\n if 'status' not in kfsvc_status:\n return False\n status = 'Unknown'\n for condition in kfsvc_status['status'].get('conditions', {}):\n if condition.get('type', '') == 'Ready':\n status = condition.get('status', 'Unknown')\n return status.lower() == \"true\"\n return False\n\n def wait_isvc_ready(self, name, namespace=None, # pylint:disable=too-many-arguments\n watch=False,\n timeout_seconds=600,\n polling_interval=10,\n version=constants.KSERVE_V1BETA1_VERSION):\n \"\"\"\n Waiting for inference service ready, print out the inference service if timeout.\n :param name: inference service name\n :param namespace: defaults to current or default namespace\n :param watch: True to watch the service until timeout elapsed or status is ready\n :param timeout_seconds: timeout seconds for waiting, default to 600s.\n Print out the InferenceService if timeout.\n :param polling_interval: The time interval to poll status\n :param version: api group version\n :return:\n \"\"\"\n if watch:\n isvc_watch(\n name=name,\n namespace=namespace,\n timeout_seconds=timeout_seconds)\n else:\n for _ in range(round(timeout_seconds / polling_interval)):\n time.sleep(polling_interval)\n if self.is_isvc_ready(name, namespace=namespace, version=version):\n return\n\n current_isvc = self.get(name, namespace=namespace, version=version)\n raise RuntimeError(\"Timeout to start the InferenceService {}. 
\\\n The InferenceService is as following: {}\".format(name, current_isvc))\n\n def create_trained_model(self, trainedmodel, namespace):\n \"\"\"\n Create a trained model\n :param trainedmodel: trainedmodel object\n :param namespace: defaults to current or default namespace\n :return:\n \"\"\"\n version = trainedmodel.api_version.split(\"/\")[1]\n\n try:\n self.api_instance.create_namespaced_custom_object(\n constants.KSERVE_GROUP,\n version,\n namespace,\n constants.KSERVE_PLURAL_TRAINEDMODEL,\n trainedmodel)\n except client.rest.ApiException as e:\n raise RuntimeError(\n \"Exception when calling CustomObjectsApi->create_namespaced_custom_object:\\\n %s\\n\" % e)\n\n def delete_trained_model(\n self, name, namespace=None, version=constants.KSERVE_V1ALPHA1_VERSION\n ):\n \"\"\"\n Delete the trained model\n :param name: trained model name\n :param namespace: defaults to current or default namespace\n :param version: api group version\n :return:\n \"\"\"\n if namespace is None:\n namespace = utils.get_default_target_namespace()\n\n try:\n return self.api_instance.delete_namespaced_custom_object(\n constants.KSERVE_GROUP,\n version,\n namespace,\n constants.KSERVE_PLURAL_TRAINEDMODEL,\n name,\n )\n except client.rest.ApiException as e:\n raise RuntimeError(\n \"Exception when calling CustomObjectsApi->delete_namespaced_custom_object:\\\n %s\\n\"\n % e\n )\n\n def wait_model_ready(self, service_name, model_name, isvc_namespace=None, # pylint:disable=too-many-arguments\n isvc_version=constants.KSERVE_V1BETA1_VERSION,\n cluster_ip=None,\n protocol_version=\"v1\",\n timeout_seconds=600,\n polling_interval=10):\n \"\"\"\n Waiting for model to be ready to service, print out trained model if timeout.\n :param service_name: inference service name\n :param model_name: trained model name\n :param isvc_namespace: defaults to current or default namespace of inference service\n :param isvc_version: api group version of inference service\n :param protocol_version: version of the dataplane protocol\n :param cluster_ip: ip of the kubernetes cluster\n :param timeout_seconds: timeout seconds for waiting, default to 600s.\n Print out the InferenceService if timeout.\n :param polling_interval: The time interval to poll status\n :return:\n \"\"\"\n isvc = self.get(\n service_name,\n namespace=isvc_namespace,\n version=isvc_version,\n )\n\n host = urlparse(isvc[\"status\"][\"url\"]).netloc\n headers = {\"Host\": host}\n\n for _ in range(round(timeout_seconds / polling_interval)):\n time.sleep(polling_interval)\n # Check model health API\n url = f\"http://{cluster_ip}/{protocol_version}/models/{model_name}\"\n response = requests.get(url, headers=headers).status_code\n if response == 200:\n return\n\n raise RuntimeError(f\"InferenceService ({service_name}) has not loaded the \\\n model ({model_name}) before the timeout.\")\n\n def create_inference_graph(self, inferencegraph: V1alpha1InferenceGraph, namespace: str = None) -> object:\n \"\"\"\n create a inference graph\n\n :param inferencegraph: inference graph object\n :param namespace: defaults to current or default namespace\n :return: created inference graph\n \"\"\"\n version = inferencegraph.api_version.split(\"/\")[1]\n if namespace is None:\n namespace = utils.get_ig_namespace(inferencegraph)\n\n try:\n outputs = self.api_instance.create_namespaced_custom_object(\n constants.KSERVE_GROUP,\n version,\n namespace,\n constants.KSERVE_PLURAL_INFERENCEGRAPH,\n inferencegraph\n )\n except client.rest.ApiException as e:\n raise RuntimeError(\n \"Exception when 
calling CustomObjectsApi->create_namespaced_custom_object:\\\n %s\\n\"\n % e\n )\n return outputs\n\n def delete_inference_graph(self, name: str, namespace: str = None,\n version: str = constants.KSERVE_V1ALPHA1_VERSION):\n \"\"\"\n Delete the inference graph\n\n :param name: inference graph name\n :param namespace: defaults to current or default namespace\n :param version: api group version\n \"\"\"\n if namespace is None:\n namespace = utils.get_default_target_namespace()\n\n try:\n self.api_instance.delete_namespaced_custom_object(\n constants.KSERVE_GROUP,\n version,\n namespace,\n constants.KSERVE_PLURAL_INFERENCEGRAPH,\n name,\n )\n except client.rest.ApiException as e:\n raise RuntimeError(\n \"Exception when calling CustomObjectsApi->create_namespaced_custom_object:\\\n %s\\n\"\n % e\n )\n\n def get_inference_graph(self, name: str, namespace: str = None,\n version: str = constants.KSERVE_V1ALPHA1_VERSION) -> object:\n \"\"\"\n Get the inference graph\n\n :param name: existing inference graph name\n :param namespace: defaults to current or default namespace\n :param version: api group version\n :return: inference graph\n \"\"\"\n\n if namespace is None:\n namespace = utils.get_default_target_namespace()\n\n try:\n return self.api_instance.get_namespaced_custom_object(\n constants.KSERVE_GROUP,\n version,\n namespace,\n constants.KSERVE_PLURAL_INFERENCEGRAPH,\n name)\n except client.rest.ApiException as e:\n raise RuntimeError(\n \"Exception when calling CustomObjectsApi->get_namespaced_custom_object:\\\n %s\\n\" % e)\n\n def is_ig_ready(self, name: str, namespace: str = None, version: str = constants.KSERVE_V1ALPHA1_VERSION) -> bool:\n \"\"\"\n Check if the inference graph is ready.\n\n :param name: inference graph name\n :param namespace: defaults to current or default namespace\n :param version: api group version\n :return: true if inference graph is ready, else false.\n \"\"\"\n if namespace is None:\n namespace = utils.get_default_target_namespace()\n\n ig: dict = self.get_inference_graph(name, namespace=namespace, version=version)\n for condition in ig.get('status', {}).get('conditions', {}):\n if condition.get('type', '') == 'Ready':\n status = condition.get('status', 'Unknown')\n return status.lower() == \"true\"\n return False\n\n def wait_ig_ready(self, name: str, namespace: str = None, version: str = constants.KSERVE_V1ALPHA1_VERSION,\n timeout_seconds: int = 600,\n polling_interval: int = 10):\n \"\"\"\n Wait for inference graph to be ready until timeout. Print out the inference graph if timeout.\n\n :param name: inference graph name\n :param namespace: defaults to current or default namespace\n :param version: api group version\n :param timeout_seconds: timeout seconds for waiting, default to 600s.\n :param polling_interval: The time interval to poll status\n :return:\n \"\"\"\n for _ in range(round(timeout_seconds / polling_interval)):\n time.sleep(polling_interval)\n if self.is_ig_ready(name, namespace, version):\n return\n\n current_ig = self.get_inference_graph(name, namespace=namespace, version=version)\n raise RuntimeError(\"Timeout to start the InferenceGraph {}. \\\n The InferenceGraph is as following: {}\".format(name, current_ig))\n", "path": "python/kserve/kserve/api/kserve_client.py"}]} |
gh_patches_debug_1211 | rasdani/github-patches | git_diff | openmc-dev__openmc-678 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Tally with cell filter assumes the cells tallied to are mutually exclusive
If a tally is created with a cell filter, the bins (i.e. cells) are assumed to be mutually exclusive. For instance, we can modify the `examples/xml/lattice/simple/tallies.xml` file for a 4x4 lattice of pin cells to only have the following tally:
```
<tally id="1">
<filter type="cell" bins="1 101" />
<scores>total</scores>
</tally>
```
Where cell `1` is the cell filled with the lattice and cell `101` is the fuel region of one of the pins. The `tallies.out` file would be:
```
============================> TALLY 1 <============================
Cell 1
Total Material
Total Reaction Rate 1.09346 +/- 6.47323E-03
Cell 101
Total Material
Total Reaction Rate 0.0 +/- 0.0
```
The tallies over the two cells can be put into separate tallies as follows:
```
<tally id="1">
<filter type="cell" bins="1" />
<scores>total</scores>
</tally>
<tally id="2">
<filter type="cell" bins="101" />
<scores>total</scores>
</tally>
```
Yielding the desired results:
```
============================> TALLY 1 <============================
Cell 1
Total Material
Total Reaction Rate 1.09346 +/- 6.47323E-03
============================> TALLY 2 <============================
Cell 101
Total Material
Total Reaction Rate 4.99603E-02 +/- 4.27083E-04
```
The openmc API will merge tallies with cell filters assuming that the cells are mutually exclusive, so this issue should be addressed in both the Fortran code and the Python API.
As far as I can see, the documentation does not indicate that bins for the cell filter must be mutually exclusive. Fixing this issue seems like it might be difficult, and waiting for the tally module to be refactored may be the best option.
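For reference, a minimal Python sketch of the same setup (the `Filter` construction follows the class listed below in the FILES section; the `Tally` attachment is assumed from the API's usual conventions and is illustrative only, not taken from the original report):

```python
import openmc

# Cell filter with two overlapping bins: cell 1 (filled with the lattice)
# and cell 101 (a fuel region inside it). The filter scores these bins as
# if they were mutually exclusive, so cell 101 ends up with a zero result.
cell_filter = openmc.Filter(type='cell', bins=[1, 101])

# Hypothetical tally mirroring the tallies.xml snippet above.
tally = openmc.Tally(tally_id=1)
tally.filters = [cell_filter]
tally.scores = ['total']
```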
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `openmc/filter.py`
Content:
```
1 from collections import Iterable, OrderedDict
2 import copy
3 from numbers import Real, Integral
4 import sys
5
6 import numpy as np
7
8 from openmc import Mesh
9 import openmc.checkvalue as cv
10
11
12 if sys.version_info[0] >= 3:
13 basestring = str
14
15
16 _FILTER_TYPES = ['universe', 'material', 'cell', 'cellborn', 'surface',
17 'mesh', 'energy', 'energyout', 'mu', 'polar', 'azimuthal',
18 'distribcell', 'delayedgroup']
19
20 class Filter(object):
21 """A filter used to constrain a tally to a specific criterion, e.g. only
22 tally events when the particle is in a certain cell and energy range.
23
24 Parameters
25 ----------
26 type : str
27 The type of the tally filter. Acceptable values are "universe",
28 "material", "cell", "cellborn", "surface", "mesh", "energy",
29 "energyout", "distribcell", "mu", "polar", "azimuthal", and
30 "delayedgroup".
31 bins : Integral or Iterable of Integral or Iterable of Real
32 The bins for the filter. This takes on different meaning for different
33 filters. See the OpenMC online documentation for more details.
34
35 Attributes
36 ----------
37 type : str
38 The type of the tally filter
39 bins : Integral or Iterable of Real
40 The bins for the filter
41 num_bins : Integral
42 The number of filter bins
43 mesh : openmc.Mesh or None
44 A Mesh object for 'mesh' type filters.
45 stride : Integral
46 The number of filter, nuclide and score bins within each of this
47 filter's bins.
48 distribcell_paths : list of str
49 The paths traversed through the CSG tree to reach each distribcell
50 instance (for 'distribcell' filters only)
51
52 """
53
54 # Initialize Filter class attributes
55 def __init__(self, type=None, bins=None):
56
57 self._type = None
58 self._num_bins = 0
59 self._bins = None
60 self._mesh = None
61 self._stride = None
62 self._distribcell_paths = None
63
64 if type is not None:
65 self.type = type
66 if bins is not None:
67 self.bins = bins
68
69 def __eq__(self, other):
70 if not isinstance(other, Filter):
71 return False
72 elif self.type != other.type:
73 return False
74 elif len(self.bins) != len(other.bins):
75 return False
76 elif not np.allclose(self.bins, other.bins):
77 return False
78 else:
79 return True
80
81 def __ne__(self, other):
82 return not self == other
83
84 def __gt__(self, other):
85 if self.type != other.type:
86 if self.type in _FILTER_TYPES and other.type in _FILTER_TYPES:
87 delta = _FILTER_TYPES.index(self.type) - \
88 _FILTER_TYPES.index(other.type)
89 return delta > 0
90 else:
91 return False
92 else:
93 # Compare largest/smallest energy bin edges in energy filters
94 # This logic is used when merging tallies with energy filters
95 if 'energy' in self.type and 'energy' in other.type:
96 return self.bins[0] >= other.bins[-1]
97 else:
98 return max(self.bins) > max(other.bins)
99
100 def __lt__(self, other):
101 return not self > other
102
103 def __hash__(self):
104 return hash(repr(self))
105
106 def __repr__(self):
107 string = 'Filter\n'
108 string += '{0: <16}{1}{2}\n'.format('\tType', '=\t', self.type)
109 string += '{0: <16}{1}{2}\n'.format('\tBins', '=\t', self.bins)
110 return string
111
112 @property
113 def type(self):
114 return self._type
115
116 @property
117 def bins(self):
118 return self._bins
119
120 @property
121 def num_bins(self):
122 if self.bins is None:
123 return 0
124 elif self.type in ['energy', 'energyout']:
125 return len(self.bins) - 1
126 elif self.type in ['cell', 'cellborn', 'surface', 'universe', 'material']:
127 return len(self.bins)
128 else:
129 return self._num_bins
130
131 @property
132 def mesh(self):
133 return self._mesh
134
135 @property
136 def stride(self):
137 return self._stride
138
139 @property
140 def distribcell_paths(self):
141 return self._distribcell_paths
142
143 @type.setter
144 def type(self, type):
145 if type is None:
146 self._type = type
147 elif type not in _FILTER_TYPES:
148 msg = 'Unable to set Filter type to "{0}" since it is not one ' \
149 'of the supported types'.format(type)
150 raise ValueError(msg)
151
152 self._type = type
153
154 @bins.setter
155 def bins(self, bins):
156 if self.type is None:
157 msg = 'Unable to set bins for Filter to "{0}" since ' \
158 'the Filter type has not yet been set'.format(bins)
159 raise ValueError(msg)
160
161 # If the bin edge is a single value, it is a Cell, Material, etc. ID
162 if not isinstance(bins, Iterable):
163 bins = [bins]
164
165 # If the bins are in a collection, convert it to a list
166 else:
167 bins = list(bins)
168
169 if self.type in ['cell', 'cellborn', 'surface', 'material',
170 'universe', 'distribcell', 'delayedgroup']:
171 cv.check_iterable_type('filter bins', bins, Integral)
172 for edge in bins:
173 cv.check_greater_than('filter bin', edge, 0, equality=True)
174
175 elif self.type in ['energy', 'energyout']:
176 for edge in bins:
177 if not isinstance(edge, Real):
178 msg = 'Unable to add bin edge "{0}" to a "{1}" Filter ' \
179 'since it is a non-integer or floating point ' \
180 'value'.format(edge, self.type)
181 raise ValueError(msg)
182 elif edge < 0.:
183 msg = 'Unable to add bin edge "{0}" to a "{1}" Filter ' \
184 'since it is a negative value'.format(edge, self.type)
185 raise ValueError(msg)
186
187 # Check that bin edges are monotonically increasing
188 for index in range(len(bins)):
189 if index > 0 and bins[index] < bins[index-1]:
190 msg = 'Unable to add bin edges "{0}" to a "{1}" Filter ' \
191 'since they are not monotonically ' \
192 'increasing'.format(bins, self.type)
193 raise ValueError(msg)
194
195 # mesh filters
196 elif self.type == 'mesh':
197 if not len(bins) == 1:
198 msg = 'Unable to add bins "{0}" to a mesh Filter since ' \
199 'only a single mesh can be used per tally'.format(bins)
200 raise ValueError(msg)
201 elif not isinstance(bins[0], Integral):
202 msg = 'Unable to add bin "{0}" to mesh Filter since it ' \
203 'is a non-integer'.format(bins[0])
204 raise ValueError(msg)
205 elif bins[0] < 0:
206 msg = 'Unable to add bin "{0}" to mesh Filter since it ' \
207 'is a negative integer'.format(bins[0])
208 raise ValueError(msg)
209
210 # If all error checks passed, add bin edges
211 self._bins = np.array(bins)
212
213 @num_bins.setter
214 def num_bins(self, num_bins):
215 cv.check_type('filter num_bins', num_bins, Integral)
216 cv.check_greater_than('filter num_bins', num_bins, 0, equality=True)
217 self._num_bins = num_bins
218
219 @mesh.setter
220 def mesh(self, mesh):
221 cv.check_type('filter mesh', mesh, Mesh)
222
223 self._mesh = mesh
224 self.type = 'mesh'
225 self.bins = self.mesh.id
226
227 @stride.setter
228 def stride(self, stride):
229 cv.check_type('filter stride', stride, Integral)
230 if stride < 0:
231 msg = 'Unable to set stride "{0}" for a "{1}" Filter since it ' \
232 'is a negative value'.format(stride, self.type)
233 raise ValueError(msg)
234
235 self._stride = stride
236
237 @distribcell_paths.setter
238 def distribcell_paths(self, distribcell_paths):
239 cv.check_iterable_type('distribcell_paths', distribcell_paths, str)
240 self._distribcell_paths = distribcell_paths
241
242 def can_merge(self, other):
243 """Determine if filter can be merged with another.
244
245 Parameters
246 ----------
247 other : openmc.Filter
248 Filter to compare with
249
250 Returns
251 -------
252 bool
253 Whether the filter can be merged
254
255 """
256
257 if not isinstance(other, Filter):
258 return False
259
260 # Filters must be of the same type
261 if self.type != other.type:
262 return False
263
264 # Distribcell filters cannot have more than one bin
265 if self.type == 'distribcell':
266 return False
267
268 # Mesh filters cannot have more than one bin
269 elif self.type == 'mesh':
270 return False
271
272 # Different energy bins structures must be mutually exclusive and
273 # share only one shared bin edge at the minimum or maximum energy
274 elif 'energy' in self.type:
275 # This low energy edge coincides with other's high energy edge
276 if self.bins[0] == other.bins[-1]:
277 return True
278 # This high energy edge coincides with other's low energy edge
279 elif self.bins[-1] == other.bins[0]:
280 return True
281 else:
282 return False
283
284 else:
285 return True
286
287 def merge(self, other):
288 """Merge this filter with another.
289
290 Parameters
291 ----------
292 other : openmc.Filter
293 Filter to merge with
294
295 Returns
296 -------
297 merged_filter : openmc.Filter
298 Filter resulting from the merge
299
300 """
301
302 if not self.can_merge(other):
303 msg = 'Unable to merge "{0}" with "{1}" ' \
304 'filters'.format(self.type, other.type)
305 raise ValueError(msg)
306
307 # Create deep copy of filter to return as merged filter
308 merged_filter = copy.deepcopy(self)
309
310 # Merge unique filter bins
311 merged_bins = np.concatenate((self.bins, other.bins))
312 merged_bins = np.unique(merged_bins)
313
314 # Sort energy bin edges
315 if 'energy' in self.type:
316 merged_bins = sorted(merged_bins)
317
318 # Assign merged bins to merged filter
319 merged_filter.bins = list(merged_bins)
320
321 # Count bins in the merged filter
322 if 'energy' in merged_filter.type:
323 merged_filter.num_bins = len(merged_bins) - 1
324 else:
325 merged_filter.num_bins = len(merged_bins)
326
327 return merged_filter
328
329 def is_subset(self, other):
330 """Determine if another filter is a subset of this filter.
331
332 If all of the bins in the other filter are included as bins in this
333 filter, then it is a subset of this filter.
334
335 Parameters
336 ----------
337 other : openmc.Filter
338 The filter to query as a subset of this filter
339
340 Returns
341 -------
342 bool
343 Whether or not the other filter is a subset of this filter
344
345 """
346
347 if not isinstance(other, Filter):
348 return False
349 elif self.type != other.type:
350 return False
351 elif self.type in ['energy', 'energyout']:
352 if len(self.bins) != len(other.bins):
353 return False
354 else:
355 return np.allclose(self.bins, other.bins)
356
357 for bin in other.bins:
358 if bin not in self.bins:
359 return False
360
361 return True
362
363 def get_bin_index(self, filter_bin):
364 """Returns the index in the Filter for some bin.
365
366 Parameters
367 ----------
368 filter_bin : Integral or tuple
369 The bin is the integer ID for 'material', 'surface', 'cell',
370 'cellborn', and 'universe' Filters. The bin is an integer for the
371 cell instance ID for 'distribcell' Filters. The bin is a 2-tuple of
372 floats for 'energy' and 'energyout' filters corresponding to the
373 energy boundaries of the bin of interest. The bin is an (x,y,z)
374 3-tuple for 'mesh' filters corresponding to the mesh cell
375 interest.
376
377 Returns
378 -------
379 filter_index : Integral
380 The index in the Tally data array for this filter bin.
381
382 See also
383 --------
384 Filter.get_bin()
385
386 """
387
388 try:
389 # Filter bins for a mesh are an (x,y,z) tuple
390 if self.type == 'mesh':
391 # Convert (x,y,z) to a single bin -- this is similar to
392 # subroutine mesh_indices_to_bin in openmc/src/mesh.F90.
393 if len(self.mesh.dimension) == 3:
394 nx, ny, nz = self.mesh.dimension
395 val = (filter_bin[0] - 1) * ny * nz + \
396 (filter_bin[1] - 1) * nz + \
397 (filter_bin[2] - 1)
398 else:
399 nx, ny = self.mesh.dimension
400 val = (filter_bin[0] - 1) * ny + \
401 (filter_bin[1] - 1)
402
403 filter_index = val
404
405 # Use lower energy bound to find index for energy Filters
406 elif self.type in ['energy', 'energyout']:
407 deltas = np.abs(self.bins - filter_bin[1]) / filter_bin[1]
408 min_delta = np.min(deltas)
409 if min_delta < 1E-3:
410 filter_index = deltas.argmin() - 1
411 else:
412 raise ValueError
413
414 # Filter bins for distribcells are "IDs" of each unique placement
415 # of the Cell in the Geometry (integers starting at 0)
416 elif self.type == 'distribcell':
417 filter_index = filter_bin
418
419 # Use ID for all other Filters (e.g., material, cell, etc.)
420 else:
421 val = np.where(self.bins == filter_bin)[0][0]
422 filter_index = val
423
424 except ValueError:
425 msg = 'Unable to get the bin index for Filter since "{0}" ' \
426 'is not one of the bins'.format(filter_bin)
427 raise ValueError(msg)
428
429 return filter_index
430
431 def get_bin(self, bin_index):
432 """Returns the filter bin for some filter bin index.
433
434 Parameters
435 ----------
436 bin_index : Integral
437 The zero-based index into the filter's array of bins. The bin
438 index for 'material', 'surface', 'cell', 'cellborn', and 'universe'
439 filters corresponds to the ID in the filter's list of bins. For
440 'distribcell' tallies the bin index necessarily can only be zero
441 since only one cell can be tracked per tally. The bin index for
442 'energy' and 'energyout' filters corresponds to the energy range of
443 interest in the filter bins of energies. The bin index for 'mesh'
444 filters is the index into the flattened array of (x,y) or (x,y,z)
445 mesh cell bins.
446
447 Returns
448 -------
449 bin : 1-, 2-, or 3-tuple of Real
450 The bin in the Tally data array. The bin for 'material', surface',
451 'cell', 'cellborn', 'universe' and 'distribcell' filters is a
452 1-tuple of the ID corresponding to the appropriate filter bin.
453 The bin for 'energy' and 'energyout' filters is a 2-tuple of the
454 lower and upper energies bounding the energy interval for the filter
455 bin. The bin for 'mesh' tallies is a 2-tuple or 3-tuple of the x,y
456 or x,y,z mesh cell indices corresponding to the bin in a 2D/3D mesh.
457
458 See also
459 --------
460 Filter.get_bin_index()
461
462 """
463
464 cv.check_type('bin_index', bin_index, Integral)
465 cv.check_greater_than('bin_index', bin_index, 0, equality=True)
466 cv.check_less_than('bin_index', bin_index, self.num_bins)
467
468 if self.type == 'mesh':
469
470 # Construct 3-tuple of x,y,z cell indices for a 3D mesh
471 if len(self.mesh.dimension) == 3:
472 nx, ny, nz = self.mesh.dimension
473 x = bin_index / (ny * nz)
474 y = (bin_index - (x * ny * nz)) / nz
475 z = bin_index - (x * ny * nz) - (y * nz)
476 filter_bin = (x, y, z)
477
478 # Construct 2-tuple of x,y cell indices for a 2D mesh
479 else:
480 nx, ny = self.mesh.dimension
481 x = bin_index / ny
482 y = bin_index - (x * ny)
483 filter_bin = (x, y)
484
485 # Construct 2-tuple of lower, upper energies for energy(out) filters
486 elif self.type in ['energy', 'energyout']:
487 filter_bin = (self.bins[bin_index], self.bins[bin_index+1])
488 # Construct 1-tuple of with the cell ID for distribcell filters
489 elif self.type == 'distribcell':
490 filter_bin = (self.bins[0],)
491 # Construct 1-tuple with domain ID (e.g., material) for other filters
492 else:
493 filter_bin = (self.bins[bin_index],)
494
495 return filter_bin
496
497 def get_pandas_dataframe(self, data_size, distribcell_paths=True):
498 """Builds a Pandas DataFrame for the Filter's bins.
499
500 This method constructs a Pandas DataFrame object for the filter with
501 columns annotated by filter bin information. This is a helper method for
502 :meth:`Tally.get_pandas_dataframe`.
503
504 This capability has been tested for Pandas >=0.13.1. However, it is
505 recommended to use v0.16 or newer versions of Pandas since this method
506 uses Pandas' Multi-index functionality.
507
508 Parameters
509 ----------
510 data_size : Integral
511 The total number of bins in the tally corresponding to this filter
512 distribcell_paths : bool, optional
513 Construct columns for distribcell tally filters (default is True).
514 The geometric information in the Summary object is embedded into a
515 Multi-index column with a geometric "path" to each distribcell
516 instance. NOTE: This option assumes that all distribcell paths are
517 of the same length and do not have the same universes and cells but
518 different lattice cell indices.
519
520 Returns
521 -------
522 pandas.DataFrame
523 A Pandas DataFrame with columns of strings that characterize the
524 filter's bins. The number of rows in the DataFrame is the same as
525 the total number of bins in the corresponding tally, with the filter
526 bin appropriately tiled to map to the corresponding tally bins.
527
528 For 'cell', 'cellborn', 'surface', 'material', and 'universe'
529 filters, the DataFrame includes a single column with the cell,
530 surface, material or universe ID corresponding to each filter bin.
531
532 For 'distribcell' filters, the DataFrame either includes:
533
534 1. a single column with the cell instance IDs (without summary info)
535 2. separate columns for the cell IDs, universe IDs, and lattice IDs
536 and x,y,z cell indices corresponding to each (distribcell paths).
537
538 For 'energy' and 'energyout' filters, the DataFrame includes one
539 column for the lower energy bound and one column for the upper
540 energy bound for each filter bin.
541
542 For 'mesh' filters, the DataFrame includes three columns for the
543 x,y,z mesh cell indices corresponding to each filter bin.
544
545 Raises
546 ------
547 ImportError
548 When Pandas is not installed
549
550 See also
551 --------
552 Tally.get_pandas_dataframe(), CrossFilter.get_pandas_dataframe()
553
554 """
555
556 # Initialize Pandas DataFrame
557 import pandas as pd
558 df = pd.DataFrame()
559
560 # mesh filters
561 if self.type == 'mesh':
562
563 # Initialize dictionary to build Pandas Multi-index column
564 filter_dict = {}
565
566 # Append Mesh ID as outermost index of multi-index
567 mesh_key = 'mesh {0}'.format(self.mesh.id)
568
569 # Find mesh dimensions - use 3D indices for simplicity
570 if len(self.mesh.dimension) == 3:
571 nx, ny, nz = self.mesh.dimension
572 else:
573 nx, ny = self.mesh.dimension
574 nz = 1
575
576 # Generate multi-index sub-column for x-axis
577 filter_bins = np.arange(1, nx+1)
578 repeat_factor = ny * nz * self.stride
579 filter_bins = np.repeat(filter_bins, repeat_factor)
580 tile_factor = data_size / len(filter_bins)
581 filter_bins = np.tile(filter_bins, tile_factor)
582 filter_dict[(mesh_key, 'x')] = filter_bins
583
584 # Generate multi-index sub-column for y-axis
585 filter_bins = np.arange(1, ny+1)
586 repeat_factor = nz * self.stride
587 filter_bins = np.repeat(filter_bins, repeat_factor)
588 tile_factor = data_size / len(filter_bins)
589 filter_bins = np.tile(filter_bins, tile_factor)
590 filter_dict[(mesh_key, 'y')] = filter_bins
591
592 # Generate multi-index sub-column for z-axis
593 filter_bins = np.arange(1, nz+1)
594 repeat_factor = self.stride
595 filter_bins = np.repeat(filter_bins, repeat_factor)
596 tile_factor = data_size / len(filter_bins)
597 filter_bins = np.tile(filter_bins, tile_factor)
598 filter_dict[(mesh_key, 'z')] = filter_bins
599
600 # Initialize a Pandas DataFrame from the mesh dictionary
601 df = pd.concat([df, pd.DataFrame(filter_dict)])
602
603 # distribcell filters
604 elif self.type == 'distribcell':
605 level_df = None
606
607 # Create Pandas Multi-index columns for each level in CSG tree
608 if distribcell_paths:
609
610 # Distribcell paths require linked metadata from the Summary
611 if self.distribcell_paths is None:
612 msg = 'Unable to construct distribcell paths since ' \
613 'the Summary is not linked to the StatePoint'
614 raise ValueError(msg)
615
616 # Make copy of array of distribcell paths to use in
617 # Pandas Multi-index column construction
618 distribcell_paths = copy.deepcopy(self.distribcell_paths)
619 num_offsets = len(distribcell_paths)
620
621 # Loop over CSG levels in the distribcell paths
622 level_counter = 0
623 levels_remain = True
624 while levels_remain:
625
626 # Use level key as first index in Pandas Multi-index column
627 level_counter += 1
628 level_key = 'level {}'.format(level_counter)
629
630 # Use the first distribcell path to determine if level
631 # is a universe/cell or lattice level
632 first_path = distribcell_paths[0]
633 next_index = first_path.index('-')
634 level = first_path[:next_index]
635
636 # Trim universe/lattice info from path
637 first_path = first_path[next_index+2:]
638
639 # Create a dictionary for this level for Pandas Multi-index
640 level_dict = OrderedDict()
641
642 # This level is a lattice (e.g., ID(x,y,z))
643 if '(' in level:
644 level_type = 'lattice'
645
646 # Initialize prefix Multi-index keys
647 lat_id_key = (level_key, 'lat', 'id')
648 lat_x_key = (level_key, 'lat', 'x')
649 lat_y_key = (level_key, 'lat', 'y')
650 lat_z_key = (level_key, 'lat', 'z')
651
652 # Allocate NumPy arrays for each CSG level and
653 # each Multi-index column in the DataFrame
654 level_dict[lat_id_key] = np.empty(num_offsets)
655 level_dict[lat_x_key] = np.empty(num_offsets)
656 level_dict[lat_y_key] = np.empty(num_offsets)
657 level_dict[lat_z_key] = np.empty(num_offsets)
658
659 # This level is a universe / cell (e.g., ID->ID)
660 else:
661 level_type = 'universe'
662
663 # Initialize prefix Multi-index keys
664 univ_key = (level_key, 'univ', 'id')
665 cell_key = (level_key, 'cell', 'id')
666
667 # Allocate NumPy arrays for each CSG level and
668 # each Multi-index column in the DataFrame
669 level_dict[univ_key] = np.empty(num_offsets)
670 level_dict[cell_key] = np.empty(num_offsets)
671
672 # Determine any levels remain in path
673 if '-' not in first_path:
674 levels_remain = False
675
676 # Populate Multi-index arrays with all distribcell paths
677 for i, path in enumerate(distribcell_paths):
678
679 if level_type == 'lattice':
680 # Extract lattice ID, indices from path
681 next_index = path.index('-')
682 lat_id_indices = path[:next_index]
683
684 # Trim lattice info from distribcell path
685 distribcell_paths[i] = path[next_index+2:]
686
687 # Extract the lattice cell indices from the path
688 i1 = lat_id_indices.index('(')
689 i2 = lat_id_indices.index(')')
690 i3 = lat_id_indices[i1+1:i2]
691
692 # Assign entry to Lattice Multi-index column
693 level_dict[lat_id_key][i] = path[:i1]
694 level_dict[lat_x_key][i] = int(i3.split(',')[0]) - 1
695 level_dict[lat_y_key][i] = int(i3.split(',')[1]) - 1
696 level_dict[lat_z_key][i] = int(i3.split(',')[2]) - 1
697
698 else:
699 # Extract universe ID from path
700 next_index = path.index('-')
701 universe_id = int(path[:next_index])
702
703 # Trim universe info from distribcell path
704 path = path[next_index+2:]
705
706 # Extract cell ID from path
707 if '-' in path:
708 next_index = path.index('-')
709 cell_id = int(path[:next_index])
710 distribcell_paths[i] = path[next_index+2:]
711 else:
712 cell_id = int(path)
713 distribcell_paths[i] = ''
714
715 # Assign entry to Universe, Cell Multi-index columns
716 level_dict[univ_key][i] = universe_id
717 level_dict[cell_key][i] = cell_id
718
719 # Tile the Multi-index columns
720 for level_key, level_bins in level_dict.items():
721 level_bins = np.repeat(level_bins, self.stride)
722 tile_factor = data_size / len(level_bins)
723 level_bins = np.tile(level_bins, tile_factor)
724 level_dict[level_key] = level_bins
725
726 # Initialize a Pandas DataFrame from the level dictionary
727 if level_df is None:
728 level_df = pd.DataFrame(level_dict)
729 else:
730 level_df = pd.concat([level_df, pd.DataFrame(level_dict)], axis=1)
731
732 # Create DataFrame column for distribcell instance IDs
733 # NOTE: This is performed regardless of whether the user
734 # requests Summary geometric information
735 filter_bins = np.arange(self.num_bins)
736 filter_bins = np.repeat(filter_bins, self.stride)
737 tile_factor = data_size / len(filter_bins)
738 filter_bins = np.tile(filter_bins, tile_factor)
739 df = pd.DataFrame({self.type : filter_bins})
740
741 # If OpenCG level info DataFrame was created, concatenate
742 # with DataFrame of distribcell instance IDs
743 if level_df is not None:
744 level_df = level_df.dropna(axis=1, how='all')
745 level_df = level_df.astype(np.int)
746 df = pd.concat([level_df, df], axis=1)
747
748 # energy, energyout filters
749 elif 'energy' in self.type:
750 # Extract the lower and upper energy bounds, then repeat and tile
751 # them as necessary to account for other filters.
752 lo_bins = np.repeat(self.bins[:-1], self.stride)
753 hi_bins = np.repeat(self.bins[1:], self.stride)
754 tile_factor = data_size / len(lo_bins)
755 lo_bins = np.tile(lo_bins, tile_factor)
756 hi_bins = np.tile(hi_bins, tile_factor)
757
758 # Add the new energy columns to the DataFrame.
759 df.loc[:, self.type + ' low [MeV]'] = lo_bins
760 df.loc[:, self.type + ' high [MeV]'] = hi_bins
761
762 elif self.type in ('azimuthal', 'polar'):
763 # Extract the lower and upper angle bounds, then repeat and tile
764 # them as necessary to account for other filters.
765 lo_bins = np.repeat(self.bins[:-1], self.stride)
766 hi_bins = np.repeat(self.bins[1:], self.stride)
767 tile_factor = data_size / len(lo_bins)
768 lo_bins = np.tile(lo_bins, tile_factor)
769 hi_bins = np.tile(hi_bins, tile_factor)
770
771 # Add the new angle columns to the DataFrame.
772 df.loc[:, self.type + ' low'] = lo_bins
773 df.loc[:, self.type + ' high'] = hi_bins
774
775 # universe, material, surface, cell, and cellborn filters
776 else:
777 filter_bins = np.repeat(self.bins, self.stride)
778 tile_factor = data_size / len(filter_bins)
779 filter_bins = np.tile(filter_bins, tile_factor)
780 filter_bins = filter_bins
781 df = pd.concat([df, pd.DataFrame({self.type : filter_bins})])
782
783 return df
784
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/openmc/filter.py b/openmc/filter.py
--- a/openmc/filter.py
+++ b/openmc/filter.py
@@ -162,6 +162,11 @@
if not isinstance(bins, Iterable):
bins = [bins]
+ # If the bin is 0D numpy array, promote to 1D
+ elif isinstance(bins, np.ndarray):
+ if bins.shape == ():
+ bins.shape = (1,)
+
# If the bins are in a collection, convert it to a list
else:
bins = list(bins)
| {"golden_diff": "diff --git a/openmc/filter.py b/openmc/filter.py\n--- a/openmc/filter.py\n+++ b/openmc/filter.py\n@@ -162,6 +162,11 @@\n if not isinstance(bins, Iterable):\n bins = [bins]\n \n+ # If the bin is 0D numpy array, promote to 1D\n+ elif isinstance(bins, np.ndarray):\n+ if bins.shape == ():\n+ bins.shape = (1,)\n+\n # If the bins are in a collection, convert it to a list\n else:\n bins = list(bins)\n", "issue": "Tally with cell filter assumes the cells tallied to are mutually exclusive\nIf a tally is created with a cell filter, the bins (i.e. cells) are assumed to be mutually exclusive. For instance, we can modify the `examples/xml/lattice/simple/tallies.xml` file for a 4x4 lattice of pin cells to only have the following tally:\n\n```\n<tally id=\"1\">\n <filter type=\"cell\" bins=\"1 101\" />\n <scores>total</scores>\n</tally>\n```\n\nWhere cell `1` is the cell filled with the lattice and cell `101` is a the fuel region of one of the pins. The `tallies.out` file would be:\n\n```\n============================> TALLY 1 <============================\n\nCell 1\n Total Material\n Total Reaction Rate 1.09346 +/- 6.47323E-03\nCell 101\n Total Material\n Total Reaction Rate 0.0 +/- 0.0\n```\n\nThe tallies over the two cells can be put into separate tallies as follows:\n\n```\n<tally id=\"1\">\n <filter type=\"cell\" bins=\"1\" />\n <scores>total</scores>\n</tally>\n<tally id=\"2\">\n <filter type=\"cell\" bins=\"101\" />\n <scores>total</scores>\n</tally> \n```\n\nYielding the desired results:\n\n```\n============================> TALLY 1 <============================\n\nCell 1\n Total Material\n Total Reaction Rate 1.09346 +/- 6.47323E-03\n\n============================> TALLY 2 <============================\n\nCell 101\n Total Material\n Total Reaction Rate 4.99603E-02 +/- 4.27083E-04\n```\n\nThe openmc API will merge tallies with cell filters assuming that the cells are mutually exclusive so this issue should be addressed in both the Fortran code and Python API.\n\nAs far as I can see, the documentation does not indicate that bins for the cell filter must be mutually exclusive. Fixing this issue seems like it might be difficult and waiting for the tally module to be refactored could be the best option.\n\n", "before_files": [{"content": "from collections import Iterable, OrderedDict\nimport copy\nfrom numbers import Real, Integral\nimport sys\n\nimport numpy as np\n\nfrom openmc import Mesh\nimport openmc.checkvalue as cv\n\n\nif sys.version_info[0] >= 3:\n basestring = str\n\n\n_FILTER_TYPES = ['universe', 'material', 'cell', 'cellborn', 'surface',\n 'mesh', 'energy', 'energyout', 'mu', 'polar', 'azimuthal',\n 'distribcell', 'delayedgroup']\n\nclass Filter(object):\n \"\"\"A filter used to constrain a tally to a specific criterion, e.g. only\n tally events when the particle is in a certain cell and energy range.\n\n Parameters\n ----------\n type : str\n The type of the tally filter. Acceptable values are \"universe\",\n \"material\", \"cell\", \"cellborn\", \"surface\", \"mesh\", \"energy\",\n \"energyout\", \"distribcell\", \"mu\", \"polar\", \"azimuthal\", and\n \"delayedgroup\".\n bins : Integral or Iterable of Integral or Iterable of Real\n The bins for the filter. This takes on different meaning for different\n filters. 
See the OpenMC online documentation for more details.\n\n Attributes\n ----------\n type : str\n The type of the tally filter\n bins : Integral or Iterable of Real\n The bins for the filter\n num_bins : Integral\n The number of filter bins\n mesh : openmc.Mesh or None\n A Mesh object for 'mesh' type filters.\n stride : Integral\n The number of filter, nuclide and score bins within each of this\n filter's bins.\n distribcell_paths : list of str\n The paths traversed through the CSG tree to reach each distribcell\n instance (for 'distribcell' filters only)\n\n \"\"\"\n\n # Initialize Filter class attributes\n def __init__(self, type=None, bins=None):\n\n self._type = None\n self._num_bins = 0\n self._bins = None\n self._mesh = None\n self._stride = None\n self._distribcell_paths = None\n\n if type is not None:\n self.type = type\n if bins is not None:\n self.bins = bins\n\n def __eq__(self, other):\n if not isinstance(other, Filter):\n return False\n elif self.type != other.type:\n return False\n elif len(self.bins) != len(other.bins):\n return False\n elif not np.allclose(self.bins, other.bins):\n return False\n else:\n return True\n\n def __ne__(self, other):\n return not self == other\n\n def __gt__(self, other):\n if self.type != other.type:\n if self.type in _FILTER_TYPES and other.type in _FILTER_TYPES:\n delta = _FILTER_TYPES.index(self.type) - \\\n _FILTER_TYPES.index(other.type)\n return delta > 0\n else:\n return False\n else:\n # Compare largest/smallest energy bin edges in energy filters\n # This logic is used when merging tallies with energy filters\n if 'energy' in self.type and 'energy' in other.type:\n return self.bins[0] >= other.bins[-1]\n else:\n return max(self.bins) > max(other.bins)\n\n def __lt__(self, other):\n return not self > other\n\n def __hash__(self):\n return hash(repr(self))\n\n def __repr__(self):\n string = 'Filter\\n'\n string += '{0: <16}{1}{2}\\n'.format('\\tType', '=\\t', self.type)\n string += '{0: <16}{1}{2}\\n'.format('\\tBins', '=\\t', self.bins)\n return string\n\n @property\n def type(self):\n return self._type\n\n @property\n def bins(self):\n return self._bins\n\n @property\n def num_bins(self):\n if self.bins is None:\n return 0\n elif self.type in ['energy', 'energyout']:\n return len(self.bins) - 1\n elif self.type in ['cell', 'cellborn', 'surface', 'universe', 'material']:\n return len(self.bins)\n else:\n return self._num_bins\n\n @property\n def mesh(self):\n return self._mesh\n\n @property\n def stride(self):\n return self._stride\n\n @property\n def distribcell_paths(self):\n return self._distribcell_paths\n\n @type.setter\n def type(self, type):\n if type is None:\n self._type = type\n elif type not in _FILTER_TYPES:\n msg = 'Unable to set Filter type to \"{0}\" since it is not one ' \\\n 'of the supported types'.format(type)\n raise ValueError(msg)\n\n self._type = type\n\n @bins.setter\n def bins(self, bins):\n if self.type is None:\n msg = 'Unable to set bins for Filter to \"{0}\" since ' \\\n 'the Filter type has not yet been set'.format(bins)\n raise ValueError(msg)\n\n # If the bin edge is a single value, it is a Cell, Material, etc. 
ID\n if not isinstance(bins, Iterable):\n bins = [bins]\n\n # If the bins are in a collection, convert it to a list\n else:\n bins = list(bins)\n\n if self.type in ['cell', 'cellborn', 'surface', 'material',\n 'universe', 'distribcell', 'delayedgroup']:\n cv.check_iterable_type('filter bins', bins, Integral)\n for edge in bins:\n cv.check_greater_than('filter bin', edge, 0, equality=True)\n\n elif self.type in ['energy', 'energyout']:\n for edge in bins:\n if not isinstance(edge, Real):\n msg = 'Unable to add bin edge \"{0}\" to a \"{1}\" Filter ' \\\n 'since it is a non-integer or floating point ' \\\n 'value'.format(edge, self.type)\n raise ValueError(msg)\n elif edge < 0.:\n msg = 'Unable to add bin edge \"{0}\" to a \"{1}\" Filter ' \\\n 'since it is a negative value'.format(edge, self.type)\n raise ValueError(msg)\n\n # Check that bin edges are monotonically increasing\n for index in range(len(bins)):\n if index > 0 and bins[index] < bins[index-1]:\n msg = 'Unable to add bin edges \"{0}\" to a \"{1}\" Filter ' \\\n 'since they are not monotonically ' \\\n 'increasing'.format(bins, self.type)\n raise ValueError(msg)\n\n # mesh filters\n elif self.type == 'mesh':\n if not len(bins) == 1:\n msg = 'Unable to add bins \"{0}\" to a mesh Filter since ' \\\n 'only a single mesh can be used per tally'.format(bins)\n raise ValueError(msg)\n elif not isinstance(bins[0], Integral):\n msg = 'Unable to add bin \"{0}\" to mesh Filter since it ' \\\n 'is a non-integer'.format(bins[0])\n raise ValueError(msg)\n elif bins[0] < 0:\n msg = 'Unable to add bin \"{0}\" to mesh Filter since it ' \\\n 'is a negative integer'.format(bins[0])\n raise ValueError(msg)\n\n # If all error checks passed, add bin edges\n self._bins = np.array(bins)\n\n @num_bins.setter\n def num_bins(self, num_bins):\n cv.check_type('filter num_bins', num_bins, Integral)\n cv.check_greater_than('filter num_bins', num_bins, 0, equality=True)\n self._num_bins = num_bins\n\n @mesh.setter\n def mesh(self, mesh):\n cv.check_type('filter mesh', mesh, Mesh)\n\n self._mesh = mesh\n self.type = 'mesh'\n self.bins = self.mesh.id\n\n @stride.setter\n def stride(self, stride):\n cv.check_type('filter stride', stride, Integral)\n if stride < 0:\n msg = 'Unable to set stride \"{0}\" for a \"{1}\" Filter since it ' \\\n 'is a negative value'.format(stride, self.type)\n raise ValueError(msg)\n\n self._stride = stride\n\n @distribcell_paths.setter\n def distribcell_paths(self, distribcell_paths):\n cv.check_iterable_type('distribcell_paths', distribcell_paths, str)\n self._distribcell_paths = distribcell_paths\n\n def can_merge(self, other):\n \"\"\"Determine if filter can be merged with another.\n\n Parameters\n ----------\n other : openmc.Filter\n Filter to compare with\n\n Returns\n -------\n bool\n Whether the filter can be merged\n\n \"\"\"\n\n if not isinstance(other, Filter):\n return False\n\n # Filters must be of the same type\n if self.type != other.type:\n return False\n\n # Distribcell filters cannot have more than one bin\n if self.type == 'distribcell':\n return False\n\n # Mesh filters cannot have more than one bin\n elif self.type == 'mesh':\n return False\n\n # Different energy bins structures must be mutually exclusive and\n # share only one shared bin edge at the minimum or maximum energy\n elif 'energy' in self.type:\n # This low energy edge coincides with other's high energy edge\n if self.bins[0] == other.bins[-1]:\n return True\n # This high energy edge coincides with other's low energy edge\n elif self.bins[-1] == 
other.bins[0]:\n return True\n else:\n return False\n\n else:\n return True\n\n def merge(self, other):\n \"\"\"Merge this filter with another.\n\n Parameters\n ----------\n other : openmc.Filter\n Filter to merge with\n\n Returns\n -------\n merged_filter : openmc.Filter\n Filter resulting from the merge\n\n \"\"\"\n\n if not self.can_merge(other):\n msg = 'Unable to merge \"{0}\" with \"{1}\" ' \\\n 'filters'.format(self.type, other.type)\n raise ValueError(msg)\n\n # Create deep copy of filter to return as merged filter\n merged_filter = copy.deepcopy(self)\n\n # Merge unique filter bins\n merged_bins = np.concatenate((self.bins, other.bins))\n merged_bins = np.unique(merged_bins)\n\n # Sort energy bin edges\n if 'energy' in self.type:\n merged_bins = sorted(merged_bins)\n\n # Assign merged bins to merged filter\n merged_filter.bins = list(merged_bins)\n\n # Count bins in the merged filter\n if 'energy' in merged_filter.type:\n merged_filter.num_bins = len(merged_bins) - 1\n else:\n merged_filter.num_bins = len(merged_bins)\n\n return merged_filter\n\n def is_subset(self, other):\n \"\"\"Determine if another filter is a subset of this filter.\n\n If all of the bins in the other filter are included as bins in this\n filter, then it is a subset of this filter.\n\n Parameters\n ----------\n other : openmc.Filter\n The filter to query as a subset of this filter\n\n Returns\n -------\n bool\n Whether or not the other filter is a subset of this filter\n\n \"\"\"\n\n if not isinstance(other, Filter):\n return False\n elif self.type != other.type:\n return False\n elif self.type in ['energy', 'energyout']:\n if len(self.bins) != len(other.bins):\n return False\n else:\n return np.allclose(self.bins, other.bins)\n\n for bin in other.bins:\n if bin not in self.bins:\n return False\n\n return True\n\n def get_bin_index(self, filter_bin):\n \"\"\"Returns the index in the Filter for some bin.\n\n Parameters\n ----------\n filter_bin : Integral or tuple\n The bin is the integer ID for 'material', 'surface', 'cell',\n 'cellborn', and 'universe' Filters. The bin is an integer for the\n cell instance ID for 'distribcell' Filters. The bin is a 2-tuple of\n floats for 'energy' and 'energyout' filters corresponding to the\n energy boundaries of the bin of interest. 
The bin is an (x,y,z)\n 3-tuple for 'mesh' filters corresponding to the mesh cell\n interest.\n\n Returns\n -------\n filter_index : Integral\n The index in the Tally data array for this filter bin.\n\n See also\n --------\n Filter.get_bin()\n\n \"\"\"\n\n try:\n # Filter bins for a mesh are an (x,y,z) tuple\n if self.type == 'mesh':\n # Convert (x,y,z) to a single bin -- this is similar to\n # subroutine mesh_indices_to_bin in openmc/src/mesh.F90.\n if len(self.mesh.dimension) == 3:\n nx, ny, nz = self.mesh.dimension\n val = (filter_bin[0] - 1) * ny * nz + \\\n (filter_bin[1] - 1) * nz + \\\n (filter_bin[2] - 1)\n else:\n nx, ny = self.mesh.dimension\n val = (filter_bin[0] - 1) * ny + \\\n (filter_bin[1] - 1)\n\n filter_index = val\n\n # Use lower energy bound to find index for energy Filters\n elif self.type in ['energy', 'energyout']:\n deltas = np.abs(self.bins - filter_bin[1]) / filter_bin[1]\n min_delta = np.min(deltas)\n if min_delta < 1E-3:\n filter_index = deltas.argmin() - 1\n else:\n raise ValueError\n\n # Filter bins for distribcells are \"IDs\" of each unique placement\n # of the Cell in the Geometry (integers starting at 0)\n elif self.type == 'distribcell':\n filter_index = filter_bin\n\n # Use ID for all other Filters (e.g., material, cell, etc.)\n else:\n val = np.where(self.bins == filter_bin)[0][0]\n filter_index = val\n\n except ValueError:\n msg = 'Unable to get the bin index for Filter since \"{0}\" ' \\\n 'is not one of the bins'.format(filter_bin)\n raise ValueError(msg)\n\n return filter_index\n\n def get_bin(self, bin_index):\n \"\"\"Returns the filter bin for some filter bin index.\n\n Parameters\n ----------\n bin_index : Integral\n The zero-based index into the filter's array of bins. The bin\n index for 'material', 'surface', 'cell', 'cellborn', and 'universe'\n filters corresponds to the ID in the filter's list of bins. For\n 'distribcell' tallies the bin index necessarily can only be zero\n since only one cell can be tracked per tally. The bin index for\n 'energy' and 'energyout' filters corresponds to the energy range of\n interest in the filter bins of energies. The bin index for 'mesh'\n filters is the index into the flattened array of (x,y) or (x,y,z)\n mesh cell bins.\n\n Returns\n -------\n bin : 1-, 2-, or 3-tuple of Real\n The bin in the Tally data array. The bin for 'material', surface',\n 'cell', 'cellborn', 'universe' and 'distribcell' filters is a\n 1-tuple of the ID corresponding to the appropriate filter bin.\n The bin for 'energy' and 'energyout' filters is a 2-tuple of the\n lower and upper energies bounding the energy interval for the filter\n bin. 
The bin for 'mesh' tallies is a 2-tuple or 3-tuple of the x,y\n or x,y,z mesh cell indices corresponding to the bin in a 2D/3D mesh.\n\n See also\n --------\n Filter.get_bin_index()\n\n \"\"\"\n\n cv.check_type('bin_index', bin_index, Integral)\n cv.check_greater_than('bin_index', bin_index, 0, equality=True)\n cv.check_less_than('bin_index', bin_index, self.num_bins)\n\n if self.type == 'mesh':\n\n # Construct 3-tuple of x,y,z cell indices for a 3D mesh\n if len(self.mesh.dimension) == 3:\n nx, ny, nz = self.mesh.dimension\n x = bin_index / (ny * nz)\n y = (bin_index - (x * ny * nz)) / nz\n z = bin_index - (x * ny * nz) - (y * nz)\n filter_bin = (x, y, z)\n\n # Construct 2-tuple of x,y cell indices for a 2D mesh\n else:\n nx, ny = self.mesh.dimension\n x = bin_index / ny\n y = bin_index - (x * ny)\n filter_bin = (x, y)\n\n # Construct 2-tuple of lower, upper energies for energy(out) filters\n elif self.type in ['energy', 'energyout']:\n filter_bin = (self.bins[bin_index], self.bins[bin_index+1])\n # Construct 1-tuple of with the cell ID for distribcell filters\n elif self.type == 'distribcell':\n filter_bin = (self.bins[0],)\n # Construct 1-tuple with domain ID (e.g., material) for other filters\n else:\n filter_bin = (self.bins[bin_index],)\n\n return filter_bin\n\n def get_pandas_dataframe(self, data_size, distribcell_paths=True):\n \"\"\"Builds a Pandas DataFrame for the Filter's bins.\n\n This method constructs a Pandas DataFrame object for the filter with\n columns annotated by filter bin information. This is a helper method for\n :meth:`Tally.get_pandas_dataframe`.\n\n This capability has been tested for Pandas >=0.13.1. However, it is\n recommended to use v0.16 or newer versions of Pandas since this method\n uses Pandas' Multi-index functionality.\n\n Parameters\n ----------\n data_size : Integral\n The total number of bins in the tally corresponding to this filter\n distribcell_paths : bool, optional\n Construct columns for distribcell tally filters (default is True).\n The geometric information in the Summary object is embedded into a\n Multi-index column with a geometric \"path\" to each distribcell\n instance. NOTE: This option assumes that all distribcell paths are\n of the same length and do not have the same universes and cells but\n different lattice cell indices.\n\n Returns\n -------\n pandas.DataFrame\n A Pandas DataFrame with columns of strings that characterize the\n filter's bins. The number of rows in the DataFrame is the same as\n the total number of bins in the corresponding tally, with the filter\n bin appropriately tiled to map to the corresponding tally bins.\n\n For 'cell', 'cellborn', 'surface', 'material', and 'universe'\n filters, the DataFrame includes a single column with the cell,\n surface, material or universe ID corresponding to each filter bin.\n\n For 'distribcell' filters, the DataFrame either includes:\n\n 1. a single column with the cell instance IDs (without summary info)\n 2. 
separate columns for the cell IDs, universe IDs, and lattice IDs\n and x,y,z cell indices corresponding to each (distribcell paths).\n\n For 'energy' and 'energyout' filters, the DataFrame includes one\n column for the lower energy bound and one column for the upper\n energy bound for each filter bin.\n\n For 'mesh' filters, the DataFrame includes three columns for the\n x,y,z mesh cell indices corresponding to each filter bin.\n\n Raises\n ------\n ImportError\n When Pandas is not installed\n\n See also\n --------\n Tally.get_pandas_dataframe(), CrossFilter.get_pandas_dataframe()\n\n \"\"\"\n\n # Initialize Pandas DataFrame\n import pandas as pd\n df = pd.DataFrame()\n\n # mesh filters\n if self.type == 'mesh':\n\n # Initialize dictionary to build Pandas Multi-index column\n filter_dict = {}\n\n # Append Mesh ID as outermost index of multi-index\n mesh_key = 'mesh {0}'.format(self.mesh.id)\n\n # Find mesh dimensions - use 3D indices for simplicity\n if len(self.mesh.dimension) == 3:\n nx, ny, nz = self.mesh.dimension\n else:\n nx, ny = self.mesh.dimension\n nz = 1\n\n # Generate multi-index sub-column for x-axis\n filter_bins = np.arange(1, nx+1)\n repeat_factor = ny * nz * self.stride\n filter_bins = np.repeat(filter_bins, repeat_factor)\n tile_factor = data_size / len(filter_bins)\n filter_bins = np.tile(filter_bins, tile_factor)\n filter_dict[(mesh_key, 'x')] = filter_bins\n\n # Generate multi-index sub-column for y-axis\n filter_bins = np.arange(1, ny+1)\n repeat_factor = nz * self.stride\n filter_bins = np.repeat(filter_bins, repeat_factor)\n tile_factor = data_size / len(filter_bins)\n filter_bins = np.tile(filter_bins, tile_factor)\n filter_dict[(mesh_key, 'y')] = filter_bins\n\n # Generate multi-index sub-column for z-axis\n filter_bins = np.arange(1, nz+1)\n repeat_factor = self.stride\n filter_bins = np.repeat(filter_bins, repeat_factor)\n tile_factor = data_size / len(filter_bins)\n filter_bins = np.tile(filter_bins, tile_factor)\n filter_dict[(mesh_key, 'z')] = filter_bins\n\n # Initialize a Pandas DataFrame from the mesh dictionary\n df = pd.concat([df, pd.DataFrame(filter_dict)])\n\n # distribcell filters\n elif self.type == 'distribcell':\n level_df = None\n\n # Create Pandas Multi-index columns for each level in CSG tree\n if distribcell_paths:\n\n # Distribcell paths require linked metadata from the Summary\n if self.distribcell_paths is None:\n msg = 'Unable to construct distribcell paths since ' \\\n 'the Summary is not linked to the StatePoint'\n raise ValueError(msg)\n\n # Make copy of array of distribcell paths to use in\n # Pandas Multi-index column construction\n distribcell_paths = copy.deepcopy(self.distribcell_paths)\n num_offsets = len(distribcell_paths)\n\n # Loop over CSG levels in the distribcell paths\n level_counter = 0\n levels_remain = True\n while levels_remain:\n\n # Use level key as first index in Pandas Multi-index column\n level_counter += 1\n level_key = 'level {}'.format(level_counter)\n\n # Use the first distribcell path to determine if level\n # is a universe/cell or lattice level\n first_path = distribcell_paths[0]\n next_index = first_path.index('-')\n level = first_path[:next_index]\n\n # Trim universe/lattice info from path\n first_path = first_path[next_index+2:]\n\n # Create a dictionary for this level for Pandas Multi-index\n level_dict = OrderedDict()\n\n # This level is a lattice (e.g., ID(x,y,z))\n if '(' in level:\n level_type = 'lattice'\n\n # Initialize prefix Multi-index keys\n lat_id_key = (level_key, 'lat', 'id')\n lat_x_key = 
(level_key, 'lat', 'x')\n lat_y_key = (level_key, 'lat', 'y')\n lat_z_key = (level_key, 'lat', 'z')\n\n # Allocate NumPy arrays for each CSG level and\n # each Multi-index column in the DataFrame\n level_dict[lat_id_key] = np.empty(num_offsets)\n level_dict[lat_x_key] = np.empty(num_offsets)\n level_dict[lat_y_key] = np.empty(num_offsets)\n level_dict[lat_z_key] = np.empty(num_offsets)\n\n # This level is a universe / cell (e.g., ID->ID)\n else:\n level_type = 'universe'\n\n # Initialize prefix Multi-index keys\n univ_key = (level_key, 'univ', 'id')\n cell_key = (level_key, 'cell', 'id')\n\n # Allocate NumPy arrays for each CSG level and\n # each Multi-index column in the DataFrame\n level_dict[univ_key] = np.empty(num_offsets)\n level_dict[cell_key] = np.empty(num_offsets)\n\n # Determine any levels remain in path\n if '-' not in first_path:\n levels_remain = False\n\n # Populate Multi-index arrays with all distribcell paths\n for i, path in enumerate(distribcell_paths):\n\n if level_type == 'lattice':\n # Extract lattice ID, indices from path\n next_index = path.index('-')\n lat_id_indices = path[:next_index]\n\n # Trim lattice info from distribcell path\n distribcell_paths[i] = path[next_index+2:]\n\n # Extract the lattice cell indices from the path\n i1 = lat_id_indices.index('(')\n i2 = lat_id_indices.index(')')\n i3 = lat_id_indices[i1+1:i2]\n\n # Assign entry to Lattice Multi-index column\n level_dict[lat_id_key][i] = path[:i1]\n level_dict[lat_x_key][i] = int(i3.split(',')[0]) - 1\n level_dict[lat_y_key][i] = int(i3.split(',')[1]) - 1\n level_dict[lat_z_key][i] = int(i3.split(',')[2]) - 1\n\n else:\n # Extract universe ID from path\n next_index = path.index('-')\n universe_id = int(path[:next_index])\n\n # Trim universe info from distribcell path\n path = path[next_index+2:]\n\n # Extract cell ID from path\n if '-' in path:\n next_index = path.index('-')\n cell_id = int(path[:next_index])\n distribcell_paths[i] = path[next_index+2:]\n else:\n cell_id = int(path)\n distribcell_paths[i] = ''\n\n # Assign entry to Universe, Cell Multi-index columns\n level_dict[univ_key][i] = universe_id\n level_dict[cell_key][i] = cell_id\n\n # Tile the Multi-index columns\n for level_key, level_bins in level_dict.items():\n level_bins = np.repeat(level_bins, self.stride)\n tile_factor = data_size / len(level_bins)\n level_bins = np.tile(level_bins, tile_factor)\n level_dict[level_key] = level_bins\n\n # Initialize a Pandas DataFrame from the level dictionary\n if level_df is None:\n level_df = pd.DataFrame(level_dict)\n else:\n level_df = pd.concat([level_df, pd.DataFrame(level_dict)], axis=1)\n\n # Create DataFrame column for distribcell instance IDs\n # NOTE: This is performed regardless of whether the user\n # requests Summary geometric information\n filter_bins = np.arange(self.num_bins)\n filter_bins = np.repeat(filter_bins, self.stride)\n tile_factor = data_size / len(filter_bins)\n filter_bins = np.tile(filter_bins, tile_factor)\n df = pd.DataFrame({self.type : filter_bins})\n\n # If OpenCG level info DataFrame was created, concatenate\n # with DataFrame of distribcell instance IDs\n if level_df is not None:\n level_df = level_df.dropna(axis=1, how='all')\n level_df = level_df.astype(np.int)\n df = pd.concat([level_df, df], axis=1)\n\n # energy, energyout filters\n elif 'energy' in self.type:\n # Extract the lower and upper energy bounds, then repeat and tile\n # them as necessary to account for other filters.\n lo_bins = np.repeat(self.bins[:-1], self.stride)\n hi_bins = 
np.repeat(self.bins[1:], self.stride)\n tile_factor = data_size / len(lo_bins)\n lo_bins = np.tile(lo_bins, tile_factor)\n hi_bins = np.tile(hi_bins, tile_factor)\n\n # Add the new energy columns to the DataFrame.\n df.loc[:, self.type + ' low [MeV]'] = lo_bins\n df.loc[:, self.type + ' high [MeV]'] = hi_bins\n\n elif self.type in ('azimuthal', 'polar'):\n # Extract the lower and upper angle bounds, then repeat and tile\n # them as necessary to account for other filters.\n lo_bins = np.repeat(self.bins[:-1], self.stride)\n hi_bins = np.repeat(self.bins[1:], self.stride)\n tile_factor = data_size / len(lo_bins)\n lo_bins = np.tile(lo_bins, tile_factor)\n hi_bins = np.tile(hi_bins, tile_factor)\n\n # Add the new angle columns to the DataFrame.\n df.loc[:, self.type + ' low'] = lo_bins\n df.loc[:, self.type + ' high'] = hi_bins\n\n # universe, material, surface, cell, and cellborn filters\n else:\n filter_bins = np.repeat(self.bins, self.stride)\n tile_factor = data_size / len(filter_bins)\n filter_bins = np.tile(filter_bins, tile_factor)\n filter_bins = filter_bins\n df = pd.concat([df, pd.DataFrame({self.type : filter_bins})])\n\n return df\n", "path": "openmc/filter.py"}], "after_files": [{"content": "from collections import Iterable, OrderedDict\nimport copy\nfrom numbers import Real, Integral\nimport sys\n\nimport numpy as np\n\nfrom openmc import Mesh\nimport openmc.checkvalue as cv\n\n\nif sys.version_info[0] >= 3:\n basestring = str\n\n\n_FILTER_TYPES = ['universe', 'material', 'cell', 'cellborn', 'surface',\n 'mesh', 'energy', 'energyout', 'mu', 'polar', 'azimuthal',\n 'distribcell', 'delayedgroup']\n\nclass Filter(object):\n \"\"\"A filter used to constrain a tally to a specific criterion, e.g. only\n tally events when the particle is in a certain cell and energy range.\n\n Parameters\n ----------\n type : str\n The type of the tally filter. Acceptable values are \"universe\",\n \"material\", \"cell\", \"cellborn\", \"surface\", \"mesh\", \"energy\",\n \"energyout\", \"distribcell\", \"mu\", \"polar\", \"azimuthal\", and\n \"delayedgroup\".\n bins : Integral or Iterable of Integral or Iterable of Real\n The bins for the filter. This takes on different meaning for different\n filters. 
See the OpenMC online documentation for more details.\n\n Attributes\n ----------\n type : str\n The type of the tally filter\n bins : Integral or Iterable of Real\n The bins for the filter\n num_bins : Integral\n The number of filter bins\n mesh : openmc.Mesh or None\n A Mesh object for 'mesh' type filters.\n stride : Integral\n The number of filter, nuclide and score bins within each of this\n filter's bins.\n distribcell_paths : list of str\n The paths traversed through the CSG tree to reach each distribcell\n instance (for 'distribcell' filters only)\n\n \"\"\"\n\n # Initialize Filter class attributes\n def __init__(self, type=None, bins=None):\n\n self._type = None\n self._num_bins = 0\n self._bins = None\n self._mesh = None\n self._stride = None\n self._distribcell_paths = None\n\n if type is not None:\n self.type = type\n if bins is not None:\n self.bins = bins\n\n def __eq__(self, other):\n if not isinstance(other, Filter):\n return False\n elif self.type != other.type:\n return False\n elif len(self.bins) != len(other.bins):\n return False\n elif not np.allclose(self.bins, other.bins):\n return False\n else:\n return True\n\n def __ne__(self, other):\n return not self == other\n\n def __gt__(self, other):\n if self.type != other.type:\n if self.type in _FILTER_TYPES and other.type in _FILTER_TYPES:\n delta = _FILTER_TYPES.index(self.type) - \\\n _FILTER_TYPES.index(other.type)\n return delta > 0\n else:\n return False\n else:\n # Compare largest/smallest energy bin edges in energy filters\n # This logic is used when merging tallies with energy filters\n if 'energy' in self.type and 'energy' in other.type:\n return self.bins[0] >= other.bins[-1]\n else:\n return max(self.bins) > max(other.bins)\n\n def __lt__(self, other):\n return not self > other\n\n def __hash__(self):\n return hash(repr(self))\n\n def __repr__(self):\n string = 'Filter\\n'\n string += '{0: <16}{1}{2}\\n'.format('\\tType', '=\\t', self.type)\n string += '{0: <16}{1}{2}\\n'.format('\\tBins', '=\\t', self.bins)\n return string\n\n @property\n def type(self):\n return self._type\n\n @property\n def bins(self):\n return self._bins\n\n @property\n def num_bins(self):\n if self.bins is None:\n return 0\n elif self.type in ['energy', 'energyout']:\n return len(self.bins) - 1\n elif self.type in ['cell', 'cellborn', 'surface', 'universe', 'material']:\n return len(self.bins)\n else:\n return self._num_bins\n\n @property\n def mesh(self):\n return self._mesh\n\n @property\n def stride(self):\n return self._stride\n\n @property\n def distribcell_paths(self):\n return self._distribcell_paths\n\n @type.setter\n def type(self, type):\n if type is None:\n self._type = type\n elif type not in _FILTER_TYPES:\n msg = 'Unable to set Filter type to \"{0}\" since it is not one ' \\\n 'of the supported types'.format(type)\n raise ValueError(msg)\n\n self._type = type\n\n @bins.setter\n def bins(self, bins):\n if self.type is None:\n msg = 'Unable to set bins for Filter to \"{0}\" since ' \\\n 'the Filter type has not yet been set'.format(bins)\n raise ValueError(msg)\n\n # If the bin edge is a single value, it is a Cell, Material, etc. 
ID\n if not isinstance(bins, Iterable):\n bins = [bins]\n\n # If the bin is 0D numpy array, promote to 1D\n elif isinstance(bins, np.ndarray):\n if bins.shape == ():\n bins.shape = (1,)\n\n # If the bins are in a collection, convert it to a list\n else:\n bins = list(bins)\n\n if self.type in ['cell', 'cellborn', 'surface', 'material',\n 'universe', 'distribcell', 'delayedgroup']:\n cv.check_iterable_type('filter bins', bins, Integral)\n for edge in bins:\n cv.check_greater_than('filter bin', edge, 0, equality=True)\n\n elif self.type in ['energy', 'energyout']:\n for edge in bins:\n if not isinstance(edge, Real):\n msg = 'Unable to add bin edge \"{0}\" to a \"{1}\" Filter ' \\\n 'since it is a non-integer or floating point ' \\\n 'value'.format(edge, self.type)\n raise ValueError(msg)\n elif edge < 0.:\n msg = 'Unable to add bin edge \"{0}\" to a \"{1}\" Filter ' \\\n 'since it is a negative value'.format(edge, self.type)\n raise ValueError(msg)\n\n # Check that bin edges are monotonically increasing\n for index in range(len(bins)):\n if index > 0 and bins[index] < bins[index-1]:\n msg = 'Unable to add bin edges \"{0}\" to a \"{1}\" Filter ' \\\n 'since they are not monotonically ' \\\n 'increasing'.format(bins, self.type)\n raise ValueError(msg)\n\n # mesh filters\n elif self.type == 'mesh':\n if not len(bins) == 1:\n msg = 'Unable to add bins \"{0}\" to a mesh Filter since ' \\\n 'only a single mesh can be used per tally'.format(bins)\n raise ValueError(msg)\n elif not isinstance(bins[0], Integral):\n msg = 'Unable to add bin \"{0}\" to mesh Filter since it ' \\\n 'is a non-integer'.format(bins[0])\n raise ValueError(msg)\n elif bins[0] < 0:\n msg = 'Unable to add bin \"{0}\" to mesh Filter since it ' \\\n 'is a negative integer'.format(bins[0])\n raise ValueError(msg)\n\n # If all error checks passed, add bin edges\n self._bins = np.array(bins)\n\n @num_bins.setter\n def num_bins(self, num_bins):\n cv.check_type('filter num_bins', num_bins, Integral)\n cv.check_greater_than('filter num_bins', num_bins, 0, equality=True)\n self._num_bins = num_bins\n\n @mesh.setter\n def mesh(self, mesh):\n cv.check_type('filter mesh', mesh, Mesh)\n\n self._mesh = mesh\n self.type = 'mesh'\n self.bins = self.mesh.id\n\n @stride.setter\n def stride(self, stride):\n cv.check_type('filter stride', stride, Integral)\n if stride < 0:\n msg = 'Unable to set stride \"{0}\" for a \"{1}\" Filter since it ' \\\n 'is a negative value'.format(stride, self.type)\n raise ValueError(msg)\n\n self._stride = stride\n\n @distribcell_paths.setter\n def distribcell_paths(self, distribcell_paths):\n cv.check_iterable_type('distribcell_paths', distribcell_paths, str)\n self._distribcell_paths = distribcell_paths\n\n def can_merge(self, other):\n \"\"\"Determine if filter can be merged with another.\n\n Parameters\n ----------\n other : openmc.Filter\n Filter to compare with\n\n Returns\n -------\n bool\n Whether the filter can be merged\n\n \"\"\"\n\n if not isinstance(other, Filter):\n return False\n\n # Filters must be of the same type\n if self.type != other.type:\n return False\n\n # Distribcell filters cannot have more than one bin\n if self.type == 'distribcell':\n return False\n\n # Mesh filters cannot have more than one bin\n elif self.type == 'mesh':\n return False\n\n # Different energy bins structures must be mutually exclusive and\n # share only one shared bin edge at the minimum or maximum energy\n elif 'energy' in self.type:\n # This low energy edge coincides with other's high energy edge\n if self.bins[0] == 
other.bins[-1]:\n return True\n # This high energy edge coincides with other's low energy edge\n elif self.bins[-1] == other.bins[0]:\n return True\n else:\n return False\n\n else:\n return True\n\n def merge(self, other):\n \"\"\"Merge this filter with another.\n\n Parameters\n ----------\n other : openmc.Filter\n Filter to merge with\n\n Returns\n -------\n merged_filter : openmc.Filter\n Filter resulting from the merge\n\n \"\"\"\n\n if not self.can_merge(other):\n msg = 'Unable to merge \"{0}\" with \"{1}\" ' \\\n 'filters'.format(self.type, other.type)\n raise ValueError(msg)\n\n # Create deep copy of filter to return as merged filter\n merged_filter = copy.deepcopy(self)\n\n # Merge unique filter bins\n merged_bins = np.concatenate((self.bins, other.bins))\n merged_bins = np.unique(merged_bins)\n\n # Sort energy bin edges\n if 'energy' in self.type:\n merged_bins = sorted(merged_bins)\n\n # Assign merged bins to merged filter\n merged_filter.bins = list(merged_bins)\n\n # Count bins in the merged filter\n if 'energy' in merged_filter.type:\n merged_filter.num_bins = len(merged_bins) - 1\n else:\n merged_filter.num_bins = len(merged_bins)\n\n return merged_filter\n\n def is_subset(self, other):\n \"\"\"Determine if another filter is a subset of this filter.\n\n If all of the bins in the other filter are included as bins in this\n filter, then it is a subset of this filter.\n\n Parameters\n ----------\n other : openmc.Filter\n The filter to query as a subset of this filter\n\n Returns\n -------\n bool\n Whether or not the other filter is a subset of this filter\n\n \"\"\"\n\n if not isinstance(other, Filter):\n return False\n elif self.type != other.type:\n return False\n elif self.type in ['energy', 'energyout']:\n if len(self.bins) != len(other.bins):\n return False\n else:\n return np.allclose(self.bins, other.bins)\n\n for bin in other.bins:\n if bin not in self.bins:\n return False\n\n return True\n\n def get_bin_index(self, filter_bin):\n \"\"\"Returns the index in the Filter for some bin.\n\n Parameters\n ----------\n filter_bin : Integral or tuple\n The bin is the integer ID for 'material', 'surface', 'cell',\n 'cellborn', and 'universe' Filters. The bin is an integer for the\n cell instance ID for 'distribcell' Filters. The bin is a 2-tuple of\n floats for 'energy' and 'energyout' filters corresponding to the\n energy boundaries of the bin of interest. 
The bin is an (x,y,z)\n 3-tuple for 'mesh' filters corresponding to the mesh cell\n interest.\n\n Returns\n -------\n filter_index : Integral\n The index in the Tally data array for this filter bin.\n\n See also\n --------\n Filter.get_bin()\n\n \"\"\"\n\n try:\n # Filter bins for a mesh are an (x,y,z) tuple\n if self.type == 'mesh':\n # Convert (x,y,z) to a single bin -- this is similar to\n # subroutine mesh_indices_to_bin in openmc/src/mesh.F90.\n if len(self.mesh.dimension) == 3:\n nx, ny, nz = self.mesh.dimension\n val = (filter_bin[0] - 1) * ny * nz + \\\n (filter_bin[1] - 1) * nz + \\\n (filter_bin[2] - 1)\n else:\n nx, ny = self.mesh.dimension\n val = (filter_bin[0] - 1) * ny + \\\n (filter_bin[1] - 1)\n\n filter_index = val\n\n # Use lower energy bound to find index for energy Filters\n elif self.type in ['energy', 'energyout']:\n deltas = np.abs(self.bins - filter_bin[1]) / filter_bin[1]\n min_delta = np.min(deltas)\n if min_delta < 1E-3:\n filter_index = deltas.argmin() - 1\n else:\n raise ValueError\n\n # Filter bins for distribcells are \"IDs\" of each unique placement\n # of the Cell in the Geometry (integers starting at 0)\n elif self.type == 'distribcell':\n filter_index = filter_bin\n\n # Use ID for all other Filters (e.g., material, cell, etc.)\n else:\n val = np.where(self.bins == filter_bin)[0][0]\n filter_index = val\n\n except ValueError:\n msg = 'Unable to get the bin index for Filter since \"{0}\" ' \\\n 'is not one of the bins'.format(filter_bin)\n raise ValueError(msg)\n\n return filter_index\n\n def get_bin(self, bin_index):\n \"\"\"Returns the filter bin for some filter bin index.\n\n Parameters\n ----------\n bin_index : Integral\n The zero-based index into the filter's array of bins. The bin\n index for 'material', 'surface', 'cell', 'cellborn', and 'universe'\n filters corresponds to the ID in the filter's list of bins. For\n 'distribcell' tallies the bin index necessarily can only be zero\n since only one cell can be tracked per tally. The bin index for\n 'energy' and 'energyout' filters corresponds to the energy range of\n interest in the filter bins of energies. The bin index for 'mesh'\n filters is the index into the flattened array of (x,y) or (x,y,z)\n mesh cell bins.\n\n Returns\n -------\n bin : 1-, 2-, or 3-tuple of Real\n The bin in the Tally data array. The bin for 'material', surface',\n 'cell', 'cellborn', 'universe' and 'distribcell' filters is a\n 1-tuple of the ID corresponding to the appropriate filter bin.\n The bin for 'energy' and 'energyout' filters is a 2-tuple of the\n lower and upper energies bounding the energy interval for the filter\n bin. 
The bin for 'mesh' tallies is a 2-tuple or 3-tuple of the x,y\n or x,y,z mesh cell indices corresponding to the bin in a 2D/3D mesh.\n\n See also\n --------\n Filter.get_bin_index()\n\n \"\"\"\n\n cv.check_type('bin_index', bin_index, Integral)\n cv.check_greater_than('bin_index', bin_index, 0, equality=True)\n cv.check_less_than('bin_index', bin_index, self.num_bins)\n\n if self.type == 'mesh':\n\n # Construct 3-tuple of x,y,z cell indices for a 3D mesh\n if len(self.mesh.dimension) == 3:\n nx, ny, nz = self.mesh.dimension\n x = bin_index / (ny * nz)\n y = (bin_index - (x * ny * nz)) / nz\n z = bin_index - (x * ny * nz) - (y * nz)\n filter_bin = (x, y, z)\n\n # Construct 2-tuple of x,y cell indices for a 2D mesh\n else:\n nx, ny = self.mesh.dimension\n x = bin_index / ny\n y = bin_index - (x * ny)\n filter_bin = (x, y)\n\n # Construct 2-tuple of lower, upper energies for energy(out) filters\n elif self.type in ['energy', 'energyout']:\n filter_bin = (self.bins[bin_index], self.bins[bin_index+1])\n # Construct 1-tuple of with the cell ID for distribcell filters\n elif self.type == 'distribcell':\n filter_bin = (self.bins[0],)\n # Construct 1-tuple with domain ID (e.g., material) for other filters\n else:\n filter_bin = (self.bins[bin_index],)\n\n return filter_bin\n\n def get_pandas_dataframe(self, data_size, distribcell_paths=True):\n \"\"\"Builds a Pandas DataFrame for the Filter's bins.\n\n This method constructs a Pandas DataFrame object for the filter with\n columns annotated by filter bin information. This is a helper method for\n :meth:`Tally.get_pandas_dataframe`.\n\n This capability has been tested for Pandas >=0.13.1. However, it is\n recommended to use v0.16 or newer versions of Pandas since this method\n uses Pandas' Multi-index functionality.\n\n Parameters\n ----------\n data_size : Integral\n The total number of bins in the tally corresponding to this filter\n distribcell_paths : bool, optional\n Construct columns for distribcell tally filters (default is True).\n The geometric information in the Summary object is embedded into a\n Multi-index column with a geometric \"path\" to each distribcell\n instance. NOTE: This option assumes that all distribcell paths are\n of the same length and do not have the same universes and cells but\n different lattice cell indices.\n\n Returns\n -------\n pandas.DataFrame\n A Pandas DataFrame with columns of strings that characterize the\n filter's bins. The number of rows in the DataFrame is the same as\n the total number of bins in the corresponding tally, with the filter\n bin appropriately tiled to map to the corresponding tally bins.\n\n For 'cell', 'cellborn', 'surface', 'material', and 'universe'\n filters, the DataFrame includes a single column with the cell,\n surface, material or universe ID corresponding to each filter bin.\n\n For 'distribcell' filters, the DataFrame either includes:\n\n 1. a single column with the cell instance IDs (without summary info)\n 2. 
separate columns for the cell IDs, universe IDs, and lattice IDs\n and x,y,z cell indices corresponding to each (distribcell paths).\n\n For 'energy' and 'energyout' filters, the DataFrame includes one\n column for the lower energy bound and one column for the upper\n energy bound for each filter bin.\n\n For 'mesh' filters, the DataFrame includes three columns for the\n x,y,z mesh cell indices corresponding to each filter bin.\n\n Raises\n ------\n ImportError\n When Pandas is not installed\n\n See also\n --------\n Tally.get_pandas_dataframe(), CrossFilter.get_pandas_dataframe()\n\n \"\"\"\n\n # Initialize Pandas DataFrame\n import pandas as pd\n df = pd.DataFrame()\n\n # mesh filters\n if self.type == 'mesh':\n\n # Initialize dictionary to build Pandas Multi-index column\n filter_dict = {}\n\n # Append Mesh ID as outermost index of multi-index\n mesh_key = 'mesh {0}'.format(self.mesh.id)\n\n # Find mesh dimensions - use 3D indices for simplicity\n if len(self.mesh.dimension) == 3:\n nx, ny, nz = self.mesh.dimension\n else:\n nx, ny = self.mesh.dimension\n nz = 1\n\n # Generate multi-index sub-column for x-axis\n filter_bins = np.arange(1, nx+1)\n repeat_factor = ny * nz * self.stride\n filter_bins = np.repeat(filter_bins, repeat_factor)\n tile_factor = data_size / len(filter_bins)\n filter_bins = np.tile(filter_bins, tile_factor)\n filter_dict[(mesh_key, 'x')] = filter_bins\n\n # Generate multi-index sub-column for y-axis\n filter_bins = np.arange(1, ny+1)\n repeat_factor = nz * self.stride\n filter_bins = np.repeat(filter_bins, repeat_factor)\n tile_factor = data_size / len(filter_bins)\n filter_bins = np.tile(filter_bins, tile_factor)\n filter_dict[(mesh_key, 'y')] = filter_bins\n\n # Generate multi-index sub-column for z-axis\n filter_bins = np.arange(1, nz+1)\n repeat_factor = self.stride\n filter_bins = np.repeat(filter_bins, repeat_factor)\n tile_factor = data_size / len(filter_bins)\n filter_bins = np.tile(filter_bins, tile_factor)\n filter_dict[(mesh_key, 'z')] = filter_bins\n\n # Initialize a Pandas DataFrame from the mesh dictionary\n df = pd.concat([df, pd.DataFrame(filter_dict)])\n\n # distribcell filters\n elif self.type == 'distribcell':\n level_df = None\n\n # Create Pandas Multi-index columns for each level in CSG tree\n if distribcell_paths:\n\n # Distribcell paths require linked metadata from the Summary\n if self.distribcell_paths is None:\n msg = 'Unable to construct distribcell paths since ' \\\n 'the Summary is not linked to the StatePoint'\n raise ValueError(msg)\n\n # Make copy of array of distribcell paths to use in\n # Pandas Multi-index column construction\n distribcell_paths = copy.deepcopy(self.distribcell_paths)\n num_offsets = len(distribcell_paths)\n\n # Loop over CSG levels in the distribcell paths\n level_counter = 0\n levels_remain = True\n while levels_remain:\n\n # Use level key as first index in Pandas Multi-index column\n level_counter += 1\n level_key = 'level {}'.format(level_counter)\n\n # Use the first distribcell path to determine if level\n # is a universe/cell or lattice level\n first_path = distribcell_paths[0]\n next_index = first_path.index('-')\n level = first_path[:next_index]\n\n # Trim universe/lattice info from path\n first_path = first_path[next_index+2:]\n\n # Create a dictionary for this level for Pandas Multi-index\n level_dict = OrderedDict()\n\n # This level is a lattice (e.g., ID(x,y,z))\n if '(' in level:\n level_type = 'lattice'\n\n # Initialize prefix Multi-index keys\n lat_id_key = (level_key, 'lat', 'id')\n lat_x_key = 
(level_key, 'lat', 'x')\n lat_y_key = (level_key, 'lat', 'y')\n lat_z_key = (level_key, 'lat', 'z')\n\n # Allocate NumPy arrays for each CSG level and\n # each Multi-index column in the DataFrame\n level_dict[lat_id_key] = np.empty(num_offsets)\n level_dict[lat_x_key] = np.empty(num_offsets)\n level_dict[lat_y_key] = np.empty(num_offsets)\n level_dict[lat_z_key] = np.empty(num_offsets)\n\n # This level is a universe / cell (e.g., ID->ID)\n else:\n level_type = 'universe'\n\n # Initialize prefix Multi-index keys\n univ_key = (level_key, 'univ', 'id')\n cell_key = (level_key, 'cell', 'id')\n\n # Allocate NumPy arrays for each CSG level and\n # each Multi-index column in the DataFrame\n level_dict[univ_key] = np.empty(num_offsets)\n level_dict[cell_key] = np.empty(num_offsets)\n\n # Determine any levels remain in path\n if '-' not in first_path:\n levels_remain = False\n\n # Populate Multi-index arrays with all distribcell paths\n for i, path in enumerate(distribcell_paths):\n\n if level_type == 'lattice':\n # Extract lattice ID, indices from path\n next_index = path.index('-')\n lat_id_indices = path[:next_index]\n\n # Trim lattice info from distribcell path\n distribcell_paths[i] = path[next_index+2:]\n\n # Extract the lattice cell indices from the path\n i1 = lat_id_indices.index('(')\n i2 = lat_id_indices.index(')')\n i3 = lat_id_indices[i1+1:i2]\n\n # Assign entry to Lattice Multi-index column\n level_dict[lat_id_key][i] = path[:i1]\n level_dict[lat_x_key][i] = int(i3.split(',')[0]) - 1\n level_dict[lat_y_key][i] = int(i3.split(',')[1]) - 1\n level_dict[lat_z_key][i] = int(i3.split(',')[2]) - 1\n\n else:\n # Extract universe ID from path\n next_index = path.index('-')\n universe_id = int(path[:next_index])\n\n # Trim universe info from distribcell path\n path = path[next_index+2:]\n\n # Extract cell ID from path\n if '-' in path:\n next_index = path.index('-')\n cell_id = int(path[:next_index])\n distribcell_paths[i] = path[next_index+2:]\n else:\n cell_id = int(path)\n distribcell_paths[i] = ''\n\n # Assign entry to Universe, Cell Multi-index columns\n level_dict[univ_key][i] = universe_id\n level_dict[cell_key][i] = cell_id\n\n # Tile the Multi-index columns\n for level_key, level_bins in level_dict.items():\n level_bins = np.repeat(level_bins, self.stride)\n tile_factor = data_size / len(level_bins)\n level_bins = np.tile(level_bins, tile_factor)\n level_dict[level_key] = level_bins\n\n # Initialize a Pandas DataFrame from the level dictionary\n if level_df is None:\n level_df = pd.DataFrame(level_dict)\n else:\n level_df = pd.concat([level_df, pd.DataFrame(level_dict)], axis=1)\n\n # Create DataFrame column for distribcell instance IDs\n # NOTE: This is performed regardless of whether the user\n # requests Summary geometric information\n filter_bins = np.arange(self.num_bins)\n filter_bins = np.repeat(filter_bins, self.stride)\n tile_factor = data_size / len(filter_bins)\n filter_bins = np.tile(filter_bins, tile_factor)\n df = pd.DataFrame({self.type : filter_bins})\n\n # If OpenCG level info DataFrame was created, concatenate\n # with DataFrame of distribcell instance IDs\n if level_df is not None:\n level_df = level_df.dropna(axis=1, how='all')\n level_df = level_df.astype(np.int)\n df = pd.concat([level_df, df], axis=1)\n\n # energy, energyout filters\n elif 'energy' in self.type:\n # Extract the lower and upper energy bounds, then repeat and tile\n # them as necessary to account for other filters.\n lo_bins = np.repeat(self.bins[:-1], self.stride)\n hi_bins = 
np.repeat(self.bins[1:], self.stride)\n tile_factor = data_size / len(lo_bins)\n lo_bins = np.tile(lo_bins, tile_factor)\n hi_bins = np.tile(hi_bins, tile_factor)\n\n # Add the new energy columns to the DataFrame.\n df.loc[:, self.type + ' low [MeV]'] = lo_bins\n df.loc[:, self.type + ' high [MeV]'] = hi_bins\n\n elif self.type in ('azimuthal', 'polar'):\n # Extract the lower and upper angle bounds, then repeat and tile\n # them as necessary to account for other filters.\n lo_bins = np.repeat(self.bins[:-1], self.stride)\n hi_bins = np.repeat(self.bins[1:], self.stride)\n tile_factor = data_size / len(lo_bins)\n lo_bins = np.tile(lo_bins, tile_factor)\n hi_bins = np.tile(hi_bins, tile_factor)\n\n # Add the new angle columns to the DataFrame.\n df.loc[:, self.type + ' low'] = lo_bins\n df.loc[:, self.type + ' high'] = hi_bins\n\n # universe, material, surface, cell, and cellborn filters\n else:\n filter_bins = np.repeat(self.bins, self.stride)\n tile_factor = data_size / len(filter_bins)\n filter_bins = np.tile(filter_bins, tile_factor)\n filter_bins = filter_bins\n df = pd.concat([df, pd.DataFrame({self.type : filter_bins})])\n\n return df\n", "path": "openmc/filter.py"}]} |
gh_patches_debug_1212 | rasdani/github-patches | git_diff | google__jax-19166 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unexpected behavior of `jax.scipy.stats.binom.pmf`
### Description
The pmf of a random variable should be zero outside of its support. While plotting the graph for `jax.scipy.stats.binom.pmf`, I noticed that for $n>5$ and $p>0.5$ there are oscillations in the pmf values that should not be there. For evidence, I am attaching a plot too.
```python
import jax
from jax import numpy as jnp
from matplotlib import pyplot as plt
x = jnp.linspace(-1, 10, 1000)
xxf = jax.scipy.stats.binom.pmf(k=x, n=5, p=0.8)
plt.plot(x, xxf)
plt.tight_layout()
plt.show()
```

The side to the left of zero behaves as expected.
### What jax/jaxlib version are you using?
jax v0.4.23
### Which accelerator(s) are you using?
CPU
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `jax/_src/scipy/stats/binom.py`
Content:
```
1 # Copyright 2023 The JAX Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License
14
15
16 import scipy.stats as osp_stats
17
18 from jax import lax
19 import jax.numpy as jnp
20 from jax._src.numpy.util import _wraps, promote_args_inexact
21 from jax._src.scipy.special import gammaln, xlogy, xlog1py
22 from jax._src.typing import Array, ArrayLike
23
24
25 @_wraps(osp_stats.nbinom.logpmf, update_doc=False)
26 def logpmf(k: ArrayLike, n: ArrayLike, p: ArrayLike, loc: ArrayLike = 0) -> Array:
27 """JAX implementation of scipy.stats.binom.logpmf."""
28 k, n, p, loc = promote_args_inexact("binom.logpmf", k, n, p, loc)
29 y = lax.sub(k, loc)
30 comb_term = lax.sub(
31 gammaln(n + 1),
32 lax.add(gammaln(y + 1), gammaln(n - y + 1))
33 )
34 log_linear_term = lax.add(xlogy(y, p), xlog1py(lax.sub(n, y), lax.neg(p)))
35 log_probs = lax.add(comb_term, log_linear_term)
36 return jnp.where(lax.lt(k, loc), -jnp.inf, log_probs)
37
38
39 @_wraps(osp_stats.nbinom.pmf, update_doc=False)
40 def pmf(k: ArrayLike, n: ArrayLike, p: ArrayLike, loc: ArrayLike = 0) -> Array:
41 """JAX implementation of scipy.stats.binom.pmf."""
42 return lax.exp(logpmf(k, n, p, loc))
43
```
--- END FILES ---
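Editorial note for readers working through this record: the suspect line is the final `jnp.where`, which only masks `k < loc`. For non-integer `k` above `n`, `gammaln(n - y + 1)` is evaluated at a negative non-integer and stays finite, so the unmasked upper side leaks non-zero probabilities, which is what shows up as the oscillations in the plot. A hedged probe, mirroring the issue's own import style (values are arbitrary):

```python
import jax

# With the unpatched module, a fractional k above n yields a non-zero pmf,
# even though a Binomial(n=5, p=0.8) variable cannot exceed 5.
print(jax.scipy.stats.binom.pmf(k=6.5, n=5, p=0.8))  # non-zero before the fix
```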
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/jax/_src/scipy/stats/binom.py b/jax/_src/scipy/stats/binom.py
--- a/jax/_src/scipy/stats/binom.py
+++ b/jax/_src/scipy/stats/binom.py
@@ -33,7 +33,7 @@
)
log_linear_term = lax.add(xlogy(y, p), xlog1py(lax.sub(n, y), lax.neg(p)))
log_probs = lax.add(comb_term, log_linear_term)
-    return jnp.where(lax.lt(k, loc), -jnp.inf, log_probs)
+    return jnp.where(lax.ge(k, loc) & lax.lt(k, loc + n + 1), log_probs, -jnp.inf)
@_wraps(osp_stats.nbinom.pmf, update_doc=False)
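A quick way to sanity-check the patched bound handling (editorial sketch; parameter values are arbitrary): with the new mask, the pmf should agree with SciPy and vanish for every `k` outside `[loc, loc + n]`.

```python
import jax.numpy as jnp
import scipy.stats
from jax.scipy.stats import binom

k = jnp.arange(-2, 9)  # integer probes below, inside, and above the support
patched = binom.pmf(k=k, n=5, p=0.8)
reference = scipy.stats.binom.pmf(k, n=5, p=0.8)
print(jnp.allclose(patched, reference, atol=1e-6))  # expected: True with the fix
```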
| {"golden_diff": "diff --git a/jax/_src/scipy/stats/binom.py b/jax/_src/scipy/stats/binom.py\n--- a/jax/_src/scipy/stats/binom.py\n+++ b/jax/_src/scipy/stats/binom.py\n@@ -33,7 +33,7 @@\n )\n log_linear_term = lax.add(xlogy(y, p), xlog1py(lax.sub(n, y), lax.neg(p)))\n log_probs = lax.add(comb_term, log_linear_term)\n- return jnp.where(lax.lt(k, loc), -jnp.inf, log_probs)\n+ return jnp.where(lax.ge(k, loc) & lax.lt(k, loc + n + 1), log_probs, -jnp.inf)\n \n \n @_wraps(osp_stats.nbinom.pmf, update_doc=False)\n", "issue": "Unexpected behavior of `jax.scipy.stats.binom.pmf`\n### Description\r\n\r\npmf of a random variable should be zero outside of its range. While plotting the graph for `jax.scipy.stats.binom.pmf`, I notice that for $n>5$ and $p>0.5$, there are some oscillations in the values of the pmf, which should not be there. For evidence, I am attaching a plot too.\r\n\r\n```python\r\nimport jax\r\nfrom jax import numpy as jnp\r\nfrom matplotlib import pyplot as plt\r\n\r\nx = jnp.linspace(-1, 10, 1000)\r\nxxf = jax.scipy.stats.binom.pmf(k=x, n=5, p=0.8)\r\n\r\nplt.plot(x, xxf)\r\nplt.tight_layout()\r\nplt.show()\r\n```\r\n\r\nThe side left to the zero is as expected.\r\n\r\n### What jax/jaxlib version are you using?\r\n\r\njax v0.4.23\r\n\r\n### Which accelerator(s) are you using?\r\n\r\nCPU\n", "before_files": [{"content": "# Copyright 2023 The JAX Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License\n\n\nimport scipy.stats as osp_stats\n\nfrom jax import lax\nimport jax.numpy as jnp\nfrom jax._src.numpy.util import _wraps, promote_args_inexact\nfrom jax._src.scipy.special import gammaln, xlogy, xlog1py\nfrom jax._src.typing import Array, ArrayLike\n\n\n@_wraps(osp_stats.nbinom.logpmf, update_doc=False)\ndef logpmf(k: ArrayLike, n: ArrayLike, p: ArrayLike, loc: ArrayLike = 0) -> Array:\n \"\"\"JAX implementation of scipy.stats.binom.logpmf.\"\"\"\n k, n, p, loc = promote_args_inexact(\"binom.logpmf\", k, n, p, loc)\n y = lax.sub(k, loc)\n comb_term = lax.sub(\n gammaln(n + 1),\n lax.add(gammaln(y + 1), gammaln(n - y + 1))\n )\n log_linear_term = lax.add(xlogy(y, p), xlog1py(lax.sub(n, y), lax.neg(p)))\n log_probs = lax.add(comb_term, log_linear_term)\n return jnp.where(lax.lt(k, loc), -jnp.inf, log_probs)\n\n\n@_wraps(osp_stats.nbinom.pmf, update_doc=False)\ndef pmf(k: ArrayLike, n: ArrayLike, p: ArrayLike, loc: ArrayLike = 0) -> Array:\n \"\"\"JAX implementation of scipy.stats.binom.pmf.\"\"\"\n return lax.exp(logpmf(k, n, p, loc))\n", "path": "jax/_src/scipy/stats/binom.py"}], "after_files": [{"content": "# Copyright 2023 The JAX Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 
implied.\n# See the License for the specific language governing permissions and\n# limitations under the License\n\n\nimport scipy.stats as osp_stats\n\nfrom jax import lax\nimport jax.numpy as jnp\nfrom jax._src.numpy.util import _wraps, promote_args_inexact\nfrom jax._src.scipy.special import gammaln, xlogy, xlog1py\nfrom jax._src.typing import Array, ArrayLike\n\n\n@_wraps(osp_stats.nbinom.logpmf, update_doc=False)\ndef logpmf(k: ArrayLike, n: ArrayLike, p: ArrayLike, loc: ArrayLike = 0) -> Array:\n \"\"\"JAX implementation of scipy.stats.binom.logpmf.\"\"\"\n k, n, p, loc = promote_args_inexact(\"binom.logpmf\", k, n, p, loc)\n y = lax.sub(k, loc)\n comb_term = lax.sub(\n gammaln(n + 1),\n lax.add(gammaln(y + 1), gammaln(n - y + 1))\n )\n log_linear_term = lax.add(xlogy(y, p), xlog1py(lax.sub(n, y), lax.neg(p)))\n log_probs = lax.add(comb_term, log_linear_term)\n return jnp.where(lax.ge(k, loc) & lax.lt(k, loc + n + 1), log_probs, -jnp.inf)\n\n\n@_wraps(osp_stats.nbinom.pmf, update_doc=False)\ndef pmf(k: ArrayLike, n: ArrayLike, p: ArrayLike, loc: ArrayLike = 0) -> Array:\n \"\"\"JAX implementation of scipy.stats.binom.pmf.\"\"\"\n return lax.exp(logpmf(k, n, p, loc))\n", "path": "jax/_src/scipy/stats/binom.py"}]} |
gh_patches_debug_1213 | rasdani/github-patches | git_diff | xonsh__xonsh-4622 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Valid Python fails when invoked inside a function
This simple code should return the same list as it is provided with:
```python3
def _fail(x):
return [i for i in x if i is not None and i < 10]
```
However, it fails when invoked:
```
$ _fail([1,2,3])
xonsh: For full traceback set: $XONSH_SHOW_TRACEBACK = True
xonsh: subprocess mode: command not found: i
Did you mean one of the following?
z: Alias
X: Command (X)
w: Command (w)
vi: Command (vi)
[: Command ([)
i: command not found
xonsh: For full traceback set: $XONSH_SHOW_TRACEBACK = True
xonsh: subprocess mode: command not found: i
Did you mean one of the following?
z: Alias
X: Command (X)
w: Command (w)
vi: Command (vi)
[: Command ([)
i: command not found
xonsh: For full traceback set: $XONSH_SHOW_TRACEBACK = True
xonsh: subprocess mode: command not found: i
Did you mean one of the following?
z: Alias
X: Command (X)
w: Command (w)
vi: Command (vi)
[: Command ([)
i: command not found
[]
```
This does *not* happen if one uses a lambda
```python3
_fail_lambda = lambda x: [i for i in x if i is not None and i < 10]
```
Which produces the correct result:
```
$ _fail_lambda([1,2,3])
[1, 2, 3]
```
--- END ISSUE ---
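The report above is a xonsh parsing regression rather than a Python-semantics question; as an editorial aside (not part of the original issue), plain CPython treats the function and the lambda identically once the lambda has its `x` parameter:
```python
# Editorial check in plain CPython (outside xonsh): both definitions from the
# issue behave the same.
def _fail(x):
    return [i for i in x if i is not None and i < 10]

_fail_lambda = lambda x: [i for i in x if i is not None and i < 10]

assert _fail([1, 2, 3]) == _fail_lambda([1, 2, 3]) == [1, 2, 3]
```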
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `xonsh/ast.py`
Content:
```
1 """The xonsh abstract syntax tree node."""
2 # These are imported into our module namespace for the benefit of parser.py.
3 # pylint: disable=unused-import
4 import sys
5 from ast import (
6 Module,
7 Num,
8 Expr,
9 Str,
10 Bytes,
11 UnaryOp,
12 UAdd,
13 USub,
14 Invert,
15 BinOp,
16 Add,
17 Sub,
18 Mult,
19 Div,
20 FloorDiv,
21 Mod,
22 Pow,
23 Compare,
24 Lt,
25 Gt,
26 LtE,
27 GtE,
28 Eq,
29 NotEq,
30 In,
31 NotIn,
32 Is,
33 IsNot,
34 Not,
35 BoolOp,
36 Or,
37 And,
38 Subscript,
39 Load,
40 Slice,
41 ExtSlice,
42 List,
43 Tuple,
44 Set,
45 Dict,
46 AST,
47 NameConstant,
48 Name,
49 GeneratorExp,
50 Store,
51 comprehension,
52 ListComp,
53 SetComp,
54 DictComp,
55 Assign,
56 AugAssign,
57 BitXor,
58 BitAnd,
59 BitOr,
60 LShift,
61 RShift,
62 Assert,
63 Delete,
64 Del,
65 Pass,
66 Raise,
67 Import,
68 alias,
69 ImportFrom,
70 Continue,
71 Break,
72 Yield,
73 YieldFrom,
74 Return,
75 IfExp,
76 Lambda,
77 arguments,
78 arg,
79 Call,
80 keyword,
81 Attribute,
82 Global,
83 Nonlocal,
84 If,
85 While,
86 For,
87 withitem,
88 With,
89 Try,
90 ExceptHandler,
91 FunctionDef,
92 ClassDef,
93 Starred,
94 NodeTransformer,
95 Interactive,
96 Expression,
97 Index,
98 literal_eval,
99 dump,
100 walk,
101 increment_lineno,
102 Constant,
103 )
104 from ast import Ellipsis as EllipsisNode
105
106 # pylint: enable=unused-import
107 import textwrap
108 import itertools
109
110 from xonsh.built_ins import XSH
111 from xonsh.tools import subproc_toks, find_next_break, get_logical_line
112
113 from ast import (
114 MatMult,
115 AsyncFunctionDef,
116 AsyncWith,
117 AsyncFor,
118 Await,
119 JoinedStr,
120 FormattedValue,
121 AnnAssign,
122 )
123
124 from xonsh.platform import PYTHON_VERSION_INFO
125
126 if PYTHON_VERSION_INFO > (3, 8):
127 from ast import NamedExpr # type:ignore
128
129 STATEMENTS = (
130 FunctionDef,
131 ClassDef,
132 Return,
133 Delete,
134 Assign,
135 AugAssign,
136 For,
137 While,
138 If,
139 With,
140 Raise,
141 Try,
142 Assert,
143 Import,
144 ImportFrom,
145 Global,
146 Nonlocal,
147 Expr,
148 Pass,
149 Break,
150 Continue,
151 AnnAssign,
152 )
153
154
155 def leftmostname(node):
156 """Attempts to find the first name in the tree."""
157 if isinstance(node, Name):
158 rtn = node.id
159 elif isinstance(node, (BinOp, Compare)):
160 rtn = leftmostname(node.left)
161 elif isinstance(node, (Attribute, Subscript, Starred, Expr)):
162 rtn = leftmostname(node.value)
163 elif isinstance(node, Call):
164 rtn = leftmostname(node.func)
165 elif isinstance(node, UnaryOp):
166 rtn = leftmostname(node.operand)
167 elif isinstance(node, BoolOp):
168 rtn = leftmostname(node.values[0])
169 elif isinstance(node, Assign):
170 rtn = leftmostname(node.targets[0])
171 elif isinstance(node, AnnAssign):
172 rtn = leftmostname(node.target)
173 elif isinstance(node, (Str, Bytes, JoinedStr)):
174 # handles case of "./my executable"
175 rtn = leftmostname(node.s)
176 elif isinstance(node, Tuple) and len(node.elts) > 0:
177 # handles case of echo ,1,2,3
178 rtn = leftmostname(node.elts[0])
179 else:
180 rtn = None
181 return rtn
182
183
184 def get_lineno(node, default=0):
185 """Gets the lineno of a node or returns the default."""
186 return getattr(node, "lineno", default)
187
188
189 def min_line(node):
190 """Computes the minimum lineno."""
191 node_line = get_lineno(node)
192 return min(map(get_lineno, walk(node), itertools.repeat(node_line)))
193
194
195 def max_line(node):
196 """Computes the maximum lineno."""
197 return max(map(get_lineno, walk(node)))
198
199
200 def get_col(node, default=-1):
201 """Gets the col_offset of a node, or returns the default"""
202 return getattr(node, "col_offset", default)
203
204
205 def min_col(node):
206 """Computes the minimum col_offset."""
207 return min(map(get_col, walk(node), itertools.repeat(node.col_offset)))
208
209
210 def max_col(node):
211 """Returns the maximum col_offset of the node and all sub-nodes."""
212 col = getattr(node, "max_col", None)
213 if col is not None:
214 return col
215 highest = max(walk(node), key=get_col)
216 col = highest.col_offset + node_len(highest)
217 return col
218
219
220 def node_len(node):
221 """The length of a node as a string"""
222 val = 0
223 for n in walk(node):
224 if isinstance(n, Name):
225 val += len(n.id)
226 elif isinstance(n, Attribute):
227 val += 1 + (len(n.attr) if isinstance(n.attr, str) else 0)
228 # this may need to be added to for more nodes as more cases are found
229 return val
230
231
232 def get_id(node, default=None):
233 """Gets the id attribute of a node, or returns a default."""
234 return getattr(node, "id", default)
235
236
237 def gather_names(node):
238 """Returns the set of all names present in the node's tree."""
239 rtn = set(map(get_id, walk(node)))
240 rtn.discard(None)
241 return rtn
242
243
244 def get_id_ctx(node):
245 """Gets the id and attribute of a node, or returns a default."""
246 nid = getattr(node, "id", None)
247 if nid is None:
248 return (None, None)
249 return (nid, node.ctx)
250
251
252 def gather_load_store_names(node):
253 """Returns the names present in the node's tree in a set of load nodes and
254 a set of store nodes.
255 """
256 load = set()
257 store = set()
258 for nid, ctx in map(get_id_ctx, walk(node)):
259 if nid is None:
260 continue
261 elif isinstance(ctx, Load):
262 load.add(nid)
263 else:
264 store.add(nid)
265 return (load, store)
266
267
268 def has_elts(x):
269 """Tests if x is an AST node with elements."""
270 return isinstance(x, AST) and hasattr(x, "elts")
271
272
273 def load_attribute_chain(name, lineno=None, col=None):
274 """Creates an AST that loads variable name that may (or may not)
275 have attribute chains. For example, "a.b.c"
276 """
277 names = name.split(".")
278 node = Name(id=names.pop(0), ctx=Load(), lineno=lineno, col_offset=col)
279 for attr in names:
280 node = Attribute(
281 value=node, attr=attr, ctx=Load(), lineno=lineno, col_offset=col
282 )
283 return node
284
285
286 def xonsh_call(name, args, lineno=None, col=None):
287 """Creates the AST node for calling a function of a given name.
288 Functions names may contain attribute access, e.g. __xonsh__.env.
289 """
290 return Call(
291 func=load_attribute_chain(name, lineno=lineno, col=col),
292 args=args,
293 keywords=[],
294 starargs=None,
295 kwargs=None,
296 lineno=lineno,
297 col_offset=col,
298 )
299
300
301 def isdescendable(node):
302 """Determines whether or not a node is worth visiting. Currently only
303 UnaryOp and BoolOp nodes are visited.
304 """
305 return isinstance(node, (UnaryOp, BoolOp))
306
307
308 def isexpression(node, ctx=None, *args, **kwargs):
309 """Determines whether a node (or code string) is an expression, and
310 does not contain any statements. The execution context (ctx) and
311 other args and kwargs are passed down to the parser, as needed.
312 """
313 # parse string to AST
314 if isinstance(node, str):
315 node = node if node.endswith("\n") else node + "\n"
316 ctx = XSH.ctx if ctx is None else ctx
317 node = XSH.execer.parse(node, ctx, *args, **kwargs)
318 # determine if expression-like enough
319 if isinstance(node, (Expr, Expression)):
320 isexpr = True
321 elif isinstance(node, Module) and len(node.body) == 1:
322 isexpr = isinstance(node.body[0], (Expr, Expression))
323 else:
324 isexpr = False
325 return isexpr
326
327
328 class CtxAwareTransformer(NodeTransformer):
329 """Transforms a xonsh AST based to use subprocess calls when
330 the first name in an expression statement is not known in the context.
331 This assumes that the expression statement is instead parseable as
332 a subprocess.
333 """
334
335 def __init__(self, parser):
336 """Parameters
337 ----------
338 parser : xonsh.Parser
339 A parse instance to try to parse subprocess statements with.
340 """
341 super().__init__()
342 self.parser = parser
343 self.input = None
344 self.contexts = []
345 self.lines = None
346 self.mode = None
347 self._nwith = 0
348 self.filename = "<xonsh-code>"
349 self.debug_level = 0
350
351 def ctxvisit(self, node, inp, ctx, mode="exec", filename=None, debug_level=0):
352 """Transforms the node in a context-dependent way.
353
354 Parameters
355 ----------
356 node : ast.AST
357 A syntax tree to transform.
358 inp : str
359 The input code in string format.
360 ctx : dict
361 The root context to use.
362 filename : str, optional
363 File we are to transform.
364 debug_level : int, optional
365 Debugging level to use in lexing and parsing.
366
367 Returns
368 -------
369 node : ast.AST
370 The transformed node.
371 """
372 self.filename = self.filename if filename is None else filename
373 self.debug_level = debug_level
374 self.lines = inp.splitlines()
375 self.contexts = [ctx, set()]
376 self.mode = mode
377 self._nwith = 0
378 node = self.visit(node)
379 del self.lines, self.contexts, self.mode
380 self._nwith = 0
381 return node
382
383 def ctxupdate(self, iterable):
384 """Updated the most recent context."""
385 self.contexts[-1].update(iterable)
386
387 def ctxadd(self, value):
388 """Adds a value the most recent context."""
389 self.contexts[-1].add(value)
390
391 def ctxremove(self, value):
392 """Removes a value the most recent context."""
393 for ctx in reversed(self.contexts):
394 if value in ctx:
395 ctx.remove(value)
396 break
397
398 def try_subproc_toks(self, node, strip_expr=False):
399 """Tries to parse the line of the node as a subprocess."""
400 line, nlogical, idx = get_logical_line(self.lines, node.lineno - 1)
401 if self.mode == "eval":
402 mincol = len(line) - len(line.lstrip())
403 maxcol = None
404 else:
405 mincol = max(min_col(node) - 1, 0)
406 maxcol = max_col(node)
407 if mincol == maxcol:
408 maxcol = find_next_break(line, mincol=mincol, lexer=self.parser.lexer)
409 elif nlogical > 1:
410 maxcol = None
411 elif maxcol < len(line) and line[maxcol] == ";":
412 pass
413 else:
414 maxcol += 1
415 spline = subproc_toks(
416 line,
417 mincol=mincol,
418 maxcol=maxcol,
419 returnline=False,
420 lexer=self.parser.lexer,
421 )
422 if spline is None or spline != f"![{line[mincol:maxcol].strip()}]":
423 # failed to get something consistent, try greedy wrap
424 spline = subproc_toks(
425 line,
426 mincol=mincol,
427 maxcol=maxcol,
428 returnline=False,
429 lexer=self.parser.lexer,
430 greedy=True,
431 )
432 if spline is None:
433 return node
434 try:
435 newnode = self.parser.parse(
436 spline,
437 mode=self.mode,
438 filename=self.filename,
439 debug_level=(self.debug_level >= 2),
440 )
441 newnode = newnode.body
442 if not isinstance(newnode, AST):
443 # take the first (and only) Expr
444 newnode = newnode[0]
445 increment_lineno(newnode, n=node.lineno - 1)
446 newnode.col_offset = node.col_offset
447 if self.debug_level >= 1:
448 msg = "{0}:{1}:{2}{3} - {4}\n" "{0}:{1}:{2}{3} + {5}"
449 mstr = "" if maxcol is None else ":" + str(maxcol)
450 msg = msg.format(self.filename, node.lineno, mincol, mstr, line, spline)
451 print(msg, file=sys.stderr)
452 except SyntaxError:
453 newnode = node
454 if strip_expr and isinstance(newnode, Expr):
455 newnode = newnode.value
456 return newnode
457
458 def is_in_scope(self, node):
459 """Determines whether or not the current node is in scope."""
460 names, store = gather_load_store_names(node)
461 names -= store
462 if not names:
463 return True
464 inscope = False
465 for ctx in reversed(self.contexts):
466 names -= ctx
467 if not names:
468 inscope = True
469 break
470 return inscope
471
472 #
473 # Replacement visitors
474 #
475
476 def visit_Expression(self, node):
477 """Handle visiting an expression body."""
478 if isdescendable(node.body):
479 node.body = self.visit(node.body)
480 body = node.body
481 inscope = self.is_in_scope(body)
482 if not inscope:
483 node.body = self.try_subproc_toks(body)
484 return node
485
486 def visit_Expr(self, node):
487 """Handle visiting an expression."""
488 if isdescendable(node.value):
489 node.value = self.visit(node.value) # this allows diving into BoolOps
490 if self.is_in_scope(node) or isinstance(node.value, Lambda):
491 return node
492 else:
493 newnode = self.try_subproc_toks(node)
494 if not isinstance(newnode, Expr):
495 newnode = Expr(
496 value=newnode, lineno=node.lineno, col_offset=node.col_offset
497 )
498 if hasattr(node, "max_lineno"):
499 newnode.max_lineno = node.max_lineno
500 newnode.max_col = node.max_col
501 return newnode
502
503 def visit_UnaryOp(self, node):
504 """Handle visiting an unary operands, like not."""
505 if isdescendable(node.operand):
506 node.operand = self.visit(node.operand)
507 operand = node.operand
508 inscope = self.is_in_scope(operand)
509 if not inscope:
510 node.operand = self.try_subproc_toks(operand, strip_expr=True)
511 return node
512
513 def visit_BoolOp(self, node):
514 """Handle visiting an boolean operands, like and/or."""
515 for i in range(len(node.values)):
516 val = node.values[i]
517 if isdescendable(val):
518 val = node.values[i] = self.visit(val)
519 inscope = self.is_in_scope(val)
520 if not inscope:
521 node.values[i] = self.try_subproc_toks(val, strip_expr=True)
522 return node
523
524 #
525 # Context aggregator visitors
526 #
527
528 def visit_Assign(self, node):
529 """Handle visiting an assignment statement."""
530 ups = set()
531 for targ in node.targets:
532 if isinstance(targ, (Tuple, List)):
533 ups.update(leftmostname(elt) for elt in targ.elts)
534 elif isinstance(targ, BinOp):
535 newnode = self.try_subproc_toks(node)
536 if newnode is node:
537 ups.add(leftmostname(targ))
538 else:
539 return newnode
540 else:
541 ups.add(leftmostname(targ))
542 self.ctxupdate(ups)
543 return node
544
545 def visit_AnnAssign(self, node):
546 """Handle visiting an annotated assignment statement."""
547 self.ctxadd(leftmostname(node.target))
548 return node
549
550 def visit_Import(self, node):
551 """Handle visiting a import statement."""
552 for name in node.names:
553 if name.asname is None:
554 self.ctxadd(name.name)
555 else:
556 self.ctxadd(name.asname)
557 return node
558
559 def visit_ImportFrom(self, node):
560 """Handle visiting a "from ... import ..." statement."""
561 for name in node.names:
562 if name.asname is None:
563 self.ctxadd(name.name)
564 else:
565 self.ctxadd(name.asname)
566 return node
567
568 def visit_With(self, node):
569 """Handle visiting a with statement."""
570 for item in node.items:
571 if item.optional_vars is not None:
572 self.ctxupdate(gather_names(item.optional_vars))
573 self._nwith += 1
574 self.generic_visit(node)
575 self._nwith -= 1
576 return node
577
578 def visit_For(self, node):
579 """Handle visiting a for statement."""
580 targ = node.target
581 self.ctxupdate(gather_names(targ))
582 self.generic_visit(node)
583 return node
584
585 def visit_FunctionDef(self, node):
586 """Handle visiting a function definition."""
587 self.ctxadd(node.name)
588 self.contexts.append(set())
589 args = node.args
590 argchain = [args.args, args.kwonlyargs]
591 if args.vararg is not None:
592 argchain.append((args.vararg,))
593 if args.kwarg is not None:
594 argchain.append((args.kwarg,))
595 self.ctxupdate(a.arg for a in itertools.chain.from_iterable(argchain))
596 self.generic_visit(node)
597 self.contexts.pop()
598 return node
599
600 def visit_ClassDef(self, node):
601 """Handle visiting a class definition."""
602 self.ctxadd(node.name)
603 self.contexts.append(set())
604 self.generic_visit(node)
605 self.contexts.pop()
606 return node
607
608 def visit_Delete(self, node):
609 """Handle visiting a del statement."""
610 for targ in node.targets:
611 if isinstance(targ, Name):
612 self.ctxremove(targ.id)
613 self.generic_visit(node)
614 return node
615
616 def visit_Try(self, node):
617 """Handle visiting a try statement."""
618 for handler in node.handlers:
619 if handler.name is not None:
620 self.ctxadd(handler.name)
621 self.generic_visit(node)
622 return node
623
624 def visit_Global(self, node):
625 """Handle visiting a global statement."""
626 self.contexts[1].update(node.names) # contexts[1] is the global ctx
627 self.generic_visit(node)
628 return node
629
630
631 def pdump(s, **kwargs):
632 """performs a pretty dump of an AST node."""
633 if isinstance(s, AST):
634 s = dump(s, **kwargs).replace(",", ",\n")
635 openers = "([{"
636 closers = ")]}"
637 lens = len(s) + 1
638 if lens == 1:
639 return s
640 i = min(s.find(o) % lens for o in openers)
641 if i == lens - 1:
642 return s
643 closer = closers[openers.find(s[i])]
644 j = s.rfind(closer)
645 if j == -1 or j <= i:
646 return s[: i + 1] + "\n" + textwrap.indent(pdump(s[i + 1 :]), " ")
647 pre = s[: i + 1] + "\n"
648 mid = s[i + 1 : j]
649 post = "\n" + s[j:]
650 mid = textwrap.indent(pdump(mid), " ")
651 if "(" in post or "[" in post or "{" in post:
652 post = pdump(post)
653 return pre + mid + post
654
655
656 def pprint_ast(s, *, sep=None, end=None, file=None, flush=False, **kwargs):
657 """Performs a pretty print of the AST nodes."""
658 print(pdump(s, **kwargs), sep=sep, end=end, file=file, flush=flush)
659
660
661 #
662 # Private helpers
663 #
664
665
666 def _getblockattr(name, lineno, col):
667 """calls getattr(name, '__xonsh_block__', False)."""
668 return xonsh_call(
669 "getattr",
670 args=[
671 Name(id=name, ctx=Load(), lineno=lineno, col_offset=col),
672 Str(s="__xonsh_block__", lineno=lineno, col_offset=col),
673 NameConstant(value=False, lineno=lineno, col_offset=col),
674 ],
675 lineno=lineno,
676 col=col,
677 )
678
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/xonsh/ast.py b/xonsh/ast.py
--- a/xonsh/ast.py
+++ b/xonsh/ast.py
@@ -521,6 +521,11 @@
node.values[i] = self.try_subproc_toks(val, strip_expr=True)
return node
+ def visit_comprehension(self, node):
+ """Handles visiting list comprehensions, set comprehensions,
+ dictionary comprehensions, and generator expressions."""
+ return node # do not descend into any comprehensions
+
#
# Context aggregator visitors
#
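The added `visit_comprehension` override keeps the transformer from descending into comprehension internals. Per the listing above, a comprehension inside a `FunctionDef` is reached via `generic_visit`, so `visit_BoolOp` ends up inspecting the `if` clause on its own; in that subtree the loop variable occurs only as a `Load`, sits in no recorded context, and the clause is re-parsed in subprocess mode (hence `i: command not found`). A rough, editorial illustration of that scoping view using only the standard-library `ast` module (not part of the record):
```python
# Why the `if` clause looks out of scope on its own: its subtree only loads `i`,
# while the Store of `i` lives on the comprehension target outside the clause.
import ast

src = "def _fail(x):\n    return [i for i in x if i is not None and i < 10]\n"
func = ast.parse(src).body[0]            # FunctionDef
comp = func.body[0].value.generators[0]  # comprehension node
clause = comp.ifs[0]                     # BoolOp: i is not None and i < 10

loads = {n.id for n in ast.walk(clause)
         if isinstance(n, ast.Name) and isinstance(n.ctx, ast.Load)}
print(loads)                  # {'i'} -> unknown name pre-patch, re-parsed as a subprocess
print(ast.dump(comp.target))  # Name(id='i', ctx=Store()) -- bound by the comprehension itself
```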
| {"golden_diff": "diff --git a/xonsh/ast.py b/xonsh/ast.py\n--- a/xonsh/ast.py\n+++ b/xonsh/ast.py\n@@ -521,6 +521,11 @@\n node.values[i] = self.try_subproc_toks(val, strip_expr=True)\n return node\n \n+ def visit_comprehension(self, node):\n+ \"\"\"Handles visiting list comprehensions, set comprehensions,\n+ dictionary comprehensions, and generator expressions.\"\"\"\n+ return node # do not descend into any comprehensions\n+\n #\n # Context aggregator visitors\n #\n", "issue": "Valid Python fails when invoked inside a function\nThis simple code should return the same list as it is provided with:\r\n```python3\r\ndef _fail(x):\r\n return [i for i in x if i is not None and i < 10]\r\n```\r\nHowever, it fails when invoked:\r\n\r\n```\r\n$ _fail([1,2,3])\r\nxonsh: For full traceback set: $XONSH_SHOW_TRACEBACK = True\r\nxonsh: subprocess mode: command not found: i\r\nDid you mean one of the following?\r\n z: Alias\r\n X: Command (X)\r\n w: Command (w)\r\n vi: Command (vi)\r\n [: Command ([)\r\n\r\ni: command not found\r\nxonsh: For full traceback set: $XONSH_SHOW_TRACEBACK = True\r\nxonsh: subprocess mode: command not found: i\r\nDid you mean one of the following?\r\n z: Alias\r\n X: Command (X)\r\n w: Command (w)\r\n vi: Command (vi)\r\n [: Command ([)\r\n\r\ni: command not found\r\nxonsh: For full traceback set: $XONSH_SHOW_TRACEBACK = True\r\nxonsh: subprocess mode: command not found: i\r\nDid you mean one of the following?\r\n z: Alias\r\n X: Command (X)\r\n w: Command (w)\r\n vi: Command (vi)\r\n [: Command ([)\r\n\r\ni: command not found\r\n[]\r\n```\r\n\r\nThis does *not* happen if one uses a lambda\r\n```python3\r\n_fail_lambda = lambda: [i for i in x if i is not None and i < 10]\r\n```\r\nWhich produces the correct result:\r\n\r\n```\r\n$ _fail_lambda([1,2,3]) \r\n[1, 2, 3]\r\n```\r\n\r\n\r\n## For community\r\n\u2b07\ufe0f **Please click the \ud83d\udc4d reaction instead of leaving a `+1` or \ud83d\udc4d comment**\r\n\n", "before_files": [{"content": "\"\"\"The xonsh abstract syntax tree node.\"\"\"\n# These are imported into our module namespace for the benefit of parser.py.\n# pylint: disable=unused-import\nimport sys\nfrom ast import (\n Module,\n Num,\n Expr,\n Str,\n Bytes,\n UnaryOp,\n UAdd,\n USub,\n Invert,\n BinOp,\n Add,\n Sub,\n Mult,\n Div,\n FloorDiv,\n Mod,\n Pow,\n Compare,\n Lt,\n Gt,\n LtE,\n GtE,\n Eq,\n NotEq,\n In,\n NotIn,\n Is,\n IsNot,\n Not,\n BoolOp,\n Or,\n And,\n Subscript,\n Load,\n Slice,\n ExtSlice,\n List,\n Tuple,\n Set,\n Dict,\n AST,\n NameConstant,\n Name,\n GeneratorExp,\n Store,\n comprehension,\n ListComp,\n SetComp,\n DictComp,\n Assign,\n AugAssign,\n BitXor,\n BitAnd,\n BitOr,\n LShift,\n RShift,\n Assert,\n Delete,\n Del,\n Pass,\n Raise,\n Import,\n alias,\n ImportFrom,\n Continue,\n Break,\n Yield,\n YieldFrom,\n Return,\n IfExp,\n Lambda,\n arguments,\n arg,\n Call,\n keyword,\n Attribute,\n Global,\n Nonlocal,\n If,\n While,\n For,\n withitem,\n With,\n Try,\n ExceptHandler,\n FunctionDef,\n ClassDef,\n Starred,\n NodeTransformer,\n Interactive,\n Expression,\n Index,\n literal_eval,\n dump,\n walk,\n increment_lineno,\n Constant,\n)\nfrom ast import Ellipsis as EllipsisNode\n\n# pylint: enable=unused-import\nimport textwrap\nimport itertools\n\nfrom xonsh.built_ins import XSH\nfrom xonsh.tools import subproc_toks, find_next_break, get_logical_line\n\nfrom ast import (\n MatMult,\n AsyncFunctionDef,\n AsyncWith,\n AsyncFor,\n Await,\n JoinedStr,\n FormattedValue,\n AnnAssign,\n)\n\nfrom xonsh.platform import PYTHON_VERSION_INFO\n\nif 
PYTHON_VERSION_INFO > (3, 8):\n from ast import NamedExpr # type:ignore\n\nSTATEMENTS = (\n FunctionDef,\n ClassDef,\n Return,\n Delete,\n Assign,\n AugAssign,\n For,\n While,\n If,\n With,\n Raise,\n Try,\n Assert,\n Import,\n ImportFrom,\n Global,\n Nonlocal,\n Expr,\n Pass,\n Break,\n Continue,\n AnnAssign,\n)\n\n\ndef leftmostname(node):\n \"\"\"Attempts to find the first name in the tree.\"\"\"\n if isinstance(node, Name):\n rtn = node.id\n elif isinstance(node, (BinOp, Compare)):\n rtn = leftmostname(node.left)\n elif isinstance(node, (Attribute, Subscript, Starred, Expr)):\n rtn = leftmostname(node.value)\n elif isinstance(node, Call):\n rtn = leftmostname(node.func)\n elif isinstance(node, UnaryOp):\n rtn = leftmostname(node.operand)\n elif isinstance(node, BoolOp):\n rtn = leftmostname(node.values[0])\n elif isinstance(node, Assign):\n rtn = leftmostname(node.targets[0])\n elif isinstance(node, AnnAssign):\n rtn = leftmostname(node.target)\n elif isinstance(node, (Str, Bytes, JoinedStr)):\n # handles case of \"./my executable\"\n rtn = leftmostname(node.s)\n elif isinstance(node, Tuple) and len(node.elts) > 0:\n # handles case of echo ,1,2,3\n rtn = leftmostname(node.elts[0])\n else:\n rtn = None\n return rtn\n\n\ndef get_lineno(node, default=0):\n \"\"\"Gets the lineno of a node or returns the default.\"\"\"\n return getattr(node, \"lineno\", default)\n\n\ndef min_line(node):\n \"\"\"Computes the minimum lineno.\"\"\"\n node_line = get_lineno(node)\n return min(map(get_lineno, walk(node), itertools.repeat(node_line)))\n\n\ndef max_line(node):\n \"\"\"Computes the maximum lineno.\"\"\"\n return max(map(get_lineno, walk(node)))\n\n\ndef get_col(node, default=-1):\n \"\"\"Gets the col_offset of a node, or returns the default\"\"\"\n return getattr(node, \"col_offset\", default)\n\n\ndef min_col(node):\n \"\"\"Computes the minimum col_offset.\"\"\"\n return min(map(get_col, walk(node), itertools.repeat(node.col_offset)))\n\n\ndef max_col(node):\n \"\"\"Returns the maximum col_offset of the node and all sub-nodes.\"\"\"\n col = getattr(node, \"max_col\", None)\n if col is not None:\n return col\n highest = max(walk(node), key=get_col)\n col = highest.col_offset + node_len(highest)\n return col\n\n\ndef node_len(node):\n \"\"\"The length of a node as a string\"\"\"\n val = 0\n for n in walk(node):\n if isinstance(n, Name):\n val += len(n.id)\n elif isinstance(n, Attribute):\n val += 1 + (len(n.attr) if isinstance(n.attr, str) else 0)\n # this may need to be added to for more nodes as more cases are found\n return val\n\n\ndef get_id(node, default=None):\n \"\"\"Gets the id attribute of a node, or returns a default.\"\"\"\n return getattr(node, \"id\", default)\n\n\ndef gather_names(node):\n \"\"\"Returns the set of all names present in the node's tree.\"\"\"\n rtn = set(map(get_id, walk(node)))\n rtn.discard(None)\n return rtn\n\n\ndef get_id_ctx(node):\n \"\"\"Gets the id and attribute of a node, or returns a default.\"\"\"\n nid = getattr(node, \"id\", None)\n if nid is None:\n return (None, None)\n return (nid, node.ctx)\n\n\ndef gather_load_store_names(node):\n \"\"\"Returns the names present in the node's tree in a set of load nodes and\n a set of store nodes.\n \"\"\"\n load = set()\n store = set()\n for nid, ctx in map(get_id_ctx, walk(node)):\n if nid is None:\n continue\n elif isinstance(ctx, Load):\n load.add(nid)\n else:\n store.add(nid)\n return (load, store)\n\n\ndef has_elts(x):\n \"\"\"Tests if x is an AST node with elements.\"\"\"\n return isinstance(x, AST) and 
hasattr(x, \"elts\")\n\n\ndef load_attribute_chain(name, lineno=None, col=None):\n \"\"\"Creates an AST that loads variable name that may (or may not)\n have attribute chains. For example, \"a.b.c\"\n \"\"\"\n names = name.split(\".\")\n node = Name(id=names.pop(0), ctx=Load(), lineno=lineno, col_offset=col)\n for attr in names:\n node = Attribute(\n value=node, attr=attr, ctx=Load(), lineno=lineno, col_offset=col\n )\n return node\n\n\ndef xonsh_call(name, args, lineno=None, col=None):\n \"\"\"Creates the AST node for calling a function of a given name.\n Functions names may contain attribute access, e.g. __xonsh__.env.\n \"\"\"\n return Call(\n func=load_attribute_chain(name, lineno=lineno, col=col),\n args=args,\n keywords=[],\n starargs=None,\n kwargs=None,\n lineno=lineno,\n col_offset=col,\n )\n\n\ndef isdescendable(node):\n \"\"\"Determines whether or not a node is worth visiting. Currently only\n UnaryOp and BoolOp nodes are visited.\n \"\"\"\n return isinstance(node, (UnaryOp, BoolOp))\n\n\ndef isexpression(node, ctx=None, *args, **kwargs):\n \"\"\"Determines whether a node (or code string) is an expression, and\n does not contain any statements. The execution context (ctx) and\n other args and kwargs are passed down to the parser, as needed.\n \"\"\"\n # parse string to AST\n if isinstance(node, str):\n node = node if node.endswith(\"\\n\") else node + \"\\n\"\n ctx = XSH.ctx if ctx is None else ctx\n node = XSH.execer.parse(node, ctx, *args, **kwargs)\n # determine if expression-like enough\n if isinstance(node, (Expr, Expression)):\n isexpr = True\n elif isinstance(node, Module) and len(node.body) == 1:\n isexpr = isinstance(node.body[0], (Expr, Expression))\n else:\n isexpr = False\n return isexpr\n\n\nclass CtxAwareTransformer(NodeTransformer):\n \"\"\"Transforms a xonsh AST based to use subprocess calls when\n the first name in an expression statement is not known in the context.\n This assumes that the expression statement is instead parseable as\n a subprocess.\n \"\"\"\n\n def __init__(self, parser):\n \"\"\"Parameters\n ----------\n parser : xonsh.Parser\n A parse instance to try to parse subprocess statements with.\n \"\"\"\n super().__init__()\n self.parser = parser\n self.input = None\n self.contexts = []\n self.lines = None\n self.mode = None\n self._nwith = 0\n self.filename = \"<xonsh-code>\"\n self.debug_level = 0\n\n def ctxvisit(self, node, inp, ctx, mode=\"exec\", filename=None, debug_level=0):\n \"\"\"Transforms the node in a context-dependent way.\n\n Parameters\n ----------\n node : ast.AST\n A syntax tree to transform.\n inp : str\n The input code in string format.\n ctx : dict\n The root context to use.\n filename : str, optional\n File we are to transform.\n debug_level : int, optional\n Debugging level to use in lexing and parsing.\n\n Returns\n -------\n node : ast.AST\n The transformed node.\n \"\"\"\n self.filename = self.filename if filename is None else filename\n self.debug_level = debug_level\n self.lines = inp.splitlines()\n self.contexts = [ctx, set()]\n self.mode = mode\n self._nwith = 0\n node = self.visit(node)\n del self.lines, self.contexts, self.mode\n self._nwith = 0\n return node\n\n def ctxupdate(self, iterable):\n \"\"\"Updated the most recent context.\"\"\"\n self.contexts[-1].update(iterable)\n\n def ctxadd(self, value):\n \"\"\"Adds a value the most recent context.\"\"\"\n self.contexts[-1].add(value)\n\n def ctxremove(self, value):\n \"\"\"Removes a value the most recent context.\"\"\"\n for ctx in reversed(self.contexts):\n if 
value in ctx:\n ctx.remove(value)\n break\n\n def try_subproc_toks(self, node, strip_expr=False):\n \"\"\"Tries to parse the line of the node as a subprocess.\"\"\"\n line, nlogical, idx = get_logical_line(self.lines, node.lineno - 1)\n if self.mode == \"eval\":\n mincol = len(line) - len(line.lstrip())\n maxcol = None\n else:\n mincol = max(min_col(node) - 1, 0)\n maxcol = max_col(node)\n if mincol == maxcol:\n maxcol = find_next_break(line, mincol=mincol, lexer=self.parser.lexer)\n elif nlogical > 1:\n maxcol = None\n elif maxcol < len(line) and line[maxcol] == \";\":\n pass\n else:\n maxcol += 1\n spline = subproc_toks(\n line,\n mincol=mincol,\n maxcol=maxcol,\n returnline=False,\n lexer=self.parser.lexer,\n )\n if spline is None or spline != f\"![{line[mincol:maxcol].strip()}]\":\n # failed to get something consistent, try greedy wrap\n spline = subproc_toks(\n line,\n mincol=mincol,\n maxcol=maxcol,\n returnline=False,\n lexer=self.parser.lexer,\n greedy=True,\n )\n if spline is None:\n return node\n try:\n newnode = self.parser.parse(\n spline,\n mode=self.mode,\n filename=self.filename,\n debug_level=(self.debug_level >= 2),\n )\n newnode = newnode.body\n if not isinstance(newnode, AST):\n # take the first (and only) Expr\n newnode = newnode[0]\n increment_lineno(newnode, n=node.lineno - 1)\n newnode.col_offset = node.col_offset\n if self.debug_level >= 1:\n msg = \"{0}:{1}:{2}{3} - {4}\\n\" \"{0}:{1}:{2}{3} + {5}\"\n mstr = \"\" if maxcol is None else \":\" + str(maxcol)\n msg = msg.format(self.filename, node.lineno, mincol, mstr, line, spline)\n print(msg, file=sys.stderr)\n except SyntaxError:\n newnode = node\n if strip_expr and isinstance(newnode, Expr):\n newnode = newnode.value\n return newnode\n\n def is_in_scope(self, node):\n \"\"\"Determines whether or not the current node is in scope.\"\"\"\n names, store = gather_load_store_names(node)\n names -= store\n if not names:\n return True\n inscope = False\n for ctx in reversed(self.contexts):\n names -= ctx\n if not names:\n inscope = True\n break\n return inscope\n\n #\n # Replacement visitors\n #\n\n def visit_Expression(self, node):\n \"\"\"Handle visiting an expression body.\"\"\"\n if isdescendable(node.body):\n node.body = self.visit(node.body)\n body = node.body\n inscope = self.is_in_scope(body)\n if not inscope:\n node.body = self.try_subproc_toks(body)\n return node\n\n def visit_Expr(self, node):\n \"\"\"Handle visiting an expression.\"\"\"\n if isdescendable(node.value):\n node.value = self.visit(node.value) # this allows diving into BoolOps\n if self.is_in_scope(node) or isinstance(node.value, Lambda):\n return node\n else:\n newnode = self.try_subproc_toks(node)\n if not isinstance(newnode, Expr):\n newnode = Expr(\n value=newnode, lineno=node.lineno, col_offset=node.col_offset\n )\n if hasattr(node, \"max_lineno\"):\n newnode.max_lineno = node.max_lineno\n newnode.max_col = node.max_col\n return newnode\n\n def visit_UnaryOp(self, node):\n \"\"\"Handle visiting an unary operands, like not.\"\"\"\n if isdescendable(node.operand):\n node.operand = self.visit(node.operand)\n operand = node.operand\n inscope = self.is_in_scope(operand)\n if not inscope:\n node.operand = self.try_subproc_toks(operand, strip_expr=True)\n return node\n\n def visit_BoolOp(self, node):\n \"\"\"Handle visiting an boolean operands, like and/or.\"\"\"\n for i in range(len(node.values)):\n val = node.values[i]\n if isdescendable(val):\n val = node.values[i] = self.visit(val)\n inscope = self.is_in_scope(val)\n if not inscope:\n 
node.values[i] = self.try_subproc_toks(val, strip_expr=True)\n return node\n\n #\n # Context aggregator visitors\n #\n\n def visit_Assign(self, node):\n \"\"\"Handle visiting an assignment statement.\"\"\"\n ups = set()\n for targ in node.targets:\n if isinstance(targ, (Tuple, List)):\n ups.update(leftmostname(elt) for elt in targ.elts)\n elif isinstance(targ, BinOp):\n newnode = self.try_subproc_toks(node)\n if newnode is node:\n ups.add(leftmostname(targ))\n else:\n return newnode\n else:\n ups.add(leftmostname(targ))\n self.ctxupdate(ups)\n return node\n\n def visit_AnnAssign(self, node):\n \"\"\"Handle visiting an annotated assignment statement.\"\"\"\n self.ctxadd(leftmostname(node.target))\n return node\n\n def visit_Import(self, node):\n \"\"\"Handle visiting a import statement.\"\"\"\n for name in node.names:\n if name.asname is None:\n self.ctxadd(name.name)\n else:\n self.ctxadd(name.asname)\n return node\n\n def visit_ImportFrom(self, node):\n \"\"\"Handle visiting a \"from ... import ...\" statement.\"\"\"\n for name in node.names:\n if name.asname is None:\n self.ctxadd(name.name)\n else:\n self.ctxadd(name.asname)\n return node\n\n def visit_With(self, node):\n \"\"\"Handle visiting a with statement.\"\"\"\n for item in node.items:\n if item.optional_vars is not None:\n self.ctxupdate(gather_names(item.optional_vars))\n self._nwith += 1\n self.generic_visit(node)\n self._nwith -= 1\n return node\n\n def visit_For(self, node):\n \"\"\"Handle visiting a for statement.\"\"\"\n targ = node.target\n self.ctxupdate(gather_names(targ))\n self.generic_visit(node)\n return node\n\n def visit_FunctionDef(self, node):\n \"\"\"Handle visiting a function definition.\"\"\"\n self.ctxadd(node.name)\n self.contexts.append(set())\n args = node.args\n argchain = [args.args, args.kwonlyargs]\n if args.vararg is not None:\n argchain.append((args.vararg,))\n if args.kwarg is not None:\n argchain.append((args.kwarg,))\n self.ctxupdate(a.arg for a in itertools.chain.from_iterable(argchain))\n self.generic_visit(node)\n self.contexts.pop()\n return node\n\n def visit_ClassDef(self, node):\n \"\"\"Handle visiting a class definition.\"\"\"\n self.ctxadd(node.name)\n self.contexts.append(set())\n self.generic_visit(node)\n self.contexts.pop()\n return node\n\n def visit_Delete(self, node):\n \"\"\"Handle visiting a del statement.\"\"\"\n for targ in node.targets:\n if isinstance(targ, Name):\n self.ctxremove(targ.id)\n self.generic_visit(node)\n return node\n\n def visit_Try(self, node):\n \"\"\"Handle visiting a try statement.\"\"\"\n for handler in node.handlers:\n if handler.name is not None:\n self.ctxadd(handler.name)\n self.generic_visit(node)\n return node\n\n def visit_Global(self, node):\n \"\"\"Handle visiting a global statement.\"\"\"\n self.contexts[1].update(node.names) # contexts[1] is the global ctx\n self.generic_visit(node)\n return node\n\n\ndef pdump(s, **kwargs):\n \"\"\"performs a pretty dump of an AST node.\"\"\"\n if isinstance(s, AST):\n s = dump(s, **kwargs).replace(\",\", \",\\n\")\n openers = \"([{\"\n closers = \")]}\"\n lens = len(s) + 1\n if lens == 1:\n return s\n i = min(s.find(o) % lens for o in openers)\n if i == lens - 1:\n return s\n closer = closers[openers.find(s[i])]\n j = s.rfind(closer)\n if j == -1 or j <= i:\n return s[: i + 1] + \"\\n\" + textwrap.indent(pdump(s[i + 1 :]), \" \")\n pre = s[: i + 1] + \"\\n\"\n mid = s[i + 1 : j]\n post = \"\\n\" + s[j:]\n mid = textwrap.indent(pdump(mid), \" \")\n if \"(\" in post or \"[\" in post or \"{\" in post:\n post = 
pdump(post)\n return pre + mid + post\n\n\ndef pprint_ast(s, *, sep=None, end=None, file=None, flush=False, **kwargs):\n \"\"\"Performs a pretty print of the AST nodes.\"\"\"\n print(pdump(s, **kwargs), sep=sep, end=end, file=file, flush=flush)\n\n\n#\n# Private helpers\n#\n\n\ndef _getblockattr(name, lineno, col):\n \"\"\"calls getattr(name, '__xonsh_block__', False).\"\"\"\n return xonsh_call(\n \"getattr\",\n args=[\n Name(id=name, ctx=Load(), lineno=lineno, col_offset=col),\n Str(s=\"__xonsh_block__\", lineno=lineno, col_offset=col),\n NameConstant(value=False, lineno=lineno, col_offset=col),\n ],\n lineno=lineno,\n col=col,\n )\n", "path": "xonsh/ast.py"}], "after_files": [{"content": "\"\"\"The xonsh abstract syntax tree node.\"\"\"\n# These are imported into our module namespace for the benefit of parser.py.\n# pylint: disable=unused-import\nimport sys\nfrom ast import (\n Module,\n Num,\n Expr,\n Str,\n Bytes,\n UnaryOp,\n UAdd,\n USub,\n Invert,\n BinOp,\n Add,\n Sub,\n Mult,\n Div,\n FloorDiv,\n Mod,\n Pow,\n Compare,\n Lt,\n Gt,\n LtE,\n GtE,\n Eq,\n NotEq,\n In,\n NotIn,\n Is,\n IsNot,\n Not,\n BoolOp,\n Or,\n And,\n Subscript,\n Load,\n Slice,\n ExtSlice,\n List,\n Tuple,\n Set,\n Dict,\n AST,\n NameConstant,\n Name,\n GeneratorExp,\n Store,\n comprehension,\n ListComp,\n SetComp,\n DictComp,\n Assign,\n AugAssign,\n BitXor,\n BitAnd,\n BitOr,\n LShift,\n RShift,\n Assert,\n Delete,\n Del,\n Pass,\n Raise,\n Import,\n alias,\n ImportFrom,\n Continue,\n Break,\n Yield,\n YieldFrom,\n Return,\n IfExp,\n Lambda,\n arguments,\n arg,\n Call,\n keyword,\n Attribute,\n Global,\n Nonlocal,\n If,\n While,\n For,\n withitem,\n With,\n Try,\n ExceptHandler,\n FunctionDef,\n ClassDef,\n Starred,\n NodeTransformer,\n Interactive,\n Expression,\n Index,\n literal_eval,\n dump,\n walk,\n increment_lineno,\n Constant,\n)\nfrom ast import Ellipsis as EllipsisNode\n\n# pylint: enable=unused-import\nimport textwrap\nimport itertools\n\nfrom xonsh.built_ins import XSH\nfrom xonsh.tools import subproc_toks, find_next_break, get_logical_line\n\nfrom ast import (\n MatMult,\n AsyncFunctionDef,\n AsyncWith,\n AsyncFor,\n Await,\n JoinedStr,\n FormattedValue,\n AnnAssign,\n)\n\nfrom xonsh.platform import PYTHON_VERSION_INFO\n\nif PYTHON_VERSION_INFO > (3, 8):\n from ast import NamedExpr # type:ignore\n\nSTATEMENTS = (\n FunctionDef,\n ClassDef,\n Return,\n Delete,\n Assign,\n AugAssign,\n For,\n While,\n If,\n With,\n Raise,\n Try,\n Assert,\n Import,\n ImportFrom,\n Global,\n Nonlocal,\n Expr,\n Pass,\n Break,\n Continue,\n AnnAssign,\n)\n\n\ndef leftmostname(node):\n \"\"\"Attempts to find the first name in the tree.\"\"\"\n if isinstance(node, Name):\n rtn = node.id\n elif isinstance(node, (BinOp, Compare)):\n rtn = leftmostname(node.left)\n elif isinstance(node, (Attribute, Subscript, Starred, Expr)):\n rtn = leftmostname(node.value)\n elif isinstance(node, Call):\n rtn = leftmostname(node.func)\n elif isinstance(node, UnaryOp):\n rtn = leftmostname(node.operand)\n elif isinstance(node, BoolOp):\n rtn = leftmostname(node.values[0])\n elif isinstance(node, Assign):\n rtn = leftmostname(node.targets[0])\n elif isinstance(node, AnnAssign):\n rtn = leftmostname(node.target)\n elif isinstance(node, (Str, Bytes, JoinedStr)):\n # handles case of \"./my executable\"\n rtn = leftmostname(node.s)\n elif isinstance(node, Tuple) and len(node.elts) > 0:\n # handles case of echo ,1,2,3\n rtn = leftmostname(node.elts[0])\n else:\n rtn = None\n return rtn\n\n\ndef get_lineno(node, default=0):\n \"\"\"Gets the 
lineno of a node or returns the default.\"\"\"\n return getattr(node, \"lineno\", default)\n\n\ndef min_line(node):\n \"\"\"Computes the minimum lineno.\"\"\"\n node_line = get_lineno(node)\n return min(map(get_lineno, walk(node), itertools.repeat(node_line)))\n\n\ndef max_line(node):\n \"\"\"Computes the maximum lineno.\"\"\"\n return max(map(get_lineno, walk(node)))\n\n\ndef get_col(node, default=-1):\n \"\"\"Gets the col_offset of a node, or returns the default\"\"\"\n return getattr(node, \"col_offset\", default)\n\n\ndef min_col(node):\n \"\"\"Computes the minimum col_offset.\"\"\"\n return min(map(get_col, walk(node), itertools.repeat(node.col_offset)))\n\n\ndef max_col(node):\n \"\"\"Returns the maximum col_offset of the node and all sub-nodes.\"\"\"\n col = getattr(node, \"max_col\", None)\n if col is not None:\n return col\n highest = max(walk(node), key=get_col)\n col = highest.col_offset + node_len(highest)\n return col\n\n\ndef node_len(node):\n \"\"\"The length of a node as a string\"\"\"\n val = 0\n for n in walk(node):\n if isinstance(n, Name):\n val += len(n.id)\n elif isinstance(n, Attribute):\n val += 1 + (len(n.attr) if isinstance(n.attr, str) else 0)\n # this may need to be added to for more nodes as more cases are found\n return val\n\n\ndef get_id(node, default=None):\n \"\"\"Gets the id attribute of a node, or returns a default.\"\"\"\n return getattr(node, \"id\", default)\n\n\ndef gather_names(node):\n \"\"\"Returns the set of all names present in the node's tree.\"\"\"\n rtn = set(map(get_id, walk(node)))\n rtn.discard(None)\n return rtn\n\n\ndef get_id_ctx(node):\n \"\"\"Gets the id and attribute of a node, or returns a default.\"\"\"\n nid = getattr(node, \"id\", None)\n if nid is None:\n return (None, None)\n return (nid, node.ctx)\n\n\ndef gather_load_store_names(node):\n \"\"\"Returns the names present in the node's tree in a set of load nodes and\n a set of store nodes.\n \"\"\"\n load = set()\n store = set()\n for nid, ctx in map(get_id_ctx, walk(node)):\n if nid is None:\n continue\n elif isinstance(ctx, Load):\n load.add(nid)\n else:\n store.add(nid)\n return (load, store)\n\n\ndef has_elts(x):\n \"\"\"Tests if x is an AST node with elements.\"\"\"\n return isinstance(x, AST) and hasattr(x, \"elts\")\n\n\ndef load_attribute_chain(name, lineno=None, col=None):\n \"\"\"Creates an AST that loads variable name that may (or may not)\n have attribute chains. For example, \"a.b.c\"\n \"\"\"\n names = name.split(\".\")\n node = Name(id=names.pop(0), ctx=Load(), lineno=lineno, col_offset=col)\n for attr in names:\n node = Attribute(\n value=node, attr=attr, ctx=Load(), lineno=lineno, col_offset=col\n )\n return node\n\n\ndef xonsh_call(name, args, lineno=None, col=None):\n \"\"\"Creates the AST node for calling a function of a given name.\n Functions names may contain attribute access, e.g. __xonsh__.env.\n \"\"\"\n return Call(\n func=load_attribute_chain(name, lineno=lineno, col=col),\n args=args,\n keywords=[],\n starargs=None,\n kwargs=None,\n lineno=lineno,\n col_offset=col,\n )\n\n\ndef isdescendable(node):\n \"\"\"Determines whether or not a node is worth visiting. Currently only\n UnaryOp and BoolOp nodes are visited.\n \"\"\"\n return isinstance(node, (UnaryOp, BoolOp))\n\n\ndef isexpression(node, ctx=None, *args, **kwargs):\n \"\"\"Determines whether a node (or code string) is an expression, and\n does not contain any statements. 
The execution context (ctx) and\n other args and kwargs are passed down to the parser, as needed.\n \"\"\"\n # parse string to AST\n if isinstance(node, str):\n node = node if node.endswith(\"\\n\") else node + \"\\n\"\n ctx = XSH.ctx if ctx is None else ctx\n node = XSH.execer.parse(node, ctx, *args, **kwargs)\n # determine if expression-like enough\n if isinstance(node, (Expr, Expression)):\n isexpr = True\n elif isinstance(node, Module) and len(node.body) == 1:\n isexpr = isinstance(node.body[0], (Expr, Expression))\n else:\n isexpr = False\n return isexpr\n\n\nclass CtxAwareTransformer(NodeTransformer):\n \"\"\"Transforms a xonsh AST based to use subprocess calls when\n the first name in an expression statement is not known in the context.\n This assumes that the expression statement is instead parseable as\n a subprocess.\n \"\"\"\n\n def __init__(self, parser):\n \"\"\"Parameters\n ----------\n parser : xonsh.Parser\n A parse instance to try to parse subprocess statements with.\n \"\"\"\n super().__init__()\n self.parser = parser\n self.input = None\n self.contexts = []\n self.lines = None\n self.mode = None\n self._nwith = 0\n self.filename = \"<xonsh-code>\"\n self.debug_level = 0\n\n def ctxvisit(self, node, inp, ctx, mode=\"exec\", filename=None, debug_level=0):\n \"\"\"Transforms the node in a context-dependent way.\n\n Parameters\n ----------\n node : ast.AST\n A syntax tree to transform.\n inp : str\n The input code in string format.\n ctx : dict\n The root context to use.\n filename : str, optional\n File we are to transform.\n debug_level : int, optional\n Debugging level to use in lexing and parsing.\n\n Returns\n -------\n node : ast.AST\n The transformed node.\n \"\"\"\n self.filename = self.filename if filename is None else filename\n self.debug_level = debug_level\n self.lines = inp.splitlines()\n self.contexts = [ctx, set()]\n self.mode = mode\n self._nwith = 0\n node = self.visit(node)\n del self.lines, self.contexts, self.mode\n self._nwith = 0\n return node\n\n def ctxupdate(self, iterable):\n \"\"\"Updated the most recent context.\"\"\"\n self.contexts[-1].update(iterable)\n\n def ctxadd(self, value):\n \"\"\"Adds a value the most recent context.\"\"\"\n self.contexts[-1].add(value)\n\n def ctxremove(self, value):\n \"\"\"Removes a value the most recent context.\"\"\"\n for ctx in reversed(self.contexts):\n if value in ctx:\n ctx.remove(value)\n break\n\n def try_subproc_toks(self, node, strip_expr=False):\n \"\"\"Tries to parse the line of the node as a subprocess.\"\"\"\n line, nlogical, idx = get_logical_line(self.lines, node.lineno - 1)\n if self.mode == \"eval\":\n mincol = len(line) - len(line.lstrip())\n maxcol = None\n else:\n mincol = max(min_col(node) - 1, 0)\n maxcol = max_col(node)\n if mincol == maxcol:\n maxcol = find_next_break(line, mincol=mincol, lexer=self.parser.lexer)\n elif nlogical > 1:\n maxcol = None\n elif maxcol < len(line) and line[maxcol] == \";\":\n pass\n else:\n maxcol += 1\n spline = subproc_toks(\n line,\n mincol=mincol,\n maxcol=maxcol,\n returnline=False,\n lexer=self.parser.lexer,\n )\n if spline is None or spline != f\"![{line[mincol:maxcol].strip()}]\":\n # failed to get something consistent, try greedy wrap\n spline = subproc_toks(\n line,\n mincol=mincol,\n maxcol=maxcol,\n returnline=False,\n lexer=self.parser.lexer,\n greedy=True,\n )\n if spline is None:\n return node\n try:\n newnode = self.parser.parse(\n spline,\n mode=self.mode,\n filename=self.filename,\n debug_level=(self.debug_level >= 2),\n )\n newnode = 
newnode.body\n if not isinstance(newnode, AST):\n # take the first (and only) Expr\n newnode = newnode[0]\n increment_lineno(newnode, n=node.lineno - 1)\n newnode.col_offset = node.col_offset\n if self.debug_level >= 1:\n msg = \"{0}:{1}:{2}{3} - {4}\\n\" \"{0}:{1}:{2}{3} + {5}\"\n mstr = \"\" if maxcol is None else \":\" + str(maxcol)\n msg = msg.format(self.filename, node.lineno, mincol, mstr, line, spline)\n print(msg, file=sys.stderr)\n except SyntaxError:\n newnode = node\n if strip_expr and isinstance(newnode, Expr):\n newnode = newnode.value\n return newnode\n\n def is_in_scope(self, node):\n \"\"\"Determines whether or not the current node is in scope.\"\"\"\n names, store = gather_load_store_names(node)\n names -= store\n if not names:\n return True\n inscope = False\n for ctx in reversed(self.contexts):\n names -= ctx\n if not names:\n inscope = True\n break\n return inscope\n\n #\n # Replacement visitors\n #\n\n def visit_Expression(self, node):\n \"\"\"Handle visiting an expression body.\"\"\"\n if isdescendable(node.body):\n node.body = self.visit(node.body)\n body = node.body\n inscope = self.is_in_scope(body)\n if not inscope:\n node.body = self.try_subproc_toks(body)\n return node\n\n def visit_Expr(self, node):\n \"\"\"Handle visiting an expression.\"\"\"\n if isdescendable(node.value):\n node.value = self.visit(node.value) # this allows diving into BoolOps\n if self.is_in_scope(node) or isinstance(node.value, Lambda):\n return node\n else:\n newnode = self.try_subproc_toks(node)\n if not isinstance(newnode, Expr):\n newnode = Expr(\n value=newnode, lineno=node.lineno, col_offset=node.col_offset\n )\n if hasattr(node, \"max_lineno\"):\n newnode.max_lineno = node.max_lineno\n newnode.max_col = node.max_col\n return newnode\n\n def visit_UnaryOp(self, node):\n \"\"\"Handle visiting an unary operands, like not.\"\"\"\n if isdescendable(node.operand):\n node.operand = self.visit(node.operand)\n operand = node.operand\n inscope = self.is_in_scope(operand)\n if not inscope:\n node.operand = self.try_subproc_toks(operand, strip_expr=True)\n return node\n\n def visit_BoolOp(self, node):\n \"\"\"Handle visiting an boolean operands, like and/or.\"\"\"\n for i in range(len(node.values)):\n val = node.values[i]\n if isdescendable(val):\n val = node.values[i] = self.visit(val)\n inscope = self.is_in_scope(val)\n if not inscope:\n node.values[i] = self.try_subproc_toks(val, strip_expr=True)\n return node\n\n def visit_comprehension(self, node):\n \"\"\"Handles visiting list comprehensions, set comprehensions,\n dictionary comprehensions, and generator expressions.\"\"\"\n return node # do not descend into any comprehensions\n\n #\n # Context aggregator visitors\n #\n\n def visit_Assign(self, node):\n \"\"\"Handle visiting an assignment statement.\"\"\"\n ups = set()\n for targ in node.targets:\n if isinstance(targ, (Tuple, List)):\n ups.update(leftmostname(elt) for elt in targ.elts)\n elif isinstance(targ, BinOp):\n newnode = self.try_subproc_toks(node)\n if newnode is node:\n ups.add(leftmostname(targ))\n else:\n return newnode\n else:\n ups.add(leftmostname(targ))\n self.ctxupdate(ups)\n return node\n\n def visit_AnnAssign(self, node):\n \"\"\"Handle visiting an annotated assignment statement.\"\"\"\n self.ctxadd(leftmostname(node.target))\n return node\n\n def visit_Import(self, node):\n \"\"\"Handle visiting a import statement.\"\"\"\n for name in node.names:\n if name.asname is None:\n self.ctxadd(name.name)\n else:\n self.ctxadd(name.asname)\n return node\n\n def 
visit_ImportFrom(self, node):\n \"\"\"Handle visiting a \"from ... import ...\" statement.\"\"\"\n for name in node.names:\n if name.asname is None:\n self.ctxadd(name.name)\n else:\n self.ctxadd(name.asname)\n return node\n\n def visit_With(self, node):\n \"\"\"Handle visiting a with statement.\"\"\"\n for item in node.items:\n if item.optional_vars is not None:\n self.ctxupdate(gather_names(item.optional_vars))\n self._nwith += 1\n self.generic_visit(node)\n self._nwith -= 1\n return node\n\n def visit_For(self, node):\n \"\"\"Handle visiting a for statement.\"\"\"\n targ = node.target\n self.ctxupdate(gather_names(targ))\n self.generic_visit(node)\n return node\n\n def visit_FunctionDef(self, node):\n \"\"\"Handle visiting a function definition.\"\"\"\n self.ctxadd(node.name)\n self.contexts.append(set())\n args = node.args\n argchain = [args.args, args.kwonlyargs]\n if args.vararg is not None:\n argchain.append((args.vararg,))\n if args.kwarg is not None:\n argchain.append((args.kwarg,))\n self.ctxupdate(a.arg for a in itertools.chain.from_iterable(argchain))\n self.generic_visit(node)\n self.contexts.pop()\n return node\n\n def visit_ClassDef(self, node):\n \"\"\"Handle visiting a class definition.\"\"\"\n self.ctxadd(node.name)\n self.contexts.append(set())\n self.generic_visit(node)\n self.contexts.pop()\n return node\n\n def visit_Delete(self, node):\n \"\"\"Handle visiting a del statement.\"\"\"\n for targ in node.targets:\n if isinstance(targ, Name):\n self.ctxremove(targ.id)\n self.generic_visit(node)\n return node\n\n def visit_Try(self, node):\n \"\"\"Handle visiting a try statement.\"\"\"\n for handler in node.handlers:\n if handler.name is not None:\n self.ctxadd(handler.name)\n self.generic_visit(node)\n return node\n\n def visit_Global(self, node):\n \"\"\"Handle visiting a global statement.\"\"\"\n self.contexts[1].update(node.names) # contexts[1] is the global ctx\n self.generic_visit(node)\n return node\n\n\ndef pdump(s, **kwargs):\n \"\"\"performs a pretty dump of an AST node.\"\"\"\n if isinstance(s, AST):\n s = dump(s, **kwargs).replace(\",\", \",\\n\")\n openers = \"([{\"\n closers = \")]}\"\n lens = len(s) + 1\n if lens == 1:\n return s\n i = min(s.find(o) % lens for o in openers)\n if i == lens - 1:\n return s\n closer = closers[openers.find(s[i])]\n j = s.rfind(closer)\n if j == -1 or j <= i:\n return s[: i + 1] + \"\\n\" + textwrap.indent(pdump(s[i + 1 :]), \" \")\n pre = s[: i + 1] + \"\\n\"\n mid = s[i + 1 : j]\n post = \"\\n\" + s[j:]\n mid = textwrap.indent(pdump(mid), \" \")\n if \"(\" in post or \"[\" in post or \"{\" in post:\n post = pdump(post)\n return pre + mid + post\n\n\ndef pprint_ast(s, *, sep=None, end=None, file=None, flush=False, **kwargs):\n \"\"\"Performs a pretty print of the AST nodes.\"\"\"\n print(pdump(s, **kwargs), sep=sep, end=end, file=file, flush=flush)\n\n\n#\n# Private helpers\n#\n\n\ndef _getblockattr(name, lineno, col):\n \"\"\"calls getattr(name, '__xonsh_block__', False).\"\"\"\n return xonsh_call(\n \"getattr\",\n args=[\n Name(id=name, ctx=Load(), lineno=lineno, col_offset=col),\n Str(s=\"__xonsh_block__\", lineno=lineno, col_offset=col),\n NameConstant(value=False, lineno=lineno, col_offset=col),\n ],\n lineno=lineno,\n col=col,\n )\n", "path": "xonsh/ast.py"}]} |
gh_patches_debug_1214 | rasdani/github-patches | git_diff | scikit-hep__pyhf-1261 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove duplicated libraries in setup.py
# Description
In `setup.py` and `setup.cfg` there are some duplicated libraries that should be removed from `setup.py`.
https://github.com/scikit-hep/pyhf/blob/75f3cd350ed3986d16d680fbb83f312791aafd68/setup.py#L47
already exists as a core requirement in `setup.cfg`
https://github.com/scikit-hep/pyhf/blob/75f3cd350ed3986d16d680fbb83f312791aafd68/setup.cfg#L45
and so should be removed from `setup.py`.
It also isn't clear if
https://github.com/scikit-hep/pyhf/blob/75f3cd350ed3986d16d680fbb83f312791aafd68/setup.py#L42
is still required, given that it was added back in PR #186 when we still used Coveralls for coverage.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import setup
2
3 extras_require = {
4 'shellcomplete': ['click_completion'],
5 'tensorflow': [
6 'tensorflow~=2.2.0', # TensorFlow minor releases are as volatile as major
7 'tensorflow-probability~=0.10.0',
8 ],
9 'torch': ['torch~=1.2'],
10 'jax': ['jax~=0.2.4', 'jaxlib~=0.1.56'],
11 'xmlio': [
12 'uproot3~=3.14',
13 'uproot~=4.0',
14 ], # uproot3 required until writing to ROOT supported in uproot4
15 'minuit': ['iminuit~=2.1'],
16 }
17 extras_require['backends'] = sorted(
18 set(
19 extras_require['tensorflow']
20 + extras_require['torch']
21 + extras_require['jax']
22 + extras_require['minuit']
23 )
24 )
25 extras_require['contrib'] = sorted({'matplotlib', 'requests'})
26 extras_require['lint'] = sorted({'flake8', 'black'})
27
28 extras_require['test'] = sorted(
29 set(
30 extras_require['backends']
31 + extras_require['xmlio']
32 + extras_require['contrib']
33 + extras_require['shellcomplete']
34 + [
35 'pytest~=6.0',
36 'pytest-cov>=2.5.1',
37 'pytest-mock',
38 'pytest-benchmark[histogram]',
39 'pytest-console-scripts',
40 'pytest-mpl',
41 'pydocstyle',
42 'coverage>=4.0', # coveralls
43 'papermill~=2.0',
44 'nteract-scrapbook~=0.2',
45 'jupyter',
46 'graphviz',
47 'jsonpatch',
48 ]
49 )
50 )
51 extras_require['docs'] = sorted(
52 set(
53 extras_require['xmlio']
54 + [
55 'sphinx>=3.1.2',
56 'sphinxcontrib-bibtex~=2.1',
57 'sphinx-click',
58 'sphinx_rtd_theme',
59 'nbsphinx',
60 'ipywidgets',
61 'sphinx-issues',
62 'sphinx-copybutton>0.2.9',
63 ]
64 )
65 )
66 extras_require['develop'] = sorted(
67 set(
68 extras_require['docs']
69 + extras_require['lint']
70 + extras_require['test']
71 + [
72 'nbdime',
73 'bump2version',
74 'ipython',
75 'pre-commit',
76 'check-manifest',
77 'codemetapy>=0.3.4',
78 'twine',
79 ]
80 )
81 )
82 extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
83
84
85 setup(
86 extras_require=extras_require,
87 use_scm_version=lambda: {'local_scheme': lambda version: ''},
88 )
89
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -39,12 +39,10 @@
'pytest-console-scripts',
'pytest-mpl',
'pydocstyle',
- 'coverage>=4.0', # coveralls
'papermill~=2.0',
'nteract-scrapbook~=0.2',
'jupyter',
'graphviz',
- 'jsonpatch',
]
)
)
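As a side note, this kind of drift is easy to spot by comparing the extras declared in `setup.py` against the core requirements in `setup.cfg`. The sketch below is illustrative only: it assumes a setuptools-style `setup.cfg` with an `[options] install_requires` block, and the helper name and the crude specifier stripping are assumptions, not part of the repository.

```python
import configparser
import re


def duplicated_extras(extras, cfg_path="setup.cfg"):
    """Return extras whose base package name also appears in install_requires."""

    def base_name(requirement):
        # Crude: keep everything before the first version/extras/marker character.
        return re.split(r"[~=<>!\[; ]", requirement.strip(), maxsplit=1)[0].lower()

    parser = configparser.ConfigParser()
    parser.read(cfg_path)
    raw = parser.get("options", "install_requires", fallback="")
    core = {base_name(line) for line in raw.splitlines() if line.strip()}
    return [req for req in extras if base_name(req) in core]


if __name__ == "__main__":
    sample_extras = ["pytest~=6.0", "coverage>=4.0", "jsonpatch", "graphviz"]
    print(duplicated_extras(sample_extras))
```

Run from the repository root, a check like this would flag `jsonpatch` as already covered by the core requirements in `setup.cfg`.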
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -39,12 +39,10 @@\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n- 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'graphviz',\n- 'jsonpatch',\n ]\n )\n )\n", "issue": "Remove duplicated libraries in setup.py\n# Description\r\n\r\nIn `setup.py` and `setup.cfg` there are some duplicated libraries that should be removed from `setup.py`.\r\n\r\nhttps://github.com/scikit-hep/pyhf/blob/75f3cd350ed3986d16d680fbb83f312791aafd68/setup.py#L47\r\n\r\nalready exists as a core requirement in `setup.cfg`\r\n\r\nhttps://github.com/scikit-hep/pyhf/blob/75f3cd350ed3986d16d680fbb83f312791aafd68/setup.cfg#L45\r\n\r\nand so should be removed from `setup.py`.\r\n\r\nIt also isn't clear if \r\n\r\nhttps://github.com/scikit-hep/pyhf/blob/75f3cd350ed3986d16d680fbb83f312791aafd68/setup.py#L42\r\n\r\nis still required, given that it was added back in PR #186 when we still used Coveralls for coverage.\r\n\n", "before_files": [{"content": "from setuptools import setup\n\nextras_require = {\n 'shellcomplete': ['click_completion'],\n 'tensorflow': [\n 'tensorflow~=2.2.0', # TensorFlow minor releases are as volatile as major\n 'tensorflow-probability~=0.10.0',\n ],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.2.4', 'jaxlib~=0.1.56'],\n 'xmlio': [\n 'uproot3~=3.14',\n 'uproot~=4.0',\n ], # uproot3 required until writing to ROOT supported in uproot4\n 'minuit': ['iminuit~=2.1'],\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted({'matplotlib', 'requests'})\nextras_require['lint'] = sorted({'flake8', 'black'})\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + extras_require['shellcomplete']\n + [\n 'pytest~=6.0',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'graphviz',\n 'jsonpatch',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n extras_require['xmlio']\n + [\n 'sphinx>=3.1.2',\n 'sphinxcontrib-bibtex~=2.1',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n + [\n 'nbdime',\n 'bump2version',\n 'ipython',\n 'pre-commit',\n 'check-manifest',\n 'codemetapy>=0.3.4',\n 'twine',\n ]\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup\n\nextras_require = {\n 'shellcomplete': ['click_completion'],\n 'tensorflow': [\n 'tensorflow~=2.2.0', # TensorFlow minor releases are as volatile as major\n 'tensorflow-probability~=0.10.0',\n ],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.2.4', 'jaxlib~=0.1.56'],\n 'xmlio': [\n 'uproot3~=3.14',\n 'uproot~=4.0',\n ], # uproot3 required until writing to ROOT supported in uproot4\n 'minuit': ['iminuit~=2.1'],\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + 
extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted({'matplotlib', 'requests'})\nextras_require['lint'] = sorted({'flake8', 'black'})\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + extras_require['shellcomplete']\n + [\n 'pytest~=6.0',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'graphviz',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n extras_require['xmlio']\n + [\n 'sphinx>=3.1.2',\n 'sphinxcontrib-bibtex~=2.1',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n + [\n 'nbdime',\n 'bump2version',\n 'ipython',\n 'pre-commit',\n 'check-manifest',\n 'codemetapy>=0.3.4',\n 'twine',\n ]\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}]} |
gh_patches_debug_1215 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-10076 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Synthesis failed for recommender
Hello! Autosynth couldn't regenerate recommender. :broken_heart:
Here's the output from running `synth.py`:
```
Cloning into 'working_repo'...
Switched to branch 'autosynth-recommender'
Running synthtool
['/tmpfs/src/git/autosynth/env/bin/python3', '-m', 'synthtool', 'synth.py', '--']
synthtool > Executing /tmpfs/src/git/autosynth/working_repo/recommender/synth.py.
synthtool > Ensuring dependencies.
synthtool > Pulling artman image.
latest: Pulling from googleapis/artman
Digest: sha256:c773192618c608a7a0415dd95282f841f8e6bcdef7dd760a988c93b77a64bd57
Status: Image is up to date for googleapis/artman:latest
synthtool > Cloning googleapis.
Traceback (most recent call last):
File "/home/kbuilder/.pyenv/versions/3.6.1/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/home/kbuilder/.pyenv/versions/3.6.1/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/synthtool/__main__.py", line 87, in <module>
main()
File "/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/synthtool/__main__.py", line 79, in main
spec.loader.exec_module(synth_module) # type: ignore
File "<frozen importlib._bootstrap_external>", line 678, in exec_module
File "<frozen importlib._bootstrap>", line 205, in _call_with_frames_removed
File "/tmpfs/src/git/autosynth/working_repo/recommender/synth.py", line 32, in <module>
include_protos=True
File "/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/synthtool/gcp/gapic_generator.py", line 50, in py_library
return self._generate_code(service, version, "python", **kwargs)
File "/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/synthtool/gcp/gapic_generator.py", line 121, in _generate_code
f"Unable to find configuration yaml file: {(googleapis / config_path)}."
FileNotFoundError: Unable to find configuration yaml file: /home/kbuilder/.cache/synthtool/googleapis/google/cloud/recommender/artman_recommender_v1beta1.yaml.
synthtool > Cleaned up 1 temporary directories.
synthtool > Wrote metadata to synth.metadata.
Synthesis failed
```
Google internal developers can see the full log [here](https://sponge/74dee1a3-0367-43bf-9f40-1001ae7ea243).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `recommender/synth.py`
Content:
```
1 # Copyright 2019 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """This script is used to synthesize generated parts of this library."""
16 import re
17
18 import synthtool as s
19 from synthtool import gcp
20
21 gapic = gcp.GAPICGenerator()
22 versions = ["v1beta1"]
23 common = gcp.CommonTemplates()
24
25
26 # ----------------------------------------------------------------------------
27 # Generate Cloud Recommender
28 # ----------------------------------------------------------------------------
29 for version in versions:
30 library = gapic.py_library(
31 "recommender", version,
32 include_protos=True
33 )
34 s.move(library, excludes=['nox.py', 'docs/index.rst', 'README.rst', 'setup.py'])
35
36 # ----------------------------------------------------------------------------
37 # Add templated files
38 # ----------------------------------------------------------------------------
39 templated_files = common.py_library(unit_cov_level=97, cov_level=100)
40 s.move(templated_files, excludes=['noxfile.py'])
41
42 s.shell.run(["nox", "-s", "blacken"], hide_output=False)
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/recommender/synth.py b/recommender/synth.py
--- a/recommender/synth.py
+++ b/recommender/synth.py
@@ -29,7 +29,8 @@
for version in versions:
library = gapic.py_library(
"recommender", version,
- include_protos=True
+ include_protos=True,
+ config_path="/google/cloud/recommender/v1beta1/artman_recommender_v1beta1.yaml"
)
s.move(library, excludes=['nox.py', 'docs/index.rst', 'README.rst', 'setup.py'])
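For context, the failure above happens because synthtool cannot locate the artman YAML inside its cached googleapis checkout, and the fix is to point `py_library` at the versioned path explicitly. A hedged, illustrative pre-check along these lines could surface the problem before generation runs; the cache location is copied from the traceback and may differ between environments.

```python
from pathlib import Path

# Assumed cache location, taken from the traceback above; adjust for your environment.
googleapis = Path.home() / ".cache" / "synthtool" / "googleapis"
config_path = "google/cloud/recommender/v1beta1/artman_recommender_v1beta1.yaml"

candidate = googleapis / config_path
if not candidate.is_file():
    raise FileNotFoundError(f"Unable to find configuration yaml file: {candidate}.")
print(f"Found artman config at {candidate}")
```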
| {"golden_diff": "diff --git a/recommender/synth.py b/recommender/synth.py\n--- a/recommender/synth.py\n+++ b/recommender/synth.py\n@@ -29,7 +29,8 @@\n for version in versions:\n library = gapic.py_library(\n \"recommender\", version,\n- include_protos=True\n+ include_protos=True,\n+ config_path=\"/google/cloud/recommender/v1beta1/artman_recommender_v1beta1.yaml\"\n )\n s.move(library, excludes=['nox.py', 'docs/index.rst', 'README.rst', 'setup.py'])\n", "issue": "Synthesis failed for recommender\nHello! Autosynth couldn't regenerate recommender. :broken_heart:\n\nHere's the output from running `synth.py`:\n\n```\nCloning into 'working_repo'...\nSwitched to branch 'autosynth-recommender'\nRunning synthtool\n['/tmpfs/src/git/autosynth/env/bin/python3', '-m', 'synthtool', 'synth.py', '--']\nsynthtool > Executing /tmpfs/src/git/autosynth/working_repo/recommender/synth.py.\nsynthtool > Ensuring dependencies.\nsynthtool > Pulling artman image.\nlatest: Pulling from googleapis/artman\nDigest: sha256:c773192618c608a7a0415dd95282f841f8e6bcdef7dd760a988c93b77a64bd57\nStatus: Image is up to date for googleapis/artman:latest\nsynthtool > Cloning googleapis.\nTraceback (most recent call last):\n File \"/home/kbuilder/.pyenv/versions/3.6.1/lib/python3.6/runpy.py\", line 193, in _run_module_as_main\n \"__main__\", mod_spec)\n File \"/home/kbuilder/.pyenv/versions/3.6.1/lib/python3.6/runpy.py\", line 85, in _run_code\n exec(code, run_globals)\n File \"/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/synthtool/__main__.py\", line 87, in <module>\n main()\n File \"/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/click/core.py\", line 764, in __call__\n return self.main(*args, **kwargs)\n File \"/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/click/core.py\", line 717, in main\n rv = self.invoke(ctx)\n File \"/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/click/core.py\", line 956, in invoke\n return ctx.invoke(self.callback, **ctx.params)\n File \"/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/click/core.py\", line 555, in invoke\n return callback(*args, **kwargs)\n File \"/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/synthtool/__main__.py\", line 79, in main\n spec.loader.exec_module(synth_module) # type: ignore\n File \"<frozen importlib._bootstrap_external>\", line 678, in exec_module\n File \"<frozen importlib._bootstrap>\", line 205, in _call_with_frames_removed\n File \"/tmpfs/src/git/autosynth/working_repo/recommender/synth.py\", line 32, in <module>\n include_protos=True\n File \"/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/synthtool/gcp/gapic_generator.py\", line 50, in py_library\n return self._generate_code(service, version, \"python\", **kwargs)\n File \"/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/synthtool/gcp/gapic_generator.py\", line 121, in _generate_code\n f\"Unable to find configuration yaml file: {(googleapis / config_path)}.\"\nFileNotFoundError: Unable to find configuration yaml file: /home/kbuilder/.cache/synthtool/googleapis/google/cloud/recommender/artman_recommender_v1beta1.yaml.\nsynthtool > Cleaned up 1 temporary directories.\nsynthtool > Wrote metadata to synth.metadata.\n\nSynthesis failed\n\n```\n\nGoogle internal developers can see the full log [here](https://sponge/74dee1a3-0367-43bf-9f40-1001ae7ea243).\n\n", "before_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the 
License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This script is used to synthesize generated parts of this library.\"\"\"\nimport re\n\nimport synthtool as s\nfrom synthtool import gcp\n\ngapic = gcp.GAPICGenerator()\nversions = [\"v1beta1\"]\ncommon = gcp.CommonTemplates()\n\n\n# ----------------------------------------------------------------------------\n# Generate Cloud Recommender\n# ----------------------------------------------------------------------------\nfor version in versions:\n library = gapic.py_library(\n \"recommender\", version,\n include_protos=True\n )\n s.move(library, excludes=['nox.py', 'docs/index.rst', 'README.rst', 'setup.py'])\n\n# ----------------------------------------------------------------------------\n# Add templated files\n# ----------------------------------------------------------------------------\ntemplated_files = common.py_library(unit_cov_level=97, cov_level=100)\ns.move(templated_files, excludes=['noxfile.py'])\n\ns.shell.run([\"nox\", \"-s\", \"blacken\"], hide_output=False) ", "path": "recommender/synth.py"}], "after_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This script is used to synthesize generated parts of this library.\"\"\"\nimport re\n\nimport synthtool as s\nfrom synthtool import gcp\n\ngapic = gcp.GAPICGenerator()\nversions = [\"v1beta1\"]\ncommon = gcp.CommonTemplates()\n\n\n# ----------------------------------------------------------------------------\n# Generate Cloud Recommender\n# ----------------------------------------------------------------------------\nfor version in versions:\n library = gapic.py_library(\n \"recommender\", version,\n include_protos=True,\n config_path=\"/google/cloud/recommender/v1beta1/artman_recommender_v1beta1.yaml\"\n )\n s.move(library, excludes=['nox.py', 'docs/index.rst', 'README.rst', 'setup.py'])\n\n# ----------------------------------------------------------------------------\n# Add templated files\n# ----------------------------------------------------------------------------\ntemplated_files = common.py_library(unit_cov_level=97, cov_level=100)\ns.move(templated_files, excludes=['noxfile.py'])\n\ns.shell.run([\"nox\", \"-s\", \"blacken\"], hide_output=False) ", "path": "recommender/synth.py"}]} |
gh_patches_debug_1216 | rasdani/github-patches | git_diff | python-telegram-bot__python-telegram-bot-1216 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Typo in comment in conversationbot2.py
<!--
Thanks for reporting issues of python-telegram-bot!
Use this template to notify us if you found a bug, or if you want to request a new feature.
If you're looking for help with programming your bot using our library, feel free to ask your
questions in our telegram group at: https://t.me/pythontelegrambotgroup
To make it easier for us to help you please enter detailed information below.
Please note, we only support the latest version of python-telegram-bot and
master branch. Please make sure to upgrade & recreate the issue on the latest
version prior to opening an issue.
-->
### Steps to reproduce
1. Not really a bug... wrong comment line in conversationbot2.py :)
### Expected behaviour
Should be: # Add conversation handler with the states CHOOSING, TYPING_CHOICE and TYPING_REPLY
### Actual behaviour
Actual comment in conversationbot2.py (leftover from conversationbot.py I guess :))
# Add conversation handler with the states GENDER, PHOTO, LOCATION and BIO
### Configuration
**Operating System:**
Windows
**Version of Python, python-telegram-bot & dependencies:**
3.6
``$ python -m telegram``
### Logs
Insert logs here (if necessary)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/conversationbot2.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Simple Bot to reply to Telegram messages
5 # This program is dedicated to the public domain under the CC0 license.
6 """
7 This Bot uses the Updater class to handle the bot.
8
9 First, a few callback functions are defined. Then, those functions are passed to
10 the Dispatcher and registered at their respective places.
11 Then, the bot is started and runs until we press Ctrl-C on the command line.
12
13 Usage:
14 Example of a bot-user conversation using ConversationHandler.
15 Send /start to initiate the conversation.
16 Press Ctrl-C on the command line or send a signal to the process to stop the
17 bot.
18 """
19
20 from telegram import ReplyKeyboardMarkup
21 from telegram.ext import (Updater, CommandHandler, MessageHandler, Filters, RegexHandler,
22 ConversationHandler)
23
24 import logging
25
26 # Enable logging
27 logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
28 level=logging.INFO)
29
30 logger = logging.getLogger(__name__)
31
32 CHOOSING, TYPING_REPLY, TYPING_CHOICE = range(3)
33
34 reply_keyboard = [['Age', 'Favourite colour'],
35 ['Number of siblings', 'Something else...'],
36 ['Done']]
37 markup = ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True)
38
39
40 def facts_to_str(user_data):
41 facts = list()
42
43 for key, value in user_data.items():
44 facts.append('{} - {}'.format(key, value))
45
46 return "\n".join(facts).join(['\n', '\n'])
47
48
49 def start(bot, update):
50 update.message.reply_text(
51 "Hi! My name is Doctor Botter. I will hold a more complex conversation with you. "
52 "Why don't you tell me something about yourself?",
53 reply_markup=markup)
54
55 return CHOOSING
56
57
58 def regular_choice(bot, update, user_data):
59 text = update.message.text
60 user_data['choice'] = text
61 update.message.reply_text(
62 'Your {}? Yes, I would love to hear about that!'.format(text.lower()))
63
64 return TYPING_REPLY
65
66
67 def custom_choice(bot, update):
68 update.message.reply_text('Alright, please send me the category first, '
69 'for example "Most impressive skill"')
70
71 return TYPING_CHOICE
72
73
74 def received_information(bot, update, user_data):
75 text = update.message.text
76 category = user_data['choice']
77 user_data[category] = text
78 del user_data['choice']
79
80 update.message.reply_text("Neat! Just so you know, this is what you already told me:"
81 "{}"
82 "You can tell me more, or change your opinion on something.".format(
83 facts_to_str(user_data)), reply_markup=markup)
84
85 return CHOOSING
86
87
88 def done(bot, update, user_data):
89 if 'choice' in user_data:
90 del user_data['choice']
91
92 update.message.reply_text("I learned these facts about you:"
93 "{}"
94 "Until next time!".format(facts_to_str(user_data)))
95
96 user_data.clear()
97 return ConversationHandler.END
98
99
100 def error(bot, update, error):
101 """Log Errors caused by Updates."""
102 logger.warning('Update "%s" caused error "%s"', update, error)
103
104
105 def main():
106 # Create the Updater and pass it your bot's token.
107 updater = Updater("TOKEN")
108
109 # Get the dispatcher to register handlers
110 dp = updater.dispatcher
111
112 # Add conversation handler with the states GENDER, PHOTO, LOCATION and BIO
113 conv_handler = ConversationHandler(
114 entry_points=[CommandHandler('start', start)],
115
116 states={
117 CHOOSING: [RegexHandler('^(Age|Favourite colour|Number of siblings)$',
118 regular_choice,
119 pass_user_data=True),
120 RegexHandler('^Something else...$',
121 custom_choice),
122 ],
123
124 TYPING_CHOICE: [MessageHandler(Filters.text,
125 regular_choice,
126 pass_user_data=True),
127 ],
128
129 TYPING_REPLY: [MessageHandler(Filters.text,
130 received_information,
131 pass_user_data=True),
132 ],
133 },
134
135 fallbacks=[RegexHandler('^Done$', done, pass_user_data=True)]
136 )
137
138 dp.add_handler(conv_handler)
139
140 # log all errors
141 dp.add_error_handler(error)
142
143 # Start the Bot
144 updater.start_polling()
145
146 # Run the bot until you press Ctrl-C or the process receives SIGINT,
147 # SIGTERM or SIGABRT. This should be used most of the time, since
148 # start_polling() is non-blocking and will stop the bot gracefully.
149 updater.idle()
150
151
152 if __name__ == '__main__':
153 main()
154
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/conversationbot2.py b/examples/conversationbot2.py
--- a/examples/conversationbot2.py
+++ b/examples/conversationbot2.py
@@ -109,7 +109,7 @@
# Get the dispatcher to register handlers
dp = updater.dispatcher
- # Add conversation handler with the states GENDER, PHOTO, LOCATION and BIO
+ # Add conversation handler with the states CHOOSING, TYPING_CHOICE and TYPING_REPLY
conv_handler = ConversationHandler(
entry_points=[CommandHandler('start', start)],
| {"golden_diff": "diff --git a/examples/conversationbot2.py b/examples/conversationbot2.py\n--- a/examples/conversationbot2.py\n+++ b/examples/conversationbot2.py\n@@ -109,7 +109,7 @@\n # Get the dispatcher to register handlers\n dp = updater.dispatcher\n \n- # Add conversation handler with the states GENDER, PHOTO, LOCATION and BIO\n+ # Add conversation handler with the states CHOOSING, TYPING_CHOICE and TYPING_REPLY\n conv_handler = ConversationHandler(\n entry_points=[CommandHandler('start', start)],\n", "issue": "Typo in comment in conversationbot2.py\n<!--\r\nThanks for reporting issues of python-telegram-bot!\r\n\r\nUse this template to notify us if you found a bug, or if you want to request a new feature.\r\nIf you're looking for help with programming your bot using our library, feel free to ask your\r\nquestions in out telegram group at: https://t.me/pythontelegrambotgroup\r\n\r\nTo make it easier for us to help you please enter detailed information below.\r\n\r\nPlease note, we only support the latest version of python-telegram-bot and\r\nmaster branch. Please make sure to upgrade & recreate the issue on the latest\r\nversion prior to opening an issue.\r\n-->\r\n### Steps to reproduce\r\n1. Not really a bug... wrong comment line in conversationbot2.py :)\r\n\r\n### Expected behaviour\r\nShould be: # Add conversation handler with the states CHOOSING, TYPING_CHOICE and TYPING_REPLY\r\n\r\n### Actual behaviour\r\nActual comment in conversationbot2.py (leftover from conversationbot.py I guess :))\r\n# Add conversation handler with the states GENDER, PHOTO, LOCATION and BIO\r\n\r\n### Configuration\r\n**Operating System:**\r\nWindows\r\n\r\n**Version of Python, python-telegram-bot & dependencies:**\r\n3.6\r\n\r\n``$ python -m telegram``\r\n\r\n### Logs\r\nInsert logs here (if necessary)\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Simple Bot to reply to Telegram messages\n# This program is dedicated to the public domain under the CC0 license.\n\"\"\"\nThis Bot uses the Updater class to handle the bot.\n\nFirst, a few callback functions are defined. Then, those functions are passed to\nthe Dispatcher and registered at their respective places.\nThen, the bot is started and runs until we press Ctrl-C on the command line.\n\nUsage:\nExample of a bot-user conversation using ConversationHandler.\nSend /start to initiate the conversation.\nPress Ctrl-C on the command line or send a signal to the process to stop the\nbot.\n\"\"\"\n\nfrom telegram import ReplyKeyboardMarkup\nfrom telegram.ext import (Updater, CommandHandler, MessageHandler, Filters, RegexHandler,\n ConversationHandler)\n\nimport logging\n\n# Enable logging\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO)\n\nlogger = logging.getLogger(__name__)\n\nCHOOSING, TYPING_REPLY, TYPING_CHOICE = range(3)\n\nreply_keyboard = [['Age', 'Favourite colour'],\n ['Number of siblings', 'Something else...'],\n ['Done']]\nmarkup = ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True)\n\n\ndef facts_to_str(user_data):\n facts = list()\n\n for key, value in user_data.items():\n facts.append('{} - {}'.format(key, value))\n\n return \"\\n\".join(facts).join(['\\n', '\\n'])\n\n\ndef start(bot, update):\n update.message.reply_text(\n \"Hi! My name is Doctor Botter. I will hold a more complex conversation with you. 
\"\n \"Why don't you tell me something about yourself?\",\n reply_markup=markup)\n\n return CHOOSING\n\n\ndef regular_choice(bot, update, user_data):\n text = update.message.text\n user_data['choice'] = text\n update.message.reply_text(\n 'Your {}? Yes, I would love to hear about that!'.format(text.lower()))\n\n return TYPING_REPLY\n\n\ndef custom_choice(bot, update):\n update.message.reply_text('Alright, please send me the category first, '\n 'for example \"Most impressive skill\"')\n\n return TYPING_CHOICE\n\n\ndef received_information(bot, update, user_data):\n text = update.message.text\n category = user_data['choice']\n user_data[category] = text\n del user_data['choice']\n\n update.message.reply_text(\"Neat! Just so you know, this is what you already told me:\"\n \"{}\"\n \"You can tell me more, or change your opinion on something.\".format(\n facts_to_str(user_data)), reply_markup=markup)\n\n return CHOOSING\n\n\ndef done(bot, update, user_data):\n if 'choice' in user_data:\n del user_data['choice']\n\n update.message.reply_text(\"I learned these facts about you:\"\n \"{}\"\n \"Until next time!\".format(facts_to_str(user_data)))\n\n user_data.clear()\n return ConversationHandler.END\n\n\ndef error(bot, update, error):\n \"\"\"Log Errors caused by Updates.\"\"\"\n logger.warning('Update \"%s\" caused error \"%s\"', update, error)\n\n\ndef main():\n # Create the Updater and pass it your bot's token.\n updater = Updater(\"TOKEN\")\n\n # Get the dispatcher to register handlers\n dp = updater.dispatcher\n\n # Add conversation handler with the states GENDER, PHOTO, LOCATION and BIO\n conv_handler = ConversationHandler(\n entry_points=[CommandHandler('start', start)],\n\n states={\n CHOOSING: [RegexHandler('^(Age|Favourite colour|Number of siblings)$',\n regular_choice,\n pass_user_data=True),\n RegexHandler('^Something else...$',\n custom_choice),\n ],\n\n TYPING_CHOICE: [MessageHandler(Filters.text,\n regular_choice,\n pass_user_data=True),\n ],\n\n TYPING_REPLY: [MessageHandler(Filters.text,\n received_information,\n pass_user_data=True),\n ],\n },\n\n fallbacks=[RegexHandler('^Done$', done, pass_user_data=True)]\n )\n\n dp.add_handler(conv_handler)\n\n # log all errors\n dp.add_error_handler(error)\n\n # Start the Bot\n updater.start_polling()\n\n # Run the bot until you press Ctrl-C or the process receives SIGINT,\n # SIGTERM or SIGABRT. This should be used most of the time, since\n # start_polling() is non-blocking and will stop the bot gracefully.\n updater.idle()\n\n\nif __name__ == '__main__':\n main()\n", "path": "examples/conversationbot2.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Simple Bot to reply to Telegram messages\n# This program is dedicated to the public domain under the CC0 license.\n\"\"\"\nThis Bot uses the Updater class to handle the bot.\n\nFirst, a few callback functions are defined. 
Then, those functions are passed to\nthe Dispatcher and registered at their respective places.\nThen, the bot is started and runs until we press Ctrl-C on the command line.\n\nUsage:\nExample of a bot-user conversation using ConversationHandler.\nSend /start to initiate the conversation.\nPress Ctrl-C on the command line or send a signal to the process to stop the\nbot.\n\"\"\"\n\nfrom telegram import ReplyKeyboardMarkup\nfrom telegram.ext import (Updater, CommandHandler, MessageHandler, Filters, RegexHandler,\n ConversationHandler)\n\nimport logging\n\n# Enable logging\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO)\n\nlogger = logging.getLogger(__name__)\n\nCHOOSING, TYPING_REPLY, TYPING_CHOICE = range(3)\n\nreply_keyboard = [['Age', 'Favourite colour'],\n ['Number of siblings', 'Something else...'],\n ['Done']]\nmarkup = ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True)\n\n\ndef facts_to_str(user_data):\n facts = list()\n\n for key, value in user_data.items():\n facts.append('{} - {}'.format(key, value))\n\n return \"\\n\".join(facts).join(['\\n', '\\n'])\n\n\ndef start(bot, update):\n update.message.reply_text(\n \"Hi! My name is Doctor Botter. I will hold a more complex conversation with you. \"\n \"Why don't you tell me something about yourself?\",\n reply_markup=markup)\n\n return CHOOSING\n\n\ndef regular_choice(bot, update, user_data):\n text = update.message.text\n user_data['choice'] = text\n update.message.reply_text(\n 'Your {}? Yes, I would love to hear about that!'.format(text.lower()))\n\n return TYPING_REPLY\n\n\ndef custom_choice(bot, update):\n update.message.reply_text('Alright, please send me the category first, '\n 'for example \"Most impressive skill\"')\n\n return TYPING_CHOICE\n\n\ndef received_information(bot, update, user_data):\n text = update.message.text\n category = user_data['choice']\n user_data[category] = text\n del user_data['choice']\n\n update.message.reply_text(\"Neat! 
Just so you know, this is what you already told me:\"\n \"{}\"\n \"You can tell me more, or change your opinion on something.\".format(\n facts_to_str(user_data)), reply_markup=markup)\n\n return CHOOSING\n\n\ndef done(bot, update, user_data):\n if 'choice' in user_data:\n del user_data['choice']\n\n update.message.reply_text(\"I learned these facts about you:\"\n \"{}\"\n \"Until next time!\".format(facts_to_str(user_data)))\n\n user_data.clear()\n return ConversationHandler.END\n\n\ndef error(bot, update, error):\n \"\"\"Log Errors caused by Updates.\"\"\"\n logger.warning('Update \"%s\" caused error \"%s\"', update, error)\n\n\ndef main():\n # Create the Updater and pass it your bot's token.\n updater = Updater(\"TOKEN\")\n\n # Get the dispatcher to register handlers\n dp = updater.dispatcher\n\n # Add conversation handler with the states CHOOSING, TYPING_CHOICE and TYPING_REPLY\n conv_handler = ConversationHandler(\n entry_points=[CommandHandler('start', start)],\n\n states={\n CHOOSING: [RegexHandler('^(Age|Favourite colour|Number of siblings)$',\n regular_choice,\n pass_user_data=True),\n RegexHandler('^Something else...$',\n custom_choice),\n ],\n\n TYPING_CHOICE: [MessageHandler(Filters.text,\n regular_choice,\n pass_user_data=True),\n ],\n\n TYPING_REPLY: [MessageHandler(Filters.text,\n received_information,\n pass_user_data=True),\n ],\n },\n\n fallbacks=[RegexHandler('^Done$', done, pass_user_data=True)]\n )\n\n dp.add_handler(conv_handler)\n\n # log all errors\n dp.add_error_handler(error)\n\n # Start the Bot\n updater.start_polling()\n\n # Run the bot until you press Ctrl-C or the process receives SIGINT,\n # SIGTERM or SIGABRT. This should be used most of the time, since\n # start_polling() is non-blocking and will stop the bot gracefully.\n updater.idle()\n\n\nif __name__ == '__main__':\n main()\n", "path": "examples/conversationbot2.py"}]} |
gh_patches_debug_1217 | rasdani/github-patches | git_diff | StackStorm__st2-3843 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Action 'linux.service' fails on Centos7
When I tried to restart a service on a CentOS 7 server, I got the following error:
```
Traceback (most recent call last):
File "/tmp/5a0459bc07ac686fb813a920/service.py", line 24, in <module>
subprocess.call(cmd, shell=True)
NameError: name 'cmd' is not defined
```
After investigation, the resolution was found:
in the file /opt/stackstorm/packs/linux/actions/service.py, the entry
`elif re.search(distro, 'Redhat') or re.search(distro, 'Fedora'):`
was changed to
`elif re.search(distro, 'Redhat') or re.search(distro, 'Fedora') or re.search(distro, 'CentOS Linux'):`
and the issue went away.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `contrib/linux/actions/service.py`
Content:
```
1 #!/usr/bin/env python
2
3 import re
4 import sys
5 import os
6 import platform
7 import subprocess
8
9 distro = platform.linux_distribution()[0]
10
11 args = {'act': sys.argv[1], 'service': sys.argv[2]}
12
13 if re.search(distro, 'Ubuntu'):
14 if os.path.isfile("/etc/init/%s.conf" % args['service']):
15 cmd = args['act'] + " " + args['service']
16 elif os.path.isfile("/etc/init.d/%s" % args['service']):
17 cmd = "/etc/init.d/%s %s" % (args['service'], args['act'])
18 else:
19 print("Unknown service")
20 sys.exit(2)
21 elif re.search(distro, 'Redhat') or re.search(distro, 'Fedora'):
22 cmd = "systemctl %s %s" % (args['act'], args['service'])
23
24 subprocess.call(cmd, shell=True)
25
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/contrib/linux/actions/service.py b/contrib/linux/actions/service.py
--- a/contrib/linux/actions/service.py
+++ b/contrib/linux/actions/service.py
@@ -18,7 +18,8 @@
else:
print("Unknown service")
sys.exit(2)
-elif re.search(distro, 'Redhat') or re.search(distro, 'Fedora'):
+elif re.search(distro, 'Redhat') or re.search(distro, 'Fedora') or \
+ re.search(distro, 'CentOS Linux'):
cmd = "systemctl %s %s" % (args['act'], args['service'])
subprocess.call(cmd, shell=True)
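Beyond adding the CentOS branch, the underlying failure mode is that `cmd` stays undefined whenever no distro branch matches, so the script only blows up later with a `NameError`. (As an aside, the `re.search(distro, 'Redhat')` calls appear to pass the detected distro string as the regex pattern and the literal name as the subject, the reverse of the usual intent; that is outside this patch.) A defensive variant, sketched below with a hard-coded distro and placeholder action/service purely for illustration, fails with a clear message instead:

```python
import sys

# Assumed values for illustration; the real action derives these from platform APIs and argv.
distro = "CentOS Linux"
act, service = "restart", "nginx"

cmd = None
if "Ubuntu" in distro:
    cmd = "service %s %s" % (service, act)
elif any(name in distro for name in ("Redhat", "Fedora", "CentOS Linux")):
    cmd = "systemctl %s %s" % (act, service)

if cmd is None:
    sys.exit("Unsupported distribution: %r" % distro)
print(cmd)
```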
| {"golden_diff": "diff --git a/contrib/linux/actions/service.py b/contrib/linux/actions/service.py\n--- a/contrib/linux/actions/service.py\n+++ b/contrib/linux/actions/service.py\n@@ -18,7 +18,8 @@\n else:\n print(\"Unknown service\")\n sys.exit(2)\n-elif re.search(distro, 'Redhat') or re.search(distro, 'Fedora'):\n+elif re.search(distro, 'Redhat') or re.search(distro, 'Fedora') or \\\n+ re.search(distro, 'CentOS Linux'):\n cmd = \"systemctl %s %s\" % (args['act'], args['service'])\n \n subprocess.call(cmd, shell=True)\n", "issue": "Action 'linux.service' fails on Centos7\nWhen I tried to execute restart some service on the Centos7 server got the following error:\r\n```\r\nTraceback (most recent call last):\r\n File \"/tmp/5a0459bc07ac686fb813a920/service.py\", line 24, in <module>\r\n subprocess.call(cmd, shell=True)\r\nNameError: name 'cmd' is not defined\r\n```\r\nAfter investigation the resolution has been found:\r\nin file /opt/stackstorm/packs/linux/actions/service.py the entry\r\n\r\n`elif re.search(distro, 'Redhat') or re.search(distro, 'Fedora'):`\r\n\r\nfixed to \r\n\r\n`elif re.search(distro, 'Redhat') or re.search(distro, 'Fedora') or re.search(distro, 'CentOS Linux'):`\r\n\r\nThe issue has gone\r\n\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport re\nimport sys\nimport os\nimport platform\nimport subprocess\n\ndistro = platform.linux_distribution()[0]\n\nargs = {'act': sys.argv[1], 'service': sys.argv[2]}\n\nif re.search(distro, 'Ubuntu'):\n if os.path.isfile(\"/etc/init/%s.conf\" % args['service']):\n cmd = args['act'] + \" \" + args['service']\n elif os.path.isfile(\"/etc/init.d/%s\" % args['service']):\n cmd = \"/etc/init.d/%s %s\" % (args['service'], args['act'])\n else:\n print(\"Unknown service\")\n sys.exit(2)\nelif re.search(distro, 'Redhat') or re.search(distro, 'Fedora'):\n cmd = \"systemctl %s %s\" % (args['act'], args['service'])\n\nsubprocess.call(cmd, shell=True)\n", "path": "contrib/linux/actions/service.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nimport re\nimport sys\nimport os\nimport platform\nimport subprocess\n\ndistro = platform.linux_distribution()[0]\n\nargs = {'act': sys.argv[1], 'service': sys.argv[2]}\n\nif re.search(distro, 'Ubuntu'):\n if os.path.isfile(\"/etc/init/%s.conf\" % args['service']):\n cmd = args['act'] + \" \" + args['service']\n elif os.path.isfile(\"/etc/init.d/%s\" % args['service']):\n cmd = \"/etc/init.d/%s %s\" % (args['service'], args['act'])\n else:\n print(\"Unknown service\")\n sys.exit(2)\nelif re.search(distro, 'Redhat') or re.search(distro, 'Fedora') or \\\n re.search(distro, 'CentOS Linux'):\n cmd = \"systemctl %s %s\" % (args['act'], args['service'])\n\nsubprocess.call(cmd, shell=True)\n", "path": "contrib/linux/actions/service.py"}]} |
gh_patches_debug_1218 | rasdani/github-patches | git_diff | pytorch__vision-4283 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
a little problem when using some pretrained models
## 🐛 Bug
<!-- A clear and concise description of what the bug is. -->
A little problem.
I used some pretrained models to do object detection.
However, when I used models whose names include other models' names, such as 'fasterrcnn_mobilenet_v3_large_320_fpn' (its name includes the name of the model 'mobilenet_v3_large'), the weight file of the shorter-named model gets downloaded.
For example, when I used the model 'fasterrcnn_mobilenet_v3_large_320_fpn', regardless of whether the pretrained attribute is True or not, the weight file of the model 'mobilenet_v3_large' is downloaded.
This problem also happens in models such as 'maskrcnn_resnet50_fpn' and many other models.
## To Reproduce
Steps to reproduce the behavior:
It's easy to reproduce. For example:
```bash
from torchvision.models.detection import *
model = fasterrcnn_mobilenet_v3_large_320_fpn()
```
Execute the code above, and the weight file of the model "mobilenet_v3_large" will be downloaded.
You can change the model name to any other model name of this kind.
<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->
cc @fmassa @vfdev-5 @pmeier
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchvision/models/segmentation/segmentation.py`
Content:
```
1 from .._utils import IntermediateLayerGetter
2 from ..._internally_replaced_utils import load_state_dict_from_url
3 from .. import mobilenetv3
4 from .. import resnet
5 from .deeplabv3 import DeepLabHead, DeepLabV3
6 from .fcn import FCN, FCNHead
7 from .lraspp import LRASPP
8
9
10 __all__ = ['fcn_resnet50', 'fcn_resnet101', 'deeplabv3_resnet50', 'deeplabv3_resnet101',
11 'deeplabv3_mobilenet_v3_large', 'lraspp_mobilenet_v3_large']
12
13
14 model_urls = {
15 'fcn_resnet50_coco': 'https://download.pytorch.org/models/fcn_resnet50_coco-1167a1af.pth',
16 'fcn_resnet101_coco': 'https://download.pytorch.org/models/fcn_resnet101_coco-7ecb50ca.pth',
17 'deeplabv3_resnet50_coco': 'https://download.pytorch.org/models/deeplabv3_resnet50_coco-cd0a2569.pth',
18 'deeplabv3_resnet101_coco': 'https://download.pytorch.org/models/deeplabv3_resnet101_coco-586e9e4e.pth',
19 'deeplabv3_mobilenet_v3_large_coco':
20 'https://download.pytorch.org/models/deeplabv3_mobilenet_v3_large-fc3c493d.pth',
21 'lraspp_mobilenet_v3_large_coco': 'https://download.pytorch.org/models/lraspp_mobilenet_v3_large-d234d4ea.pth',
22 }
23
24
25 def _segm_model(name, backbone_name, num_classes, aux, pretrained_backbone=True):
26 if 'resnet' in backbone_name:
27 backbone = resnet.__dict__[backbone_name](
28 pretrained=pretrained_backbone,
29 replace_stride_with_dilation=[False, True, True])
30 out_layer = 'layer4'
31 out_inplanes = 2048
32 aux_layer = 'layer3'
33 aux_inplanes = 1024
34 elif 'mobilenet_v3' in backbone_name:
35 backbone = mobilenetv3.__dict__[backbone_name](pretrained=pretrained_backbone, dilated=True).features
36
37 # Gather the indices of blocks which are strided. These are the locations of C1, ..., Cn-1 blocks.
38 # The first and last blocks are always included because they are the C0 (conv1) and Cn.
39 stage_indices = [0] + [i for i, b in enumerate(backbone) if getattr(b, "_is_cn", False)] + [len(backbone) - 1]
40 out_pos = stage_indices[-1] # use C5 which has output_stride = 16
41 out_layer = str(out_pos)
42 out_inplanes = backbone[out_pos].out_channels
43 aux_pos = stage_indices[-4] # use C2 here which has output_stride = 8
44 aux_layer = str(aux_pos)
45 aux_inplanes = backbone[aux_pos].out_channels
46 else:
47 raise NotImplementedError('backbone {} is not supported as of now'.format(backbone_name))
48
49 return_layers = {out_layer: 'out'}
50 if aux:
51 return_layers[aux_layer] = 'aux'
52 backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)
53
54 aux_classifier = None
55 if aux:
56 aux_classifier = FCNHead(aux_inplanes, num_classes)
57
58 model_map = {
59 'deeplabv3': (DeepLabHead, DeepLabV3),
60 'fcn': (FCNHead, FCN),
61 }
62 classifier = model_map[name][0](out_inplanes, num_classes)
63 base_model = model_map[name][1]
64
65 model = base_model(backbone, classifier, aux_classifier)
66 return model
67
68
69 def _load_model(arch_type, backbone, pretrained, progress, num_classes, aux_loss, **kwargs):
70 if pretrained:
71 aux_loss = True
72 kwargs["pretrained_backbone"] = False
73 model = _segm_model(arch_type, backbone, num_classes, aux_loss, **kwargs)
74 if pretrained:
75 _load_weights(model, arch_type, backbone, progress)
76 return model
77
78
79 def _load_weights(model, arch_type, backbone, progress):
80 arch = arch_type + '_' + backbone + '_coco'
81 model_url = model_urls.get(arch, None)
82 if model_url is None:
83 raise NotImplementedError('pretrained {} is not supported as of now'.format(arch))
84 else:
85 state_dict = load_state_dict_from_url(model_url, progress=progress)
86 model.load_state_dict(state_dict)
87
88
89 def _segm_lraspp_mobilenetv3(backbone_name, num_classes, pretrained_backbone=True):
90 backbone = mobilenetv3.__dict__[backbone_name](pretrained=pretrained_backbone, dilated=True).features
91
92 # Gather the indices of blocks which are strided. These are the locations of C1, ..., Cn-1 blocks.
93 # The first and last blocks are always included because they are the C0 (conv1) and Cn.
94 stage_indices = [0] + [i for i, b in enumerate(backbone) if getattr(b, "_is_cn", False)] + [len(backbone) - 1]
95 low_pos = stage_indices[-4] # use C2 here which has output_stride = 8
96 high_pos = stage_indices[-1] # use C5 which has output_stride = 16
97 low_channels = backbone[low_pos].out_channels
98 high_channels = backbone[high_pos].out_channels
99
100 backbone = IntermediateLayerGetter(backbone, return_layers={str(low_pos): 'low', str(high_pos): 'high'})
101
102 model = LRASPP(backbone, low_channels, high_channels, num_classes)
103 return model
104
105
106 def fcn_resnet50(pretrained=False, progress=True,
107 num_classes=21, aux_loss=None, **kwargs):
108 """Constructs a Fully-Convolutional Network model with a ResNet-50 backbone.
109
110 Args:
111 pretrained (bool): If True, returns a model pre-trained on COCO train2017 which
112 contains the same classes as Pascal VOC
113 progress (bool): If True, displays a progress bar of the download to stderr
114 num_classes (int): number of output classes of the model (including the background)
115 aux_loss (bool): If True, it uses an auxiliary loss
116 """
117 return _load_model('fcn', 'resnet50', pretrained, progress, num_classes, aux_loss, **kwargs)
118
119
120 def fcn_resnet101(pretrained=False, progress=True,
121 num_classes=21, aux_loss=None, **kwargs):
122 """Constructs a Fully-Convolutional Network model with a ResNet-101 backbone.
123
124 Args:
125 pretrained (bool): If True, returns a model pre-trained on COCO train2017 which
126 contains the same classes as Pascal VOC
127 progress (bool): If True, displays a progress bar of the download to stderr
128 num_classes (int): number of output classes of the model (including the background)
129 aux_loss (bool): If True, it uses an auxiliary loss
130 """
131 return _load_model('fcn', 'resnet101', pretrained, progress, num_classes, aux_loss, **kwargs)
132
133
134 def deeplabv3_resnet50(pretrained=False, progress=True,
135 num_classes=21, aux_loss=None, **kwargs):
136 """Constructs a DeepLabV3 model with a ResNet-50 backbone.
137
138 Args:
139 pretrained (bool): If True, returns a model pre-trained on COCO train2017 which
140 contains the same classes as Pascal VOC
141 progress (bool): If True, displays a progress bar of the download to stderr
142 num_classes (int): number of output classes of the model (including the background)
143 aux_loss (bool): If True, it uses an auxiliary loss
144 """
145 return _load_model('deeplabv3', 'resnet50', pretrained, progress, num_classes, aux_loss, **kwargs)
146
147
148 def deeplabv3_resnet101(pretrained=False, progress=True,
149 num_classes=21, aux_loss=None, **kwargs):
150 """Constructs a DeepLabV3 model with a ResNet-101 backbone.
151
152 Args:
153 pretrained (bool): If True, returns a model pre-trained on COCO train2017 which
154 contains the same classes as Pascal VOC
155 progress (bool): If True, displays a progress bar of the download to stderr
156 num_classes (int): The number of classes
157 aux_loss (bool): If True, include an auxiliary classifier
158 """
159 return _load_model('deeplabv3', 'resnet101', pretrained, progress, num_classes, aux_loss, **kwargs)
160
161
162 def deeplabv3_mobilenet_v3_large(pretrained=False, progress=True,
163 num_classes=21, aux_loss=None, **kwargs):
164 """Constructs a DeepLabV3 model with a MobileNetV3-Large backbone.
165
166 Args:
167 pretrained (bool): If True, returns a model pre-trained on COCO train2017 which
168 contains the same classes as Pascal VOC
169 progress (bool): If True, displays a progress bar of the download to stderr
170 num_classes (int): number of output classes of the model (including the background)
171 aux_loss (bool): If True, it uses an auxiliary loss
172 """
173 return _load_model('deeplabv3', 'mobilenet_v3_large', pretrained, progress, num_classes, aux_loss, **kwargs)
174
175
176 def lraspp_mobilenet_v3_large(pretrained=False, progress=True, num_classes=21, **kwargs):
177 """Constructs a Lite R-ASPP Network model with a MobileNetV3-Large backbone.
178
179 Args:
180 pretrained (bool): If True, returns a model pre-trained on COCO train2017 which
181 contains the same classes as Pascal VOC
182 progress (bool): If True, displays a progress bar of the download to stderr
183 num_classes (int): number of output classes of the model (including the background)
184 """
185 if kwargs.pop("aux_loss", False):
186 raise NotImplementedError('This model does not use auxiliary loss')
187
188 backbone_name = 'mobilenet_v3_large'
189 model = _segm_lraspp_mobilenetv3(backbone_name, num_classes, **kwargs)
190
191 if pretrained:
192 _load_weights(model, 'lraspp', backbone_name, progress)
193
194 return model
195
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torchvision/models/segmentation/segmentation.py b/torchvision/models/segmentation/segmentation.py
--- a/torchvision/models/segmentation/segmentation.py
+++ b/torchvision/models/segmentation/segmentation.py
@@ -186,6 +186,8 @@
raise NotImplementedError('This model does not use auxiliary loss')
backbone_name = 'mobilenet_v3_large'
+ if pretrained:
+ kwargs["pretrained_backbone"] = False
model = _segm_lraspp_mobilenetv3(backbone_name, num_classes, **kwargs)
if pretrained:
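As a hedged usage sketch of the behaviour this patch targets: once `pretrained_backbone` is forced off for pretrained models, constructing a pretrained model should fetch only its own checkpoint rather than also downloading the standalone backbone weights. The snippet below simply exercises constructors that appear in this file and in the issue, and assumes a torchvision build that contains them.

```python
import torchvision

# Should fetch only the LR-ASPP COCO checkpoint; the backbone is initialised from it.
seg_model = torchvision.models.segmentation.lraspp_mobilenet_v3_large(pretrained=True)
seg_model.eval()

# With both flags off, no download should happen at all.
det_model = torchvision.models.detection.fasterrcnn_mobilenet_v3_large_320_fpn(
    pretrained=False, pretrained_backbone=False
)
det_model.eval()
```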
| {"golden_diff": "diff --git a/torchvision/models/segmentation/segmentation.py b/torchvision/models/segmentation/segmentation.py\n--- a/torchvision/models/segmentation/segmentation.py\n+++ b/torchvision/models/segmentation/segmentation.py\n@@ -186,6 +186,8 @@\n raise NotImplementedError('This model does not use auxiliary loss')\n \n backbone_name = 'mobilenet_v3_large'\n+ if pretrained:\n+ kwargs[\"pretrained_backbone\"] = False\n model = _segm_lraspp_mobilenetv3(backbone_name, num_classes, **kwargs)\n \n if pretrained:\n", "issue": "a little problem when using some pretrained models\n## \ud83d\udc1b Bug\r\n\r\n<!-- A clear and concise description of what the bug is. -->\r\nA little problem.\r\n\r\nI used some pretrained models to do object detection. \r\n\r\nHowever, when i used models whose name include others models' name, such as 'fasterrcnn_mobilenet_v3_large_320_fpn', (its name includes the name of model 'mobilenet_v3_large'), it will download the weight file of the short name models. \r\n\r\nFor example, when i used model 'fasterrcnn_mobilenet_v3_large_320_fpn', whenever the pretrained attribute is True or not, the weight file of model 'mobilenet_v3_large' will be downloaded. \r\n\r\nThis problem also happen in the models such as 'maskrcnn_resnet50_fpn' and many other models.\r\n\r\n## To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\nit's easy to reproduce. For example:\r\n```bash\r\nfrom torchvision.models.detection import *\r\nmodel = fasterrcnn_mobilenet_v3_large_320_fpn()\r\n```\r\nexcute the code above, the weight file of model \"mobilenet_v3_large\" will be downloaded.\r\nyou can change the model name to other this kind of model names.\r\n<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->\r\n\r\n\r\n\n\ncc @fmassa @vfdev-5 @pmeier\n", "before_files": [{"content": "from .._utils import IntermediateLayerGetter\nfrom ..._internally_replaced_utils import load_state_dict_from_url\nfrom .. import mobilenetv3\nfrom .. import resnet\nfrom .deeplabv3 import DeepLabHead, DeepLabV3\nfrom .fcn import FCN, FCNHead\nfrom .lraspp import LRASPP\n\n\n__all__ = ['fcn_resnet50', 'fcn_resnet101', 'deeplabv3_resnet50', 'deeplabv3_resnet101',\n 'deeplabv3_mobilenet_v3_large', 'lraspp_mobilenet_v3_large']\n\n\nmodel_urls = {\n 'fcn_resnet50_coco': 'https://download.pytorch.org/models/fcn_resnet50_coco-1167a1af.pth',\n 'fcn_resnet101_coco': 'https://download.pytorch.org/models/fcn_resnet101_coco-7ecb50ca.pth',\n 'deeplabv3_resnet50_coco': 'https://download.pytorch.org/models/deeplabv3_resnet50_coco-cd0a2569.pth',\n 'deeplabv3_resnet101_coco': 'https://download.pytorch.org/models/deeplabv3_resnet101_coco-586e9e4e.pth',\n 'deeplabv3_mobilenet_v3_large_coco':\n 'https://download.pytorch.org/models/deeplabv3_mobilenet_v3_large-fc3c493d.pth',\n 'lraspp_mobilenet_v3_large_coco': 'https://download.pytorch.org/models/lraspp_mobilenet_v3_large-d234d4ea.pth',\n}\n\n\ndef _segm_model(name, backbone_name, num_classes, aux, pretrained_backbone=True):\n if 'resnet' in backbone_name:\n backbone = resnet.__dict__[backbone_name](\n pretrained=pretrained_backbone,\n replace_stride_with_dilation=[False, True, True])\n out_layer = 'layer4'\n out_inplanes = 2048\n aux_layer = 'layer3'\n aux_inplanes = 1024\n elif 'mobilenet_v3' in backbone_name:\n backbone = mobilenetv3.__dict__[backbone_name](pretrained=pretrained_backbone, dilated=True).features\n\n # Gather the indices of blocks which are strided. 
These are the locations of C1, ..., Cn-1 blocks.\n # The first and last blocks are always included because they are the C0 (conv1) and Cn.\n stage_indices = [0] + [i for i, b in enumerate(backbone) if getattr(b, \"_is_cn\", False)] + [len(backbone) - 1]\n out_pos = stage_indices[-1] # use C5 which has output_stride = 16\n out_layer = str(out_pos)\n out_inplanes = backbone[out_pos].out_channels\n aux_pos = stage_indices[-4] # use C2 here which has output_stride = 8\n aux_layer = str(aux_pos)\n aux_inplanes = backbone[aux_pos].out_channels\n else:\n raise NotImplementedError('backbone {} is not supported as of now'.format(backbone_name))\n\n return_layers = {out_layer: 'out'}\n if aux:\n return_layers[aux_layer] = 'aux'\n backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)\n\n aux_classifier = None\n if aux:\n aux_classifier = FCNHead(aux_inplanes, num_classes)\n\n model_map = {\n 'deeplabv3': (DeepLabHead, DeepLabV3),\n 'fcn': (FCNHead, FCN),\n }\n classifier = model_map[name][0](out_inplanes, num_classes)\n base_model = model_map[name][1]\n\n model = base_model(backbone, classifier, aux_classifier)\n return model\n\n\ndef _load_model(arch_type, backbone, pretrained, progress, num_classes, aux_loss, **kwargs):\n if pretrained:\n aux_loss = True\n kwargs[\"pretrained_backbone\"] = False\n model = _segm_model(arch_type, backbone, num_classes, aux_loss, **kwargs)\n if pretrained:\n _load_weights(model, arch_type, backbone, progress)\n return model\n\n\ndef _load_weights(model, arch_type, backbone, progress):\n arch = arch_type + '_' + backbone + '_coco'\n model_url = model_urls.get(arch, None)\n if model_url is None:\n raise NotImplementedError('pretrained {} is not supported as of now'.format(arch))\n else:\n state_dict = load_state_dict_from_url(model_url, progress=progress)\n model.load_state_dict(state_dict)\n\n\ndef _segm_lraspp_mobilenetv3(backbone_name, num_classes, pretrained_backbone=True):\n backbone = mobilenetv3.__dict__[backbone_name](pretrained=pretrained_backbone, dilated=True).features\n\n # Gather the indices of blocks which are strided. 
These are the locations of C1, ..., Cn-1 blocks.\n # The first and last blocks are always included because they are the C0 (conv1) and Cn.\n stage_indices = [0] + [i for i, b in enumerate(backbone) if getattr(b, \"_is_cn\", False)] + [len(backbone) - 1]\n low_pos = stage_indices[-4] # use C2 here which has output_stride = 8\n high_pos = stage_indices[-1] # use C5 which has output_stride = 16\n low_channels = backbone[low_pos].out_channels\n high_channels = backbone[high_pos].out_channels\n\n backbone = IntermediateLayerGetter(backbone, return_layers={str(low_pos): 'low', str(high_pos): 'high'})\n\n model = LRASPP(backbone, low_channels, high_channels, num_classes)\n return model\n\n\ndef fcn_resnet50(pretrained=False, progress=True,\n num_classes=21, aux_loss=None, **kwargs):\n \"\"\"Constructs a Fully-Convolutional Network model with a ResNet-50 backbone.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017 which\n contains the same classes as Pascal VOC\n progress (bool): If True, displays a progress bar of the download to stderr\n num_classes (int): number of output classes of the model (including the background)\n aux_loss (bool): If True, it uses an auxiliary loss\n \"\"\"\n return _load_model('fcn', 'resnet50', pretrained, progress, num_classes, aux_loss, **kwargs)\n\n\ndef fcn_resnet101(pretrained=False, progress=True,\n num_classes=21, aux_loss=None, **kwargs):\n \"\"\"Constructs a Fully-Convolutional Network model with a ResNet-101 backbone.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017 which\n contains the same classes as Pascal VOC\n progress (bool): If True, displays a progress bar of the download to stderr\n num_classes (int): number of output classes of the model (including the background)\n aux_loss (bool): If True, it uses an auxiliary loss\n \"\"\"\n return _load_model('fcn', 'resnet101', pretrained, progress, num_classes, aux_loss, **kwargs)\n\n\ndef deeplabv3_resnet50(pretrained=False, progress=True,\n num_classes=21, aux_loss=None, **kwargs):\n \"\"\"Constructs a DeepLabV3 model with a ResNet-50 backbone.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017 which\n contains the same classes as Pascal VOC\n progress (bool): If True, displays a progress bar of the download to stderr\n num_classes (int): number of output classes of the model (including the background)\n aux_loss (bool): If True, it uses an auxiliary loss\n \"\"\"\n return _load_model('deeplabv3', 'resnet50', pretrained, progress, num_classes, aux_loss, **kwargs)\n\n\ndef deeplabv3_resnet101(pretrained=False, progress=True,\n num_classes=21, aux_loss=None, **kwargs):\n \"\"\"Constructs a DeepLabV3 model with a ResNet-101 backbone.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017 which\n contains the same classes as Pascal VOC\n progress (bool): If True, displays a progress bar of the download to stderr\n num_classes (int): The number of classes\n aux_loss (bool): If True, include an auxiliary classifier\n \"\"\"\n return _load_model('deeplabv3', 'resnet101', pretrained, progress, num_classes, aux_loss, **kwargs)\n\n\ndef deeplabv3_mobilenet_v3_large(pretrained=False, progress=True,\n num_classes=21, aux_loss=None, **kwargs):\n \"\"\"Constructs a DeepLabV3 model with a MobileNetV3-Large backbone.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017 which\n contains the same classes as Pascal VOC\n progress (bool): If True, displays a 
progress bar of the download to stderr\n num_classes (int): number of output classes of the model (including the background)\n aux_loss (bool): If True, it uses an auxiliary loss\n \"\"\"\n return _load_model('deeplabv3', 'mobilenet_v3_large', pretrained, progress, num_classes, aux_loss, **kwargs)\n\n\ndef lraspp_mobilenet_v3_large(pretrained=False, progress=True, num_classes=21, **kwargs):\n \"\"\"Constructs a Lite R-ASPP Network model with a MobileNetV3-Large backbone.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017 which\n contains the same classes as Pascal VOC\n progress (bool): If True, displays a progress bar of the download to stderr\n num_classes (int): number of output classes of the model (including the background)\n \"\"\"\n if kwargs.pop(\"aux_loss\", False):\n raise NotImplementedError('This model does not use auxiliary loss')\n\n backbone_name = 'mobilenet_v3_large'\n model = _segm_lraspp_mobilenetv3(backbone_name, num_classes, **kwargs)\n\n if pretrained:\n _load_weights(model, 'lraspp', backbone_name, progress)\n\n return model\n", "path": "torchvision/models/segmentation/segmentation.py"}], "after_files": [{"content": "from .._utils import IntermediateLayerGetter\nfrom ..._internally_replaced_utils import load_state_dict_from_url\nfrom .. import mobilenetv3\nfrom .. import resnet\nfrom .deeplabv3 import DeepLabHead, DeepLabV3\nfrom .fcn import FCN, FCNHead\nfrom .lraspp import LRASPP\n\n\n__all__ = ['fcn_resnet50', 'fcn_resnet101', 'deeplabv3_resnet50', 'deeplabv3_resnet101',\n 'deeplabv3_mobilenet_v3_large', 'lraspp_mobilenet_v3_large']\n\n\nmodel_urls = {\n 'fcn_resnet50_coco': 'https://download.pytorch.org/models/fcn_resnet50_coco-1167a1af.pth',\n 'fcn_resnet101_coco': 'https://download.pytorch.org/models/fcn_resnet101_coco-7ecb50ca.pth',\n 'deeplabv3_resnet50_coco': 'https://download.pytorch.org/models/deeplabv3_resnet50_coco-cd0a2569.pth',\n 'deeplabv3_resnet101_coco': 'https://download.pytorch.org/models/deeplabv3_resnet101_coco-586e9e4e.pth',\n 'deeplabv3_mobilenet_v3_large_coco':\n 'https://download.pytorch.org/models/deeplabv3_mobilenet_v3_large-fc3c493d.pth',\n 'lraspp_mobilenet_v3_large_coco': 'https://download.pytorch.org/models/lraspp_mobilenet_v3_large-d234d4ea.pth',\n}\n\n\ndef _segm_model(name, backbone_name, num_classes, aux, pretrained_backbone=True):\n if 'resnet' in backbone_name:\n backbone = resnet.__dict__[backbone_name](\n pretrained=pretrained_backbone,\n replace_stride_with_dilation=[False, True, True])\n out_layer = 'layer4'\n out_inplanes = 2048\n aux_layer = 'layer3'\n aux_inplanes = 1024\n elif 'mobilenet_v3' in backbone_name:\n backbone = mobilenetv3.__dict__[backbone_name](pretrained=pretrained_backbone, dilated=True).features\n\n # Gather the indices of blocks which are strided. 
These are the locations of C1, ..., Cn-1 blocks.\n # The first and last blocks are always included because they are the C0 (conv1) and Cn.\n stage_indices = [0] + [i for i, b in enumerate(backbone) if getattr(b, \"_is_cn\", False)] + [len(backbone) - 1]\n out_pos = stage_indices[-1] # use C5 which has output_stride = 16\n out_layer = str(out_pos)\n out_inplanes = backbone[out_pos].out_channels\n aux_pos = stage_indices[-4] # use C2 here which has output_stride = 8\n aux_layer = str(aux_pos)\n aux_inplanes = backbone[aux_pos].out_channels\n else:\n raise NotImplementedError('backbone {} is not supported as of now'.format(backbone_name))\n\n return_layers = {out_layer: 'out'}\n if aux:\n return_layers[aux_layer] = 'aux'\n backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)\n\n aux_classifier = None\n if aux:\n aux_classifier = FCNHead(aux_inplanes, num_classes)\n\n model_map = {\n 'deeplabv3': (DeepLabHead, DeepLabV3),\n 'fcn': (FCNHead, FCN),\n }\n classifier = model_map[name][0](out_inplanes, num_classes)\n base_model = model_map[name][1]\n\n model = base_model(backbone, classifier, aux_classifier)\n return model\n\n\ndef _load_model(arch_type, backbone, pretrained, progress, num_classes, aux_loss, **kwargs):\n if pretrained:\n aux_loss = True\n kwargs[\"pretrained_backbone\"] = False\n model = _segm_model(arch_type, backbone, num_classes, aux_loss, **kwargs)\n if pretrained:\n _load_weights(model, arch_type, backbone, progress)\n return model\n\n\ndef _load_weights(model, arch_type, backbone, progress):\n arch = arch_type + '_' + backbone + '_coco'\n model_url = model_urls.get(arch, None)\n if model_url is None:\n raise NotImplementedError('pretrained {} is not supported as of now'.format(arch))\n else:\n state_dict = load_state_dict_from_url(model_url, progress=progress)\n model.load_state_dict(state_dict)\n\n\ndef _segm_lraspp_mobilenetv3(backbone_name, num_classes, pretrained_backbone=True):\n backbone = mobilenetv3.__dict__[backbone_name](pretrained=pretrained_backbone, dilated=True).features\n\n # Gather the indices of blocks which are strided. 
These are the locations of C1, ..., Cn-1 blocks.\n # The first and last blocks are always included because they are the C0 (conv1) and Cn.\n stage_indices = [0] + [i for i, b in enumerate(backbone) if getattr(b, \"_is_cn\", False)] + [len(backbone) - 1]\n low_pos = stage_indices[-4] # use C2 here which has output_stride = 8\n high_pos = stage_indices[-1] # use C5 which has output_stride = 16\n low_channels = backbone[low_pos].out_channels\n high_channels = backbone[high_pos].out_channels\n\n backbone = IntermediateLayerGetter(backbone, return_layers={str(low_pos): 'low', str(high_pos): 'high'})\n\n model = LRASPP(backbone, low_channels, high_channels, num_classes)\n return model\n\n\ndef fcn_resnet50(pretrained=False, progress=True,\n num_classes=21, aux_loss=None, **kwargs):\n \"\"\"Constructs a Fully-Convolutional Network model with a ResNet-50 backbone.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017 which\n contains the same classes as Pascal VOC\n progress (bool): If True, displays a progress bar of the download to stderr\n num_classes (int): number of output classes of the model (including the background)\n aux_loss (bool): If True, it uses an auxiliary loss\n \"\"\"\n return _load_model('fcn', 'resnet50', pretrained, progress, num_classes, aux_loss, **kwargs)\n\n\ndef fcn_resnet101(pretrained=False, progress=True,\n num_classes=21, aux_loss=None, **kwargs):\n \"\"\"Constructs a Fully-Convolutional Network model with a ResNet-101 backbone.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017 which\n contains the same classes as Pascal VOC\n progress (bool): If True, displays a progress bar of the download to stderr\n num_classes (int): number of output classes of the model (including the background)\n aux_loss (bool): If True, it uses an auxiliary loss\n \"\"\"\n return _load_model('fcn', 'resnet101', pretrained, progress, num_classes, aux_loss, **kwargs)\n\n\ndef deeplabv3_resnet50(pretrained=False, progress=True,\n num_classes=21, aux_loss=None, **kwargs):\n \"\"\"Constructs a DeepLabV3 model with a ResNet-50 backbone.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017 which\n contains the same classes as Pascal VOC\n progress (bool): If True, displays a progress bar of the download to stderr\n num_classes (int): number of output classes of the model (including the background)\n aux_loss (bool): If True, it uses an auxiliary loss\n \"\"\"\n return _load_model('deeplabv3', 'resnet50', pretrained, progress, num_classes, aux_loss, **kwargs)\n\n\ndef deeplabv3_resnet101(pretrained=False, progress=True,\n num_classes=21, aux_loss=None, **kwargs):\n \"\"\"Constructs a DeepLabV3 model with a ResNet-101 backbone.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017 which\n contains the same classes as Pascal VOC\n progress (bool): If True, displays a progress bar of the download to stderr\n num_classes (int): The number of classes\n aux_loss (bool): If True, include an auxiliary classifier\n \"\"\"\n return _load_model('deeplabv3', 'resnet101', pretrained, progress, num_classes, aux_loss, **kwargs)\n\n\ndef deeplabv3_mobilenet_v3_large(pretrained=False, progress=True,\n num_classes=21, aux_loss=None, **kwargs):\n \"\"\"Constructs a DeepLabV3 model with a MobileNetV3-Large backbone.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017 which\n contains the same classes as Pascal VOC\n progress (bool): If True, displays a 
progress bar of the download to stderr\n num_classes (int): number of output classes of the model (including the background)\n aux_loss (bool): If True, it uses an auxiliary loss\n \"\"\"\n return _load_model('deeplabv3', 'mobilenet_v3_large', pretrained, progress, num_classes, aux_loss, **kwargs)\n\n\ndef lraspp_mobilenet_v3_large(pretrained=False, progress=True, num_classes=21, **kwargs):\n \"\"\"Constructs a Lite R-ASPP Network model with a MobileNetV3-Large backbone.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017 which\n contains the same classes as Pascal VOC\n progress (bool): If True, displays a progress bar of the download to stderr\n num_classes (int): number of output classes of the model (including the background)\n \"\"\"\n if kwargs.pop(\"aux_loss\", False):\n raise NotImplementedError('This model does not use auxiliary loss')\n\n backbone_name = 'mobilenet_v3_large'\n if pretrained:\n kwargs[\"pretrained_backbone\"] = False\n model = _segm_lraspp_mobilenetv3(backbone_name, num_classes, **kwargs)\n\n if pretrained:\n _load_weights(model, 'lraspp', backbone_name, progress)\n\n return model\n", "path": "torchvision/models/segmentation/segmentation.py"}]} |
gh_patches_debug_1219 | rasdani/github-patches | git_diff | kornia__kornia-1761 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ValueError when applying flip augmentations to boxes
### Describe the bug
I came across a strange bug while applying flip augmentations with bboxes. Running the code snippet below produces the following error some of the time.
For reference, my image is of shape `(B, C, H, W)` and my boxes are a list of length `B`, where each element is of shape `(N, 4)`.
```
ValueError: Input batch size must be the same for both tensors or 1.Got torch.Size([2, 3, 3]) and torch.Size([1, 8, 2])
```
The intermittent nature of the error could stem from the randomness in applying the augmentation: setting `p=1.0` or `p=0.0` leads to no error being reported.
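
A possible reading of the two shapes in the message (not stated in the report itself) is that the transform matrices for the whole batch are being paired with only the subset of boxes selected for the augmentation. A minimal, standalone illustration of that kind of mismatch, using nothing beyond plain PyTorch:

```python
import torch

mat = torch.eye(3).repeat(2, 1, 1)       # (2, 3, 3): one 3x3 transform per batch sample
pts = torch.rand(2, 8, 2)                # (2, 8, 2): 8 box corner points per sample
to_apply = torch.tensor([True, False])   # suppose the augmentation selected only sample 0

print(pts[to_apply].shape, mat.shape)    # torch.Size([1, 8, 2]) vs torch.Size([2, 3, 3])
print(mat[to_apply].shape)               # torch.Size([1, 3, 3]) -- batch sizes line up again
```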
### Reproduction steps
```python
import kornia.augmentation as K
import torch
augs = K.AugmentationSequential(
K.RandomHorizontalFlip(p=0.5),
K.RandomVerticalFlip(p=0.5),
data_keys=["input", "bbox_xyxy"],
)
image = torch.randn((2,3,200,200))
boxes = [
torch.tensor([
[1, 2, 2, 4],
[2, 4, 3, 6],
]),
torch.tensor([
[1, 2, 2, 4],
]),
]
tfm_img, tfm_bbox = augs(image, boxes)
```
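
For completeness, here is a seeded variant of the snippet above (not part of the original report; the seed range is arbitrary) that scans seeds until it hits a random state where only part of the batch gets flipped, which is presumably when the failure appears:

```python
# Reuses `augs`, `image` and `boxes` from the snippet above.
for seed in range(20):
    torch.manual_seed(seed)
    try:
        augs(image, boxes)
    except ValueError as err:
        print(f"seed {seed}: {err}")
        break
```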
### Expected behavior
The script should run successfully every time.
### Environment
```shell
PyTorch version: 1.11.0
Is debug build: False
CUDA used to build PyTorch: 11.5
ROCM used to build PyTorch: N/A
OS: Ubuntu 20.04.4 LTS (x86_64)
GCC version: (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0
Clang version: Could not collect
CMake version: version 3.23.2
Libc version: glibc-2.31
Python version: 3.9.13 | packaged by conda-forge | (main, May 27 2022, 16:56:21) [GCC 10.3.0] (64-bit runtime)
Python platform: Linux-5.4.0-117-generic-x86_64-with-glibc2.31
Is CUDA available: True
CUDA runtime version: 11.3.58
GPU models and configuration: GPU 0: NVIDIA TITAN Xp
Nvidia driver version: 470.129.06
cuDNN version: /usr/lib/x86_64-linux-gnu/libcudnn.so.7.6.5
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
Versions of relevant libraries:
[pip3] efficientnet-pytorch==0.6.3
[pip3] mypy==0.961
[pip3] mypy-extensions==0.4.3
[pip3] numpy==1.22.4
[pip3] pytorch-lightning==1.6.4
[pip3] pytorch-sphinx-theme==0.0.24
[pip3] segmentation-models-pytorch==0.2.1
[pip3] torch==1.11.0
[pip3] torchmetrics==0.9.0
[pip3] torchvision==0.12.0
[conda] blas 2.115 mkl conda-forge
[conda] blas-devel 3.9.0 15_linux64_mkl conda-forge
[conda] cudatoolkit 11.5.1 h59c8dcf_10 conda-forge
[conda] efficientnet-pytorch 0.6.3 pypi_0 pypi
[conda] ffmpeg 4.3 hf484d3e_0 pytorch
[conda] libblas 3.9.0 15_linux64_mkl conda-forge
[conda] libcblas 3.9.0 15_linux64_mkl conda-forge
[conda] liblapack 3.9.0 15_linux64_mkl conda-forge
[conda] liblapacke 3.9.0 15_linux64_mkl conda-forge
[conda] mkl 2022.1.0 h84fe81f_915 conda-forge
[conda] mkl-devel 2022.1.0 ha770c72_916 conda-forge
[conda] mkl-include 2022.1.0 h84fe81f_915 conda-forge
[conda] numpy 1.22.4 py39hc58783e_0 conda-forge
[conda] pytorch 1.11.0 py3.9_cuda11.5_cudnn8.3.2_0 pytorch
[conda] pytorch-lightning 1.6.4 pypi_0 pypi
[conda] pytorch-mutex 1.0 cuda pytorch
[conda] pytorch-sphinx-theme 0.0.24 pypi_0 pypi
[conda] segmentation-models-pytorch 0.2.1 pypi_0 pypi
[conda] torchmetrics 0.9.0 pypi_0 pypi
[conda] torchvision 0.12.0 py39_cu115 pytorch
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kornia/augmentation/container/utils.py`
Content:
```
1 import warnings
2 from abc import ABCMeta, abstractmethod
3 from functools import partial
4 from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Type, Union, cast
5
6 import torch
7 import torch.nn as nn
8 from torch import Tensor
9
10 import kornia # lazy loading for circular dependencies
11 from kornia.augmentation import GeometricAugmentationBase2D, MixAugmentationBase, RandomCrop, RandomErasing
12 from kornia.augmentation.base import _AugmentationBase
13 from kornia.augmentation.container.base import ParamItem
14 from kornia.augmentation.utils import override_parameters
15 from kornia.constants import DataKey
16 from kornia.geometry.bbox import transform_bbox
17 from kornia.geometry.linalg import transform_points
18 from kornia.utils.helpers import _torch_inverse_cast
19
20
21 def _get_geometric_only_param(
22 module: "kornia.augmentation.ImageSequential", param: List[ParamItem]
23 ) -> List[ParamItem]:
24 named_modules: Iterator[Tuple[str, nn.Module]] = module.get_forward_sequence(param)
25
26 res: List[ParamItem] = []
27 for (_, mod), p in zip(named_modules, param):
28 if isinstance(mod, (GeometricAugmentationBase2D,)):
29 res.append(p)
30 return res
31
32
33 class ApplyInverseInterface(metaclass=ABCMeta):
34 """Abstract interface for applying and inversing transformations."""
35
36 @classmethod
37 @abstractmethod
38 def apply_trans(
39 cls, input: Tensor, label: Optional[Tensor], module: nn.Module, param: ParamItem,
40 extra_args: Dict[str, Any] = {}
41 ) -> Tuple[Tensor, Optional[Tensor]]:
42 """Apply a transformation with respect to the parameters.
43
44 Args:
45 input: the input tensor.
46 label: the optional label tensor.
47 module: any torch Module but only kornia augmentation modules will count
48 to apply transformations.
49 param: the corresponding parameters to the module.
50 """
51 raise NotImplementedError
52
53 @classmethod
54 @abstractmethod
55 def inverse(
56 cls, input: Tensor, module: nn.Module, param: Optional[ParamItem] = None,
57 extra_args: Dict[str, Any] = {}
58 ) -> Tensor:
59 """Inverse a transformation with respect to the parameters.
60
61 Args:
62 input: the input tensor.
63 module: any torch Module but only kornia augmentation modules will count
64 to apply transformations.
65 param: the corresponding parameters to the module.
66 """
67 raise NotImplementedError
68
69
70 class ApplyInverseImpl(ApplyInverseInterface):
71 """Standard matrix apply and inverse methods."""
72
73 apply_func: Callable
74
75 @classmethod
76 def apply_trans(
77 cls, input: Tensor, label: Optional[Tensor], module: nn.Module, param: ParamItem,
78 extra_args: Dict[str, Any] = {}
79 ) -> Tuple[Tensor, Optional[Tensor]]:
80 """Apply a transformation with respect to the parameters.
81
82 Args:
83 input: the input tensor.
84 label: the optional label tensor.
85 module: any torch Module but only kornia augmentation modules will count
86 to apply transformations.
87 param: the corresponding parameters to the module.
88 """
89 mat: Optional[Tensor]
90 if hasattr(module, "transform_matrix") and module.transform_matrix is not None:
91 mat = cast(Tensor, module.transform_matrix)
92 else:
93 mat = cls._get_transformation(input, module, param, extra_args=extra_args)
94 mat = torch.as_tensor(mat, device=input.device, dtype=input.dtype)
95 to_apply = None
96 if isinstance(module, _AugmentationBase):
97 to_apply = param.data['batch_prob'] # type: ignore
98 if isinstance(module, kornia.augmentation.ImageSequential):
99 to_apply = torch.ones(input.shape[0], device=input.device, dtype=input.dtype).bool()
100
101 # If any inputs need to be transformed.
102 if mat is not None and to_apply is not None and to_apply.sum() != 0 and input.numel() > 0:
103 input[to_apply] = cls.apply_func(mat, input[to_apply])
104
105 return input, label
106
107 @classmethod
108 def inverse(
109 cls, input: Tensor, module: nn.Module, param: Optional[ParamItem] = None,
110 extra_args: Dict[str, Any] = {}
111 ) -> Tensor:
112 """Inverse a transformation with respect to the parameters.
113
114 Args:
115 input: the input tensor.
116 module: any torch Module but only kornia augmentation modules will count
117 to apply transformations.
118 param: the corresponding parameters to the module.
119 """
120 mat: Optional[Tensor]
121 if hasattr(module, "transform_matrix") and module.transform_matrix is not None:
122 mat = cast(Tensor, module.transform_matrix)
123 else:
124 mat = cls._get_transformation(input, module, param, extra_args=extra_args)
125 mat = torch.as_tensor(mat, device=input.device, dtype=input.dtype)
126
127 if mat is not None:
128 transform: Tensor = cls._get_inverse_transformation(mat)
129 input = cls.apply_func(torch.as_tensor(transform, device=input.device, dtype=input.dtype), input)
130 return input
131
132 @classmethod
133 def _get_transformation(
134 cls, input: Tensor, module: nn.Module, param: Optional[ParamItem] = None,
135 extra_args: Dict[str, Any] = {}
136 ) -> Optional[Tensor]:
137
138 if (
139 isinstance(module, (GeometricAugmentationBase2D, kornia.augmentation.ImageSequential))
140 and param is None
141 ):
142 raise ValueError(f"Parameters of transformation matrix for {module} has not been computed.")
143
144 mat: Optional[Tensor] = None
145 if isinstance(module, GeometricAugmentationBase2D):
146 _param = cast(Dict[str, Tensor], param.data) # type: ignore
147 flags = override_parameters(module.flags, extra_args)
148 mat = module.get_transformation_matrix(input, _param, flags=flags)
149 elif isinstance(module, kornia.augmentation.ImageSequential) and not module.is_intensity_only():
150 _param = cast(List[ParamItem], param.data) # type: ignore
151 mat = module.get_transformation_matrix(
152 input, _param, recompute=False, extra_args=extra_args) # type: ignore
153 else:
154 return None # No need to update anything
155 return mat
156
157 @classmethod
158 def _get_inverse_transformation(cls, transform: Tensor) -> Tensor:
159 return _torch_inverse_cast(transform)
160
161
162 class InputApplyInverse(ApplyInverseImpl):
163 """Apply and inverse transformations for (image) input tensors."""
164 data_key = DataKey.INPUT
165
166 @classmethod
167 def apply_trans( # type: ignore
168 cls, input: Tensor, label: Optional[Tensor], module: nn.Module, param: ParamItem,
169 extra_args: Dict[str, Any] = {}
170 ) -> Tuple[Tensor, Optional[Tensor]]:
171 """Apply a transformation with respect to the parameters.
172
173 Args:
174 input: the input tensor.
175 label: the optional label tensor.
176 module: any torch Module but only kornia augmentation modules will count
177 to apply transformations.
178 param: the corresponding parameters to the module.
179 """
180 if isinstance(module, (MixAugmentationBase,)):
181 input, label = module(input, label=label, params=param.data)
182 elif isinstance(module, (_AugmentationBase,)):
183 input = module(input, params=param.data, **extra_args)
184 elif isinstance(module, kornia.augmentation.ImageSequential):
185 temp = module.apply_inverse_func
186 temp2 = module.return_label
187 module.apply_inverse_func = InputApplyInverse
188 module.return_label = True
189 if isinstance(module, kornia.augmentation.AugmentationSequential):
190 input, label = module(input, label=label, params=param.data, data_keys=[cls.data_key])
191 else:
192 input, label = module(input, label=label, params=param.data, extra_args=extra_args)
193 module.apply_inverse_func = temp
194 module.return_label = temp2
195 else:
196 if param.data is not None:
197 raise AssertionError(f"Non-augmentaion operation {param.name} require empty parameters. Got {param}.")
198 # In case of return_transform = True
199 if isinstance(input, (tuple, list)):
200 input = (module(input[0]), input[1])
201 else:
202 input = module(input)
203 return input, label
204
205 @classmethod
206 def inverse(
207 cls, input: Tensor, module: nn.Module, param: Optional[ParamItem] = None,
208 extra_args: Dict[str, Any] = {}
209 ) -> Tensor:
210 """Inverse a transformation with respect to the parameters.
211
212 Args:
213 input: the input tensor.
214 module: any torch Module but only kornia augmentation modules will count
215 to apply transformations.
216 param: the corresponding parameters to the module.
217 """
218 if isinstance(module, GeometricAugmentationBase2D):
219 input = module.inverse(
220 input, params=None if param is None else cast(Dict, param.data), extra_args=extra_args)
221 elif isinstance(module, kornia.augmentation.ImageSequential):
222 temp = module.apply_inverse_func
223 module.apply_inverse_func = InputApplyInverse
224 if isinstance(module, kornia.augmentation.AugmentationSequential):
225 input = cast(Tensor, module.inverse(
226 input, params=None if param is None else cast(List, param.data)))
227 else:
228 input = module.inverse(
229 input, params=None if param is None else cast(List, param.data), extra_args=extra_args)
230 module.apply_inverse_func = temp
231 return input
232
233
234 class MaskApplyInverse(ApplyInverseImpl):
235 """Apply and inverse transformations for mask tensors."""
236 data_key = DataKey.MASK
237
238 @classmethod
239 def make_input_only_sequential(cls, module: "kornia.augmentation.ImageSequential") -> Callable:
240 """Disable all other additional inputs (e.g. ) for ImageSequential."""
241
242 def f(*args, **kwargs):
243 if_return_label = module.return_label
244 module.return_label = False
245 out = module(*args, **kwargs)
246 module.return_label = if_return_label
247 return out
248
249 return f
250
251 @classmethod
252 def apply_trans(
253 cls, input: Tensor, label: Optional[Tensor], module: nn.Module, param: Optional[ParamItem] = None,
254 extra_args: Dict[str, Any] = {}
255 ) -> Tuple[Tensor, Optional[Tensor]]:
256 """Apply a transformation with respect to the parameters.
257
258 Args:
259 input: the input tensor.
260 label: the optional label tensor.
261 module: any torch Module but only kornia augmentation modules will count
262 to apply transformations.
263 param: the corresponding parameters to the module.
264 """
265 if param is not None:
266 _param = param.data
267 else:
268 _param = None # type: ignore
269
270 if isinstance(module, (GeometricAugmentationBase2D, RandomErasing)):
271 _param = cast(Dict[str, Tensor], _param).copy()
272 # TODO: Parametrize value to pad with across the board for different keys
273 if 'values' in _param:
274 _param['values'] = torch.zeros_like(_param['values']) # Always pad with zeros
275
276 input = module(input, params=_param, **extra_args)
277 elif isinstance(module, kornia.augmentation.ImageSequential) and not module.is_intensity_only():
278 _param = cast(List[ParamItem], _param)
279 temp = module.apply_inverse_func
280 module.apply_inverse_func = MaskApplyInverse
281 geo_param: List[ParamItem] = _get_geometric_only_param(module, _param)
282 input = cls.make_input_only_sequential(module)(input, label=None, params=geo_param)
283 module.apply_inverse_func = temp
284 else:
285 pass # No need to update anything
286 return input, label
287
288 @classmethod
289 def inverse(
290 cls, input: Tensor, module: nn.Module, param: Optional[ParamItem] = None,
291 extra_args: Dict[str, Any] = {}
292 ) -> Tensor:
293 """Inverse a transformation with respect to the parameters.
294
295 Args:
296 input: the input tensor.
297 module: any torch Module but only kornia augmentation modules will count
298 to apply transformations.
299 param: the corresponding parameters to the module.
300 """
301
302 if isinstance(module, GeometricAugmentationBase2D):
303 input = module.inverse(
304 input, params=None if param is None else cast(Dict, param.data), **extra_args)
305 elif isinstance(module, kornia.augmentation.ImageSequential):
306 temp = module.apply_inverse_func
307 module.apply_inverse_func = MaskApplyInverse
308 input = module.inverse(
309 input, params=None if param is None else cast(List, param.data))
310 module.apply_inverse_func = temp
311 return input
312
313
314 class BBoxApplyInverse(ApplyInverseImpl):
315 """Apply and inverse transformations for bounding box tensors.
316
317 This is for transform boxes in the format (B, N, 4, 2).
318 """
319
320 @classmethod
321 def _get_padding_size(cls, module: nn.Module, param: Optional[ParamItem]) -> Optional[Tensor]:
322 if isinstance(module, RandomCrop):
323 _param = cast(Dict[str, Tensor], param.data) # type: ignore
324 return _param.get("padding_size")
325 return None
326
327 @classmethod
328 def pad(cls, input: Tensor, padding_size: Tensor) -> Tensor:
329 """
330 Args:
331 input: (B, N, 4, 2)
332 padding_size: (B, 4)
333 """
334 if len(input.shape) not in (3, 4,):
335 raise AssertionError(input.shape)
336
337 if len(padding_size.shape) != 2:
338 raise AssertionError(padding_size.shape)
339
340 _input = input.clone()
341
342 if input.dim() == 3:
343 # B,4,2 to B,1,4,2
344 _input = _input[:, None]
345
346 _input[..., 0] += padding_size[..., None, :1] # left padding
347 _input[..., 1] += padding_size[..., None, 2:3] # top padding
348
349 if input.dim() == 3:
350 _input = _input[:, 0] # squeeze back
351
352 return _input
353
354 @classmethod
355 def unpad(cls, input: Tensor, padding_size: Tensor) -> Tensor:
356 """
357 Args:
358 input: (B, N, 4, 2)
359 padding_size: (B, 4)
360 """
361 if len(input.shape) not in (3, 4,):
362 raise AssertionError(input.shape)
363
364 if len(padding_size.shape) != 2:
365 raise AssertionError(padding_size.shape)
366
367 _input = input.clone()
368
369 if input.dim() == 3:
370 # B,4,2 to B,1,4,2
371 _input = _input[:, None]
372
373 _input[..., 0] -= padding_size[..., None, :1] # left padding
374 _input[..., 1] -= padding_size[..., None, 2:3] # top padding
375
376 if input.dim() == 3:
377 _input = _input[:, 0] # squeeze back
378
379 return _input
380
381 apply_func = partial(transform_bbox, mode="xyxy", restore_coordinates=True)
382
383 @classmethod
384 def apply_trans(
385 cls, input: Tensor, label: Optional[Tensor], module: nn.Module, param: ParamItem,
386 extra_args: Dict[str, Any] = {}
387 ) -> Tuple[Tensor, Optional[Tensor]]:
388 """Apply a transformation with respect to the parameters.
389
390 Args:
391 input: the input tensor, (B, N, 4, 2) or (B, 4, 2).
392 label: the optional label tensor.
393 module: any torch Module but only kornia augmentation modules will count
394 to apply transformations.
395 param: the corresponding parameters to the module.
396 """
397 _input = input.clone()
398
399 padding_size = cls._get_padding_size(module, param)
400 if padding_size is not None:
401 _input = cls.pad(_input, padding_size.to(_input))
402
403 _input, label = super().apply_trans(_input, label, module, param, extra_args=extra_args)
404
405 # TODO: Filter/crop boxes outside crop (with negative or larger than crop size coords)?
406
407 return _input, label
408
409 @classmethod
410 def inverse(
411 cls, input: Tensor, module: nn.Module, param: Optional[ParamItem] = None,
412 extra_args: Dict[str, Any] = {}
413 ) -> Tensor:
414 """Inverse a transformation with respect to the parameters.
415
416 Args:
417 input: the input tensor.
418 module: any torch Module but only kornia augmentation modules will count
419 to apply transformations.
420 param: the corresponding parameters to the module.
421 """
422 _input = input.clone()
423
424 _input = super().inverse(_input, module, param, extra_args=extra_args)
425
426 padding_size = cls._get_padding_size(module, param)
427 if padding_size is not None:
428 _input = cls.unpad(_input, padding_size.to(input))
429
430 return _input
431
432
433 class BBoxXYXYApplyInverse(BBoxApplyInverse):
434 """Apply and inverse transformations for bounding box tensors.
435
436 This is for transform boxes in the format [xmin, ymin, xmax, ymax].
437 """
438
439 apply_func = partial(transform_bbox, mode="xyxy", restore_coordinates=True)
440
441 @classmethod
442 def pad(cls, input, padding_size):
443 _padding_size = padding_size.to(input)
444 for i in range(len(_padding_size)):
445 input[i, :, 0::2] += _padding_size[i][0] # left padding
446 input[i, :, 1::2] += _padding_size[i][2] # top padding
447 return input
448
449 @classmethod
450 def unpad(cls, input, padding_size):
451 _padding_size = padding_size.to(input)
452 for i in range(len(_padding_size)):
453 input[i, :, 0::2] -= _padding_size[i][0] # left padding
454 input[i, :, 1::2] -= _padding_size[i][2] # top padding
455 return input
456
457 @classmethod
458 def apply_trans(
459 cls, input: Tensor, label: Optional[Tensor], module: nn.Module, param: ParamItem,
460 extra_args: Dict[str, Any] = {}
461 ) -> Tuple[Tensor, Optional[Tensor]]:
462 warnings.warn("BBoxXYXYApplyInverse is no longer maintained. Please use BBoxApplyInverse instead.")
463 return super().apply_trans(input, label=label, module=module, param=param, extra_args=extra_args)
464
465 @classmethod
466 def inverse(
467 cls, input: Tensor, module: nn.Module, param: Optional[ParamItem] = None,
468 extra_args: Dict[str, Any] = {}
469 ) -> Tensor:
470 warnings.warn("BBoxXYXYApplyInverse is no longer maintained. Please use BBoxApplyInverse instead.")
471 return super().inverse(input, module=module, param=param, extra_args=extra_args)
472
473
474 class BBoxXYWHApplyInverse(BBoxXYXYApplyInverse):
475 """Apply and inverse transformations for bounding box tensors.
476
477 This is for transform boxes in the format [xmin, ymin, width, height].
478 """
479
480 apply_func = partial(transform_bbox, mode="xywh", restore_coordinates=True)
481
482 @classmethod
483 def pad(cls, input, padding_size):
484 _padding_size = padding_size.to(input)
485 # pad only xy, not wh
486 for i in range(len(_padding_size)):
487 input[i, :, 0] += _padding_size[i][0] # left padding
488 input[i, :, 1] += _padding_size[i][2] # top padding
489 return input
490
491 @classmethod
492 def unpad(cls, input, padding_size):
493 _padding_size = padding_size.to(input)
494 # unpad only xy, not wh
495 for i in range(len(_padding_size)):
496 input[i, :, 0] -= _padding_size[i][0] # left padding
497 input[i, :, 1] -= _padding_size[i][2] # top padding
498 return input
499
500
501 class KeypointsApplyInverse(BBoxApplyInverse):
502 """Apply and inverse transformations for keypoints tensors.
503
504 This is for transform keypoints in the format (B, N, 2).
505 """
506
507 # Hot fix for the typing mismatching
508 apply_func = partial(transform_points)
509
510 @classmethod
511 def pad(cls, input: Tensor, padding_size: Tensor) -> Tensor:
512
513 if len(input.shape) not in (2, 3,):
514 raise AssertionError(input.shape)
515
516 if len(padding_size.shape) != 2:
517 raise AssertionError(padding_size.shape)
518
519 _input = input.clone()
520
521 if input.dim() == 2:
522 # B,2 to B,1,2
523 _input = _input[:, None]
524
525 _input[..., 0] += padding_size[..., :1] # left padding
526 _input[..., 1] += padding_size[..., 2:3] # top padding
527
528 if input.dim() == 2:
529 _input = _input[:, 0] # squeeze back
530
531 return _input
532
533 @classmethod
534 def unpad(cls, input: Tensor, padding_size: Tensor) -> Tensor:
535
536 if len(input.shape) not in (2, 3,):
537 raise AssertionError(input.shape)
538 if len(padding_size.shape) != 2:
539 raise AssertionError(padding_size.shape)
540
541 _input = input.clone()
542
543 if input.dim() == 2:
544 # B,2 to B,1,2
545 _input = _input[:, None]
546
547 # unpad only xy, not wh
548 _input[..., 0] -= padding_size[..., :1] # left padding
549 _input[..., 1] -= padding_size[..., 2:3] # top padding
550
551 if input.dim() == 2:
552 _input = _input[:, 0] # squeeze back
553
554 return _input
555
556
557 class ApplyInverse:
558 """Apply and inverse transformations for any tensors (e.g. mask, box, points)."""
559
560 @classmethod
561 def _get_func_by_key(cls, dcate: Union[str, int, DataKey]) -> Type[ApplyInverseInterface]:
562 if DataKey.get(dcate) == DataKey.INPUT:
563 return InputApplyInverse
564 if DataKey.get(dcate) == DataKey.MASK:
565 return MaskApplyInverse
566 if DataKey.get(dcate) in [DataKey.BBOX, DataKey.BBOX_XYXY, DataKey.BBOX_XYWH]:
567 # We are converting to (B, 4, 2) internally for all formats.
568 return BBoxApplyInverse
569 if DataKey.get(dcate) in [DataKey.KEYPOINTS]:
570 return KeypointsApplyInverse
571 raise NotImplementedError(f"input type of {dcate} is not implemented.")
572
573 @classmethod
574 def apply_by_key(
575 cls,
576 input: Tensor,
577 label: Optional[Tensor],
578 module: nn.Module,
579 param: ParamItem,
580 dcate: Union[str, int, DataKey] = DataKey.INPUT,
581 extra_args: Dict[str, Any] = {}
582 ) -> Tuple[Tensor, Optional[Tensor]]:
583 """Apply a transformation with respect to the parameters.
584
585 Args:
586 input: the input tensor.
587 label: the optional label tensor.
588 module: any torch Module but only kornia augmentation modules will count
589 to apply transformations.
590 param: the corresponding parameters to the module.
591 dcate: data category. 'input', 'mask', 'bbox', 'bbox_xyxy', 'bbox_xyhw', 'keypoints'.
592 By default, it is set to 'input'.
593 """
594 func: Type[ApplyInverseInterface] = cls._get_func_by_key(dcate)
595
596 if isinstance(input, (tuple,)):
597 # If the input is a tuple with (input, mat) or something else
598 return (func.apply_trans(input[0], label, module, param, extra_args), *input[1:]) # type: ignore
599 return func.apply_trans(input, label, module=module, param=param, extra_args=extra_args)
600
601 @classmethod
602 def inverse_by_key(
603 cls,
604 input: Tensor,
605 module: nn.Module,
606 param: Optional[ParamItem] = None,
607 dcate: Union[str, int, DataKey] = DataKey.INPUT,
608 extra_args: Dict[str, Any] = {}
609 ) -> Tensor:
610 """Inverse a transformation with respect to the parameters.
611
612 Args:
613 input: the input tensor.
614 module: any torch Module but only kornia augmentation modules will count
615 to apply transformations.
616 param: the corresponding parameters to the module.
617 dcate: data category. 'input', 'mask', 'bbox', 'bbox_xyxy', 'bbox_xyhw', 'keypoints'.
618 By default, it is set to 'input'.
619 """
620 func: Type[ApplyInverseInterface] = cls._get_func_by_key(dcate)
621 return func.inverse(input, module, param, extra_args=extra_args)
622
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kornia/augmentation/container/utils.py b/kornia/augmentation/container/utils.py
--- a/kornia/augmentation/container/utils.py
+++ b/kornia/augmentation/container/utils.py
@@ -100,7 +100,7 @@
# If any inputs need to be transformed.
if mat is not None and to_apply is not None and to_apply.sum() != 0 and input.numel() > 0:
- input[to_apply] = cls.apply_func(mat, input[to_apply])
+ input[to_apply] = cls.apply_func(mat[to_apply], input[to_apply])
return input, label
| {"golden_diff": "diff --git a/kornia/augmentation/container/utils.py b/kornia/augmentation/container/utils.py\n--- a/kornia/augmentation/container/utils.py\n+++ b/kornia/augmentation/container/utils.py\n@@ -100,7 +100,7 @@\n \n # If any inputs need to be transformed.\n if mat is not None and to_apply is not None and to_apply.sum() != 0 and input.numel() > 0:\n- input[to_apply] = cls.apply_func(mat, input[to_apply])\n+ input[to_apply] = cls.apply_func(mat[to_apply], input[to_apply])\n \n return input, label\n", "issue": "ValueError when applying flip augmentations to boxes\n### Describe the bug\r\n\r\nCame across a strange bug while applying flip augmentations with bboxes. Running the below code snippet produces the following error some of the time. \r\n\r\nFor reference my image is of shape `(B, C, H, W)` and my boxes are a list of length `B` where each element is of shape `(N,4)`\r\n\r\n```\r\nValueError: Input batch size must be the same for both tensors or 1.Got torch.Size([2, 3, 3]) and torch.Size([1, 8, 2])\r\n```\r\n\r\nThe randomness of the message could be stemming from the randomness in applying the augmentation but setting `p=1.0` or `p=0.0` leads to no error being reported.\r\n\r\n### Reproduction steps\r\n\r\n```bash\r\nimport kornia.augmentation as K\r\nimport torch\r\n\r\naugs = K.AugmentationSequential(\r\n K.RandomHorizontalFlip(p=0.5),\r\n K.RandomVerticalFlip(p=0.5),\r\n data_keys=[\"input\", \"bbox_xyxy\"],\r\n)\r\n\r\nimage = torch.randn((2,3,200,200))\r\nboxes = [\r\n torch.tensor([\r\n [1, 2, 2, 4],\r\n [2, 4, 3, 6],\r\n ]),\r\n torch.tensor([\r\n [1, 2, 2, 4],\r\n ]),\r\n]\r\n\r\ntfm_img, tfm_bbox = augs(image, boxes)\r\n```\r\n\r\n\r\n### Expected behavior\r\n\r\nScript should run successfully all the time\r\n\r\n### Environment\r\n\r\n```shell\r\nPyTorch version: 1.11.0\r\nIs debug build: False\r\nCUDA used to build PyTorch: 11.5\r\nROCM used to build PyTorch: N/A\r\n\r\nOS: Ubuntu 20.04.4 LTS (x86_64)\r\nGCC version: (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0\r\nClang version: Could not collect\r\nCMake version: version 3.23.2\r\nLibc version: glibc-2.31\r\n\r\nPython version: 3.9.13 | packaged by conda-forge | (main, May 27 2022, 16:56:21) [GCC 10.3.0] (64-bit runtime)\r\nPython platform: Linux-5.4.0-117-generic-x86_64-with-glibc2.31\r\nIs CUDA available: True\r\nCUDA runtime version: 11.3.58\r\nGPU models and configuration: GPU 0: NVIDIA TITAN Xp\r\nNvidia driver version: 470.129.06\r\ncuDNN version: /usr/lib/x86_64-linux-gnu/libcudnn.so.7.6.5\r\nHIP runtime version: N/A\r\nMIOpen runtime version: N/A\r\nIs XNNPACK available: True\r\n\r\nVersions of relevant libraries:\r\n[pip3] efficientnet-pytorch==0.6.3\r\n[pip3] mypy==0.961\r\n[pip3] mypy-extensions==0.4.3\r\n[pip3] numpy==1.22.4\r\n[pip3] pytorch-lightning==1.6.4\r\n[pip3] pytorch-sphinx-theme==0.0.24\r\n[pip3] segmentation-models-pytorch==0.2.1\r\n[pip3] torch==1.11.0\r\n[pip3] torchmetrics==0.9.0\r\n[pip3] torchvision==0.12.0\r\n[conda] blas 2.115 mkl conda-forge\r\n[conda] blas-devel 3.9.0 15_linux64_mkl conda-forge\r\n[conda] cudatoolkit 11.5.1 h59c8dcf_10 conda-forge\r\n[conda] efficientnet-pytorch 0.6.3 pypi_0 pypi\r\n[conda] ffmpeg 4.3 hf484d3e_0 pytorch\r\n[conda] libblas 3.9.0 15_linux64_mkl conda-forge\r\n[conda] libcblas 3.9.0 15_linux64_mkl conda-forge\r\n[conda] liblapack 3.9.0 15_linux64_mkl conda-forge\r\n[conda] liblapacke 3.9.0 15_linux64_mkl conda-forge\r\n[conda] mkl 2022.1.0 h84fe81f_915 conda-forge\r\n[conda] mkl-devel 2022.1.0 ha770c72_916 conda-forge\r\n[conda] mkl-include 2022.1.0 
h84fe81f_915 conda-forge\r\n[conda] numpy 1.22.4 py39hc58783e_0 conda-forge\r\n[conda] pytorch 1.11.0 py3.9_cuda11.5_cudnn8.3.2_0 pytorch\r\n[conda] pytorch-lightning 1.6.4 pypi_0 pypi\r\n[conda] pytorch-mutex 1.0 cuda pytorch\r\n[conda] pytorch-sphinx-theme 0.0.24 pypi_0 pypi\r\n[conda] segmentation-models-pytorch 0.2.1 pypi_0 pypi\r\n[conda] torchmetrics 0.9.0 pypi_0 pypi\r\n[conda] torchvision 0.12.0 py39_cu115 pytorch\r\n```\r\n\n", "before_files": [{"content": "import warnings\nfrom abc import ABCMeta, abstractmethod\nfrom functools import partial\nfrom typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Type, Union, cast\n\nimport torch\nimport torch.nn as nn\nfrom torch import Tensor\n\nimport kornia # lazy loading for circular dependencies\nfrom kornia.augmentation import GeometricAugmentationBase2D, MixAugmentationBase, RandomCrop, RandomErasing\nfrom kornia.augmentation.base import _AugmentationBase\nfrom kornia.augmentation.container.base import ParamItem\nfrom kornia.augmentation.utils import override_parameters\nfrom kornia.constants import DataKey\nfrom kornia.geometry.bbox import transform_bbox\nfrom kornia.geometry.linalg import transform_points\nfrom kornia.utils.helpers import _torch_inverse_cast\n\n\ndef _get_geometric_only_param(\n module: \"kornia.augmentation.ImageSequential\", param: List[ParamItem]\n) -> List[ParamItem]:\n named_modules: Iterator[Tuple[str, nn.Module]] = module.get_forward_sequence(param)\n\n res: List[ParamItem] = []\n for (_, mod), p in zip(named_modules, param):\n if isinstance(mod, (GeometricAugmentationBase2D,)):\n res.append(p)\n return res\n\n\nclass ApplyInverseInterface(metaclass=ABCMeta):\n \"\"\"Abstract interface for applying and inversing transformations.\"\"\"\n\n @classmethod\n @abstractmethod\n def apply_trans(\n cls, input: Tensor, label: Optional[Tensor], module: nn.Module, param: ParamItem,\n extra_args: Dict[str, Any] = {}\n ) -> Tuple[Tensor, Optional[Tensor]]:\n \"\"\"Apply a transformation with respect to the parameters.\n\n Args:\n input: the input tensor.\n label: the optional label tensor.\n module: any torch Module but only kornia augmentation modules will count\n to apply transformations.\n param: the corresponding parameters to the module.\n \"\"\"\n raise NotImplementedError\n\n @classmethod\n @abstractmethod\n def inverse(\n cls, input: Tensor, module: nn.Module, param: Optional[ParamItem] = None,\n extra_args: Dict[str, Any] = {}\n ) -> Tensor:\n \"\"\"Inverse a transformation with respect to the parameters.\n\n Args:\n input: the input tensor.\n module: any torch Module but only kornia augmentation modules will count\n to apply transformations.\n param: the corresponding parameters to the module.\n \"\"\"\n raise NotImplementedError\n\n\nclass ApplyInverseImpl(ApplyInverseInterface):\n \"\"\"Standard matrix apply and inverse methods.\"\"\"\n\n apply_func: Callable\n\n @classmethod\n def apply_trans(\n cls, input: Tensor, label: Optional[Tensor], module: nn.Module, param: ParamItem,\n extra_args: Dict[str, Any] = {}\n ) -> Tuple[Tensor, Optional[Tensor]]:\n \"\"\"Apply a transformation with respect to the parameters.\n\n Args:\n input: the input tensor.\n label: the optional label tensor.\n module: any torch Module but only kornia augmentation modules will count\n to apply transformations.\n param: the corresponding parameters to the module.\n \"\"\"\n mat: Optional[Tensor]\n if hasattr(module, \"transform_matrix\") and module.transform_matrix is not None:\n mat = cast(Tensor, 
module.transform_matrix)\n else:\n mat = cls._get_transformation(input, module, param, extra_args=extra_args)\n mat = torch.as_tensor(mat, device=input.device, dtype=input.dtype)\n to_apply = None\n if isinstance(module, _AugmentationBase):\n to_apply = param.data['batch_prob'] # type: ignore\n if isinstance(module, kornia.augmentation.ImageSequential):\n to_apply = torch.ones(input.shape[0], device=input.device, dtype=input.dtype).bool()\n\n # If any inputs need to be transformed.\n if mat is not None and to_apply is not None and to_apply.sum() != 0 and input.numel() > 0:\n input[to_apply] = cls.apply_func(mat, input[to_apply])\n\n return input, label\n\n @classmethod\n def inverse(\n cls, input: Tensor, module: nn.Module, param: Optional[ParamItem] = None,\n extra_args: Dict[str, Any] = {}\n ) -> Tensor:\n \"\"\"Inverse a transformation with respect to the parameters.\n\n Args:\n input: the input tensor.\n module: any torch Module but only kornia augmentation modules will count\n to apply transformations.\n param: the corresponding parameters to the module.\n \"\"\"\n mat: Optional[Tensor]\n if hasattr(module, \"transform_matrix\") and module.transform_matrix is not None:\n mat = cast(Tensor, module.transform_matrix)\n else:\n mat = cls._get_transformation(input, module, param, extra_args=extra_args)\n mat = torch.as_tensor(mat, device=input.device, dtype=input.dtype)\n\n if mat is not None:\n transform: Tensor = cls._get_inverse_transformation(mat)\n input = cls.apply_func(torch.as_tensor(transform, device=input.device, dtype=input.dtype), input)\n return input\n\n @classmethod\n def _get_transformation(\n cls, input: Tensor, module: nn.Module, param: Optional[ParamItem] = None,\n extra_args: Dict[str, Any] = {}\n ) -> Optional[Tensor]:\n\n if (\n isinstance(module, (GeometricAugmentationBase2D, kornia.augmentation.ImageSequential))\n and param is None\n ):\n raise ValueError(f\"Parameters of transformation matrix for {module} has not been computed.\")\n\n mat: Optional[Tensor] = None\n if isinstance(module, GeometricAugmentationBase2D):\n _param = cast(Dict[str, Tensor], param.data) # type: ignore\n flags = override_parameters(module.flags, extra_args)\n mat = module.get_transformation_matrix(input, _param, flags=flags)\n elif isinstance(module, kornia.augmentation.ImageSequential) and not module.is_intensity_only():\n _param = cast(List[ParamItem], param.data) # type: ignore\n mat = module.get_transformation_matrix(\n input, _param, recompute=False, extra_args=extra_args) # type: ignore\n else:\n return None # No need to update anything\n return mat\n\n @classmethod\n def _get_inverse_transformation(cls, transform: Tensor) -> Tensor:\n return _torch_inverse_cast(transform)\n\n\nclass InputApplyInverse(ApplyInverseImpl):\n \"\"\"Apply and inverse transformations for (image) input tensors.\"\"\"\n data_key = DataKey.INPUT\n\n @classmethod\n def apply_trans( # type: ignore\n cls, input: Tensor, label: Optional[Tensor], module: nn.Module, param: ParamItem,\n extra_args: Dict[str, Any] = {}\n ) -> Tuple[Tensor, Optional[Tensor]]:\n \"\"\"Apply a transformation with respect to the parameters.\n\n Args:\n input: the input tensor.\n label: the optional label tensor.\n module: any torch Module but only kornia augmentation modules will count\n to apply transformations.\n param: the corresponding parameters to the module.\n \"\"\"\n if isinstance(module, (MixAugmentationBase,)):\n input, label = module(input, label=label, params=param.data)\n elif isinstance(module, (_AugmentationBase,)):\n 
input = module(input, params=param.data, **extra_args)\n elif isinstance(module, kornia.augmentation.ImageSequential):\n temp = module.apply_inverse_func\n temp2 = module.return_label\n module.apply_inverse_func = InputApplyInverse\n module.return_label = True\n if isinstance(module, kornia.augmentation.AugmentationSequential):\n input, label = module(input, label=label, params=param.data, data_keys=[cls.data_key])\n else:\n input, label = module(input, label=label, params=param.data, extra_args=extra_args)\n module.apply_inverse_func = temp\n module.return_label = temp2\n else:\n if param.data is not None:\n raise AssertionError(f\"Non-augmentaion operation {param.name} require empty parameters. Got {param}.\")\n # In case of return_transform = True\n if isinstance(input, (tuple, list)):\n input = (module(input[0]), input[1])\n else:\n input = module(input)\n return input, label\n\n @classmethod\n def inverse(\n cls, input: Tensor, module: nn.Module, param: Optional[ParamItem] = None,\n extra_args: Dict[str, Any] = {}\n ) -> Tensor:\n \"\"\"Inverse a transformation with respect to the parameters.\n\n Args:\n input: the input tensor.\n module: any torch Module but only kornia augmentation modules will count\n to apply transformations.\n param: the corresponding parameters to the module.\n \"\"\"\n if isinstance(module, GeometricAugmentationBase2D):\n input = module.inverse(\n input, params=None if param is None else cast(Dict, param.data), extra_args=extra_args)\n elif isinstance(module, kornia.augmentation.ImageSequential):\n temp = module.apply_inverse_func\n module.apply_inverse_func = InputApplyInverse\n if isinstance(module, kornia.augmentation.AugmentationSequential):\n input = cast(Tensor, module.inverse(\n input, params=None if param is None else cast(List, param.data)))\n else:\n input = module.inverse(\n input, params=None if param is None else cast(List, param.data), extra_args=extra_args)\n module.apply_inverse_func = temp\n return input\n\n\nclass MaskApplyInverse(ApplyInverseImpl):\n \"\"\"Apply and inverse transformations for mask tensors.\"\"\"\n data_key = DataKey.MASK\n\n @classmethod\n def make_input_only_sequential(cls, module: \"kornia.augmentation.ImageSequential\") -> Callable:\n \"\"\"Disable all other additional inputs (e.g. 
) for ImageSequential.\"\"\"\n\n def f(*args, **kwargs):\n if_return_label = module.return_label\n module.return_label = False\n out = module(*args, **kwargs)\n module.return_label = if_return_label\n return out\n\n return f\n\n @classmethod\n def apply_trans(\n cls, input: Tensor, label: Optional[Tensor], module: nn.Module, param: Optional[ParamItem] = None,\n extra_args: Dict[str, Any] = {}\n ) -> Tuple[Tensor, Optional[Tensor]]:\n \"\"\"Apply a transformation with respect to the parameters.\n\n Args:\n input: the input tensor.\n label: the optional label tensor.\n module: any torch Module but only kornia augmentation modules will count\n to apply transformations.\n param: the corresponding parameters to the module.\n \"\"\"\n if param is not None:\n _param = param.data\n else:\n _param = None # type: ignore\n\n if isinstance(module, (GeometricAugmentationBase2D, RandomErasing)):\n _param = cast(Dict[str, Tensor], _param).copy()\n # TODO: Parametrize value to pad with across the board for different keys\n if 'values' in _param:\n _param['values'] = torch.zeros_like(_param['values']) # Always pad with zeros\n\n input = module(input, params=_param, **extra_args)\n elif isinstance(module, kornia.augmentation.ImageSequential) and not module.is_intensity_only():\n _param = cast(List[ParamItem], _param)\n temp = module.apply_inverse_func\n module.apply_inverse_func = MaskApplyInverse\n geo_param: List[ParamItem] = _get_geometric_only_param(module, _param)\n input = cls.make_input_only_sequential(module)(input, label=None, params=geo_param)\n module.apply_inverse_func = temp\n else:\n pass # No need to update anything\n return input, label\n\n @classmethod\n def inverse(\n cls, input: Tensor, module: nn.Module, param: Optional[ParamItem] = None,\n extra_args: Dict[str, Any] = {}\n ) -> Tensor:\n \"\"\"Inverse a transformation with respect to the parameters.\n\n Args:\n input: the input tensor.\n module: any torch Module but only kornia augmentation modules will count\n to apply transformations.\n param: the corresponding parameters to the module.\n \"\"\"\n\n if isinstance(module, GeometricAugmentationBase2D):\n input = module.inverse(\n input, params=None if param is None else cast(Dict, param.data), **extra_args)\n elif isinstance(module, kornia.augmentation.ImageSequential):\n temp = module.apply_inverse_func\n module.apply_inverse_func = MaskApplyInverse\n input = module.inverse(\n input, params=None if param is None else cast(List, param.data))\n module.apply_inverse_func = temp\n return input\n\n\nclass BBoxApplyInverse(ApplyInverseImpl):\n \"\"\"Apply and inverse transformations for bounding box tensors.\n\n This is for transform boxes in the format (B, N, 4, 2).\n \"\"\"\n\n @classmethod\n def _get_padding_size(cls, module: nn.Module, param: Optional[ParamItem]) -> Optional[Tensor]:\n if isinstance(module, RandomCrop):\n _param = cast(Dict[str, Tensor], param.data) # type: ignore\n return _param.get(\"padding_size\")\n return None\n\n @classmethod\n def pad(cls, input: Tensor, padding_size: Tensor) -> Tensor:\n \"\"\"\n Args:\n input: (B, N, 4, 2)\n padding_size: (B, 4)\n \"\"\"\n if len(input.shape) not in (3, 4,):\n raise AssertionError(input.shape)\n\n if len(padding_size.shape) != 2:\n raise AssertionError(padding_size.shape)\n\n _input = input.clone()\n\n if input.dim() == 3:\n # B,4,2 to B,1,4,2\n _input = _input[:, None]\n\n _input[..., 0] += padding_size[..., None, :1] # left padding\n _input[..., 1] += padding_size[..., None, 2:3] # top padding\n\n if input.dim() == 3:\n _input 
= _input[:, 0] # squeeze back\n\n return _input\n\n @classmethod\n def unpad(cls, input: Tensor, padding_size: Tensor) -> Tensor:\n \"\"\"\n Args:\n input: (B, N, 4, 2)\n padding_size: (B, 4)\n \"\"\"\n if len(input.shape) not in (3, 4,):\n raise AssertionError(input.shape)\n\n if len(padding_size.shape) != 2:\n raise AssertionError(padding_size.shape)\n\n _input = input.clone()\n\n if input.dim() == 3:\n # B,4,2 to B,1,4,2\n _input = _input[:, None]\n\n _input[..., 0] -= padding_size[..., None, :1] # left padding\n _input[..., 1] -= padding_size[..., None, 2:3] # top padding\n\n if input.dim() == 3:\n _input = _input[:, 0] # squeeze back\n\n return _input\n\n apply_func = partial(transform_bbox, mode=\"xyxy\", restore_coordinates=True)\n\n @classmethod\n def apply_trans(\n cls, input: Tensor, label: Optional[Tensor], module: nn.Module, param: ParamItem,\n extra_args: Dict[str, Any] = {}\n ) -> Tuple[Tensor, Optional[Tensor]]:\n \"\"\"Apply a transformation with respect to the parameters.\n\n Args:\n input: the input tensor, (B, N, 4, 2) or (B, 4, 2).\n label: the optional label tensor.\n module: any torch Module but only kornia augmentation modules will count\n to apply transformations.\n param: the corresponding parameters to the module.\n \"\"\"\n _input = input.clone()\n\n padding_size = cls._get_padding_size(module, param)\n if padding_size is not None:\n _input = cls.pad(_input, padding_size.to(_input))\n\n _input, label = super().apply_trans(_input, label, module, param, extra_args=extra_args)\n\n # TODO: Filter/crop boxes outside crop (with negative or larger than crop size coords)?\n\n return _input, label\n\n @classmethod\n def inverse(\n cls, input: Tensor, module: nn.Module, param: Optional[ParamItem] = None,\n extra_args: Dict[str, Any] = {}\n ) -> Tensor:\n \"\"\"Inverse a transformation with respect to the parameters.\n\n Args:\n input: the input tensor.\n module: any torch Module but only kornia augmentation modules will count\n to apply transformations.\n param: the corresponding parameters to the module.\n \"\"\"\n _input = input.clone()\n\n _input = super().inverse(_input, module, param, extra_args=extra_args)\n\n padding_size = cls._get_padding_size(module, param)\n if padding_size is not None:\n _input = cls.unpad(_input, padding_size.to(input))\n\n return _input\n\n\nclass BBoxXYXYApplyInverse(BBoxApplyInverse):\n \"\"\"Apply and inverse transformations for bounding box tensors.\n\n This is for transform boxes in the format [xmin, ymin, xmax, ymax].\n \"\"\"\n\n apply_func = partial(transform_bbox, mode=\"xyxy\", restore_coordinates=True)\n\n @classmethod\n def pad(cls, input, padding_size):\n _padding_size = padding_size.to(input)\n for i in range(len(_padding_size)):\n input[i, :, 0::2] += _padding_size[i][0] # left padding\n input[i, :, 1::2] += _padding_size[i][2] # top padding\n return input\n\n @classmethod\n def unpad(cls, input, padding_size):\n _padding_size = padding_size.to(input)\n for i in range(len(_padding_size)):\n input[i, :, 0::2] -= _padding_size[i][0] # left padding\n input[i, :, 1::2] -= _padding_size[i][2] # top padding\n return input\n\n @classmethod\n def apply_trans(\n cls, input: Tensor, label: Optional[Tensor], module: nn.Module, param: ParamItem,\n extra_args: Dict[str, Any] = {}\n ) -> Tuple[Tensor, Optional[Tensor]]:\n warnings.warn(\"BBoxXYXYApplyInverse is no longer maintained. 
Please use BBoxApplyInverse instead.\")\n return super().apply_trans(input, label=label, module=module, param=param, extra_args=extra_args)\n\n @classmethod\n def inverse(\n cls, input: Tensor, module: nn.Module, param: Optional[ParamItem] = None,\n extra_args: Dict[str, Any] = {}\n ) -> Tensor:\n warnings.warn(\"BBoxXYXYApplyInverse is no longer maintained. Please use BBoxApplyInverse instead.\")\n return super().inverse(input, module=module, param=param, extra_args=extra_args)\n\n\nclass BBoxXYWHApplyInverse(BBoxXYXYApplyInverse):\n \"\"\"Apply and inverse transformations for bounding box tensors.\n\n This is for transform boxes in the format [xmin, ymin, width, height].\n \"\"\"\n\n apply_func = partial(transform_bbox, mode=\"xywh\", restore_coordinates=True)\n\n @classmethod\n def pad(cls, input, padding_size):\n _padding_size = padding_size.to(input)\n # pad only xy, not wh\n for i in range(len(_padding_size)):\n input[i, :, 0] += _padding_size[i][0] # left padding\n input[i, :, 1] += _padding_size[i][2] # top padding\n return input\n\n @classmethod\n def unpad(cls, input, padding_size):\n _padding_size = padding_size.to(input)\n # unpad only xy, not wh\n for i in range(len(_padding_size)):\n input[i, :, 0] -= _padding_size[i][0] # left padding\n input[i, :, 1] -= _padding_size[i][2] # top padding\n return input\n\n\nclass KeypointsApplyInverse(BBoxApplyInverse):\n \"\"\"Apply and inverse transformations for keypoints tensors.\n\n This is for transform keypoints in the format (B, N, 2).\n \"\"\"\n\n # Hot fix for the typing mismatching\n apply_func = partial(transform_points)\n\n @classmethod\n def pad(cls, input: Tensor, padding_size: Tensor) -> Tensor:\n\n if len(input.shape) not in (2, 3,):\n raise AssertionError(input.shape)\n\n if len(padding_size.shape) != 2:\n raise AssertionError(padding_size.shape)\n\n _input = input.clone()\n\n if input.dim() == 2:\n # B,2 to B,1,2\n _input = _input[:, None]\n\n _input[..., 0] += padding_size[..., :1] # left padding\n _input[..., 1] += padding_size[..., 2:3] # top padding\n\n if input.dim() == 2:\n _input = _input[:, 0] # squeeze back\n\n return _input\n\n @classmethod\n def unpad(cls, input: Tensor, padding_size: Tensor) -> Tensor:\n\n if len(input.shape) not in (2, 3,):\n raise AssertionError(input.shape)\n if len(padding_size.shape) != 2:\n raise AssertionError(padding_size.shape)\n\n _input = input.clone()\n\n if input.dim() == 2:\n # B,2 to B,1,2\n _input = _input[:, None]\n\n # unpad only xy, not wh\n _input[..., 0] -= padding_size[..., :1] # left padding\n _input[..., 1] -= padding_size[..., 2:3] # top padding\n\n if input.dim() == 2:\n _input = _input[:, 0] # squeeze back\n\n return _input\n\n\nclass ApplyInverse:\n \"\"\"Apply and inverse transformations for any tensors (e.g. 
mask, box, points).\"\"\"\n\n @classmethod\n def _get_func_by_key(cls, dcate: Union[str, int, DataKey]) -> Type[ApplyInverseInterface]:\n if DataKey.get(dcate) == DataKey.INPUT:\n return InputApplyInverse\n if DataKey.get(dcate) == DataKey.MASK:\n return MaskApplyInverse\n if DataKey.get(dcate) in [DataKey.BBOX, DataKey.BBOX_XYXY, DataKey.BBOX_XYWH]:\n # We are converting to (B, 4, 2) internally for all formats.\n return BBoxApplyInverse\n if DataKey.get(dcate) in [DataKey.KEYPOINTS]:\n return KeypointsApplyInverse\n raise NotImplementedError(f\"input type of {dcate} is not implemented.\")\n\n @classmethod\n def apply_by_key(\n cls,\n input: Tensor,\n label: Optional[Tensor],\n module: nn.Module,\n param: ParamItem,\n dcate: Union[str, int, DataKey] = DataKey.INPUT,\n extra_args: Dict[str, Any] = {}\n ) -> Tuple[Tensor, Optional[Tensor]]:\n \"\"\"Apply a transformation with respect to the parameters.\n\n Args:\n input: the input tensor.\n label: the optional label tensor.\n module: any torch Module but only kornia augmentation modules will count\n to apply transformations.\n param: the corresponding parameters to the module.\n dcate: data category. 'input', 'mask', 'bbox', 'bbox_xyxy', 'bbox_xyhw', 'keypoints'.\n By default, it is set to 'input'.\n \"\"\"\n func: Type[ApplyInverseInterface] = cls._get_func_by_key(dcate)\n\n if isinstance(input, (tuple,)):\n # If the input is a tuple with (input, mat) or something else\n return (func.apply_trans(input[0], label, module, param, extra_args), *input[1:]) # type: ignore\n return func.apply_trans(input, label, module=module, param=param, extra_args=extra_args)\n\n @classmethod\n def inverse_by_key(\n cls,\n input: Tensor,\n module: nn.Module,\n param: Optional[ParamItem] = None,\n dcate: Union[str, int, DataKey] = DataKey.INPUT,\n extra_args: Dict[str, Any] = {}\n ) -> Tensor:\n \"\"\"Inverse a transformation with respect to the parameters.\n\n Args:\n input: the input tensor.\n module: any torch Module but only kornia augmentation modules will count\n to apply transformations.\n param: the corresponding parameters to the module.\n dcate: data category. 
'input', 'mask', 'bbox', 'bbox_xyxy', 'bbox_xyhw', 'keypoints'.\n By default, it is set to 'input'.\n \"\"\"\n func: Type[ApplyInverseInterface] = cls._get_func_by_key(dcate)\n return func.inverse(input, module, param, extra_args=extra_args)\n", "path": "kornia/augmentation/container/utils.py"}], "after_files": [{"content": "import warnings\nfrom abc import ABCMeta, abstractmethod\nfrom functools import partial\nfrom typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Type, Union, cast\n\nimport torch\nimport torch.nn as nn\nfrom torch import Tensor\n\nimport kornia # lazy loading for circular dependencies\nfrom kornia.augmentation import GeometricAugmentationBase2D, MixAugmentationBase, RandomCrop, RandomErasing\nfrom kornia.augmentation.base import _AugmentationBase\nfrom kornia.augmentation.container.base import ParamItem\nfrom kornia.augmentation.utils import override_parameters\nfrom kornia.constants import DataKey\nfrom kornia.geometry.bbox import transform_bbox\nfrom kornia.geometry.linalg import transform_points\nfrom kornia.utils.helpers import _torch_inverse_cast\n\n\ndef _get_geometric_only_param(\n module: \"kornia.augmentation.ImageSequential\", param: List[ParamItem]\n) -> List[ParamItem]:\n named_modules: Iterator[Tuple[str, nn.Module]] = module.get_forward_sequence(param)\n\n res: List[ParamItem] = []\n for (_, mod), p in zip(named_modules, param):\n if isinstance(mod, (GeometricAugmentationBase2D,)):\n res.append(p)\n return res\n\n\nclass ApplyInverseInterface(metaclass=ABCMeta):\n \"\"\"Abstract interface for applying and inversing transformations.\"\"\"\n\n @classmethod\n @abstractmethod\n def apply_trans(\n cls, input: Tensor, label: Optional[Tensor], module: nn.Module, param: ParamItem,\n extra_args: Dict[str, Any] = {}\n ) -> Tuple[Tensor, Optional[Tensor]]:\n \"\"\"Apply a transformation with respect to the parameters.\n\n Args:\n input: the input tensor.\n label: the optional label tensor.\n module: any torch Module but only kornia augmentation modules will count\n to apply transformations.\n param: the corresponding parameters to the module.\n \"\"\"\n raise NotImplementedError\n\n @classmethod\n @abstractmethod\n def inverse(\n cls, input: Tensor, module: nn.Module, param: Optional[ParamItem] = None,\n extra_args: Dict[str, Any] = {}\n ) -> Tensor:\n \"\"\"Inverse a transformation with respect to the parameters.\n\n Args:\n input: the input tensor.\n module: any torch Module but only kornia augmentation modules will count\n to apply transformations.\n param: the corresponding parameters to the module.\n \"\"\"\n raise NotImplementedError\n\n\nclass ApplyInverseImpl(ApplyInverseInterface):\n \"\"\"Standard matrix apply and inverse methods.\"\"\"\n\n apply_func: Callable\n\n @classmethod\n def apply_trans(\n cls, input: Tensor, label: Optional[Tensor], module: nn.Module, param: ParamItem,\n extra_args: Dict[str, Any] = {}\n ) -> Tuple[Tensor, Optional[Tensor]]:\n \"\"\"Apply a transformation with respect to the parameters.\n\n Args:\n input: the input tensor.\n label: the optional label tensor.\n module: any torch Module but only kornia augmentation modules will count\n to apply transformations.\n param: the corresponding parameters to the module.\n \"\"\"\n mat: Optional[Tensor]\n if hasattr(module, \"transform_matrix\") and module.transform_matrix is not None:\n mat = cast(Tensor, module.transform_matrix)\n else:\n mat = cls._get_transformation(input, module, param, extra_args=extra_args)\n mat = torch.as_tensor(mat, device=input.device, 
dtype=input.dtype)\n to_apply = None\n if isinstance(module, _AugmentationBase):\n to_apply = param.data['batch_prob'] # type: ignore\n if isinstance(module, kornia.augmentation.ImageSequential):\n to_apply = torch.ones(input.shape[0], device=input.device, dtype=input.dtype).bool()\n\n # If any inputs need to be transformed.\n if mat is not None and to_apply is not None and to_apply.sum() != 0 and input.numel() > 0:\n input[to_apply] = cls.apply_func(mat[to_apply], input[to_apply])\n\n return input, label\n\n @classmethod\n def inverse(\n cls, input: Tensor, module: nn.Module, param: Optional[ParamItem] = None,\n extra_args: Dict[str, Any] = {}\n ) -> Tensor:\n \"\"\"Inverse a transformation with respect to the parameters.\n\n Args:\n input: the input tensor.\n module: any torch Module but only kornia augmentation modules will count\n to apply transformations.\n param: the corresponding parameters to the module.\n \"\"\"\n mat: Optional[Tensor]\n if hasattr(module, \"transform_matrix\") and module.transform_matrix is not None:\n mat = cast(Tensor, module.transform_matrix)\n else:\n mat = cls._get_transformation(input, module, param, extra_args=extra_args)\n mat = torch.as_tensor(mat, device=input.device, dtype=input.dtype)\n\n if mat is not None:\n transform: Tensor = cls._get_inverse_transformation(mat)\n input = cls.apply_func(torch.as_tensor(transform, device=input.device, dtype=input.dtype), input)\n return input\n\n @classmethod\n def _get_transformation(\n cls, input: Tensor, module: nn.Module, param: Optional[ParamItem] = None,\n extra_args: Dict[str, Any] = {}\n ) -> Optional[Tensor]:\n\n if (\n isinstance(module, (GeometricAugmentationBase2D, kornia.augmentation.ImageSequential))\n and param is None\n ):\n raise ValueError(f\"Parameters of transformation matrix for {module} has not been computed.\")\n\n mat: Optional[Tensor] = None\n if isinstance(module, GeometricAugmentationBase2D):\n _param = cast(Dict[str, Tensor], param.data) # type: ignore\n flags = override_parameters(module.flags, extra_args)\n mat = module.get_transformation_matrix(input, _param, flags=flags)\n elif isinstance(module, kornia.augmentation.ImageSequential) and not module.is_intensity_only():\n _param = cast(List[ParamItem], param.data) # type: ignore\n mat = module.get_transformation_matrix(\n input, _param, recompute=False, extra_args=extra_args) # type: ignore\n else:\n return None # No need to update anything\n return mat\n\n @classmethod\n def _get_inverse_transformation(cls, transform: Tensor) -> Tensor:\n return _torch_inverse_cast(transform)\n\n\nclass InputApplyInverse(ApplyInverseImpl):\n \"\"\"Apply and inverse transformations for (image) input tensors.\"\"\"\n data_key = DataKey.INPUT\n\n @classmethod\n def apply_trans( # type: ignore\n cls, input: Tensor, label: Optional[Tensor], module: nn.Module, param: ParamItem,\n extra_args: Dict[str, Any] = {}\n ) -> Tuple[Tensor, Optional[Tensor]]:\n \"\"\"Apply a transformation with respect to the parameters.\n\n Args:\n input: the input tensor.\n label: the optional label tensor.\n module: any torch Module but only kornia augmentation modules will count\n to apply transformations.\n param: the corresponding parameters to the module.\n \"\"\"\n if isinstance(module, (MixAugmentationBase,)):\n input, label = module(input, label=label, params=param.data)\n elif isinstance(module, (_AugmentationBase,)):\n input = module(input, params=param.data, **extra_args)\n elif isinstance(module, kornia.augmentation.ImageSequential):\n temp = 
module.apply_inverse_func\n temp2 = module.return_label\n module.apply_inverse_func = InputApplyInverse\n module.return_label = True\n if isinstance(module, kornia.augmentation.AugmentationSequential):\n input, label = module(input, label=label, params=param.data, data_keys=[cls.data_key])\n else:\n input, label = module(input, label=label, params=param.data, extra_args=extra_args)\n module.apply_inverse_func = temp\n module.return_label = temp2\n else:\n if param.data is not None:\n raise AssertionError(f\"Non-augmentaion operation {param.name} require empty parameters. Got {param}.\")\n # In case of return_transform = True\n if isinstance(input, (tuple, list)):\n input = (module(input[0]), input[1])\n else:\n input = module(input)\n return input, label\n\n @classmethod\n def inverse(\n cls, input: Tensor, module: nn.Module, param: Optional[ParamItem] = None,\n extra_args: Dict[str, Any] = {}\n ) -> Tensor:\n \"\"\"Inverse a transformation with respect to the parameters.\n\n Args:\n input: the input tensor.\n module: any torch Module but only kornia augmentation modules will count\n to apply transformations.\n param: the corresponding parameters to the module.\n \"\"\"\n if isinstance(module, GeometricAugmentationBase2D):\n input = module.inverse(\n input, params=None if param is None else cast(Dict, param.data), extra_args=extra_args)\n elif isinstance(module, kornia.augmentation.ImageSequential):\n temp = module.apply_inverse_func\n module.apply_inverse_func = InputApplyInverse\n if isinstance(module, kornia.augmentation.AugmentationSequential):\n input = cast(Tensor, module.inverse(\n input, params=None if param is None else cast(List, param.data)))\n else:\n input = module.inverse(\n input, params=None if param is None else cast(List, param.data), extra_args=extra_args)\n module.apply_inverse_func = temp\n return input\n\n\nclass MaskApplyInverse(ApplyInverseImpl):\n \"\"\"Apply and inverse transformations for mask tensors.\"\"\"\n data_key = DataKey.MASK\n\n @classmethod\n def make_input_only_sequential(cls, module: \"kornia.augmentation.ImageSequential\") -> Callable:\n \"\"\"Disable all other additional inputs (e.g. 
) for ImageSequential.\"\"\"\n\n def f(*args, **kwargs):\n if_return_label = module.return_label\n module.return_label = False\n out = module(*args, **kwargs)\n module.return_label = if_return_label\n return out\n\n return f\n\n @classmethod\n def apply_trans(\n cls, input: Tensor, label: Optional[Tensor], module: nn.Module, param: Optional[ParamItem] = None,\n extra_args: Dict[str, Any] = {}\n ) -> Tuple[Tensor, Optional[Tensor]]:\n \"\"\"Apply a transformation with respect to the parameters.\n\n Args:\n input: the input tensor.\n label: the optional label tensor.\n module: any torch Module but only kornia augmentation modules will count\n to apply transformations.\n param: the corresponding parameters to the module.\n \"\"\"\n if param is not None:\n _param = param.data\n else:\n _param = None # type: ignore\n\n if isinstance(module, (GeometricAugmentationBase2D, RandomErasing)):\n _param = cast(Dict[str, Tensor], _param).copy()\n # TODO: Parametrize value to pad with across the board for different keys\n if 'values' in _param:\n _param['values'] = torch.zeros_like(_param['values']) # Always pad with zeros\n\n input = module(input, params=_param, **extra_args)\n elif isinstance(module, kornia.augmentation.ImageSequential) and not module.is_intensity_only():\n _param = cast(List[ParamItem], _param)\n temp = module.apply_inverse_func\n module.apply_inverse_func = MaskApplyInverse\n geo_param: List[ParamItem] = _get_geometric_only_param(module, _param)\n input = cls.make_input_only_sequential(module)(input, label=None, params=geo_param)\n module.apply_inverse_func = temp\n else:\n pass # No need to update anything\n return input, label\n\n @classmethod\n def inverse(\n cls, input: Tensor, module: nn.Module, param: Optional[ParamItem] = None,\n extra_args: Dict[str, Any] = {}\n ) -> Tensor:\n \"\"\"Inverse a transformation with respect to the parameters.\n\n Args:\n input: the input tensor.\n module: any torch Module but only kornia augmentation modules will count\n to apply transformations.\n param: the corresponding parameters to the module.\n \"\"\"\n\n if isinstance(module, GeometricAugmentationBase2D):\n input = module.inverse(\n input, params=None if param is None else cast(Dict, param.data), **extra_args)\n elif isinstance(module, kornia.augmentation.ImageSequential):\n temp = module.apply_inverse_func\n module.apply_inverse_func = MaskApplyInverse\n input = module.inverse(\n input, params=None if param is None else cast(List, param.data))\n module.apply_inverse_func = temp\n return input\n\n\nclass BBoxApplyInverse(ApplyInverseImpl):\n \"\"\"Apply and inverse transformations for bounding box tensors.\n\n This is for transform boxes in the format (B, N, 4, 2).\n \"\"\"\n\n @classmethod\n def _get_padding_size(cls, module: nn.Module, param: Optional[ParamItem]) -> Optional[Tensor]:\n if isinstance(module, RandomCrop):\n _param = cast(Dict[str, Tensor], param.data) # type: ignore\n return _param.get(\"padding_size\")\n return None\n\n @classmethod\n def pad(cls, input: Tensor, padding_size: Tensor) -> Tensor:\n \"\"\"\n Args:\n input: (B, N, 4, 2)\n padding_size: (B, 4)\n \"\"\"\n if len(input.shape) not in (3, 4,):\n raise AssertionError(input.shape)\n\n if len(padding_size.shape) != 2:\n raise AssertionError(padding_size.shape)\n\n _input = input.clone()\n\n if input.dim() == 3:\n # B,4,2 to B,1,4,2\n _input = _input[:, None]\n\n _input[..., 0] += padding_size[..., None, :1] # left padding\n _input[..., 1] += padding_size[..., None, 2:3] # top padding\n\n if input.dim() == 3:\n _input 
= _input[:, 0] # squeeze back\n\n return _input\n\n @classmethod\n def unpad(cls, input: Tensor, padding_size: Tensor) -> Tensor:\n \"\"\"\n Args:\n input: (B, N, 4, 2)\n padding_size: (B, 4)\n \"\"\"\n if len(input.shape) not in (3, 4,):\n raise AssertionError(input.shape)\n\n if len(padding_size.shape) != 2:\n raise AssertionError(padding_size.shape)\n\n _input = input.clone()\n\n if input.dim() == 3:\n # B,4,2 to B,1,4,2\n _input = _input[:, None]\n\n _input[..., 0] -= padding_size[..., None, :1] # left padding\n _input[..., 1] -= padding_size[..., None, 2:3] # top padding\n\n if input.dim() == 3:\n _input = _input[:, 0] # squeeze back\n\n return _input\n\n apply_func = partial(transform_bbox, mode=\"xyxy\", restore_coordinates=True)\n\n @classmethod\n def apply_trans(\n cls, input: Tensor, label: Optional[Tensor], module: nn.Module, param: ParamItem,\n extra_args: Dict[str, Any] = {}\n ) -> Tuple[Tensor, Optional[Tensor]]:\n \"\"\"Apply a transformation with respect to the parameters.\n\n Args:\n input: the input tensor, (B, N, 4, 2) or (B, 4, 2).\n label: the optional label tensor.\n module: any torch Module but only kornia augmentation modules will count\n to apply transformations.\n param: the corresponding parameters to the module.\n \"\"\"\n _input = input.clone()\n\n padding_size = cls._get_padding_size(module, param)\n if padding_size is not None:\n _input = cls.pad(_input, padding_size.to(_input))\n\n _input, label = super().apply_trans(_input, label, module, param, extra_args=extra_args)\n\n # TODO: Filter/crop boxes outside crop (with negative or larger than crop size coords)?\n\n return _input, label\n\n @classmethod\n def inverse(\n cls, input: Tensor, module: nn.Module, param: Optional[ParamItem] = None,\n extra_args: Dict[str, Any] = {}\n ) -> Tensor:\n \"\"\"Inverse a transformation with respect to the parameters.\n\n Args:\n input: the input tensor.\n module: any torch Module but only kornia augmentation modules will count\n to apply transformations.\n param: the corresponding parameters to the module.\n \"\"\"\n _input = input.clone()\n\n _input = super().inverse(_input, module, param, extra_args=extra_args)\n\n padding_size = cls._get_padding_size(module, param)\n if padding_size is not None:\n _input = cls.unpad(_input, padding_size.to(input))\n\n return _input\n\n\nclass BBoxXYXYApplyInverse(BBoxApplyInverse):\n \"\"\"Apply and inverse transformations for bounding box tensors.\n\n This is for transform boxes in the format [xmin, ymin, xmax, ymax].\n \"\"\"\n\n apply_func = partial(transform_bbox, mode=\"xyxy\", restore_coordinates=True)\n\n @classmethod\n def pad(cls, input, padding_size):\n _padding_size = padding_size.to(input)\n for i in range(len(_padding_size)):\n input[i, :, 0::2] += _padding_size[i][0] # left padding\n input[i, :, 1::2] += _padding_size[i][2] # top padding\n return input\n\n @classmethod\n def unpad(cls, input, padding_size):\n _padding_size = padding_size.to(input)\n for i in range(len(_padding_size)):\n input[i, :, 0::2] -= _padding_size[i][0] # left padding\n input[i, :, 1::2] -= _padding_size[i][2] # top padding\n return input\n\n @classmethod\n def apply_trans(\n cls, input: Tensor, label: Optional[Tensor], module: nn.Module, param: ParamItem,\n extra_args: Dict[str, Any] = {}\n ) -> Tuple[Tensor, Optional[Tensor]]:\n warnings.warn(\"BBoxXYXYApplyInverse is no longer maintained. 
Please use BBoxApplyInverse instead.\")\n return super().apply_trans(input, label=label, module=module, param=param, extra_args=extra_args)\n\n @classmethod\n def inverse(\n cls, input: Tensor, module: nn.Module, param: Optional[ParamItem] = None,\n extra_args: Dict[str, Any] = {}\n ) -> Tensor:\n warnings.warn(\"BBoxXYXYApplyInverse is no longer maintained. Please use BBoxApplyInverse instead.\")\n return super().inverse(input, module=module, param=param, extra_args=extra_args)\n\n\nclass BBoxXYWHApplyInverse(BBoxXYXYApplyInverse):\n \"\"\"Apply and inverse transformations for bounding box tensors.\n\n This is for transform boxes in the format [xmin, ymin, width, height].\n \"\"\"\n\n apply_func = partial(transform_bbox, mode=\"xywh\", restore_coordinates=True)\n\n @classmethod\n def pad(cls, input, padding_size):\n _padding_size = padding_size.to(input)\n # pad only xy, not wh\n for i in range(len(_padding_size)):\n input[i, :, 0] += _padding_size[i][0] # left padding\n input[i, :, 1] += _padding_size[i][2] # top padding\n return input\n\n @classmethod\n def unpad(cls, input, padding_size):\n _padding_size = padding_size.to(input)\n # unpad only xy, not wh\n for i in range(len(_padding_size)):\n input[i, :, 0] -= _padding_size[i][0] # left padding\n input[i, :, 1] -= _padding_size[i][2] # top padding\n return input\n\n\nclass KeypointsApplyInverse(BBoxApplyInverse):\n \"\"\"Apply and inverse transformations for keypoints tensors.\n\n This is for transform keypoints in the format (B, N, 2).\n \"\"\"\n\n # Hot fix for the typing mismatching\n apply_func = partial(transform_points)\n\n @classmethod\n def pad(cls, input: Tensor, padding_size: Tensor) -> Tensor:\n\n if len(input.shape) not in (2, 3,):\n raise AssertionError(input.shape)\n\n if len(padding_size.shape) != 2:\n raise AssertionError(padding_size.shape)\n\n _input = input.clone()\n\n if input.dim() == 2:\n # B,2 to B,1,2\n _input = _input[:, None]\n\n _input[..., 0] += padding_size[..., :1] # left padding\n _input[..., 1] += padding_size[..., 2:3] # top padding\n\n if input.dim() == 2:\n _input = _input[:, 0] # squeeze back\n\n return _input\n\n @classmethod\n def unpad(cls, input: Tensor, padding_size: Tensor) -> Tensor:\n\n if len(input.shape) not in (2, 3,):\n raise AssertionError(input.shape)\n if len(padding_size.shape) != 2:\n raise AssertionError(padding_size.shape)\n\n _input = input.clone()\n\n if input.dim() == 2:\n # B,2 to B,1,2\n _input = _input[:, None]\n\n # unpad only xy, not wh\n _input[..., 0] -= padding_size[..., :1] # left padding\n _input[..., 1] -= padding_size[..., 2:3] # top padding\n\n if input.dim() == 2:\n _input = _input[:, 0] # squeeze back\n\n return _input\n\n\nclass ApplyInverse:\n \"\"\"Apply and inverse transformations for any tensors (e.g. 
mask, box, points).\"\"\"\n\n @classmethod\n def _get_func_by_key(cls, dcate: Union[str, int, DataKey]) -> Type[ApplyInverseInterface]:\n if DataKey.get(dcate) == DataKey.INPUT:\n return InputApplyInverse\n if DataKey.get(dcate) == DataKey.MASK:\n return MaskApplyInverse\n if DataKey.get(dcate) in [DataKey.BBOX, DataKey.BBOX_XYXY, DataKey.BBOX_XYWH]:\n # We are converting to (B, 4, 2) internally for all formats.\n return BBoxApplyInverse\n if DataKey.get(dcate) in [DataKey.KEYPOINTS]:\n return KeypointsApplyInverse\n raise NotImplementedError(f\"input type of {dcate} is not implemented.\")\n\n @classmethod\n def apply_by_key(\n cls,\n input: Tensor,\n label: Optional[Tensor],\n module: nn.Module,\n param: ParamItem,\n dcate: Union[str, int, DataKey] = DataKey.INPUT,\n extra_args: Dict[str, Any] = {}\n ) -> Tuple[Tensor, Optional[Tensor]]:\n \"\"\"Apply a transformation with respect to the parameters.\n\n Args:\n input: the input tensor.\n label: the optional label tensor.\n module: any torch Module but only kornia augmentation modules will count\n to apply transformations.\n param: the corresponding parameters to the module.\n dcate: data category. 'input', 'mask', 'bbox', 'bbox_xyxy', 'bbox_xyhw', 'keypoints'.\n By default, it is set to 'input'.\n \"\"\"\n func: Type[ApplyInverseInterface] = cls._get_func_by_key(dcate)\n\n if isinstance(input, (tuple,)):\n # If the input is a tuple with (input, mat) or something else\n return (func.apply_trans(input[0], label, module, param, extra_args), *input[1:]) # type: ignore\n return func.apply_trans(input, label, module=module, param=param, extra_args=extra_args)\n\n @classmethod\n def inverse_by_key(\n cls,\n input: Tensor,\n module: nn.Module,\n param: Optional[ParamItem] = None,\n dcate: Union[str, int, DataKey] = DataKey.INPUT,\n extra_args: Dict[str, Any] = {}\n ) -> Tensor:\n \"\"\"Inverse a transformation with respect to the parameters.\n\n Args:\n input: the input tensor.\n module: any torch Module but only kornia augmentation modules will count\n to apply transformations.\n param: the corresponding parameters to the module.\n dcate: data category. 'input', 'mask', 'bbox', 'bbox_xyxy', 'bbox_xyhw', 'keypoints'.\n By default, it is set to 'input'.\n \"\"\"\n func: Type[ApplyInverseInterface] = cls._get_func_by_key(dcate)\n return func.inverse(input, module, param, extra_args=extra_args)\n", "path": "kornia/augmentation/container/utils.py"}]} |
gh_patches_debug_1220 | rasdani/github-patches | git_diff | sunpy__sunpy-2572 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pandas dataframe values return as numpy.datetime64 objects in local time zone. parse_time does not understand these objects.
Came across this issue today when using a pandas DataFrame. When you explicitly ask for the values of the indices in a DataFrame, they can be returned as numpy.datetime64 objects. These time objects have the timezone attached to the end of them (see example below). parse_time at the moment cannot understand these objects.
The following example explains what I'm on about...
```
In [1]: import datetime
In [2]: import pandas
In [3]: import numpy as np
#create a test series
In [4]: x=np.linspace(0,19,20)
In [5]: basetime=datetime.datetime.utcnow()
In [6]: times=[]
In [7]: for thing in x:
   ...: times.append(basetime + datetime.timedelta(0,thing))
In [8]: times
Out[8]:
[datetime.datetime(2014, 2, 7, 21, 47, 51, 8288),
datetime.datetime(2014, 2, 7, 21, 47, 52, 8288),
datetime.datetime(2014, 2, 7, 21, 47, 53, 8288),
datetime.datetime(2014, 2, 7, 21, 47, 54, 8288),
datetime.datetime(2014, 2, 7, 21, 47, 55, 8288),
datetime.datetime(2014, 2, 7, 21, 47, 56, 8288),
datetime.datetime(2014, 2, 7, 21, 47, 57, 8288),
datetime.datetime(2014, 2, 7, 21, 47, 58, 8288),
datetime.datetime(2014, 2, 7, 21, 47, 59, 8288),
datetime.datetime(2014, 2, 7, 21, 48, 0, 8288),
datetime.datetime(2014, 2, 7, 21, 48, 1, 8288),
datetime.datetime(2014, 2, 7, 21, 48, 2, 8288),
datetime.datetime(2014, 2, 7, 21, 48, 3, 8288),
datetime.datetime(2014, 2, 7, 21, 48, 4, 8288),
datetime.datetime(2014, 2, 7, 21, 48, 5, 8288),
datetime.datetime(2014, 2, 7, 21, 48, 6, 8288),
datetime.datetime(2014, 2, 7, 21, 48, 7, 8288),
datetime.datetime(2014, 2, 7, 21, 48, 8, 8288),
datetime.datetime(2014, 2, 7, 21, 48, 9, 8288),
datetime.datetime(2014, 2, 7, 21, 48, 10, 8288)]
In [9]: test_pandas=pandas.DataFrame(np.random.random(20),index=times)
```
If you now print the values from the pandas dataframe, they are displayed in another time zone! (not UT). In the following example, it displays a numpy.datetime64 in UT-5.
```
In [10]: test_pandas.index.values[0]
Out[10]: numpy.datetime64('2014-02-07T16:47:51.008288000-0500')
```
Also, parse_time can't read this format at the moment.
```
In [11]: from sunpy.time import parse_time
In [12]: parse_time(test_pandas.index.values[0])
ERROR: TypeError: argument of type 'numpy.datetime64' is not iterable [sunpy.time.time]
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-14-7d3de1f9a633> in <module>()
----> 1 parse_time(test_pandas.index.values[0])
/Users/ainglis/python/sunpy/sunpy/time/time.pyc in parse_time(time_string)
169 # remove trailing zeros and the final dot to allow any
170 # number of zeros. This solves issue #289
--> 171 if '.' in time_string:
172 time_string = time_string.rstrip("0").rstrip(".")
173 for time_format in TIME_FORMAT_LIST:
TypeError: argument of type 'numpy.datetime64' is not iterable
```
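
Possibly related detail: as far as I can tell the value itself is stored as UTC and only the repr applies the local offset, but the bigger problem for any naive conversion is the nanosecond precision. A rough illustration (assuming NumPy's standard datetime64 casting rules; the literal value below is invented to match the example above):

```
import numpy as np
from datetime import datetime

dt64 = np.datetime64('2014-02-07T21:47:51.008288000')  # nanosecond precision

# Casting a ns-precision datetime64 straight to datetime.datetime does not
# give a datetime at all -- NumPy falls back to an integer count of
# nanoseconds since the epoch, so anything expecting .timetuple() breaks.
converted = dt64.astype(datetime)
print(type(converted))  # <class 'int'>

# Dropping to microsecond precision first round-trips cleanly.
print(dt64.astype('M8[us]').astype(datetime))
# datetime.datetime(2014, 2, 7, 21, 47, 51, 8288)
```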
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sunpy/time/time.py`
Content:
```
1 from __future__ import absolute_import, division, print_function
2 import re
3 from datetime import datetime, date, time, timedelta
4
5 import numpy as np
6 import pandas
7 from sunpy.extern import six
8
9 import astropy.time
10
11 __all__ = ['find_time', 'parse_time', 'is_time',
12 'day_of_year', 'break_time', 'get_day', 'is_time_in_given_format']
13
14 # Mapping of time format codes to regular expressions.
15 REGEX = {
16 '%Y': '(?P<year>\d{4})',
17 '%j': '(?P<dayofyear>\d{3})',
18 '%m': '(?P<month>\d{1,2})',
19 '%d': '(?P<day>\d{1,2})',
20 '%H': '(?P<hour>\d{1,2})',
21 '%M': '(?P<minute>\d{1,2})',
22 '%S': '(?P<second>\d{1,2})',
23 '%f': '(?P<microsecond>\d+)',
24 '%b': '(?P<month_str>[a-zA-Z]+)',
25 }
26
27 TIME_FORMAT_LIST = [
28 "%Y-%m-%dT%H:%M:%S.%f", # Example 2007-05-04T21:08:12.999999
29 "%Y/%m/%dT%H:%M:%S.%f", # Example 2007/05/04T21:08:12.999999
30 "%Y-%m-%dT%H:%M:%S.%fZ", # Example 2007-05-04T21:08:12.999Z
31 "%Y-%m-%dT%H:%M:%S", # Example 2007-05-04T21:08:12
32 "%Y/%m/%dT%H:%M:%S", # Example 2007/05/04T21:08:12
33 "%Y%m%dT%H%M%S.%f", # Example 20070504T210812.999999
34 "%Y%m%dT%H%M%S", # Example 20070504T210812
35 "%Y/%m/%d %H:%M:%S", # Example 2007/05/04 21:08:12
36 "%Y/%m/%d %H:%M", # Example 2007/05/04 21:08
37 "%Y/%m/%d %H:%M:%S.%f", # Example 2007/05/04 21:08:12.999999
38 "%Y-%m-%d %H:%M:%S.%f", # Example 2007-05-04 21:08:12.999999
39 "%Y-%m-%d %H:%M:%S", # Example 2007-05-04 21:08:12
40 "%Y-%m-%d %H:%M", # Example 2007-05-04 21:08
41 "%Y-%b-%d %H:%M:%S", # Example 2007-May-04 21:08:12
42 "%Y-%b-%d %H:%M", # Example 2007-May-04 21:08
43 "%Y-%b-%d", # Example 2007-May-04
44 "%Y-%m-%d", # Example 2007-05-04
45 "%Y/%m/%d", # Example 2007/05/04
46 "%d-%b-%Y", # Example 04-May-2007
47 "%d-%b-%Y %H:%M:%S.%f", # Example 04-May-2007 21:08:12.999999
48 "%Y%m%d_%H%M%S", # Example 20070504_210812
49 "%Y:%j:%H:%M:%S", # Example 2012:124:21:08:12
50 "%Y:%j:%H:%M:%S.%f", # Example 2012:124:21:08:12.999999
51 "%Y%m%d%H%M%S", # Example 20140101000001 (JSOC / VSO)
52 "%Y.%m.%d_%H:%M:%S_TAI", # Example 2016.05.04_21:08:12_TAI
53 ]
54
55
56 def _group_or_none(match, group, fun):
57 try:
58 ret = match.group(group)
59 except IndexError:
60 return None
61 else:
62 return fun(ret)
63
64
65 def _n_or_eq(a, b):
66 return a is None or a == b
67
68
69 def _regex_parse_time(inp, format):
70 # Parser for finding out the minute value so we can adjust the string
71 # from 24:00:00 to 00:00:00 the next day because strptime does not
72 # understand the former.
73 for key, value in six.iteritems(REGEX):
74 format = format.replace(key, value)
75 match = re.match(format, inp)
76 if match is None:
77 return None, None
78 try:
79 hour = match.group("hour")
80 except IndexError:
81 return inp, timedelta(days=0)
82 if match.group("hour") == "24":
83 if not all(
84 _n_or_eq(_group_or_none(match, g, int), 00)
85 for g in ["minute", "second", "microsecond"]
86 ):
87 raise ValueError
88 from_, to = match.span("hour")
89 return inp[:from_] + "00" + inp[to:], timedelta(days=1)
90 return inp, timedelta(days=0)
91
92
93 def find_time(string, format):
94 """ Return iterator of occurrences of date formatted with format
95 in string. Currently supported format codes: """
96 re_format = format
97 for key, value in six.iteritems(REGEX):
98 re_format = re_format.replace(key, value)
99 matches = re.finditer(re_format, string)
100 for match in matches:
101 try:
102 matchstr = string[slice(*match.span())]
103 dt = datetime.strptime(matchstr, format)
104 except ValueError:
105 continue
106 else:
107 yield dt
108
109
110 find_time.__doc__ += ', '.join(list(REGEX.keys()))
111
112
113 def _iter_empty(iter):
114 try:
115 next(iter)
116 except StopIteration:
117 return True
118 return False
119
120
121 def _astropy_time(time):
122 """
123 Return an `~astropy.time.Time` instance, running it through `~sunpy.time.parse_time` if needed
124 """
125 return time if isinstance(time, astropy.time.Time) else astropy.time.Time(parse_time(time))
126
127
128 def _parse_dt64(dt):
129 """
130 Parse a single numpy datetime64 object
131 """
132 # Validate (in an agnostic way) that we are getting a datetime rather than a date
133 return datetime(*(dt.astype(datetime).timetuple()[:6]))
134
135
136 def parse_time(time_string, time_format='', **kwargs):
137 """Given a time string will parse and return a datetime object.
138 Similar to the anytim function in IDL.
139 utime -- Time since epoch 1 Jan 1979
140
141 Parameters
142 ----------
143 time_string : [ int, float, time_string, datetime ]
144 Date to parse which can be either time_string, int, datetime object.
145 time_format : [ basestring, utime, datetime ]
146 Specifies the format user has provided the time_string in.
147
148 Returns
149 -------
150 out : datetime
151 DateTime corresponding to input date string
152
153 Note:
154 If time_string is an instance of float, then it is assumed to be in utime format.
155
156 Examples
157 --------
158 >>> import sunpy.time
159 >>> sunpy.time.parse_time('2012/08/01')
160 datetime.datetime(2012, 8, 1, 0, 0)
161 >>> sunpy.time.parse_time('2005-08-04T00:01:02.000Z')
162 datetime.datetime(2005, 8, 4, 0, 1, 2)
163 """
164 if isinstance(time_string, pandas.Timestamp):
165 return time_string.to_pydatetime()
166 elif isinstance(time_string, pandas.Series) and 'datetime64' in str(time_string.dtype):
167 return np.array([dt.to_pydatetime() for dt in time_string])
168 elif isinstance(time_string, pandas.DatetimeIndex):
169 return time_string._mpl_repr()
170 elif isinstance(time_string, datetime) or time_format == 'datetime':
171 return time_string
172 elif isinstance(time_string, date):
173 return datetime.combine(time_string, time())
174 elif isinstance(time_string, tuple):
175 return datetime(*time_string)
176 elif time_format == 'utime' or isinstance(time_string, (int, float)):
177 return datetime(1979, 1, 1) + timedelta(0, time_string)
178 elif isinstance(time_string, np.datetime64):
179 return _parse_dt64(time_string)
180 elif isinstance(time_string, np.ndarray) and 'datetime64' in str(time_string.dtype):
181 return np.array([_parse_dt64(dt) for dt in time_string])
182 elif time_string is 'now':
183 return datetime.utcnow()
184 elif isinstance(time_string, astropy.time.Time):
185 return time_string.datetime
186 else:
187 # remove trailing zeros and the final dot to allow any
188 # number of zeros. This solves issue #289
189 if '.' in time_string:
190 time_string = time_string.rstrip("0").rstrip(".")
191 for time_format in TIME_FORMAT_LIST:
192 try:
193 try:
194 ts, time_delta = _regex_parse_time(time_string,
195 time_format)
196 except TypeError:
197 break
198 if ts is None:
199 continue
200 return datetime.strptime(ts, time_format) + time_delta
201 except ValueError:
202 pass
203
204 time_string_parse_format = kwargs.pop('_time_string_parse_format', None)
205 if time_string_parse_format is not None:
206 ts, time_delta = _regex_parse_time(time_string,
207 time_string_parse_format)
208 if ts and time_delta:
209 return datetime.strptime(ts, time_string_parse_format) + time_delta
210 else:
211 return datetime.strptime(time_string, time_string_parse_format)
212 raise ValueError("'{tstr!s}' is not a valid time string!".format(tstr=time_string))
213
214
215 def is_time(time_string, time_format=''):
216 """
217 Returns true if the input is a valid date/time representation
218
219 Parameters
220 ----------
221 time_string : [ int, float, time_string, datetime ]
222 Date to parse which can be either time_string, int, datetime object.
223 time_format : [ basestring, utime, datetime ]
224 Specifies the format user has provided the time_string in.
225
226 Returns
227 -------
228 out : bool
229 True if can be parsed by parse_time
230
231 Notes
232 -----
233 If time_string is an instance of float, then it is assumed to be in
234 unix time format.
235
236 Examples
237 --------
238 >>> import sunpy.time
239 >>> sunpy.time.parse_time('2012/08/01')
240 datetime.datetime(2012, 8, 1, 0, 0)
241 >>> sunpy.time.parse_time('2005-08-04T00:01:02.000Z')
242 datetime.datetime(2005, 8, 4, 0, 1, 2)
243
244 .. todo::
245
246 add ability to parse tai (International Atomic Time seconds
247 since Jan 1, 1958)
248
249 """
250 if time_string is None:
251 return False
252 elif isinstance(time_string, datetime):
253 return True
254
255 try:
256 parse_time(time_string, time_format)
257 except ValueError:
258 return False
259 else:
260 return True
261
262
263 def day_of_year(time_string):
264 """Returns the (fractional) day of year.
265
266 Parameters
267 ----------
268 time_string : string
269 A parse_time compatible string
270
271 Returns
272 -------
273 out : float
274 The fractional day of year (where Jan 1st is 1).
275
276 Examples
277 --------
278 >>> import sunpy.time
279 >>> sunpy.time.day_of_year('2012/01/01')
280 1.0
281 >>> sunpy.time.day_of_year('2012/08/01')
282 214.0
283 >>> sunpy.time.day_of_year('2005-08-04T00:18:02.000Z')
284 216.01252314814815
285
286 """
287 SECONDS_IN_DAY = 60 * 60 * 24.0
288 time = parse_time(time_string)
289 time_diff = time - datetime(time.year, 1, 1, 0, 0, 0)
290 return time_diff.days + time_diff.seconds / SECONDS_IN_DAY + 1
291
292
293 def break_time(t='now', time_format=''):
294 """Given a time returns a string. Useful for naming files."""
295 # TODO: should be able to handle a time range
296 return parse_time(t, time_format).strftime("%Y%m%d_%H%M%S")
297
298
299 def get_day(dt):
300 """ Return datetime for the beginning of the day of given datetime. """
301 return datetime(dt.year, dt.month, dt.day)
302
303
304 def is_time_in_given_format(time_string, time_format):
305 """Tests whether a time string is formatted according to the given time
306 format."""
307 try:
308 datetime.strptime(time_string, time_format)
309 return True
310 except ValueError:
311 return False
312
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sunpy/time/time.py b/sunpy/time/time.py
--- a/sunpy/time/time.py
+++ b/sunpy/time/time.py
@@ -129,8 +129,8 @@
"""
Parse a single numpy datetime64 object
"""
- # Validate (in an agnostic way) that we are getting a datetime rather than a date
- return datetime(*(dt.astype(datetime).timetuple()[:6]))
+ # Convert to microsecond precision because datetime cannot handle nanoseconds
+ return dt.astype('M8[us]').astype(datetime)
def parse_time(time_string, time_format='', **kwargs):
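
A quick sanity check of the patched helper (a minimal sketch; the input value is invented and only the truncation behaviour matters):

```
import numpy as np
from datetime import datetime

def _parse_dt64(dt):
    # Patched behaviour: cast to microsecond precision first, because
    # datetime.datetime cannot represent nanoseconds.
    return dt.astype('M8[us]').astype(datetime)

print(_parse_dt64(np.datetime64('2014-02-07T21:47:51.008288123')))
# datetime.datetime(2014, 2, 7, 21, 47, 51, 8288) -- sub-microsecond digits truncated
```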
| {"golden_diff": "diff --git a/sunpy/time/time.py b/sunpy/time/time.py\n--- a/sunpy/time/time.py\n+++ b/sunpy/time/time.py\n@@ -129,8 +129,8 @@\n \"\"\"\n Parse a single numpy datetime64 object\n \"\"\"\n- # Validate (in an agnostic way) that we are getting a datetime rather than a date\n- return datetime(*(dt.astype(datetime).timetuple()[:6]))\n+ # Convert to microsecond precision because datetime cannot handle nanoseconds\n+ return dt.astype('M8[us]').astype(datetime)\n \n \n def parse_time(time_string, time_format='', **kwargs):\n", "issue": "Pandas dataframe values return as numpy.datetime64 objects in local time zone. parse_time does not understand these objects. \nCame across this issue today when using a pandas DataFrame. When you explicitly ask for the values of the indices in a DataFrame, they can be returned as numpy.datetime64 objects. These time objects have the timezone attached to the end of them (see example below). parse_time at the moment cannot understand these objects. \n\nThe following example explains what I'm on about...\n\n```\nIn [1]: import datetime\nIn [2]: import pandas\nIn [3]: import numpy as np\n#create a test series\nIn [4]: x=np.linspace(0,19,20)\nIn [5]: basetime=datetime.datetime.utcnow()\nIn [6]: times=[] \n\nIn [7]: for thing in x:\n ...: times.append(basetime + datetime.timedelta(0,thing)\n\nIn [8]: times\nOut[8]: \n[datetime.datetime(2014, 2, 7, 21, 47, 51, 8288),\n datetime.datetime(2014, 2, 7, 21, 47, 52, 8288),\n datetime.datetime(2014, 2, 7, 21, 47, 53, 8288),\n datetime.datetime(2014, 2, 7, 21, 47, 54, 8288),\n datetime.datetime(2014, 2, 7, 21, 47, 55, 8288),\n datetime.datetime(2014, 2, 7, 21, 47, 56, 8288),\n datetime.datetime(2014, 2, 7, 21, 47, 57, 8288),\n datetime.datetime(2014, 2, 7, 21, 47, 58, 8288),\n datetime.datetime(2014, 2, 7, 21, 47, 59, 8288),\n datetime.datetime(2014, 2, 7, 21, 48, 0, 8288),\n datetime.datetime(2014, 2, 7, 21, 48, 1, 8288),\n datetime.datetime(2014, 2, 7, 21, 48, 2, 8288),\n datetime.datetime(2014, 2, 7, 21, 48, 3, 8288),\n datetime.datetime(2014, 2, 7, 21, 48, 4, 8288),\n datetime.datetime(2014, 2, 7, 21, 48, 5, 8288),\n datetime.datetime(2014, 2, 7, 21, 48, 6, 8288),\n datetime.datetime(2014, 2, 7, 21, 48, 7, 8288),\n datetime.datetime(2014, 2, 7, 21, 48, 8, 8288),\n datetime.datetime(2014, 2, 7, 21, 48, 9, 8288),\n datetime.datetime(2014, 2, 7, 21, 48, 10, 8288)]\n\nIn [9]: test_pandas=pandas.DataFrame(np.random.random(20),index=times)\n```\n\nIf you now print the values from the pandas dataframe, they are displayed in another time zone! (not UT). In the following example, it displays a numpy.datetime64 in UT-5.\n\n```\nIn [10]: test_pandas.index.values[0]\nOut[10]: numpy.datetime64('2014-02-07T16:47:51.008288000-0500')\n```\n\nAlso, parse_time can't read this format at the moment.\n\n```\nIn [11]: from sunpy.time import parse_time\nIn [12]: parse_time(test_pandas.index.values[0])\nERROR: TypeError: argument of type 'numpy.datetime64' is not iterable [sunpy.time.time]\n---------------------------------------------------------------------------\nTypeError Traceback (most recent call last)\n<ipython-input-14-7d3de1f9a633> in <module>()\n----> 1 parse_time(test_pandas.index.values[0])\n\n/Users/ainglis/python/sunpy/sunpy/time/time.pyc in parse_time(time_string)\n 169 # remove trailing zeros and the final dot to allow any\n 170 # number of zeros. This solves issue #289\n--> 171 if '.' 
in time_string:\n 172 time_string = time_string.rstrip(\"0\").rstrip(\".\")\n 173 for time_format in TIME_FORMAT_LIST:\n\nTypeError: argument of type 'numpy.datetime64' is not iterable\n```\n\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\nimport re\nfrom datetime import datetime, date, time, timedelta\n\nimport numpy as np\nimport pandas\nfrom sunpy.extern import six\n\nimport astropy.time\n\n__all__ = ['find_time', 'parse_time', 'is_time',\n 'day_of_year', 'break_time', 'get_day', 'is_time_in_given_format']\n\n# Mapping of time format codes to regular expressions.\nREGEX = {\n '%Y': '(?P<year>\\d{4})',\n '%j': '(?P<dayofyear>\\d{3})',\n '%m': '(?P<month>\\d{1,2})',\n '%d': '(?P<day>\\d{1,2})',\n '%H': '(?P<hour>\\d{1,2})',\n '%M': '(?P<minute>\\d{1,2})',\n '%S': '(?P<second>\\d{1,2})',\n '%f': '(?P<microsecond>\\d+)',\n '%b': '(?P<month_str>[a-zA-Z]+)',\n}\n\nTIME_FORMAT_LIST = [\n \"%Y-%m-%dT%H:%M:%S.%f\", # Example 2007-05-04T21:08:12.999999\n \"%Y/%m/%dT%H:%M:%S.%f\", # Example 2007/05/04T21:08:12.999999\n \"%Y-%m-%dT%H:%M:%S.%fZ\", # Example 2007-05-04T21:08:12.999Z\n \"%Y-%m-%dT%H:%M:%S\", # Example 2007-05-04T21:08:12\n \"%Y/%m/%dT%H:%M:%S\", # Example 2007/05/04T21:08:12\n \"%Y%m%dT%H%M%S.%f\", # Example 20070504T210812.999999\n \"%Y%m%dT%H%M%S\", # Example 20070504T210812\n \"%Y/%m/%d %H:%M:%S\", # Example 2007/05/04 21:08:12\n \"%Y/%m/%d %H:%M\", # Example 2007/05/04 21:08\n \"%Y/%m/%d %H:%M:%S.%f\", # Example 2007/05/04 21:08:12.999999\n \"%Y-%m-%d %H:%M:%S.%f\", # Example 2007-05-04 21:08:12.999999\n \"%Y-%m-%d %H:%M:%S\", # Example 2007-05-04 21:08:12\n \"%Y-%m-%d %H:%M\", # Example 2007-05-04 21:08\n \"%Y-%b-%d %H:%M:%S\", # Example 2007-May-04 21:08:12\n \"%Y-%b-%d %H:%M\", # Example 2007-May-04 21:08\n \"%Y-%b-%d\", # Example 2007-May-04\n \"%Y-%m-%d\", # Example 2007-05-04\n \"%Y/%m/%d\", # Example 2007/05/04\n \"%d-%b-%Y\", # Example 04-May-2007\n \"%d-%b-%Y %H:%M:%S.%f\", # Example 04-May-2007 21:08:12.999999\n \"%Y%m%d_%H%M%S\", # Example 20070504_210812\n \"%Y:%j:%H:%M:%S\", # Example 2012:124:21:08:12\n \"%Y:%j:%H:%M:%S.%f\", # Example 2012:124:21:08:12.999999\n \"%Y%m%d%H%M%S\", # Example 20140101000001 (JSOC / VSO)\n \"%Y.%m.%d_%H:%M:%S_TAI\", # Example 2016.05.04_21:08:12_TAI\n]\n\n\ndef _group_or_none(match, group, fun):\n try:\n ret = match.group(group)\n except IndexError:\n return None\n else:\n return fun(ret)\n\n\ndef _n_or_eq(a, b):\n return a is None or a == b\n\n\ndef _regex_parse_time(inp, format):\n # Parser for finding out the minute value so we can adjust the string\n # from 24:00:00 to 00:00:00 the next day because strptime does not\n # understand the former.\n for key, value in six.iteritems(REGEX):\n format = format.replace(key, value)\n match = re.match(format, inp)\n if match is None:\n return None, None\n try:\n hour = match.group(\"hour\")\n except IndexError:\n return inp, timedelta(days=0)\n if match.group(\"hour\") == \"24\":\n if not all(\n _n_or_eq(_group_or_none(match, g, int), 00)\n for g in [\"minute\", \"second\", \"microsecond\"]\n ):\n raise ValueError\n from_, to = match.span(\"hour\")\n return inp[:from_] + \"00\" + inp[to:], timedelta(days=1)\n return inp, timedelta(days=0)\n\n\ndef find_time(string, format):\n \"\"\" Return iterator of occurrences of date formatted with format\n in string. 
Currently supported format codes: \"\"\"\n re_format = format\n for key, value in six.iteritems(REGEX):\n re_format = re_format.replace(key, value)\n matches = re.finditer(re_format, string)\n for match in matches:\n try:\n matchstr = string[slice(*match.span())]\n dt = datetime.strptime(matchstr, format)\n except ValueError:\n continue\n else:\n yield dt\n\n\nfind_time.__doc__ += ', '.join(list(REGEX.keys()))\n\n\ndef _iter_empty(iter):\n try:\n next(iter)\n except StopIteration:\n return True\n return False\n\n\ndef _astropy_time(time):\n \"\"\"\n Return an `~astropy.time.Time` instance, running it through `~sunpy.time.parse_time` if needed\n \"\"\"\n return time if isinstance(time, astropy.time.Time) else astropy.time.Time(parse_time(time))\n\n\ndef _parse_dt64(dt):\n \"\"\"\n Parse a single numpy datetime64 object\n \"\"\"\n # Validate (in an agnostic way) that we are getting a datetime rather than a date\n return datetime(*(dt.astype(datetime).timetuple()[:6]))\n\n\ndef parse_time(time_string, time_format='', **kwargs):\n \"\"\"Given a time string will parse and return a datetime object.\n Similar to the anytim function in IDL.\n utime -- Time since epoch 1 Jan 1979\n\n Parameters\n ----------\n time_string : [ int, float, time_string, datetime ]\n Date to parse which can be either time_string, int, datetime object.\n time_format : [ basestring, utime, datetime ]\n Specifies the format user has provided the time_string in.\n\n Returns\n -------\n out : datetime\n DateTime corresponding to input date string\n\n Note:\n If time_string is an instance of float, then it is assumed to be in utime format.\n\n Examples\n --------\n >>> import sunpy.time\n >>> sunpy.time.parse_time('2012/08/01')\n datetime.datetime(2012, 8, 1, 0, 0)\n >>> sunpy.time.parse_time('2005-08-04T00:01:02.000Z')\n datetime.datetime(2005, 8, 4, 0, 1, 2)\n \"\"\"\n if isinstance(time_string, pandas.Timestamp):\n return time_string.to_pydatetime()\n elif isinstance(time_string, pandas.Series) and 'datetime64' in str(time_string.dtype):\n return np.array([dt.to_pydatetime() for dt in time_string])\n elif isinstance(time_string, pandas.DatetimeIndex):\n return time_string._mpl_repr()\n elif isinstance(time_string, datetime) or time_format == 'datetime':\n return time_string\n elif isinstance(time_string, date):\n return datetime.combine(time_string, time())\n elif isinstance(time_string, tuple):\n return datetime(*time_string)\n elif time_format == 'utime' or isinstance(time_string, (int, float)):\n return datetime(1979, 1, 1) + timedelta(0, time_string)\n elif isinstance(time_string, np.datetime64):\n return _parse_dt64(time_string)\n elif isinstance(time_string, np.ndarray) and 'datetime64' in str(time_string.dtype):\n return np.array([_parse_dt64(dt) for dt in time_string])\n elif time_string is 'now':\n return datetime.utcnow()\n elif isinstance(time_string, astropy.time.Time):\n return time_string.datetime\n else:\n # remove trailing zeros and the final dot to allow any\n # number of zeros. This solves issue #289\n if '.' 
in time_string:\n time_string = time_string.rstrip(\"0\").rstrip(\".\")\n for time_format in TIME_FORMAT_LIST:\n try:\n try:\n ts, time_delta = _regex_parse_time(time_string,\n time_format)\n except TypeError:\n break\n if ts is None:\n continue\n return datetime.strptime(ts, time_format) + time_delta\n except ValueError:\n pass\n\n time_string_parse_format = kwargs.pop('_time_string_parse_format', None)\n if time_string_parse_format is not None:\n ts, time_delta = _regex_parse_time(time_string,\n time_string_parse_format)\n if ts and time_delta:\n return datetime.strptime(ts, time_string_parse_format) + time_delta\n else:\n return datetime.strptime(time_string, time_string_parse_format)\n raise ValueError(\"'{tstr!s}' is not a valid time string!\".format(tstr=time_string))\n\n\ndef is_time(time_string, time_format=''):\n \"\"\"\n Returns true if the input is a valid date/time representation\n\n Parameters\n ----------\n time_string : [ int, float, time_string, datetime ]\n Date to parse which can be either time_string, int, datetime object.\n time_format : [ basestring, utime, datetime ]\n Specifies the format user has provided the time_string in.\n\n Returns\n -------\n out : bool\n True if can be parsed by parse_time\n\n Notes\n -----\n If time_string is an instance of float, then it is assumed to be in\n unix time format.\n\n Examples\n --------\n >>> import sunpy.time\n >>> sunpy.time.parse_time('2012/08/01')\n datetime.datetime(2012, 8, 1, 0, 0)\n >>> sunpy.time.parse_time('2005-08-04T00:01:02.000Z')\n datetime.datetime(2005, 8, 4, 0, 1, 2)\n\n .. todo::\n\n add ability to parse tai (International Atomic Time seconds\n since Jan 1, 1958)\n\n \"\"\"\n if time_string is None:\n return False\n elif isinstance(time_string, datetime):\n return True\n\n try:\n parse_time(time_string, time_format)\n except ValueError:\n return False\n else:\n return True\n\n\ndef day_of_year(time_string):\n \"\"\"Returns the (fractional) day of year.\n\n Parameters\n ----------\n time_string : string\n A parse_time compatible string\n\n Returns\n -------\n out : float\n The fractional day of year (where Jan 1st is 1).\n\n Examples\n --------\n >>> import sunpy.time\n >>> sunpy.time.day_of_year('2012/01/01')\n 1.0\n >>> sunpy.time.day_of_year('2012/08/01')\n 214.0\n >>> sunpy.time.day_of_year('2005-08-04T00:18:02.000Z')\n 216.01252314814815\n\n \"\"\"\n SECONDS_IN_DAY = 60 * 60 * 24.0\n time = parse_time(time_string)\n time_diff = time - datetime(time.year, 1, 1, 0, 0, 0)\n return time_diff.days + time_diff.seconds / SECONDS_IN_DAY + 1\n\n\ndef break_time(t='now', time_format=''):\n \"\"\"Given a time returns a string. Useful for naming files.\"\"\"\n # TODO: should be able to handle a time range\n return parse_time(t, time_format).strftime(\"%Y%m%d_%H%M%S\")\n\n\ndef get_day(dt):\n \"\"\" Return datetime for the beginning of the day of given datetime. 
\"\"\"\n return datetime(dt.year, dt.month, dt.day)\n\n\ndef is_time_in_given_format(time_string, time_format):\n \"\"\"Tests whether a time string is formatted according to the given time\n format.\"\"\"\n try:\n datetime.strptime(time_string, time_format)\n return True\n except ValueError:\n return False\n", "path": "sunpy/time/time.py"}], "after_files": [{"content": "from __future__ import absolute_import, division, print_function\nimport re\nfrom datetime import datetime, date, time, timedelta\n\nimport numpy as np\nimport pandas\nfrom sunpy.extern import six\n\nimport astropy.time\n\n__all__ = ['find_time', 'parse_time', 'is_time',\n 'day_of_year', 'break_time', 'get_day', 'is_time_in_given_format']\n\n# Mapping of time format codes to regular expressions.\nREGEX = {\n '%Y': '(?P<year>\\d{4})',\n '%j': '(?P<dayofyear>\\d{3})',\n '%m': '(?P<month>\\d{1,2})',\n '%d': '(?P<day>\\d{1,2})',\n '%H': '(?P<hour>\\d{1,2})',\n '%M': '(?P<minute>\\d{1,2})',\n '%S': '(?P<second>\\d{1,2})',\n '%f': '(?P<microsecond>\\d+)',\n '%b': '(?P<month_str>[a-zA-Z]+)',\n}\n\nTIME_FORMAT_LIST = [\n \"%Y-%m-%dT%H:%M:%S.%f\", # Example 2007-05-04T21:08:12.999999\n \"%Y/%m/%dT%H:%M:%S.%f\", # Example 2007/05/04T21:08:12.999999\n \"%Y-%m-%dT%H:%M:%S.%fZ\", # Example 2007-05-04T21:08:12.999Z\n \"%Y-%m-%dT%H:%M:%S\", # Example 2007-05-04T21:08:12\n \"%Y/%m/%dT%H:%M:%S\", # Example 2007/05/04T21:08:12\n \"%Y%m%dT%H%M%S.%f\", # Example 20070504T210812.999999\n \"%Y%m%dT%H%M%S\", # Example 20070504T210812\n \"%Y/%m/%d %H:%M:%S\", # Example 2007/05/04 21:08:12\n \"%Y/%m/%d %H:%M\", # Example 2007/05/04 21:08\n \"%Y/%m/%d %H:%M:%S.%f\", # Example 2007/05/04 21:08:12.999999\n \"%Y-%m-%d %H:%M:%S.%f\", # Example 2007-05-04 21:08:12.999999\n \"%Y-%m-%d %H:%M:%S\", # Example 2007-05-04 21:08:12\n \"%Y-%m-%d %H:%M\", # Example 2007-05-04 21:08\n \"%Y-%b-%d %H:%M:%S\", # Example 2007-May-04 21:08:12\n \"%Y-%b-%d %H:%M\", # Example 2007-May-04 21:08\n \"%Y-%b-%d\", # Example 2007-May-04\n \"%Y-%m-%d\", # Example 2007-05-04\n \"%Y/%m/%d\", # Example 2007/05/04\n \"%d-%b-%Y\", # Example 04-May-2007\n \"%d-%b-%Y %H:%M:%S.%f\", # Example 04-May-2007 21:08:12.999999\n \"%Y%m%d_%H%M%S\", # Example 20070504_210812\n \"%Y:%j:%H:%M:%S\", # Example 2012:124:21:08:12\n \"%Y:%j:%H:%M:%S.%f\", # Example 2012:124:21:08:12.999999\n \"%Y%m%d%H%M%S\", # Example 20140101000001 (JSOC / VSO)\n \"%Y.%m.%d_%H:%M:%S_TAI\", # Example 2016.05.04_21:08:12_TAI\n]\n\n\ndef _group_or_none(match, group, fun):\n try:\n ret = match.group(group)\n except IndexError:\n return None\n else:\n return fun(ret)\n\n\ndef _n_or_eq(a, b):\n return a is None or a == b\n\n\ndef _regex_parse_time(inp, format):\n # Parser for finding out the minute value so we can adjust the string\n # from 24:00:00 to 00:00:00 the next day because strptime does not\n # understand the former.\n for key, value in six.iteritems(REGEX):\n format = format.replace(key, value)\n match = re.match(format, inp)\n if match is None:\n return None, None\n try:\n hour = match.group(\"hour\")\n except IndexError:\n return inp, timedelta(days=0)\n if match.group(\"hour\") == \"24\":\n if not all(\n _n_or_eq(_group_or_none(match, g, int), 00)\n for g in [\"minute\", \"second\", \"microsecond\"]\n ):\n raise ValueError\n from_, to = match.span(\"hour\")\n return inp[:from_] + \"00\" + inp[to:], timedelta(days=1)\n return inp, timedelta(days=0)\n\n\ndef find_time(string, format):\n \"\"\" Return iterator of occurrences of date formatted with format\n in string. 
Currently supported format codes: \"\"\"\n re_format = format\n for key, value in six.iteritems(REGEX):\n re_format = re_format.replace(key, value)\n matches = re.finditer(re_format, string)\n for match in matches:\n try:\n matchstr = string[slice(*match.span())]\n dt = datetime.strptime(matchstr, format)\n except ValueError:\n continue\n else:\n yield dt\n\n\nfind_time.__doc__ += ', '.join(list(REGEX.keys()))\n\n\ndef _iter_empty(iter):\n try:\n next(iter)\n except StopIteration:\n return True\n return False\n\n\ndef _astropy_time(time):\n \"\"\"\n Return an `~astropy.time.Time` instance, running it through `~sunpy.time.parse_time` if needed\n \"\"\"\n return time if isinstance(time, astropy.time.Time) else astropy.time.Time(parse_time(time))\n\n\ndef _parse_dt64(dt):\n \"\"\"\n Parse a single numpy datetime64 object\n \"\"\"\n # Convert to microsecond precision because datetime cannot handle nanoseconds\n return dt.astype('M8[us]').astype(datetime)\n\n\ndef parse_time(time_string, time_format='', **kwargs):\n \"\"\"Given a time string will parse and return a datetime object.\n Similar to the anytim function in IDL.\n utime -- Time since epoch 1 Jan 1979\n\n Parameters\n ----------\n time_string : [ int, float, time_string, datetime ]\n Date to parse which can be either time_string, int, datetime object.\n time_format : [ basestring, utime, datetime ]\n Specifies the format user has provided the time_string in.\n\n Returns\n -------\n out : datetime\n DateTime corresponding to input date string\n\n Note:\n If time_string is an instance of float, then it is assumed to be in utime format.\n\n Examples\n --------\n >>> import sunpy.time\n >>> sunpy.time.parse_time('2012/08/01')\n datetime.datetime(2012, 8, 1, 0, 0)\n >>> sunpy.time.parse_time('2005-08-04T00:01:02.000Z')\n datetime.datetime(2005, 8, 4, 0, 1, 2)\n \"\"\"\n if isinstance(time_string, pandas.Timestamp):\n return time_string.to_pydatetime()\n elif isinstance(time_string, pandas.Series) and 'datetime64' in str(time_string.dtype):\n return np.array([dt.to_pydatetime() for dt in time_string])\n elif isinstance(time_string, pandas.DatetimeIndex):\n return time_string._mpl_repr()\n elif isinstance(time_string, datetime) or time_format == 'datetime':\n return time_string\n elif isinstance(time_string, date):\n return datetime.combine(time_string, time())\n elif isinstance(time_string, tuple):\n return datetime(*time_string)\n elif time_format == 'utime' or isinstance(time_string, (int, float)):\n return datetime(1979, 1, 1) + timedelta(0, time_string)\n elif isinstance(time_string, np.datetime64):\n return _parse_dt64(time_string)\n elif isinstance(time_string, np.ndarray) and 'datetime64' in str(time_string.dtype):\n return np.array([_parse_dt64(dt) for dt in time_string])\n elif time_string is 'now':\n return datetime.utcnow()\n elif isinstance(time_string, astropy.time.Time):\n return time_string.datetime\n else:\n # remove trailing zeros and the final dot to allow any\n # number of zeros. This solves issue #289\n if '.' 
in time_string:\n time_string = time_string.rstrip(\"0\").rstrip(\".\")\n for time_format in TIME_FORMAT_LIST:\n try:\n try:\n ts, time_delta = _regex_parse_time(time_string,\n time_format)\n except TypeError:\n break\n if ts is None:\n continue\n return datetime.strptime(ts, time_format) + time_delta\n except ValueError:\n pass\n\n time_string_parse_format = kwargs.pop('_time_string_parse_format', None)\n if time_string_parse_format is not None:\n ts, time_delta = _regex_parse_time(time_string,\n time_string_parse_format)\n if ts and time_delta:\n return datetime.strptime(ts, time_string_parse_format) + time_delta\n else:\n return datetime.strptime(time_string, time_string_parse_format)\n raise ValueError(\"'{tstr!s}' is not a valid time string!\".format(tstr=time_string))\n\n\ndef is_time(time_string, time_format=''):\n \"\"\"\n Returns true if the input is a valid date/time representation\n\n Parameters\n ----------\n time_string : [ int, float, time_string, datetime ]\n Date to parse which can be either time_string, int, datetime object.\n time_format : [ basestring, utime, datetime ]\n Specifies the format user has provided the time_string in.\n\n Returns\n -------\n out : bool\n True if can be parsed by parse_time\n\n Notes\n -----\n If time_string is an instance of float, then it is assumed to be in\n unix time format.\n\n Examples\n --------\n >>> import sunpy.time\n >>> sunpy.time.parse_time('2012/08/01')\n datetime.datetime(2012, 8, 1, 0, 0)\n >>> sunpy.time.parse_time('2005-08-04T00:01:02.000Z')\n datetime.datetime(2005, 8, 4, 0, 1, 2)\n\n .. todo::\n\n add ability to parse tai (International Atomic Time seconds\n since Jan 1, 1958)\n\n \"\"\"\n if time_string is None:\n return False\n elif isinstance(time_string, datetime):\n return True\n\n try:\n parse_time(time_string, time_format)\n except ValueError:\n return False\n else:\n return True\n\n\ndef day_of_year(time_string):\n \"\"\"Returns the (fractional) day of year.\n\n Parameters\n ----------\n time_string : string\n A parse_time compatible string\n\n Returns\n -------\n out : float\n The fractional day of year (where Jan 1st is 1).\n\n Examples\n --------\n >>> import sunpy.time\n >>> sunpy.time.day_of_year('2012/01/01')\n 1.0\n >>> sunpy.time.day_of_year('2012/08/01')\n 214.0\n >>> sunpy.time.day_of_year('2005-08-04T00:18:02.000Z')\n 216.01252314814815\n\n \"\"\"\n SECONDS_IN_DAY = 60 * 60 * 24.0\n time = parse_time(time_string)\n time_diff = time - datetime(time.year, 1, 1, 0, 0, 0)\n return time_diff.days + time_diff.seconds / SECONDS_IN_DAY + 1\n\n\ndef break_time(t='now', time_format=''):\n \"\"\"Given a time returns a string. Useful for naming files.\"\"\"\n # TODO: should be able to handle a time range\n return parse_time(t, time_format).strftime(\"%Y%m%d_%H%M%S\")\n\n\ndef get_day(dt):\n \"\"\" Return datetime for the beginning of the day of given datetime. \"\"\"\n return datetime(dt.year, dt.month, dt.day)\n\n\ndef is_time_in_given_format(time_string, time_format):\n \"\"\"Tests whether a time string is formatted according to the given time\n format.\"\"\"\n try:\n datetime.strptime(time_string, time_format)\n return True\n except ValueError:\n return False\n", "path": "sunpy/time/time.py"}]} |
gh_patches_debug_1221 | rasdani/github-patches | git_diff | django-cms__django-filer-1378 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Field verbose_name should use gettext_lazy
Hi,
model field verbose_names should use gettext_lazy, because otherwise migrations are generated based on the user's language settings. 
https://github.com/django-cms/django-filer/blob/master/filer/models/foldermodels.py#L9
This is a migration generated after upgrading to django-filer 3.0:

Thanks.
--- END ISSUE ---
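
For illustration, a minimal sketch (not part of django-filer; the app label and model names are made up) of the difference the issue describes: `gettext` evaluates the translation at import time under whatever locale is active, so the already-translated text is what `makemigrations` serializes, while `gettext_lazy` returns a lazy proxy that keeps the original string, so migration output no longer depends on the user's language.

```python
# Minimal sketch, assuming a Django project where the "demo" app is installed.
from django.db import models
from django.utils.translation import gettext, gettext_lazy as _


class EagerLabel(models.Model):
    # Evaluated immediately at import: if the active language is e.g. Czech,
    # the Czech text is what ends up serialized in the generated migration.
    name = models.CharField(gettext("name"), max_length=255)

    class Meta:
        app_label = "demo"


class LazyLabel(models.Model):
    # Lazy proxy: evaluation is deferred until the label is rendered, so
    # makemigrations records the untranslated "name" regardless of locale.
    name = models.CharField(_("name"), max_length=255)

    class Meta:
        app_label = "demo"
```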
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `filer/models/foldermodels.py`
Content:
```
1 from django.conf import settings
2 from django.contrib.auth import models as auth_models
3 from django.core.exceptions import ValidationError
4 from django.db import models
5 from django.db.models import Q
6 from django.urls import reverse
7 from django.utils.functional import cached_property
8 from django.utils.html import format_html, format_html_join
9 from django.utils.translation import gettext as _
10
11 from .. import settings as filer_settings
12 from . import mixins
13
14
15 class FolderPermissionManager(models.Manager):
16 """
17 These methods are called by introspection from "has_generic_permission" on
18 the folder model.
19 """
20 def get_read_id_list(self, user):
21 """
22 Give a list of a Folders where the user has read rights or the string
23 "All" if the user has all rights.
24 """
25 return self.__get_id_list(user, "can_read")
26
27 def get_edit_id_list(self, user):
28 return self.__get_id_list(user, "can_edit")
29
30 def get_add_children_id_list(self, user):
31 return self.__get_id_list(user, "can_add_children")
32
33 def __get_id_list(self, user, attr):
34 if user.is_superuser or not filer_settings.FILER_ENABLE_PERMISSIONS:
35 return 'All'
36 allow_list = set()
37 deny_list = set()
38 group_ids = user.groups.all().values_list('id', flat=True)
39 q = Q(user=user) | Q(group__in=group_ids) | Q(everybody=True)
40 perms = self.filter(q)
41
42 for perm in perms:
43 p = getattr(perm, attr)
44
45 if p is None:
46 # Not allow nor deny, we continue with the next permission
47 continue
48
49 if not perm.folder:
50 assert perm.type == FolderPermission.ALL
51
52 if p == FolderPermission.ALLOW:
53 allow_list.update(Folder.objects.all().values_list('id', flat=True))
54 else:
55 deny_list.update(Folder.objects.all().values_list('id', flat=True))
56
57 continue
58
59 folder_id = perm.folder.id
60
61 if p == FolderPermission.ALLOW:
62 allow_list.add(folder_id)
63 else:
64 deny_list.add(folder_id)
65
66 if perm.type in [FolderPermission.ALL, FolderPermission.CHILDREN]:
67 if p == FolderPermission.ALLOW:
68 allow_list.update(perm.folder.get_descendants_ids())
69 else:
70 deny_list.update(perm.folder.get_descendants_ids())
71
72 # Deny has precedence over allow
73 return allow_list - deny_list
74
75
76 class Folder(models.Model, mixins.IconsMixin):
77 """
78 Represents a Folder that things (files) can be put into. Folders are *NOT*
79 mirrored in the Filesystem and can have any unicode chars as their name.
80 Other models may attach to a folder with a ForeignKey. If the related name
81 ends with "_files" they will automatically be listed in the
82 folder.files list along with all the other models that link to the folder
83 in this way. Make sure the linked models obey the AbstractFile interface
84 (Duck Type).
85 """
86 file_type = 'Folder'
87 is_root = False
88 can_have_subfolders = True
89 _icon = 'plainfolder'
90
91 parent = models.ForeignKey(
92 'self',
93 verbose_name=_('parent'),
94 null=True,
95 blank=True,
96 related_name='children',
97 on_delete=models.CASCADE,
98 )
99
100 name = models.CharField(
101 _('name'),
102 max_length=255,
103 )
104
105 owner = models.ForeignKey(
106 getattr(settings, 'AUTH_USER_MODEL', 'auth.User'),
107 verbose_name=_('owner'),
108 related_name='filer_owned_folders',
109 on_delete=models.SET_NULL,
110 null=True,
111 blank=True,
112 )
113
114 uploaded_at = models.DateTimeField(
115 _('uploaded at'),
116 auto_now_add=True,
117 )
118
119 created_at = models.DateTimeField(
120 _('created at'),
121 auto_now_add=True,
122 )
123
124 modified_at = models.DateTimeField(
125 _('modified at'),
126 auto_now=True,
127 )
128
129 class Meta:
130 unique_together = (('parent', 'name'),)
131 ordering = ('name',)
132 permissions = (("can_use_directory_listing",
133 "Can use directory listing"),)
134 app_label = 'filer'
135 verbose_name = _("Folder")
136 verbose_name_plural = _("Folders")
137
138 def __str__(self):
139 return self.pretty_logical_path
140
141 def __repr__(self):
142 return f'<{self.__class__.__name__}(pk={self.pk}): {self.pretty_logical_path}>'
143
144 @property
145 def file_count(self):
146 if not hasattr(self, '_file_count_cache'):
147 self._file_count_cache = self.files.count()
148 return self._file_count_cache
149
150 @property
151 def children_count(self):
152 if not hasattr(self, '_children_count_cache'):
153 self._children_count_cache = self.children.count()
154 return self._children_count_cache
155
156 @property
157 def item_count(self):
158 return self.file_count + self.children_count
159
160 @property
161 def files(self):
162 return self.all_files.all()
163
164 @cached_property
165 def logical_path(self):
166 """
167 Gets logical path of the folder in the tree structure.
168 Used to generate breadcrumbs
169 """
170 folder_path = []
171 if self.parent:
172 folder_path.extend(self.parent.logical_path)
173 folder_path.append(self.parent)
174 return folder_path
175
176 def get_descendants_ids(self):
177 desc = []
178 for child in self.children.all():
179 desc.append(child.id)
180 desc.extend(child.get_descendants_ids())
181 return desc
182
183 @property
184 def pretty_logical_path(self):
185 return format_html('/{}', format_html_join('/', '{0}', ((f.name,) for f in self.logical_path + [self])))
186
187 def has_edit_permission(self, request):
188 return request.user.has_perm("filer.change_folder") and self.has_generic_permission(request, 'edit')
189
190 def has_read_permission(self, request):
191 return self.has_generic_permission(request, 'read')
192
193 def has_add_children_permission(self, request):
194 return request.user.has_perm("filer.change_folder") and self.has_generic_permission(request, 'add_children')
195
196 def has_generic_permission(self, request, permission_type):
197 """
198 Return true if the current user has permission on this
199 folder. Return the string 'ALL' if the user has all rights.
200 """
201 user = request.user
202 if not user.is_authenticated:
203 return False
204 elif user.is_superuser:
205 return True
206 elif user == self.owner:
207 return True
208 else:
209 if not hasattr(self, "permission_cache") or\
210 permission_type not in self.permission_cache or \
211 request.user.pk != self.permission_cache['user'].pk:
212 if not hasattr(self, "permission_cache") or request.user.pk != self.permission_cache['user'].pk:
213 self.permission_cache = {
214 'user': request.user,
215 }
216
217 # This calls methods on the manager i.e. get_read_id_list()
218 func = getattr(FolderPermission.objects,
219 "get_%s_id_list" % permission_type)
220 permission = func(user)
221 if permission == "All":
222 self.permission_cache[permission_type] = True
223 self.permission_cache['read'] = True
224 self.permission_cache['edit'] = True
225 self.permission_cache['add_children'] = True
226 else:
227 self.permission_cache[permission_type] = self.id in permission
228 return self.permission_cache[permission_type]
229
230 def get_admin_change_url(self):
231 return reverse('admin:filer_folder_change', args=(self.id,))
232
233 def get_admin_directory_listing_url_path(self):
234 return reverse('admin:filer-directory_listing', args=(self.id,))
235
236 def get_admin_delete_url(self):
237 return reverse(
238 f'admin:{self._meta.app_label}_{self._meta.model_name}_delete',
239 args=(self.pk,)
240 )
241
242 def contains_folder(self, folder_name):
243 try:
244 self.children.get(name=folder_name)
245 return True
246 except Folder.DoesNotExist:
247 return False
248
249
250 class FolderPermission(models.Model):
251 ALL = 0
252 THIS = 1
253 CHILDREN = 2
254
255 ALLOW = 1
256 DENY = 0
257
258 TYPES = [
259 (ALL, _("all items")),
260 (THIS, _("this item only")),
261 (CHILDREN, _("this item and all children")),
262 ]
263
264 PERMISIONS = [
265 (None, _("inherit")),
266 (ALLOW, _("allow")),
267 (DENY, _("deny")),
268 ]
269
270 folder = models.ForeignKey(
271 Folder,
272 verbose_name=("folder"),
273 null=True,
274 blank=True,
275 on_delete=models.CASCADE,
276 )
277
278 type = models.SmallIntegerField(
279 _("type"),
280 choices=TYPES,
281 default=ALL,
282 )
283
284 user = models.ForeignKey(
285 getattr(settings, 'AUTH_USER_MODEL', 'auth.User'),
286 related_name="filer_folder_permissions",
287 on_delete=models.SET_NULL,
288 verbose_name=_("user"),
289 blank=True,
290 null=True,
291 )
292
293 group = models.ForeignKey(
294 auth_models.Group,
295 related_name="filer_folder_permissions",
296 verbose_name=_("group"),
297 blank=True,
298 null=True,
299 on_delete=models.CASCADE,
300 )
301
302 everybody = models.BooleanField(
303 _("everybody"),
304 default=False,
305 )
306
307 can_read = models.SmallIntegerField(
308 _("can read"),
309 choices=PERMISIONS,
310 blank=True,
311 null=True,
312 default=None,
313 )
314
315 can_edit = models.SmallIntegerField(
316 _("can edit"),
317 choices=PERMISIONS,
318 blank=True,
319 null=True,
320 default=None,
321 )
322
323 can_add_children = models.SmallIntegerField(
324 _("can add children"),
325 choices=PERMISIONS,
326 blank=True,
327 null=True,
328 default=None,
329 )
330
331 class Meta:
332 verbose_name = _('folder permission')
333 verbose_name_plural = _('folder permissions')
334 app_label = 'filer'
335
336 objects = FolderPermissionManager()
337
338 def __str__(self):
339 return self.pretty_logical_path
340
341 def __repr__(self):
342 return f'<{self.__class__.__name__}(pk={self.pk}): folder="{self.pretty_logical_path}", ' \
343 'who="{self.who}", what="{self.what}">'
344
345 def clean(self):
346 if self.type == self.ALL and self.folder:
347 raise ValidationError('Folder cannot be selected with type "all items".')
348 if self.type != self.ALL and not self.folder:
349 raise ValidationError('Folder has to be selected when type is not "all items".')
350 if self.everybody and (self.user or self.group):
351 raise ValidationError('User or group cannot be selected together with "everybody".')
352 if not self.user and not self.group and not self.everybody:
353 raise ValidationError('At least one of user, group, or "everybody" has to be selected.')
354
355 @cached_property
356 def pretty_logical_path(self):
357 if self.folder:
358 return self.folder.pretty_logical_path
359 return _("All Folders")
360
361 pretty_logical_path.short_description = _("Logical Path")
362
363 @cached_property
364 def who(self):
365 """
366 Returns a human readable string of *who* can interact with a given folder
367 """
368 parts = []
369 if self.user:
370 parts.append(_("User: {user}").format(user=self.user))
371 if self.group:
372 parts.append(_("Group: {group}").format(group=self.group))
373 if self.everybody:
374 parts.append(_("Everybody"))
375 if parts:
376 return format_html_join("; ", '{}', ((p,) for p in parts))
377 return '–'
378
379 who.short_description = _("Who")
380
381 @cached_property
382 def what(self):
383 """
384 Returns a human readable string of *what* a user/group/everybody can do with a given folder
385 """
386 mapping = {
387 'can_edit': _("Edit"),
388 'can_read': _("Read"),
389 'can_add_children': _("Add children"),
390 }
391 perms = []
392 for key, text in mapping.items():
393 perm = getattr(self, key)
394 if perm == self.ALLOW:
395 perms.append(text)
396 elif perm == self.DENY:
397 perms.append('\u0336'.join(text) + '\u0336')
398 return format_html_join(", ", '{}', ((p,) for p in perms))
399
400 what.short_description = _("What")
401
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/filer/models/foldermodels.py b/filer/models/foldermodels.py
--- a/filer/models/foldermodels.py
+++ b/filer/models/foldermodels.py
@@ -6,7 +6,7 @@
from django.urls import reverse
from django.utils.functional import cached_property
from django.utils.html import format_html, format_html_join
-from django.utils.translation import gettext as _
+from django.utils.translation import gettext_lazy as _
from .. import settings as filer_settings
from . import mixins
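
A possible regression check for this kind of change is sketched below (a hypothetical test, not taken from the repository; it assumes a configured Django test environment with filer installed): translatable labels should stay lazy, because a plain str produced by non-lazy `gettext` would again make `makemigrations` output depend on the active locale.

```python
# Hypothetical pytest-style check; Promise is the base class of Django's
# lazy translation objects, so a plain gettext() result would fail this.
from django.utils.functional import Promise


def test_folder_verbose_names_are_lazy():
    from filer.models.foldermodels import Folder

    for field_name in ("name", "owner", "uploaded_at", "created_at"):
        field = Folder._meta.get_field(field_name)
        assert isinstance(field.verbose_name, Promise)
```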
| {"golden_diff": "diff --git a/filer/models/foldermodels.py b/filer/models/foldermodels.py\n--- a/filer/models/foldermodels.py\n+++ b/filer/models/foldermodels.py\n@@ -6,7 +6,7 @@\n from django.urls import reverse\n from django.utils.functional import cached_property\n from django.utils.html import format_html, format_html_join\n-from django.utils.translation import gettext as _\n+from django.utils.translation import gettext_lazy as _\n \n from .. import settings as filer_settings\n from . import mixins\n", "issue": "Field verbose_name should use gettext_lazy\nHi, \r\nmodel field verbose_names should use gettext_lazy, because it creates migrations based on user language settings. \r\n\r\nhttps://github.com/django-cms/django-filer/blob/master/filer/models/foldermodels.py#L9\r\n\r\nThis is migration generated after upgrade to django-filer 3.0\r\n\r\n\r\nThanks.\nField verbose_name should use gettext_lazy\nHi, \r\nmodel field verbose_names should use gettext_lazy, because it creates migrations based on user language settings. \r\n\r\nhttps://github.com/django-cms/django-filer/blob/master/filer/models/foldermodels.py#L9\r\n\r\nThis is migration generated after upgrade to django-filer 3.0\r\n\r\n\r\nThanks.\n", "before_files": [{"content": "from django.conf import settings\nfrom django.contrib.auth import models as auth_models\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.db.models import Q\nfrom django.urls import reverse\nfrom django.utils.functional import cached_property\nfrom django.utils.html import format_html, format_html_join\nfrom django.utils.translation import gettext as _\n\nfrom .. import settings as filer_settings\nfrom . import mixins\n\n\nclass FolderPermissionManager(models.Manager):\n \"\"\"\n These methods are called by introspection from \"has_generic_permission\" on\n the folder model.\n \"\"\"\n def get_read_id_list(self, user):\n \"\"\"\n Give a list of a Folders where the user has read rights or the string\n \"All\" if the user has all rights.\n \"\"\"\n return self.__get_id_list(user, \"can_read\")\n\n def get_edit_id_list(self, user):\n return self.__get_id_list(user, \"can_edit\")\n\n def get_add_children_id_list(self, user):\n return self.__get_id_list(user, \"can_add_children\")\n\n def __get_id_list(self, user, attr):\n if user.is_superuser or not filer_settings.FILER_ENABLE_PERMISSIONS:\n return 'All'\n allow_list = set()\n deny_list = set()\n group_ids = user.groups.all().values_list('id', flat=True)\n q = Q(user=user) | Q(group__in=group_ids) | Q(everybody=True)\n perms = self.filter(q)\n\n for perm in perms:\n p = getattr(perm, attr)\n\n if p is None:\n # Not allow nor deny, we continue with the next permission\n continue\n\n if not perm.folder:\n assert perm.type == FolderPermission.ALL\n\n if p == FolderPermission.ALLOW:\n allow_list.update(Folder.objects.all().values_list('id', flat=True))\n else:\n deny_list.update(Folder.objects.all().values_list('id', flat=True))\n\n continue\n\n folder_id = perm.folder.id\n\n if p == FolderPermission.ALLOW:\n allow_list.add(folder_id)\n else:\n deny_list.add(folder_id)\n\n if perm.type in [FolderPermission.ALL, FolderPermission.CHILDREN]:\n if p == FolderPermission.ALLOW:\n allow_list.update(perm.folder.get_descendants_ids())\n else:\n deny_list.update(perm.folder.get_descendants_ids())\n\n # Deny has precedence over allow\n return allow_list - deny_list\n\n\nclass Folder(models.Model, mixins.IconsMixin):\n \"\"\"\n Represents a Folder that things (files) can be put 
into. Folders are *NOT*\n mirrored in the Filesystem and can have any unicode chars as their name.\n Other models may attach to a folder with a ForeignKey. If the related name\n ends with \"_files\" they will automatically be listed in the\n folder.files list along with all the other models that link to the folder\n in this way. Make sure the linked models obey the AbstractFile interface\n (Duck Type).\n \"\"\"\n file_type = 'Folder'\n is_root = False\n can_have_subfolders = True\n _icon = 'plainfolder'\n\n parent = models.ForeignKey(\n 'self',\n verbose_name=_('parent'),\n null=True,\n blank=True,\n related_name='children',\n on_delete=models.CASCADE,\n )\n\n name = models.CharField(\n _('name'),\n max_length=255,\n )\n\n owner = models.ForeignKey(\n getattr(settings, 'AUTH_USER_MODEL', 'auth.User'),\n verbose_name=_('owner'),\n related_name='filer_owned_folders',\n on_delete=models.SET_NULL,\n null=True,\n blank=True,\n )\n\n uploaded_at = models.DateTimeField(\n _('uploaded at'),\n auto_now_add=True,\n )\n\n created_at = models.DateTimeField(\n _('created at'),\n auto_now_add=True,\n )\n\n modified_at = models.DateTimeField(\n _('modified at'),\n auto_now=True,\n )\n\n class Meta:\n unique_together = (('parent', 'name'),)\n ordering = ('name',)\n permissions = ((\"can_use_directory_listing\",\n \"Can use directory listing\"),)\n app_label = 'filer'\n verbose_name = _(\"Folder\")\n verbose_name_plural = _(\"Folders\")\n\n def __str__(self):\n return self.pretty_logical_path\n\n def __repr__(self):\n return f'<{self.__class__.__name__}(pk={self.pk}): {self.pretty_logical_path}>'\n\n @property\n def file_count(self):\n if not hasattr(self, '_file_count_cache'):\n self._file_count_cache = self.files.count()\n return self._file_count_cache\n\n @property\n def children_count(self):\n if not hasattr(self, '_children_count_cache'):\n self._children_count_cache = self.children.count()\n return self._children_count_cache\n\n @property\n def item_count(self):\n return self.file_count + self.children_count\n\n @property\n def files(self):\n return self.all_files.all()\n\n @cached_property\n def logical_path(self):\n \"\"\"\n Gets logical path of the folder in the tree structure.\n Used to generate breadcrumbs\n \"\"\"\n folder_path = []\n if self.parent:\n folder_path.extend(self.parent.logical_path)\n folder_path.append(self.parent)\n return folder_path\n\n def get_descendants_ids(self):\n desc = []\n for child in self.children.all():\n desc.append(child.id)\n desc.extend(child.get_descendants_ids())\n return desc\n\n @property\n def pretty_logical_path(self):\n return format_html('/{}', format_html_join('/', '{0}', ((f.name,) for f in self.logical_path + [self])))\n\n def has_edit_permission(self, request):\n return request.user.has_perm(\"filer.change_folder\") and self.has_generic_permission(request, 'edit')\n\n def has_read_permission(self, request):\n return self.has_generic_permission(request, 'read')\n\n def has_add_children_permission(self, request):\n return request.user.has_perm(\"filer.change_folder\") and self.has_generic_permission(request, 'add_children')\n\n def has_generic_permission(self, request, permission_type):\n \"\"\"\n Return true if the current user has permission on this\n folder. 
Return the string 'ALL' if the user has all rights.\n \"\"\"\n user = request.user\n if not user.is_authenticated:\n return False\n elif user.is_superuser:\n return True\n elif user == self.owner:\n return True\n else:\n if not hasattr(self, \"permission_cache\") or\\\n permission_type not in self.permission_cache or \\\n request.user.pk != self.permission_cache['user'].pk:\n if not hasattr(self, \"permission_cache\") or request.user.pk != self.permission_cache['user'].pk:\n self.permission_cache = {\n 'user': request.user,\n }\n\n # This calls methods on the manager i.e. get_read_id_list()\n func = getattr(FolderPermission.objects,\n \"get_%s_id_list\" % permission_type)\n permission = func(user)\n if permission == \"All\":\n self.permission_cache[permission_type] = True\n self.permission_cache['read'] = True\n self.permission_cache['edit'] = True\n self.permission_cache['add_children'] = True\n else:\n self.permission_cache[permission_type] = self.id in permission\n return self.permission_cache[permission_type]\n\n def get_admin_change_url(self):\n return reverse('admin:filer_folder_change', args=(self.id,))\n\n def get_admin_directory_listing_url_path(self):\n return reverse('admin:filer-directory_listing', args=(self.id,))\n\n def get_admin_delete_url(self):\n return reverse(\n f'admin:{self._meta.app_label}_{self._meta.model_name}_delete',\n args=(self.pk,)\n )\n\n def contains_folder(self, folder_name):\n try:\n self.children.get(name=folder_name)\n return True\n except Folder.DoesNotExist:\n return False\n\n\nclass FolderPermission(models.Model):\n ALL = 0\n THIS = 1\n CHILDREN = 2\n\n ALLOW = 1\n DENY = 0\n\n TYPES = [\n (ALL, _(\"all items\")),\n (THIS, _(\"this item only\")),\n (CHILDREN, _(\"this item and all children\")),\n ]\n\n PERMISIONS = [\n (None, _(\"inherit\")),\n (ALLOW, _(\"allow\")),\n (DENY, _(\"deny\")),\n ]\n\n folder = models.ForeignKey(\n Folder,\n verbose_name=(\"folder\"),\n null=True,\n blank=True,\n on_delete=models.CASCADE,\n )\n\n type = models.SmallIntegerField(\n _(\"type\"),\n choices=TYPES,\n default=ALL,\n )\n\n user = models.ForeignKey(\n getattr(settings, 'AUTH_USER_MODEL', 'auth.User'),\n related_name=\"filer_folder_permissions\",\n on_delete=models.SET_NULL,\n verbose_name=_(\"user\"),\n blank=True,\n null=True,\n )\n\n group = models.ForeignKey(\n auth_models.Group,\n related_name=\"filer_folder_permissions\",\n verbose_name=_(\"group\"),\n blank=True,\n null=True,\n on_delete=models.CASCADE,\n )\n\n everybody = models.BooleanField(\n _(\"everybody\"),\n default=False,\n )\n\n can_read = models.SmallIntegerField(\n _(\"can read\"),\n choices=PERMISIONS,\n blank=True,\n null=True,\n default=None,\n )\n\n can_edit = models.SmallIntegerField(\n _(\"can edit\"),\n choices=PERMISIONS,\n blank=True,\n null=True,\n default=None,\n )\n\n can_add_children = models.SmallIntegerField(\n _(\"can add children\"),\n choices=PERMISIONS,\n blank=True,\n null=True,\n default=None,\n )\n\n class Meta:\n verbose_name = _('folder permission')\n verbose_name_plural = _('folder permissions')\n app_label = 'filer'\n\n objects = FolderPermissionManager()\n\n def __str__(self):\n return self.pretty_logical_path\n\n def __repr__(self):\n return f'<{self.__class__.__name__}(pk={self.pk}): folder=\"{self.pretty_logical_path}\", ' \\\n 'who=\"{self.who}\", what=\"{self.what}\">'\n\n def clean(self):\n if self.type == self.ALL and self.folder:\n raise ValidationError('Folder cannot be selected with type \"all items\".')\n if self.type != self.ALL and not self.folder:\n raise 
ValidationError('Folder has to be selected when type is not \"all items\".')\n if self.everybody and (self.user or self.group):\n raise ValidationError('User or group cannot be selected together with \"everybody\".')\n if not self.user and not self.group and not self.everybody:\n raise ValidationError('At least one of user, group, or \"everybody\" has to be selected.')\n\n @cached_property\n def pretty_logical_path(self):\n if self.folder:\n return self.folder.pretty_logical_path\n return _(\"All Folders\")\n\n pretty_logical_path.short_description = _(\"Logical Path\")\n\n @cached_property\n def who(self):\n \"\"\"\n Returns a human readable string of *who* can interact with a given folder\n \"\"\"\n parts = []\n if self.user:\n parts.append(_(\"User: {user}\").format(user=self.user))\n if self.group:\n parts.append(_(\"Group: {group}\").format(group=self.group))\n if self.everybody:\n parts.append(_(\"Everybody\"))\n if parts:\n return format_html_join(\"; \", '{}', ((p,) for p in parts))\n return '\u2013'\n\n who.short_description = _(\"Who\")\n\n @cached_property\n def what(self):\n \"\"\"\n Returns a human readable string of *what* a user/group/everybody can do with a given folder\n \"\"\"\n mapping = {\n 'can_edit': _(\"Edit\"),\n 'can_read': _(\"Read\"),\n 'can_add_children': _(\"Add children\"),\n }\n perms = []\n for key, text in mapping.items():\n perm = getattr(self, key)\n if perm == self.ALLOW:\n perms.append(text)\n elif perm == self.DENY:\n perms.append('\\u0336'.join(text) + '\\u0336')\n return format_html_join(\", \", '{}', ((p,) for p in perms))\n\n what.short_description = _(\"What\")\n", "path": "filer/models/foldermodels.py"}], "after_files": [{"content": "from django.conf import settings\nfrom django.contrib.auth import models as auth_models\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.db.models import Q\nfrom django.urls import reverse\nfrom django.utils.functional import cached_property\nfrom django.utils.html import format_html, format_html_join\nfrom django.utils.translation import gettext_lazy as _\n\nfrom .. import settings as filer_settings\nfrom . 
import mixins\n\n\nclass FolderPermissionManager(models.Manager):\n \"\"\"\n These methods are called by introspection from \"has_generic_permission\" on\n the folder model.\n \"\"\"\n def get_read_id_list(self, user):\n \"\"\"\n Give a list of a Folders where the user has read rights or the string\n \"All\" if the user has all rights.\n \"\"\"\n return self.__get_id_list(user, \"can_read\")\n\n def get_edit_id_list(self, user):\n return self.__get_id_list(user, \"can_edit\")\n\n def get_add_children_id_list(self, user):\n return self.__get_id_list(user, \"can_add_children\")\n\n def __get_id_list(self, user, attr):\n if user.is_superuser or not filer_settings.FILER_ENABLE_PERMISSIONS:\n return 'All'\n allow_list = set()\n deny_list = set()\n group_ids = user.groups.all().values_list('id', flat=True)\n q = Q(user=user) | Q(group__in=group_ids) | Q(everybody=True)\n perms = self.filter(q)\n\n for perm in perms:\n p = getattr(perm, attr)\n\n if p is None:\n # Not allow nor deny, we continue with the next permission\n continue\n\n if not perm.folder:\n assert perm.type == FolderPermission.ALL\n\n if p == FolderPermission.ALLOW:\n allow_list.update(Folder.objects.all().values_list('id', flat=True))\n else:\n deny_list.update(Folder.objects.all().values_list('id', flat=True))\n\n continue\n\n folder_id = perm.folder.id\n\n if p == FolderPermission.ALLOW:\n allow_list.add(folder_id)\n else:\n deny_list.add(folder_id)\n\n if perm.type in [FolderPermission.ALL, FolderPermission.CHILDREN]:\n if p == FolderPermission.ALLOW:\n allow_list.update(perm.folder.get_descendants_ids())\n else:\n deny_list.update(perm.folder.get_descendants_ids())\n\n # Deny has precedence over allow\n return allow_list - deny_list\n\n\nclass Folder(models.Model, mixins.IconsMixin):\n \"\"\"\n Represents a Folder that things (files) can be put into. Folders are *NOT*\n mirrored in the Filesystem and can have any unicode chars as their name.\n Other models may attach to a folder with a ForeignKey. If the related name\n ends with \"_files\" they will automatically be listed in the\n folder.files list along with all the other models that link to the folder\n in this way. 
Make sure the linked models obey the AbstractFile interface\n (Duck Type).\n \"\"\"\n file_type = 'Folder'\n is_root = False\n can_have_subfolders = True\n _icon = 'plainfolder'\n\n parent = models.ForeignKey(\n 'self',\n verbose_name=_('parent'),\n null=True,\n blank=True,\n related_name='children',\n on_delete=models.CASCADE,\n )\n\n name = models.CharField(\n _('name'),\n max_length=255,\n )\n\n owner = models.ForeignKey(\n getattr(settings, 'AUTH_USER_MODEL', 'auth.User'),\n verbose_name=_('owner'),\n related_name='filer_owned_folders',\n on_delete=models.SET_NULL,\n null=True,\n blank=True,\n )\n\n uploaded_at = models.DateTimeField(\n _('uploaded at'),\n auto_now_add=True,\n )\n\n created_at = models.DateTimeField(\n _('created at'),\n auto_now_add=True,\n )\n\n modified_at = models.DateTimeField(\n _('modified at'),\n auto_now=True,\n )\n\n class Meta:\n unique_together = (('parent', 'name'),)\n ordering = ('name',)\n permissions = ((\"can_use_directory_listing\",\n \"Can use directory listing\"),)\n app_label = 'filer'\n verbose_name = _(\"Folder\")\n verbose_name_plural = _(\"Folders\")\n\n def __str__(self):\n return self.pretty_logical_path\n\n def __repr__(self):\n return f'<{self.__class__.__name__}(pk={self.pk}): {self.pretty_logical_path}>'\n\n @property\n def file_count(self):\n if not hasattr(self, '_file_count_cache'):\n self._file_count_cache = self.files.count()\n return self._file_count_cache\n\n @property\n def children_count(self):\n if not hasattr(self, '_children_count_cache'):\n self._children_count_cache = self.children.count()\n return self._children_count_cache\n\n @property\n def item_count(self):\n return self.file_count + self.children_count\n\n @property\n def files(self):\n return self.all_files.all()\n\n @cached_property\n def logical_path(self):\n \"\"\"\n Gets logical path of the folder in the tree structure.\n Used to generate breadcrumbs\n \"\"\"\n folder_path = []\n if self.parent:\n folder_path.extend(self.parent.logical_path)\n folder_path.append(self.parent)\n return folder_path\n\n def get_descendants_ids(self):\n desc = []\n for child in self.children.all():\n desc.append(child.id)\n desc.extend(child.get_descendants_ids())\n return desc\n\n @property\n def pretty_logical_path(self):\n return format_html('/{}', format_html_join('/', '{0}', ((f.name,) for f in self.logical_path + [self])))\n\n def has_edit_permission(self, request):\n return request.user.has_perm(\"filer.change_folder\") and self.has_generic_permission(request, 'edit')\n\n def has_read_permission(self, request):\n return self.has_generic_permission(request, 'read')\n\n def has_add_children_permission(self, request):\n return request.user.has_perm(\"filer.change_folder\") and self.has_generic_permission(request, 'add_children')\n\n def has_generic_permission(self, request, permission_type):\n \"\"\"\n Return true if the current user has permission on this\n folder. Return the string 'ALL' if the user has all rights.\n \"\"\"\n user = request.user\n if not user.is_authenticated:\n return False\n elif user.is_superuser:\n return True\n elif user == self.owner:\n return True\n else:\n if not hasattr(self, \"permission_cache\") or\\\n permission_type not in self.permission_cache or \\\n request.user.pk != self.permission_cache['user'].pk:\n if not hasattr(self, \"permission_cache\") or request.user.pk != self.permission_cache['user'].pk:\n self.permission_cache = {\n 'user': request.user,\n }\n\n # This calls methods on the manager i.e. 
get_read_id_list()\n func = getattr(FolderPermission.objects,\n \"get_%s_id_list\" % permission_type)\n permission = func(user)\n if permission == \"All\":\n self.permission_cache[permission_type] = True\n self.permission_cache['read'] = True\n self.permission_cache['edit'] = True\n self.permission_cache['add_children'] = True\n else:\n self.permission_cache[permission_type] = self.id in permission\n return self.permission_cache[permission_type]\n\n def get_admin_change_url(self):\n return reverse('admin:filer_folder_change', args=(self.id,))\n\n def get_admin_directory_listing_url_path(self):\n return reverse('admin:filer-directory_listing', args=(self.id,))\n\n def get_admin_delete_url(self):\n return reverse(\n f'admin:{self._meta.app_label}_{self._meta.model_name}_delete',\n args=(self.pk,)\n )\n\n def contains_folder(self, folder_name):\n try:\n self.children.get(name=folder_name)\n return True\n except Folder.DoesNotExist:\n return False\n\n\nclass FolderPermission(models.Model):\n ALL = 0\n THIS = 1\n CHILDREN = 2\n\n ALLOW = 1\n DENY = 0\n\n TYPES = [\n (ALL, _(\"all items\")),\n (THIS, _(\"this item only\")),\n (CHILDREN, _(\"this item and all children\")),\n ]\n\n PERMISIONS = [\n (None, _(\"inherit\")),\n (ALLOW, _(\"allow\")),\n (DENY, _(\"deny\")),\n ]\n\n folder = models.ForeignKey(\n Folder,\n verbose_name=(\"folder\"),\n null=True,\n blank=True,\n on_delete=models.CASCADE,\n )\n\n type = models.SmallIntegerField(\n _(\"type\"),\n choices=TYPES,\n default=ALL,\n )\n\n user = models.ForeignKey(\n getattr(settings, 'AUTH_USER_MODEL', 'auth.User'),\n related_name=\"filer_folder_permissions\",\n on_delete=models.SET_NULL,\n verbose_name=_(\"user\"),\n blank=True,\n null=True,\n )\n\n group = models.ForeignKey(\n auth_models.Group,\n related_name=\"filer_folder_permissions\",\n verbose_name=_(\"group\"),\n blank=True,\n null=True,\n on_delete=models.CASCADE,\n )\n\n everybody = models.BooleanField(\n _(\"everybody\"),\n default=False,\n )\n\n can_read = models.SmallIntegerField(\n _(\"can read\"),\n choices=PERMISIONS,\n blank=True,\n null=True,\n default=None,\n )\n\n can_edit = models.SmallIntegerField(\n _(\"can edit\"),\n choices=PERMISIONS,\n blank=True,\n null=True,\n default=None,\n )\n\n can_add_children = models.SmallIntegerField(\n _(\"can add children\"),\n choices=PERMISIONS,\n blank=True,\n null=True,\n default=None,\n )\n\n class Meta:\n verbose_name = _('folder permission')\n verbose_name_plural = _('folder permissions')\n app_label = 'filer'\n\n objects = FolderPermissionManager()\n\n def __str__(self):\n return self.pretty_logical_path\n\n def __repr__(self):\n return f'<{self.__class__.__name__}(pk={self.pk}): folder=\"{self.pretty_logical_path}\", ' \\\n 'who=\"{self.who}\", what=\"{self.what}\">'\n\n def clean(self):\n if self.type == self.ALL and self.folder:\n raise ValidationError('Folder cannot be selected with type \"all items\".')\n if self.type != self.ALL and not self.folder:\n raise ValidationError('Folder has to be selected when type is not \"all items\".')\n if self.everybody and (self.user or self.group):\n raise ValidationError('User or group cannot be selected together with \"everybody\".')\n if not self.user and not self.group and not self.everybody:\n raise ValidationError('At least one of user, group, or \"everybody\" has to be selected.')\n\n @cached_property\n def pretty_logical_path(self):\n if self.folder:\n return self.folder.pretty_logical_path\n return _(\"All Folders\")\n\n pretty_logical_path.short_description = _(\"Logical 
Path\")\n\n @cached_property\n def who(self):\n \"\"\"\n Returns a human readable string of *who* can interact with a given folder\n \"\"\"\n parts = []\n if self.user:\n parts.append(_(\"User: {user}\").format(user=self.user))\n if self.group:\n parts.append(_(\"Group: {group}\").format(group=self.group))\n if self.everybody:\n parts.append(_(\"Everybody\"))\n if parts:\n return format_html_join(\"; \", '{}', ((p,) for p in parts))\n return '\u2013'\n\n who.short_description = _(\"Who\")\n\n @cached_property\n def what(self):\n \"\"\"\n Returns a human readable string of *what* a user/group/everybody can do with a given folder\n \"\"\"\n mapping = {\n 'can_edit': _(\"Edit\"),\n 'can_read': _(\"Read\"),\n 'can_add_children': _(\"Add children\"),\n }\n perms = []\n for key, text in mapping.items():\n perm = getattr(self, key)\n if perm == self.ALLOW:\n perms.append(text)\n elif perm == self.DENY:\n perms.append('\\u0336'.join(text) + '\\u0336')\n return format_html_join(\", \", '{}', ((p,) for p in perms))\n\n what.short_description = _(\"What\")\n", "path": "filer/models/foldermodels.py"}]} |
gh_patches_debug_1222 | rasdani/github-patches | git_diff | nonebot__nonebot2-561 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug: Nonebot2 reports HTTP 400 when connecting to go-cqhttp over a forward WebSocket
**Describe the bug:**
With the current latest Nonebot2 release, connecting to go-cqhttp over websocket fails with a 400 error.
The problem only appears on Linux; in a Windows environment the 400 error does not occur (regardless of whether go-cqhttp runs on Linux or Windows).
The problem has already been reproduced many times in private testing.
**How to reproduce?**
Prerequisites:
- go-cqhttp is already configured, using the ws connection protocol, host 127.0.0.1, port 6700, with access_token disabled
- nb-cli and nonebot-adapter-cqhttp are installed, a new bot has been created using cqhttp (Nonebot2), and `.env.dev` already contains the setting ` CQHTTP_WS_URLS={"2461591632": "ws://127.0.0.1:6700/"} `
1. Start the go-cqhttp service first
2. Then start the nonebot2 bot
**Expected result**
go-cqhttp shows no errors at all, so nothing is pasted for it.
Error on the Nonebot2 side:
```python
10-11 11:49:35 [ERROR] nonebot | Error while connecting to ws://127.0.0.1:6700/. Try to reconnect...
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/usr/lib/python3.9/multiprocessing/spawn.py", line 116, in spawn_main
exitcode = _main(fd, parent_sentinel)
File "/usr/lib/python3.9/multiprocessing/spawn.py", line 129, in _main
return self._bootstrap(parent_sentinel)
File "/usr/lib/python3.9/multiprocessing/process.py", line 315, in _bootstrap
self.run()
File "/usr/lib/python3.9/multiprocessing/process.py", line 108, in run
self._target(*self._args, **self._kwargs)
File "/home/ls420/.local/lib/python3.9/site-packages/uvicorn/subprocess.py", line 76, in subprocess_started
target(sockets=sockets)
File "/home/ls420/.local/lib/python3.9/site-packages/uvicorn/server.py", line 68, in run
return asyncio.run(self.serve(sockets=sockets))
File "/usr/lib/python3.9/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
> File "/usr/local/lib/python3.9/dist-packages/nonebot/drivers/fastapi.py", line 489, in _ws_loop
async with connection as ws:
File "/home/ls420/.local/lib/python3.9/site-packages/websockets/legacy/client.py", line 604, in __aenter__
return await self
File "/home/ls420/.local/lib/python3.9/site-packages/websockets/legacy/client.py", line 629, in __await_impl__
await protocol.handshake(
File "/home/ls420/.local/lib/python3.9/site-packages/websockets/legacy/client.py", line 388, in handshake
raise InvalidStatusCode(status_code)
websockets.exceptions.InvalidStatusCode: server rejected WebSocket connection: HTTP 400
```
**Environment info:**
- OS: [Ubuntu20.04-LTS_arm64/amd64]
- Python Version: [3.9.5]
- Nonebot Version: [2.0.0a16]
**Protocol endpoint info:**
- Protocol endpoint: [go-cqhttp]
- Protocol endpoint version: [v1.0.0-beta7-fix2]
**Screenshots or logs**
--- END ISSUE ---
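
The traceback above fails inside `websockets.legacy.client` during the opening handshake. The sketch below (a standalone probe, not part of nonebot2; only the URL and port are taken from the report) can help check the handshake outside NoneBot. It mirrors the fact that `Driver._ws_loop` passes a `host` entry through `extra_headers` while the websockets client already sets its own Host header; whether that duplication is what makes go-cqhttp answer 400 is an assumption to verify, not something the issue states.

```python
# Standalone probe, not part of nonebot2; assumes the legacy websockets client
# API (extra_headers), matching the import used by the driver. websockets
# builds its own Host header, and the driver adds another "host" entry, so
# this compares both handshakes against go-cqhttp.
import asyncio

import websockets

URL = "ws://127.0.0.1:6700/"  # address taken from the bug report


async def probe(extra_headers=None):
    try:
        async with websockets.connect(URL, extra_headers=extra_headers):
            print("accepted:", extra_headers)
    except websockets.exceptions.InvalidStatusCode as exc:
        print(f"rejected with HTTP {exc.status_code}:", extra_headers)


async def main():
    await probe()                               # plain handshake
    await probe({"host": "127.0.0.1:6700"})     # driver-style extra header


asyncio.run(main())
```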
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nonebot/drivers/fastapi.py`
Content:
```
1 """
2 FastAPI 驱动适配
3 ================
4
5 本驱动同时支持服务端以及客户端连接
6
7 后端使用方法请参考: `FastAPI 文档`_
8
9 .. _FastAPI 文档:
10 https://fastapi.tiangolo.com/
11 """
12
13 import asyncio
14 import logging
15 from dataclasses import dataclass
16 from typing import List, Union, Callable, Optional, Awaitable, cast
17
18 import httpx
19 import uvicorn
20 from pydantic import BaseSettings
21 from fastapi.responses import Response
22 from websockets.exceptions import ConnectionClosed
23 from fastapi import FastAPI, Request, HTTPException, status
24 from starlette.websockets import WebSocket as FastAPIWebSocket
25 from starlette.websockets import WebSocketState, WebSocketDisconnect
26 from websockets.legacy.client import Connect, WebSocketClientProtocol
27
28 from nonebot.config import Env
29 from nonebot.log import logger
30 from nonebot.adapters import Bot
31 from nonebot.typing import overrides
32 from nonebot.utils import escape_tag
33 from nonebot.config import Config as NoneBotConfig
34 from nonebot.drivers import WebSocket as BaseWebSocket
35 from nonebot.drivers import (HTTPRequest, ForwardDriver, ReverseDriver,
36 WebSocketSetup, HTTPPollingSetup)
37
38 HTTPPOLLING_SETUP = Union[HTTPPollingSetup,
39 Callable[[], Awaitable[HTTPPollingSetup]]]
40 WEBSOCKET_SETUP = Union[WebSocketSetup, Callable[[], Awaitable[WebSocketSetup]]]
41
42
43 class Config(BaseSettings):
44 """
45 FastAPI 驱动框架设置,详情参考 FastAPI 文档
46 """
47 fastapi_openapi_url: Optional[str] = None
48 """
49 :类型:
50
51 ``Optional[str]``
52
53 :说明:
54
55 ``openapi.json`` 地址,默认为 ``None`` 即关闭
56 """
57 fastapi_docs_url: Optional[str] = None
58 """
59 :类型:
60
61 ``Optional[str]``
62
63 :说明:
64
65 ``swagger`` 地址,默认为 ``None`` 即关闭
66 """
67 fastapi_redoc_url: Optional[str] = None
68 """
69 :类型:
70
71 ``Optional[str]``
72
73 :说明:
74
75 ``redoc`` 地址,默认为 ``None`` 即关闭
76 """
77 fastapi_reload: Optional[bool] = None
78 """
79 :类型:
80
81 ``Optional[bool]``
82
83 :说明:
84
85 开启/关闭冷重载,默认会在配置了 app 的 debug 模式启用
86 """
87 fastapi_reload_dirs: Optional[List[str]] = None
88 """
89 :类型:
90
91 ``Optional[List[str]]``
92
93 :说明:
94
95 重载监控文件夹列表,默认为 uvicorn 默认值
96 """
97 fastapi_reload_delay: Optional[float] = None
98 """
99 :类型:
100
101 ``Optional[float]``
102
103 :说明:
104
105 重载延迟,默认为 uvicorn 默认值
106 """
107 fastapi_reload_includes: Optional[List[str]] = None
108 """
109 :类型:
110
111 ``Optional[List[str]]``
112
113 :说明:
114
115 要监听的文件列表,支持 glob pattern,默认为 uvicorn 默认值
116 """
117 fastapi_reload_excludes: Optional[List[str]] = None
118 """
119 :类型:
120
121 ``Optional[List[str]]``
122
123 :说明:
124
125 不要监听的文件列表,支持 glob pattern,默认为 uvicorn 默认值
126 """
127
128 class Config:
129 extra = "ignore"
130
131
132 class Driver(ReverseDriver, ForwardDriver):
133 """
134 FastAPI 驱动框架
135
136 :上报地址:
137
138 * ``/{adapter name}/``: HTTP POST 上报
139 * ``/{adapter name}/http/``: HTTP POST 上报
140 * ``/{adapter name}/ws``: WebSocket 上报
141 * ``/{adapter name}/ws/``: WebSocket 上报
142 """
143
144 def __init__(self, env: Env, config: NoneBotConfig):
145 super().__init__(env, config)
146
147 self.fastapi_config: Config = Config(**config.dict())
148 self.http_pollings: List[HTTPPOLLING_SETUP] = []
149 self.websockets: List[WEBSOCKET_SETUP] = []
150 self.shutdown: asyncio.Event = asyncio.Event()
151 self.connections: List[asyncio.Task] = []
152
153 self._server_app = FastAPI(
154 debug=config.debug,
155 openapi_url=self.fastapi_config.fastapi_openapi_url,
156 docs_url=self.fastapi_config.fastapi_docs_url,
157 redoc_url=self.fastapi_config.fastapi_redoc_url,
158 )
159
160 self._server_app.post("/{adapter}/")(self._handle_http)
161 self._server_app.post("/{adapter}/http")(self._handle_http)
162 self._server_app.websocket("/{adapter}/ws")(self._handle_ws_reverse)
163 self._server_app.websocket("/{adapter}/ws/")(self._handle_ws_reverse)
164
165 self.on_startup(self._run_forward)
166 self.on_shutdown(self._shutdown_forward)
167
168 @property
169 @overrides(ReverseDriver)
170 def type(self) -> str:
171 """驱动名称: ``fastapi``"""
172 return "fastapi"
173
174 @property
175 @overrides(ReverseDriver)
176 def server_app(self) -> FastAPI:
177 """``FastAPI APP`` 对象"""
178 return self._server_app
179
180 @property
181 @overrides(ReverseDriver)
182 def asgi(self) -> FastAPI:
183 """``FastAPI APP`` 对象"""
184 return self._server_app
185
186 @property
187 @overrides(ReverseDriver)
188 def logger(self) -> logging.Logger:
189 """fastapi 使用的 logger"""
190 return logging.getLogger("fastapi")
191
192 @overrides(ReverseDriver)
193 def on_startup(self, func: Callable) -> Callable:
194 """参考文档: `Events <https://fastapi.tiangolo.com/advanced/events/#startup-event>`_"""
195 return self.server_app.on_event("startup")(func)
196
197 @overrides(ReverseDriver)
198 def on_shutdown(self, func: Callable) -> Callable:
199 """参考文档: `Events <https://fastapi.tiangolo.com/advanced/events/#startup-event>`_"""
200 return self.server_app.on_event("shutdown")(func)
201
202 @overrides(ForwardDriver)
203 def setup_http_polling(self, setup: HTTPPOLLING_SETUP) -> None:
204 """
205 :说明:
206
207 注册一个 HTTP 轮询连接,如果传入一个函数,则该函数会在每次连接时被调用
208
209 :参数:
210
211 * ``setup: Union[HTTPPollingSetup, Callable[[], Awaitable[HTTPPollingSetup]]]``
212 """
213 self.http_pollings.append(setup)
214
215 @overrides(ForwardDriver)
216 def setup_websocket(self, setup: WEBSOCKET_SETUP) -> None:
217 """
218 :说明:
219
220 注册一个 WebSocket 连接,如果传入一个函数,则该函数会在每次重连时被调用
221
222 :参数:
223
224 * ``setup: Union[WebSocketSetup, Callable[[], Awaitable[WebSocketSetup]]]``
225 """
226 self.websockets.append(setup)
227
228 @overrides(ReverseDriver)
229 def run(self,
230 host: Optional[str] = None,
231 port: Optional[int] = None,
232 *,
233 app: Optional[str] = None,
234 **kwargs):
235 """使用 ``uvicorn`` 启动 FastAPI"""
236 super().run(host, port, app, **kwargs)
237 LOGGING_CONFIG = {
238 "version": 1,
239 "disable_existing_loggers": False,
240 "handlers": {
241 "default": {
242 "class": "nonebot.log.LoguruHandler",
243 },
244 },
245 "loggers": {
246 "uvicorn.error": {
247 "handlers": ["default"],
248 "level": "INFO"
249 },
250 "uvicorn.access": {
251 "handlers": ["default"],
252 "level": "INFO",
253 },
254 },
255 }
256 uvicorn.run(
257 app or self.server_app, # type: ignore
258 host=host or str(self.config.host),
259 port=port or self.config.port,
260 reload=self.fastapi_config.fastapi_reload
261 if self.fastapi_config.fastapi_reload is not None else
262 (bool(app) and self.config.debug),
263 reload_dirs=self.fastapi_config.fastapi_reload_dirs,
264 reload_delay=self.fastapi_config.fastapi_reload_delay,
265 reload_includes=self.fastapi_config.fastapi_reload_includes,
266 reload_excludes=self.fastapi_config.fastapi_reload_excludes,
267 debug=self.config.debug,
268 log_config=LOGGING_CONFIG,
269 **kwargs)
270
271 def _run_forward(self):
272 for setup in self.http_pollings:
273 self.connections.append(asyncio.create_task(self._http_loop(setup)))
274 for setup in self.websockets:
275 self.connections.append(asyncio.create_task(self._ws_loop(setup)))
276
277 def _shutdown_forward(self):
278 self.shutdown.set()
279 for task in self.connections:
280 if not task.done():
281 task.cancel()
282
283 async def _handle_http(self, adapter: str, request: Request):
284 data = await request.body()
285
286 if adapter not in self._adapters:
287 logger.warning(
288 f"Unknown adapter {adapter}. Please register the adapter before use."
289 )
290 raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
291 detail="adapter not found")
292
293 # 创建 Bot 对象
294 BotClass = self._adapters[adapter]
295 http_request = HTTPRequest(request.scope["http_version"],
296 request.url.scheme, request.url.path,
297 request.scope["query_string"],
298 dict(request.headers), request.method, data)
299 x_self_id, response = await BotClass.check_permission(
300 self, http_request)
301
302 if not x_self_id:
303 raise HTTPException(
304 response and response.status or 401, response and
305 response.body and response.body.decode("utf-8"))
306
307 if x_self_id in self._clients:
308 logger.warning("There's already a reverse websocket connection,"
309 "so the event may be handled twice.")
310
311 bot = BotClass(x_self_id, http_request)
312
313 asyncio.create_task(bot.handle_message(data))
314 return Response(response and response.body,
315 response and response.status or 200)
316
317 async def _handle_ws_reverse(self, adapter: str,
318 websocket: FastAPIWebSocket):
319 ws = WebSocket(websocket.scope.get("http_version",
320 "1.1"), websocket.url.scheme,
321 websocket.url.path, websocket.scope["query_string"],
322 dict(websocket.headers), websocket)
323
324 if adapter not in self._adapters:
325 logger.warning(
326 f"Unknown adapter {adapter}. Please register the adapter before use."
327 )
328 await ws.close(code=status.WS_1008_POLICY_VIOLATION)
329 return
330
331 # Create Bot Object
332 BotClass = self._adapters[adapter]
333 self_id, _ = await BotClass.check_permission(self, ws)
334
335 if not self_id:
336 await ws.close(code=status.WS_1008_POLICY_VIOLATION)
337 return
338
339 if self_id in self._clients:
340 logger.opt(colors=True).warning(
341 "There's already a websocket connection, "
342 f"<y>{escape_tag(adapter.upper())} Bot {escape_tag(self_id)}</y> ignored."
343 )
344 await ws.close(code=status.WS_1008_POLICY_VIOLATION)
345 return
346
347 bot = BotClass(self_id, ws)
348
349 await ws.accept()
350 logger.opt(colors=True).info(
351 f"WebSocket Connection from <y>{escape_tag(adapter.upper())} "
352 f"Bot {escape_tag(self_id)}</y> Accepted!")
353
354 self._bot_connect(bot)
355
356 try:
357 while not ws.closed:
358 try:
359 data = await ws.receive()
360 except WebSocketDisconnect:
361 logger.error("WebSocket disconnected by peer.")
362 break
363 except Exception as e:
364 logger.opt(exception=e).error(
365 "Error when receiving data from websocket.")
366 break
367
368 asyncio.create_task(bot.handle_message(data.encode()))
369 finally:
370 self._bot_disconnect(bot)
371
372 async def _http_loop(self, setup: HTTPPOLLING_SETUP):
373
374 async def _build_request(
375 setup: HTTPPollingSetup) -> Optional[HTTPRequest]:
376 url = httpx.URL(setup.url)
377 if not url.netloc:
378 logger.opt(colors=True).error(
379 f"<r><bg #f8bbd0>Error parsing url {escape_tag(str(url))}</bg #f8bbd0></r>"
380 )
381 return
382 return HTTPRequest(
383 setup.http_version, url.scheme, url.path, url.query, {
384 **setup.headers, "host": url.netloc.decode("ascii")
385 }, setup.method, setup.body)
386
387 bot: Optional[Bot] = None
388 request: Optional[HTTPRequest] = None
389 setup_: Optional[HTTPPollingSetup] = None
390
391 logger.opt(colors=True).info(
392 f"Start http polling for <y>{escape_tag(setup.adapter.upper())} "
393 f"Bot {escape_tag(setup.self_id)}</y>")
394
395 try:
396 async with httpx.AsyncClient(http2=True) as session:
397 while not self.shutdown.is_set():
398
399 try:
400 if callable(setup):
401 setup_ = await setup()
402 else:
403 setup_ = setup
404 except Exception as e:
405 logger.opt(colors=True, exception=e).error(
406 "<r><bg #f8bbd0>Error while parsing setup "
407 f"{escape_tag(repr(setup))}.</bg #f8bbd0></r>")
408 await asyncio.sleep(3)
409 continue
410
411 if not bot:
412 request = await _build_request(setup_)
413 if not request:
414 return
415 BotClass = self._adapters[setup.adapter]
416 bot = BotClass(setup.self_id, request)
417 self._bot_connect(bot)
418 elif callable(setup):
419 request = await _build_request(setup_)
420 if not request:
421 await asyncio.sleep(setup_.poll_interval)
422 continue
423 bot.request = request
424
425 setup_ = cast(HTTPPollingSetup, setup_)
426 request = cast(HTTPRequest, request)
427 headers = request.headers
428
429 logger.debug(
430 f"Bot {setup_.self_id} from adapter {setup_.adapter} request {setup_.url}"
431 )
432 try:
433 response = await session.request(request.method,
434 setup_.url,
435 content=request.body,
436 headers=headers,
437 timeout=30.)
438 response.raise_for_status()
439 data = response.read()
440 asyncio.create_task(bot.handle_message(data))
441 except httpx.HTTPError as e:
442 logger.opt(colors=True, exception=e).error(
443 f"<r><bg #f8bbd0>Error occurred while requesting {escape_tag(setup_.url)}. "
444 "Try to reconnect...</bg #f8bbd0></r>")
445
446 await asyncio.sleep(setup_.poll_interval)
447
448 except asyncio.CancelledError:
449 pass
450 except Exception as e:
451 logger.opt(colors=True, exception=e).error(
452 "<r><bg #f8bbd0>Unexpected exception occurred "
453 "while http polling</bg #f8bbd0></r>")
454 finally:
455 if bot:
456 self._bot_disconnect(bot)
457
458 async def _ws_loop(self, setup: WEBSOCKET_SETUP):
459 bot: Optional[Bot] = None
460
461 try:
462 while True:
463
464 try:
465 if callable(setup):
466 setup_ = await setup()
467 else:
468 setup_ = setup
469 except Exception as e:
470 logger.opt(colors=True, exception=e).error(
471 "<r><bg #f8bbd0>Error while parsing setup "
472 f"{escape_tag(repr(setup))}.</bg #f8bbd0></r>")
473 await asyncio.sleep(3)
474 continue
475
476 url = httpx.URL(setup_.url)
477 if not url.netloc:
478 logger.opt(colors=True).error(
479 f"<r><bg #f8bbd0>Error parsing url {escape_tag(str(url))}</bg #f8bbd0></r>"
480 )
481 return
482
483 headers = {**setup_.headers, "host": url.netloc.decode("ascii")}
484 logger.debug(
485 f"Bot {setup_.self_id} from adapter {setup_.adapter} connecting to {url}"
486 )
487 try:
488 connection = Connect(setup_.url, extra_headers=headers)
489 async with connection as ws:
490 logger.opt(colors=True).info(
491 f"WebSocket Connection to <y>{escape_tag(setup_.adapter.upper())} "
492 f"Bot {escape_tag(setup_.self_id)}</y> succeeded!")
493 request = WebSocket("1.1", url.scheme, url.path,
494 url.query, headers, ws)
495
496 BotClass = self._adapters[setup_.adapter]
497 bot = BotClass(setup_.self_id, request)
498 self._bot_connect(bot)
499 while not self.shutdown.is_set():
500 # use try except instead of "request.closed" because of queued message
501 try:
502 msg = await request.receive_bytes()
503 asyncio.create_task(bot.handle_message(msg))
504 except ConnectionClosed:
505 logger.opt(colors=True).error(
506 "<r><bg #f8bbd0>WebSocket connection closed by peer. "
507 "Try to reconnect...</bg #f8bbd0></r>")
508 break
509 except Exception as e:
510 logger.opt(colors=True, exception=e).error(
511 f"<r><bg #f8bbd0>Error while connecting to {url}. "
512 "Try to reconnect...</bg #f8bbd0></r>")
513 finally:
514 if bot:
515 self._bot_disconnect(bot)
516 bot = None
517 await asyncio.sleep(setup_.reconnect_interval)
518
519 except asyncio.CancelledError:
520 pass
521 except Exception as e:
522 logger.opt(colors=True, exception=e).error(
523 "<r><bg #f8bbd0>Unexpected exception occurred "
524 "while websocket loop</bg #f8bbd0></r>")
525
526
527 @dataclass
528 class WebSocket(BaseWebSocket):
529 websocket: Union[FastAPIWebSocket,
530 WebSocketClientProtocol] = None # type: ignore
531
532 @property
533 @overrides(BaseWebSocket)
534 def closed(self) -> bool:
535 if isinstance(self.websocket, FastAPIWebSocket):
536 return (
537 self.websocket.client_state == WebSocketState.DISCONNECTED or
538 self.websocket.application_state == WebSocketState.DISCONNECTED)
539 else:
540 return self.websocket.closed
541
542 @overrides(BaseWebSocket)
543 async def accept(self):
544 if isinstance(self.websocket, FastAPIWebSocket):
545 await self.websocket.accept()
546 else:
547 raise NotImplementedError
548
549 @overrides(BaseWebSocket)
550 async def close(self, code: int = status.WS_1000_NORMAL_CLOSURE):
551 await self.websocket.close(code)
552
553 @overrides(BaseWebSocket)
554 async def receive(self) -> str:
555 if isinstance(self.websocket, FastAPIWebSocket):
556 return await self.websocket.receive_text()
557 else:
558 msg = await self.websocket.recv()
559 return msg.decode("utf-8") if isinstance(msg, bytes) else msg
560
561 @overrides(BaseWebSocket)
562 async def receive_bytes(self) -> bytes:
563 if isinstance(self.websocket, FastAPIWebSocket):
564 return await self.websocket.receive_bytes()
565 else:
566 msg = await self.websocket.recv()
567 return msg.encode("utf-8") if isinstance(msg, str) else msg
568
569 @overrides(BaseWebSocket)
570 async def send(self, data: str) -> None:
571 if isinstance(self.websocket, FastAPIWebSocket):
572 await self.websocket.send({"type": "websocket.send", "text": data})
573 else:
574 await self.websocket.send(data)
575
576 @overrides(BaseWebSocket)
577 async def send_bytes(self, data: bytes) -> None:
578 if isinstance(self.websocket, FastAPIWebSocket):
579 await self.websocket.send({"type": "websocket.send", "bytes": data})
580 else:
581 await self.websocket.send(data)
582
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nonebot/drivers/fastapi.py b/nonebot/drivers/fastapi.py
--- a/nonebot/drivers/fastapi.py
+++ b/nonebot/drivers/fastapi.py
@@ -480,7 +480,7 @@
)
return
- headers = {**setup_.headers, "host": url.netloc.decode("ascii")}
+ headers = setup_.headers.copy()
logger.debug(
f"Bot {setup_.self_id} from adapter {setup_.adapter} connecting to {url}"
)
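The patch above drops the explicit "host" entry from the headers passed to the forward WebSocket handshake. A plausible reading of the HTTP 400 rejections is that the websockets client already derives a Host header from the URI it connects to, so adding another "host" through extra_headers produces a duplicate header that stricter endpoints such as go-cqhttp refuse. The snippet below is a minimal sketch of a forward connection built on the same legacy client API the driver imports; the Authorization header and the probe helper are illustrative assumptions, not part of the original setup.

```python
# Minimal sketch, not the driver itself: pass only genuinely extra headers and
# let the websockets client fill in Host from the URI.
import asyncio
from websockets.legacy.client import Connect


async def probe(url: str, token: str) -> None:
    extra_headers = {"Authorization": f"Bearer {token}"}  # note: no "host" key
    async with Connect(url, extra_headers=extra_headers) as ws:
        print(await ws.recv())  # first frame pushed by the endpoint


asyncio.run(probe("ws://127.0.0.1:6700/", "<access-token>"))
```

Copying `setup_.headers` instead of spreading it into a new dict with an added "host" key keeps caller-supplied headers intact while leaving Host negotiation to the library.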
| {"golden_diff": "diff --git a/nonebot/drivers/fastapi.py b/nonebot/drivers/fastapi.py\n--- a/nonebot/drivers/fastapi.py\n+++ b/nonebot/drivers/fastapi.py\n@@ -480,7 +480,7 @@\n )\n return\n \n- headers = {**setup_.headers, \"host\": url.netloc.decode(\"ascii\")}\n+ headers = setup_.headers.copy()\n logger.debug(\n f\"Bot {setup_.self_id} from adapter {setup_.adapter} connecting to {url}\"\n )\n", "issue": "Bug: Nonebot2\u901a\u8fc7\u6b63\u5411Websocket\u8fde\u63a5go-cqhttp\u62a5\u9519400\n**\u63cf\u8ff0\u95ee\u9898\uff1a**\r\n\r\n\u5728\u76ee\u524d\u6700\u65b0\u7684Nonebot2\u7684release\u4e2d\u4f7f\u7528websocket\u4e0ego-cqhttp\u8fdb\u884c\u8fde\u63a5\u4f1a\u62a5\u9519400\u3002\r\n\u8fd9\u4e2a\u95ee\u9898\u4ec5\u51fa\u73b0\u5728\u4e86Linux\u7aef\u4e0a\uff0c\u5728Win\u73af\u5883\u4e0b\u5e76\u4e0d\u4f1a\u51fa\u73b0400\u9519\u8bef\uff08\u65e0\u8bba\u662fgo-cqhttp\u8fd0\u884c\u5728linux\u8fd8\u662fwin\u4e0b\uff09\u3002\r\n\u6b64\u95ee\u9898\u5df2\u7ecf\u5728\u79c1\u4e0b\u6d4b\u8bd5\u5b8c\u6210\u590d\u73b0\u4e86\u5f88\u591a\u6b21\u3002\r\n\r\n**\u5982\u4f55\u590d\u73b0\uff1f**\r\n\r\n\u524d\u63d0\u6761\u4ef6\uff1a\r\n- go-cqhttp\u5df2\u5b8c\u6210\u914d\u7f6e\uff0c\u8fde\u63a5\u534f\u8bae\u4e3aws\uff0chost\u4e3a127.0.0.1\uff0cport\u4e3a6700\uff0c\u4e0d\u542f\u7528access_token\r\n- \u5df2\u5b8c\u6210\u5b89\u88c5nb-cli\u5e76\u5b89\u88c5nonebot-adapter-cqhttp\u4e14\u521b\u5efa\u4e86\u4e00\u4e2a\u65b0\u7684\u673a\u5668\u4eba\uff0c\u4f7f\u7528cqhttp\uff08Nonebot2\uff09\u3001.env.dev\u5df2\u6dfb\u52a0\u914d\u7f6e ` CQHTTP_WS_URLS={\"2461591632\": \"ws://127.0.0.1:6700/\"} `\r\n\r\n1. \u5148\u884c\u542f\u52a8go-cqhttp\u670d\u52a1\r\n2. \u7136\u540e\u542f\u52a8nonebot2\u673a\u5668\u4eba\r\n\r\n**\u671f\u671b\u7684\u7ed3\u679c**\r\ngo-cqhttp\u5e76\u6ca1\u6709\u4efb\u4f55\u62a5\u9519\uff0c\u6240\u4ee5\u4e0d\u8d34\u4e1c\u897f\u4e86\u3002\r\n\r\nNonebot2\u7aef\u62a5\u9519\uff1a\r\n```python\r\n10-11 11:49:35 [ERROR] nonebot | Error while connecting to ws://127.0.0.1:6700/. 
Try to reconnect...\r\nTraceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\n File \"/usr/lib/python3.9/multiprocessing/spawn.py\", line 116, in spawn_main\r\n exitcode = _main(fd, parent_sentinel)\r\n File \"/usr/lib/python3.9/multiprocessing/spawn.py\", line 129, in _main\r\n return self._bootstrap(parent_sentinel)\r\n File \"/usr/lib/python3.9/multiprocessing/process.py\", line 315, in _bootstrap\r\n self.run()\r\n File \"/usr/lib/python3.9/multiprocessing/process.py\", line 108, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"/home/ls420/.local/lib/python3.9/site-packages/uvicorn/subprocess.py\", line 76, in subprocess_started\r\n target(sockets=sockets)\r\n File \"/home/ls420/.local/lib/python3.9/site-packages/uvicorn/server.py\", line 68, in run\r\n return asyncio.run(self.serve(sockets=sockets))\r\n File \"/usr/lib/python3.9/asyncio/runners.py\", line 44, in run\r\n return loop.run_until_complete(main)\r\n> File \"/usr/local/lib/python3.9/dist-packages/nonebot/drivers/fastapi.py\", line 489, in _ws_loop\r\n async with connection as ws:\r\n File \"/home/ls420/.local/lib/python3.9/site-packages/websockets/legacy/client.py\", line 604, in __aenter__\r\n return await self\r\n File \"/home/ls420/.local/lib/python3.9/site-packages/websockets/legacy/client.py\", line 629, in __await_impl__\r\n await protocol.handshake(\r\n File \"/home/ls420/.local/lib/python3.9/site-packages/websockets/legacy/client.py\", line 388, in handshake\r\n raise InvalidStatusCode(status_code)\r\nwebsockets.exceptions.InvalidStatusCode: server rejected WebSocket connection: HTTP 400\r\n```\r\n\r\n**\u73af\u5883\u4fe1\u606f\uff1a**\r\n\r\n - OS: [Ubuntu20.04-LTS_arm64/amd64]\r\n - Python Version: [3.9.5]\r\n - Nonebot Version: [2.0.0a16]\r\n\r\n**\u534f\u8bae\u7aef\u4fe1\u606f\uff1a**\r\n\r\n - \u534f\u8bae\u7aef: [go-cqhttp]\r\n - \u534f\u8bae\u7aef\u7248\u672c: [v1.0.0-beta7-fix2]\r\n\r\n**\u622a\u56fe\u6216\u65e5\u5fd7**\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nFastAPI \u9a71\u52a8\u9002\u914d\n================\n\n\u672c\u9a71\u52a8\u540c\u65f6\u652f\u6301\u670d\u52a1\u7aef\u4ee5\u53ca\u5ba2\u6237\u7aef\u8fde\u63a5\n\n\u540e\u7aef\u4f7f\u7528\u65b9\u6cd5\u8bf7\u53c2\u8003: `FastAPI \u6587\u6863`_\n\n.. 
_FastAPI \u6587\u6863:\n https://fastapi.tiangolo.com/\n\"\"\"\n\nimport asyncio\nimport logging\nfrom dataclasses import dataclass\nfrom typing import List, Union, Callable, Optional, Awaitable, cast\n\nimport httpx\nimport uvicorn\nfrom pydantic import BaseSettings\nfrom fastapi.responses import Response\nfrom websockets.exceptions import ConnectionClosed\nfrom fastapi import FastAPI, Request, HTTPException, status\nfrom starlette.websockets import WebSocket as FastAPIWebSocket\nfrom starlette.websockets import WebSocketState, WebSocketDisconnect\nfrom websockets.legacy.client import Connect, WebSocketClientProtocol\n\nfrom nonebot.config import Env\nfrom nonebot.log import logger\nfrom nonebot.adapters import Bot\nfrom nonebot.typing import overrides\nfrom nonebot.utils import escape_tag\nfrom nonebot.config import Config as NoneBotConfig\nfrom nonebot.drivers import WebSocket as BaseWebSocket\nfrom nonebot.drivers import (HTTPRequest, ForwardDriver, ReverseDriver,\n WebSocketSetup, HTTPPollingSetup)\n\nHTTPPOLLING_SETUP = Union[HTTPPollingSetup,\n Callable[[], Awaitable[HTTPPollingSetup]]]\nWEBSOCKET_SETUP = Union[WebSocketSetup, Callable[[], Awaitable[WebSocketSetup]]]\n\n\nclass Config(BaseSettings):\n \"\"\"\n FastAPI \u9a71\u52a8\u6846\u67b6\u8bbe\u7f6e\uff0c\u8be6\u60c5\u53c2\u8003 FastAPI \u6587\u6863\n \"\"\"\n fastapi_openapi_url: Optional[str] = None\n \"\"\"\n :\u7c7b\u578b:\n\n ``Optional[str]``\n\n :\u8bf4\u660e:\n\n ``openapi.json`` \u5730\u5740\uff0c\u9ed8\u8ba4\u4e3a ``None`` \u5373\u5173\u95ed\n \"\"\"\n fastapi_docs_url: Optional[str] = None\n \"\"\"\n :\u7c7b\u578b:\n\n ``Optional[str]``\n\n :\u8bf4\u660e:\n\n ``swagger`` \u5730\u5740\uff0c\u9ed8\u8ba4\u4e3a ``None`` \u5373\u5173\u95ed\n \"\"\"\n fastapi_redoc_url: Optional[str] = None\n \"\"\"\n :\u7c7b\u578b:\n\n ``Optional[str]``\n\n :\u8bf4\u660e:\n\n ``redoc`` \u5730\u5740\uff0c\u9ed8\u8ba4\u4e3a ``None`` \u5373\u5173\u95ed\n \"\"\"\n fastapi_reload: Optional[bool] = None\n \"\"\"\n :\u7c7b\u578b:\n\n ``Optional[bool]``\n\n :\u8bf4\u660e:\n\n \u5f00\u542f/\u5173\u95ed\u51b7\u91cd\u8f7d\uff0c\u9ed8\u8ba4\u4f1a\u5728\u914d\u7f6e\u4e86 app \u7684 debug \u6a21\u5f0f\u542f\u7528\n \"\"\"\n fastapi_reload_dirs: Optional[List[str]] = None\n \"\"\"\n :\u7c7b\u578b:\n\n ``Optional[List[str]]``\n\n :\u8bf4\u660e:\n\n \u91cd\u8f7d\u76d1\u63a7\u6587\u4ef6\u5939\u5217\u8868\uff0c\u9ed8\u8ba4\u4e3a uvicorn \u9ed8\u8ba4\u503c\n \"\"\"\n fastapi_reload_delay: Optional[float] = None\n \"\"\"\n :\u7c7b\u578b:\n\n ``Optional[float]``\n\n :\u8bf4\u660e:\n\n \u91cd\u8f7d\u5ef6\u8fdf\uff0c\u9ed8\u8ba4\u4e3a uvicorn \u9ed8\u8ba4\u503c\n \"\"\"\n fastapi_reload_includes: Optional[List[str]] = None\n \"\"\"\n :\u7c7b\u578b:\n\n ``Optional[List[str]]``\n\n :\u8bf4\u660e:\n\n \u8981\u76d1\u542c\u7684\u6587\u4ef6\u5217\u8868\uff0c\u652f\u6301 glob pattern\uff0c\u9ed8\u8ba4\u4e3a uvicorn \u9ed8\u8ba4\u503c\n \"\"\"\n fastapi_reload_excludes: Optional[List[str]] = None\n \"\"\"\n :\u7c7b\u578b:\n\n ``Optional[List[str]]``\n\n :\u8bf4\u660e:\n\n \u4e0d\u8981\u76d1\u542c\u7684\u6587\u4ef6\u5217\u8868\uff0c\u652f\u6301 glob pattern\uff0c\u9ed8\u8ba4\u4e3a uvicorn \u9ed8\u8ba4\u503c\n \"\"\"\n\n class Config:\n extra = \"ignore\"\n\n\nclass Driver(ReverseDriver, ForwardDriver):\n \"\"\"\n FastAPI \u9a71\u52a8\u6846\u67b6\n\n :\u4e0a\u62a5\u5730\u5740:\n\n * ``/{adapter name}/``: HTTP POST \u4e0a\u62a5\n * ``/{adapter name}/http/``: HTTP POST \u4e0a\u62a5\n * ``/{adapter name}/ws``: WebSocket \u4e0a\u62a5\n * ``/{adapter name}/ws/``: WebSocket 
\u4e0a\u62a5\n \"\"\"\n\n def __init__(self, env: Env, config: NoneBotConfig):\n super().__init__(env, config)\n\n self.fastapi_config: Config = Config(**config.dict())\n self.http_pollings: List[HTTPPOLLING_SETUP] = []\n self.websockets: List[WEBSOCKET_SETUP] = []\n self.shutdown: asyncio.Event = asyncio.Event()\n self.connections: List[asyncio.Task] = []\n\n self._server_app = FastAPI(\n debug=config.debug,\n openapi_url=self.fastapi_config.fastapi_openapi_url,\n docs_url=self.fastapi_config.fastapi_docs_url,\n redoc_url=self.fastapi_config.fastapi_redoc_url,\n )\n\n self._server_app.post(\"/{adapter}/\")(self._handle_http)\n self._server_app.post(\"/{adapter}/http\")(self._handle_http)\n self._server_app.websocket(\"/{adapter}/ws\")(self._handle_ws_reverse)\n self._server_app.websocket(\"/{adapter}/ws/\")(self._handle_ws_reverse)\n\n self.on_startup(self._run_forward)\n self.on_shutdown(self._shutdown_forward)\n\n @property\n @overrides(ReverseDriver)\n def type(self) -> str:\n \"\"\"\u9a71\u52a8\u540d\u79f0: ``fastapi``\"\"\"\n return \"fastapi\"\n\n @property\n @overrides(ReverseDriver)\n def server_app(self) -> FastAPI:\n \"\"\"``FastAPI APP`` \u5bf9\u8c61\"\"\"\n return self._server_app\n\n @property\n @overrides(ReverseDriver)\n def asgi(self) -> FastAPI:\n \"\"\"``FastAPI APP`` \u5bf9\u8c61\"\"\"\n return self._server_app\n\n @property\n @overrides(ReverseDriver)\n def logger(self) -> logging.Logger:\n \"\"\"fastapi \u4f7f\u7528\u7684 logger\"\"\"\n return logging.getLogger(\"fastapi\")\n\n @overrides(ReverseDriver)\n def on_startup(self, func: Callable) -> Callable:\n \"\"\"\u53c2\u8003\u6587\u6863: `Events <https://fastapi.tiangolo.com/advanced/events/#startup-event>`_\"\"\"\n return self.server_app.on_event(\"startup\")(func)\n\n @overrides(ReverseDriver)\n def on_shutdown(self, func: Callable) -> Callable:\n \"\"\"\u53c2\u8003\u6587\u6863: `Events <https://fastapi.tiangolo.com/advanced/events/#startup-event>`_\"\"\"\n return self.server_app.on_event(\"shutdown\")(func)\n\n @overrides(ForwardDriver)\n def setup_http_polling(self, setup: HTTPPOLLING_SETUP) -> None:\n \"\"\"\n :\u8bf4\u660e:\n\n \u6ce8\u518c\u4e00\u4e2a HTTP \u8f6e\u8be2\u8fde\u63a5\uff0c\u5982\u679c\u4f20\u5165\u4e00\u4e2a\u51fd\u6570\uff0c\u5219\u8be5\u51fd\u6570\u4f1a\u5728\u6bcf\u6b21\u8fde\u63a5\u65f6\u88ab\u8c03\u7528\n\n :\u53c2\u6570:\n\n * ``setup: Union[HTTPPollingSetup, Callable[[], Awaitable[HTTPPollingSetup]]]``\n \"\"\"\n self.http_pollings.append(setup)\n\n @overrides(ForwardDriver)\n def setup_websocket(self, setup: WEBSOCKET_SETUP) -> None:\n \"\"\"\n :\u8bf4\u660e:\n\n \u6ce8\u518c\u4e00\u4e2a WebSocket \u8fde\u63a5\uff0c\u5982\u679c\u4f20\u5165\u4e00\u4e2a\u51fd\u6570\uff0c\u5219\u8be5\u51fd\u6570\u4f1a\u5728\u6bcf\u6b21\u91cd\u8fde\u65f6\u88ab\u8c03\u7528\n\n :\u53c2\u6570:\n\n * ``setup: Union[WebSocketSetup, Callable[[], Awaitable[WebSocketSetup]]]``\n \"\"\"\n self.websockets.append(setup)\n\n @overrides(ReverseDriver)\n def run(self,\n host: Optional[str] = None,\n port: Optional[int] = None,\n *,\n app: Optional[str] = None,\n **kwargs):\n \"\"\"\u4f7f\u7528 ``uvicorn`` \u542f\u52a8 FastAPI\"\"\"\n super().run(host, port, app, **kwargs)\n LOGGING_CONFIG = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"handlers\": {\n \"default\": {\n \"class\": \"nonebot.log.LoguruHandler\",\n },\n },\n \"loggers\": {\n \"uvicorn.error\": {\n \"handlers\": [\"default\"],\n \"level\": \"INFO\"\n },\n \"uvicorn.access\": {\n \"handlers\": [\"default\"],\n \"level\": \"INFO\",\n },\n },\n }\n 
uvicorn.run(\n app or self.server_app, # type: ignore\n host=host or str(self.config.host),\n port=port or self.config.port,\n reload=self.fastapi_config.fastapi_reload\n if self.fastapi_config.fastapi_reload is not None else\n (bool(app) and self.config.debug),\n reload_dirs=self.fastapi_config.fastapi_reload_dirs,\n reload_delay=self.fastapi_config.fastapi_reload_delay,\n reload_includes=self.fastapi_config.fastapi_reload_includes,\n reload_excludes=self.fastapi_config.fastapi_reload_excludes,\n debug=self.config.debug,\n log_config=LOGGING_CONFIG,\n **kwargs)\n\n def _run_forward(self):\n for setup in self.http_pollings:\n self.connections.append(asyncio.create_task(self._http_loop(setup)))\n for setup in self.websockets:\n self.connections.append(asyncio.create_task(self._ws_loop(setup)))\n\n def _shutdown_forward(self):\n self.shutdown.set()\n for task in self.connections:\n if not task.done():\n task.cancel()\n\n async def _handle_http(self, adapter: str, request: Request):\n data = await request.body()\n\n if adapter not in self._adapters:\n logger.warning(\n f\"Unknown adapter {adapter}. Please register the adapter before use.\"\n )\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,\n detail=\"adapter not found\")\n\n # \u521b\u5efa Bot \u5bf9\u8c61\n BotClass = self._adapters[adapter]\n http_request = HTTPRequest(request.scope[\"http_version\"],\n request.url.scheme, request.url.path,\n request.scope[\"query_string\"],\n dict(request.headers), request.method, data)\n x_self_id, response = await BotClass.check_permission(\n self, http_request)\n\n if not x_self_id:\n raise HTTPException(\n response and response.status or 401, response and\n response.body and response.body.decode(\"utf-8\"))\n\n if x_self_id in self._clients:\n logger.warning(\"There's already a reverse websocket connection,\"\n \"so the event may be handled twice.\")\n\n bot = BotClass(x_self_id, http_request)\n\n asyncio.create_task(bot.handle_message(data))\n return Response(response and response.body,\n response and response.status or 200)\n\n async def _handle_ws_reverse(self, adapter: str,\n websocket: FastAPIWebSocket):\n ws = WebSocket(websocket.scope.get(\"http_version\",\n \"1.1\"), websocket.url.scheme,\n websocket.url.path, websocket.scope[\"query_string\"],\n dict(websocket.headers), websocket)\n\n if adapter not in self._adapters:\n logger.warning(\n f\"Unknown adapter {adapter}. 
Please register the adapter before use.\"\n )\n await ws.close(code=status.WS_1008_POLICY_VIOLATION)\n return\n\n # Create Bot Object\n BotClass = self._adapters[adapter]\n self_id, _ = await BotClass.check_permission(self, ws)\n\n if not self_id:\n await ws.close(code=status.WS_1008_POLICY_VIOLATION)\n return\n\n if self_id in self._clients:\n logger.opt(colors=True).warning(\n \"There's already a websocket connection, \"\n f\"<y>{escape_tag(adapter.upper())} Bot {escape_tag(self_id)}</y> ignored.\"\n )\n await ws.close(code=status.WS_1008_POLICY_VIOLATION)\n return\n\n bot = BotClass(self_id, ws)\n\n await ws.accept()\n logger.opt(colors=True).info(\n f\"WebSocket Connection from <y>{escape_tag(adapter.upper())} \"\n f\"Bot {escape_tag(self_id)}</y> Accepted!\")\n\n self._bot_connect(bot)\n\n try:\n while not ws.closed:\n try:\n data = await ws.receive()\n except WebSocketDisconnect:\n logger.error(\"WebSocket disconnected by peer.\")\n break\n except Exception as e:\n logger.opt(exception=e).error(\n \"Error when receiving data from websocket.\")\n break\n\n asyncio.create_task(bot.handle_message(data.encode()))\n finally:\n self._bot_disconnect(bot)\n\n async def _http_loop(self, setup: HTTPPOLLING_SETUP):\n\n async def _build_request(\n setup: HTTPPollingSetup) -> Optional[HTTPRequest]:\n url = httpx.URL(setup.url)\n if not url.netloc:\n logger.opt(colors=True).error(\n f\"<r><bg #f8bbd0>Error parsing url {escape_tag(str(url))}</bg #f8bbd0></r>\"\n )\n return\n return HTTPRequest(\n setup.http_version, url.scheme, url.path, url.query, {\n **setup.headers, \"host\": url.netloc.decode(\"ascii\")\n }, setup.method, setup.body)\n\n bot: Optional[Bot] = None\n request: Optional[HTTPRequest] = None\n setup_: Optional[HTTPPollingSetup] = None\n\n logger.opt(colors=True).info(\n f\"Start http polling for <y>{escape_tag(setup.adapter.upper())} \"\n f\"Bot {escape_tag(setup.self_id)}</y>\")\n\n try:\n async with httpx.AsyncClient(http2=True) as session:\n while not self.shutdown.is_set():\n\n try:\n if callable(setup):\n setup_ = await setup()\n else:\n setup_ = setup\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n \"<r><bg #f8bbd0>Error while parsing setup \"\n f\"{escape_tag(repr(setup))}.</bg #f8bbd0></r>\")\n await asyncio.sleep(3)\n continue\n\n if not bot:\n request = await _build_request(setup_)\n if not request:\n return\n BotClass = self._adapters[setup.adapter]\n bot = BotClass(setup.self_id, request)\n self._bot_connect(bot)\n elif callable(setup):\n request = await _build_request(setup_)\n if not request:\n await asyncio.sleep(setup_.poll_interval)\n continue\n bot.request = request\n\n setup_ = cast(HTTPPollingSetup, setup_)\n request = cast(HTTPRequest, request)\n headers = request.headers\n\n logger.debug(\n f\"Bot {setup_.self_id} from adapter {setup_.adapter} request {setup_.url}\"\n )\n try:\n response = await session.request(request.method,\n setup_.url,\n content=request.body,\n headers=headers,\n timeout=30.)\n response.raise_for_status()\n data = response.read()\n asyncio.create_task(bot.handle_message(data))\n except httpx.HTTPError as e:\n logger.opt(colors=True, exception=e).error(\n f\"<r><bg #f8bbd0>Error occurred while requesting {escape_tag(setup_.url)}. 
\"\n \"Try to reconnect...</bg #f8bbd0></r>\")\n\n await asyncio.sleep(setup_.poll_interval)\n\n except asyncio.CancelledError:\n pass\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n \"<r><bg #f8bbd0>Unexpected exception occurred \"\n \"while http polling</bg #f8bbd0></r>\")\n finally:\n if bot:\n self._bot_disconnect(bot)\n\n async def _ws_loop(self, setup: WEBSOCKET_SETUP):\n bot: Optional[Bot] = None\n\n try:\n while True:\n\n try:\n if callable(setup):\n setup_ = await setup()\n else:\n setup_ = setup\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n \"<r><bg #f8bbd0>Error while parsing setup \"\n f\"{escape_tag(repr(setup))}.</bg #f8bbd0></r>\")\n await asyncio.sleep(3)\n continue\n\n url = httpx.URL(setup_.url)\n if not url.netloc:\n logger.opt(colors=True).error(\n f\"<r><bg #f8bbd0>Error parsing url {escape_tag(str(url))}</bg #f8bbd0></r>\"\n )\n return\n\n headers = {**setup_.headers, \"host\": url.netloc.decode(\"ascii\")}\n logger.debug(\n f\"Bot {setup_.self_id} from adapter {setup_.adapter} connecting to {url}\"\n )\n try:\n connection = Connect(setup_.url, extra_headers=headers)\n async with connection as ws:\n logger.opt(colors=True).info(\n f\"WebSocket Connection to <y>{escape_tag(setup_.adapter.upper())} \"\n f\"Bot {escape_tag(setup_.self_id)}</y> succeeded!\")\n request = WebSocket(\"1.1\", url.scheme, url.path,\n url.query, headers, ws)\n\n BotClass = self._adapters[setup_.adapter]\n bot = BotClass(setup_.self_id, request)\n self._bot_connect(bot)\n while not self.shutdown.is_set():\n # use try except instead of \"request.closed\" because of queued message\n try:\n msg = await request.receive_bytes()\n asyncio.create_task(bot.handle_message(msg))\n except ConnectionClosed:\n logger.opt(colors=True).error(\n \"<r><bg #f8bbd0>WebSocket connection closed by peer. \"\n \"Try to reconnect...</bg #f8bbd0></r>\")\n break\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n f\"<r><bg #f8bbd0>Error while connecting to {url}. 
\"\n \"Try to reconnect...</bg #f8bbd0></r>\")\n finally:\n if bot:\n self._bot_disconnect(bot)\n bot = None\n await asyncio.sleep(setup_.reconnect_interval)\n\n except asyncio.CancelledError:\n pass\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n \"<r><bg #f8bbd0>Unexpected exception occurred \"\n \"while websocket loop</bg #f8bbd0></r>\")\n\n\n@dataclass\nclass WebSocket(BaseWebSocket):\n websocket: Union[FastAPIWebSocket,\n WebSocketClientProtocol] = None # type: ignore\n\n @property\n @overrides(BaseWebSocket)\n def closed(self) -> bool:\n if isinstance(self.websocket, FastAPIWebSocket):\n return (\n self.websocket.client_state == WebSocketState.DISCONNECTED or\n self.websocket.application_state == WebSocketState.DISCONNECTED)\n else:\n return self.websocket.closed\n\n @overrides(BaseWebSocket)\n async def accept(self):\n if isinstance(self.websocket, FastAPIWebSocket):\n await self.websocket.accept()\n else:\n raise NotImplementedError\n\n @overrides(BaseWebSocket)\n async def close(self, code: int = status.WS_1000_NORMAL_CLOSURE):\n await self.websocket.close(code)\n\n @overrides(BaseWebSocket)\n async def receive(self) -> str:\n if isinstance(self.websocket, FastAPIWebSocket):\n return await self.websocket.receive_text()\n else:\n msg = await self.websocket.recv()\n return msg.decode(\"utf-8\") if isinstance(msg, bytes) else msg\n\n @overrides(BaseWebSocket)\n async def receive_bytes(self) -> bytes:\n if isinstance(self.websocket, FastAPIWebSocket):\n return await self.websocket.receive_bytes()\n else:\n msg = await self.websocket.recv()\n return msg.encode(\"utf-8\") if isinstance(msg, str) else msg\n\n @overrides(BaseWebSocket)\n async def send(self, data: str) -> None:\n if isinstance(self.websocket, FastAPIWebSocket):\n await self.websocket.send({\"type\": \"websocket.send\", \"text\": data})\n else:\n await self.websocket.send(data)\n\n @overrides(BaseWebSocket)\n async def send_bytes(self, data: bytes) -> None:\n if isinstance(self.websocket, FastAPIWebSocket):\n await self.websocket.send({\"type\": \"websocket.send\", \"bytes\": data})\n else:\n await self.websocket.send(data)\n", "path": "nonebot/drivers/fastapi.py"}], "after_files": [{"content": "\"\"\"\nFastAPI \u9a71\u52a8\u9002\u914d\n================\n\n\u672c\u9a71\u52a8\u540c\u65f6\u652f\u6301\u670d\u52a1\u7aef\u4ee5\u53ca\u5ba2\u6237\u7aef\u8fde\u63a5\n\n\u540e\u7aef\u4f7f\u7528\u65b9\u6cd5\u8bf7\u53c2\u8003: `FastAPI \u6587\u6863`_\n\n.. 
_FastAPI \u6587\u6863:\n https://fastapi.tiangolo.com/\n\"\"\"\n\nimport asyncio\nimport logging\nfrom dataclasses import dataclass\nfrom typing import List, Union, Callable, Optional, Awaitable, cast\n\nimport httpx\nimport uvicorn\nfrom pydantic import BaseSettings\nfrom fastapi.responses import Response\nfrom websockets.exceptions import ConnectionClosed\nfrom fastapi import FastAPI, Request, HTTPException, status\nfrom starlette.websockets import WebSocket as FastAPIWebSocket\nfrom starlette.websockets import WebSocketState, WebSocketDisconnect\nfrom websockets.legacy.client import Connect, WebSocketClientProtocol\n\nfrom nonebot.config import Env\nfrom nonebot.log import logger\nfrom nonebot.adapters import Bot\nfrom nonebot.typing import overrides\nfrom nonebot.utils import escape_tag\nfrom nonebot.config import Config as NoneBotConfig\nfrom nonebot.drivers import WebSocket as BaseWebSocket\nfrom nonebot.drivers import (HTTPRequest, ForwardDriver, ReverseDriver,\n WebSocketSetup, HTTPPollingSetup)\n\nHTTPPOLLING_SETUP = Union[HTTPPollingSetup,\n Callable[[], Awaitable[HTTPPollingSetup]]]\nWEBSOCKET_SETUP = Union[WebSocketSetup, Callable[[], Awaitable[WebSocketSetup]]]\n\n\nclass Config(BaseSettings):\n \"\"\"\n FastAPI \u9a71\u52a8\u6846\u67b6\u8bbe\u7f6e\uff0c\u8be6\u60c5\u53c2\u8003 FastAPI \u6587\u6863\n \"\"\"\n fastapi_openapi_url: Optional[str] = None\n \"\"\"\n :\u7c7b\u578b:\n\n ``Optional[str]``\n\n :\u8bf4\u660e:\n\n ``openapi.json`` \u5730\u5740\uff0c\u9ed8\u8ba4\u4e3a ``None`` \u5373\u5173\u95ed\n \"\"\"\n fastapi_docs_url: Optional[str] = None\n \"\"\"\n :\u7c7b\u578b:\n\n ``Optional[str]``\n\n :\u8bf4\u660e:\n\n ``swagger`` \u5730\u5740\uff0c\u9ed8\u8ba4\u4e3a ``None`` \u5373\u5173\u95ed\n \"\"\"\n fastapi_redoc_url: Optional[str] = None\n \"\"\"\n :\u7c7b\u578b:\n\n ``Optional[str]``\n\n :\u8bf4\u660e:\n\n ``redoc`` \u5730\u5740\uff0c\u9ed8\u8ba4\u4e3a ``None`` \u5373\u5173\u95ed\n \"\"\"\n fastapi_reload: Optional[bool] = None\n \"\"\"\n :\u7c7b\u578b:\n\n ``Optional[bool]``\n\n :\u8bf4\u660e:\n\n \u5f00\u542f/\u5173\u95ed\u51b7\u91cd\u8f7d\uff0c\u9ed8\u8ba4\u4f1a\u5728\u914d\u7f6e\u4e86 app \u7684 debug \u6a21\u5f0f\u542f\u7528\n \"\"\"\n fastapi_reload_dirs: Optional[List[str]] = None\n \"\"\"\n :\u7c7b\u578b:\n\n ``Optional[List[str]]``\n\n :\u8bf4\u660e:\n\n \u91cd\u8f7d\u76d1\u63a7\u6587\u4ef6\u5939\u5217\u8868\uff0c\u9ed8\u8ba4\u4e3a uvicorn \u9ed8\u8ba4\u503c\n \"\"\"\n fastapi_reload_delay: Optional[float] = None\n \"\"\"\n :\u7c7b\u578b:\n\n ``Optional[float]``\n\n :\u8bf4\u660e:\n\n \u91cd\u8f7d\u5ef6\u8fdf\uff0c\u9ed8\u8ba4\u4e3a uvicorn \u9ed8\u8ba4\u503c\n \"\"\"\n fastapi_reload_includes: Optional[List[str]] = None\n \"\"\"\n :\u7c7b\u578b:\n\n ``Optional[List[str]]``\n\n :\u8bf4\u660e:\n\n \u8981\u76d1\u542c\u7684\u6587\u4ef6\u5217\u8868\uff0c\u652f\u6301 glob pattern\uff0c\u9ed8\u8ba4\u4e3a uvicorn \u9ed8\u8ba4\u503c\n \"\"\"\n fastapi_reload_excludes: Optional[List[str]] = None\n \"\"\"\n :\u7c7b\u578b:\n\n ``Optional[List[str]]``\n\n :\u8bf4\u660e:\n\n \u4e0d\u8981\u76d1\u542c\u7684\u6587\u4ef6\u5217\u8868\uff0c\u652f\u6301 glob pattern\uff0c\u9ed8\u8ba4\u4e3a uvicorn \u9ed8\u8ba4\u503c\n \"\"\"\n\n class Config:\n extra = \"ignore\"\n\n\nclass Driver(ReverseDriver, ForwardDriver):\n \"\"\"\n FastAPI \u9a71\u52a8\u6846\u67b6\n\n :\u4e0a\u62a5\u5730\u5740:\n\n * ``/{adapter name}/``: HTTP POST \u4e0a\u62a5\n * ``/{adapter name}/http/``: HTTP POST \u4e0a\u62a5\n * ``/{adapter name}/ws``: WebSocket \u4e0a\u62a5\n * ``/{adapter name}/ws/``: WebSocket 
\u4e0a\u62a5\n \"\"\"\n\n def __init__(self, env: Env, config: NoneBotConfig):\n super().__init__(env, config)\n\n self.fastapi_config: Config = Config(**config.dict())\n self.http_pollings: List[HTTPPOLLING_SETUP] = []\n self.websockets: List[WEBSOCKET_SETUP] = []\n self.shutdown: asyncio.Event = asyncio.Event()\n self.connections: List[asyncio.Task] = []\n\n self._server_app = FastAPI(\n debug=config.debug,\n openapi_url=self.fastapi_config.fastapi_openapi_url,\n docs_url=self.fastapi_config.fastapi_docs_url,\n redoc_url=self.fastapi_config.fastapi_redoc_url,\n )\n\n self._server_app.post(\"/{adapter}/\")(self._handle_http)\n self._server_app.post(\"/{adapter}/http\")(self._handle_http)\n self._server_app.websocket(\"/{adapter}/ws\")(self._handle_ws_reverse)\n self._server_app.websocket(\"/{adapter}/ws/\")(self._handle_ws_reverse)\n\n self.on_startup(self._run_forward)\n self.on_shutdown(self._shutdown_forward)\n\n @property\n @overrides(ReverseDriver)\n def type(self) -> str:\n \"\"\"\u9a71\u52a8\u540d\u79f0: ``fastapi``\"\"\"\n return \"fastapi\"\n\n @property\n @overrides(ReverseDriver)\n def server_app(self) -> FastAPI:\n \"\"\"``FastAPI APP`` \u5bf9\u8c61\"\"\"\n return self._server_app\n\n @property\n @overrides(ReverseDriver)\n def asgi(self) -> FastAPI:\n \"\"\"``FastAPI APP`` \u5bf9\u8c61\"\"\"\n return self._server_app\n\n @property\n @overrides(ReverseDriver)\n def logger(self) -> logging.Logger:\n \"\"\"fastapi \u4f7f\u7528\u7684 logger\"\"\"\n return logging.getLogger(\"fastapi\")\n\n @overrides(ReverseDriver)\n def on_startup(self, func: Callable) -> Callable:\n \"\"\"\u53c2\u8003\u6587\u6863: `Events <https://fastapi.tiangolo.com/advanced/events/#startup-event>`_\"\"\"\n return self.server_app.on_event(\"startup\")(func)\n\n @overrides(ReverseDriver)\n def on_shutdown(self, func: Callable) -> Callable:\n \"\"\"\u53c2\u8003\u6587\u6863: `Events <https://fastapi.tiangolo.com/advanced/events/#startup-event>`_\"\"\"\n return self.server_app.on_event(\"shutdown\")(func)\n\n @overrides(ForwardDriver)\n def setup_http_polling(self, setup: HTTPPOLLING_SETUP) -> None:\n \"\"\"\n :\u8bf4\u660e:\n\n \u6ce8\u518c\u4e00\u4e2a HTTP \u8f6e\u8be2\u8fde\u63a5\uff0c\u5982\u679c\u4f20\u5165\u4e00\u4e2a\u51fd\u6570\uff0c\u5219\u8be5\u51fd\u6570\u4f1a\u5728\u6bcf\u6b21\u8fde\u63a5\u65f6\u88ab\u8c03\u7528\n\n :\u53c2\u6570:\n\n * ``setup: Union[HTTPPollingSetup, Callable[[], Awaitable[HTTPPollingSetup]]]``\n \"\"\"\n self.http_pollings.append(setup)\n\n @overrides(ForwardDriver)\n def setup_websocket(self, setup: WEBSOCKET_SETUP) -> None:\n \"\"\"\n :\u8bf4\u660e:\n\n \u6ce8\u518c\u4e00\u4e2a WebSocket \u8fde\u63a5\uff0c\u5982\u679c\u4f20\u5165\u4e00\u4e2a\u51fd\u6570\uff0c\u5219\u8be5\u51fd\u6570\u4f1a\u5728\u6bcf\u6b21\u91cd\u8fde\u65f6\u88ab\u8c03\u7528\n\n :\u53c2\u6570:\n\n * ``setup: Union[WebSocketSetup, Callable[[], Awaitable[WebSocketSetup]]]``\n \"\"\"\n self.websockets.append(setup)\n\n @overrides(ReverseDriver)\n def run(self,\n host: Optional[str] = None,\n port: Optional[int] = None,\n *,\n app: Optional[str] = None,\n **kwargs):\n \"\"\"\u4f7f\u7528 ``uvicorn`` \u542f\u52a8 FastAPI\"\"\"\n super().run(host, port, app, **kwargs)\n LOGGING_CONFIG = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"handlers\": {\n \"default\": {\n \"class\": \"nonebot.log.LoguruHandler\",\n },\n },\n \"loggers\": {\n \"uvicorn.error\": {\n \"handlers\": [\"default\"],\n \"level\": \"INFO\"\n },\n \"uvicorn.access\": {\n \"handlers\": [\"default\"],\n \"level\": \"INFO\",\n },\n },\n }\n 
uvicorn.run(\n app or self.server_app, # type: ignore\n host=host or str(self.config.host),\n port=port or self.config.port,\n reload=self.fastapi_config.fastapi_reload\n if self.fastapi_config.fastapi_reload is not None else\n (bool(app) and self.config.debug),\n reload_dirs=self.fastapi_config.fastapi_reload_dirs,\n reload_delay=self.fastapi_config.fastapi_reload_delay,\n reload_includes=self.fastapi_config.fastapi_reload_includes,\n reload_excludes=self.fastapi_config.fastapi_reload_excludes,\n debug=self.config.debug,\n log_config=LOGGING_CONFIG,\n **kwargs)\n\n def _run_forward(self):\n for setup in self.http_pollings:\n self.connections.append(asyncio.create_task(self._http_loop(setup)))\n for setup in self.websockets:\n self.connections.append(asyncio.create_task(self._ws_loop(setup)))\n\n def _shutdown_forward(self):\n self.shutdown.set()\n for task in self.connections:\n if not task.done():\n task.cancel()\n\n async def _handle_http(self, adapter: str, request: Request):\n data = await request.body()\n\n if adapter not in self._adapters:\n logger.warning(\n f\"Unknown adapter {adapter}. Please register the adapter before use.\"\n )\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,\n detail=\"adapter not found\")\n\n # \u521b\u5efa Bot \u5bf9\u8c61\n BotClass = self._adapters[adapter]\n http_request = HTTPRequest(request.scope[\"http_version\"],\n request.url.scheme, request.url.path,\n request.scope[\"query_string\"],\n dict(request.headers), request.method, data)\n x_self_id, response = await BotClass.check_permission(\n self, http_request)\n\n if not x_self_id:\n raise HTTPException(\n response and response.status or 401, response and\n response.body and response.body.decode(\"utf-8\"))\n\n if x_self_id in self._clients:\n logger.warning(\"There's already a reverse websocket connection,\"\n \"so the event may be handled twice.\")\n\n bot = BotClass(x_self_id, http_request)\n\n asyncio.create_task(bot.handle_message(data))\n return Response(response and response.body,\n response and response.status or 200)\n\n async def _handle_ws_reverse(self, adapter: str,\n websocket: FastAPIWebSocket):\n ws = WebSocket(websocket.scope.get(\"http_version\",\n \"1.1\"), websocket.url.scheme,\n websocket.url.path, websocket.scope[\"query_string\"],\n dict(websocket.headers), websocket)\n\n if adapter not in self._adapters:\n logger.warning(\n f\"Unknown adapter {adapter}. 
Please register the adapter before use.\"\n )\n await ws.close(code=status.WS_1008_POLICY_VIOLATION)\n return\n\n # Create Bot Object\n BotClass = self._adapters[adapter]\n self_id, _ = await BotClass.check_permission(self, ws)\n\n if not self_id:\n await ws.close(code=status.WS_1008_POLICY_VIOLATION)\n return\n\n if self_id in self._clients:\n logger.opt(colors=True).warning(\n \"There's already a websocket connection, \"\n f\"<y>{escape_tag(adapter.upper())} Bot {escape_tag(self_id)}</y> ignored.\"\n )\n await ws.close(code=status.WS_1008_POLICY_VIOLATION)\n return\n\n bot = BotClass(self_id, ws)\n\n await ws.accept()\n logger.opt(colors=True).info(\n f\"WebSocket Connection from <y>{escape_tag(adapter.upper())} \"\n f\"Bot {escape_tag(self_id)}</y> Accepted!\")\n\n self._bot_connect(bot)\n\n try:\n while not ws.closed:\n try:\n data = await ws.receive()\n except WebSocketDisconnect:\n logger.error(\"WebSocket disconnected by peer.\")\n break\n except Exception as e:\n logger.opt(exception=e).error(\n \"Error when receiving data from websocket.\")\n break\n\n asyncio.create_task(bot.handle_message(data.encode()))\n finally:\n self._bot_disconnect(bot)\n\n async def _http_loop(self, setup: HTTPPOLLING_SETUP):\n\n async def _build_request(\n setup: HTTPPollingSetup) -> Optional[HTTPRequest]:\n url = httpx.URL(setup.url)\n if not url.netloc:\n logger.opt(colors=True).error(\n f\"<r><bg #f8bbd0>Error parsing url {escape_tag(str(url))}</bg #f8bbd0></r>\"\n )\n return\n return HTTPRequest(\n setup.http_version, url.scheme, url.path, url.query, {\n **setup.headers, \"host\": url.netloc.decode(\"ascii\")\n }, setup.method, setup.body)\n\n bot: Optional[Bot] = None\n request: Optional[HTTPRequest] = None\n setup_: Optional[HTTPPollingSetup] = None\n\n logger.opt(colors=True).info(\n f\"Start http polling for <y>{escape_tag(setup.adapter.upper())} \"\n f\"Bot {escape_tag(setup.self_id)}</y>\")\n\n try:\n async with httpx.AsyncClient(http2=True) as session:\n while not self.shutdown.is_set():\n\n try:\n if callable(setup):\n setup_ = await setup()\n else:\n setup_ = setup\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n \"<r><bg #f8bbd0>Error while parsing setup \"\n f\"{escape_tag(repr(setup))}.</bg #f8bbd0></r>\")\n await asyncio.sleep(3)\n continue\n\n if not bot:\n request = await _build_request(setup_)\n if not request:\n return\n BotClass = self._adapters[setup.adapter]\n bot = BotClass(setup.self_id, request)\n self._bot_connect(bot)\n elif callable(setup):\n request = await _build_request(setup_)\n if not request:\n await asyncio.sleep(setup_.poll_interval)\n continue\n bot.request = request\n\n setup_ = cast(HTTPPollingSetup, setup_)\n request = cast(HTTPRequest, request)\n headers = request.headers\n\n logger.debug(\n f\"Bot {setup_.self_id} from adapter {setup_.adapter} request {setup_.url}\"\n )\n try:\n response = await session.request(request.method,\n setup_.url,\n content=request.body,\n headers=headers,\n timeout=30.)\n response.raise_for_status()\n data = response.read()\n asyncio.create_task(bot.handle_message(data))\n except httpx.HTTPError as e:\n logger.opt(colors=True, exception=e).error(\n f\"<r><bg #f8bbd0>Error occurred while requesting {escape_tag(setup_.url)}. 
\"\n \"Try to reconnect...</bg #f8bbd0></r>\")\n\n await asyncio.sleep(setup_.poll_interval)\n\n except asyncio.CancelledError:\n pass\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n \"<r><bg #f8bbd0>Unexpected exception occurred \"\n \"while http polling</bg #f8bbd0></r>\")\n finally:\n if bot:\n self._bot_disconnect(bot)\n\n async def _ws_loop(self, setup: WEBSOCKET_SETUP):\n bot: Optional[Bot] = None\n\n try:\n while True:\n\n try:\n if callable(setup):\n setup_ = await setup()\n else:\n setup_ = setup\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n \"<r><bg #f8bbd0>Error while parsing setup \"\n f\"{escape_tag(repr(setup))}.</bg #f8bbd0></r>\")\n await asyncio.sleep(3)\n continue\n\n url = httpx.URL(setup_.url)\n if not url.netloc:\n logger.opt(colors=True).error(\n f\"<r><bg #f8bbd0>Error parsing url {escape_tag(str(url))}</bg #f8bbd0></r>\"\n )\n return\n\n headers = setup_.headers.copy()\n logger.debug(\n f\"Bot {setup_.self_id} from adapter {setup_.adapter} connecting to {url}\"\n )\n try:\n connection = Connect(setup_.url, extra_headers=headers)\n async with connection as ws:\n logger.opt(colors=True).info(\n f\"WebSocket Connection to <y>{escape_tag(setup_.adapter.upper())} \"\n f\"Bot {escape_tag(setup_.self_id)}</y> succeeded!\")\n request = WebSocket(\"1.1\", url.scheme, url.path,\n url.query, headers, ws)\n\n BotClass = self._adapters[setup_.adapter]\n bot = BotClass(setup_.self_id, request)\n self._bot_connect(bot)\n while not self.shutdown.is_set():\n # use try except instead of \"request.closed\" because of queued message\n try:\n msg = await request.receive_bytes()\n asyncio.create_task(bot.handle_message(msg))\n except ConnectionClosed:\n logger.opt(colors=True).error(\n \"<r><bg #f8bbd0>WebSocket connection closed by peer. \"\n \"Try to reconnect...</bg #f8bbd0></r>\")\n break\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n f\"<r><bg #f8bbd0>Error while connecting to {url}. 
\"\n \"Try to reconnect...</bg #f8bbd0></r>\")\n finally:\n if bot:\n self._bot_disconnect(bot)\n bot = None\n await asyncio.sleep(setup_.reconnect_interval)\n\n except asyncio.CancelledError:\n pass\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n \"<r><bg #f8bbd0>Unexpected exception occurred \"\n \"while websocket loop</bg #f8bbd0></r>\")\n\n\n@dataclass\nclass WebSocket(BaseWebSocket):\n websocket: Union[FastAPIWebSocket,\n WebSocketClientProtocol] = None # type: ignore\n\n @property\n @overrides(BaseWebSocket)\n def closed(self) -> bool:\n if isinstance(self.websocket, FastAPIWebSocket):\n return (\n self.websocket.client_state == WebSocketState.DISCONNECTED or\n self.websocket.application_state == WebSocketState.DISCONNECTED)\n else:\n return self.websocket.closed\n\n @overrides(BaseWebSocket)\n async def accept(self):\n if isinstance(self.websocket, FastAPIWebSocket):\n await self.websocket.accept()\n else:\n raise NotImplementedError\n\n @overrides(BaseWebSocket)\n async def close(self, code: int = status.WS_1000_NORMAL_CLOSURE):\n await self.websocket.close(code)\n\n @overrides(BaseWebSocket)\n async def receive(self) -> str:\n if isinstance(self.websocket, FastAPIWebSocket):\n return await self.websocket.receive_text()\n else:\n msg = await self.websocket.recv()\n return msg.decode(\"utf-8\") if isinstance(msg, bytes) else msg\n\n @overrides(BaseWebSocket)\n async def receive_bytes(self) -> bytes:\n if isinstance(self.websocket, FastAPIWebSocket):\n return await self.websocket.receive_bytes()\n else:\n msg = await self.websocket.recv()\n return msg.encode(\"utf-8\") if isinstance(msg, str) else msg\n\n @overrides(BaseWebSocket)\n async def send(self, data: str) -> None:\n if isinstance(self.websocket, FastAPIWebSocket):\n await self.websocket.send({\"type\": \"websocket.send\", \"text\": data})\n else:\n await self.websocket.send(data)\n\n @overrides(BaseWebSocket)\n async def send_bytes(self, data: bytes) -> None:\n if isinstance(self.websocket, FastAPIWebSocket):\n await self.websocket.send({\"type\": \"websocket.send\", \"bytes\": data})\n else:\n await self.websocket.send(data)\n", "path": "nonebot/drivers/fastapi.py"}]} |
gh_patches_debug_1223 | rasdani/github-patches | git_diff | translate__pootle-6098 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
String needs to use plural form
Reported by @milupo in https://github.com/translate/pootle/issues/6061#issuecomment-284076850
There is one more string:
templates/includes/formtable.html:27
Showing %(count)s results per page
I know, percents are not used here.
--- END ISSUE ---
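For context, a count-dependent message like the one quoted above is normally marked with a plural-aware gettext call (or, in a Django template, a blocktrans block with a count clause) so translators get one entry per plural form. The sketch below uses Django's standard i18n API; the helper name and exact wording are illustrative assumptions, not Pootle's actual template code.

```python
# Hedged sketch of a plural-aware message. The helper name and wording are
# assumptions for illustration only, not taken from Pootle.
from django.utils.translation import ungettext


def results_per_page_message(count):
    return ungettext(
        "Showing %(count)d result per page",
        "Showing %(count)d results per page",
        count,
    ) % {"count": count}
```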
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pootle/apps/pootle_store/forms.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 """Form fields required for handling translation files."""
10
11 from translate.misc.multistring import multistring
12
13 from django import forms
14 from django.contrib.auth import get_user_model
15 from django.urls import Resolver404, resolve
16 from django.utils import timezone
17 from django.utils.translation import get_language
18
19 from pootle.core.contextmanagers import update_data_after
20 from pootle.core.delegate import review
21 from pootle.core.url_helpers import split_pootle_path
22 from pootle.core.utils.timezone import make_aware
23 from pootle.i18n.gettext import ugettext as _
24 from pootle_app.models import Directory
25 from pootle_app.models.permissions import check_permission, check_user_permission
26 from pootle_comment.forms import UnsecuredCommentForm
27 from pootle_misc.checks import CATEGORY_CODES, check_names
28 from pootle_misc.util import get_date_interval
29 from pootle_project.models import Project
30 from pootle_statistics.models import (Submission, SubmissionFields,
31 SubmissionTypes)
32
33 from .constants import ALLOWED_SORTS, FUZZY, OBSOLETE, TRANSLATED, UNTRANSLATED
34 from .fields import to_db
35 from .form_fields import (
36 CategoryChoiceField, ISODateTimeField, MultipleArgsField,
37 CommaSeparatedCheckboxSelectMultiple)
38 from .models import Suggestion, Unit
39
40
41 UNIT_SEARCH_FILTER_CHOICES = (
42 ("all", "all"),
43 ("translated", "translated"),
44 ("untranslated", "untranslated"),
45 ("fuzzy", "fuzzy"),
46 ("incomplete", "incomplete"),
47 ("suggestions", "suggestions"),
48 ("my-suggestions", "my-suggestions"),
49 ("user-suggestions", "user-suggestions"),
50 ("user-suggestions-accepted", "user-suggestions-accepted"),
51 ("user-suggestions-rejected", "user-suggestions-rejected"),
52 ("my-submissions", "my-submissions"),
53 ("user-submissions", "user-submissions"),
54 ("my-submissions-overwritten", "my-submissions-overwritten"),
55 ("user-submissions-overwritten", "user-submissions-overwritten"),
56 ("checks", "checks"))
57
58 UNIT_SEARCH_SORT_CHOICES = (
59 ('priority', 'priority'),
60 ('oldest', 'oldest'),
61 ('newest', 'newest'))
62
63 # # # # # # # text cleanup and highlighting # # # # # # # # # # # # #
64
65
66 class MultiStringWidgetMixin(object):
67
68 def decompress(self, value):
69 if value is None:
70 return [None] * len(self.widgets)
71 elif isinstance(value, multistring):
72 return [string for string in value.strings]
73 elif isinstance(value, list):
74 return value
75 elif isinstance(value, basestring):
76 return [value]
77
78 raise ValueError
79
80
81 class MultiStringWidget(MultiStringWidgetMixin, forms.MultiWidget):
82 """Custom Widget for editing multistrings, expands number of text
83 area based on number of plural forms.
84 """
85
86 def __init__(self, attrs=None, nplurals=1, textarea=True):
87 if textarea:
88 widget = forms.Textarea
89 else:
90 widget = forms.TextInput
91
92 widgets = [widget(attrs=attrs) for i_ in xrange(nplurals)]
93 super(MultiStringWidget, self).__init__(widgets, attrs)
94
95 def format_output(self, rendered_widgets):
96 from django.utils.safestring import mark_safe
97 if len(rendered_widgets) == 1:
98 return mark_safe(rendered_widgets[0])
99
100 output = ''
101 for i, widget in enumerate(rendered_widgets):
102 output += '<div lang="%s" title="%s">' % \
103 (get_language(), _('Plural Form %d', i))
104 output += widget
105 output += '</div>'
106
107 return mark_safe(output)
108
109
110 class HiddenMultiStringWidget(MultiStringWidgetMixin, forms.MultiWidget):
111 """Uses hidden input instead of textareas."""
112
113 def __init__(self, attrs=None, nplurals=1):
114 widgets = [forms.HiddenInput(attrs=attrs) for i_ in xrange(nplurals)]
115 super(HiddenMultiStringWidget, self).__init__(widgets, attrs)
116
117 def format_output(self, rendered_widgets):
118 return super(
119 HiddenMultiStringWidget, self).format_output(rendered_widgets)
120
121 def __call__(self):
122 # HACKISH: Django is inconsistent in how it handles Field.widget and
123 # Field.hidden_widget, it expects widget to be an instantiated object
124 # and hidden_widget to be a class, since we need to specify nplurals at
125 # run time we can let django instantiate hidden_widget.
126 #
127 # making the object callable let's us get away with forcing an object
128 # where django expects a class
129 return self
130
131
132 class MultiStringFormField(forms.MultiValueField):
133
134 def __init__(self, nplurals=1, attrs=None, textarea=True, *args, **kwargs):
135 self.widget = MultiStringWidget(nplurals=nplurals, attrs=attrs,
136 textarea=textarea)
137 self.hidden_widget = HiddenMultiStringWidget(nplurals=nplurals)
138 fields = [forms.CharField(strip=False) for i_ in range(nplurals)]
139 super(MultiStringFormField, self).__init__(fields=fields,
140 *args, **kwargs)
141
142 def compress(self, data_list):
143 return data_list
144
145
146 class UnitStateField(forms.BooleanField):
147
148 def to_python(self, value):
149 """Returns a Python boolean object.
150
151 It is necessary to customize the behavior because the default
152 ``BooleanField`` treats the string '0' as ``False``, but if the
153 unit is in ``UNTRANSLATED`` state (which would report '0' as a
154 value), we need the marked checkbox to be evaluated as ``True``.
155
156 :return: ``False`` for any unknown :cls:`~pootle_store.models.Unit`
157 states and for the 'False' string.
158 """
159 truthy_values = (str(s) for s in (UNTRANSLATED, FUZZY, TRANSLATED))
160 if (isinstance(value, basestring) and
161 (value.lower() == 'false' or value not in truthy_values)):
162 value = False
163 else:
164 value = bool(value)
165
166 return super(UnitStateField, self).to_python(value)
167
168
169 def unit_form_factory(language, snplurals=None, request=None):
170
171 if snplurals is not None:
172 tnplurals = language.nplurals
173 else:
174 tnplurals = 1
175
176 action_disabled = False
177 if request is not None:
178 cantranslate = check_permission("translate", request)
179 cansuggest = check_permission("suggest", request)
180
181 if not (cansuggest or cantranslate):
182 action_disabled = True
183
184 target_attrs = {
185 'lang': language.code,
186 'dir': language.direction,
187 'class': 'translation expanding focusthis js-translation-area',
188 'rows': 2,
189 'tabindex': 10,
190 }
191
192 fuzzy_attrs = {
193 'accesskey': 'f',
194 'class': 'fuzzycheck',
195 'tabindex': 13,
196 }
197
198 if action_disabled:
199 target_attrs['disabled'] = 'disabled'
200 fuzzy_attrs['disabled'] = 'disabled'
201
202 class UnitForm(forms.ModelForm):
203 class Meta(object):
204 model = Unit
205 fields = ('target_f', 'state',)
206
207 target_f = MultiStringFormField(
208 nplurals=tnplurals,
209 required=False,
210 attrs=target_attrs,
211 )
212 state = UnitStateField(
213 required=False,
214 label=_('Needs work'),
215 widget=forms.CheckboxInput(
216 attrs=fuzzy_attrs,
217 check_test=lambda x: x == FUZZY,
218 ),
219 )
220 suggestion = forms.ModelChoiceField(
221 queryset=Suggestion.objects.all(),
222 required=False)
223 comment = forms.CharField(required=False)
224
225 def __init__(self, *args, **kwargs):
226 self.request = kwargs.pop('request', None)
227 self.user = self.request.user
228 super(UnitForm, self).__init__(*args, **kwargs)
229 self._updated_fields = []
230 self.fields['target_f'].widget.attrs['data-translation-aid'] = \
231 self['target_f'].value()
232
233 @property
234 def updated_fields(self):
235 order_dict = {
236 SubmissionFields.STATE: 0,
237 SubmissionFields.TARGET: 1,
238 }
239 return sorted(self._updated_fields, key=lambda x: order_dict[x[0]])
240
241 def clean_target_f(self):
242 value = self.cleaned_data['target_f']
243
244 if self.instance.target != multistring(value or [u'']):
245 self._updated_fields.append((SubmissionFields.TARGET,
246 to_db(self.instance.target),
247 to_db(value)))
248
249 return value
250
251 def clean(self):
252 old_state = self.instance.state # Integer
253 is_fuzzy = self.cleaned_data['state'] # Boolean
254 new_target = self.cleaned_data['target_f']
255
256 # If suggestion is provided set `old_state` should be `TRANSLATED`.
257 if self.cleaned_data['suggestion']:
258 old_state = TRANSLATED
259
260 # Skip `TARGET` field submission if suggestion value is equal
261 # to submitted translation
262 if new_target == self.cleaned_data['suggestion'].target_f:
263 self._updated_fields = []
264
265 if (self.request is not None and
266 not check_permission('administrate', self.request) and
267 is_fuzzy):
268 self.add_error('state',
269 forms.ValidationError(
270 _('Needs work flag must be '
271 'cleared')))
272
273 if new_target:
274 if is_fuzzy:
275 new_state = FUZZY
276 else:
277 new_state = TRANSLATED
278 else:
279 new_state = UNTRANSLATED
280 if old_state not in [new_state, OBSOLETE]:
281 self._updated_fields.append((SubmissionFields.STATE,
282 old_state, new_state))
283
284 self.cleaned_data['state'] = new_state
285 else:
286 self.cleaned_data['state'] = old_state
287
288 return super(UnitForm, self).clean()
289
290 def save(self, *args, **kwargs):
291 changed_with = kwargs.pop("changed_with", None)
292 kwargs["commit"] = False
293 unit = super(UnitForm, self).save(*args, **kwargs)
294 with update_data_after(unit.store):
295 current_time = timezone.now()
296 if SubmissionFields.TARGET in (f[0] for f in self.updated_fields):
297 unit.submitted_by = self.user
298 unit.submitted_on = current_time
299 unit.reviewed_by = None
300 unit.reviewed_on = None
301 suggestion = self.cleaned_data["suggestion"]
302 user = (
303 suggestion.user
304 if suggestion
305 else self.user)
306 unit.save(
307 submitted_on=current_time,
308 submitted_by=user,
309 changed_with=changed_with)
310 translation_project = unit.store.translation_project
311 for field, old_value, new_value in self.updated_fields:
312 if field == SubmissionFields.TARGET and suggestion:
313 old_value = str(suggestion.target_f)
314 sub = Submission(
315 creation_time=current_time,
316 translation_project=translation_project,
317 submitter=self.user,
318 unit=unit,
319 field=field,
320 type=SubmissionTypes.WEB,
321 old_value=old_value,
322 new_value=new_value)
323 sub.save()
324 return unit
325
326 return UnitForm
327
328
329 def unit_comment_form_factory(language):
330
331 comment_attrs = {
332 'lang': language.code,
333 'dir': language.direction,
334 'class': 'comments expanding focusthis',
335 'rows': 1,
336 'tabindex': 15,
337 }
338
339 class UnitCommentForm(forms.ModelForm):
340
341 class Meta(object):
342 fields = ('translator_comment',)
343 model = Unit
344
345 translator_comment = forms.CharField(
346 required=True,
347 label=_("Translator comment"),
348 widget=forms.Textarea(attrs=comment_attrs),
349 )
350
351 def __init__(self, *args, **kwargs):
352 self.request = kwargs.pop('request', None)
353 self.previous_value = ''
354
355 super(UnitCommentForm, self).__init__(*args, **kwargs)
356
357 if self.request.method == 'DELETE':
358 self.fields['translator_comment'].required = False
359
360 def clean_translator_comment(self):
361 # HACKISH: Setting empty string when `DELETE` is being used
362 if self.request.method == 'DELETE':
363 self.previous_value = self.instance.translator_comment
364 return ''
365
366 return self.cleaned_data['translator_comment']
367
368 def save(self, **kwargs):
369 """Register the submission and save the comment."""
370 if self.has_changed():
371 creation_time = timezone.now()
372 translation_project = self.request.translation_project
373
374 sub = Submission(
375 creation_time=creation_time,
376 translation_project=translation_project,
377 submitter=self.request.user,
378 unit=self.instance,
379 field=SubmissionFields.COMMENT,
380 type=SubmissionTypes.WEB,
381 old_value=self.previous_value,
382 new_value=self.cleaned_data['translator_comment']
383 )
384 sub.save()
385 super(UnitCommentForm, self).save(**kwargs)
386
387 return UnitCommentForm
388
389
390 class UnitSearchForm(forms.Form):
391
392 offset = forms.IntegerField(required=False)
393 path = forms.CharField(
394 max_length=2048,
395 required=True)
396 previous_uids = MultipleArgsField(
397 field=forms.IntegerField(),
398 required=False)
399 uids = MultipleArgsField(
400 field=forms.IntegerField(),
401 required=False)
402 filter = forms.ChoiceField(
403 required=False,
404 choices=UNIT_SEARCH_FILTER_CHOICES)
405 checks = forms.MultipleChoiceField(
406 required=False,
407 widget=CommaSeparatedCheckboxSelectMultiple,
408 choices=check_names.items())
409 category = CategoryChoiceField(
410 required=False,
411 choices=CATEGORY_CODES.items())
412 month = forms.DateField(
413 required=False,
414 input_formats=['%Y-%m'])
415 sort = forms.ChoiceField(
416 required=False,
417 choices=UNIT_SEARCH_SORT_CHOICES)
418
419 user = forms.ModelChoiceField(
420 queryset=get_user_model().objects.all(),
421 required=False,
422 to_field_name="username")
423
424 search = forms.CharField(required=False)
425
426 soptions = forms.MultipleChoiceField(
427 required=False,
428 widget=forms.CheckboxSelectMultiple,
429 choices=(
430 ('exact', _('Exact Match')), ))
431
432 sfields = forms.MultipleChoiceField(
433 required=False,
434 widget=CommaSeparatedCheckboxSelectMultiple,
435 choices=(
436 ('source', _('Source Text')),
437 ('target', _('Target Text')),
438 ('notes', _('Comments')),
439 ('locations', _('Locations'))),
440 initial=['source', 'target'])
441
442 def __init__(self, *args, **kwargs):
443 self.request_user = kwargs.pop("user")
444 super(UnitSearchForm, self).__init__(*args, **kwargs)
445 self.fields["modified-since"] = ISODateTimeField(required=False)
446
447 def clean(self):
448 if "checks" in self.errors:
449 del self.errors["checks"]
450 self.cleaned_data["checks"] = None
451 if "user" in self.errors:
452 del self.errors["user"]
453 self.cleaned_data["user"] = self.request_user
454 if self.errors:
455 return
456 self.cleaned_data['count'] = self.request_user.get_unit_rows()
457 self.cleaned_data["vfolder"] = None
458 pootle_path = self.cleaned_data.get("path")
459 path_keys = [
460 "project_code", "language_code", "dir_path", "filename"]
461 try:
462 path_kwargs = {
463 k: v
464 for k, v in resolve(pootle_path).kwargs.items()
465 if k in path_keys}
466 except Resolver404:
467 raise forms.ValidationError('Unrecognised path')
468 self.cleaned_data.update(path_kwargs)
469 sort_on = "units"
470 if "filter" in self.cleaned_data:
471 unit_filter = self.cleaned_data["filter"]
472 if unit_filter in ('suggestions', 'user-suggestions'):
473 sort_on = 'suggestions'
474 elif unit_filter in ('user-submissions', ):
475 sort_on = 'submissions'
476 sort_by_param = self.cleaned_data["sort"]
477 self.cleaned_data["sort_by"] = ALLOWED_SORTS[sort_on].get(sort_by_param)
478 self.cleaned_data["sort_on"] = sort_on
479
480 def clean_month(self):
481 if self.cleaned_data["month"]:
482 return get_date_interval(self.cleaned_data["month"].strftime("%Y-%m"))
483
484 def clean_user(self):
485 return self.cleaned_data["user"] or self.request_user
486
487 def clean_path(self):
488 lang_code, proj_code = split_pootle_path(
489 self.cleaned_data["path"])[:2]
490 if not (lang_code or proj_code):
491 permission_context = Directory.objects.projects
492 elif proj_code and not lang_code:
493 try:
494 permission_context = Project.objects.select_related(
495 "directory").get(code=proj_code).directory
496 except Project.DoesNotExist:
497 raise forms.ValidationError("Unrecognized path")
498 else:
499 # no permission checking on lang translate views
500 return self.cleaned_data["path"]
501 if self.request_user.is_superuser:
502 return self.cleaned_data["path"]
503 can_view_path = check_user_permission(
504 self.request_user, "administrate", permission_context)
505 if can_view_path:
506 return self.cleaned_data["path"]
507 raise forms.ValidationError("Unrecognized path")
508
509
510 class BaseSuggestionForm(UnsecuredCommentForm):
511 should_save = lambda self: True
512
513 def __init__(self, *args, **kwargs):
514 kwargs["request_user"] = kwargs.get("request_user") or self.request_user
515 super(BaseSuggestionForm, self).__init__(**kwargs)
516 self.fields["comment"].required = False
517
518 @property
519 def review_type(self):
520 return SubmissionTypes.WEB
521
522 @property
523 def suggestion_review(self):
524 return review.get(self.target_object.__class__)(
525 [self.target_object],
526 self.request_user,
527 review_type=self.review_type)
528
529
530 class SuggestionReviewForm(BaseSuggestionForm):
531
532 action = forms.ChoiceField(
533 required=True,
534 choices=(
535 ("accept", "Accept"),
536 ("reject", "Reject")))
537
538 def clean_action(self):
539 if self.target_object.state.name != "pending":
540 self.add_error(
541 "action",
542 forms.ValidationError(
543 _("Suggestion '%s' cannot be accepted/rejected twice!",
544 self.target_object)))
545 return self.data["action"]
546
547 def clean(self):
548 self_review = (
549 self.request_user == self.target_object.user
550 and self.cleaned_data.get("action") == "reject")
551 permission = (
552 "view"
553 if self_review
554 else "review")
555 has_permission = check_user_permission(
556 self.request_user,
557 permission,
558 self.target_object.unit.store.parent)
559 if not has_permission:
560 raise forms.ValidationError(
561 _("Insufficient rights to access this page."))
562 if not self.errors:
563 super(SuggestionReviewForm, self).clean()
564
565 def save(self):
566 if self.cleaned_data["action"] == "accept":
567 self.suggestion_review.accept()
568 else:
569 self.suggestion_review.reject()
570 if self.cleaned_data["comment"]:
571 super(SuggestionReviewForm, self).save()
572
573
574 class SubmitFormMixin(object):
575
576 def __init__(self, *args, **kwargs):
577 self.unit = kwargs.pop("unit")
578 self.request_user = kwargs.pop("request_user")
579 super(SubmitFormMixin, self).__init__(*args, **kwargs)
580 snplurals = (
581 len(self.unit.source.strings)
582 if self.unit.hasplural()
583 else None)
584 nplurals = (
585 self.unit.store.translation_project.language.nplurals
586 if snplurals
587 else 1)
588 self.fields["target_f"].widget = MultiStringWidget(
589 nplurals=nplurals,
590 attrs={
591 'lang': self.unit.store.translation_project.language.code,
592 'dir': self.unit.store.translation_project.language.direction,
593 'class': 'translation expanding focusthis js-translation-area',
594 'rows': 2,
595 'tabindex': 10})
596 self.fields['target_f'].widget.attrs[
597 'data-translation-aid'] = self['target_f'].value()
598 self.fields[
599 "target_f"].hidden_widget = HiddenMultiStringWidget(nplurals=nplurals)
600 self.fields["target_f"].fields = [
601 forms.CharField(strip=False) for i in range(nplurals)]
602 for k in ["user", "name", "email"]:
603 if k in self.fields:
604 self.fields[k].required = False
605
606
607 class SuggestionSubmitForm(SubmitFormMixin, BaseSuggestionForm):
608
609 target_f = MultiStringFormField(required=False)
610
611 def save_unit(self):
612 current_time = make_aware(timezone.now())
613 updated = []
614 if self.cleaned_data["target_f"]:
615 self.unit.target = self.cleaned_data["target_f"]
616 self.unit.save(
617 submitted_on=current_time,
618 submitted_by=self.target_object.user,
619 reviewed_on=current_time,
620 reviewed_by=self.request_user,
621 changed_with=SubmissionTypes.WEB)
622 updated.append(
623 (SubmissionFields.TARGET,
624 self.unit._frozen.target,
625 self.unit.target))
626 if self.unit.state_updated:
627 updated.append(
628 (SubmissionFields.STATE,
629 self.unit._frozen.state,
630 self.unit.state))
631 translation_project = self.unit.store.translation_project
632 for field, old_value, new_value in updated:
633 sub = Submission(
634 creation_time=current_time,
635 translation_project=translation_project,
636 suggestion=self.target_object,
637 submitter=self.request_user,
638 unit=self.unit,
639 field=field,
640 type=SubmissionTypes.WEB,
641 old_value=old_value,
642 new_value=new_value)
643 sub.save()
644 self.suggestion_review.accept(
645 update_unit=(
646 False
647 if self.cleaned_data["target_f"]
648 else True))
649
650 def save(self):
651 with update_data_after(self.unit.store):
652 self.save_unit()
653 if self.cleaned_data['comment']:
654 super(SuggestionSubmitForm, self).save()
655
656
657 class SubmitForm(SubmitFormMixin, forms.Form):
658 state = UnitStateField(
659 required=False,
660 label=_('Needs work'))
661 target_f = MultiStringFormField(required=False)
662
663 def save_unit(self):
664 user = self.request_user
665 current_time = make_aware(timezone.now())
666 updated = []
667 if multistring(self.cleaned_data["target_f"]) != self.unit.target:
668 self.unit.submitted_by = user
669 self.unit.submitted_on = current_time
670 self.unit.reviewed_by = None
671 self.unit.reviewed_on = None
672 updated.append(
673 (SubmissionFields.TARGET,
674 self.unit.target_f,
675 self.cleaned_data["target_f"]))
676 self.unit.target = self.cleaned_data["target_f"]
677 if self.cleaned_data["state"] != self.unit.state:
678 self.unit.state = self.cleaned_data["state"]
679 self.unit.save(
680 submitted_on=current_time,
681 submitted_by=user,
682 changed_with=SubmissionTypes.WEB)
683 if self.unit.state_updated:
684 updated.append(
685 (SubmissionFields.STATE,
686 self.unit.state,
687 self.cleaned_data["state"]))
688 self.unit.state = self.cleaned_data["state"]
689 translation_project = self.unit.store.translation_project
690 for field, old_value, new_value in updated:
691 sub = Submission(
692 creation_time=current_time,
693 translation_project=translation_project,
694 submitter=user,
695 unit=self.unit,
696 field=field,
697 type=SubmissionTypes.WEB,
698 old_value=old_value,
699 new_value=new_value)
700 sub.save()
701
702 def save(self):
703 with update_data_after(self.unit.store):
704 self.save_unit()
705
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pootle/apps/pootle_store/forms.py b/pootle/apps/pootle_store/forms.py
--- a/pootle/apps/pootle_store/forms.py
+++ b/pootle/apps/pootle_store/forms.py
@@ -540,7 +540,7 @@
self.add_error(
"action",
forms.ValidationError(
- _("Suggestion '%s' cannot be accepted/rejected twice!",
+ _("Suggestion '%s' has already been accepted or rejected.",
self.target_object)))
return self.data["action"]
| {"golden_diff": "diff --git a/pootle/apps/pootle_store/forms.py b/pootle/apps/pootle_store/forms.py\n--- a/pootle/apps/pootle_store/forms.py\n+++ b/pootle/apps/pootle_store/forms.py\n@@ -540,7 +540,7 @@\n self.add_error(\n \"action\",\n forms.ValidationError(\n- _(\"Suggestion '%s' cannot be accepted/rejected twice!\",\n+ _(\"Suggestion '%s' has already been accepted or rejected.\",\n self.target_object)))\n return self.data[\"action\"]\n", "issue": "String needs to use plural form\nReported by @milupo in https://github.com/translate/pootle/issues/6061#issuecomment-284076850\r\n\r\nThere is one string more:\r\n\r\ntemplates/includes/formtable.html:27\r\nShowing %(count)s results per page\r\n\r\nI know, percents are not used here.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\n\"\"\"Form fields required for handling translation files.\"\"\"\n\nfrom translate.misc.multistring import multistring\n\nfrom django import forms\nfrom django.contrib.auth import get_user_model\nfrom django.urls import Resolver404, resolve\nfrom django.utils import timezone\nfrom django.utils.translation import get_language\n\nfrom pootle.core.contextmanagers import update_data_after\nfrom pootle.core.delegate import review\nfrom pootle.core.url_helpers import split_pootle_path\nfrom pootle.core.utils.timezone import make_aware\nfrom pootle.i18n.gettext import ugettext as _\nfrom pootle_app.models import Directory\nfrom pootle_app.models.permissions import check_permission, check_user_permission\nfrom pootle_comment.forms import UnsecuredCommentForm\nfrom pootle_misc.checks import CATEGORY_CODES, check_names\nfrom pootle_misc.util import get_date_interval\nfrom pootle_project.models import Project\nfrom pootle_statistics.models import (Submission, SubmissionFields,\n SubmissionTypes)\n\nfrom .constants import ALLOWED_SORTS, FUZZY, OBSOLETE, TRANSLATED, UNTRANSLATED\nfrom .fields import to_db\nfrom .form_fields import (\n CategoryChoiceField, ISODateTimeField, MultipleArgsField,\n CommaSeparatedCheckboxSelectMultiple)\nfrom .models import Suggestion, Unit\n\n\nUNIT_SEARCH_FILTER_CHOICES = (\n (\"all\", \"all\"),\n (\"translated\", \"translated\"),\n (\"untranslated\", \"untranslated\"),\n (\"fuzzy\", \"fuzzy\"),\n (\"incomplete\", \"incomplete\"),\n (\"suggestions\", \"suggestions\"),\n (\"my-suggestions\", \"my-suggestions\"),\n (\"user-suggestions\", \"user-suggestions\"),\n (\"user-suggestions-accepted\", \"user-suggestions-accepted\"),\n (\"user-suggestions-rejected\", \"user-suggestions-rejected\"),\n (\"my-submissions\", \"my-submissions\"),\n (\"user-submissions\", \"user-submissions\"),\n (\"my-submissions-overwritten\", \"my-submissions-overwritten\"),\n (\"user-submissions-overwritten\", \"user-submissions-overwritten\"),\n (\"checks\", \"checks\"))\n\nUNIT_SEARCH_SORT_CHOICES = (\n ('priority', 'priority'),\n ('oldest', 'oldest'),\n ('newest', 'newest'))\n\n# # # # # # # text cleanup and highlighting # # # # # # # # # # # # #\n\n\nclass MultiStringWidgetMixin(object):\n\n def decompress(self, value):\n if value is None:\n return [None] * len(self.widgets)\n elif isinstance(value, multistring):\n return [string for string in value.strings]\n elif isinstance(value, list):\n return value\n elif isinstance(value, basestring):\n return 
[value]\n\n raise ValueError\n\n\nclass MultiStringWidget(MultiStringWidgetMixin, forms.MultiWidget):\n \"\"\"Custom Widget for editing multistrings, expands number of text\n area based on number of plural forms.\n \"\"\"\n\n def __init__(self, attrs=None, nplurals=1, textarea=True):\n if textarea:\n widget = forms.Textarea\n else:\n widget = forms.TextInput\n\n widgets = [widget(attrs=attrs) for i_ in xrange(nplurals)]\n super(MultiStringWidget, self).__init__(widgets, attrs)\n\n def format_output(self, rendered_widgets):\n from django.utils.safestring import mark_safe\n if len(rendered_widgets) == 1:\n return mark_safe(rendered_widgets[0])\n\n output = ''\n for i, widget in enumerate(rendered_widgets):\n output += '<div lang=\"%s\" title=\"%s\">' % \\\n (get_language(), _('Plural Form %d', i))\n output += widget\n output += '</div>'\n\n return mark_safe(output)\n\n\nclass HiddenMultiStringWidget(MultiStringWidgetMixin, forms.MultiWidget):\n \"\"\"Uses hidden input instead of textareas.\"\"\"\n\n def __init__(self, attrs=None, nplurals=1):\n widgets = [forms.HiddenInput(attrs=attrs) for i_ in xrange(nplurals)]\n super(HiddenMultiStringWidget, self).__init__(widgets, attrs)\n\n def format_output(self, rendered_widgets):\n return super(\n HiddenMultiStringWidget, self).format_output(rendered_widgets)\n\n def __call__(self):\n # HACKISH: Django is inconsistent in how it handles Field.widget and\n # Field.hidden_widget, it expects widget to be an instantiated object\n # and hidden_widget to be a class, since we need to specify nplurals at\n # run time we can let django instantiate hidden_widget.\n #\n # making the object callable let's us get away with forcing an object\n # where django expects a class\n return self\n\n\nclass MultiStringFormField(forms.MultiValueField):\n\n def __init__(self, nplurals=1, attrs=None, textarea=True, *args, **kwargs):\n self.widget = MultiStringWidget(nplurals=nplurals, attrs=attrs,\n textarea=textarea)\n self.hidden_widget = HiddenMultiStringWidget(nplurals=nplurals)\n fields = [forms.CharField(strip=False) for i_ in range(nplurals)]\n super(MultiStringFormField, self).__init__(fields=fields,\n *args, **kwargs)\n\n def compress(self, data_list):\n return data_list\n\n\nclass UnitStateField(forms.BooleanField):\n\n def to_python(self, value):\n \"\"\"Returns a Python boolean object.\n\n It is necessary to customize the behavior because the default\n ``BooleanField`` treats the string '0' as ``False``, but if the\n unit is in ``UNTRANSLATED`` state (which would report '0' as a\n value), we need the marked checkbox to be evaluated as ``True``.\n\n :return: ``False`` for any unknown :cls:`~pootle_store.models.Unit`\n states and for the 'False' string.\n \"\"\"\n truthy_values = (str(s) for s in (UNTRANSLATED, FUZZY, TRANSLATED))\n if (isinstance(value, basestring) and\n (value.lower() == 'false' or value not in truthy_values)):\n value = False\n else:\n value = bool(value)\n\n return super(UnitStateField, self).to_python(value)\n\n\ndef unit_form_factory(language, snplurals=None, request=None):\n\n if snplurals is not None:\n tnplurals = language.nplurals\n else:\n tnplurals = 1\n\n action_disabled = False\n if request is not None:\n cantranslate = check_permission(\"translate\", request)\n cansuggest = check_permission(\"suggest\", request)\n\n if not (cansuggest or cantranslate):\n action_disabled = True\n\n target_attrs = {\n 'lang': language.code,\n 'dir': language.direction,\n 'class': 'translation expanding focusthis js-translation-area',\n 'rows': 2,\n 
'tabindex': 10,\n }\n\n fuzzy_attrs = {\n 'accesskey': 'f',\n 'class': 'fuzzycheck',\n 'tabindex': 13,\n }\n\n if action_disabled:\n target_attrs['disabled'] = 'disabled'\n fuzzy_attrs['disabled'] = 'disabled'\n\n class UnitForm(forms.ModelForm):\n class Meta(object):\n model = Unit\n fields = ('target_f', 'state',)\n\n target_f = MultiStringFormField(\n nplurals=tnplurals,\n required=False,\n attrs=target_attrs,\n )\n state = UnitStateField(\n required=False,\n label=_('Needs work'),\n widget=forms.CheckboxInput(\n attrs=fuzzy_attrs,\n check_test=lambda x: x == FUZZY,\n ),\n )\n suggestion = forms.ModelChoiceField(\n queryset=Suggestion.objects.all(),\n required=False)\n comment = forms.CharField(required=False)\n\n def __init__(self, *args, **kwargs):\n self.request = kwargs.pop('request', None)\n self.user = self.request.user\n super(UnitForm, self).__init__(*args, **kwargs)\n self._updated_fields = []\n self.fields['target_f'].widget.attrs['data-translation-aid'] = \\\n self['target_f'].value()\n\n @property\n def updated_fields(self):\n order_dict = {\n SubmissionFields.STATE: 0,\n SubmissionFields.TARGET: 1,\n }\n return sorted(self._updated_fields, key=lambda x: order_dict[x[0]])\n\n def clean_target_f(self):\n value = self.cleaned_data['target_f']\n\n if self.instance.target != multistring(value or [u'']):\n self._updated_fields.append((SubmissionFields.TARGET,\n to_db(self.instance.target),\n to_db(value)))\n\n return value\n\n def clean(self):\n old_state = self.instance.state # Integer\n is_fuzzy = self.cleaned_data['state'] # Boolean\n new_target = self.cleaned_data['target_f']\n\n # If suggestion is provided set `old_state` should be `TRANSLATED`.\n if self.cleaned_data['suggestion']:\n old_state = TRANSLATED\n\n # Skip `TARGET` field submission if suggestion value is equal\n # to submitted translation\n if new_target == self.cleaned_data['suggestion'].target_f:\n self._updated_fields = []\n\n if (self.request is not None and\n not check_permission('administrate', self.request) and\n is_fuzzy):\n self.add_error('state',\n forms.ValidationError(\n _('Needs work flag must be '\n 'cleared')))\n\n if new_target:\n if is_fuzzy:\n new_state = FUZZY\n else:\n new_state = TRANSLATED\n else:\n new_state = UNTRANSLATED\n if old_state not in [new_state, OBSOLETE]:\n self._updated_fields.append((SubmissionFields.STATE,\n old_state, new_state))\n\n self.cleaned_data['state'] = new_state\n else:\n self.cleaned_data['state'] = old_state\n\n return super(UnitForm, self).clean()\n\n def save(self, *args, **kwargs):\n changed_with = kwargs.pop(\"changed_with\", None)\n kwargs[\"commit\"] = False\n unit = super(UnitForm, self).save(*args, **kwargs)\n with update_data_after(unit.store):\n current_time = timezone.now()\n if SubmissionFields.TARGET in (f[0] for f in self.updated_fields):\n unit.submitted_by = self.user\n unit.submitted_on = current_time\n unit.reviewed_by = None\n unit.reviewed_on = None\n suggestion = self.cleaned_data[\"suggestion\"]\n user = (\n suggestion.user\n if suggestion\n else self.user)\n unit.save(\n submitted_on=current_time,\n submitted_by=user,\n changed_with=changed_with)\n translation_project = unit.store.translation_project\n for field, old_value, new_value in self.updated_fields:\n if field == SubmissionFields.TARGET and suggestion:\n old_value = str(suggestion.target_f)\n sub = Submission(\n creation_time=current_time,\n translation_project=translation_project,\n submitter=self.user,\n unit=unit,\n field=field,\n type=SubmissionTypes.WEB,\n 
old_value=old_value,\n new_value=new_value)\n sub.save()\n return unit\n\n return UnitForm\n\n\ndef unit_comment_form_factory(language):\n\n comment_attrs = {\n 'lang': language.code,\n 'dir': language.direction,\n 'class': 'comments expanding focusthis',\n 'rows': 1,\n 'tabindex': 15,\n }\n\n class UnitCommentForm(forms.ModelForm):\n\n class Meta(object):\n fields = ('translator_comment',)\n model = Unit\n\n translator_comment = forms.CharField(\n required=True,\n label=_(\"Translator comment\"),\n widget=forms.Textarea(attrs=comment_attrs),\n )\n\n def __init__(self, *args, **kwargs):\n self.request = kwargs.pop('request', None)\n self.previous_value = ''\n\n super(UnitCommentForm, self).__init__(*args, **kwargs)\n\n if self.request.method == 'DELETE':\n self.fields['translator_comment'].required = False\n\n def clean_translator_comment(self):\n # HACKISH: Setting empty string when `DELETE` is being used\n if self.request.method == 'DELETE':\n self.previous_value = self.instance.translator_comment\n return ''\n\n return self.cleaned_data['translator_comment']\n\n def save(self, **kwargs):\n \"\"\"Register the submission and save the comment.\"\"\"\n if self.has_changed():\n creation_time = timezone.now()\n translation_project = self.request.translation_project\n\n sub = Submission(\n creation_time=creation_time,\n translation_project=translation_project,\n submitter=self.request.user,\n unit=self.instance,\n field=SubmissionFields.COMMENT,\n type=SubmissionTypes.WEB,\n old_value=self.previous_value,\n new_value=self.cleaned_data['translator_comment']\n )\n sub.save()\n super(UnitCommentForm, self).save(**kwargs)\n\n return UnitCommentForm\n\n\nclass UnitSearchForm(forms.Form):\n\n offset = forms.IntegerField(required=False)\n path = forms.CharField(\n max_length=2048,\n required=True)\n previous_uids = MultipleArgsField(\n field=forms.IntegerField(),\n required=False)\n uids = MultipleArgsField(\n field=forms.IntegerField(),\n required=False)\n filter = forms.ChoiceField(\n required=False,\n choices=UNIT_SEARCH_FILTER_CHOICES)\n checks = forms.MultipleChoiceField(\n required=False,\n widget=CommaSeparatedCheckboxSelectMultiple,\n choices=check_names.items())\n category = CategoryChoiceField(\n required=False,\n choices=CATEGORY_CODES.items())\n month = forms.DateField(\n required=False,\n input_formats=['%Y-%m'])\n sort = forms.ChoiceField(\n required=False,\n choices=UNIT_SEARCH_SORT_CHOICES)\n\n user = forms.ModelChoiceField(\n queryset=get_user_model().objects.all(),\n required=False,\n to_field_name=\"username\")\n\n search = forms.CharField(required=False)\n\n soptions = forms.MultipleChoiceField(\n required=False,\n widget=forms.CheckboxSelectMultiple,\n choices=(\n ('exact', _('Exact Match')), ))\n\n sfields = forms.MultipleChoiceField(\n required=False,\n widget=CommaSeparatedCheckboxSelectMultiple,\n choices=(\n ('source', _('Source Text')),\n ('target', _('Target Text')),\n ('notes', _('Comments')),\n ('locations', _('Locations'))),\n initial=['source', 'target'])\n\n def __init__(self, *args, **kwargs):\n self.request_user = kwargs.pop(\"user\")\n super(UnitSearchForm, self).__init__(*args, **kwargs)\n self.fields[\"modified-since\"] = ISODateTimeField(required=False)\n\n def clean(self):\n if \"checks\" in self.errors:\n del self.errors[\"checks\"]\n self.cleaned_data[\"checks\"] = None\n if \"user\" in self.errors:\n del self.errors[\"user\"]\n self.cleaned_data[\"user\"] = self.request_user\n if self.errors:\n return\n self.cleaned_data['count'] = 
self.request_user.get_unit_rows()\n self.cleaned_data[\"vfolder\"] = None\n pootle_path = self.cleaned_data.get(\"path\")\n path_keys = [\n \"project_code\", \"language_code\", \"dir_path\", \"filename\"]\n try:\n path_kwargs = {\n k: v\n for k, v in resolve(pootle_path).kwargs.items()\n if k in path_keys}\n except Resolver404:\n raise forms.ValidationError('Unrecognised path')\n self.cleaned_data.update(path_kwargs)\n sort_on = \"units\"\n if \"filter\" in self.cleaned_data:\n unit_filter = self.cleaned_data[\"filter\"]\n if unit_filter in ('suggestions', 'user-suggestions'):\n sort_on = 'suggestions'\n elif unit_filter in ('user-submissions', ):\n sort_on = 'submissions'\n sort_by_param = self.cleaned_data[\"sort\"]\n self.cleaned_data[\"sort_by\"] = ALLOWED_SORTS[sort_on].get(sort_by_param)\n self.cleaned_data[\"sort_on\"] = sort_on\n\n def clean_month(self):\n if self.cleaned_data[\"month\"]:\n return get_date_interval(self.cleaned_data[\"month\"].strftime(\"%Y-%m\"))\n\n def clean_user(self):\n return self.cleaned_data[\"user\"] or self.request_user\n\n def clean_path(self):\n lang_code, proj_code = split_pootle_path(\n self.cleaned_data[\"path\"])[:2]\n if not (lang_code or proj_code):\n permission_context = Directory.objects.projects\n elif proj_code and not lang_code:\n try:\n permission_context = Project.objects.select_related(\n \"directory\").get(code=proj_code).directory\n except Project.DoesNotExist:\n raise forms.ValidationError(\"Unrecognized path\")\n else:\n # no permission checking on lang translate views\n return self.cleaned_data[\"path\"]\n if self.request_user.is_superuser:\n return self.cleaned_data[\"path\"]\n can_view_path = check_user_permission(\n self.request_user, \"administrate\", permission_context)\n if can_view_path:\n return self.cleaned_data[\"path\"]\n raise forms.ValidationError(\"Unrecognized path\")\n\n\nclass BaseSuggestionForm(UnsecuredCommentForm):\n should_save = lambda self: True\n\n def __init__(self, *args, **kwargs):\n kwargs[\"request_user\"] = kwargs.get(\"request_user\") or self.request_user\n super(BaseSuggestionForm, self).__init__(**kwargs)\n self.fields[\"comment\"].required = False\n\n @property\n def review_type(self):\n return SubmissionTypes.WEB\n\n @property\n def suggestion_review(self):\n return review.get(self.target_object.__class__)(\n [self.target_object],\n self.request_user,\n review_type=self.review_type)\n\n\nclass SuggestionReviewForm(BaseSuggestionForm):\n\n action = forms.ChoiceField(\n required=True,\n choices=(\n (\"accept\", \"Accept\"),\n (\"reject\", \"Reject\")))\n\n def clean_action(self):\n if self.target_object.state.name != \"pending\":\n self.add_error(\n \"action\",\n forms.ValidationError(\n _(\"Suggestion '%s' cannot be accepted/rejected twice!\",\n self.target_object)))\n return self.data[\"action\"]\n\n def clean(self):\n self_review = (\n self.request_user == self.target_object.user\n and self.cleaned_data.get(\"action\") == \"reject\")\n permission = (\n \"view\"\n if self_review\n else \"review\")\n has_permission = check_user_permission(\n self.request_user,\n permission,\n self.target_object.unit.store.parent)\n if not has_permission:\n raise forms.ValidationError(\n _(\"Insufficient rights to access this page.\"))\n if not self.errors:\n super(SuggestionReviewForm, self).clean()\n\n def save(self):\n if self.cleaned_data[\"action\"] == \"accept\":\n self.suggestion_review.accept()\n else:\n self.suggestion_review.reject()\n if self.cleaned_data[\"comment\"]:\n super(SuggestionReviewForm, 
self).save()\n\n\nclass SubmitFormMixin(object):\n\n def __init__(self, *args, **kwargs):\n self.unit = kwargs.pop(\"unit\")\n self.request_user = kwargs.pop(\"request_user\")\n super(SubmitFormMixin, self).__init__(*args, **kwargs)\n snplurals = (\n len(self.unit.source.strings)\n if self.unit.hasplural()\n else None)\n nplurals = (\n self.unit.store.translation_project.language.nplurals\n if snplurals\n else 1)\n self.fields[\"target_f\"].widget = MultiStringWidget(\n nplurals=nplurals,\n attrs={\n 'lang': self.unit.store.translation_project.language.code,\n 'dir': self.unit.store.translation_project.language.direction,\n 'class': 'translation expanding focusthis js-translation-area',\n 'rows': 2,\n 'tabindex': 10})\n self.fields['target_f'].widget.attrs[\n 'data-translation-aid'] = self['target_f'].value()\n self.fields[\n \"target_f\"].hidden_widget = HiddenMultiStringWidget(nplurals=nplurals)\n self.fields[\"target_f\"].fields = [\n forms.CharField(strip=False) for i in range(nplurals)]\n for k in [\"user\", \"name\", \"email\"]:\n if k in self.fields:\n self.fields[k].required = False\n\n\nclass SuggestionSubmitForm(SubmitFormMixin, BaseSuggestionForm):\n\n target_f = MultiStringFormField(required=False)\n\n def save_unit(self):\n current_time = make_aware(timezone.now())\n updated = []\n if self.cleaned_data[\"target_f\"]:\n self.unit.target = self.cleaned_data[\"target_f\"]\n self.unit.save(\n submitted_on=current_time,\n submitted_by=self.target_object.user,\n reviewed_on=current_time,\n reviewed_by=self.request_user,\n changed_with=SubmissionTypes.WEB)\n updated.append(\n (SubmissionFields.TARGET,\n self.unit._frozen.target,\n self.unit.target))\n if self.unit.state_updated:\n updated.append(\n (SubmissionFields.STATE,\n self.unit._frozen.state,\n self.unit.state))\n translation_project = self.unit.store.translation_project\n for field, old_value, new_value in updated:\n sub = Submission(\n creation_time=current_time,\n translation_project=translation_project,\n suggestion=self.target_object,\n submitter=self.request_user,\n unit=self.unit,\n field=field,\n type=SubmissionTypes.WEB,\n old_value=old_value,\n new_value=new_value)\n sub.save()\n self.suggestion_review.accept(\n update_unit=(\n False\n if self.cleaned_data[\"target_f\"]\n else True))\n\n def save(self):\n with update_data_after(self.unit.store):\n self.save_unit()\n if self.cleaned_data['comment']:\n super(SuggestionSubmitForm, self).save()\n\n\nclass SubmitForm(SubmitFormMixin, forms.Form):\n state = UnitStateField(\n required=False,\n label=_('Needs work'))\n target_f = MultiStringFormField(required=False)\n\n def save_unit(self):\n user = self.request_user\n current_time = make_aware(timezone.now())\n updated = []\n if multistring(self.cleaned_data[\"target_f\"]) != self.unit.target:\n self.unit.submitted_by = user\n self.unit.submitted_on = current_time\n self.unit.reviewed_by = None\n self.unit.reviewed_on = None\n updated.append(\n (SubmissionFields.TARGET,\n self.unit.target_f,\n self.cleaned_data[\"target_f\"]))\n self.unit.target = self.cleaned_data[\"target_f\"]\n if self.cleaned_data[\"state\"] != self.unit.state:\n self.unit.state = self.cleaned_data[\"state\"]\n self.unit.save(\n submitted_on=current_time,\n submitted_by=user,\n changed_with=SubmissionTypes.WEB)\n if self.unit.state_updated:\n updated.append(\n (SubmissionFields.STATE,\n self.unit.state,\n self.cleaned_data[\"state\"]))\n self.unit.state = self.cleaned_data[\"state\"]\n translation_project = self.unit.store.translation_project\n for 
field, old_value, new_value in updated:\n sub = Submission(\n creation_time=current_time,\n translation_project=translation_project,\n submitter=user,\n unit=self.unit,\n field=field,\n type=SubmissionTypes.WEB,\n old_value=old_value,\n new_value=new_value)\n sub.save()\n\n def save(self):\n with update_data_after(self.unit.store):\n self.save_unit()\n", "path": "pootle/apps/pootle_store/forms.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\n\"\"\"Form fields required for handling translation files.\"\"\"\n\nfrom translate.misc.multistring import multistring\n\nfrom django import forms\nfrom django.contrib.auth import get_user_model\nfrom django.urls import Resolver404, resolve\nfrom django.utils import timezone\nfrom django.utils.translation import get_language\n\nfrom pootle.core.contextmanagers import update_data_after\nfrom pootle.core.delegate import review\nfrom pootle.core.url_helpers import split_pootle_path\nfrom pootle.core.utils.timezone import make_aware\nfrom pootle.i18n.gettext import ugettext as _\nfrom pootle_app.models import Directory\nfrom pootle_app.models.permissions import check_permission, check_user_permission\nfrom pootle_comment.forms import UnsecuredCommentForm\nfrom pootle_misc.checks import CATEGORY_CODES, check_names\nfrom pootle_misc.util import get_date_interval\nfrom pootle_project.models import Project\nfrom pootle_statistics.models import (Submission, SubmissionFields,\n SubmissionTypes)\n\nfrom .constants import ALLOWED_SORTS, FUZZY, OBSOLETE, TRANSLATED, UNTRANSLATED\nfrom .fields import to_db\nfrom .form_fields import (\n CategoryChoiceField, ISODateTimeField, MultipleArgsField,\n CommaSeparatedCheckboxSelectMultiple)\nfrom .models import Suggestion, Unit\n\n\nUNIT_SEARCH_FILTER_CHOICES = (\n (\"all\", \"all\"),\n (\"translated\", \"translated\"),\n (\"untranslated\", \"untranslated\"),\n (\"fuzzy\", \"fuzzy\"),\n (\"incomplete\", \"incomplete\"),\n (\"suggestions\", \"suggestions\"),\n (\"my-suggestions\", \"my-suggestions\"),\n (\"user-suggestions\", \"user-suggestions\"),\n (\"user-suggestions-accepted\", \"user-suggestions-accepted\"),\n (\"user-suggestions-rejected\", \"user-suggestions-rejected\"),\n (\"my-submissions\", \"my-submissions\"),\n (\"user-submissions\", \"user-submissions\"),\n (\"my-submissions-overwritten\", \"my-submissions-overwritten\"),\n (\"user-submissions-overwritten\", \"user-submissions-overwritten\"),\n (\"checks\", \"checks\"))\n\nUNIT_SEARCH_SORT_CHOICES = (\n ('priority', 'priority'),\n ('oldest', 'oldest'),\n ('newest', 'newest'))\n\n# # # # # # # text cleanup and highlighting # # # # # # # # # # # # #\n\n\nclass MultiStringWidgetMixin(object):\n\n def decompress(self, value):\n if value is None:\n return [None] * len(self.widgets)\n elif isinstance(value, multistring):\n return [string for string in value.strings]\n elif isinstance(value, list):\n return value\n elif isinstance(value, basestring):\n return [value]\n\n raise ValueError\n\n\nclass MultiStringWidget(MultiStringWidgetMixin, forms.MultiWidget):\n \"\"\"Custom Widget for editing multistrings, expands number of text\n area based on number of plural forms.\n \"\"\"\n\n def __init__(self, attrs=None, nplurals=1, textarea=True):\n if textarea:\n widget = forms.Textarea\n else:\n widget = 
forms.TextInput\n\n widgets = [widget(attrs=attrs) for i_ in xrange(nplurals)]\n super(MultiStringWidget, self).__init__(widgets, attrs)\n\n def format_output(self, rendered_widgets):\n from django.utils.safestring import mark_safe\n if len(rendered_widgets) == 1:\n return mark_safe(rendered_widgets[0])\n\n output = ''\n for i, widget in enumerate(rendered_widgets):\n output += '<div lang=\"%s\" title=\"%s\">' % \\\n (get_language(), _('Plural Form %d', i))\n output += widget\n output += '</div>'\n\n return mark_safe(output)\n\n\nclass HiddenMultiStringWidget(MultiStringWidgetMixin, forms.MultiWidget):\n \"\"\"Uses hidden input instead of textareas.\"\"\"\n\n def __init__(self, attrs=None, nplurals=1):\n widgets = [forms.HiddenInput(attrs=attrs) for i_ in xrange(nplurals)]\n super(HiddenMultiStringWidget, self).__init__(widgets, attrs)\n\n def format_output(self, rendered_widgets):\n return super(\n HiddenMultiStringWidget, self).format_output(rendered_widgets)\n\n def __call__(self):\n # HACKISH: Django is inconsistent in how it handles Field.widget and\n # Field.hidden_widget, it expects widget to be an instantiated object\n # and hidden_widget to be a class, since we need to specify nplurals at\n # run time we can let django instantiate hidden_widget.\n #\n # making the object callable let's us get away with forcing an object\n # where django expects a class\n return self\n\n\nclass MultiStringFormField(forms.MultiValueField):\n\n def __init__(self, nplurals=1, attrs=None, textarea=True, *args, **kwargs):\n self.widget = MultiStringWidget(nplurals=nplurals, attrs=attrs,\n textarea=textarea)\n self.hidden_widget = HiddenMultiStringWidget(nplurals=nplurals)\n fields = [forms.CharField(strip=False) for i_ in range(nplurals)]\n super(MultiStringFormField, self).__init__(fields=fields,\n *args, **kwargs)\n\n def compress(self, data_list):\n return data_list\n\n\nclass UnitStateField(forms.BooleanField):\n\n def to_python(self, value):\n \"\"\"Returns a Python boolean object.\n\n It is necessary to customize the behavior because the default\n ``BooleanField`` treats the string '0' as ``False``, but if the\n unit is in ``UNTRANSLATED`` state (which would report '0' as a\n value), we need the marked checkbox to be evaluated as ``True``.\n\n :return: ``False`` for any unknown :cls:`~pootle_store.models.Unit`\n states and for the 'False' string.\n \"\"\"\n truthy_values = (str(s) for s in (UNTRANSLATED, FUZZY, TRANSLATED))\n if (isinstance(value, basestring) and\n (value.lower() == 'false' or value not in truthy_values)):\n value = False\n else:\n value = bool(value)\n\n return super(UnitStateField, self).to_python(value)\n\n\ndef unit_form_factory(language, snplurals=None, request=None):\n\n if snplurals is not None:\n tnplurals = language.nplurals\n else:\n tnplurals = 1\n\n action_disabled = False\n if request is not None:\n cantranslate = check_permission(\"translate\", request)\n cansuggest = check_permission(\"suggest\", request)\n\n if not (cansuggest or cantranslate):\n action_disabled = True\n\n target_attrs = {\n 'lang': language.code,\n 'dir': language.direction,\n 'class': 'translation expanding focusthis js-translation-area',\n 'rows': 2,\n 'tabindex': 10,\n }\n\n fuzzy_attrs = {\n 'accesskey': 'f',\n 'class': 'fuzzycheck',\n 'tabindex': 13,\n }\n\n if action_disabled:\n target_attrs['disabled'] = 'disabled'\n fuzzy_attrs['disabled'] = 'disabled'\n\n class UnitForm(forms.ModelForm):\n class Meta(object):\n model = Unit\n fields = ('target_f', 'state',)\n\n target_f = 
MultiStringFormField(\n nplurals=tnplurals,\n required=False,\n attrs=target_attrs,\n )\n state = UnitStateField(\n required=False,\n label=_('Needs work'),\n widget=forms.CheckboxInput(\n attrs=fuzzy_attrs,\n check_test=lambda x: x == FUZZY,\n ),\n )\n suggestion = forms.ModelChoiceField(\n queryset=Suggestion.objects.all(),\n required=False)\n comment = forms.CharField(required=False)\n\n def __init__(self, *args, **kwargs):\n self.request = kwargs.pop('request', None)\n self.user = self.request.user\n super(UnitForm, self).__init__(*args, **kwargs)\n self._updated_fields = []\n self.fields['target_f'].widget.attrs['data-translation-aid'] = \\\n self['target_f'].value()\n\n @property\n def updated_fields(self):\n order_dict = {\n SubmissionFields.STATE: 0,\n SubmissionFields.TARGET: 1,\n }\n return sorted(self._updated_fields, key=lambda x: order_dict[x[0]])\n\n def clean_target_f(self):\n value = self.cleaned_data['target_f']\n\n if self.instance.target != multistring(value or [u'']):\n self._updated_fields.append((SubmissionFields.TARGET,\n to_db(self.instance.target),\n to_db(value)))\n\n return value\n\n def clean(self):\n old_state = self.instance.state # Integer\n is_fuzzy = self.cleaned_data['state'] # Boolean\n new_target = self.cleaned_data['target_f']\n\n # If suggestion is provided set `old_state` should be `TRANSLATED`.\n if self.cleaned_data['suggestion']:\n old_state = TRANSLATED\n\n # Skip `TARGET` field submission if suggestion value is equal\n # to submitted translation\n if new_target == self.cleaned_data['suggestion'].target_f:\n self._updated_fields = []\n\n if (self.request is not None and\n not check_permission('administrate', self.request) and\n is_fuzzy):\n self.add_error('state',\n forms.ValidationError(\n _('Needs work flag must be '\n 'cleared')))\n\n if new_target:\n if is_fuzzy:\n new_state = FUZZY\n else:\n new_state = TRANSLATED\n else:\n new_state = UNTRANSLATED\n if old_state not in [new_state, OBSOLETE]:\n self._updated_fields.append((SubmissionFields.STATE,\n old_state, new_state))\n\n self.cleaned_data['state'] = new_state\n else:\n self.cleaned_data['state'] = old_state\n\n return super(UnitForm, self).clean()\n\n def save(self, *args, **kwargs):\n changed_with = kwargs.pop(\"changed_with\", None)\n kwargs[\"commit\"] = False\n unit = super(UnitForm, self).save(*args, **kwargs)\n with update_data_after(unit.store):\n current_time = timezone.now()\n if SubmissionFields.TARGET in (f[0] for f in self.updated_fields):\n unit.submitted_by = self.user\n unit.submitted_on = current_time\n unit.reviewed_by = None\n unit.reviewed_on = None\n suggestion = self.cleaned_data[\"suggestion\"]\n user = (\n suggestion.user\n if suggestion\n else self.user)\n unit.save(\n submitted_on=current_time,\n submitted_by=user,\n changed_with=changed_with)\n translation_project = unit.store.translation_project\n for field, old_value, new_value in self.updated_fields:\n if field == SubmissionFields.TARGET and suggestion:\n old_value = str(suggestion.target_f)\n sub = Submission(\n creation_time=current_time,\n translation_project=translation_project,\n submitter=self.user,\n unit=unit,\n field=field,\n type=SubmissionTypes.WEB,\n old_value=old_value,\n new_value=new_value)\n sub.save()\n return unit\n\n return UnitForm\n\n\ndef unit_comment_form_factory(language):\n\n comment_attrs = {\n 'lang': language.code,\n 'dir': language.direction,\n 'class': 'comments expanding focusthis',\n 'rows': 1,\n 'tabindex': 15,\n }\n\n class UnitCommentForm(forms.ModelForm):\n\n class 
Meta(object):\n fields = ('translator_comment',)\n model = Unit\n\n translator_comment = forms.CharField(\n required=True,\n label=_(\"Translator comment\"),\n widget=forms.Textarea(attrs=comment_attrs),\n )\n\n def __init__(self, *args, **kwargs):\n self.request = kwargs.pop('request', None)\n self.previous_value = ''\n\n super(UnitCommentForm, self).__init__(*args, **kwargs)\n\n if self.request.method == 'DELETE':\n self.fields['translator_comment'].required = False\n\n def clean_translator_comment(self):\n # HACKISH: Setting empty string when `DELETE` is being used\n if self.request.method == 'DELETE':\n self.previous_value = self.instance.translator_comment\n return ''\n\n return self.cleaned_data['translator_comment']\n\n def save(self, **kwargs):\n \"\"\"Register the submission and save the comment.\"\"\"\n if self.has_changed():\n creation_time = timezone.now()\n translation_project = self.request.translation_project\n\n sub = Submission(\n creation_time=creation_time,\n translation_project=translation_project,\n submitter=self.request.user,\n unit=self.instance,\n field=SubmissionFields.COMMENT,\n type=SubmissionTypes.WEB,\n old_value=self.previous_value,\n new_value=self.cleaned_data['translator_comment']\n )\n sub.save()\n super(UnitCommentForm, self).save(**kwargs)\n\n return UnitCommentForm\n\n\nclass UnitSearchForm(forms.Form):\n\n offset = forms.IntegerField(required=False)\n path = forms.CharField(\n max_length=2048,\n required=True)\n previous_uids = MultipleArgsField(\n field=forms.IntegerField(),\n required=False)\n uids = MultipleArgsField(\n field=forms.IntegerField(),\n required=False)\n filter = forms.ChoiceField(\n required=False,\n choices=UNIT_SEARCH_FILTER_CHOICES)\n checks = forms.MultipleChoiceField(\n required=False,\n widget=CommaSeparatedCheckboxSelectMultiple,\n choices=check_names.items())\n category = CategoryChoiceField(\n required=False,\n choices=CATEGORY_CODES.items())\n month = forms.DateField(\n required=False,\n input_formats=['%Y-%m'])\n sort = forms.ChoiceField(\n required=False,\n choices=UNIT_SEARCH_SORT_CHOICES)\n\n user = forms.ModelChoiceField(\n queryset=get_user_model().objects.all(),\n required=False,\n to_field_name=\"username\")\n\n search = forms.CharField(required=False)\n\n soptions = forms.MultipleChoiceField(\n required=False,\n widget=forms.CheckboxSelectMultiple,\n choices=(\n ('exact', _('Exact Match')), ))\n\n sfields = forms.MultipleChoiceField(\n required=False,\n widget=CommaSeparatedCheckboxSelectMultiple,\n choices=(\n ('source', _('Source Text')),\n ('target', _('Target Text')),\n ('notes', _('Comments')),\n ('locations', _('Locations'))),\n initial=['source', 'target'])\n\n def __init__(self, *args, **kwargs):\n self.request_user = kwargs.pop(\"user\")\n super(UnitSearchForm, self).__init__(*args, **kwargs)\n self.fields[\"modified-since\"] = ISODateTimeField(required=False)\n\n def clean(self):\n if \"checks\" in self.errors:\n del self.errors[\"checks\"]\n self.cleaned_data[\"checks\"] = None\n if \"user\" in self.errors:\n del self.errors[\"user\"]\n self.cleaned_data[\"user\"] = self.request_user\n if self.errors:\n return\n self.cleaned_data['count'] = self.request_user.get_unit_rows()\n self.cleaned_data[\"vfolder\"] = None\n pootle_path = self.cleaned_data.get(\"path\")\n path_keys = [\n \"project_code\", \"language_code\", \"dir_path\", \"filename\"]\n try:\n path_kwargs = {\n k: v\n for k, v in resolve(pootle_path).kwargs.items()\n if k in path_keys}\n except Resolver404:\n raise 
forms.ValidationError('Unrecognised path')\n self.cleaned_data.update(path_kwargs)\n sort_on = \"units\"\n if \"filter\" in self.cleaned_data:\n unit_filter = self.cleaned_data[\"filter\"]\n if unit_filter in ('suggestions', 'user-suggestions'):\n sort_on = 'suggestions'\n elif unit_filter in ('user-submissions', ):\n sort_on = 'submissions'\n sort_by_param = self.cleaned_data[\"sort\"]\n self.cleaned_data[\"sort_by\"] = ALLOWED_SORTS[sort_on].get(sort_by_param)\n self.cleaned_data[\"sort_on\"] = sort_on\n\n def clean_month(self):\n if self.cleaned_data[\"month\"]:\n return get_date_interval(self.cleaned_data[\"month\"].strftime(\"%Y-%m\"))\n\n def clean_user(self):\n return self.cleaned_data[\"user\"] or self.request_user\n\n def clean_path(self):\n lang_code, proj_code = split_pootle_path(\n self.cleaned_data[\"path\"])[:2]\n if not (lang_code or proj_code):\n permission_context = Directory.objects.projects\n elif proj_code and not lang_code:\n try:\n permission_context = Project.objects.select_related(\n \"directory\").get(code=proj_code).directory\n except Project.DoesNotExist:\n raise forms.ValidationError(\"Unrecognized path\")\n else:\n # no permission checking on lang translate views\n return self.cleaned_data[\"path\"]\n if self.request_user.is_superuser:\n return self.cleaned_data[\"path\"]\n can_view_path = check_user_permission(\n self.request_user, \"administrate\", permission_context)\n if can_view_path:\n return self.cleaned_data[\"path\"]\n raise forms.ValidationError(\"Unrecognized path\")\n\n\nclass BaseSuggestionForm(UnsecuredCommentForm):\n should_save = lambda self: True\n\n def __init__(self, *args, **kwargs):\n kwargs[\"request_user\"] = kwargs.get(\"request_user\") or self.request_user\n super(BaseSuggestionForm, self).__init__(**kwargs)\n self.fields[\"comment\"].required = False\n\n @property\n def review_type(self):\n return SubmissionTypes.WEB\n\n @property\n def suggestion_review(self):\n return review.get(self.target_object.__class__)(\n [self.target_object],\n self.request_user,\n review_type=self.review_type)\n\n\nclass SuggestionReviewForm(BaseSuggestionForm):\n\n action = forms.ChoiceField(\n required=True,\n choices=(\n (\"accept\", \"Accept\"),\n (\"reject\", \"Reject\")))\n\n def clean_action(self):\n if self.target_object.state.name != \"pending\":\n self.add_error(\n \"action\",\n forms.ValidationError(\n _(\"Suggestion '%s' has already been accepted or rejected.\",\n self.target_object)))\n return self.data[\"action\"]\n\n def clean(self):\n self_review = (\n self.request_user == self.target_object.user\n and self.cleaned_data.get(\"action\") == \"reject\")\n permission = (\n \"view\"\n if self_review\n else \"review\")\n has_permission = check_user_permission(\n self.request_user,\n permission,\n self.target_object.unit.store.parent)\n if not has_permission:\n raise forms.ValidationError(\n _(\"Insufficient rights to access this page.\"))\n if not self.errors:\n super(SuggestionReviewForm, self).clean()\n\n def save(self):\n if self.cleaned_data[\"action\"] == \"accept\":\n self.suggestion_review.accept()\n else:\n self.suggestion_review.reject()\n if self.cleaned_data[\"comment\"]:\n super(SuggestionReviewForm, self).save()\n\n\nclass SubmitFormMixin(object):\n\n def __init__(self, *args, **kwargs):\n self.unit = kwargs.pop(\"unit\")\n self.request_user = kwargs.pop(\"request_user\")\n super(SubmitFormMixin, self).__init__(*args, **kwargs)\n snplurals = (\n len(self.unit.source.strings)\n if self.unit.hasplural()\n else None)\n nplurals = (\n 
self.unit.store.translation_project.language.nplurals\n if snplurals\n else 1)\n self.fields[\"target_f\"].widget = MultiStringWidget(\n nplurals=nplurals,\n attrs={\n 'lang': self.unit.store.translation_project.language.code,\n 'dir': self.unit.store.translation_project.language.direction,\n 'class': 'translation expanding focusthis js-translation-area',\n 'rows': 2,\n 'tabindex': 10})\n self.fields['target_f'].widget.attrs[\n 'data-translation-aid'] = self['target_f'].value()\n self.fields[\n \"target_f\"].hidden_widget = HiddenMultiStringWidget(nplurals=nplurals)\n self.fields[\"target_f\"].fields = [\n forms.CharField(strip=False) for i in range(nplurals)]\n for k in [\"user\", \"name\", \"email\"]:\n if k in self.fields:\n self.fields[k].required = False\n\n\nclass SuggestionSubmitForm(SubmitFormMixin, BaseSuggestionForm):\n\n target_f = MultiStringFormField(required=False)\n\n def save_unit(self):\n current_time = make_aware(timezone.now())\n updated = []\n if self.cleaned_data[\"target_f\"]:\n self.unit.target = self.cleaned_data[\"target_f\"]\n self.unit.save(\n submitted_on=current_time,\n submitted_by=self.target_object.user,\n reviewed_on=current_time,\n reviewed_by=self.request_user,\n changed_with=SubmissionTypes.WEB)\n updated.append(\n (SubmissionFields.TARGET,\n self.unit._frozen.target,\n self.unit.target))\n if self.unit.state_updated:\n updated.append(\n (SubmissionFields.STATE,\n self.unit._frozen.state,\n self.unit.state))\n translation_project = self.unit.store.translation_project\n for field, old_value, new_value in updated:\n sub = Submission(\n creation_time=current_time,\n translation_project=translation_project,\n suggestion=self.target_object,\n submitter=self.request_user,\n unit=self.unit,\n field=field,\n type=SubmissionTypes.WEB,\n old_value=old_value,\n new_value=new_value)\n sub.save()\n self.suggestion_review.accept(\n update_unit=(\n False\n if self.cleaned_data[\"target_f\"]\n else True))\n\n def save(self):\n with update_data_after(self.unit.store):\n self.save_unit()\n if self.cleaned_data['comment']:\n super(SuggestionSubmitForm, self).save()\n\n\nclass SubmitForm(SubmitFormMixin, forms.Form):\n state = UnitStateField(\n required=False,\n label=_('Needs work'))\n target_f = MultiStringFormField(required=False)\n\n def save_unit(self):\n user = self.request_user\n current_time = make_aware(timezone.now())\n updated = []\n if multistring(self.cleaned_data[\"target_f\"]) != self.unit.target:\n self.unit.submitted_by = user\n self.unit.submitted_on = current_time\n self.unit.reviewed_by = None\n self.unit.reviewed_on = None\n updated.append(\n (SubmissionFields.TARGET,\n self.unit.target_f,\n self.cleaned_data[\"target_f\"]))\n self.unit.target = self.cleaned_data[\"target_f\"]\n if self.cleaned_data[\"state\"] != self.unit.state:\n self.unit.state = self.cleaned_data[\"state\"]\n self.unit.save(\n submitted_on=current_time,\n submitted_by=user,\n changed_with=SubmissionTypes.WEB)\n if self.unit.state_updated:\n updated.append(\n (SubmissionFields.STATE,\n self.unit.state,\n self.cleaned_data[\"state\"]))\n self.unit.state = self.cleaned_data[\"state\"]\n translation_project = self.unit.store.translation_project\n for field, old_value, new_value in updated:\n sub = Submission(\n creation_time=current_time,\n translation_project=translation_project,\n submitter=user,\n unit=self.unit,\n field=field,\n type=SubmissionTypes.WEB,\n old_value=old_value,\n new_value=new_value)\n sub.save()\n\n def save(self):\n with update_data_after(self.unit.store):\n 
self.save_unit()\n", "path": "pootle/apps/pootle_store/forms.py"}]} |
gh_patches_debug_1224 | rasdani/github-patches | git_diff | celery__celery-3997 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Request on_timeout should ignore soft time limit exception
When Request.on_timeout receive a soft timeout from billiard, it does the same as if it was receiving a hard time limit exception. This is ran by the controller.
But the task may catch this exception and eg. return (this is what soft timeout are for).
This cause:
1. the result to be saved once as an exception by the controller (on_timeout) and another time with the result returned by the task
2. the task status to be passed to failure and to success on the same manner
3. if the task is participating to a chord, the chord result counter (at least with redis) is incremented twice (instead of once), making the chord to return prematurely and eventually loose tasks…
1, 2 and 3 can leads of course to strange race conditions…
## Steps to reproduce (Illustration)
with the program in test_timeout.py:
```python
import time
import celery
app = celery.Celery('test_timeout')
app.conf.update(
result_backend="redis://localhost/0",
broker_url="amqp://celery:celery@localhost:5672/host",
)
@app.task(soft_time_limit=1)
def test():
try:
time.sleep(2)
except Exception:
return 1
@app.task()
def add(args):
print("### adding", args)
return sum(args)
@app.task()
def on_error(context, exception, traceback, **kwargs):
print("### on_error: ", exception)
if __name__ == "__main__":
result = celery.chord([test.s().set(link_error=on_error.s()), test.s().set(link_error=on_error.s())])(add.s())
result.get()
```
start a worker and the program:
```
$ celery -A test_timeout worker -l WARNING
$ python3 test_timeout.py
```
## Expected behavior
add method is called with `[1, 1]` as argument and test_timeout.py return normally
## Actual behavior
The test_timeout.py fails, with
```
celery.backends.base.ChordError: Callback error: ChordError("Dependency 15109e05-da43-449f-9081-85d839ac0ef2 raised SoftTimeLimitExceeded('SoftTimeLimitExceeded(True,)',)",
```
On the worker side, the **on_error is called but the add method as well !**
```
[2017-11-29 23:07:25,538: WARNING/MainProcess] Soft time limit (1s) exceeded for test_timeout.test[15109e05-da43-449f-9081-85d839ac0ef2]
[2017-11-29 23:07:25,546: WARNING/MainProcess] ### on_error:
[2017-11-29 23:07:25,546: WARNING/MainProcess] SoftTimeLimitExceeded(True,)
[2017-11-29 23:07:25,547: WARNING/MainProcess] Soft time limit (1s) exceeded for test_timeout.test[38f3f7f2-4a89-4318-8ee9-36a987f73757]
[2017-11-29 23:07:25,553: ERROR/MainProcess] Chord callback for 'ef6d7a38-d1b4-40ad-b937-ffa84e40bb23' raised: ChordError("Dependency 15109e05-da43-449f-9081-85d839ac0ef2 raised SoftTimeLimitExceeded('SoftTimeLimitExceeded(True,)',)",)
Traceback (most recent call last):
File "/usr/local/lib/python3.4/dist-packages/celery/backends/redis.py", line 290, in on_chord_part_return
callback.delay([unpack(tup, decode) for tup in resl])
File "/usr/local/lib/python3.4/dist-packages/celery/backends/redis.py", line 290, in <listcomp>
callback.delay([unpack(tup, decode) for tup in resl])
File "/usr/local/lib/python3.4/dist-packages/celery/backends/redis.py", line 243, in _unpack_chord_result
raise ChordError('Dependency {0} raised {1!r}'.format(tid, retval))
celery.exceptions.ChordError: Dependency 15109e05-da43-449f-9081-85d839ac0ef2 raised SoftTimeLimitExceeded('SoftTimeLimitExceeded(True,)',)
[2017-11-29 23:07:25,565: WARNING/MainProcess] ### on_error:
[2017-11-29 23:07:25,565: WARNING/MainProcess] SoftTimeLimitExceeded(True,)
[2017-11-29 23:07:27,262: WARNING/PoolWorker-2] ### adding
[2017-11-29 23:07:27,264: WARNING/PoolWorker-2] [1, 1]
```
Of course, I deliberately chose to call test.s() twice, to show that the counting in the chord continues. In fact:
- the chord result counter is incremented twice by the soft time limit error
- the chord result counter is incremented twice again when the `test` tasks correctly return
## Conclusion
Request.on_timeout should not process the soft time limit exception.
Here is a quick monkey patch (the corresponding fix in celery itself is trivial):
```python
def patch_celery_request_on_timeout():
from celery.worker import request
orig = request.Request.on_timeout
def patched_on_timeout(self, soft, timeout):
if not soft:
orig(self, soft, timeout)
request.Request.on_timeout = patched_on_timeout
patch_celery_request_on_timeout()
```
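For completeness, here is a minimal sketch of what the equivalent in-tree change could look like. This is an assumption based on celery 4.1's `celery/worker/request.py`, not the actual upstream patch; the names `Request`, `get_logger`, `TimeLimitExceeded` and `mark_as_failure` are taken from that module's existing imports and may differ in other versions.
```python
# Hypothetical sketch of the in-tree fix -- NOT the actual upstream patch.
from celery.exceptions import TimeLimitExceeded
from celery.utils.log import get_logger
from celery.worker.request import Request

logger = get_logger(__name__)


def on_timeout(self, soft, timeout):
    """Handler called if the task times out."""
    if soft:
        # Soft limit: billiard raises SoftTimeLimitExceeded *inside* the
        # task, which may catch it and still return a result, so do not
        # mark the request as failed here -- only warn.
        logger.warning('Soft time limit (%ss) exceeded for %s[%s]',
                       timeout, self.name, self.id)
        return
    # Hard limit: the pool process is killed, so the failure is final.
    logger.error('Hard time limit (%ss) exceeded for %s[%s]',
                 timeout, self.name, self.id)
    self.task.backend.mark_as_failure(
        self.id, TimeLimitExceeded(timeout), request=self)


# Applied the same way as the monkey patch above, for illustration only:
Request.on_timeout = on_timeout
```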
## version info
software -> celery:4.1.0 (latentcall) kombu:4.0.2 py:3.4.3
billiard:3.5.0.2 py-amqp:2.1.4
platform -> system:Linux arch:64bit, ELF imp:CPython
loader -> celery.loaders.app.AppLoader
settings -> transport:amqp results:redis://10.0.3.253/0
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `celery/app/defaults.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """Configuration introspection and defaults."""
3 from __future__ import absolute_import, unicode_literals
4 import sys
5 from collections import deque, namedtuple
6 from datetime import timedelta
7 from celery.five import items, keys, python_2_unicode_compatible
8 from celery.utils.functional import memoize
9 from celery.utils.serialization import strtobool
10
11 __all__ = ['Option', 'NAMESPACES', 'flatten', 'find']
12
13 is_jython = sys.platform.startswith('java')
14 is_pypy = hasattr(sys, 'pypy_version_info')
15
16 DEFAULT_POOL = 'prefork'
17 if is_jython:
18 DEFAULT_POOL = 'solo'
19 elif is_pypy:
20 if sys.pypy_version_info[0:3] < (1, 5, 0):
21 DEFAULT_POOL = 'solo'
22 else:
23 DEFAULT_POOL = 'prefork'
24
25 DEFAULT_ACCEPT_CONTENT = ['json']
26 DEFAULT_PROCESS_LOG_FMT = """
27 [%(asctime)s: %(levelname)s/%(processName)s] %(message)s
28 """.strip()
29 DEFAULT_TASK_LOG_FMT = """[%(asctime)s: %(levelname)s/%(processName)s] \
30 %(task_name)s[%(task_id)s]: %(message)s"""
31
32 OLD_NS = {'celery_{0}'}
33 OLD_NS_BEAT = {'celerybeat_{0}'}
34 OLD_NS_WORKER = {'celeryd_{0}'}
35
36 searchresult = namedtuple('searchresult', ('namespace', 'key', 'type'))
37
38
39 def Namespace(__old__=None, **options):
40 if __old__ is not None:
41 for key, opt in items(options):
42 if not opt.old:
43 opt.old = {o.format(key) for o in __old__}
44 return options
45
46
47 def old_ns(ns):
48 return {'{0}_{{0}}'.format(ns)}
49
50
51 @python_2_unicode_compatible
52 class Option(object):
53 """Decribes a Celery configuration option."""
54
55 alt = None
56 deprecate_by = None
57 remove_by = None
58 old = set()
59 typemap = dict(string=str, int=int, float=float, any=lambda v: v,
60 bool=strtobool, dict=dict, tuple=tuple)
61
62 def __init__(self, default=None, *args, **kwargs):
63 self.default = default
64 self.type = kwargs.get('type') or 'string'
65 for attr, value in items(kwargs):
66 setattr(self, attr, value)
67
68 def to_python(self, value):
69 return self.typemap[self.type](value)
70
71 def __repr__(self):
72 return '<Option: type->{0} default->{1!r}>'.format(self.type,
73 self.default)
74
75
76 NAMESPACES = Namespace(
77 accept_content=Option(DEFAULT_ACCEPT_CONTENT, type='list', old=OLD_NS),
78 enable_utc=Option(True, type='bool'),
79 imports=Option((), type='tuple', old=OLD_NS),
80 include=Option((), type='tuple', old=OLD_NS),
81 timezone=Option(type='string', old=OLD_NS),
82 beat=Namespace(
83 __old__=OLD_NS_BEAT,
84
85 max_loop_interval=Option(0, type='float'),
86 schedule=Option({}, type='dict'),
87 scheduler=Option('celery.beat:PersistentScheduler'),
88 schedule_filename=Option('celerybeat-schedule'),
89 sync_every=Option(0, type='int'),
90 ),
91 broker=Namespace(
92 url=Option(None, type='string'),
93 read_url=Option(None, type='string'),
94 write_url=Option(None, type='string'),
95 transport=Option(type='string'),
96 transport_options=Option({}, type='dict'),
97 connection_timeout=Option(4, type='float'),
98 connection_retry=Option(True, type='bool'),
99 connection_max_retries=Option(100, type='int'),
100 failover_strategy=Option(None, type='string'),
101 heartbeat=Option(120, type='int'),
102 heartbeat_checkrate=Option(3.0, type='int'),
103 login_method=Option(None, type='string'),
104 pool_limit=Option(10, type='int'),
105 use_ssl=Option(False, type='bool'),
106
107 host=Option(type='string'),
108 port=Option(type='int'),
109 user=Option(type='string'),
110 password=Option(type='string'),
111 vhost=Option(type='string'),
112 ),
113 cache=Namespace(
114 __old__=old_ns('celery_cache'),
115
116 backend=Option(),
117 backend_options=Option({}, type='dict'),
118 ),
119 cassandra=Namespace(
120 entry_ttl=Option(type='float'),
121 keyspace=Option(type='string'),
122 port=Option(type='string'),
123 read_consistency=Option(type='string'),
124 servers=Option(type='list'),
125 table=Option(type='string'),
126 write_consistency=Option(type='string'),
127 auth_provider=Option(type='string'),
128 auth_kwargs=Option(type='string'),
129 ),
130 control=Namespace(
131 queue_ttl=Option(300.0, type='float'),
132 queue_expires=Option(10.0, type='float'),
133 ),
134 couchbase=Namespace(
135 __old__=old_ns('celery_couchbase'),
136
137 backend_settings=Option(None, type='dict'),
138 ),
139 mongodb=Namespace(
140 __old__=old_ns('celery_mongodb'),
141
142 backend_settings=Option(type='dict'),
143 ),
144 event=Namespace(
145 __old__=old_ns('celery_event'),
146
147 queue_expires=Option(60.0, type='float'),
148 queue_ttl=Option(5.0, type='float'),
149 queue_prefix=Option('celeryev'),
150 serializer=Option('json'),
151 ),
152 redis=Namespace(
153 __old__=old_ns('celery_redis'),
154
155 backend_use_ssl=Option(type='dict'),
156 db=Option(type='int'),
157 host=Option(type='string'),
158 max_connections=Option(type='int'),
159 password=Option(type='string'),
160 port=Option(type='int'),
161 socket_timeout=Option(120.0, type='float'),
162 socket_connect_timeout=Option(None, type='float'),
163 ),
164 result=Namespace(
165 __old__=old_ns('celery_result'),
166
167 backend=Option(type='string'),
168 cache_max=Option(
169 -1,
170 type='int', old={'celery_max_cached_results'},
171 ),
172 compression=Option(type='str'),
173 exchange=Option('celeryresults'),
174 exchange_type=Option('direct'),
175 expires=Option(
176 timedelta(days=1),
177 type='float', old={'celery_task_result_expires'},
178 ),
179 persistent=Option(None, type='bool'),
180 serializer=Option('json'),
181 ),
182 elasticsearch=Namespace(
183 __old__=old_ns('celery_elasticsearch'),
184
185 retry_on_timeout=Option(type='bool'),
186 max_retries=Option(type='int'),
187 timeout=Option(type='float'),
188 ),
189 riak=Namespace(
190 __old__=old_ns('celery_riak'),
191
192 backend_settings=Option(type='dict'),
193 ),
194 security=Namespace(
195 __old__=old_ns('celery_security'),
196
197 certificate=Option(type='string'),
198 cert_store=Option(type='string'),
199 key=Option(type='string'),
200 ),
201 database=Namespace(
202 url=Option(old={'celery_result_dburi'}),
203 engine_options=Option(
204 type='dict', old={'celery_result_engine_options'},
205 ),
206 short_lived_sessions=Option(
207 False, type='bool', old={'celery_result_db_short_lived_sessions'},
208 ),
209 table_names=Option(type='dict', old={'celery_result_db_tablenames'}),
210 ),
211 task=Namespace(
212 __old__=OLD_NS,
213 acks_late=Option(False, type='bool'),
214 always_eager=Option(False, type='bool'),
215 annotations=Option(type='any'),
216 compression=Option(type='string', old={'celery_message_compression'}),
217 create_missing_queues=Option(True, type='bool'),
218 default_delivery_mode=Option(2, type='string'),
219 default_queue=Option('celery'),
220 default_exchange=Option(None, type='string'), # taken from queue
221 default_exchange_type=Option('direct'),
222 default_routing_key=Option(None, type='string'), # taken from queue
223 default_rate_limit=Option(type='string'),
224 eager_propagates=Option(
225 False, type='bool', old={'celery_eager_propagates_exceptions'},
226 ),
227 ignore_result=Option(False, type='bool'),
228 protocol=Option(2, type='int', old={'celery_task_protocol'}),
229 publish_retry=Option(
230 True, type='bool', old={'celery_task_publish_retry'},
231 ),
232 publish_retry_policy=Option(
233 {'max_retries': 3,
234 'interval_start': 0,
235 'interval_max': 1,
236 'interval_step': 0.2},
237 type='dict', old={'celery_task_publish_retry_policy'},
238 ),
239 queues=Option(type='dict'),
240 queue_ha_policy=Option(None, type='string'),
241 queue_max_priority=Option(None, type='int'),
242 reject_on_worker_lost=Option(type='bool'),
243 remote_tracebacks=Option(False, type='bool'),
244 routes=Option(type='any'),
245 send_sent_event=Option(
246 False, type='bool', old={'celery_send_task_sent_event'},
247 ),
248 serializer=Option('json', old={'celery_task_serializer'}),
249 soft_time_limit=Option(
250 type='float', old={'celeryd_task_soft_time_limit'},
251 ),
252 time_limit=Option(
253 type='float', old={'celeryd_task_time_limit'},
254 ),
255 store_errors_even_if_ignored=Option(False, type='bool'),
256 track_started=Option(False, type='bool'),
257 ),
258 worker=Namespace(
259 __old__=OLD_NS_WORKER,
260 agent=Option(None, type='string'),
261 autoscaler=Option('celery.worker.autoscale:Autoscaler'),
262 concurrency=Option(0, type='int'),
263 consumer=Option('celery.worker.consumer:Consumer', type='string'),
264 direct=Option(False, type='bool', old={'celery_worker_direct'}),
265 disable_rate_limits=Option(
266 False, type='bool', old={'celery_disable_rate_limits'},
267 ),
268 enable_remote_control=Option(
269 True, type='bool', old={'celery_enable_remote_control'},
270 ),
271 hijack_root_logger=Option(True, type='bool'),
272 log_color=Option(type='bool'),
273 log_format=Option(DEFAULT_PROCESS_LOG_FMT),
274 lost_wait=Option(10.0, type='float', old={'celeryd_worker_lost_wait'}),
275 max_memory_per_child=Option(type='int'),
276 max_tasks_per_child=Option(type='int'),
277 pool=Option(DEFAULT_POOL),
278 pool_putlocks=Option(True, type='bool'),
279 pool_restarts=Option(False, type='bool'),
280 prefetch_multiplier=Option(4, type='int'),
281 redirect_stdouts=Option(
282 True, type='bool', old={'celery_redirect_stdouts'},
283 ),
284 redirect_stdouts_level=Option(
285 'WARNING', old={'celery_redirect_stdouts_level'},
286 ),
287 send_task_events=Option(
288 False, type='bool', old={'celeryd_send_events'},
289 ),
290 state_db=Option(),
291 task_log_format=Option(DEFAULT_TASK_LOG_FMT),
292 timer=Option(type='string'),
293 timer_precision=Option(1.0, type='float'),
294 ),
295 )
296
297
298 def _flatten_keys(ns, key, opt):
299 return [(ns + key, opt)]
300
301
302 def _to_compat(ns, key, opt):
303 if opt.old:
304 return [
305 (oldkey.format(key).upper(), ns + key, opt)
306 for oldkey in opt.old
307 ]
308 return [((ns + key).upper(), ns + key, opt)]
309
310
311 def flatten(d, root='', keyfilter=_flatten_keys):
312 """Flatten settings."""
313 stack = deque([(root, d)])
314 while stack:
315 ns, options = stack.popleft()
316 for key, opt in items(options):
317 if isinstance(opt, dict):
318 stack.append((ns + key + '_', opt))
319 else:
320 for ret in keyfilter(ns, key, opt):
321 yield ret
322
323
324 DEFAULTS = {
325 key: opt.default for key, opt in flatten(NAMESPACES)
326 }
327 __compat = list(flatten(NAMESPACES, keyfilter=_to_compat))
328 _OLD_DEFAULTS = {old_key: opt.default for old_key, _, opt in __compat}
329 _TO_OLD_KEY = {new_key: old_key for old_key, new_key, _ in __compat}
330 _TO_NEW_KEY = {old_key: new_key for old_key, new_key, _ in __compat}
331 __compat = None
332
333 SETTING_KEYS = set(keys(DEFAULTS))
334 _OLD_SETTING_KEYS = set(keys(_TO_NEW_KEY))
335
336
337 def find_deprecated_settings(source): # pragma: no cover
338 from celery.utils import deprecated
339 for name, opt in flatten(NAMESPACES):
340 if (opt.deprecate_by or opt.remove_by) and getattr(source, name, None):
341 deprecated.warn(description='The {0!r} setting'.format(name),
342 deprecation=opt.deprecate_by,
343 removal=opt.remove_by,
344 alternative='Use the {0.alt} instead'.format(opt))
345 return source
346
347
348 @memoize(maxsize=None)
349 def find(name, namespace='celery'):
350 """Find setting by name."""
351 # - Try specified name-space first.
352 namespace = namespace.lower()
353 try:
354 return searchresult(
355 namespace, name.lower(), NAMESPACES[namespace][name.lower()],
356 )
357 except KeyError:
358 # - Try all the other namespaces.
359 for ns, opts in items(NAMESPACES):
360 if ns.lower() == name.lower():
361 return searchresult(None, ns, opts)
362 elif isinstance(opts, dict):
363 try:
364 return searchresult(ns, name.lower(), opts[name.lower()])
365 except KeyError:
366 pass
367 # - See if name is a qualname last.
368 return searchresult(None, name.lower(), DEFAULTS[name.lower()])
369
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/celery/app/defaults.py b/celery/app/defaults.py
--- a/celery/app/defaults.py
+++ b/celery/app/defaults.py
@@ -285,7 +285,7 @@
'WARNING', old={'celery_redirect_stdouts_level'},
),
send_task_events=Option(
- False, type='bool', old={'celeryd_send_events'},
+ False, type='bool', old={'celery_send_events'},
),
state_db=Option(),
task_log_format=Option(DEFAULT_TASK_LOG_FMT),
| {"golden_diff": "diff --git a/celery/app/defaults.py b/celery/app/defaults.py\n--- a/celery/app/defaults.py\n+++ b/celery/app/defaults.py\n@@ -285,7 +285,7 @@\n 'WARNING', old={'celery_redirect_stdouts_level'},\n ),\n send_task_events=Option(\n- False, type='bool', old={'celeryd_send_events'},\n+ False, type='bool', old={'celery_send_events'},\n ),\n state_db=Option(),\n task_log_format=Option(DEFAULT_TASK_LOG_FMT),\n", "issue": "Request on_timeout should ignore soft time limit exception\nWhen Request.on_timeout receive a soft timeout from billiard, it does the same as if it was receiving a hard time limit exception. This is ran by the controller.\r\n\r\nBut the task may catch this exception and eg. return (this is what soft timeout are for).\r\n\r\nThis cause:\r\n1. the result to be saved once as an exception by the controller (on_timeout) and another time with the result returned by the task\r\n2. the task status to be passed to failure and to success on the same manner\r\n3. if the task is participating to a chord, the chord result counter (at least with redis) is incremented twice (instead of once), making the chord to return prematurely and eventually loose tasks\u2026\r\n\r\n1, 2 and 3 can leads of course to strange race conditions\u2026\r\n\r\n## Steps to reproduce (Illustration)\r\n\r\nwith the program in test_timeout.py:\r\n\r\n```python\r\nimport time\r\nimport celery\r\n\r\n\r\napp = celery.Celery('test_timeout')\r\napp.conf.update(\r\n result_backend=\"redis://localhost/0\",\r\n broker_url=\"amqp://celery:celery@localhost:5672/host\",\r\n)\r\n\r\[email protected](soft_time_limit=1)\r\ndef test():\r\n try:\r\n time.sleep(2)\r\n except Exception:\r\n return 1\r\n\r\[email protected]()\r\ndef add(args):\r\n print(\"### adding\", args)\r\n return sum(args)\r\n\r\[email protected]()\r\ndef on_error(context, exception, traceback, **kwargs):\r\n print(\"### on_error:\u00a0\", exception)\r\n\r\nif __name__ == \"__main__\":\r\n result = celery.chord([test.s().set(link_error=on_error.s()), test.s().set(link_error=on_error.s())])(add.s())\r\n result.get()\r\n```\r\n\r\nstart a worker and the program:\r\n\r\n```\r\n$ celery -A test_timeout worker -l WARNING\r\n$ python3 test_timeout.py\r\n```\r\n\r\n## Expected behavior\r\n\r\nadd method is called with `[1, 1]` as argument and test_timeout.py return normally\r\n\r\n## Actual behavior\r\n\r\nThe test_timeout.py fails, with\r\n```\r\ncelery.backends.base.ChordError: Callback error: ChordError(\"Dependency 15109e05-da43-449f-9081-85d839ac0ef2 raised SoftTimeLimitExceeded('SoftTimeLimitExceeded(True,)',)\",\r\n```\r\nOn the worker side, the **on_error is called but the add method as well !**\r\n\r\n```\r\n[2017-11-29 23:07:25,538: WARNING/MainProcess] Soft time limit (1s) exceeded for test_timeout.test[15109e05-da43-449f-9081-85d839ac0ef2]\r\n[2017-11-29 23:07:25,546: WARNING/MainProcess] ### on_error:\r\n[2017-11-29 23:07:25,546: WARNING/MainProcess] SoftTimeLimitExceeded(True,)\r\n[2017-11-29 23:07:25,547: WARNING/MainProcess] Soft time limit (1s) exceeded for test_timeout.test[38f3f7f2-4a89-4318-8ee9-36a987f73757]\r\n[2017-11-29 23:07:25,553: ERROR/MainProcess] Chord callback for 'ef6d7a38-d1b4-40ad-b937-ffa84e40bb23' raised: ChordError(\"Dependency 15109e05-da43-449f-9081-85d839ac0ef2 raised SoftTimeLimitExceeded('SoftTimeLimitExceeded(True,)',)\",)\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.4/dist-packages/celery/backends/redis.py\", line 290, in on_chord_part_return\r\n callback.delay([unpack(tup, decode) 
for tup in resl])\r\n File \"/usr/local/lib/python3.4/dist-packages/celery/backends/redis.py\", line 290, in <listcomp>\r\n callback.delay([unpack(tup, decode) for tup in resl])\r\n File \"/usr/local/lib/python3.4/dist-packages/celery/backends/redis.py\", line 243, in _unpack_chord_result\r\n raise ChordError('Dependency {0} raised {1!r}'.format(tid, retval))\r\ncelery.exceptions.ChordError: Dependency 15109e05-da43-449f-9081-85d839ac0ef2 raised SoftTimeLimitExceeded('SoftTimeLimitExceeded(True,)',)\r\n[2017-11-29 23:07:25,565: WARNING/MainProcess] ### on_error:\r\n[2017-11-29 23:07:25,565: WARNING/MainProcess] SoftTimeLimitExceeded(True,)\r\n[2017-11-29 23:07:27,262: WARNING/PoolWorker-2] ### adding\r\n[2017-11-29 23:07:27,264: WARNING/PoolWorker-2] [1, 1]\r\n```\r\n\r\nOf course, on purpose did I choose to call the test.s() twice, to show that the count in the chord continues. In fact:\r\n- the chord result is incremented twice by the error of soft time limit\r\n- the chord result is again incremented twice by the correct returning of `test` task\r\n\r\n## Conclusion\r\n\r\nRequest.on_timeout should not process soft time limit exception. \r\n\r\nhere is a quick monkey patch (correction of celery is trivial)\r\n\r\n```python\r\ndef patch_celery_request_on_timeout():\r\n from celery.worker import request\r\n orig = request.Request.on_timeout\r\n def patched_on_timeout(self, soft, timeout):\r\n if not soft:\r\n orig(self, soft, timeout)\r\n request.Request.on_timeout = patched_on_timeout\r\npatch_celery_request_on_timeout()\r\n```\r\n\r\n\r\n\r\n## version info\r\n\r\nsoftware -> celery:4.1.0 (latentcall) kombu:4.0.2 py:3.4.3\r\n billiard:3.5.0.2 py-amqp:2.1.4\r\nplatform -> system:Linux arch:64bit, ELF imp:CPython\r\nloader -> celery.loaders.app.AppLoader\r\nsettings -> transport:amqp results:redis://10.0.3.253/0\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Configuration introspection and defaults.\"\"\"\nfrom __future__ import absolute_import, unicode_literals\nimport sys\nfrom collections import deque, namedtuple\nfrom datetime import timedelta\nfrom celery.five import items, keys, python_2_unicode_compatible\nfrom celery.utils.functional import memoize\nfrom celery.utils.serialization import strtobool\n\n__all__ = ['Option', 'NAMESPACES', 'flatten', 'find']\n\nis_jython = sys.platform.startswith('java')\nis_pypy = hasattr(sys, 'pypy_version_info')\n\nDEFAULT_POOL = 'prefork'\nif is_jython:\n DEFAULT_POOL = 'solo'\nelif is_pypy:\n if sys.pypy_version_info[0:3] < (1, 5, 0):\n DEFAULT_POOL = 'solo'\n else:\n DEFAULT_POOL = 'prefork'\n\nDEFAULT_ACCEPT_CONTENT = ['json']\nDEFAULT_PROCESS_LOG_FMT = \"\"\"\n [%(asctime)s: %(levelname)s/%(processName)s] %(message)s\n\"\"\".strip()\nDEFAULT_TASK_LOG_FMT = \"\"\"[%(asctime)s: %(levelname)s/%(processName)s] \\\n%(task_name)s[%(task_id)s]: %(message)s\"\"\"\n\nOLD_NS = {'celery_{0}'}\nOLD_NS_BEAT = {'celerybeat_{0}'}\nOLD_NS_WORKER = {'celeryd_{0}'}\n\nsearchresult = namedtuple('searchresult', ('namespace', 'key', 'type'))\n\n\ndef Namespace(__old__=None, **options):\n if __old__ is not None:\n for key, opt in items(options):\n if not opt.old:\n opt.old = {o.format(key) for o in __old__}\n return options\n\n\ndef old_ns(ns):\n return {'{0}_{{0}}'.format(ns)}\n\n\n@python_2_unicode_compatible\nclass Option(object):\n \"\"\"Decribes a Celery configuration option.\"\"\"\n\n alt = None\n deprecate_by = None\n remove_by = None\n old = set()\n typemap = dict(string=str, int=int, float=float, any=lambda v: v,\n bool=strtobool, 
dict=dict, tuple=tuple)\n\n def __init__(self, default=None, *args, **kwargs):\n self.default = default\n self.type = kwargs.get('type') or 'string'\n for attr, value in items(kwargs):\n setattr(self, attr, value)\n\n def to_python(self, value):\n return self.typemap[self.type](value)\n\n def __repr__(self):\n return '<Option: type->{0} default->{1!r}>'.format(self.type,\n self.default)\n\n\nNAMESPACES = Namespace(\n accept_content=Option(DEFAULT_ACCEPT_CONTENT, type='list', old=OLD_NS),\n enable_utc=Option(True, type='bool'),\n imports=Option((), type='tuple', old=OLD_NS),\n include=Option((), type='tuple', old=OLD_NS),\n timezone=Option(type='string', old=OLD_NS),\n beat=Namespace(\n __old__=OLD_NS_BEAT,\n\n max_loop_interval=Option(0, type='float'),\n schedule=Option({}, type='dict'),\n scheduler=Option('celery.beat:PersistentScheduler'),\n schedule_filename=Option('celerybeat-schedule'),\n sync_every=Option(0, type='int'),\n ),\n broker=Namespace(\n url=Option(None, type='string'),\n read_url=Option(None, type='string'),\n write_url=Option(None, type='string'),\n transport=Option(type='string'),\n transport_options=Option({}, type='dict'),\n connection_timeout=Option(4, type='float'),\n connection_retry=Option(True, type='bool'),\n connection_max_retries=Option(100, type='int'),\n failover_strategy=Option(None, type='string'),\n heartbeat=Option(120, type='int'),\n heartbeat_checkrate=Option(3.0, type='int'),\n login_method=Option(None, type='string'),\n pool_limit=Option(10, type='int'),\n use_ssl=Option(False, type='bool'),\n\n host=Option(type='string'),\n port=Option(type='int'),\n user=Option(type='string'),\n password=Option(type='string'),\n vhost=Option(type='string'),\n ),\n cache=Namespace(\n __old__=old_ns('celery_cache'),\n\n backend=Option(),\n backend_options=Option({}, type='dict'),\n ),\n cassandra=Namespace(\n entry_ttl=Option(type='float'),\n keyspace=Option(type='string'),\n port=Option(type='string'),\n read_consistency=Option(type='string'),\n servers=Option(type='list'),\n table=Option(type='string'),\n write_consistency=Option(type='string'),\n auth_provider=Option(type='string'),\n auth_kwargs=Option(type='string'),\n ),\n control=Namespace(\n queue_ttl=Option(300.0, type='float'),\n queue_expires=Option(10.0, type='float'),\n ),\n couchbase=Namespace(\n __old__=old_ns('celery_couchbase'),\n\n backend_settings=Option(None, type='dict'),\n ),\n mongodb=Namespace(\n __old__=old_ns('celery_mongodb'),\n\n backend_settings=Option(type='dict'),\n ),\n event=Namespace(\n __old__=old_ns('celery_event'),\n\n queue_expires=Option(60.0, type='float'),\n queue_ttl=Option(5.0, type='float'),\n queue_prefix=Option('celeryev'),\n serializer=Option('json'),\n ),\n redis=Namespace(\n __old__=old_ns('celery_redis'),\n\n backend_use_ssl=Option(type='dict'),\n db=Option(type='int'),\n host=Option(type='string'),\n max_connections=Option(type='int'),\n password=Option(type='string'),\n port=Option(type='int'),\n socket_timeout=Option(120.0, type='float'),\n socket_connect_timeout=Option(None, type='float'),\n ),\n result=Namespace(\n __old__=old_ns('celery_result'),\n\n backend=Option(type='string'),\n cache_max=Option(\n -1,\n type='int', old={'celery_max_cached_results'},\n ),\n compression=Option(type='str'),\n exchange=Option('celeryresults'),\n exchange_type=Option('direct'),\n expires=Option(\n timedelta(days=1),\n type='float', old={'celery_task_result_expires'},\n ),\n persistent=Option(None, type='bool'),\n serializer=Option('json'),\n ),\n elasticsearch=Namespace(\n 
__old__=old_ns('celery_elasticsearch'),\n\n retry_on_timeout=Option(type='bool'),\n max_retries=Option(type='int'),\n timeout=Option(type='float'),\n ),\n riak=Namespace(\n __old__=old_ns('celery_riak'),\n\n backend_settings=Option(type='dict'),\n ),\n security=Namespace(\n __old__=old_ns('celery_security'),\n\n certificate=Option(type='string'),\n cert_store=Option(type='string'),\n key=Option(type='string'),\n ),\n database=Namespace(\n url=Option(old={'celery_result_dburi'}),\n engine_options=Option(\n type='dict', old={'celery_result_engine_options'},\n ),\n short_lived_sessions=Option(\n False, type='bool', old={'celery_result_db_short_lived_sessions'},\n ),\n table_names=Option(type='dict', old={'celery_result_db_tablenames'}),\n ),\n task=Namespace(\n __old__=OLD_NS,\n acks_late=Option(False, type='bool'),\n always_eager=Option(False, type='bool'),\n annotations=Option(type='any'),\n compression=Option(type='string', old={'celery_message_compression'}),\n create_missing_queues=Option(True, type='bool'),\n default_delivery_mode=Option(2, type='string'),\n default_queue=Option('celery'),\n default_exchange=Option(None, type='string'), # taken from queue\n default_exchange_type=Option('direct'),\n default_routing_key=Option(None, type='string'), # taken from queue\n default_rate_limit=Option(type='string'),\n eager_propagates=Option(\n False, type='bool', old={'celery_eager_propagates_exceptions'},\n ),\n ignore_result=Option(False, type='bool'),\n protocol=Option(2, type='int', old={'celery_task_protocol'}),\n publish_retry=Option(\n True, type='bool', old={'celery_task_publish_retry'},\n ),\n publish_retry_policy=Option(\n {'max_retries': 3,\n 'interval_start': 0,\n 'interval_max': 1,\n 'interval_step': 0.2},\n type='dict', old={'celery_task_publish_retry_policy'},\n ),\n queues=Option(type='dict'),\n queue_ha_policy=Option(None, type='string'),\n queue_max_priority=Option(None, type='int'),\n reject_on_worker_lost=Option(type='bool'),\n remote_tracebacks=Option(False, type='bool'),\n routes=Option(type='any'),\n send_sent_event=Option(\n False, type='bool', old={'celery_send_task_sent_event'},\n ),\n serializer=Option('json', old={'celery_task_serializer'}),\n soft_time_limit=Option(\n type='float', old={'celeryd_task_soft_time_limit'},\n ),\n time_limit=Option(\n type='float', old={'celeryd_task_time_limit'},\n ),\n store_errors_even_if_ignored=Option(False, type='bool'),\n track_started=Option(False, type='bool'),\n ),\n worker=Namespace(\n __old__=OLD_NS_WORKER,\n agent=Option(None, type='string'),\n autoscaler=Option('celery.worker.autoscale:Autoscaler'),\n concurrency=Option(0, type='int'),\n consumer=Option('celery.worker.consumer:Consumer', type='string'),\n direct=Option(False, type='bool', old={'celery_worker_direct'}),\n disable_rate_limits=Option(\n False, type='bool', old={'celery_disable_rate_limits'},\n ),\n enable_remote_control=Option(\n True, type='bool', old={'celery_enable_remote_control'},\n ),\n hijack_root_logger=Option(True, type='bool'),\n log_color=Option(type='bool'),\n log_format=Option(DEFAULT_PROCESS_LOG_FMT),\n lost_wait=Option(10.0, type='float', old={'celeryd_worker_lost_wait'}),\n max_memory_per_child=Option(type='int'),\n max_tasks_per_child=Option(type='int'),\n pool=Option(DEFAULT_POOL),\n pool_putlocks=Option(True, type='bool'),\n pool_restarts=Option(False, type='bool'),\n prefetch_multiplier=Option(4, type='int'),\n redirect_stdouts=Option(\n True, type='bool', old={'celery_redirect_stdouts'},\n ),\n redirect_stdouts_level=Option(\n 'WARNING', 
old={'celery_redirect_stdouts_level'},\n ),\n send_task_events=Option(\n False, type='bool', old={'celeryd_send_events'},\n ),\n state_db=Option(),\n task_log_format=Option(DEFAULT_TASK_LOG_FMT),\n timer=Option(type='string'),\n timer_precision=Option(1.0, type='float'),\n ),\n)\n\n\ndef _flatten_keys(ns, key, opt):\n return [(ns + key, opt)]\n\n\ndef _to_compat(ns, key, opt):\n if opt.old:\n return [\n (oldkey.format(key).upper(), ns + key, opt)\n for oldkey in opt.old\n ]\n return [((ns + key).upper(), ns + key, opt)]\n\n\ndef flatten(d, root='', keyfilter=_flatten_keys):\n \"\"\"Flatten settings.\"\"\"\n stack = deque([(root, d)])\n while stack:\n ns, options = stack.popleft()\n for key, opt in items(options):\n if isinstance(opt, dict):\n stack.append((ns + key + '_', opt))\n else:\n for ret in keyfilter(ns, key, opt):\n yield ret\n\n\nDEFAULTS = {\n key: opt.default for key, opt in flatten(NAMESPACES)\n}\n__compat = list(flatten(NAMESPACES, keyfilter=_to_compat))\n_OLD_DEFAULTS = {old_key: opt.default for old_key, _, opt in __compat}\n_TO_OLD_KEY = {new_key: old_key for old_key, new_key, _ in __compat}\n_TO_NEW_KEY = {old_key: new_key for old_key, new_key, _ in __compat}\n__compat = None\n\nSETTING_KEYS = set(keys(DEFAULTS))\n_OLD_SETTING_KEYS = set(keys(_TO_NEW_KEY))\n\n\ndef find_deprecated_settings(source): # pragma: no cover\n from celery.utils import deprecated\n for name, opt in flatten(NAMESPACES):\n if (opt.deprecate_by or opt.remove_by) and getattr(source, name, None):\n deprecated.warn(description='The {0!r} setting'.format(name),\n deprecation=opt.deprecate_by,\n removal=opt.remove_by,\n alternative='Use the {0.alt} instead'.format(opt))\n return source\n\n\n@memoize(maxsize=None)\ndef find(name, namespace='celery'):\n \"\"\"Find setting by name.\"\"\"\n # - Try specified name-space first.\n namespace = namespace.lower()\n try:\n return searchresult(\n namespace, name.lower(), NAMESPACES[namespace][name.lower()],\n )\n except KeyError:\n # - Try all the other namespaces.\n for ns, opts in items(NAMESPACES):\n if ns.lower() == name.lower():\n return searchresult(None, ns, opts)\n elif isinstance(opts, dict):\n try:\n return searchresult(ns, name.lower(), opts[name.lower()])\n except KeyError:\n pass\n # - See if name is a qualname last.\n return searchresult(None, name.lower(), DEFAULTS[name.lower()])\n", "path": "celery/app/defaults.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Configuration introspection and defaults.\"\"\"\nfrom __future__ import absolute_import, unicode_literals\nimport sys\nfrom collections import deque, namedtuple\nfrom datetime import timedelta\nfrom celery.five import items, keys, python_2_unicode_compatible\nfrom celery.utils.functional import memoize\nfrom celery.utils.serialization import strtobool\n\n__all__ = ['Option', 'NAMESPACES', 'flatten', 'find']\n\nis_jython = sys.platform.startswith('java')\nis_pypy = hasattr(sys, 'pypy_version_info')\n\nDEFAULT_POOL = 'prefork'\nif is_jython:\n DEFAULT_POOL = 'solo'\nelif is_pypy:\n if sys.pypy_version_info[0:3] < (1, 5, 0):\n DEFAULT_POOL = 'solo'\n else:\n DEFAULT_POOL = 'prefork'\n\nDEFAULT_ACCEPT_CONTENT = ['json']\nDEFAULT_PROCESS_LOG_FMT = \"\"\"\n [%(asctime)s: %(levelname)s/%(processName)s] %(message)s\n\"\"\".strip()\nDEFAULT_TASK_LOG_FMT = \"\"\"[%(asctime)s: %(levelname)s/%(processName)s] \\\n%(task_name)s[%(task_id)s]: %(message)s\"\"\"\n\nOLD_NS = {'celery_{0}'}\nOLD_NS_BEAT = {'celerybeat_{0}'}\nOLD_NS_WORKER = {'celeryd_{0}'}\n\nsearchresult = 
namedtuple('searchresult', ('namespace', 'key', 'type'))\n\n\ndef Namespace(__old__=None, **options):\n if __old__ is not None:\n for key, opt in items(options):\n if not opt.old:\n opt.old = {o.format(key) for o in __old__}\n return options\n\n\ndef old_ns(ns):\n return {'{0}_{{0}}'.format(ns)}\n\n\n@python_2_unicode_compatible\nclass Option(object):\n \"\"\"Decribes a Celery configuration option.\"\"\"\n\n alt = None\n deprecate_by = None\n remove_by = None\n old = set()\n typemap = dict(string=str, int=int, float=float, any=lambda v: v,\n bool=strtobool, dict=dict, tuple=tuple)\n\n def __init__(self, default=None, *args, **kwargs):\n self.default = default\n self.type = kwargs.get('type') or 'string'\n for attr, value in items(kwargs):\n setattr(self, attr, value)\n\n def to_python(self, value):\n return self.typemap[self.type](value)\n\n def __repr__(self):\n return '<Option: type->{0} default->{1!r}>'.format(self.type,\n self.default)\n\n\nNAMESPACES = Namespace(\n accept_content=Option(DEFAULT_ACCEPT_CONTENT, type='list', old=OLD_NS),\n enable_utc=Option(True, type='bool'),\n imports=Option((), type='tuple', old=OLD_NS),\n include=Option((), type='tuple', old=OLD_NS),\n timezone=Option(type='string', old=OLD_NS),\n beat=Namespace(\n __old__=OLD_NS_BEAT,\n\n max_loop_interval=Option(0, type='float'),\n schedule=Option({}, type='dict'),\n scheduler=Option('celery.beat:PersistentScheduler'),\n schedule_filename=Option('celerybeat-schedule'),\n sync_every=Option(0, type='int'),\n ),\n broker=Namespace(\n url=Option(None, type='string'),\n read_url=Option(None, type='string'),\n write_url=Option(None, type='string'),\n transport=Option(type='string'),\n transport_options=Option({}, type='dict'),\n connection_timeout=Option(4, type='float'),\n connection_retry=Option(True, type='bool'),\n connection_max_retries=Option(100, type='int'),\n failover_strategy=Option(None, type='string'),\n heartbeat=Option(120, type='int'),\n heartbeat_checkrate=Option(3.0, type='int'),\n login_method=Option(None, type='string'),\n pool_limit=Option(10, type='int'),\n use_ssl=Option(False, type='bool'),\n\n host=Option(type='string'),\n port=Option(type='int'),\n user=Option(type='string'),\n password=Option(type='string'),\n vhost=Option(type='string'),\n ),\n cache=Namespace(\n __old__=old_ns('celery_cache'),\n\n backend=Option(),\n backend_options=Option({}, type='dict'),\n ),\n cassandra=Namespace(\n entry_ttl=Option(type='float'),\n keyspace=Option(type='string'),\n port=Option(type='string'),\n read_consistency=Option(type='string'),\n servers=Option(type='list'),\n table=Option(type='string'),\n write_consistency=Option(type='string'),\n auth_provider=Option(type='string'),\n auth_kwargs=Option(type='string'),\n ),\n control=Namespace(\n queue_ttl=Option(300.0, type='float'),\n queue_expires=Option(10.0, type='float'),\n ),\n couchbase=Namespace(\n __old__=old_ns('celery_couchbase'),\n\n backend_settings=Option(None, type='dict'),\n ),\n mongodb=Namespace(\n __old__=old_ns('celery_mongodb'),\n\n backend_settings=Option(type='dict'),\n ),\n event=Namespace(\n __old__=old_ns('celery_event'),\n\n queue_expires=Option(60.0, type='float'),\n queue_ttl=Option(5.0, type='float'),\n queue_prefix=Option('celeryev'),\n serializer=Option('json'),\n ),\n redis=Namespace(\n __old__=old_ns('celery_redis'),\n\n backend_use_ssl=Option(type='dict'),\n db=Option(type='int'),\n host=Option(type='string'),\n max_connections=Option(type='int'),\n password=Option(type='string'),\n port=Option(type='int'),\n 
socket_timeout=Option(120.0, type='float'),\n socket_connect_timeout=Option(None, type='float'),\n ),\n result=Namespace(\n __old__=old_ns('celery_result'),\n\n backend=Option(type='string'),\n cache_max=Option(\n -1,\n type='int', old={'celery_max_cached_results'},\n ),\n compression=Option(type='str'),\n exchange=Option('celeryresults'),\n exchange_type=Option('direct'),\n expires=Option(\n timedelta(days=1),\n type='float', old={'celery_task_result_expires'},\n ),\n persistent=Option(None, type='bool'),\n serializer=Option('json'),\n ),\n elasticsearch=Namespace(\n __old__=old_ns('celery_elasticsearch'),\n\n retry_on_timeout=Option(type='bool'),\n max_retries=Option(type='int'),\n timeout=Option(type='float'),\n ),\n riak=Namespace(\n __old__=old_ns('celery_riak'),\n\n backend_settings=Option(type='dict'),\n ),\n security=Namespace(\n __old__=old_ns('celery_security'),\n\n certificate=Option(type='string'),\n cert_store=Option(type='string'),\n key=Option(type='string'),\n ),\n database=Namespace(\n url=Option(old={'celery_result_dburi'}),\n engine_options=Option(\n type='dict', old={'celery_result_engine_options'},\n ),\n short_lived_sessions=Option(\n False, type='bool', old={'celery_result_db_short_lived_sessions'},\n ),\n table_names=Option(type='dict', old={'celery_result_db_tablenames'}),\n ),\n task=Namespace(\n __old__=OLD_NS,\n acks_late=Option(False, type='bool'),\n always_eager=Option(False, type='bool'),\n annotations=Option(type='any'),\n compression=Option(type='string', old={'celery_message_compression'}),\n create_missing_queues=Option(True, type='bool'),\n default_delivery_mode=Option(2, type='string'),\n default_queue=Option('celery'),\n default_exchange=Option(None, type='string'), # taken from queue\n default_exchange_type=Option('direct'),\n default_routing_key=Option(None, type='string'), # taken from queue\n default_rate_limit=Option(type='string'),\n eager_propagates=Option(\n False, type='bool', old={'celery_eager_propagates_exceptions'},\n ),\n ignore_result=Option(False, type='bool'),\n protocol=Option(2, type='int', old={'celery_task_protocol'}),\n publish_retry=Option(\n True, type='bool', old={'celery_task_publish_retry'},\n ),\n publish_retry_policy=Option(\n {'max_retries': 3,\n 'interval_start': 0,\n 'interval_max': 1,\n 'interval_step': 0.2},\n type='dict', old={'celery_task_publish_retry_policy'},\n ),\n queues=Option(type='dict'),\n queue_ha_policy=Option(None, type='string'),\n queue_max_priority=Option(None, type='int'),\n reject_on_worker_lost=Option(type='bool'),\n remote_tracebacks=Option(False, type='bool'),\n routes=Option(type='any'),\n send_sent_event=Option(\n False, type='bool', old={'celery_send_task_sent_event'},\n ),\n serializer=Option('json', old={'celery_task_serializer'}),\n soft_time_limit=Option(\n type='float', old={'celeryd_task_soft_time_limit'},\n ),\n time_limit=Option(\n type='float', old={'celeryd_task_time_limit'},\n ),\n store_errors_even_if_ignored=Option(False, type='bool'),\n track_started=Option(False, type='bool'),\n ),\n worker=Namespace(\n __old__=OLD_NS_WORKER,\n agent=Option(None, type='string'),\n autoscaler=Option('celery.worker.autoscale:Autoscaler'),\n concurrency=Option(0, type='int'),\n consumer=Option('celery.worker.consumer:Consumer', type='string'),\n direct=Option(False, type='bool', old={'celery_worker_direct'}),\n disable_rate_limits=Option(\n False, type='bool', old={'celery_disable_rate_limits'},\n ),\n enable_remote_control=Option(\n True, type='bool', old={'celery_enable_remote_control'},\n ),\n 
hijack_root_logger=Option(True, type='bool'),\n log_color=Option(type='bool'),\n log_format=Option(DEFAULT_PROCESS_LOG_FMT),\n lost_wait=Option(10.0, type='float', old={'celeryd_worker_lost_wait'}),\n max_memory_per_child=Option(type='int'),\n max_tasks_per_child=Option(type='int'),\n pool=Option(DEFAULT_POOL),\n pool_putlocks=Option(True, type='bool'),\n pool_restarts=Option(False, type='bool'),\n prefetch_multiplier=Option(4, type='int'),\n redirect_stdouts=Option(\n True, type='bool', old={'celery_redirect_stdouts'},\n ),\n redirect_stdouts_level=Option(\n 'WARNING', old={'celery_redirect_stdouts_level'},\n ),\n send_task_events=Option(\n False, type='bool', old={'celery_send_events'},\n ),\n state_db=Option(),\n task_log_format=Option(DEFAULT_TASK_LOG_FMT),\n timer=Option(type='string'),\n timer_precision=Option(1.0, type='float'),\n ),\n)\n\n\ndef _flatten_keys(ns, key, opt):\n return [(ns + key, opt)]\n\n\ndef _to_compat(ns, key, opt):\n if opt.old:\n return [\n (oldkey.format(key).upper(), ns + key, opt)\n for oldkey in opt.old\n ]\n return [((ns + key).upper(), ns + key, opt)]\n\n\ndef flatten(d, root='', keyfilter=_flatten_keys):\n \"\"\"Flatten settings.\"\"\"\n stack = deque([(root, d)])\n while stack:\n ns, options = stack.popleft()\n for key, opt in items(options):\n if isinstance(opt, dict):\n stack.append((ns + key + '_', opt))\n else:\n for ret in keyfilter(ns, key, opt):\n yield ret\n\n\nDEFAULTS = {\n key: opt.default for key, opt in flatten(NAMESPACES)\n}\n__compat = list(flatten(NAMESPACES, keyfilter=_to_compat))\n_OLD_DEFAULTS = {old_key: opt.default for old_key, _, opt in __compat}\n_TO_OLD_KEY = {new_key: old_key for old_key, new_key, _ in __compat}\n_TO_NEW_KEY = {old_key: new_key for old_key, new_key, _ in __compat}\n__compat = None\n\nSETTING_KEYS = set(keys(DEFAULTS))\n_OLD_SETTING_KEYS = set(keys(_TO_NEW_KEY))\n\n\ndef find_deprecated_settings(source): # pragma: no cover\n from celery.utils import deprecated\n for name, opt in flatten(NAMESPACES):\n if (opt.deprecate_by or opt.remove_by) and getattr(source, name, None):\n deprecated.warn(description='The {0!r} setting'.format(name),\n deprecation=opt.deprecate_by,\n removal=opt.remove_by,\n alternative='Use the {0.alt} instead'.format(opt))\n return source\n\n\n@memoize(maxsize=None)\ndef find(name, namespace='celery'):\n \"\"\"Find setting by name.\"\"\"\n # - Try specified name-space first.\n namespace = namespace.lower()\n try:\n return searchresult(\n namespace, name.lower(), NAMESPACES[namespace][name.lower()],\n )\n except KeyError:\n # - Try all the other namespaces.\n for ns, opts in items(NAMESPACES):\n if ns.lower() == name.lower():\n return searchresult(None, ns, opts)\n elif isinstance(opts, dict):\n try:\n return searchresult(ns, name.lower(), opts[name.lower()])\n except KeyError:\n pass\n # - See if name is a qualname last.\n return searchresult(None, name.lower(), DEFAULTS[name.lower()])\n", "path": "celery/app/defaults.py"}]} |
gh_patches_debug_1225 | rasdani/github-patches | git_diff | ivy-llc__ivy-19405 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Einsum
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/tensorflow/raw_ops.py`
Content:
```
1 # global
2 import ivy
3 import ivy.functional.frontends.tensorflow as tf_frontend
4 from ivy.functional.frontends.tensorflow import check_tensorflow_casting
5 from ivy.functional.frontends.tensorflow.func_wrapper import (
6 to_ivy_arrays_and_back,
7 map_raw_ops_alias,
8 to_ivy_dtype,
9 )
10
11 from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes
12 from ivy.utils.exceptions import IvyNotImplementedException
13
14 AddN = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.add_n))
15
16
17 Acos = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.acos))
18
19
20 Acosh = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.acosh))
21
22
23 Add = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.add))
24
25
26 ArgMax = to_ivy_arrays_and_back(
27 with_unsupported_dtypes(
28 {"2.13.0 and below": ("complex",)},
29 "tensorflow",
30 )(
31 map_raw_ops_alias(
32 tf_frontend.math.argmax, kwargs_to_update={"dimension": "axis"}
33 )
34 )
35 )
36
37
38 AddV2 = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.add))
39
40
41 Atan2 = to_ivy_arrays_and_back(
42 with_unsupported_dtypes(
43 {"2.13.0 and below": "float16"},
44 "tensorflow",
45 )(map_raw_ops_alias(tf_frontend.math.atan2))
46 )
47
48
49 @with_unsupported_dtypes(
50 {
51 "2.13.0 and below": (
52 "float16",
53 "bool",
54 "bfloat16",
55 )
56 },
57 "tensorflow",
58 )
59 @to_ivy_arrays_and_back
60 def ApproximateEqual(
61 *,
62 x,
63 y,
64 tolerance=1e-05,
65 name="ApproximateEqual",
66 ):
67 x, y = check_tensorflow_casting(x, y)
68 return ivy.abs(x - y) < tolerance
69
70
71 @to_ivy_arrays_and_back
72 def Angle(
73 *,
74 input,
75 Tout=ivy.float32,
76 name="Angle",
77 ):
78 Tout = ivy.as_ivy_dtype(Tout) if Tout is not None else ivy.float32
79 return ivy.astype(ivy.angle(input), Tout)
80
81
82 ArgMin = to_ivy_arrays_and_back(
83 with_unsupported_dtypes(
84 {"2.13.0 and below": ("complex",)},
85 "tensorflow",
86 )(
87 map_raw_ops_alias(
88 tf_frontend.math.argmin, kwargs_to_update={"dimension": "axis"}
89 )
90 )
91 )
92
93
94 Asin = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.asin))
95
96
97 Atan = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.atan))
98
99
100 @to_ivy_arrays_and_back
101 def Atanh(*, x, name="Atanh"):
102 return ivy.atanh(x)
103
104
105 @to_ivy_arrays_and_back
106 def BitwiseAnd(*, x, y, name="BitwiseAnd"):
107 x, y = check_tensorflow_casting(x, y)
108 return ivy.bitwise_and(x, y)
109
110
111 @to_ivy_arrays_and_back
112 def BitwiseOr(*, x, y, name="BitwiseOr"):
113 x, y = check_tensorflow_casting(x, y)
114 return ivy.bitwise_or(x, y)
115
116
117 @to_ivy_arrays_and_back
118 def BitwiseXor(*, x, y, name="BitwiseXor"):
119 x, y = check_tensorflow_casting(x, y)
120 return ivy.bitwise_xor(x, y)
121
122
123 @to_ivy_arrays_and_back
124 def BroadcastTo(*, input, shape, name="BroadcastTo"):
125 return ivy.broadcast_to(input, shape=shape)
126
127
128 @to_ivy_arrays_and_back
129 def Cholesky(*, input, name="Cholesky"):
130 return ivy.astype(ivy.cholesky(input), input.dtype)
131
132
133 @to_ivy_arrays_and_back
134 def Ceil(*, x, name=None):
135 return ivy.ceil(x)
136
137
138 @to_ivy_arrays_and_back
139 def Concat(*, concat_dim, values, name="Concat"):
140 return ivy.concat(values, axis=concat_dim)
141
142
143 Cos = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.cos))
144
145
146 Cosh = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.cosh))
147
148
149 @to_ivy_arrays_and_back
150 def Cross(*, a, b, name="Cross"):
151 a, b = check_tensorflow_casting(a, b)
152 return ivy.cross(a, b)
153
154
155 @to_ivy_arrays_and_back
156 def Cosh(*, x, name="Cosh"):
157 return ivy.cosh(x)
158
159
160 Div = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.divide))
161
162
163 @to_ivy_arrays_and_back
164 def Diag(*, diagonal, name="Diag"):
165 return ivy.astype(ivy.diag(diagonal), diagonal.dtype)
166
167
168 Cumprod = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.cumprod))
169
170
171 @to_ivy_arrays_and_back
172 def Equal(*, x, y, incompatible_shape_error=True, name="Equal"):
173 x, y = check_tensorflow_casting(x, y)
174 if incompatible_shape_error:
175 return ivy.equal(x, y)
176
177 try:
178 return ivy.equal(x, y)
179 except (ivy.utils.exceptions.IvyError, ivy.utils.exceptions.IvyBackendException):
180 return ivy.array(False)
181
182
183 @to_ivy_arrays_and_back
184 def Exp(*, x, name="Exp"):
185 return ivy.exp(x)
186
187
188 @to_ivy_arrays_and_back
189 def Expm1(*, x, name="Expm1"):
190 return ivy.expm1(x)
191
192
193 @to_ivy_arrays_and_back
194 def Fill(*, dims, value, name="Full"):
195 return ivy.full(dims, value)
196
197
198 @to_ivy_arrays_and_back
199 def Floor(*, x, name="Floor"):
200 return ivy.floor(x)
201
202
203 @to_ivy_arrays_and_back
204 def FloorDiv(*, x, y, name="FloorDiv"):
205 x, y = check_tensorflow_casting(x, y)
206 return ivy.floor_divide(x, y)
207
208
209 @to_ivy_arrays_and_back
210 def FloorMod(*, x, y, name="FloorMod"):
211 x, y = check_tensorflow_casting(x, y)
212 return ivy.remainder(x, y)
213
214
215 @to_ivy_arrays_and_back
216 def FFT(*, input, name="FFT"):
217 return ivy.astype(ivy.fft(input, -1), input.dtype)
218
219
220 @to_ivy_arrays_and_back
221 def Gather(*, params, indices, validate_indices=None, name="Gather"):
222 return ivy.gather(params, indices, axis=0, batch_dims=0)
223
224
225 @to_ivy_arrays_and_back
226 def Greater(*, x, y, name="Greater"):
227 x, y = check_tensorflow_casting(x, y)
228 return ivy.greater(x, y)
229
230
231 @to_ivy_arrays_and_back
232 def GreaterEqual(*, x, y, name="GreaterEqual"):
233 x, y = check_tensorflow_casting(x, y)
234 return ivy.greater_equal(x, y)
235
236
237 Identity = to_ivy_arrays_and_back(
238 map_raw_ops_alias(tf_frontend.general_functions.identity)
239 )
240
241
242 IdentityN = to_ivy_arrays_and_back(
243 map_raw_ops_alias(tf_frontend.general_functions.identity_n)
244 )
245
246
247 @to_ivy_arrays_and_back
248 def Inv(*, x, name="Inv"):
249 return ivy.astype(ivy.reciprocal(x), x.dtype)
250
251
252 Reciprocal = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.reciprocal))
253
254
255 @to_ivy_arrays_and_back
256 def Reverse(*, tensor, dims, name="Reverse"):
257 ret = tensor
258 for dim in enumerate(dims):
259 if dim[1]:
260 ret = ivy.flip(ret, axis=dim[0])
261 return ret
262
263
264 @to_ivy_arrays_and_back
265 def Invert(*, x, name="Invert"):
266 return ivy.bitwise_invert(x)
267
268
269 @to_ivy_arrays_and_back
270 def InvGrad(*, y, dy, name="InvGrad"):
271 return ivy.multiply(ivy.negative(dy), ivy.multiply(y, y))
272
273
274 @to_ivy_arrays_and_back
275 def LeftShift(*, x, y, name="LeftShift"):
276 return ivy.bitwise_left_shift(x, y)
277
278
279 @to_ivy_arrays_and_back
280 def Less(*, x, y, name="Less"):
281 x, y = check_tensorflow_casting(x, y)
282 return ivy.less(x, y)
283
284
285 LessEqual = to_ivy_arrays_and_back(
286 with_unsupported_dtypes(
287 {
288 "2.13.0 and below": ("complex",),
289 },
290 "tensorflow",
291 )(map_raw_ops_alias(tf_frontend.math.less_equal))
292 )
293
294
295 @to_ivy_arrays_and_back
296 def Log(*, x, name="Log"):
297 return ivy.log(x)
298
299
300 Log1p = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.log1p))
301
302 LogSoftmax = to_ivy_arrays_and_back(
303 with_supported_dtypes(
304 {
305 "2.13.0 and below": (
306 "bfloat16",
307 "float32",
308 "float64",
309 ),
310 },
311 "tensorflow",
312 )(map_raw_ops_alias(tf_frontend.math.log_softmax))
313 )
314
315 LogicalOr = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.logical_or))
316
317
318 @to_ivy_arrays_and_back
319 def LogicalNot(*, x, name="LogicalNot"):
320 return ivy.logical_not(x)
321
322
323 @to_ivy_arrays_and_back
324 def MatMul(*, a, b, transpose_a=False, transpose_b=False, name="MatMul"):
325 a, b = check_tensorflow_casting(a, b)
326 return ivy.matmul(a, b, transpose_a=transpose_a, transpose_b=transpose_b)
327
328
329 @to_ivy_arrays_and_back
330 def Rsqrt(*, x, name="Rsqrt"):
331 return ivy.sqrt(ivy.reciprocal(x))
332
333
334 @to_ivy_arrays_and_back
335 def MatrixInverse(*, input, adjoint=False, name="MatrixInverse"):
336 return ivy.inv(input, adjoint=adjoint)
337
338
339 MatrixDeterminant = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.linalg.det))
340
341
342 Max = to_ivy_arrays_and_back(
343 with_unsupported_dtypes(
344 {
345 "2.13.0 and below": ("complex",),
346 },
347 "tensorflow",
348 )(
349 map_raw_ops_alias(
350 tf_frontend.math.reduce_max,
351 kwargs_to_update={
352 "input": "input_tensor",
353 "keep_dims": "keepdims",
354 },
355 )
356 )
357 )
358
359
360 Maximum = to_ivy_arrays_and_back(
361 with_unsupported_dtypes(
362 {
363 "2.13.0 and below": ("complex",),
364 },
365 "tensorflow",
366 )(map_raw_ops_alias(tf_frontend.math.maximum))
367 )
368
369
370 Min = to_ivy_arrays_and_back(
371 with_unsupported_dtypes(
372 {
373 "2.13.0 and below": ("complex",),
374 },
375 "tensorflow",
376 )(
377 map_raw_ops_alias(
378 tf_frontend.math.reduce_min,
379 kwargs_to_update={
380 "input": "input_tensor",
381 "keep_dims": "keepdims",
382 },
383 )
384 )
385 )
386
387
388 @to_ivy_arrays_and_back
389 def Minimum(*, x, y, name="Minimum"):
390 return ivy.minimum(x, y)
391
392
393 Mul = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.multiply))
394
395
396 Neg = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.negative))
397
398
399 @to_ivy_arrays_and_back
400 def NotEqual(*, x, y, incompatible_shape_error=True, name="NotEqual"):
401 x, y = check_tensorflow_casting(x, y)
402 if incompatible_shape_error:
403 return ivy.not_equal(x, y)
404
405 try:
406 return ivy.not_equal(x, y)
407 except (ivy.utils.exceptions.IvyError, ivy.utils.exceptions.IvyBackendException):
408 return ivy.array(True)
409
410
411 @to_ivy_arrays_and_back
412 def NthElement(*, input, n, reverse=False, name="NthElement"):
413 return ivy.astype(ivy.sort(input, descending=reverse)[..., n], input.dtype)
414
415
416 @to_ivy_arrays_and_back
417 def OnesLike(*, x, name="OnesLike"):
418 return ivy.ones_like(x)
419
420
421 @to_ivy_arrays_and_back
422 def Pack(*, values, axis=0, name="Pack"):
423 return ivy.stack(values, axis=axis)
424
425
426 @to_ivy_arrays_and_back
427 def Pad(*, input, paddings, name="Pad"):
428 return ivy.constant_pad(input, paddings.to_list())
429
430
431 @to_ivy_arrays_and_back
432 def PadV2(*, input, paddings, constant_values, name="PadV2"):
433 return ivy.constant_pad(input, paddings.to_list(), value=constant_values)
434
435
436 Relu = to_ivy_arrays_and_back(
437 with_unsupported_dtypes(
438 {
439 "2.13.0 and below": ("complex", "float16"),
440 },
441 "tensorflow",
442 )(map_raw_ops_alias(tf_frontend.nn.relu))
443 )
444
445
446 RealDiv = to_ivy_arrays_and_back(
447 with_supported_dtypes(
448 {
449 "2.13.0 and below": (
450 "complex",
451 "bfloat16",
452 "float16",
453 "float64",
454 "float32",
455 ),
456 },
457 "tensorflow",
458 )(map_raw_ops_alias(tf_frontend.general_functions.realdiv))
459 )
460
461
462 Reshape = to_ivy_arrays_and_back(
463 map_raw_ops_alias(tf_frontend.general_functions.reshape)
464 )
465
466
467 @to_ivy_arrays_and_back
468 def RightShift(*, x, y, name="RightShift"):
469 return ivy.bitwise_right_shift(x, y)
470
471
472 @to_ivy_arrays_and_back
473 def Round(*, x, name="Round"):
474 return ivy.round(x)
475
476
477 @to_ivy_arrays_and_back
478 def Shape(*, input, output_type=ivy.int32, name="Shape"):
479 output_type = to_ivy_dtype(output_type)
480 return ivy.astype(ivy.shape(input, as_array=True), output_type, copy=False)
481
482
483 ShapeN = to_ivy_arrays_and_back(
484 map_raw_ops_alias(tf_frontend.general_functions.shape_n)
485 )
486
487
488 Sin = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.sin))
489
490
491 @to_ivy_arrays_and_back
492 def Sinh(*, x, name="Sinh"):
493 return ivy.sinh(x)
494
495
496 @with_unsupported_dtypes(
497 {"2.13.0 and below": ("unsigned",)},
498 "tensorflow",
499 )
500 @to_ivy_arrays_and_back
501 def Sign(*, x, name="Sign"):
502 return ivy.sign(x, np_variant=False)
503
504
505 Size = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.general_functions.size))
506
507
508 Split = to_ivy_arrays_and_back(
509 map_raw_ops_alias(
510 tf_frontend.split, kwargs_to_update={"num_split": "num_or_size_splits"}
511 )
512 )
513
514
515 @to_ivy_arrays_and_back
516 def SplitV(*, value, size_splits, axis, num_split, name="SplitV"):
517 return ivy.split(value, num_or_size_splits=size_splits, axis=axis)
518
519
520 @to_ivy_arrays_and_back
521 def Sqrt(*, x, name="Sqrt"):
522 return ivy.sqrt(x)
523
524
525 @to_ivy_arrays_and_back
526 def Square(*, x, name="Square"):
527 return ivy.square(x)
528
529
530 Squeeze = to_ivy_arrays_and_back(
531 map_raw_ops_alias(tf_frontend.general_functions.squeeze)
532 )
533
534
535 Sub = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.subtract))
536
537
538 @to_ivy_arrays_and_back
539 def Sum(*, input, axis, keep_dims=False, name="Sum"):
540 return ivy.astype(ivy.sum(input, axis=axis, keepdims=keep_dims), input.dtype)
541
542
543 Tan = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.tan))
544
545
546 Tanh = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.tanh))
547
548
549 @to_ivy_arrays_and_back
550 def TanhGrad(*, y, dy, name="TanhGrad"):
551 return ivy.multiply(dy, ivy.subtract(1, ivy.multiply(y, y)))
552
553
554 @to_ivy_arrays_and_back
555 def Transpose(*, x, perm, name="Transpose"):
556 ret = ivy.permute_dims(x, axes=perm)
557 return ret
558
559
560 Cumsum = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.cumsum))
561
562
563 @to_ivy_arrays_and_back
564 def TruncateDiv(*, x, y, name="TruncateDiv"):
565 return ivy.astype(ivy.trunc_divide(x, y), x.dtype)
566
567
568 @with_unsupported_dtypes({"2.13.0 and below": ("float16", "bfloat16")}, "tensorflow")
569 @to_ivy_arrays_and_back
570 def Unpack(*, value, num, axis=0, name="Unpack"):
571 return ivy.unstack(value, axis=axis)[:num]
572
573
574 @to_ivy_arrays_and_back
575 def ZerosLike(*, x, name="ZerosLike"):
576 return ivy.zeros_like(x)
577
578
579 Mean = to_ivy_arrays_and_back(
580 map_raw_ops_alias(
581 tf_frontend.math.reduce_mean,
582 kwargs_to_update={
583 "input": "input_tensor",
584 "keep_dims": "keepdims",
585 },
586 )
587 )
588
589
590 Pow = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.pow))
591
592
593 Relu6 = to_ivy_arrays_and_back(
594 with_unsupported_dtypes(
595 {
596 "2.13.0 and below": ("complex", "float16"),
597 },
598 "tensorflow",
599 )(
600 map_raw_ops_alias(
601 tf_frontend.nn.relu6,
602 )
603 )
604 )
605
606
607 Sigmoid = to_ivy_arrays_and_back(
608 map_raw_ops_alias(tf_frontend.keras.activations.sigmoid)
609 )
610
611
612 Softmax = to_ivy_arrays_and_back(
613 with_unsupported_dtypes(
614 {
615 "2.13.0 and below": ("float16",),
616 },
617 "tensorflow",
618 )(map_raw_ops_alias(tf_frontend.nn.softmax))
619 )
620
621
622 @to_ivy_arrays_and_back
623 def Softplus(*, features, name="Softplus"):
624 return ivy.softplus(features)
625
626
627 @to_ivy_arrays_and_back
628 def Xdivy(*, x, y, name="Xdivy"):
629 if (x == 0).all():
630 return 0.0
631 return ivy.divide(x, y)
632
633
634 @with_unsupported_dtypes({"2.13.0 and below": ("bfloat16",)}, "tensorflow")
635 @to_ivy_arrays_and_back
636 def Xlog1py(*, x, y, name="Xlog1py"):
637 if (x == 0).all():
638 return 0.0
639 return ivy.multiply(x, ivy.log1p(y))
640
641
642 Xlogy = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.xlogy))
643
644
645 @to_ivy_arrays_and_back
646 def EuclideanNorm(*, input, axis, keep_dims=False, name="EuclideanNorm"):
647 return ivy.astype(
648 ivy.vector_norm(input, axis=axis, keepdims=keep_dims), input.dtype
649 )
650
651
652 ConcatV2 = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.concat))
653
654
655 def _tf_to_ivy_ivy_arguments_for_conv(
656 padding, ex_pading, strides, dilations, data_format
657 ):
658 if data_format.find("C") == 1:
659 strides = strides[2:]
660 dilations = dilations[2:]
661 data_format = "channel_first"
662 pad_index = [4, 8]
663 else:
664 strides = strides[1:-1]
665 dilations = dilations[1:-1]
666 data_format = "channel_last"
667 pad_index = [2, 6]
668 if padding == "EXPLICIT":
669 padding = [
670 (ex_pading[i], ex_pading[i + 1])
671 for i in range(pad_index[0], pad_index[1], 2)
672 ]
673 return padding, strides, dilations, data_format
674
675
676 @to_ivy_arrays_and_back
677 def Conv2D(
678 *,
679 input,
680 filter,
681 strides,
682 padding,
683 use_cudnn_on_gpu,
684 explicit_paddings,
685 data_format="NHWC",
686 dilations=[1, 1, 1, 1],
687 name="Conv2D",
688 ):
689 padding, strides, dilations, data_format = _tf_to_ivy_ivy_arguments_for_conv(
690 padding, explicit_paddings, strides, dilations, data_format
691 )
692 return ivy.conv_general_dilated(
693 input,
694 filter,
695 strides,
696 padding,
697 data_format=data_format,
698 dilations=dilations,
699 dims=2,
700 )
701
702
703 @to_ivy_arrays_and_back
704 def Conv3D(
705 *,
706 input,
707 filter,
708 strides,
709 padding,
710 data_format="NDHWC",
711 dilations=[1, 1, 1, 1, 1],
712 name="Conv3D",
713 ):
714 # ivy.backends.tensorflow expects strides and dilations to be
715 # a single integer value or a list of 3 values whereas the raw op
716 # expects a list of 5 values
717 if data_format == "NDHWC":
718 strides = strides[1:-1]
719 dilations = dilations[1:-1]
720 elif data_format == "NCDHW":
721 strides = strides[2:]
722 dilations = dilations[2:]
723
724 return tf_frontend.nn.conv3d(
725 input,
726 filter,
727 strides,
728 padding,
729 data_format=data_format,
730 dilations=dilations,
731 name=name,
732 )
733
734
735 @to_ivy_arrays_and_back
736 def Elu(features, name=None):
737 zeros = ivy.zeros_like(features, dtype=ivy.dtype(features))
738 ones = ivy.ones_like(features, dtype=ivy.dtype(features))
739 ret_val = ivy.where(
740 # if x > 0 => x; else e^x - 1
741 features > zeros,
742 features,
743 ivy.subtract(ivy.exp(features), ones),
744 )
745 return ret_val
746
747
748 Elu.supported_dtypes = {
749 "numpy": (
750 "float16",
751 "float32",
752 "float64",
753 ),
754 "tensorflow": (
755 "bfloat16",
756 "float16",
757 "float32",
758 "float64",
759 ),
760 "torch": (
761 "bfloat16",
762 "float32",
763 "float64",
764 ),
765 "jax": (
766 "bfloat16",
767 "float16",
768 "float32",
769 "float64",
770 ),
771 }
772
773
774 @to_ivy_arrays_and_back
775 def LinSpace(*, start, stop, num, name=None):
776 return ivy.linspace(start, stop, num)
777
778
779 Roll = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.roll))
780
781
782 @to_ivy_arrays_and_back
783 def CumulativeLogsumexp(
784 x, axis, exclusive=False, reverse=False, name="CumulativeLogsumexp"
785 ):
786 # TODO
787 raise IvyNotImplementedException
788
789
790 @to_ivy_arrays_and_back
791 def Complex(real, imag, Tout=ivy.complex64, name="Complex"):
792 # TODO
793 raise IvyNotImplementedException
794
795
796 @to_ivy_arrays_and_back
797 def AccumulateNV2(inputs, shape, name="AccumulateNV2"):
798 # TODO
799 raise IvyNotImplementedException
800
801
802 @to_ivy_arrays_and_back
803 def DebugGradientIdentity(input, name="DebugGradientIdentity"):
804 # TODO
805 raise IvyNotImplementedException
806
807
808 @to_ivy_arrays_and_back
809 def Real(input, Tout=ivy.float32, name="Real"):
810 # TODO
811 raise IvyNotImplementedException
812
813
814 @to_ivy_arrays_and_back
815 def BandedTriangularSolve(
816 matrix,
817 rhs,
818 lower=True,
819 adjoint=False,
820 name="BandedTriangularSolve",
821 ):
822 # TODO
823 raise IvyNotImplementedException
824
825
826 @to_ivy_arrays_and_back
827 def BatchMatMul(x, y, adj_x=False, adj_y=False, name="BatchMatMul"):
828 # TODO
829 raise IvyNotImplementedException
830
831
832 @to_ivy_arrays_and_back
833 def BatchMatMulV2(x, y, adj_x=False, adj_y=False, name="BatchMatMulV2"):
834 # TODO
835 raise IvyNotImplementedException
836
837
838 @to_ivy_arrays_and_back
839 def BatchMatMulV3(x, y, Tout=ivy.Dtype, adj_x=False, adj_y=False, name="BatchMatMulV3"):
840 # TODO
841 raise IvyNotImplementedException
842
843
844 Slice = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.slice))
845
846 LeakyRelu = to_ivy_arrays_and_back(
847 map_raw_ops_alias(
848 tf_frontend.nn.leaky_relu,
849 )
850 )
851
852 LeakyRelu.supported_dtypes = {
853 "numpy": (
854 "float32",
855 "float64",
856 ),
857 "tensorflow": (
858 "bfloat16",
859 "float16",
860 "float32",
861 "float64",
862 ),
863 "torch": (
864 "float32",
865 "float64",
866 ),
867 "jax": (
868 "bfloat16",
869 "float16",
870 "float32",
871 "float64",
872 ),
873 }
874
875
876 @to_ivy_arrays_and_back
877 def Prod(*, input, axis, keep_dims=False, name="Prod"):
878 return ivy.astype(ivy.prod(input, axis=axis, keepdims=keep_dims), input.dtype)
879
880
881 Zeta = to_ivy_arrays_and_back(
882 with_supported_dtypes(
883 {
884 "2.13.0 and below": ("float32", "float64"),
885 },
886 "tensorflow",
887 )(map_raw_ops_alias(tf_frontend.math.zeta))
888 )
889
890
891 @to_ivy_arrays_and_back
892 def Imag(
893 *,
894 input,
895 Tout=ivy.float32,
896 name="Imag",
897 ):
898 Tout = ivy.as_ivy_dtype(Tout) if Tout is not None else ivy.float32
899 return ivy.astype(ivy.imag(input), Tout)
900
901
902 Imag.supported_dtypes = {
903 "tensorflow": (
904 "complex64",
905 "complex128",
906 ),
907 }
908
909
910 @to_ivy_arrays_and_back
911 def Svd(*, input, full_matrices=False, compute_uv=True, name=None):
912 return ivy.svd(input, compute_uv=compute_uv, full_matrices=full_matrices)
913
914
915 Svd.supported_dtypes = {
916 "tensorflow": (
917 "float64",
918 "float128",
919 "halfcomplex64",
920 "complex128",
921 ),
922 }
923
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ivy/functional/frontends/tensorflow/raw_ops.py b/ivy/functional/frontends/tensorflow/raw_ops.py
--- a/ivy/functional/frontends/tensorflow/raw_ops.py
+++ b/ivy/functional/frontends/tensorflow/raw_ops.py
@@ -920,3 +920,22 @@
"complex128",
),
}
+
+
+Einsum = to_ivy_arrays_and_back(
+ with_supported_dtypes(
+ {
+ "2.13.0 and below": (
+ "bfloat16",
+ "complex128 ",
+ "complex64",
+ "float64",
+ "float32",
+ "float16",
+ "int64",
+ "int32"
+ ),
+ },
+ "tensorflow",
+ )(map_raw_ops_alias(tf_frontend.general_functions.einsum))
+)
| {"golden_diff": "diff --git a/ivy/functional/frontends/tensorflow/raw_ops.py b/ivy/functional/frontends/tensorflow/raw_ops.py\n--- a/ivy/functional/frontends/tensorflow/raw_ops.py\n+++ b/ivy/functional/frontends/tensorflow/raw_ops.py\n@@ -920,3 +920,22 @@\n \"complex128\",\n ),\n }\n+\n+\n+Einsum = to_ivy_arrays_and_back(\n+ with_supported_dtypes(\n+ {\n+ \"2.13.0 and below\": (\n+ \"bfloat16\",\n+ \"complex128 \",\n+ \"complex64\",\n+ \"float64\",\n+ \"float32\",\n+ \"float16\",\n+ \"int64\",\n+ \"int32\"\n+ ),\n+ },\n+ \"tensorflow\",\n+ )(map_raw_ops_alias(tf_frontend.general_functions.einsum))\n+)\n", "issue": "Einsum\n\n", "before_files": [{"content": "# global\nimport ivy\nimport ivy.functional.frontends.tensorflow as tf_frontend\nfrom ivy.functional.frontends.tensorflow import check_tensorflow_casting\nfrom ivy.functional.frontends.tensorflow.func_wrapper import (\n to_ivy_arrays_and_back,\n map_raw_ops_alias,\n to_ivy_dtype,\n)\n\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\nfrom ivy.utils.exceptions import IvyNotImplementedException\n\nAddN = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.add_n))\n\n\nAcos = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.acos))\n\n\nAcosh = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.acosh))\n\n\nAdd = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.add))\n\n\nArgMax = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\"2.13.0 and below\": (\"complex\",)},\n \"tensorflow\",\n )(\n map_raw_ops_alias(\n tf_frontend.math.argmax, kwargs_to_update={\"dimension\": \"axis\"}\n )\n )\n)\n\n\nAddV2 = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.add))\n\n\nAtan2 = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\"2.13.0 and below\": \"float16\"},\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.math.atan2))\n)\n\n\n@with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\n \"float16\",\n \"bool\",\n \"bfloat16\",\n )\n },\n \"tensorflow\",\n)\n@to_ivy_arrays_and_back\ndef ApproximateEqual(\n *,\n x,\n y,\n tolerance=1e-05,\n name=\"ApproximateEqual\",\n):\n x, y = check_tensorflow_casting(x, y)\n return ivy.abs(x - y) < tolerance\n\n\n@to_ivy_arrays_and_back\ndef Angle(\n *,\n input,\n Tout=ivy.float32,\n name=\"Angle\",\n):\n Tout = ivy.as_ivy_dtype(Tout) if Tout is not None else ivy.float32\n return ivy.astype(ivy.angle(input), Tout)\n\n\nArgMin = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\"2.13.0 and below\": (\"complex\",)},\n \"tensorflow\",\n )(\n map_raw_ops_alias(\n tf_frontend.math.argmin, kwargs_to_update={\"dimension\": \"axis\"}\n )\n )\n)\n\n\nAsin = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.asin))\n\n\nAtan = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.atan))\n\n\n@to_ivy_arrays_and_back\ndef Atanh(*, x, name=\"Atanh\"):\n return ivy.atanh(x)\n\n\n@to_ivy_arrays_and_back\ndef BitwiseAnd(*, x, y, name=\"BitwiseAnd\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.bitwise_and(x, y)\n\n\n@to_ivy_arrays_and_back\ndef BitwiseOr(*, x, y, name=\"BitwiseOr\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.bitwise_or(x, y)\n\n\n@to_ivy_arrays_and_back\ndef BitwiseXor(*, x, y, name=\"BitwiseXor\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.bitwise_xor(x, y)\n\n\n@to_ivy_arrays_and_back\ndef BroadcastTo(*, input, shape, name=\"BroadcastTo\"):\n return ivy.broadcast_to(input, shape=shape)\n\n\n@to_ivy_arrays_and_back\ndef Cholesky(*, input, name=\"Cholesky\"):\n return 
ivy.astype(ivy.cholesky(input), input.dtype)\n\n\n@to_ivy_arrays_and_back\ndef Ceil(*, x, name=None):\n return ivy.ceil(x)\n\n\n@to_ivy_arrays_and_back\ndef Concat(*, concat_dim, values, name=\"Concat\"):\n return ivy.concat(values, axis=concat_dim)\n\n\nCos = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.cos))\n\n\nCosh = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.cosh))\n\n\n@to_ivy_arrays_and_back\ndef Cross(*, a, b, name=\"Cross\"):\n a, b = check_tensorflow_casting(a, b)\n return ivy.cross(a, b)\n\n\n@to_ivy_arrays_and_back\ndef Cosh(*, x, name=\"Cosh\"):\n return ivy.cosh(x)\n\n\nDiv = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.divide))\n\n\n@to_ivy_arrays_and_back\ndef Diag(*, diagonal, name=\"Diag\"):\n return ivy.astype(ivy.diag(diagonal), diagonal.dtype)\n\n\nCumprod = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.cumprod))\n\n\n@to_ivy_arrays_and_back\ndef Equal(*, x, y, incompatible_shape_error=True, name=\"Equal\"):\n x, y = check_tensorflow_casting(x, y)\n if incompatible_shape_error:\n return ivy.equal(x, y)\n\n try:\n return ivy.equal(x, y)\n except (ivy.utils.exceptions.IvyError, ivy.utils.exceptions.IvyBackendException):\n return ivy.array(False)\n\n\n@to_ivy_arrays_and_back\ndef Exp(*, x, name=\"Exp\"):\n return ivy.exp(x)\n\n\n@to_ivy_arrays_and_back\ndef Expm1(*, x, name=\"Expm1\"):\n return ivy.expm1(x)\n\n\n@to_ivy_arrays_and_back\ndef Fill(*, dims, value, name=\"Full\"):\n return ivy.full(dims, value)\n\n\n@to_ivy_arrays_and_back\ndef Floor(*, x, name=\"Floor\"):\n return ivy.floor(x)\n\n\n@to_ivy_arrays_and_back\ndef FloorDiv(*, x, y, name=\"FloorDiv\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.floor_divide(x, y)\n\n\n@to_ivy_arrays_and_back\ndef FloorMod(*, x, y, name=\"FloorMod\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.remainder(x, y)\n\n\n@to_ivy_arrays_and_back\ndef FFT(*, input, name=\"FFT\"):\n return ivy.astype(ivy.fft(input, -1), input.dtype)\n\n\n@to_ivy_arrays_and_back\ndef Gather(*, params, indices, validate_indices=None, name=\"Gather\"):\n return ivy.gather(params, indices, axis=0, batch_dims=0)\n\n\n@to_ivy_arrays_and_back\ndef Greater(*, x, y, name=\"Greater\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.greater(x, y)\n\n\n@to_ivy_arrays_and_back\ndef GreaterEqual(*, x, y, name=\"GreaterEqual\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.greater_equal(x, y)\n\n\nIdentity = to_ivy_arrays_and_back(\n map_raw_ops_alias(tf_frontend.general_functions.identity)\n)\n\n\nIdentityN = to_ivy_arrays_and_back(\n map_raw_ops_alias(tf_frontend.general_functions.identity_n)\n)\n\n\n@to_ivy_arrays_and_back\ndef Inv(*, x, name=\"Inv\"):\n return ivy.astype(ivy.reciprocal(x), x.dtype)\n\n\nReciprocal = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.reciprocal))\n\n\n@to_ivy_arrays_and_back\ndef Reverse(*, tensor, dims, name=\"Reverse\"):\n ret = tensor\n for dim in enumerate(dims):\n if dim[1]:\n ret = ivy.flip(ret, axis=dim[0])\n return ret\n\n\n@to_ivy_arrays_and_back\ndef Invert(*, x, name=\"Invert\"):\n return ivy.bitwise_invert(x)\n\n\n@to_ivy_arrays_and_back\ndef InvGrad(*, y, dy, name=\"InvGrad\"):\n return ivy.multiply(ivy.negative(dy), ivy.multiply(y, y))\n\n\n@to_ivy_arrays_and_back\ndef LeftShift(*, x, y, name=\"LeftShift\"):\n return ivy.bitwise_left_shift(x, y)\n\n\n@to_ivy_arrays_and_back\ndef Less(*, x, y, name=\"Less\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.less(x, y)\n\n\nLessEqual = to_ivy_arrays_and_back(\n 
with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\"complex\",),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.math.less_equal))\n)\n\n\n@to_ivy_arrays_and_back\ndef Log(*, x, name=\"Log\"):\n return ivy.log(x)\n\n\nLog1p = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.log1p))\n\nLogSoftmax = to_ivy_arrays_and_back(\n with_supported_dtypes(\n {\n \"2.13.0 and below\": (\n \"bfloat16\",\n \"float32\",\n \"float64\",\n ),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.math.log_softmax))\n)\n\nLogicalOr = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.logical_or))\n\n\n@to_ivy_arrays_and_back\ndef LogicalNot(*, x, name=\"LogicalNot\"):\n return ivy.logical_not(x)\n\n\n@to_ivy_arrays_and_back\ndef MatMul(*, a, b, transpose_a=False, transpose_b=False, name=\"MatMul\"):\n a, b = check_tensorflow_casting(a, b)\n return ivy.matmul(a, b, transpose_a=transpose_a, transpose_b=transpose_b)\n\n\n@to_ivy_arrays_and_back\ndef Rsqrt(*, x, name=\"Rsqrt\"):\n return ivy.sqrt(ivy.reciprocal(x))\n\n\n@to_ivy_arrays_and_back\ndef MatrixInverse(*, input, adjoint=False, name=\"MatrixInverse\"):\n return ivy.inv(input, adjoint=adjoint)\n\n\nMatrixDeterminant = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.linalg.det))\n\n\nMax = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\"complex\",),\n },\n \"tensorflow\",\n )(\n map_raw_ops_alias(\n tf_frontend.math.reduce_max,\n kwargs_to_update={\n \"input\": \"input_tensor\",\n \"keep_dims\": \"keepdims\",\n },\n )\n )\n)\n\n\nMaximum = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\"complex\",),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.math.maximum))\n)\n\n\nMin = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\"complex\",),\n },\n \"tensorflow\",\n )(\n map_raw_ops_alias(\n tf_frontend.math.reduce_min,\n kwargs_to_update={\n \"input\": \"input_tensor\",\n \"keep_dims\": \"keepdims\",\n },\n )\n )\n)\n\n\n@to_ivy_arrays_and_back\ndef Minimum(*, x, y, name=\"Minimum\"):\n return ivy.minimum(x, y)\n\n\nMul = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.multiply))\n\n\nNeg = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.negative))\n\n\n@to_ivy_arrays_and_back\ndef NotEqual(*, x, y, incompatible_shape_error=True, name=\"NotEqual\"):\n x, y = check_tensorflow_casting(x, y)\n if incompatible_shape_error:\n return ivy.not_equal(x, y)\n\n try:\n return ivy.not_equal(x, y)\n except (ivy.utils.exceptions.IvyError, ivy.utils.exceptions.IvyBackendException):\n return ivy.array(True)\n\n\n@to_ivy_arrays_and_back\ndef NthElement(*, input, n, reverse=False, name=\"NthElement\"):\n return ivy.astype(ivy.sort(input, descending=reverse)[..., n], input.dtype)\n\n\n@to_ivy_arrays_and_back\ndef OnesLike(*, x, name=\"OnesLike\"):\n return ivy.ones_like(x)\n\n\n@to_ivy_arrays_and_back\ndef Pack(*, values, axis=0, name=\"Pack\"):\n return ivy.stack(values, axis=axis)\n\n\n@to_ivy_arrays_and_back\ndef Pad(*, input, paddings, name=\"Pad\"):\n return ivy.constant_pad(input, paddings.to_list())\n\n\n@to_ivy_arrays_and_back\ndef PadV2(*, input, paddings, constant_values, name=\"PadV2\"):\n return ivy.constant_pad(input, paddings.to_list(), value=constant_values)\n\n\nRelu = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\"complex\", \"float16\"),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.nn.relu))\n)\n\n\nRealDiv = to_ivy_arrays_and_back(\n 
with_supported_dtypes(\n {\n \"2.13.0 and below\": (\n \"complex\",\n \"bfloat16\",\n \"float16\",\n \"float64\",\n \"float32\",\n ),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.general_functions.realdiv))\n)\n\n\nReshape = to_ivy_arrays_and_back(\n map_raw_ops_alias(tf_frontend.general_functions.reshape)\n)\n\n\n@to_ivy_arrays_and_back\ndef RightShift(*, x, y, name=\"RightShift\"):\n return ivy.bitwise_right_shift(x, y)\n\n\n@to_ivy_arrays_and_back\ndef Round(*, x, name=\"Round\"):\n return ivy.round(x)\n\n\n@to_ivy_arrays_and_back\ndef Shape(*, input, output_type=ivy.int32, name=\"Shape\"):\n output_type = to_ivy_dtype(output_type)\n return ivy.astype(ivy.shape(input, as_array=True), output_type, copy=False)\n\n\nShapeN = to_ivy_arrays_and_back(\n map_raw_ops_alias(tf_frontend.general_functions.shape_n)\n)\n\n\nSin = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.sin))\n\n\n@to_ivy_arrays_and_back\ndef Sinh(*, x, name=\"Sinh\"):\n return ivy.sinh(x)\n\n\n@with_unsupported_dtypes(\n {\"2.13.0 and below\": (\"unsigned\",)},\n \"tensorflow\",\n)\n@to_ivy_arrays_and_back\ndef Sign(*, x, name=\"Sign\"):\n return ivy.sign(x, np_variant=False)\n\n\nSize = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.general_functions.size))\n\n\nSplit = to_ivy_arrays_and_back(\n map_raw_ops_alias(\n tf_frontend.split, kwargs_to_update={\"num_split\": \"num_or_size_splits\"}\n )\n)\n\n\n@to_ivy_arrays_and_back\ndef SplitV(*, value, size_splits, axis, num_split, name=\"SplitV\"):\n return ivy.split(value, num_or_size_splits=size_splits, axis=axis)\n\n\n@to_ivy_arrays_and_back\ndef Sqrt(*, x, name=\"Sqrt\"):\n return ivy.sqrt(x)\n\n\n@to_ivy_arrays_and_back\ndef Square(*, x, name=\"Square\"):\n return ivy.square(x)\n\n\nSqueeze = to_ivy_arrays_and_back(\n map_raw_ops_alias(tf_frontend.general_functions.squeeze)\n)\n\n\nSub = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.subtract))\n\n\n@to_ivy_arrays_and_back\ndef Sum(*, input, axis, keep_dims=False, name=\"Sum\"):\n return ivy.astype(ivy.sum(input, axis=axis, keepdims=keep_dims), input.dtype)\n\n\nTan = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.tan))\n\n\nTanh = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.tanh))\n\n\n@to_ivy_arrays_and_back\ndef TanhGrad(*, y, dy, name=\"TanhGrad\"):\n return ivy.multiply(dy, ivy.subtract(1, ivy.multiply(y, y)))\n\n\n@to_ivy_arrays_and_back\ndef Transpose(*, x, perm, name=\"Transpose\"):\n ret = ivy.permute_dims(x, axes=perm)\n return ret\n\n\nCumsum = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.cumsum))\n\n\n@to_ivy_arrays_and_back\ndef TruncateDiv(*, x, y, name=\"TruncateDiv\"):\n return ivy.astype(ivy.trunc_divide(x, y), x.dtype)\n\n\n@with_unsupported_dtypes({\"2.13.0 and below\": (\"float16\", \"bfloat16\")}, \"tensorflow\")\n@to_ivy_arrays_and_back\ndef Unpack(*, value, num, axis=0, name=\"Unpack\"):\n return ivy.unstack(value, axis=axis)[:num]\n\n\n@to_ivy_arrays_and_back\ndef ZerosLike(*, x, name=\"ZerosLike\"):\n return ivy.zeros_like(x)\n\n\nMean = to_ivy_arrays_and_back(\n map_raw_ops_alias(\n tf_frontend.math.reduce_mean,\n kwargs_to_update={\n \"input\": \"input_tensor\",\n \"keep_dims\": \"keepdims\",\n },\n )\n)\n\n\nPow = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.pow))\n\n\nRelu6 = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\"complex\", \"float16\"),\n },\n \"tensorflow\",\n )(\n map_raw_ops_alias(\n tf_frontend.nn.relu6,\n )\n )\n)\n\n\nSigmoid = to_ivy_arrays_and_back(\n 
map_raw_ops_alias(tf_frontend.keras.activations.sigmoid)\n)\n\n\nSoftmax = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\"float16\",),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.nn.softmax))\n)\n\n\n@to_ivy_arrays_and_back\ndef Softplus(*, features, name=\"Softplus\"):\n return ivy.softplus(features)\n\n\n@to_ivy_arrays_and_back\ndef Xdivy(*, x, y, name=\"Xdivy\"):\n if (x == 0).all():\n return 0.0\n return ivy.divide(x, y)\n\n\n@with_unsupported_dtypes({\"2.13.0 and below\": (\"bfloat16\",)}, \"tensorflow\")\n@to_ivy_arrays_and_back\ndef Xlog1py(*, x, y, name=\"Xlog1py\"):\n if (x == 0).all():\n return 0.0\n return ivy.multiply(x, ivy.log1p(y))\n\n\nXlogy = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.xlogy))\n\n\n@to_ivy_arrays_and_back\ndef EuclideanNorm(*, input, axis, keep_dims=False, name=\"EuclideanNorm\"):\n return ivy.astype(\n ivy.vector_norm(input, axis=axis, keepdims=keep_dims), input.dtype\n )\n\n\nConcatV2 = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.concat))\n\n\ndef _tf_to_ivy_ivy_arguments_for_conv(\n padding, ex_pading, strides, dilations, data_format\n):\n if data_format.find(\"C\") == 1:\n strides = strides[2:]\n dilations = dilations[2:]\n data_format = \"channel_first\"\n pad_index = [4, 8]\n else:\n strides = strides[1:-1]\n dilations = dilations[1:-1]\n data_format = \"channel_last\"\n pad_index = [2, 6]\n if padding == \"EXPLICIT\":\n padding = [\n (ex_pading[i], ex_pading[i + 1])\n for i in range(pad_index[0], pad_index[1], 2)\n ]\n return padding, strides, dilations, data_format\n\n\n@to_ivy_arrays_and_back\ndef Conv2D(\n *,\n input,\n filter,\n strides,\n padding,\n use_cudnn_on_gpu,\n explicit_paddings,\n data_format=\"NHWC\",\n dilations=[1, 1, 1, 1],\n name=\"Conv2D\",\n):\n padding, strides, dilations, data_format = _tf_to_ivy_ivy_arguments_for_conv(\n padding, explicit_paddings, strides, dilations, data_format\n )\n return ivy.conv_general_dilated(\n input,\n filter,\n strides,\n padding,\n data_format=data_format,\n dilations=dilations,\n dims=2,\n )\n\n\n@to_ivy_arrays_and_back\ndef Conv3D(\n *,\n input,\n filter,\n strides,\n padding,\n data_format=\"NDHWC\",\n dilations=[1, 1, 1, 1, 1],\n name=\"Conv3D\",\n):\n # ivy.backends.tensorflow expects strides and dilations to be\n # a single integer value or a list of 3 values whereas the raw op\n # expects a list of 5 values\n if data_format == \"NDHWC\":\n strides = strides[1:-1]\n dilations = dilations[1:-1]\n elif data_format == \"NCDHW\":\n strides = strides[2:]\n dilations = dilations[2:]\n\n return tf_frontend.nn.conv3d(\n input,\n filter,\n strides,\n padding,\n data_format=data_format,\n dilations=dilations,\n name=name,\n )\n\n\n@to_ivy_arrays_and_back\ndef Elu(features, name=None):\n zeros = ivy.zeros_like(features, dtype=ivy.dtype(features))\n ones = ivy.ones_like(features, dtype=ivy.dtype(features))\n ret_val = ivy.where(\n # if x > 0 => x; else e^x - 1\n features > zeros,\n features,\n ivy.subtract(ivy.exp(features), ones),\n )\n return ret_val\n\n\nElu.supported_dtypes = {\n \"numpy\": (\n \"float16\",\n \"float32\",\n \"float64\",\n ),\n \"tensorflow\": (\n \"bfloat16\",\n \"float16\",\n \"float32\",\n \"float64\",\n ),\n \"torch\": (\n \"bfloat16\",\n \"float32\",\n \"float64\",\n ),\n \"jax\": (\n \"bfloat16\",\n \"float16\",\n \"float32\",\n \"float64\",\n ),\n}\n\n\n@to_ivy_arrays_and_back\ndef LinSpace(*, start, stop, num, name=None):\n return ivy.linspace(start, stop, num)\n\n\nRoll = 
to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.roll))\n\n\n@to_ivy_arrays_and_back\ndef CumulativeLogsumexp(\n x, axis, exclusive=False, reverse=False, name=\"CumulativeLogsumexp\"\n):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef Complex(real, imag, Tout=ivy.complex64, name=\"Complex\"):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef AccumulateNV2(inputs, shape, name=\"AccumulateNV2\"):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef DebugGradientIdentity(input, name=\"DebugGradientIdentity\"):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef Real(input, Tout=ivy.float32, name=\"Real\"):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef BandedTriangularSolve(\n matrix,\n rhs,\n lower=True,\n adjoint=False,\n name=\"BandedTriangularSolve\",\n):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef BatchMatMul(x, y, adj_x=False, adj_y=False, name=\"BatchMatMul\"):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef BatchMatMulV2(x, y, adj_x=False, adj_y=False, name=\"BatchMatMulV2\"):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef BatchMatMulV3(x, y, Tout=ivy.Dtype, adj_x=False, adj_y=False, name=\"BatchMatMulV3\"):\n # TODO\n raise IvyNotImplementedException\n\n\nSlice = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.slice))\n\nLeakyRelu = to_ivy_arrays_and_back(\n map_raw_ops_alias(\n tf_frontend.nn.leaky_relu,\n )\n)\n\nLeakyRelu.supported_dtypes = {\n \"numpy\": (\n \"float32\",\n \"float64\",\n ),\n \"tensorflow\": (\n \"bfloat16\",\n \"float16\",\n \"float32\",\n \"float64\",\n ),\n \"torch\": (\n \"float32\",\n \"float64\",\n ),\n \"jax\": (\n \"bfloat16\",\n \"float16\",\n \"float32\",\n \"float64\",\n ),\n}\n\n\n@to_ivy_arrays_and_back\ndef Prod(*, input, axis, keep_dims=False, name=\"Prod\"):\n return ivy.astype(ivy.prod(input, axis=axis, keepdims=keep_dims), input.dtype)\n\n\nZeta = to_ivy_arrays_and_back(\n with_supported_dtypes(\n {\n \"2.13.0 and below\": (\"float32\", \"float64\"),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.math.zeta))\n)\n\n\n@to_ivy_arrays_and_back\ndef Imag(\n *,\n input,\n Tout=ivy.float32,\n name=\"Imag\",\n):\n Tout = ivy.as_ivy_dtype(Tout) if Tout is not None else ivy.float32\n return ivy.astype(ivy.imag(input), Tout)\n\n\nImag.supported_dtypes = {\n \"tensorflow\": (\n \"complex64\",\n \"complex128\",\n ),\n}\n\n\n@to_ivy_arrays_and_back\ndef Svd(*, input, full_matrices=False, compute_uv=True, name=None):\n return ivy.svd(input, compute_uv=compute_uv, full_matrices=full_matrices)\n\n\nSvd.supported_dtypes = {\n \"tensorflow\": (\n \"float64\",\n \"float128\",\n \"halfcomplex64\",\n \"complex128\",\n ),\n}\n", "path": "ivy/functional/frontends/tensorflow/raw_ops.py"}], "after_files": [{"content": "# global\nimport ivy\nimport ivy.functional.frontends.tensorflow as tf_frontend\nfrom ivy.functional.frontends.tensorflow import check_tensorflow_casting\nfrom ivy.functional.frontends.tensorflow.func_wrapper import (\n to_ivy_arrays_and_back,\n map_raw_ops_alias,\n to_ivy_dtype,\n)\n\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\nfrom ivy.utils.exceptions import IvyNotImplementedException\n\nAddN = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.add_n))\n\n\nAcos = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.acos))\n\n\nAcosh = 
to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.acosh))\n\n\nAdd = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.add))\n\n\nArgMax = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\"2.13.0 and below\": (\"complex\",)},\n \"tensorflow\",\n )(\n map_raw_ops_alias(\n tf_frontend.math.argmax, kwargs_to_update={\"dimension\": \"axis\"}\n )\n )\n)\n\n\nAddV2 = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.add))\n\n\nAtan2 = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\"2.13.0 and below\": \"float16\"},\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.math.atan2))\n)\n\n\n@with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\n \"float16\",\n \"bool\",\n \"bfloat16\",\n )\n },\n \"tensorflow\",\n)\n@to_ivy_arrays_and_back\ndef ApproximateEqual(\n *,\n x,\n y,\n tolerance=1e-05,\n name=\"ApproximateEqual\",\n):\n x, y = check_tensorflow_casting(x, y)\n return ivy.abs(x - y) < tolerance\n\n\n@to_ivy_arrays_and_back\ndef Angle(\n *,\n input,\n Tout=ivy.float32,\n name=\"Angle\",\n):\n Tout = ivy.as_ivy_dtype(Tout) if Tout is not None else ivy.float32\n return ivy.astype(ivy.angle(input), Tout)\n\n\nArgMin = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\"2.13.0 and below\": (\"complex\",)},\n \"tensorflow\",\n )(\n map_raw_ops_alias(\n tf_frontend.math.argmin, kwargs_to_update={\"dimension\": \"axis\"}\n )\n )\n)\n\n\nAsin = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.asin))\n\n\nAtan = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.atan))\n\n\n@to_ivy_arrays_and_back\ndef Atanh(*, x, name=\"Atanh\"):\n return ivy.atanh(x)\n\n\n@to_ivy_arrays_and_back\ndef BitwiseAnd(*, x, y, name=\"BitwiseAnd\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.bitwise_and(x, y)\n\n\n@to_ivy_arrays_and_back\ndef BitwiseOr(*, x, y, name=\"BitwiseOr\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.bitwise_or(x, y)\n\n\n@to_ivy_arrays_and_back\ndef BitwiseXor(*, x, y, name=\"BitwiseXor\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.bitwise_xor(x, y)\n\n\n@to_ivy_arrays_and_back\ndef BroadcastTo(*, input, shape, name=\"BroadcastTo\"):\n return ivy.broadcast_to(input, shape=shape)\n\n\n@to_ivy_arrays_and_back\ndef Cholesky(*, input, name=\"Cholesky\"):\n return ivy.astype(ivy.cholesky(input), input.dtype)\n\n\n@to_ivy_arrays_and_back\ndef Ceil(*, x, name=None):\n return ivy.ceil(x)\n\n\n@to_ivy_arrays_and_back\ndef Concat(*, concat_dim, values, name=\"Concat\"):\n return ivy.concat(values, axis=concat_dim)\n\n\nCos = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.cos))\n\n\nCosh = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.cosh))\n\n\n@to_ivy_arrays_and_back\ndef Cross(*, a, b, name=\"Cross\"):\n a, b = check_tensorflow_casting(a, b)\n return ivy.cross(a, b)\n\n\n@to_ivy_arrays_and_back\ndef Cosh(*, x, name=\"Cosh\"):\n return ivy.cosh(x)\n\n\nDiv = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.divide))\n\n\n@to_ivy_arrays_and_back\ndef Diag(*, diagonal, name=\"Diag\"):\n return ivy.astype(ivy.diag(diagonal), diagonal.dtype)\n\n\nCumprod = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.cumprod))\n\n\n@to_ivy_arrays_and_back\ndef Equal(*, x, y, incompatible_shape_error=True, name=\"Equal\"):\n x, y = check_tensorflow_casting(x, y)\n if incompatible_shape_error:\n return ivy.equal(x, y)\n\n try:\n return ivy.equal(x, y)\n except (ivy.utils.exceptions.IvyError, ivy.utils.exceptions.IvyBackendException):\n return ivy.array(False)\n\n\n@to_ivy_arrays_and_back\ndef 
Exp(*, x, name=\"Exp\"):\n return ivy.exp(x)\n\n\n@to_ivy_arrays_and_back\ndef Expm1(*, x, name=\"Expm1\"):\n return ivy.expm1(x)\n\n\n@to_ivy_arrays_and_back\ndef Fill(*, dims, value, name=\"Full\"):\n return ivy.full(dims, value)\n\n\n@to_ivy_arrays_and_back\ndef Floor(*, x, name=\"Floor\"):\n return ivy.floor(x)\n\n\n@to_ivy_arrays_and_back\ndef FloorDiv(*, x, y, name=\"FloorDiv\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.floor_divide(x, y)\n\n\n@to_ivy_arrays_and_back\ndef FloorMod(*, x, y, name=\"FloorMod\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.remainder(x, y)\n\n\n@to_ivy_arrays_and_back\ndef FFT(*, input, name=\"FFT\"):\n return ivy.astype(ivy.fft(input, -1), input.dtype)\n\n\n@to_ivy_arrays_and_back\ndef Gather(*, params, indices, validate_indices=None, name=\"Gather\"):\n return ivy.gather(params, indices, axis=0, batch_dims=0)\n\n\n@to_ivy_arrays_and_back\ndef Greater(*, x, y, name=\"Greater\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.greater(x, y)\n\n\n@to_ivy_arrays_and_back\ndef GreaterEqual(*, x, y, name=\"GreaterEqual\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.greater_equal(x, y)\n\n\nIdentity = to_ivy_arrays_and_back(\n map_raw_ops_alias(tf_frontend.general_functions.identity)\n)\n\n\nIdentityN = to_ivy_arrays_and_back(\n map_raw_ops_alias(tf_frontend.general_functions.identity_n)\n)\n\n\n@to_ivy_arrays_and_back\ndef Inv(*, x, name=\"Inv\"):\n return ivy.astype(ivy.reciprocal(x), x.dtype)\n\n\nReciprocal = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.reciprocal))\n\n\n@to_ivy_arrays_and_back\ndef Reverse(*, tensor, dims, name=\"Reverse\"):\n ret = tensor\n for dim in enumerate(dims):\n if dim[1]:\n ret = ivy.flip(ret, axis=dim[0])\n return ret\n\n\n@to_ivy_arrays_and_back\ndef Invert(*, x, name=\"Invert\"):\n return ivy.bitwise_invert(x)\n\n\n@to_ivy_arrays_and_back\ndef InvGrad(*, y, dy, name=\"InvGrad\"):\n return ivy.multiply(ivy.negative(dy), ivy.multiply(y, y))\n\n\n@to_ivy_arrays_and_back\ndef LeftShift(*, x, y, name=\"LeftShift\"):\n return ivy.bitwise_left_shift(x, y)\n\n\n@to_ivy_arrays_and_back\ndef Less(*, x, y, name=\"Less\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.less(x, y)\n\n\nLessEqual = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\"complex\",),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.math.less_equal))\n)\n\n\n@to_ivy_arrays_and_back\ndef Log(*, x, name=\"Log\"):\n return ivy.log(x)\n\n\nLog1p = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.log1p))\n\nLogSoftmax = to_ivy_arrays_and_back(\n with_supported_dtypes(\n {\n \"2.13.0 and below\": (\n \"bfloat16\",\n \"float32\",\n \"float64\",\n ),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.math.log_softmax))\n)\n\nLogicalOr = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.logical_or))\n\n\n@to_ivy_arrays_and_back\ndef LogicalNot(*, x, name=\"LogicalNot\"):\n return ivy.logical_not(x)\n\n\n@to_ivy_arrays_and_back\ndef MatMul(*, a, b, transpose_a=False, transpose_b=False, name=\"MatMul\"):\n a, b = check_tensorflow_casting(a, b)\n return ivy.matmul(a, b, transpose_a=transpose_a, transpose_b=transpose_b)\n\n\n@to_ivy_arrays_and_back\ndef Rsqrt(*, x, name=\"Rsqrt\"):\n return ivy.sqrt(ivy.reciprocal(x))\n\n\n@to_ivy_arrays_and_back\ndef MatrixInverse(*, input, adjoint=False, name=\"MatrixInverse\"):\n return ivy.inv(input, adjoint=adjoint)\n\n\nMatrixDeterminant = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.linalg.det))\n\n\nMax = 
to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\"complex\",),\n },\n \"tensorflow\",\n )(\n map_raw_ops_alias(\n tf_frontend.math.reduce_max,\n kwargs_to_update={\n \"input\": \"input_tensor\",\n \"keep_dims\": \"keepdims\",\n },\n )\n )\n)\n\n\nMaximum = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\"complex\",),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.math.maximum))\n)\n\n\nMin = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\"complex\",),\n },\n \"tensorflow\",\n )(\n map_raw_ops_alias(\n tf_frontend.math.reduce_min,\n kwargs_to_update={\n \"input\": \"input_tensor\",\n \"keep_dims\": \"keepdims\",\n },\n )\n )\n)\n\n\n@to_ivy_arrays_and_back\ndef Minimum(*, x, y, name=\"Minimum\"):\n return ivy.minimum(x, y)\n\n\nMul = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.multiply))\n\n\nNeg = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.negative))\n\n\n@to_ivy_arrays_and_back\ndef NotEqual(*, x, y, incompatible_shape_error=True, name=\"NotEqual\"):\n x, y = check_tensorflow_casting(x, y)\n if incompatible_shape_error:\n return ivy.not_equal(x, y)\n\n try:\n return ivy.not_equal(x, y)\n except (ivy.utils.exceptions.IvyError, ivy.utils.exceptions.IvyBackendException):\n return ivy.array(True)\n\n\n@to_ivy_arrays_and_back\ndef NthElement(*, input, n, reverse=False, name=\"NthElement\"):\n return ivy.astype(ivy.sort(input, descending=reverse)[..., n], input.dtype)\n\n\n@to_ivy_arrays_and_back\ndef OnesLike(*, x, name=\"OnesLike\"):\n return ivy.ones_like(x)\n\n\n@to_ivy_arrays_and_back\ndef Pack(*, values, axis=0, name=\"Pack\"):\n return ivy.stack(values, axis=axis)\n\n\n@to_ivy_arrays_and_back\ndef Pad(*, input, paddings, name=\"Pad\"):\n return ivy.constant_pad(input, paddings.to_list())\n\n\n@to_ivy_arrays_and_back\ndef PadV2(*, input, paddings, constant_values, name=\"PadV2\"):\n return ivy.constant_pad(input, paddings.to_list(), value=constant_values)\n\n\nRelu = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\"complex\", \"float16\"),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.nn.relu))\n)\n\n\nRealDiv = to_ivy_arrays_and_back(\n with_supported_dtypes(\n {\n \"2.13.0 and below\": (\n \"complex\",\n \"bfloat16\",\n \"float16\",\n \"float64\",\n \"float32\",\n ),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.general_functions.realdiv))\n)\n\n\nReshape = to_ivy_arrays_and_back(\n map_raw_ops_alias(tf_frontend.general_functions.reshape)\n)\n\n\n@to_ivy_arrays_and_back\ndef RightShift(*, x, y, name=\"RightShift\"):\n return ivy.bitwise_right_shift(x, y)\n\n\n@to_ivy_arrays_and_back\ndef Round(*, x, name=\"Round\"):\n return ivy.round(x)\n\n\n@to_ivy_arrays_and_back\ndef Shape(*, input, output_type=ivy.int32, name=\"Shape\"):\n output_type = to_ivy_dtype(output_type)\n return ivy.astype(ivy.shape(input, as_array=True), output_type, copy=False)\n\n\nShapeN = to_ivy_arrays_and_back(\n map_raw_ops_alias(tf_frontend.general_functions.shape_n)\n)\n\n\nSin = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.sin))\n\n\n@to_ivy_arrays_and_back\ndef Sinh(*, x, name=\"Sinh\"):\n return ivy.sinh(x)\n\n\n@with_unsupported_dtypes(\n {\"2.13.0 and below\": (\"unsigned\",)},\n \"tensorflow\",\n)\n@to_ivy_arrays_and_back\ndef Sign(*, x, name=\"Sign\"):\n return ivy.sign(x, np_variant=False)\n\n\nSize = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.general_functions.size))\n\n\nSplit = 
to_ivy_arrays_and_back(\n map_raw_ops_alias(\n tf_frontend.split, kwargs_to_update={\"num_split\": \"num_or_size_splits\"}\n )\n)\n\n\n@to_ivy_arrays_and_back\ndef SplitV(*, value, size_splits, axis, num_split, name=\"SplitV\"):\n return ivy.split(value, num_or_size_splits=size_splits, axis=axis)\n\n\n@to_ivy_arrays_and_back\ndef Sqrt(*, x, name=\"Sqrt\"):\n return ivy.sqrt(x)\n\n\n@to_ivy_arrays_and_back\ndef Square(*, x, name=\"Square\"):\n return ivy.square(x)\n\n\nSqueeze = to_ivy_arrays_and_back(\n map_raw_ops_alias(tf_frontend.general_functions.squeeze)\n)\n\n\nSub = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.subtract))\n\n\n@to_ivy_arrays_and_back\ndef Sum(*, input, axis, keep_dims=False, name=\"Sum\"):\n return ivy.astype(ivy.sum(input, axis=axis, keepdims=keep_dims), input.dtype)\n\n\nTan = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.tan))\n\n\nTanh = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.tanh))\n\n\n@to_ivy_arrays_and_back\ndef TanhGrad(*, y, dy, name=\"TanhGrad\"):\n return ivy.multiply(dy, ivy.subtract(1, ivy.multiply(y, y)))\n\n\n@to_ivy_arrays_and_back\ndef Transpose(*, x, perm, name=\"Transpose\"):\n ret = ivy.permute_dims(x, axes=perm)\n return ret\n\n\nCumsum = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.cumsum))\n\n\n@to_ivy_arrays_and_back\ndef TruncateDiv(*, x, y, name=\"TruncateDiv\"):\n return ivy.astype(ivy.trunc_divide(x, y), x.dtype)\n\n\n@with_unsupported_dtypes({\"2.13.0 and below\": (\"float16\", \"bfloat16\")}, \"tensorflow\")\n@to_ivy_arrays_and_back\ndef Unpack(*, value, num, axis=0, name=\"Unpack\"):\n return ivy.unstack(value, axis=axis)[:num]\n\n\n@to_ivy_arrays_and_back\ndef ZerosLike(*, x, name=\"ZerosLike\"):\n return ivy.zeros_like(x)\n\n\nMean = to_ivy_arrays_and_back(\n map_raw_ops_alias(\n tf_frontend.math.reduce_mean,\n kwargs_to_update={\n \"input\": \"input_tensor\",\n \"keep_dims\": \"keepdims\",\n },\n )\n)\n\n\nPow = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.pow))\n\n\nRelu6 = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\"complex\", \"float16\"),\n },\n \"tensorflow\",\n )(\n map_raw_ops_alias(\n tf_frontend.nn.relu6,\n )\n )\n)\n\n\nSigmoid = to_ivy_arrays_and_back(\n map_raw_ops_alias(tf_frontend.keras.activations.sigmoid)\n)\n\n\nSoftmax = to_ivy_arrays_and_back(\n with_unsupported_dtypes(\n {\n \"2.13.0 and below\": (\"float16\",),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.nn.softmax))\n)\n\n\n@to_ivy_arrays_and_back\ndef Softplus(*, features, name=\"Softplus\"):\n return ivy.softplus(features)\n\n\n@to_ivy_arrays_and_back\ndef Xdivy(*, x, y, name=\"Xdivy\"):\n if (x == 0).all():\n return 0.0\n return ivy.divide(x, y)\n\n\n@with_unsupported_dtypes({\"2.13.0 and below\": (\"bfloat16\",)}, \"tensorflow\")\n@to_ivy_arrays_and_back\ndef Xlog1py(*, x, y, name=\"Xlog1py\"):\n if (x == 0).all():\n return 0.0\n return ivy.multiply(x, ivy.log1p(y))\n\n\nXlogy = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.xlogy))\n\n\n@to_ivy_arrays_and_back\ndef EuclideanNorm(*, input, axis, keep_dims=False, name=\"EuclideanNorm\"):\n return ivy.astype(\n ivy.vector_norm(input, axis=axis, keepdims=keep_dims), input.dtype\n )\n\n\nConcatV2 = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.concat))\n\n\ndef _tf_to_ivy_ivy_arguments_for_conv(\n padding, ex_pading, strides, dilations, data_format\n):\n if data_format.find(\"C\") == 1:\n strides = strides[2:]\n dilations = dilations[2:]\n data_format = \"channel_first\"\n 
pad_index = [4, 8]\n else:\n strides = strides[1:-1]\n dilations = dilations[1:-1]\n data_format = \"channel_last\"\n pad_index = [2, 6]\n if padding == \"EXPLICIT\":\n padding = [\n (ex_pading[i], ex_pading[i + 1])\n for i in range(pad_index[0], pad_index[1], 2)\n ]\n return padding, strides, dilations, data_format\n\n\n@to_ivy_arrays_and_back\ndef Conv2D(\n *,\n input,\n filter,\n strides,\n padding,\n use_cudnn_on_gpu,\n explicit_paddings,\n data_format=\"NHWC\",\n dilations=[1, 1, 1, 1],\n name=\"Conv2D\",\n):\n padding, strides, dilations, data_format = _tf_to_ivy_ivy_arguments_for_conv(\n padding, explicit_paddings, strides, dilations, data_format\n )\n return ivy.conv_general_dilated(\n input,\n filter,\n strides,\n padding,\n data_format=data_format,\n dilations=dilations,\n dims=2,\n )\n\n\n@to_ivy_arrays_and_back\ndef Conv3D(\n *,\n input,\n filter,\n strides,\n padding,\n data_format=\"NDHWC\",\n dilations=[1, 1, 1, 1, 1],\n name=\"Conv3D\",\n):\n # ivy.backends.tensorflow expects strides and dilations to be\n # a single integer value or a list of 3 values whereas the raw op\n # expects a list of 5 values\n if data_format == \"NDHWC\":\n strides = strides[1:-1]\n dilations = dilations[1:-1]\n elif data_format == \"NCDHW\":\n strides = strides[2:]\n dilations = dilations[2:]\n\n return tf_frontend.nn.conv3d(\n input,\n filter,\n strides,\n padding,\n data_format=data_format,\n dilations=dilations,\n name=name,\n )\n\n\n@to_ivy_arrays_and_back\ndef Elu(features, name=None):\n zeros = ivy.zeros_like(features, dtype=ivy.dtype(features))\n ones = ivy.ones_like(features, dtype=ivy.dtype(features))\n ret_val = ivy.where(\n # if x > 0 => x; else e^x - 1\n features > zeros,\n features,\n ivy.subtract(ivy.exp(features), ones),\n )\n return ret_val\n\n\nElu.supported_dtypes = {\n \"numpy\": (\n \"float16\",\n \"float32\",\n \"float64\",\n ),\n \"tensorflow\": (\n \"bfloat16\",\n \"float16\",\n \"float32\",\n \"float64\",\n ),\n \"torch\": (\n \"bfloat16\",\n \"float32\",\n \"float64\",\n ),\n \"jax\": (\n \"bfloat16\",\n \"float16\",\n \"float32\",\n \"float64\",\n ),\n}\n\n\n@to_ivy_arrays_and_back\ndef LinSpace(*, start, stop, num, name=None):\n return ivy.linspace(start, stop, num)\n\n\nRoll = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.roll))\n\n\n@to_ivy_arrays_and_back\ndef CumulativeLogsumexp(\n x, axis, exclusive=False, reverse=False, name=\"CumulativeLogsumexp\"\n):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef Complex(real, imag, Tout=ivy.complex64, name=\"Complex\"):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef AccumulateNV2(inputs, shape, name=\"AccumulateNV2\"):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef DebugGradientIdentity(input, name=\"DebugGradientIdentity\"):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef Real(input, Tout=ivy.float32, name=\"Real\"):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef BandedTriangularSolve(\n matrix,\n rhs,\n lower=True,\n adjoint=False,\n name=\"BandedTriangularSolve\",\n):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef BatchMatMul(x, y, adj_x=False, adj_y=False, name=\"BatchMatMul\"):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef BatchMatMulV2(x, y, adj_x=False, adj_y=False, name=\"BatchMatMulV2\"):\n # TODO\n raise IvyNotImplementedException\n\n\n@to_ivy_arrays_and_back\ndef BatchMatMulV3(x, y, Tout=ivy.Dtype, 
adj_x=False, adj_y=False, name=\"BatchMatMulV3\"):\n # TODO\n raise IvyNotImplementedException\n\n\nSlice = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.slice))\n\nLeakyRelu = to_ivy_arrays_and_back(\n map_raw_ops_alias(\n tf_frontend.nn.leaky_relu,\n )\n)\n\nLeakyRelu.supported_dtypes = {\n \"numpy\": (\n \"float32\",\n \"float64\",\n ),\n \"tensorflow\": (\n \"bfloat16\",\n \"float16\",\n \"float32\",\n \"float64\",\n ),\n \"torch\": (\n \"float32\",\n \"float64\",\n ),\n \"jax\": (\n \"bfloat16\",\n \"float16\",\n \"float32\",\n \"float64\",\n ),\n}\n\n\n@to_ivy_arrays_and_back\ndef Prod(*, input, axis, keep_dims=False, name=\"Prod\"):\n return ivy.astype(ivy.prod(input, axis=axis, keepdims=keep_dims), input.dtype)\n\n\nZeta = to_ivy_arrays_and_back(\n with_supported_dtypes(\n {\n \"2.13.0 and below\": (\"float32\", \"float64\"),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.math.zeta))\n)\n\n\n@to_ivy_arrays_and_back\ndef Imag(\n *,\n input,\n Tout=ivy.float32,\n name=\"Imag\",\n):\n Tout = ivy.as_ivy_dtype(Tout) if Tout is not None else ivy.float32\n return ivy.astype(ivy.imag(input), Tout)\n\n\nImag.supported_dtypes = {\n \"tensorflow\": (\n \"complex64\",\n \"complex128\",\n ),\n}\n\n\n@to_ivy_arrays_and_back\ndef Svd(*, input, full_matrices=False, compute_uv=True, name=None):\n return ivy.svd(input, compute_uv=compute_uv, full_matrices=full_matrices)\n\n\nSvd.supported_dtypes = {\n \"tensorflow\": (\n \"float64\",\n \"float128\",\n \"halfcomplex64\",\n \"complex128\",\n ),\n}\n\n\nEinsum = to_ivy_arrays_and_back(\n with_supported_dtypes(\n {\n \"2.13.0 and below\": (\n \"bfloat16\",\n \"complex128 \",\n \"complex64\",\n \"float64\",\n \"float32\",\n \"float16\",\n \"int64\",\n \"int32\"\n ),\n },\n \"tensorflow\",\n )(map_raw_ops_alias(tf_frontend.general_functions.einsum))\n)\n", "path": "ivy/functional/frontends/tensorflow/raw_ops.py"}]} |
gh_patches_debug_1226 | rasdani/github-patches | git_diff | jazzband__django-oauth-toolkit-948 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Token introspection always uses time-zone-aware datetimes
Hi,
I'm attempting to use the new token introspection and ran into a bit of a snag. Our app is using USE_TZ=False, which results in the following:
```
File "lib/python3.6/site-packages/rest_framework/views.py", line 489, in dispatch
response = self.handle_exception(exc)
File "lib/python3.6/site-packages/rest_framework/views.py", line 449, in handle_exception
self.raise_uncaught_exception(exc)
File "lib/python3.6/site-packages/rest_framework/views.py", line 477, in dispatch
self.initial(request, *args, **kwargs)
File "lib/python3.6/site-packages/rest_framework/views.py", line 394, in initial
self.perform_authentication(request)
File "lib/python3.6/site-packages/rest_framework/views.py", line 320, in perform_authentication
request.user
File "lib/python3.6/site-packages/rest_framework/request.py", line 381, in __getattribute__
return super(Request, self).__getattribute__(attr)
File "lib/python3.6/site-packages/rest_framework/request.py", line 196, in user
self._authenticate()
File "lib/python3.6/site-packages/rest_framework/request.py", line 345, in _authenticate
user_auth_tuple = authenticator.authenticate(self)
File "lib/python3.6/site-packages/oauth2_provider/contrib/rest_framework/authentication.py", line 18, in authenticate
valid, r = oauthlib_core.verify_request(request, scopes=[])
File "lib/python3.6/site-packages/oauth2_provider/oauth2_backends.py", line 168, in verify_request
valid, r = self.server.verify_request(uri, http_method, body, headers, scopes=scopes)
File "lib/python3.6/site-packages/oauthlib/oauth2/rfc6749/endpoints/base.py", line 64, in wrapper
return f(endpoint, uri, *args, **kwargs)
File "lib/python3.6/site-packages/oauthlib/oauth2/rfc6749/endpoints/resource.py", line 75, in verify_request
return token_type_handler.validate_request(request), request
File "lib/python3.6/site-packages/oauthlib/oauth2/rfc6749/tokens.py", line 297, in validate_request
token, request.scopes, request)
File "lib/python3.6/site-packages/oauth2_provider/oauth2_validators.py", line 340, in validate_bearer_token
if access_token and access_token.is_valid(scopes):
File "lib/python3.6/site-packages/oauth2_provider/models.py", line 250, in is_valid
return not self.is_expired() and self.allow_scopes(scopes)
File "lib/python3.6/site-packages/oauth2_provider/models.py", line 259, in is_expired
return timezone.now() >= self.expires
TypeError: can't compare offset-naive and offset-aware datetimes
```
I discovered that OAuth2Validator._get_token_from_authentication_server() is unconditionally calling django.utils.timezone.make_aware() on the expiry time:
https://github.com/evonove/django-oauth-toolkit/blob/fa33444e81b3c95432999d51cdb2acdb98fc16bf/oauth2_provider/oauth2_validators.py#L281-L281
Is there any reason for this?
Thanks
Edit: I think we're just going to turn on USE_TZ. Still, this could definitely be handled more gracefully.
Token introspection always uses time-zone-aware datetimes
Hi,
I'm attempting to use the new token introspection and ran into a bit of a snag. Our app is using USE_TZ=False, which results in the following:
```
File "lib/python3.6/site-packages/rest_framework/views.py", line 489, in dispatch
response = self.handle_exception(exc)
File "lib/python3.6/site-packages/rest_framework/views.py", line 449, in handle_exception
self.raise_uncaught_exception(exc)
File "lib/python3.6/site-packages/rest_framework/views.py", line 477, in dispatch
self.initial(request, *args, **kwargs)
File "lib/python3.6/site-packages/rest_framework/views.py", line 394, in initial
self.perform_authentication(request)
File "lib/python3.6/site-packages/rest_framework/views.py", line 320, in perform_authentication
request.user
File "lib/python3.6/site-packages/rest_framework/request.py", line 381, in __getattribute__
return super(Request, self).__getattribute__(attr)
File "lib/python3.6/site-packages/rest_framework/request.py", line 196, in user
self._authenticate()
File "lib/python3.6/site-packages/rest_framework/request.py", line 345, in _authenticate
user_auth_tuple = authenticator.authenticate(self)
File "lib/python3.6/site-packages/oauth2_provider/contrib/rest_framework/authentication.py", line 18, in authenticate
valid, r = oauthlib_core.verify_request(request, scopes=[])
File "lib/python3.6/site-packages/oauth2_provider/oauth2_backends.py", line 168, in verify_request
valid, r = self.server.verify_request(uri, http_method, body, headers, scopes=scopes)
File "lib/python3.6/site-packages/oauthlib/oauth2/rfc6749/endpoints/base.py", line 64, in wrapper
return f(endpoint, uri, *args, **kwargs)
File "lib/python3.6/site-packages/oauthlib/oauth2/rfc6749/endpoints/resource.py", line 75, in verify_request
return token_type_handler.validate_request(request), request
File "lib/python3.6/site-packages/oauthlib/oauth2/rfc6749/tokens.py", line 297, in validate_request
token, request.scopes, request)
File "lib/python3.6/site-packages/oauth2_provider/oauth2_validators.py", line 340, in validate_bearer_token
if access_token and access_token.is_valid(scopes):
File "lib/python3.6/site-packages/oauth2_provider/models.py", line 250, in is_valid
return not self.is_expired() and self.allow_scopes(scopes)
File "lib/python3.6/site-packages/oauth2_provider/models.py", line 259, in is_expired
return timezone.now() >= self.expires
TypeError: can't compare offset-naive and offset-aware datetimes
```
I discovered that OAuth2Validator._get_token_from_authentication_server() is unconditionally calling django.utils.timezone.make_aware() on the expiry time:
https://github.com/evonove/django-oauth-toolkit/blob/fa33444e81b3c95432999d51cdb2acdb98fc16bf/oauth2_provider/oauth2_validators.py#L281-L281
Is there any reason for this?
Thanks
Edit: I think we're just going to turn on USE_TZ. Still, this could definitely be handled more gracefully.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `oauth2_provider/oauth2_validators.py`
Content:
```
1 import base64
2 import binascii
3 import http.client
4 import json
5 import logging
6 import uuid
7 from collections import OrderedDict
8 from datetime import datetime, timedelta
9 from urllib.parse import unquote_plus
10
11 import requests
12 from django.conf import settings
13 from django.contrib.auth import authenticate, get_user_model
14 from django.core.exceptions import ObjectDoesNotExist
15 from django.db import transaction
16 from django.db.models import Q
17 from django.utils import dateformat, timezone
18 from django.utils.timezone import make_aware
19 from django.utils.translation import gettext_lazy as _
20 from jwcrypto import jws, jwt
21 from jwcrypto.common import JWException
22 from jwcrypto.jwt import JWTExpired
23 from oauthlib.oauth2.rfc6749 import utils
24 from oauthlib.openid import RequestValidator
25
26 from .exceptions import FatalClientError
27 from .models import (
28 AbstractApplication,
29 get_access_token_model,
30 get_application_model,
31 get_grant_model,
32 get_id_token_model,
33 get_refresh_token_model,
34 )
35 from .scopes import get_scopes_backend
36 from .settings import oauth2_settings
37
38
39 log = logging.getLogger("oauth2_provider")
40
41 GRANT_TYPE_MAPPING = {
42 "authorization_code": (
43 AbstractApplication.GRANT_AUTHORIZATION_CODE,
44 AbstractApplication.GRANT_OPENID_HYBRID,
45 ),
46 "password": (AbstractApplication.GRANT_PASSWORD,),
47 "client_credentials": (AbstractApplication.GRANT_CLIENT_CREDENTIALS,),
48 "refresh_token": (
49 AbstractApplication.GRANT_AUTHORIZATION_CODE,
50 AbstractApplication.GRANT_PASSWORD,
51 AbstractApplication.GRANT_CLIENT_CREDENTIALS,
52 AbstractApplication.GRANT_OPENID_HYBRID,
53 ),
54 }
55
56 Application = get_application_model()
57 AccessToken = get_access_token_model()
58 IDToken = get_id_token_model()
59 Grant = get_grant_model()
60 RefreshToken = get_refresh_token_model()
61 UserModel = get_user_model()
62
63
64 class OAuth2Validator(RequestValidator):
65 def _extract_basic_auth(self, request):
66 """
67 Return authentication string if request contains basic auth credentials,
68 otherwise return None
69 """
70 auth = request.headers.get("HTTP_AUTHORIZATION", None)
71 if not auth:
72 return None
73
74 splitted = auth.split(" ", 1)
75 if len(splitted) != 2:
76 return None
77 auth_type, auth_string = splitted
78
79 if auth_type != "Basic":
80 return None
81
82 return auth_string
83
84 def _authenticate_basic_auth(self, request):
85 """
86 Authenticates with HTTP Basic Auth.
87
88 Note: as stated in rfc:`2.3.1`, client_id and client_secret must be encoded with
89 "application/x-www-form-urlencoded" encoding algorithm.
90 """
91 auth_string = self._extract_basic_auth(request)
92 if not auth_string:
93 return False
94
95 try:
96 encoding = request.encoding or settings.DEFAULT_CHARSET or "utf-8"
97 except AttributeError:
98 encoding = "utf-8"
99
100 try:
101 b64_decoded = base64.b64decode(auth_string)
102 except (TypeError, binascii.Error):
103 log.debug("Failed basic auth: %r can't be decoded as base64", auth_string)
104 return False
105
106 try:
107 auth_string_decoded = b64_decoded.decode(encoding)
108 except UnicodeDecodeError:
109 log.debug("Failed basic auth: %r can't be decoded as unicode by %r", auth_string, encoding)
110 return False
111
112 try:
113 client_id, client_secret = map(unquote_plus, auth_string_decoded.split(":", 1))
114 except ValueError:
115 log.debug("Failed basic auth, Invalid base64 encoding.")
116 return False
117
118 if self._load_application(client_id, request) is None:
119 log.debug("Failed basic auth: Application %s does not exist" % client_id)
120 return False
121 elif request.client.client_id != client_id:
122 log.debug("Failed basic auth: wrong client id %s" % client_id)
123 return False
124 elif request.client.client_secret != client_secret:
125 log.debug("Failed basic auth: wrong client secret %s" % client_secret)
126 return False
127 else:
128 return True
129
130 def _authenticate_request_body(self, request):
131 """
132 Try to authenticate the client using client_id and client_secret
133 parameters included in body.
134
135 Remember that this method is NOT RECOMMENDED and SHOULD be limited to
136 clients unable to directly utilize the HTTP Basic authentication scheme.
137 See rfc:`2.3.1` for more details.
138 """
139 # TODO: check if oauthlib has already unquoted client_id and client_secret
140 try:
141 client_id = request.client_id
142 client_secret = request.client_secret
143 except AttributeError:
144 return False
145
146 if self._load_application(client_id, request) is None:
147 log.debug("Failed body auth: Application %s does not exists" % client_id)
148 return False
149 elif request.client.client_secret != client_secret:
150 log.debug("Failed body auth: wrong client secret %s" % client_secret)
151 return False
152 else:
153 return True
154
155 def _load_application(self, client_id, request):
156 """
157 If request.client was not set, load application instance for given
158 client_id and store it in request.client
159 """
160
161 # we want to be sure that request has the client attribute!
162 assert hasattr(request, "client"), '"request" instance has no "client" attribute'
163
164 try:
165 request.client = request.client or Application.objects.get(client_id=client_id)
166 # Check that the application can be used (defaults to always True)
167 if not request.client.is_usable(request):
168 log.debug("Failed body authentication: Application %r is disabled" % (client_id))
169 return None
170 return request.client
171 except Application.DoesNotExist:
172 log.debug("Failed body authentication: Application %r does not exist" % (client_id))
173 return None
174
175 def _set_oauth2_error_on_request(self, request, access_token, scopes):
176 if access_token is None:
177 error = OrderedDict(
178 [
179 ("error", "invalid_token"),
180 ("error_description", _("The access token is invalid.")),
181 ]
182 )
183 elif access_token.is_expired():
184 error = OrderedDict(
185 [
186 ("error", "invalid_token"),
187 ("error_description", _("The access token has expired.")),
188 ]
189 )
190 elif not access_token.allow_scopes(scopes):
191 error = OrderedDict(
192 [
193 ("error", "insufficient_scope"),
194 ("error_description", _("The access token is valid but does not have enough scope.")),
195 ]
196 )
197 else:
198 log.warning("OAuth2 access token is invalid for an unknown reason.")
199 error = OrderedDict(
200 [
201 ("error", "invalid_token"),
202 ]
203 )
204 request.oauth2_error = error
205 return request
206
207 def client_authentication_required(self, request, *args, **kwargs):
208 """
209 Determine if the client has to be authenticated
210
211 This method is called only for grant types that supports client authentication:
212 * Authorization code grant
213 * Resource owner password grant
214 * Refresh token grant
215
216 If the request contains authorization headers, always authenticate the client
217 no matter the grant type.
218
219 If the request does not contain authorization headers, proceed with authentication
220 only if the client is of type `Confidential`.
221
222 If something goes wrong, call oauthlib implementation of the method.
223 """
224 if self._extract_basic_auth(request):
225 return True
226
227 try:
228 if request.client_id and request.client_secret:
229 return True
230 except AttributeError:
231 log.debug("Client ID or client secret not provided...")
232 pass
233
234 self._load_application(request.client_id, request)
235 if request.client:
236 return request.client.client_type == AbstractApplication.CLIENT_CONFIDENTIAL
237
238 return super().client_authentication_required(request, *args, **kwargs)
239
240 def authenticate_client(self, request, *args, **kwargs):
241 """
242 Check if client exists and is authenticating itself as in rfc:`3.2.1`
243
244 First we try to authenticate with HTTP Basic Auth, and that is the PREFERRED
245 authentication method.
246 Whether this fails we support including the client credentials in the request-body,
247 but this method is NOT RECOMMENDED and SHOULD be limited to clients unable to
248 directly utilize the HTTP Basic authentication scheme.
249 See rfc:`2.3.1` for more details
250 """
251 authenticated = self._authenticate_basic_auth(request)
252
253 if not authenticated:
254 authenticated = self._authenticate_request_body(request)
255
256 return authenticated
257
258 def authenticate_client_id(self, client_id, request, *args, **kwargs):
259 """
260 If we are here, the client did not authenticate itself as in rfc:`3.2.1` and we can
261 proceed only if the client exists and is not of type "Confidential".
262 """
263 if self._load_application(client_id, request) is not None:
264 log.debug("Application %r has type %r" % (client_id, request.client.client_type))
265 return request.client.client_type != AbstractApplication.CLIENT_CONFIDENTIAL
266 return False
267
268 def confirm_redirect_uri(self, client_id, code, redirect_uri, client, *args, **kwargs):
269 """
270 Ensure the redirect_uri is listed in the Application instance redirect_uris field
271 """
272 grant = Grant.objects.get(code=code, application=client)
273 return grant.redirect_uri_allowed(redirect_uri)
274
275 def invalidate_authorization_code(self, client_id, code, request, *args, **kwargs):
276 """
277 Remove the temporary grant used to swap the authorization token
278 """
279 grant = Grant.objects.get(code=code, application=request.client)
280 grant.delete()
281
282 def validate_client_id(self, client_id, request, *args, **kwargs):
283 """
284 Ensure an Application exists with given client_id.
285 If it exists, it's assigned to request.client.
286 """
287 return self._load_application(client_id, request) is not None
288
289 def get_default_redirect_uri(self, client_id, request, *args, **kwargs):
290 return request.client.default_redirect_uri
291
292 def _get_token_from_authentication_server(
293 self, token, introspection_url, introspection_token, introspection_credentials
294 ):
295 """Use external introspection endpoint to "crack open" the token.
296 :param introspection_url: introspection endpoint URL
297 :param introspection_token: Bearer token
298 :param introspection_credentials: Basic Auth credentials (id,secret)
299 :return: :class:`models.AccessToken`
300
301 Some RFC 7662 implementations (including this one) use a Bearer token while others use Basic
302 Auth. Depending on the external AS's implementation, provide either the introspection_token
303 or the introspection_credentials.
304
305 If the resulting access_token identifies a username (e.g. Authorization Code grant), add
306 that user to the UserModel. Also cache the access_token up until its expiry time or a
307 configured maximum time.
308
309 """
310 headers = None
311 if introspection_token:
312 headers = {"Authorization": "Bearer {}".format(introspection_token)}
313 elif introspection_credentials:
314 client_id = introspection_credentials[0].encode("utf-8")
315 client_secret = introspection_credentials[1].encode("utf-8")
316 basic_auth = base64.b64encode(client_id + b":" + client_secret)
317 headers = {"Authorization": "Basic {}".format(basic_auth.decode("utf-8"))}
318
319 try:
320 response = requests.post(introspection_url, data={"token": token}, headers=headers)
321 except requests.exceptions.RequestException:
322 log.exception("Introspection: Failed POST to %r in token lookup", introspection_url)
323 return None
324
325 # Log an exception when response from auth server is not successful
326 if response.status_code != http.client.OK:
327 log.exception(
328 "Introspection: Failed to get a valid response "
329 "from authentication server. Status code: {}, "
330 "Reason: {}.".format(response.status_code, response.reason)
331 )
332 return None
333
334 try:
335 content = response.json()
336 except ValueError:
337 log.exception("Introspection: Failed to parse response as json")
338 return None
339
340 if "active" in content and content["active"] is True:
341 if "username" in content:
342 user, _created = UserModel.objects.get_or_create(
343 **{UserModel.USERNAME_FIELD: content["username"]}
344 )
345 else:
346 user = None
347
348 max_caching_time = datetime.now() + timedelta(
349 seconds=oauth2_settings.RESOURCE_SERVER_TOKEN_CACHING_SECONDS
350 )
351
352 if "exp" in content:
353 expires = datetime.utcfromtimestamp(content["exp"])
354 if expires > max_caching_time:
355 expires = max_caching_time
356 else:
357 expires = max_caching_time
358
359 scope = content.get("scope", "")
360 expires = make_aware(expires)
361
362 access_token, _created = AccessToken.objects.update_or_create(
363 token=token,
364 defaults={
365 "user": user,
366 "application": None,
367 "scope": scope,
368 "expires": expires,
369 },
370 )
371
372 return access_token
373
374 def validate_bearer_token(self, token, scopes, request):
375 """
376 When users try to access resources, check that provided token is valid
377 """
378 if not token:
379 return False
380
381 introspection_url = oauth2_settings.RESOURCE_SERVER_INTROSPECTION_URL
382 introspection_token = oauth2_settings.RESOURCE_SERVER_AUTH_TOKEN
383 introspection_credentials = oauth2_settings.RESOURCE_SERVER_INTROSPECTION_CREDENTIALS
384
385 access_token = self._load_access_token(token)
386
387 # if there is no token or it's invalid then introspect the token if there's an external OAuth server
388 if not access_token or not access_token.is_valid(scopes):
389 if introspection_url and (introspection_token or introspection_credentials):
390 access_token = self._get_token_from_authentication_server(
391 token, introspection_url, introspection_token, introspection_credentials
392 )
393
394 if access_token and access_token.is_valid(scopes):
395 request.client = access_token.application
396 request.user = access_token.user
397 request.scopes = scopes
398
399 # this is needed by django rest framework
400 request.access_token = access_token
401 return True
402 else:
403 self._set_oauth2_error_on_request(request, access_token, scopes)
404 return False
405
406 def _load_access_token(self, token):
407 return AccessToken.objects.select_related("application", "user").filter(token=token).first()
408
409 def validate_code(self, client_id, code, client, request, *args, **kwargs):
410 try:
411 grant = Grant.objects.get(code=code, application=client)
412 if not grant.is_expired():
413 request.scopes = grant.scope.split(" ")
414 request.user = grant.user
415 if grant.nonce:
416 request.nonce = grant.nonce
417 if grant.claims:
418 request.claims = json.loads(grant.claims)
419 return True
420 return False
421
422 except Grant.DoesNotExist:
423 return False
424
425 def validate_grant_type(self, client_id, grant_type, client, request, *args, **kwargs):
426 """
427 Validate both grant_type is a valid string and grant_type is allowed for current workflow
428 """
429 assert grant_type in GRANT_TYPE_MAPPING # mapping misconfiguration
430 return request.client.allows_grant_type(*GRANT_TYPE_MAPPING[grant_type])
431
432 def validate_response_type(self, client_id, response_type, client, request, *args, **kwargs):
433 """
434 We currently do not support the Authorization Endpoint Response Types registry as in
435 rfc:`8.4`, so validate the response_type only if it matches "code" or "token"
436 """
437 if response_type == "code":
438 return client.allows_grant_type(AbstractApplication.GRANT_AUTHORIZATION_CODE)
439 elif response_type == "token":
440 return client.allows_grant_type(AbstractApplication.GRANT_IMPLICIT)
441 elif response_type == "id_token":
442 return client.allows_grant_type(AbstractApplication.GRANT_IMPLICIT)
443 elif response_type == "id_token token":
444 return client.allows_grant_type(AbstractApplication.GRANT_IMPLICIT)
445 elif response_type == "code id_token":
446 return client.allows_grant_type(AbstractApplication.GRANT_OPENID_HYBRID)
447 elif response_type == "code token":
448 return client.allows_grant_type(AbstractApplication.GRANT_OPENID_HYBRID)
449 elif response_type == "code id_token token":
450 return client.allows_grant_type(AbstractApplication.GRANT_OPENID_HYBRID)
451 else:
452 return False
453
454 def validate_scopes(self, client_id, scopes, client, request, *args, **kwargs):
455 """
456 Ensure required scopes are permitted (as specified in the settings file)
457 """
458 available_scopes = get_scopes_backend().get_available_scopes(application=client, request=request)
459 return set(scopes).issubset(set(available_scopes))
460
461 def get_default_scopes(self, client_id, request, *args, **kwargs):
462 default_scopes = get_scopes_backend().get_default_scopes(application=request.client, request=request)
463 return default_scopes
464
465 def validate_redirect_uri(self, client_id, redirect_uri, request, *args, **kwargs):
466 return request.client.redirect_uri_allowed(redirect_uri)
467
468 def is_pkce_required(self, client_id, request):
469 """
470 Enables or disables PKCE verification.
471
472 Uses the setting PKCE_REQUIRED, which can be either a bool or a callable that
473 receives the client id and returns a bool.
474 """
475 if callable(oauth2_settings.PKCE_REQUIRED):
476 return oauth2_settings.PKCE_REQUIRED(client_id)
477 return oauth2_settings.PKCE_REQUIRED
478
479 def get_code_challenge(self, code, request):
480 grant = Grant.objects.get(code=code, application=request.client)
481 return grant.code_challenge or None
482
483 def get_code_challenge_method(self, code, request):
484 grant = Grant.objects.get(code=code, application=request.client)
485 return grant.code_challenge_method or None
486
487 def save_authorization_code(self, client_id, code, request, *args, **kwargs):
488 self._create_authorization_code(request, code)
489
490 def get_authorization_code_scopes(self, client_id, code, redirect_uri, request):
491 scopes = Grant.objects.filter(code=code).values_list("scope", flat=True).first()
492 if scopes:
493 return utils.scope_to_list(scopes)
494 return []
495
496 def rotate_refresh_token(self, request):
497 """
498 Checks if rotate refresh token is enabled
499 """
500 return oauth2_settings.ROTATE_REFRESH_TOKEN
501
502 @transaction.atomic
503 def save_bearer_token(self, token, request, *args, **kwargs):
504 """
505 Save access and refresh token, If refresh token is issued, remove or
506 reuse old refresh token as in rfc:`6`
507
508 @see: https://tools.ietf.org/html/draft-ietf-oauth-v2-31#page-43
509 """
510
511 if "scope" not in token:
512 raise FatalClientError("Failed to renew access token: missing scope")
513
514 # expires_in is passed to Server on initialization
515 # custom server class can have logic to override this
516 expires = timezone.now() + timedelta(
517 seconds=token.get(
518 "expires_in",
519 oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS,
520 )
521 )
522
523 if request.grant_type == "client_credentials":
524 request.user = None
525
526 # This comes from OAuthLib:
527 # https://github.com/idan/oauthlib/blob/1.0.3/oauthlib/oauth2/rfc6749/tokens.py#L267
528 # Its value is either a new random code; or if we are reusing
529 # refresh tokens, then it is the same value that the request passed in
530 # (stored in `request.refresh_token`)
531 refresh_token_code = token.get("refresh_token", None)
532
533 if refresh_token_code:
534 # an instance of `RefreshToken` that matches the old refresh code.
535 # Set on the request in `validate_refresh_token`
536 refresh_token_instance = getattr(request, "refresh_token_instance", None)
537
538 # If we are to reuse tokens, and we can: do so
539 if (
540 not self.rotate_refresh_token(request)
541 and isinstance(refresh_token_instance, RefreshToken)
542 and refresh_token_instance.access_token
543 ):
544
545 access_token = AccessToken.objects.select_for_update().get(
546 pk=refresh_token_instance.access_token.pk
547 )
548 access_token.user = request.user
549 access_token.scope = token["scope"]
550 access_token.expires = expires
551 access_token.token = token["access_token"]
552 access_token.application = request.client
553 access_token.save()
554
555 # else create fresh with access & refresh tokens
556 else:
557 # revoke existing tokens if possible to allow reuse of grant
558 if isinstance(refresh_token_instance, RefreshToken):
559 # First, to ensure we don't have concurrency issues, we refresh the refresh token
560 # from the db while acquiring a lock on it
561 # We also put it in the "request cache"
562 refresh_token_instance = RefreshToken.objects.select_for_update().get(
563 id=refresh_token_instance.id
564 )
565 request.refresh_token_instance = refresh_token_instance
566
567 previous_access_token = AccessToken.objects.filter(
568 source_refresh_token=refresh_token_instance
569 ).first()
570 try:
571 refresh_token_instance.revoke()
572 except (AccessToken.DoesNotExist, RefreshToken.DoesNotExist):
573 pass
574 else:
575 setattr(request, "refresh_token_instance", None)
576 else:
577 previous_access_token = None
578
579 # If the refresh token has already been used to create an
580 # access token (ie it's within the grace period), return that
581 # access token
582 if not previous_access_token:
583 access_token = self._create_access_token(
584 expires,
585 request,
586 token,
587 source_refresh_token=refresh_token_instance,
588 )
589
590 self._create_refresh_token(request, refresh_token_code, access_token)
591 else:
592 # make sure that the token data we're returning matches
593 # the existing token
594 token["access_token"] = previous_access_token.token
595 token["refresh_token"] = (
596 RefreshToken.objects.filter(access_token=previous_access_token).first().token
597 )
598 token["scope"] = previous_access_token.scope
599
600 # No refresh token should be created, just access token
601 else:
602 self._create_access_token(expires, request, token)
603
604 def _create_access_token(self, expires, request, token, source_refresh_token=None):
605 id_token = token.get("id_token", None)
606 if id_token:
607 id_token = self._load_id_token(id_token)
608 return AccessToken.objects.create(
609 user=request.user,
610 scope=token["scope"],
611 expires=expires,
612 token=token["access_token"],
613 id_token=id_token,
614 application=request.client,
615 source_refresh_token=source_refresh_token,
616 )
617
618 def _create_authorization_code(self, request, code, expires=None):
619 if not expires:
620 expires = timezone.now() + timedelta(seconds=oauth2_settings.AUTHORIZATION_CODE_EXPIRE_SECONDS)
621 return Grant.objects.create(
622 application=request.client,
623 user=request.user,
624 code=code["code"],
625 expires=expires,
626 redirect_uri=request.redirect_uri,
627 scope=" ".join(request.scopes),
628 code_challenge=request.code_challenge or "",
629 code_challenge_method=request.code_challenge_method or "",
630 nonce=request.nonce or "",
631 claims=json.dumps(request.claims or {}),
632 )
633
634 def _create_refresh_token(self, request, refresh_token_code, access_token):
635 return RefreshToken.objects.create(
636 user=request.user, token=refresh_token_code, application=request.client, access_token=access_token
637 )
638
639 def revoke_token(self, token, token_type_hint, request, *args, **kwargs):
640 """
641 Revoke an access or refresh token.
642
643 :param token: The token string.
644 :param token_type_hint: access_token or refresh_token.
645 :param request: The HTTP Request (oauthlib.common.Request)
646 """
647 if token_type_hint not in ["access_token", "refresh_token"]:
648 token_type_hint = None
649
650 token_types = {
651 "access_token": AccessToken,
652 "refresh_token": RefreshToken,
653 }
654
655 token_type = token_types.get(token_type_hint, AccessToken)
656 try:
657 token_type.objects.get(token=token).revoke()
658 except ObjectDoesNotExist:
659 for other_type in [_t for _t in token_types.values() if _t != token_type]:
660 # slightly inefficient on Python2, but the queryset contains only one instance
661 list(map(lambda t: t.revoke(), other_type.objects.filter(token=token)))
662
663 def validate_user(self, username, password, client, request, *args, **kwargs):
664 """
665 Check username and password correspond to a valid and active User
666 """
667 u = authenticate(username=username, password=password)
668 if u is not None and u.is_active:
669 request.user = u
670 return True
671 return False
672
673 def get_original_scopes(self, refresh_token, request, *args, **kwargs):
674 # Avoid second query for RefreshToken since this method is invoked *after*
675 # validate_refresh_token.
676 rt = request.refresh_token_instance
677 if not rt.access_token_id:
678 return AccessToken.objects.get(source_refresh_token_id=rt.id).scope
679
680 return rt.access_token.scope
681
682 def validate_refresh_token(self, refresh_token, client, request, *args, **kwargs):
683 """
684 Check refresh_token exists and refers to the right client.
685 Also attach User instance to the request object
686 """
687
688 null_or_recent = Q(revoked__isnull=True) | Q(
689 revoked__gt=timezone.now() - timedelta(seconds=oauth2_settings.REFRESH_TOKEN_GRACE_PERIOD_SECONDS)
690 )
691 rt = (
692 RefreshToken.objects.filter(null_or_recent, token=refresh_token)
693 .select_related("access_token")
694 .first()
695 )
696
697 if not rt:
698 return False
699
700 request.user = rt.user
701 request.refresh_token = rt.token
702 # Temporary store RefreshToken instance to be reused by get_original_scopes and save_bearer_token.
703 request.refresh_token_instance = rt
704 return rt.application == client
705
706 @transaction.atomic
707 def _save_id_token(self, jti, request, expires, *args, **kwargs):
708 scopes = request.scope or " ".join(request.scopes)
709
710 id_token = IDToken.objects.create(
711 user=request.user,
712 scope=scopes,
713 expires=expires,
714 jti=jti,
715 application=request.client,
716 )
717 return id_token
718
719 def get_jwt_bearer_token(self, token, token_handler, request):
720 return self.get_id_token(token, token_handler, request)
721
722 def get_oidc_claims(self, token, token_handler, request):
723 # Required OIDC claims
724 claims = {
725 "sub": str(request.user.id),
726 }
727
728 # https://openid.net/specs/openid-connect-core-1_0.html#StandardClaims
729 claims.update(**self.get_additional_claims(request))
730
731 return claims
732
733 def get_id_token_dictionary(self, token, token_handler, request):
734 """
735 Get the claims to put in the ID Token.
736
737 These claims are in addition to the claims automatically added by
738 ``oauthlib`` - aud, iat, nonce, at_hash, c_hash.
739
740 This function adds in iss, exp and auth_time, plus any claims added from
741 calling ``get_oidc_claims()``
742 """
743 claims = self.get_oidc_claims(token, token_handler, request)
744
745 expiration_time = timezone.now() + timedelta(seconds=oauth2_settings.ID_TOKEN_EXPIRE_SECONDS)
746 # Required ID Token claims
747 claims.update(
748 **{
749 "iss": self.get_oidc_issuer_endpoint(request),
750 "exp": int(dateformat.format(expiration_time, "U")),
751 "auth_time": int(dateformat.format(request.user.last_login, "U")),
752 "jti": str(uuid.uuid4()),
753 }
754 )
755
756 return claims, expiration_time
757
758 def get_oidc_issuer_endpoint(self, request):
759 return oauth2_settings.oidc_issuer(request)
760
761 def finalize_id_token(self, id_token, token, token_handler, request):
762 claims, expiration_time = self.get_id_token_dictionary(token, token_handler, request)
763 id_token.update(**claims)
764 # Workaround for oauthlib bug #746
765 # https://github.com/oauthlib/oauthlib/issues/746
766 if "nonce" not in id_token and request.nonce:
767 id_token["nonce"] = request.nonce
768
769 header = {
770 "typ": "JWT",
771 "alg": request.client.algorithm,
772 }
773 # RS256 consumers expect a kid in the header for verifying the token
774 if request.client.algorithm == AbstractApplication.RS256_ALGORITHM:
775 header["kid"] = request.client.jwk_key.thumbprint()
776
777 jwt_token = jwt.JWT(
778 header=json.dumps(header, default=str),
779 claims=json.dumps(id_token, default=str),
780 )
781 jwt_token.make_signed_token(request.client.jwk_key)
782 id_token = self._save_id_token(id_token["jti"], request, expiration_time)
783 # this is needed by django rest framework
784 request.access_token = id_token
785 request.id_token = id_token
786 return jwt_token.serialize()
787
788 def validate_jwt_bearer_token(self, token, scopes, request):
789 return self.validate_id_token(token, scopes, request)
790
791 def validate_id_token(self, token, scopes, request):
792 """
793 When users try to access resources, check that provided id_token is valid
794 """
795 if not token:
796 return False
797
798 id_token = self._load_id_token(token)
799 if not id_token:
800 return False
801
802 if not id_token.allow_scopes(scopes):
803 return False
804
805 request.client = id_token.application
806 request.user = id_token.user
807 request.scopes = scopes
808 # this is needed by django rest framework
809 request.access_token = id_token
810 return True
811
812 def _load_id_token(self, token):
813 key = self._get_key_for_token(token)
814 if not key:
815 return None
816 try:
817 jwt_token = jwt.JWT(key=key, jwt=token)
818 claims = json.loads(jwt_token.claims)
819 return IDToken.objects.get(jti=claims["jti"])
820 except (JWException, JWTExpired, IDToken.DoesNotExist):
821 return None
822
823 def _get_key_for_token(self, token):
824 """
825 Peek at the unvalidated token to discover who it was issued for
826 and then use that to load that application and its key.
827 """
828 unverified_token = jws.JWS()
829 unverified_token.deserialize(token)
830 claims = json.loads(unverified_token.objects["payload"].decode("utf-8"))
831 if "aud" not in claims:
832 return None
833 application = self._get_client_by_audience(claims["aud"])
834 if application:
835 return application.jwk_key
836
837 def _get_client_by_audience(self, audience):
838 """
839 Load a client by the aud claim in a JWT.
840 aud may be multi-valued, if your provider makes it so.
841 This function is separate to allow further customization.
842 """
843 if isinstance(audience, str):
844 audience = [audience]
845 return Application.objects.filter(client_id__in=audience).first()
846
847 def validate_user_match(self, id_token_hint, scopes, claims, request):
848 # TODO: Fix to validate when necessary acording
849 # https://github.com/idan/oauthlib/blob/master/oauthlib/oauth2/rfc6749/request_validator.py#L556
850 # http://openid.net/specs/openid-connect-core-1_0.html#AuthRequest id_token_hint section
851 return True
852
853 def get_authorization_code_nonce(self, client_id, code, redirect_uri, request):
854 """Extracts nonce from saved authorization code.
855 If present in the Authentication Request, Authorization
856 Servers MUST include a nonce Claim in the ID Token with the
857 Claim Value being the nonce value sent in the Authentication
858 Request. Authorization Servers SHOULD perform no other
859 processing on nonce values used. The nonce value is a
860 case-sensitive string.
861 Only code param should be sufficient to retrieve grant code from
862 any storage you are using. However, `client_id` and `redirect_uri`
863 have been validated and can be used also.
864 :param client_id: Unicode client identifier
865 :param code: Unicode authorization code grant
866 :param redirect_uri: Unicode absolute URI
867 :return: Unicode nonce
868 Method is used by:
869 - Authorization Token Grant Dispatcher
870 """
871 nonce = Grant.objects.filter(code=code).values_list("nonce", flat=True).first()
872 if nonce:
873 return nonce
874
875 def get_userinfo_claims(self, request):
876 """
877 Generates and saves a new JWT for this request, and returns it as the
878 current user's claims.
879
880 """
881 return self.get_oidc_claims(None, None, request)
882
883 def get_additional_claims(self, request):
884 return {}
885
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/oauth2_provider/oauth2_validators.py b/oauth2_provider/oauth2_validators.py
--- a/oauth2_provider/oauth2_validators.py
+++ b/oauth2_provider/oauth2_validators.py
@@ -357,7 +357,7 @@
                 expires = max_caching_time
 
             scope = content.get("scope", "")
-            expires = make_aware(expires)
+            expires = make_aware(expires) if settings.USE_TZ else expires
 
             access_token, _created = AccessToken.objects.update_or_create(
                 token=token,
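In isolation, the effect of the guarded conversion can be sketched as follows (illustrative only, assuming a configured Django settings module; not part of the golden patch):

```python
from datetime import datetime

from django.conf import settings
from django.utils.timezone import is_aware, make_aware

# Naive expiry, as produced by datetime.utcfromtimestamp(content["exp"])
expires = datetime(2030, 1, 1, 12, 0, 0)

# The patched line: only localize when the project actually uses aware datetimes
expires = make_aware(expires) if settings.USE_TZ else expires

# The stored value now matches what DateTimeField / timezone.now() will compare against
assert is_aware(expires) == settings.USE_TZ
```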
| {"golden_diff": "diff --git a/oauth2_provider/oauth2_validators.py b/oauth2_provider/oauth2_validators.py\n--- a/oauth2_provider/oauth2_validators.py\n+++ b/oauth2_provider/oauth2_validators.py\n@@ -357,7 +357,7 @@\n expires = max_caching_time\n \n scope = content.get(\"scope\", \"\")\n- expires = make_aware(expires)\n+ expires = make_aware(expires) if settings.USE_TZ else expires\n \n access_token, _created = AccessToken.objects.update_or_create(\n token=token,\n", "issue": "Token introspection always uses time-zone-aware datetimes \nHi,\r\n\r\nI'm attempting to use the new token introspection and ran into a bit of a snag. Our app is using USE_TZ=False, which results in the following:\r\n\r\n```\r\n File \"lib/python3.6/site-packages/rest_framework/views.py\", line 489, in dispatch\r\n response = self.handle_exception(exc)\r\n File \"lib/python3.6/site-packages/rest_framework/views.py\", line 449, in handle_exception\r\n self.raise_uncaught_exception(exc)\r\n File \"lib/python3.6/site-packages/rest_framework/views.py\", line 477, in dispatch\r\n self.initial(request, *args, **kwargs)\r\n File \"lib/python3.6/site-packages/rest_framework/views.py\", line 394, in initial\r\n self.perform_authentication(request)\r\n File \"lib/python3.6/site-packages/rest_framework/views.py\", line 320, in perform_authentication\r\n request.user\r\n File \"lib/python3.6/site-packages/rest_framework/request.py\", line 381, in __getattribute__\r\n return super(Request, self).__getattribute__(attr)\r\n File \"lib/python3.6/site-packages/rest_framework/request.py\", line 196, in user\r\n self._authenticate()\r\n File \"lib/python3.6/site-packages/rest_framework/request.py\", line 345, in _authenticate\r\n user_auth_tuple = authenticator.authenticate(self)\r\n File \"lib/python3.6/site-packages/oauth2_provider/contrib/rest_framework/authentication.py\", line 18, in authenticate\r\n valid, r = oauthlib_core.verify_request(request, scopes=[])\r\n File \"lib/python3.6/site-packages/oauth2_provider/oauth2_backends.py\", line 168, in verify_request\r\n valid, r = self.server.verify_request(uri, http_method, body, headers, scopes=scopes)\r\n File \"lib/python3.6/site-packages/oauthlib/oauth2/rfc6749/endpoints/base.py\", line 64, in wrapper\r\n return f(endpoint, uri, *args, **kwargs)\r\n File \"lib/python3.6/site-packages/oauthlib/oauth2/rfc6749/endpoints/resource.py\", line 75, in verify_request\r\n return token_type_handler.validate_request(request), request\r\n File \"lib/python3.6/site-packages/oauthlib/oauth2/rfc6749/tokens.py\", line 297, in validate_request\r\n token, request.scopes, request)\r\n File \"lib/python3.6/site-packages/oauth2_provider/oauth2_validators.py\", line 340, in validate_bearer_token\r\n if access_token and access_token.is_valid(scopes):\r\n File \"lib/python3.6/site-packages/oauth2_provider/models.py\", line 250, in is_valid\r\n return not self.is_expired() and self.allow_scopes(scopes)\r\n File \"lib/python3.6/site-packages/oauth2_provider/models.py\", line 259, in is_expired\r\n return timezone.now() >= self.expires\r\nTypeError: can't compare offset-naive and offset-aware datetimes\r\n```\r\nI discovered that OAuth2Validator._get_token_from_authentication_server() is unconditionally calling django.utils.timezone.make_aware() on the expiry time:\r\n\r\nhttps://github.com/evonove/django-oauth-toolkit/blob/fa33444e81b3c95432999d51cdb2acdb98fc16bf/oauth2_provider/oauth2_validators.py#L281-L281\r\n\r\nIs there any reason for this?\r\n\r\nThanks\r\n\r\nEdit: I think we're just going to turn 
on USE_TZ. Still, this could definitely be handled more gracefully.\nToken introspection always uses time-zone-aware datetimes \nHi,\r\n\r\nI'm attempting to use the new token introspection and ran into a bit of a snag. Our app is using USE_TZ=False, which results in the following:\r\n\r\n```\r\n File \"lib/python3.6/site-packages/rest_framework/views.py\", line 489, in dispatch\r\n response = self.handle_exception(exc)\r\n File \"lib/python3.6/site-packages/rest_framework/views.py\", line 449, in handle_exception\r\n self.raise_uncaught_exception(exc)\r\n File \"lib/python3.6/site-packages/rest_framework/views.py\", line 477, in dispatch\r\n self.initial(request, *args, **kwargs)\r\n File \"lib/python3.6/site-packages/rest_framework/views.py\", line 394, in initial\r\n self.perform_authentication(request)\r\n File \"lib/python3.6/site-packages/rest_framework/views.py\", line 320, in perform_authentication\r\n request.user\r\n File \"lib/python3.6/site-packages/rest_framework/request.py\", line 381, in __getattribute__\r\n return super(Request, self).__getattribute__(attr)\r\n File \"lib/python3.6/site-packages/rest_framework/request.py\", line 196, in user\r\n self._authenticate()\r\n File \"lib/python3.6/site-packages/rest_framework/request.py\", line 345, in _authenticate\r\n user_auth_tuple = authenticator.authenticate(self)\r\n File \"lib/python3.6/site-packages/oauth2_provider/contrib/rest_framework/authentication.py\", line 18, in authenticate\r\n valid, r = oauthlib_core.verify_request(request, scopes=[])\r\n File \"lib/python3.6/site-packages/oauth2_provider/oauth2_backends.py\", line 168, in verify_request\r\n valid, r = self.server.verify_request(uri, http_method, body, headers, scopes=scopes)\r\n File \"lib/python3.6/site-packages/oauthlib/oauth2/rfc6749/endpoints/base.py\", line 64, in wrapper\r\n return f(endpoint, uri, *args, **kwargs)\r\n File \"lib/python3.6/site-packages/oauthlib/oauth2/rfc6749/endpoints/resource.py\", line 75, in verify_request\r\n return token_type_handler.validate_request(request), request\r\n File \"lib/python3.6/site-packages/oauthlib/oauth2/rfc6749/tokens.py\", line 297, in validate_request\r\n token, request.scopes, request)\r\n File \"lib/python3.6/site-packages/oauth2_provider/oauth2_validators.py\", line 340, in validate_bearer_token\r\n if access_token and access_token.is_valid(scopes):\r\n File \"lib/python3.6/site-packages/oauth2_provider/models.py\", line 250, in is_valid\r\n return not self.is_expired() and self.allow_scopes(scopes)\r\n File \"lib/python3.6/site-packages/oauth2_provider/models.py\", line 259, in is_expired\r\n return timezone.now() >= self.expires\r\nTypeError: can't compare offset-naive and offset-aware datetimes\r\n```\r\nI discovered that OAuth2Validator._get_token_from_authentication_server() is unconditionally calling django.utils.timezone.make_aware() on the expiry time:\r\n\r\nhttps://github.com/evonove/django-oauth-toolkit/blob/fa33444e81b3c95432999d51cdb2acdb98fc16bf/oauth2_provider/oauth2_validators.py#L281-L281\r\n\r\nIs there any reason for this?\r\n\r\nThanks\r\n\r\nEdit: I think we're just going to turn on USE_TZ. 
Still, this could definitely be handled more gracefully.\n", "before_files": [{"content": "import base64\nimport binascii\nimport http.client\nimport json\nimport logging\nimport uuid\nfrom collections import OrderedDict\nfrom datetime import datetime, timedelta\nfrom urllib.parse import unquote_plus\n\nimport requests\nfrom django.conf import settings\nfrom django.contrib.auth import authenticate, get_user_model\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import transaction\nfrom django.db.models import Q\nfrom django.utils import dateformat, timezone\nfrom django.utils.timezone import make_aware\nfrom django.utils.translation import gettext_lazy as _\nfrom jwcrypto import jws, jwt\nfrom jwcrypto.common import JWException\nfrom jwcrypto.jwt import JWTExpired\nfrom oauthlib.oauth2.rfc6749 import utils\nfrom oauthlib.openid import RequestValidator\n\nfrom .exceptions import FatalClientError\nfrom .models import (\n AbstractApplication,\n get_access_token_model,\n get_application_model,\n get_grant_model,\n get_id_token_model,\n get_refresh_token_model,\n)\nfrom .scopes import get_scopes_backend\nfrom .settings import oauth2_settings\n\n\nlog = logging.getLogger(\"oauth2_provider\")\n\nGRANT_TYPE_MAPPING = {\n \"authorization_code\": (\n AbstractApplication.GRANT_AUTHORIZATION_CODE,\n AbstractApplication.GRANT_OPENID_HYBRID,\n ),\n \"password\": (AbstractApplication.GRANT_PASSWORD,),\n \"client_credentials\": (AbstractApplication.GRANT_CLIENT_CREDENTIALS,),\n \"refresh_token\": (\n AbstractApplication.GRANT_AUTHORIZATION_CODE,\n AbstractApplication.GRANT_PASSWORD,\n AbstractApplication.GRANT_CLIENT_CREDENTIALS,\n AbstractApplication.GRANT_OPENID_HYBRID,\n ),\n}\n\nApplication = get_application_model()\nAccessToken = get_access_token_model()\nIDToken = get_id_token_model()\nGrant = get_grant_model()\nRefreshToken = get_refresh_token_model()\nUserModel = get_user_model()\n\n\nclass OAuth2Validator(RequestValidator):\n def _extract_basic_auth(self, request):\n \"\"\"\n Return authentication string if request contains basic auth credentials,\n otherwise return None\n \"\"\"\n auth = request.headers.get(\"HTTP_AUTHORIZATION\", None)\n if not auth:\n return None\n\n splitted = auth.split(\" \", 1)\n if len(splitted) != 2:\n return None\n auth_type, auth_string = splitted\n\n if auth_type != \"Basic\":\n return None\n\n return auth_string\n\n def _authenticate_basic_auth(self, request):\n \"\"\"\n Authenticates with HTTP Basic Auth.\n\n Note: as stated in rfc:`2.3.1`, client_id and client_secret must be encoded with\n \"application/x-www-form-urlencoded\" encoding algorithm.\n \"\"\"\n auth_string = self._extract_basic_auth(request)\n if not auth_string:\n return False\n\n try:\n encoding = request.encoding or settings.DEFAULT_CHARSET or \"utf-8\"\n except AttributeError:\n encoding = \"utf-8\"\n\n try:\n b64_decoded = base64.b64decode(auth_string)\n except (TypeError, binascii.Error):\n log.debug(\"Failed basic auth: %r can't be decoded as base64\", auth_string)\n return False\n\n try:\n auth_string_decoded = b64_decoded.decode(encoding)\n except UnicodeDecodeError:\n log.debug(\"Failed basic auth: %r can't be decoded as unicode by %r\", auth_string, encoding)\n return False\n\n try:\n client_id, client_secret = map(unquote_plus, auth_string_decoded.split(\":\", 1))\n except ValueError:\n log.debug(\"Failed basic auth, Invalid base64 encoding.\")\n return False\n\n if self._load_application(client_id, request) is None:\n log.debug(\"Failed basic auth: Application %s does 
not exist\" % client_id)\n return False\n elif request.client.client_id != client_id:\n log.debug(\"Failed basic auth: wrong client id %s\" % client_id)\n return False\n elif request.client.client_secret != client_secret:\n log.debug(\"Failed basic auth: wrong client secret %s\" % client_secret)\n return False\n else:\n return True\n\n def _authenticate_request_body(self, request):\n \"\"\"\n Try to authenticate the client using client_id and client_secret\n parameters included in body.\n\n Remember that this method is NOT RECOMMENDED and SHOULD be limited to\n clients unable to directly utilize the HTTP Basic authentication scheme.\n See rfc:`2.3.1` for more details.\n \"\"\"\n # TODO: check if oauthlib has already unquoted client_id and client_secret\n try:\n client_id = request.client_id\n client_secret = request.client_secret\n except AttributeError:\n return False\n\n if self._load_application(client_id, request) is None:\n log.debug(\"Failed body auth: Application %s does not exists\" % client_id)\n return False\n elif request.client.client_secret != client_secret:\n log.debug(\"Failed body auth: wrong client secret %s\" % client_secret)\n return False\n else:\n return True\n\n def _load_application(self, client_id, request):\n \"\"\"\n If request.client was not set, load application instance for given\n client_id and store it in request.client\n \"\"\"\n\n # we want to be sure that request has the client attribute!\n assert hasattr(request, \"client\"), '\"request\" instance has no \"client\" attribute'\n\n try:\n request.client = request.client or Application.objects.get(client_id=client_id)\n # Check that the application can be used (defaults to always True)\n if not request.client.is_usable(request):\n log.debug(\"Failed body authentication: Application %r is disabled\" % (client_id))\n return None\n return request.client\n except Application.DoesNotExist:\n log.debug(\"Failed body authentication: Application %r does not exist\" % (client_id))\n return None\n\n def _set_oauth2_error_on_request(self, request, access_token, scopes):\n if access_token is None:\n error = OrderedDict(\n [\n (\"error\", \"invalid_token\"),\n (\"error_description\", _(\"The access token is invalid.\")),\n ]\n )\n elif access_token.is_expired():\n error = OrderedDict(\n [\n (\"error\", \"invalid_token\"),\n (\"error_description\", _(\"The access token has expired.\")),\n ]\n )\n elif not access_token.allow_scopes(scopes):\n error = OrderedDict(\n [\n (\"error\", \"insufficient_scope\"),\n (\"error_description\", _(\"The access token is valid but does not have enough scope.\")),\n ]\n )\n else:\n log.warning(\"OAuth2 access token is invalid for an unknown reason.\")\n error = OrderedDict(\n [\n (\"error\", \"invalid_token\"),\n ]\n )\n request.oauth2_error = error\n return request\n\n def client_authentication_required(self, request, *args, **kwargs):\n \"\"\"\n Determine if the client has to be authenticated\n\n This method is called only for grant types that supports client authentication:\n * Authorization code grant\n * Resource owner password grant\n * Refresh token grant\n\n If the request contains authorization headers, always authenticate the client\n no matter the grant type.\n\n If the request does not contain authorization headers, proceed with authentication\n only if the client is of type `Confidential`.\n\n If something goes wrong, call oauthlib implementation of the method.\n \"\"\"\n if self._extract_basic_auth(request):\n return True\n\n try:\n if request.client_id and 
request.client_secret:\n return True\n except AttributeError:\n log.debug(\"Client ID or client secret not provided...\")\n pass\n\n self._load_application(request.client_id, request)\n if request.client:\n return request.client.client_type == AbstractApplication.CLIENT_CONFIDENTIAL\n\n return super().client_authentication_required(request, *args, **kwargs)\n\n def authenticate_client(self, request, *args, **kwargs):\n \"\"\"\n Check if client exists and is authenticating itself as in rfc:`3.2.1`\n\n First we try to authenticate with HTTP Basic Auth, and that is the PREFERRED\n authentication method.\n Whether this fails we support including the client credentials in the request-body,\n but this method is NOT RECOMMENDED and SHOULD be limited to clients unable to\n directly utilize the HTTP Basic authentication scheme.\n See rfc:`2.3.1` for more details\n \"\"\"\n authenticated = self._authenticate_basic_auth(request)\n\n if not authenticated:\n authenticated = self._authenticate_request_body(request)\n\n return authenticated\n\n def authenticate_client_id(self, client_id, request, *args, **kwargs):\n \"\"\"\n If we are here, the client did not authenticate itself as in rfc:`3.2.1` and we can\n proceed only if the client exists and is not of type \"Confidential\".\n \"\"\"\n if self._load_application(client_id, request) is not None:\n log.debug(\"Application %r has type %r\" % (client_id, request.client.client_type))\n return request.client.client_type != AbstractApplication.CLIENT_CONFIDENTIAL\n return False\n\n def confirm_redirect_uri(self, client_id, code, redirect_uri, client, *args, **kwargs):\n \"\"\"\n Ensure the redirect_uri is listed in the Application instance redirect_uris field\n \"\"\"\n grant = Grant.objects.get(code=code, application=client)\n return grant.redirect_uri_allowed(redirect_uri)\n\n def invalidate_authorization_code(self, client_id, code, request, *args, **kwargs):\n \"\"\"\n Remove the temporary grant used to swap the authorization token\n \"\"\"\n grant = Grant.objects.get(code=code, application=request.client)\n grant.delete()\n\n def validate_client_id(self, client_id, request, *args, **kwargs):\n \"\"\"\n Ensure an Application exists with given client_id.\n If it exists, it's assigned to request.client.\n \"\"\"\n return self._load_application(client_id, request) is not None\n\n def get_default_redirect_uri(self, client_id, request, *args, **kwargs):\n return request.client.default_redirect_uri\n\n def _get_token_from_authentication_server(\n self, token, introspection_url, introspection_token, introspection_credentials\n ):\n \"\"\"Use external introspection endpoint to \"crack open\" the token.\n :param introspection_url: introspection endpoint URL\n :param introspection_token: Bearer token\n :param introspection_credentials: Basic Auth credentials (id,secret)\n :return: :class:`models.AccessToken`\n\n Some RFC 7662 implementations (including this one) use a Bearer token while others use Basic\n Auth. Depending on the external AS's implementation, provide either the introspection_token\n or the introspection_credentials.\n\n If the resulting access_token identifies a username (e.g. Authorization Code grant), add\n that user to the UserModel. 
Also cache the access_token up until its expiry time or a\n configured maximum time.\n\n \"\"\"\n headers = None\n if introspection_token:\n headers = {\"Authorization\": \"Bearer {}\".format(introspection_token)}\n elif introspection_credentials:\n client_id = introspection_credentials[0].encode(\"utf-8\")\n client_secret = introspection_credentials[1].encode(\"utf-8\")\n basic_auth = base64.b64encode(client_id + b\":\" + client_secret)\n headers = {\"Authorization\": \"Basic {}\".format(basic_auth.decode(\"utf-8\"))}\n\n try:\n response = requests.post(introspection_url, data={\"token\": token}, headers=headers)\n except requests.exceptions.RequestException:\n log.exception(\"Introspection: Failed POST to %r in token lookup\", introspection_url)\n return None\n\n # Log an exception when response from auth server is not successful\n if response.status_code != http.client.OK:\n log.exception(\n \"Introspection: Failed to get a valid response \"\n \"from authentication server. Status code: {}, \"\n \"Reason: {}.\".format(response.status_code, response.reason)\n )\n return None\n\n try:\n content = response.json()\n except ValueError:\n log.exception(\"Introspection: Failed to parse response as json\")\n return None\n\n if \"active\" in content and content[\"active\"] is True:\n if \"username\" in content:\n user, _created = UserModel.objects.get_or_create(\n **{UserModel.USERNAME_FIELD: content[\"username\"]}\n )\n else:\n user = None\n\n max_caching_time = datetime.now() + timedelta(\n seconds=oauth2_settings.RESOURCE_SERVER_TOKEN_CACHING_SECONDS\n )\n\n if \"exp\" in content:\n expires = datetime.utcfromtimestamp(content[\"exp\"])\n if expires > max_caching_time:\n expires = max_caching_time\n else:\n expires = max_caching_time\n\n scope = content.get(\"scope\", \"\")\n expires = make_aware(expires)\n\n access_token, _created = AccessToken.objects.update_or_create(\n token=token,\n defaults={\n \"user\": user,\n \"application\": None,\n \"scope\": scope,\n \"expires\": expires,\n },\n )\n\n return access_token\n\n def validate_bearer_token(self, token, scopes, request):\n \"\"\"\n When users try to access resources, check that provided token is valid\n \"\"\"\n if not token:\n return False\n\n introspection_url = oauth2_settings.RESOURCE_SERVER_INTROSPECTION_URL\n introspection_token = oauth2_settings.RESOURCE_SERVER_AUTH_TOKEN\n introspection_credentials = oauth2_settings.RESOURCE_SERVER_INTROSPECTION_CREDENTIALS\n\n access_token = self._load_access_token(token)\n\n # if there is no token or it's invalid then introspect the token if there's an external OAuth server\n if not access_token or not access_token.is_valid(scopes):\n if introspection_url and (introspection_token or introspection_credentials):\n access_token = self._get_token_from_authentication_server(\n token, introspection_url, introspection_token, introspection_credentials\n )\n\n if access_token and access_token.is_valid(scopes):\n request.client = access_token.application\n request.user = access_token.user\n request.scopes = scopes\n\n # this is needed by django rest framework\n request.access_token = access_token\n return True\n else:\n self._set_oauth2_error_on_request(request, access_token, scopes)\n return False\n\n def _load_access_token(self, token):\n return AccessToken.objects.select_related(\"application\", \"user\").filter(token=token).first()\n\n def validate_code(self, client_id, code, client, request, *args, **kwargs):\n try:\n grant = Grant.objects.get(code=code, application=client)\n if not 
grant.is_expired():\n request.scopes = grant.scope.split(\" \")\n request.user = grant.user\n if grant.nonce:\n request.nonce = grant.nonce\n if grant.claims:\n request.claims = json.loads(grant.claims)\n return True\n return False\n\n except Grant.DoesNotExist:\n return False\n\n def validate_grant_type(self, client_id, grant_type, client, request, *args, **kwargs):\n \"\"\"\n Validate both grant_type is a valid string and grant_type is allowed for current workflow\n \"\"\"\n assert grant_type in GRANT_TYPE_MAPPING # mapping misconfiguration\n return request.client.allows_grant_type(*GRANT_TYPE_MAPPING[grant_type])\n\n def validate_response_type(self, client_id, response_type, client, request, *args, **kwargs):\n \"\"\"\n We currently do not support the Authorization Endpoint Response Types registry as in\n rfc:`8.4`, so validate the response_type only if it matches \"code\" or \"token\"\n \"\"\"\n if response_type == \"code\":\n return client.allows_grant_type(AbstractApplication.GRANT_AUTHORIZATION_CODE)\n elif response_type == \"token\":\n return client.allows_grant_type(AbstractApplication.GRANT_IMPLICIT)\n elif response_type == \"id_token\":\n return client.allows_grant_type(AbstractApplication.GRANT_IMPLICIT)\n elif response_type == \"id_token token\":\n return client.allows_grant_type(AbstractApplication.GRANT_IMPLICIT)\n elif response_type == \"code id_token\":\n return client.allows_grant_type(AbstractApplication.GRANT_OPENID_HYBRID)\n elif response_type == \"code token\":\n return client.allows_grant_type(AbstractApplication.GRANT_OPENID_HYBRID)\n elif response_type == \"code id_token token\":\n return client.allows_grant_type(AbstractApplication.GRANT_OPENID_HYBRID)\n else:\n return False\n\n def validate_scopes(self, client_id, scopes, client, request, *args, **kwargs):\n \"\"\"\n Ensure required scopes are permitted (as specified in the settings file)\n \"\"\"\n available_scopes = get_scopes_backend().get_available_scopes(application=client, request=request)\n return set(scopes).issubset(set(available_scopes))\n\n def get_default_scopes(self, client_id, request, *args, **kwargs):\n default_scopes = get_scopes_backend().get_default_scopes(application=request.client, request=request)\n return default_scopes\n\n def validate_redirect_uri(self, client_id, redirect_uri, request, *args, **kwargs):\n return request.client.redirect_uri_allowed(redirect_uri)\n\n def is_pkce_required(self, client_id, request):\n \"\"\"\n Enables or disables PKCE verification.\n\n Uses the setting PKCE_REQUIRED, which can be either a bool or a callable that\n receives the client id and returns a bool.\n \"\"\"\n if callable(oauth2_settings.PKCE_REQUIRED):\n return oauth2_settings.PKCE_REQUIRED(client_id)\n return oauth2_settings.PKCE_REQUIRED\n\n def get_code_challenge(self, code, request):\n grant = Grant.objects.get(code=code, application=request.client)\n return grant.code_challenge or None\n\n def get_code_challenge_method(self, code, request):\n grant = Grant.objects.get(code=code, application=request.client)\n return grant.code_challenge_method or None\n\n def save_authorization_code(self, client_id, code, request, *args, **kwargs):\n self._create_authorization_code(request, code)\n\n def get_authorization_code_scopes(self, client_id, code, redirect_uri, request):\n scopes = Grant.objects.filter(code=code).values_list(\"scope\", flat=True).first()\n if scopes:\n return utils.scope_to_list(scopes)\n return []\n\n def rotate_refresh_token(self, request):\n \"\"\"\n Checks if rotate refresh token is 
enabled\n \"\"\"\n return oauth2_settings.ROTATE_REFRESH_TOKEN\n\n @transaction.atomic\n def save_bearer_token(self, token, request, *args, **kwargs):\n \"\"\"\n Save access and refresh token, If refresh token is issued, remove or\n reuse old refresh token as in rfc:`6`\n\n @see: https://tools.ietf.org/html/draft-ietf-oauth-v2-31#page-43\n \"\"\"\n\n if \"scope\" not in token:\n raise FatalClientError(\"Failed to renew access token: missing scope\")\n\n # expires_in is passed to Server on initialization\n # custom server class can have logic to override this\n expires = timezone.now() + timedelta(\n seconds=token.get(\n \"expires_in\",\n oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS,\n )\n )\n\n if request.grant_type == \"client_credentials\":\n request.user = None\n\n # This comes from OAuthLib:\n # https://github.com/idan/oauthlib/blob/1.0.3/oauthlib/oauth2/rfc6749/tokens.py#L267\n # Its value is either a new random code; or if we are reusing\n # refresh tokens, then it is the same value that the request passed in\n # (stored in `request.refresh_token`)\n refresh_token_code = token.get(\"refresh_token\", None)\n\n if refresh_token_code:\n # an instance of `RefreshToken` that matches the old refresh code.\n # Set on the request in `validate_refresh_token`\n refresh_token_instance = getattr(request, \"refresh_token_instance\", None)\n\n # If we are to reuse tokens, and we can: do so\n if (\n not self.rotate_refresh_token(request)\n and isinstance(refresh_token_instance, RefreshToken)\n and refresh_token_instance.access_token\n ):\n\n access_token = AccessToken.objects.select_for_update().get(\n pk=refresh_token_instance.access_token.pk\n )\n access_token.user = request.user\n access_token.scope = token[\"scope\"]\n access_token.expires = expires\n access_token.token = token[\"access_token\"]\n access_token.application = request.client\n access_token.save()\n\n # else create fresh with access & refresh tokens\n else:\n # revoke existing tokens if possible to allow reuse of grant\n if isinstance(refresh_token_instance, RefreshToken):\n # First, to ensure we don't have concurrency issues, we refresh the refresh token\n # from the db while acquiring a lock on it\n # We also put it in the \"request cache\"\n refresh_token_instance = RefreshToken.objects.select_for_update().get(\n id=refresh_token_instance.id\n )\n request.refresh_token_instance = refresh_token_instance\n\n previous_access_token = AccessToken.objects.filter(\n source_refresh_token=refresh_token_instance\n ).first()\n try:\n refresh_token_instance.revoke()\n except (AccessToken.DoesNotExist, RefreshToken.DoesNotExist):\n pass\n else:\n setattr(request, \"refresh_token_instance\", None)\n else:\n previous_access_token = None\n\n # If the refresh token has already been used to create an\n # access token (ie it's within the grace period), return that\n # access token\n if not previous_access_token:\n access_token = self._create_access_token(\n expires,\n request,\n token,\n source_refresh_token=refresh_token_instance,\n )\n\n self._create_refresh_token(request, refresh_token_code, access_token)\n else:\n # make sure that the token data we're returning matches\n # the existing token\n token[\"access_token\"] = previous_access_token.token\n token[\"refresh_token\"] = (\n RefreshToken.objects.filter(access_token=previous_access_token).first().token\n )\n token[\"scope\"] = previous_access_token.scope\n\n # No refresh token should be created, just access token\n else:\n self._create_access_token(expires, request, token)\n\n def 
_create_access_token(self, expires, request, token, source_refresh_token=None):\n id_token = token.get(\"id_token\", None)\n if id_token:\n id_token = self._load_id_token(id_token)\n return AccessToken.objects.create(\n user=request.user,\n scope=token[\"scope\"],\n expires=expires,\n token=token[\"access_token\"],\n id_token=id_token,\n application=request.client,\n source_refresh_token=source_refresh_token,\n )\n\n def _create_authorization_code(self, request, code, expires=None):\n if not expires:\n expires = timezone.now() + timedelta(seconds=oauth2_settings.AUTHORIZATION_CODE_EXPIRE_SECONDS)\n return Grant.objects.create(\n application=request.client,\n user=request.user,\n code=code[\"code\"],\n expires=expires,\n redirect_uri=request.redirect_uri,\n scope=\" \".join(request.scopes),\n code_challenge=request.code_challenge or \"\",\n code_challenge_method=request.code_challenge_method or \"\",\n nonce=request.nonce or \"\",\n claims=json.dumps(request.claims or {}),\n )\n\n def _create_refresh_token(self, request, refresh_token_code, access_token):\n return RefreshToken.objects.create(\n user=request.user, token=refresh_token_code, application=request.client, access_token=access_token\n )\n\n def revoke_token(self, token, token_type_hint, request, *args, **kwargs):\n \"\"\"\n Revoke an access or refresh token.\n\n :param token: The token string.\n :param token_type_hint: access_token or refresh_token.\n :param request: The HTTP Request (oauthlib.common.Request)\n \"\"\"\n if token_type_hint not in [\"access_token\", \"refresh_token\"]:\n token_type_hint = None\n\n token_types = {\n \"access_token\": AccessToken,\n \"refresh_token\": RefreshToken,\n }\n\n token_type = token_types.get(token_type_hint, AccessToken)\n try:\n token_type.objects.get(token=token).revoke()\n except ObjectDoesNotExist:\n for other_type in [_t for _t in token_types.values() if _t != token_type]:\n # slightly inefficient on Python2, but the queryset contains only one instance\n list(map(lambda t: t.revoke(), other_type.objects.filter(token=token)))\n\n def validate_user(self, username, password, client, request, *args, **kwargs):\n \"\"\"\n Check username and password correspond to a valid and active User\n \"\"\"\n u = authenticate(username=username, password=password)\n if u is not None and u.is_active:\n request.user = u\n return True\n return False\n\n def get_original_scopes(self, refresh_token, request, *args, **kwargs):\n # Avoid second query for RefreshToken since this method is invoked *after*\n # validate_refresh_token.\n rt = request.refresh_token_instance\n if not rt.access_token_id:\n return AccessToken.objects.get(source_refresh_token_id=rt.id).scope\n\n return rt.access_token.scope\n\n def validate_refresh_token(self, refresh_token, client, request, *args, **kwargs):\n \"\"\"\n Check refresh_token exists and refers to the right client.\n Also attach User instance to the request object\n \"\"\"\n\n null_or_recent = Q(revoked__isnull=True) | Q(\n revoked__gt=timezone.now() - timedelta(seconds=oauth2_settings.REFRESH_TOKEN_GRACE_PERIOD_SECONDS)\n )\n rt = (\n RefreshToken.objects.filter(null_or_recent, token=refresh_token)\n .select_related(\"access_token\")\n .first()\n )\n\n if not rt:\n return False\n\n request.user = rt.user\n request.refresh_token = rt.token\n # Temporary store RefreshToken instance to be reused by get_original_scopes and save_bearer_token.\n request.refresh_token_instance = rt\n return rt.application == client\n\n @transaction.atomic\n def _save_id_token(self, jti, request, 
expires, *args, **kwargs):\n scopes = request.scope or \" \".join(request.scopes)\n\n id_token = IDToken.objects.create(\n user=request.user,\n scope=scopes,\n expires=expires,\n jti=jti,\n application=request.client,\n )\n return id_token\n\n def get_jwt_bearer_token(self, token, token_handler, request):\n return self.get_id_token(token, token_handler, request)\n\n def get_oidc_claims(self, token, token_handler, request):\n # Required OIDC claims\n claims = {\n \"sub\": str(request.user.id),\n }\n\n # https://openid.net/specs/openid-connect-core-1_0.html#StandardClaims\n claims.update(**self.get_additional_claims(request))\n\n return claims\n\n def get_id_token_dictionary(self, token, token_handler, request):\n \"\"\"\n Get the claims to put in the ID Token.\n\n These claims are in addition to the claims automatically added by\n ``oauthlib`` - aud, iat, nonce, at_hash, c_hash.\n\n This function adds in iss, exp and auth_time, plus any claims added from\n calling ``get_oidc_claims()``\n \"\"\"\n claims = self.get_oidc_claims(token, token_handler, request)\n\n expiration_time = timezone.now() + timedelta(seconds=oauth2_settings.ID_TOKEN_EXPIRE_SECONDS)\n # Required ID Token claims\n claims.update(\n **{\n \"iss\": self.get_oidc_issuer_endpoint(request),\n \"exp\": int(dateformat.format(expiration_time, \"U\")),\n \"auth_time\": int(dateformat.format(request.user.last_login, \"U\")),\n \"jti\": str(uuid.uuid4()),\n }\n )\n\n return claims, expiration_time\n\n def get_oidc_issuer_endpoint(self, request):\n return oauth2_settings.oidc_issuer(request)\n\n def finalize_id_token(self, id_token, token, token_handler, request):\n claims, expiration_time = self.get_id_token_dictionary(token, token_handler, request)\n id_token.update(**claims)\n # Workaround for oauthlib bug #746\n # https://github.com/oauthlib/oauthlib/issues/746\n if \"nonce\" not in id_token and request.nonce:\n id_token[\"nonce\"] = request.nonce\n\n header = {\n \"typ\": \"JWT\",\n \"alg\": request.client.algorithm,\n }\n # RS256 consumers expect a kid in the header for verifying the token\n if request.client.algorithm == AbstractApplication.RS256_ALGORITHM:\n header[\"kid\"] = request.client.jwk_key.thumbprint()\n\n jwt_token = jwt.JWT(\n header=json.dumps(header, default=str),\n claims=json.dumps(id_token, default=str),\n )\n jwt_token.make_signed_token(request.client.jwk_key)\n id_token = self._save_id_token(id_token[\"jti\"], request, expiration_time)\n # this is needed by django rest framework\n request.access_token = id_token\n request.id_token = id_token\n return jwt_token.serialize()\n\n def validate_jwt_bearer_token(self, token, scopes, request):\n return self.validate_id_token(token, scopes, request)\n\n def validate_id_token(self, token, scopes, request):\n \"\"\"\n When users try to access resources, check that provided id_token is valid\n \"\"\"\n if not token:\n return False\n\n id_token = self._load_id_token(token)\n if not id_token:\n return False\n\n if not id_token.allow_scopes(scopes):\n return False\n\n request.client = id_token.application\n request.user = id_token.user\n request.scopes = scopes\n # this is needed by django rest framework\n request.access_token = id_token\n return True\n\n def _load_id_token(self, token):\n key = self._get_key_for_token(token)\n if not key:\n return None\n try:\n jwt_token = jwt.JWT(key=key, jwt=token)\n claims = json.loads(jwt_token.claims)\n return IDToken.objects.get(jti=claims[\"jti\"])\n except (JWException, JWTExpired, IDToken.DoesNotExist):\n return None\n\n def 
_get_key_for_token(self, token):\n \"\"\"\n Peek at the unvalidated token to discover who it was issued for\n and then use that to load that application and its key.\n \"\"\"\n unverified_token = jws.JWS()\n unverified_token.deserialize(token)\n claims = json.loads(unverified_token.objects[\"payload\"].decode(\"utf-8\"))\n if \"aud\" not in claims:\n return None\n application = self._get_client_by_audience(claims[\"aud\"])\n if application:\n return application.jwk_key\n\n def _get_client_by_audience(self, audience):\n \"\"\"\n Load a client by the aud claim in a JWT.\n aud may be multi-valued, if your provider makes it so.\n This function is separate to allow further customization.\n \"\"\"\n if isinstance(audience, str):\n audience = [audience]\n return Application.objects.filter(client_id__in=audience).first()\n\n def validate_user_match(self, id_token_hint, scopes, claims, request):\n # TODO: Fix to validate when necessary acording\n # https://github.com/idan/oauthlib/blob/master/oauthlib/oauth2/rfc6749/request_validator.py#L556\n # http://openid.net/specs/openid-connect-core-1_0.html#AuthRequest id_token_hint section\n return True\n\n def get_authorization_code_nonce(self, client_id, code, redirect_uri, request):\n \"\"\"Extracts nonce from saved authorization code.\n If present in the Authentication Request, Authorization\n Servers MUST include a nonce Claim in the ID Token with the\n Claim Value being the nonce value sent in the Authentication\n Request. Authorization Servers SHOULD perform no other\n processing on nonce values used. The nonce value is a\n case-sensitive string.\n Only code param should be sufficient to retrieve grant code from\n any storage you are using. However, `client_id` and `redirect_uri`\n have been validated and can be used also.\n :param client_id: Unicode client identifier\n :param code: Unicode authorization code grant\n :param redirect_uri: Unicode absolute URI\n :return: Unicode nonce\n Method is used by:\n - Authorization Token Grant Dispatcher\n \"\"\"\n nonce = Grant.objects.filter(code=code).values_list(\"nonce\", flat=True).first()\n if nonce:\n return nonce\n\n def get_userinfo_claims(self, request):\n \"\"\"\n Generates and saves a new JWT for this request, and returns it as the\n current user's claims.\n\n \"\"\"\n return self.get_oidc_claims(None, None, request)\n\n def get_additional_claims(self, request):\n return {}\n", "path": "oauth2_provider/oauth2_validators.py"}], "after_files": [{"content": "import base64\nimport binascii\nimport http.client\nimport json\nimport logging\nimport uuid\nfrom collections import OrderedDict\nfrom datetime import datetime, timedelta\nfrom urllib.parse import unquote_plus\n\nimport requests\nfrom django.conf import settings\nfrom django.contrib.auth import authenticate, get_user_model\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import transaction\nfrom django.db.models import Q\nfrom django.utils import dateformat, timezone\nfrom django.utils.timezone import make_aware\nfrom django.utils.translation import gettext_lazy as _\nfrom jwcrypto import jws, jwt\nfrom jwcrypto.common import JWException\nfrom jwcrypto.jwt import JWTExpired\nfrom oauthlib.oauth2.rfc6749 import utils\nfrom oauthlib.openid import RequestValidator\n\nfrom .exceptions import FatalClientError\nfrom .models import (\n AbstractApplication,\n get_access_token_model,\n get_application_model,\n get_grant_model,\n get_id_token_model,\n get_refresh_token_model,\n)\nfrom .scopes import get_scopes_backend\nfrom .settings 
import oauth2_settings\n\n\nlog = logging.getLogger(\"oauth2_provider\")\n\nGRANT_TYPE_MAPPING = {\n \"authorization_code\": (\n AbstractApplication.GRANT_AUTHORIZATION_CODE,\n AbstractApplication.GRANT_OPENID_HYBRID,\n ),\n \"password\": (AbstractApplication.GRANT_PASSWORD,),\n \"client_credentials\": (AbstractApplication.GRANT_CLIENT_CREDENTIALS,),\n \"refresh_token\": (\n AbstractApplication.GRANT_AUTHORIZATION_CODE,\n AbstractApplication.GRANT_PASSWORD,\n AbstractApplication.GRANT_CLIENT_CREDENTIALS,\n AbstractApplication.GRANT_OPENID_HYBRID,\n ),\n}\n\nApplication = get_application_model()\nAccessToken = get_access_token_model()\nIDToken = get_id_token_model()\nGrant = get_grant_model()\nRefreshToken = get_refresh_token_model()\nUserModel = get_user_model()\n\n\nclass OAuth2Validator(RequestValidator):\n def _extract_basic_auth(self, request):\n \"\"\"\n Return authentication string if request contains basic auth credentials,\n otherwise return None\n \"\"\"\n auth = request.headers.get(\"HTTP_AUTHORIZATION\", None)\n if not auth:\n return None\n\n splitted = auth.split(\" \", 1)\n if len(splitted) != 2:\n return None\n auth_type, auth_string = splitted\n\n if auth_type != \"Basic\":\n return None\n\n return auth_string\n\n def _authenticate_basic_auth(self, request):\n \"\"\"\n Authenticates with HTTP Basic Auth.\n\n Note: as stated in rfc:`2.3.1`, client_id and client_secret must be encoded with\n \"application/x-www-form-urlencoded\" encoding algorithm.\n \"\"\"\n auth_string = self._extract_basic_auth(request)\n if not auth_string:\n return False\n\n try:\n encoding = request.encoding or settings.DEFAULT_CHARSET or \"utf-8\"\n except AttributeError:\n encoding = \"utf-8\"\n\n try:\n b64_decoded = base64.b64decode(auth_string)\n except (TypeError, binascii.Error):\n log.debug(\"Failed basic auth: %r can't be decoded as base64\", auth_string)\n return False\n\n try:\n auth_string_decoded = b64_decoded.decode(encoding)\n except UnicodeDecodeError:\n log.debug(\"Failed basic auth: %r can't be decoded as unicode by %r\", auth_string, encoding)\n return False\n\n try:\n client_id, client_secret = map(unquote_plus, auth_string_decoded.split(\":\", 1))\n except ValueError:\n log.debug(\"Failed basic auth, Invalid base64 encoding.\")\n return False\n\n if self._load_application(client_id, request) is None:\n log.debug(\"Failed basic auth: Application %s does not exist\" % client_id)\n return False\n elif request.client.client_id != client_id:\n log.debug(\"Failed basic auth: wrong client id %s\" % client_id)\n return False\n elif request.client.client_secret != client_secret:\n log.debug(\"Failed basic auth: wrong client secret %s\" % client_secret)\n return False\n else:\n return True\n\n def _authenticate_request_body(self, request):\n \"\"\"\n Try to authenticate the client using client_id and client_secret\n parameters included in body.\n\n Remember that this method is NOT RECOMMENDED and SHOULD be limited to\n clients unable to directly utilize the HTTP Basic authentication scheme.\n See rfc:`2.3.1` for more details.\n \"\"\"\n # TODO: check if oauthlib has already unquoted client_id and client_secret\n try:\n client_id = request.client_id\n client_secret = request.client_secret\n except AttributeError:\n return False\n\n if self._load_application(client_id, request) is None:\n log.debug(\"Failed body auth: Application %s does not exists\" % client_id)\n return False\n elif request.client.client_secret != client_secret:\n log.debug(\"Failed body auth: wrong client secret %s\" % 
client_secret)\n return False\n else:\n return True\n\n def _load_application(self, client_id, request):\n \"\"\"\n If request.client was not set, load application instance for given\n client_id and store it in request.client\n \"\"\"\n\n # we want to be sure that request has the client attribute!\n assert hasattr(request, \"client\"), '\"request\" instance has no \"client\" attribute'\n\n try:\n request.client = request.client or Application.objects.get(client_id=client_id)\n # Check that the application can be used (defaults to always True)\n if not request.client.is_usable(request):\n log.debug(\"Failed body authentication: Application %r is disabled\" % (client_id))\n return None\n return request.client\n except Application.DoesNotExist:\n log.debug(\"Failed body authentication: Application %r does not exist\" % (client_id))\n return None\n\n def _set_oauth2_error_on_request(self, request, access_token, scopes):\n if access_token is None:\n error = OrderedDict(\n [\n (\"error\", \"invalid_token\"),\n (\"error_description\", _(\"The access token is invalid.\")),\n ]\n )\n elif access_token.is_expired():\n error = OrderedDict(\n [\n (\"error\", \"invalid_token\"),\n (\"error_description\", _(\"The access token has expired.\")),\n ]\n )\n elif not access_token.allow_scopes(scopes):\n error = OrderedDict(\n [\n (\"error\", \"insufficient_scope\"),\n (\"error_description\", _(\"The access token is valid but does not have enough scope.\")),\n ]\n )\n else:\n log.warning(\"OAuth2 access token is invalid for an unknown reason.\")\n error = OrderedDict(\n [\n (\"error\", \"invalid_token\"),\n ]\n )\n request.oauth2_error = error\n return request\n\n def client_authentication_required(self, request, *args, **kwargs):\n \"\"\"\n Determine if the client has to be authenticated\n\n This method is called only for grant types that supports client authentication:\n * Authorization code grant\n * Resource owner password grant\n * Refresh token grant\n\n If the request contains authorization headers, always authenticate the client\n no matter the grant type.\n\n If the request does not contain authorization headers, proceed with authentication\n only if the client is of type `Confidential`.\n\n If something goes wrong, call oauthlib implementation of the method.\n \"\"\"\n if self._extract_basic_auth(request):\n return True\n\n try:\n if request.client_id and request.client_secret:\n return True\n except AttributeError:\n log.debug(\"Client ID or client secret not provided...\")\n pass\n\n self._load_application(request.client_id, request)\n if request.client:\n return request.client.client_type == AbstractApplication.CLIENT_CONFIDENTIAL\n\n return super().client_authentication_required(request, *args, **kwargs)\n\n def authenticate_client(self, request, *args, **kwargs):\n \"\"\"\n Check if client exists and is authenticating itself as in rfc:`3.2.1`\n\n First we try to authenticate with HTTP Basic Auth, and that is the PREFERRED\n authentication method.\n Whether this fails we support including the client credentials in the request-body,\n but this method is NOT RECOMMENDED and SHOULD be limited to clients unable to\n directly utilize the HTTP Basic authentication scheme.\n See rfc:`2.3.1` for more details\n \"\"\"\n authenticated = self._authenticate_basic_auth(request)\n\n if not authenticated:\n authenticated = self._authenticate_request_body(request)\n\n return authenticated\n\n def authenticate_client_id(self, client_id, request, *args, **kwargs):\n \"\"\"\n If we are here, the client did not 
authenticate itself as in rfc:`3.2.1` and we can\n proceed only if the client exists and is not of type \"Confidential\".\n \"\"\"\n if self._load_application(client_id, request) is not None:\n log.debug(\"Application %r has type %r\" % (client_id, request.client.client_type))\n return request.client.client_type != AbstractApplication.CLIENT_CONFIDENTIAL\n return False\n\n def confirm_redirect_uri(self, client_id, code, redirect_uri, client, *args, **kwargs):\n \"\"\"\n Ensure the redirect_uri is listed in the Application instance redirect_uris field\n \"\"\"\n grant = Grant.objects.get(code=code, application=client)\n return grant.redirect_uri_allowed(redirect_uri)\n\n def invalidate_authorization_code(self, client_id, code, request, *args, **kwargs):\n \"\"\"\n Remove the temporary grant used to swap the authorization token\n \"\"\"\n grant = Grant.objects.get(code=code, application=request.client)\n grant.delete()\n\n def validate_client_id(self, client_id, request, *args, **kwargs):\n \"\"\"\n Ensure an Application exists with given client_id.\n If it exists, it's assigned to request.client.\n \"\"\"\n return self._load_application(client_id, request) is not None\n\n def get_default_redirect_uri(self, client_id, request, *args, **kwargs):\n return request.client.default_redirect_uri\n\n def _get_token_from_authentication_server(\n self, token, introspection_url, introspection_token, introspection_credentials\n ):\n \"\"\"Use external introspection endpoint to \"crack open\" the token.\n :param introspection_url: introspection endpoint URL\n :param introspection_token: Bearer token\n :param introspection_credentials: Basic Auth credentials (id,secret)\n :return: :class:`models.AccessToken`\n\n Some RFC 7662 implementations (including this one) use a Bearer token while others use Basic\n Auth. Depending on the external AS's implementation, provide either the introspection_token\n or the introspection_credentials.\n\n If the resulting access_token identifies a username (e.g. Authorization Code grant), add\n that user to the UserModel. Also cache the access_token up until its expiry time or a\n configured maximum time.\n\n \"\"\"\n headers = None\n if introspection_token:\n headers = {\"Authorization\": \"Bearer {}\".format(introspection_token)}\n elif introspection_credentials:\n client_id = introspection_credentials[0].encode(\"utf-8\")\n client_secret = introspection_credentials[1].encode(\"utf-8\")\n basic_auth = base64.b64encode(client_id + b\":\" + client_secret)\n headers = {\"Authorization\": \"Basic {}\".format(basic_auth.decode(\"utf-8\"))}\n\n try:\n response = requests.post(introspection_url, data={\"token\": token}, headers=headers)\n except requests.exceptions.RequestException:\n log.exception(\"Introspection: Failed POST to %r in token lookup\", introspection_url)\n return None\n\n # Log an exception when response from auth server is not successful\n if response.status_code != http.client.OK:\n log.exception(\n \"Introspection: Failed to get a valid response \"\n \"from authentication server. 
Status code: {}, \"\n \"Reason: {}.\".format(response.status_code, response.reason)\n )\n return None\n\n try:\n content = response.json()\n except ValueError:\n log.exception(\"Introspection: Failed to parse response as json\")\n return None\n\n if \"active\" in content and content[\"active\"] is True:\n if \"username\" in content:\n user, _created = UserModel.objects.get_or_create(\n **{UserModel.USERNAME_FIELD: content[\"username\"]}\n )\n else:\n user = None\n\n max_caching_time = datetime.now() + timedelta(\n seconds=oauth2_settings.RESOURCE_SERVER_TOKEN_CACHING_SECONDS\n )\n\n if \"exp\" in content:\n expires = datetime.utcfromtimestamp(content[\"exp\"])\n if expires > max_caching_time:\n expires = max_caching_time\n else:\n expires = max_caching_time\n\n scope = content.get(\"scope\", \"\")\n expires = make_aware(expires) if settings.USE_TZ else expires\n\n access_token, _created = AccessToken.objects.update_or_create(\n token=token,\n defaults={\n \"user\": user,\n \"application\": None,\n \"scope\": scope,\n \"expires\": expires,\n },\n )\n\n return access_token\n\n def validate_bearer_token(self, token, scopes, request):\n \"\"\"\n When users try to access resources, check that provided token is valid\n \"\"\"\n if not token:\n return False\n\n introspection_url = oauth2_settings.RESOURCE_SERVER_INTROSPECTION_URL\n introspection_token = oauth2_settings.RESOURCE_SERVER_AUTH_TOKEN\n introspection_credentials = oauth2_settings.RESOURCE_SERVER_INTROSPECTION_CREDENTIALS\n\n access_token = self._load_access_token(token)\n\n # if there is no token or it's invalid then introspect the token if there's an external OAuth server\n if not access_token or not access_token.is_valid(scopes):\n if introspection_url and (introspection_token or introspection_credentials):\n access_token = self._get_token_from_authentication_server(\n token, introspection_url, introspection_token, introspection_credentials\n )\n\n if access_token and access_token.is_valid(scopes):\n request.client = access_token.application\n request.user = access_token.user\n request.scopes = scopes\n\n # this is needed by django rest framework\n request.access_token = access_token\n return True\n else:\n self._set_oauth2_error_on_request(request, access_token, scopes)\n return False\n\n def _load_access_token(self, token):\n return AccessToken.objects.select_related(\"application\", \"user\").filter(token=token).first()\n\n def validate_code(self, client_id, code, client, request, *args, **kwargs):\n try:\n grant = Grant.objects.get(code=code, application=client)\n if not grant.is_expired():\n request.scopes = grant.scope.split(\" \")\n request.user = grant.user\n if grant.nonce:\n request.nonce = grant.nonce\n if grant.claims:\n request.claims = json.loads(grant.claims)\n return True\n return False\n\n except Grant.DoesNotExist:\n return False\n\n def validate_grant_type(self, client_id, grant_type, client, request, *args, **kwargs):\n \"\"\"\n Validate both grant_type is a valid string and grant_type is allowed for current workflow\n \"\"\"\n assert grant_type in GRANT_TYPE_MAPPING # mapping misconfiguration\n return request.client.allows_grant_type(*GRANT_TYPE_MAPPING[grant_type])\n\n def validate_response_type(self, client_id, response_type, client, request, *args, **kwargs):\n \"\"\"\n We currently do not support the Authorization Endpoint Response Types registry as in\n rfc:`8.4`, so validate the response_type only if it matches \"code\" or \"token\"\n \"\"\"\n if response_type == \"code\":\n return 
client.allows_grant_type(AbstractApplication.GRANT_AUTHORIZATION_CODE)\n elif response_type == \"token\":\n return client.allows_grant_type(AbstractApplication.GRANT_IMPLICIT)\n elif response_type == \"id_token\":\n return client.allows_grant_type(AbstractApplication.GRANT_IMPLICIT)\n elif response_type == \"id_token token\":\n return client.allows_grant_type(AbstractApplication.GRANT_IMPLICIT)\n elif response_type == \"code id_token\":\n return client.allows_grant_type(AbstractApplication.GRANT_OPENID_HYBRID)\n elif response_type == \"code token\":\n return client.allows_grant_type(AbstractApplication.GRANT_OPENID_HYBRID)\n elif response_type == \"code id_token token\":\n return client.allows_grant_type(AbstractApplication.GRANT_OPENID_HYBRID)\n else:\n return False\n\n def validate_scopes(self, client_id, scopes, client, request, *args, **kwargs):\n \"\"\"\n Ensure required scopes are permitted (as specified in the settings file)\n \"\"\"\n available_scopes = get_scopes_backend().get_available_scopes(application=client, request=request)\n return set(scopes).issubset(set(available_scopes))\n\n def get_default_scopes(self, client_id, request, *args, **kwargs):\n default_scopes = get_scopes_backend().get_default_scopes(application=request.client, request=request)\n return default_scopes\n\n def validate_redirect_uri(self, client_id, redirect_uri, request, *args, **kwargs):\n return request.client.redirect_uri_allowed(redirect_uri)\n\n def is_pkce_required(self, client_id, request):\n \"\"\"\n Enables or disables PKCE verification.\n\n Uses the setting PKCE_REQUIRED, which can be either a bool or a callable that\n receives the client id and returns a bool.\n \"\"\"\n if callable(oauth2_settings.PKCE_REQUIRED):\n return oauth2_settings.PKCE_REQUIRED(client_id)\n return oauth2_settings.PKCE_REQUIRED\n\n def get_code_challenge(self, code, request):\n grant = Grant.objects.get(code=code, application=request.client)\n return grant.code_challenge or None\n\n def get_code_challenge_method(self, code, request):\n grant = Grant.objects.get(code=code, application=request.client)\n return grant.code_challenge_method or None\n\n def save_authorization_code(self, client_id, code, request, *args, **kwargs):\n self._create_authorization_code(request, code)\n\n def get_authorization_code_scopes(self, client_id, code, redirect_uri, request):\n scopes = Grant.objects.filter(code=code).values_list(\"scope\", flat=True).first()\n if scopes:\n return utils.scope_to_list(scopes)\n return []\n\n def rotate_refresh_token(self, request):\n \"\"\"\n Checks if rotate refresh token is enabled\n \"\"\"\n return oauth2_settings.ROTATE_REFRESH_TOKEN\n\n @transaction.atomic\n def save_bearer_token(self, token, request, *args, **kwargs):\n \"\"\"\n Save access and refresh token, If refresh token is issued, remove or\n reuse old refresh token as in rfc:`6`\n\n @see: https://tools.ietf.org/html/draft-ietf-oauth-v2-31#page-43\n \"\"\"\n\n if \"scope\" not in token:\n raise FatalClientError(\"Failed to renew access token: missing scope\")\n\n # expires_in is passed to Server on initialization\n # custom server class can have logic to override this\n expires = timezone.now() + timedelta(\n seconds=token.get(\n \"expires_in\",\n oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS,\n )\n )\n\n if request.grant_type == \"client_credentials\":\n request.user = None\n\n # This comes from OAuthLib:\n # https://github.com/idan/oauthlib/blob/1.0.3/oauthlib/oauth2/rfc6749/tokens.py#L267\n # Its value is either a new random code; or if we are 
reusing\n # refresh tokens, then it is the same value that the request passed in\n # (stored in `request.refresh_token`)\n refresh_token_code = token.get(\"refresh_token\", None)\n\n if refresh_token_code:\n # an instance of `RefreshToken` that matches the old refresh code.\n # Set on the request in `validate_refresh_token`\n refresh_token_instance = getattr(request, \"refresh_token_instance\", None)\n\n # If we are to reuse tokens, and we can: do so\n if (\n not self.rotate_refresh_token(request)\n and isinstance(refresh_token_instance, RefreshToken)\n and refresh_token_instance.access_token\n ):\n\n access_token = AccessToken.objects.select_for_update().get(\n pk=refresh_token_instance.access_token.pk\n )\n access_token.user = request.user\n access_token.scope = token[\"scope\"]\n access_token.expires = expires\n access_token.token = token[\"access_token\"]\n access_token.application = request.client\n access_token.save()\n\n # else create fresh with access & refresh tokens\n else:\n # revoke existing tokens if possible to allow reuse of grant\n if isinstance(refresh_token_instance, RefreshToken):\n # First, to ensure we don't have concurrency issues, we refresh the refresh token\n # from the db while acquiring a lock on it\n # We also put it in the \"request cache\"\n refresh_token_instance = RefreshToken.objects.select_for_update().get(\n id=refresh_token_instance.id\n )\n request.refresh_token_instance = refresh_token_instance\n\n previous_access_token = AccessToken.objects.filter(\n source_refresh_token=refresh_token_instance\n ).first()\n try:\n refresh_token_instance.revoke()\n except (AccessToken.DoesNotExist, RefreshToken.DoesNotExist):\n pass\n else:\n setattr(request, \"refresh_token_instance\", None)\n else:\n previous_access_token = None\n\n # If the refresh token has already been used to create an\n # access token (ie it's within the grace period), return that\n # access token\n if not previous_access_token:\n access_token = self._create_access_token(\n expires,\n request,\n token,\n source_refresh_token=refresh_token_instance,\n )\n\n self._create_refresh_token(request, refresh_token_code, access_token)\n else:\n # make sure that the token data we're returning matches\n # the existing token\n token[\"access_token\"] = previous_access_token.token\n token[\"refresh_token\"] = (\n RefreshToken.objects.filter(access_token=previous_access_token).first().token\n )\n token[\"scope\"] = previous_access_token.scope\n\n # No refresh token should be created, just access token\n else:\n self._create_access_token(expires, request, token)\n\n def _create_access_token(self, expires, request, token, source_refresh_token=None):\n id_token = token.get(\"id_token\", None)\n if id_token:\n id_token = self._load_id_token(id_token)\n return AccessToken.objects.create(\n user=request.user,\n scope=token[\"scope\"],\n expires=expires,\n token=token[\"access_token\"],\n id_token=id_token,\n application=request.client,\n source_refresh_token=source_refresh_token,\n )\n\n def _create_authorization_code(self, request, code, expires=None):\n if not expires:\n expires = timezone.now() + timedelta(seconds=oauth2_settings.AUTHORIZATION_CODE_EXPIRE_SECONDS)\n return Grant.objects.create(\n application=request.client,\n user=request.user,\n code=code[\"code\"],\n expires=expires,\n redirect_uri=request.redirect_uri,\n scope=\" \".join(request.scopes),\n code_challenge=request.code_challenge or \"\",\n code_challenge_method=request.code_challenge_method or \"\",\n nonce=request.nonce or \"\",\n 
claims=json.dumps(request.claims or {}),\n )\n\n def _create_refresh_token(self, request, refresh_token_code, access_token):\n return RefreshToken.objects.create(\n user=request.user, token=refresh_token_code, application=request.client, access_token=access_token\n )\n\n def revoke_token(self, token, token_type_hint, request, *args, **kwargs):\n \"\"\"\n Revoke an access or refresh token.\n\n :param token: The token string.\n :param token_type_hint: access_token or refresh_token.\n :param request: The HTTP Request (oauthlib.common.Request)\n \"\"\"\n if token_type_hint not in [\"access_token\", \"refresh_token\"]:\n token_type_hint = None\n\n token_types = {\n \"access_token\": AccessToken,\n \"refresh_token\": RefreshToken,\n }\n\n token_type = token_types.get(token_type_hint, AccessToken)\n try:\n token_type.objects.get(token=token).revoke()\n except ObjectDoesNotExist:\n for other_type in [_t for _t in token_types.values() if _t != token_type]:\n # slightly inefficient on Python2, but the queryset contains only one instance\n list(map(lambda t: t.revoke(), other_type.objects.filter(token=token)))\n\n def validate_user(self, username, password, client, request, *args, **kwargs):\n \"\"\"\n Check username and password correspond to a valid and active User\n \"\"\"\n u = authenticate(username=username, password=password)\n if u is not None and u.is_active:\n request.user = u\n return True\n return False\n\n def get_original_scopes(self, refresh_token, request, *args, **kwargs):\n # Avoid second query for RefreshToken since this method is invoked *after*\n # validate_refresh_token.\n rt = request.refresh_token_instance\n if not rt.access_token_id:\n return AccessToken.objects.get(source_refresh_token_id=rt.id).scope\n\n return rt.access_token.scope\n\n def validate_refresh_token(self, refresh_token, client, request, *args, **kwargs):\n \"\"\"\n Check refresh_token exists and refers to the right client.\n Also attach User instance to the request object\n \"\"\"\n\n null_or_recent = Q(revoked__isnull=True) | Q(\n revoked__gt=timezone.now() - timedelta(seconds=oauth2_settings.REFRESH_TOKEN_GRACE_PERIOD_SECONDS)\n )\n rt = (\n RefreshToken.objects.filter(null_or_recent, token=refresh_token)\n .select_related(\"access_token\")\n .first()\n )\n\n if not rt:\n return False\n\n request.user = rt.user\n request.refresh_token = rt.token\n # Temporary store RefreshToken instance to be reused by get_original_scopes and save_bearer_token.\n request.refresh_token_instance = rt\n return rt.application == client\n\n @transaction.atomic\n def _save_id_token(self, jti, request, expires, *args, **kwargs):\n scopes = request.scope or \" \".join(request.scopes)\n\n id_token = IDToken.objects.create(\n user=request.user,\n scope=scopes,\n expires=expires,\n jti=jti,\n application=request.client,\n )\n return id_token\n\n def get_jwt_bearer_token(self, token, token_handler, request):\n return self.get_id_token(token, token_handler, request)\n\n def get_oidc_claims(self, token, token_handler, request):\n # Required OIDC claims\n claims = {\n \"sub\": str(request.user.id),\n }\n\n # https://openid.net/specs/openid-connect-core-1_0.html#StandardClaims\n claims.update(**self.get_additional_claims(request))\n\n return claims\n\n def get_id_token_dictionary(self, token, token_handler, request):\n \"\"\"\n Get the claims to put in the ID Token.\n\n These claims are in addition to the claims automatically added by\n ``oauthlib`` - aud, iat, nonce, at_hash, c_hash.\n\n This function adds in iss, exp and auth_time, plus 
any claims added from\n calling ``get_oidc_claims()``\n \"\"\"\n claims = self.get_oidc_claims(token, token_handler, request)\n\n expiration_time = timezone.now() + timedelta(seconds=oauth2_settings.ID_TOKEN_EXPIRE_SECONDS)\n # Required ID Token claims\n claims.update(\n **{\n \"iss\": self.get_oidc_issuer_endpoint(request),\n \"exp\": int(dateformat.format(expiration_time, \"U\")),\n \"auth_time\": int(dateformat.format(request.user.last_login, \"U\")),\n \"jti\": str(uuid.uuid4()),\n }\n )\n\n return claims, expiration_time\n\n def get_oidc_issuer_endpoint(self, request):\n return oauth2_settings.oidc_issuer(request)\n\n def finalize_id_token(self, id_token, token, token_handler, request):\n claims, expiration_time = self.get_id_token_dictionary(token, token_handler, request)\n id_token.update(**claims)\n # Workaround for oauthlib bug #746\n # https://github.com/oauthlib/oauthlib/issues/746\n if \"nonce\" not in id_token and request.nonce:\n id_token[\"nonce\"] = request.nonce\n\n header = {\n \"typ\": \"JWT\",\n \"alg\": request.client.algorithm,\n }\n # RS256 consumers expect a kid in the header for verifying the token\n if request.client.algorithm == AbstractApplication.RS256_ALGORITHM:\n header[\"kid\"] = request.client.jwk_key.thumbprint()\n\n jwt_token = jwt.JWT(\n header=json.dumps(header, default=str),\n claims=json.dumps(id_token, default=str),\n )\n jwt_token.make_signed_token(request.client.jwk_key)\n id_token = self._save_id_token(id_token[\"jti\"], request, expiration_time)\n # this is needed by django rest framework\n request.access_token = id_token\n request.id_token = id_token\n return jwt_token.serialize()\n\n def validate_jwt_bearer_token(self, token, scopes, request):\n return self.validate_id_token(token, scopes, request)\n\n def validate_id_token(self, token, scopes, request):\n \"\"\"\n When users try to access resources, check that provided id_token is valid\n \"\"\"\n if not token:\n return False\n\n id_token = self._load_id_token(token)\n if not id_token:\n return False\n\n if not id_token.allow_scopes(scopes):\n return False\n\n request.client = id_token.application\n request.user = id_token.user\n request.scopes = scopes\n # this is needed by django rest framework\n request.access_token = id_token\n return True\n\n def _load_id_token(self, token):\n key = self._get_key_for_token(token)\n if not key:\n return None\n try:\n jwt_token = jwt.JWT(key=key, jwt=token)\n claims = json.loads(jwt_token.claims)\n return IDToken.objects.get(jti=claims[\"jti\"])\n except (JWException, JWTExpired, IDToken.DoesNotExist):\n return None\n\n def _get_key_for_token(self, token):\n \"\"\"\n Peek at the unvalidated token to discover who it was issued for\n and then use that to load that application and its key.\n \"\"\"\n unverified_token = jws.JWS()\n unverified_token.deserialize(token)\n claims = json.loads(unverified_token.objects[\"payload\"].decode(\"utf-8\"))\n if \"aud\" not in claims:\n return None\n application = self._get_client_by_audience(claims[\"aud\"])\n if application:\n return application.jwk_key\n\n def _get_client_by_audience(self, audience):\n \"\"\"\n Load a client by the aud claim in a JWT.\n aud may be multi-valued, if your provider makes it so.\n This function is separate to allow further customization.\n \"\"\"\n if isinstance(audience, str):\n audience = [audience]\n return Application.objects.filter(client_id__in=audience).first()\n\n def validate_user_match(self, id_token_hint, scopes, claims, request):\n # TODO: Fix to validate when necessary acording\n 
# https://github.com/idan/oauthlib/blob/master/oauthlib/oauth2/rfc6749/request_validator.py#L556\n # http://openid.net/specs/openid-connect-core-1_0.html#AuthRequest id_token_hint section\n return True\n\n def get_authorization_code_nonce(self, client_id, code, redirect_uri, request):\n \"\"\"Extracts nonce from saved authorization code.\n If present in the Authentication Request, Authorization\n Servers MUST include a nonce Claim in the ID Token with the\n Claim Value being the nonce value sent in the Authentication\n Request. Authorization Servers SHOULD perform no other\n processing on nonce values used. The nonce value is a\n case-sensitive string.\n Only code param should be sufficient to retrieve grant code from\n any storage you are using. However, `client_id` and `redirect_uri`\n have been validated and can be used also.\n :param client_id: Unicode client identifier\n :param code: Unicode authorization code grant\n :param redirect_uri: Unicode absolute URI\n :return: Unicode nonce\n Method is used by:\n - Authorization Token Grant Dispatcher\n \"\"\"\n nonce = Grant.objects.filter(code=code).values_list(\"nonce\", flat=True).first()\n if nonce:\n return nonce\n\n def get_userinfo_claims(self, request):\n \"\"\"\n Generates and saves a new JWT for this request, and returns it as the\n current user's claims.\n\n \"\"\"\n return self.get_oidc_claims(None, None, request)\n\n def get_additional_claims(self, request):\n return {}\n", "path": "oauth2_provider/oauth2_validators.py"}]} |
gh_patches_debug_1227 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-1463 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Can't view options
##### Steps to reproduce the problem:
1. `mitmproxy`
2. Press `o`
##### What is the expected behavior?
No Crash!
##### What went wrong?
mitmproxy crashed!
```
~/dev/mitmproxy (master) > env/bin/mitmproxy 03:29:44
Traceback (most recent call last):
File "/home/dufferzafar/dev/mitmproxy/mitmproxy/console/master.py", line 537, in run
self.loop.run()
File "/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/main_loop.py", line 278, in run
self._run()
File "/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/main_loop.py", line 376, in _run
self.event_loop.run()
File "/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/main_loop.py", line 682, in run
self._loop()
File "/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/main_loop.py", line 719, in _loop
self._watch_files[fd]()
File "/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/raw_display.py", line 393, in <lambda>
event_loop, callback, self.get_available_raw_input())
File "/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/raw_display.py", line 493, in parse_input
callback(processed, processed_codes)
File "/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/main_loop.py", line 403, in _update
self.process_input(keys)
File "/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/main_loop.py", line 503, in process_input
k = self._topmost_widget.keypress(self.screen_size, k)
File "/home/dufferzafar/dev/mitmproxy/mitmproxy/console/window.py", line 89, in keypress
self.master.view_options()
File "/home/dufferzafar/dev/mitmproxy/mitmproxy/console/master.py", line 572, in view_options
options.help_context,
File "/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/blinker/base.py", line 267, in send
for receiver in self.receivers_for(sender)]
File "/home/dufferzafar/dev/mitmproxy/mitmproxy/console/master.py", line 333, in sig_push_view_state
self.loop.draw_screen()
File "/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/main_loop.py", line 578, in draw_screen
canvas = self._topmost_widget.render(self.screen_size, focus=True)
File "/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/widget.py", line 141, in cached_render
canv = fn(self, size, focus=focus)
File "/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/container.py", line 1083, in render
focus and self.focus_part == 'body')
File "/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/widget.py", line 141, in cached_render
canv = fn(self, size, focus=focus)
File "/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/decoration.py", line 225, in render
canv = self._original_widget.render(size, focus=focus)
File "/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/widget.py", line 141, in cached_render
canv = fn(self, size, focus=focus)
File "/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/widget.py", line 1751, in render
canv = get_delegate(self).render(size, focus=focus)
File "/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/widget.py", line 141, in cached_render
canv = fn(self, size, focus=focus)
File "/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/container.py", line 1083, in render
focus and self.focus_part == 'body')
File "/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/widget.py", line 141, in cached_render
canv = fn(self, size, focus=focus)
File "/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/listbox.py", line 457, in render
(maxcol, maxrow), focus=focus)
File "/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/listbox.py", line 339, in calculate_visible
self._set_focus_complete( (maxcol, maxrow), focus )
File "/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/listbox.py", line 704, in _set_focus_complete
(maxcol,maxrow), focus)
File "/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/listbox.py", line 674, in _set_focus_first_selectable
(maxcol, maxrow), focus=focus)
File "/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/listbox.py", line 402, in calculate_visible
next, pos = self.body.get_next( pos )
File "/home/dufferzafar/dev/mitmproxy/mitmproxy/console/select.py", line 57, in get_next
return self.options[pos + 1].render(False), pos + 1
File "/home/dufferzafar/dev/mitmproxy/mitmproxy/console/select.py", line 94, in render
self.getstate(),
File "/home/dufferzafar/dev/mitmproxy/mitmproxy/console/options.py", line 97, in <lambda>
lambda: master.server.config.ssl_insecure,
AttributeError: ProxyConfig instance has no attribute 'ssl_insecure'
mitmproxy has crashed!
Please lodge a bug report at:
https://github.com/mitmproxy/mitmproxy
Shutting down...
```
--- END ISSUE ---
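The final frame of the traceback is the telling one: `mitmproxy/console/options.py` evaluates `master.server.config.ssl_insecure`, reading the flag from the `ProxyConfig` object itself rather than from the options object that actually carries it. Below is a minimal, self-contained sketch of that failure mode; the class and attribute layout is simplified and hypothetical, not mitmproxy's real code.

```python
# Illustrative sketch of the failure mode (simplified, hypothetical classes).
class Options:
    ssl_insecure = False            # the flag lives on the options object


class ProxyConfig:
    def __init__(self, options):
        self.options = options      # configure() consumes options.ssl_insecure,
                                    # but nothing ever assigns self.ssl_insecure


config = ProxyConfig(Options())
print(config.options.ssl_insecure)      # False -> works
print(hasattr(config, "ssl_insecure"))  # False -> a direct config.ssl_insecure
                                        # lookup raises AttributeError
```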
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mitmproxy/proxy/config.py`
Content:
```
1 from __future__ import absolute_import, print_function, division
2
3 import base64
4 import collections
5 import os
6 import re
7 from netlib import strutils
8
9 import six
10 from OpenSSL import SSL, crypto
11
12 from mitmproxy import exceptions
13 from netlib import certutils
14 from netlib import tcp
15 from netlib.http import authentication
16 from netlib.http import url
17
18 CONF_BASENAME = "mitmproxy"
19
20
21 class HostMatcher(object):
22
23 def __init__(self, patterns=tuple()):
24 self.patterns = list(patterns)
25 self.regexes = [re.compile(p, re.IGNORECASE) for p in self.patterns]
26
27 def __call__(self, address):
28 if not address:
29 return False
30 address = tcp.Address.wrap(address)
31 host = "%s:%s" % (address.host, address.port)
32 if any(rex.search(host) for rex in self.regexes):
33 return True
34 else:
35 return False
36
37 def __bool__(self):
38 return bool(self.patterns)
39
40 if six.PY2:
41 __nonzero__ = __bool__
42
43
44 ServerSpec = collections.namedtuple("ServerSpec", "scheme address")
45
46
47 def parse_server_spec(spec):
48 try:
49 p = url.parse(spec)
50 if p[0] not in (b"http", b"https"):
51 raise ValueError()
52 except ValueError:
53 raise exceptions.OptionsError(
54 "Invalid server specification: %s" % spec
55 )
56 host, port = p[1:3]
57 address = tcp.Address((host.decode("ascii"), port))
58 scheme = p[0].decode("ascii").lower()
59 return ServerSpec(scheme, address)
60
61
62 def parse_upstream_auth(auth):
63 pattern = re.compile(".+:")
64 if pattern.search(auth) is None:
65 raise exceptions.OptionsError(
66 "Invalid upstream auth specification: %s" % auth
67 )
68 return b"Basic" + b" " + base64.b64encode(strutils.always_bytes(auth))
69
70
71 class ProxyConfig:
72
73 def __init__(self, options):
74 self.options = options
75
76 self.authenticator = None
77 self.check_ignore = None
78 self.check_tcp = None
79 self.certstore = None
80 self.clientcerts = None
81 self.openssl_verification_mode_server = None
82 self.configure(options, set(options.keys()))
83 options.changed.connect(self.configure)
84
85 def configure(self, options, updated):
86 # type: (mitmproxy.options.Options, Any) -> None
87 if options.add_upstream_certs_to_client_chain and not options.ssl_insecure:
88 raise exceptions.OptionsError(
89 "The verify-upstream-cert requires certificate verification to be disabled. "
90 "If upstream certificates are verified then extra upstream certificates are "
91 "not available for inclusion to the client chain."
92 )
93
94 if options.ssl_insecure:
95 self.openssl_verification_mode_server = SSL.VERIFY_NONE
96 else:
97 self.openssl_verification_mode_server = SSL.VERIFY_PEER
98
99 self.check_ignore = HostMatcher(options.ignore_hosts)
100 self.check_tcp = HostMatcher(options.tcp_hosts)
101
102 self.openssl_method_client, self.openssl_options_client = \
103 tcp.sslversion_choices[options.ssl_version_client]
104 self.openssl_method_server, self.openssl_options_server = \
105 tcp.sslversion_choices[options.ssl_version_server]
106
107 certstore_path = os.path.expanduser(options.cadir)
108 if not os.path.exists(os.path.dirname(certstore_path)):
109 raise exceptions.OptionsError(
110 "Certificate Authority parent directory does not exist: %s" %
111 os.path.dirname(options.cadir)
112 )
113 self.certstore = certutils.CertStore.from_store(
114 certstore_path,
115 CONF_BASENAME
116 )
117
118 if options.clientcerts:
119 clientcerts = os.path.expanduser(options.clientcerts)
120 if not os.path.exists(clientcerts):
121 raise exceptions.OptionsError(
122 "Client certificate path does not exist: %s" %
123 options.clientcerts
124 )
125 self.clientcerts = clientcerts
126
127 for spec, cert in options.certs:
128 cert = os.path.expanduser(cert)
129 if not os.path.exists(cert):
130 raise exceptions.OptionsError(
131 "Certificate file does not exist: %s" % cert
132 )
133 try:
134 self.certstore.add_cert_file(spec, cert)
135 except crypto.Error:
136 raise exceptions.OptionsError(
137 "Invalid certificate format: %s" % cert
138 )
139
140 self.upstream_server = None
141 self.upstream_auth = None
142 if options.upstream_server:
143 self.upstream_server = parse_server_spec(options.upstream_server)
144 if options.upstream_auth:
145 self.upstream_auth = parse_upstream_auth(options.upstream_auth)
146
147 self.authenticator = authentication.NullProxyAuth(None)
148 needsauth = any(
149 [
150 options.auth_nonanonymous,
151 options.auth_singleuser,
152 options.auth_htpasswd
153 ]
154 )
155 if needsauth:
156 if options.mode == "transparent":
157 raise exceptions.OptionsError(
158 "Proxy Authentication not supported in transparent mode."
159 )
160 elif options.mode == "socks5":
161 raise exceptions.OptionsError(
162 "Proxy Authentication not supported in SOCKS mode. "
163 "https://github.com/mitmproxy/mitmproxy/issues/738"
164 )
165 elif options.auth_singleuser:
166 parts = options.auth_singleuser.split(':')
167 if len(parts) != 2:
168 raise exceptions.OptionsError(
169 "Invalid single-user specification. "
170 "Please use the format username:password"
171 )
172 password_manager = authentication.PassManSingleUser(*parts)
173 elif options.auth_nonanonymous:
174 password_manager = authentication.PassManNonAnon()
175 elif options.auth_htpasswd:
176 try:
177 password_manager = authentication.PassManHtpasswd(
178 options.auth_htpasswd
179 )
180 except ValueError as v:
181 raise exceptions.OptionsError(str(v))
182 self.authenticator = authentication.BasicProxyAuth(
183 password_manager,
184 "mitmproxy"
185 )
186
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mitmproxy/proxy/config.py b/mitmproxy/proxy/config.py
--- a/mitmproxy/proxy/config.py
+++ b/mitmproxy/proxy/config.py
@@ -78,6 +78,7 @@
self.check_tcp = None
self.certstore = None
self.clientcerts = None
+ self.ssl_insecure = False
self.openssl_verification_mode_server = None
self.configure(options, set(options.keys()))
options.changed.connect(self.configure)
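The fix shown above is the smallest safe change: `ProxyConfig.__init__` now assigns `self.ssl_insecure = False` alongside the other defaults, so any code that introspects the config object (such as the console options screen in the traceback) always finds the attribute. Whether `configure()` later mirrors `options.ssl_insecure` onto the config is not visible in this hunk; the sketch below only illustrates the default added here and uses simplified, hypothetical names.

```python
# Sketch of the patched initialization (simplified, not the full ProxyConfig).
class ProxyConfig:
    def __init__(self, options):
        self.options = options
        self.ssl_insecure = False   # default added by the patch


config = ProxyConfig(options=None)
print(config.ssl_insecure)          # False, instead of raising AttributeError
```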
| {"golden_diff": "diff --git a/mitmproxy/proxy/config.py b/mitmproxy/proxy/config.py\n--- a/mitmproxy/proxy/config.py\n+++ b/mitmproxy/proxy/config.py\n@@ -78,6 +78,7 @@\n self.check_tcp = None\n self.certstore = None\n self.clientcerts = None\n+ self.ssl_insecure = False\n self.openssl_verification_mode_server = None\n self.configure(options, set(options.keys()))\n options.changed.connect(self.configure)\n", "issue": "Can't view options\n##### Steps to reproduce the problem:\n1. `mitmproxy`\n2. Press `o`\n##### What is the expected behavior?\n\nNo Crash!\n##### What went wrong?\n\nmitmproxy crashed!\n\n```\n~/dev/mitmproxy (master) > env/bin/mitmproxy 03:29:44\nTraceback (most recent call last):\n File \"/home/dufferzafar/dev/mitmproxy/mitmproxy/console/master.py\", line 537, in run\n self.loop.run()\n File \"/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/main_loop.py\", line 278, in run\n self._run()\n File \"/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/main_loop.py\", line 376, in _run\n self.event_loop.run()\n File \"/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/main_loop.py\", line 682, in run\n self._loop()\n File \"/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/main_loop.py\", line 719, in _loop\n self._watch_files[fd]()\n File \"/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/raw_display.py\", line 393, in <lambda>\n event_loop, callback, self.get_available_raw_input())\n File \"/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/raw_display.py\", line 493, in parse_input\n callback(processed, processed_codes)\n File \"/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/main_loop.py\", line 403, in _update\n self.process_input(keys)\n File \"/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/main_loop.py\", line 503, in process_input\n k = self._topmost_widget.keypress(self.screen_size, k)\n File \"/home/dufferzafar/dev/mitmproxy/mitmproxy/console/window.py\", line 89, in keypress\n self.master.view_options()\n File \"/home/dufferzafar/dev/mitmproxy/mitmproxy/console/master.py\", line 572, in view_options\n options.help_context,\n File \"/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/blinker/base.py\", line 267, in send\n for receiver in self.receivers_for(sender)]\n File \"/home/dufferzafar/dev/mitmproxy/mitmproxy/console/master.py\", line 333, in sig_push_view_state\n self.loop.draw_screen()\n File \"/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/main_loop.py\", line 578, in draw_screen\n canvas = self._topmost_widget.render(self.screen_size, focus=True)\n File \"/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/widget.py\", line 141, in cached_render\n canv = fn(self, size, focus=focus)\n File \"/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/container.py\", line 1083, in render\n focus and self.focus_part == 'body')\n File \"/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/widget.py\", line 141, in cached_render\n canv = fn(self, size, focus=focus)\n File \"/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/decoration.py\", line 225, in render\n canv = self._original_widget.render(size, focus=focus)\n File \"/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/widget.py\", line 141, in cached_render\n canv = fn(self, size, focus=focus)\n File 
\"/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/widget.py\", line 1751, in render\n canv = get_delegate(self).render(size, focus=focus)\n File \"/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/widget.py\", line 141, in cached_render\n canv = fn(self, size, focus=focus)\n File \"/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/container.py\", line 1083, in render\n focus and self.focus_part == 'body')\n File \"/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/widget.py\", line 141, in cached_render\n canv = fn(self, size, focus=focus)\n File \"/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/listbox.py\", line 457, in render\n (maxcol, maxrow), focus=focus)\n File \"/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/listbox.py\", line 339, in calculate_visible\n self._set_focus_complete( (maxcol, maxrow), focus )\n File \"/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/listbox.py\", line 704, in _set_focus_complete\n (maxcol,maxrow), focus)\n File \"/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/listbox.py\", line 674, in _set_focus_first_selectable\n (maxcol, maxrow), focus=focus)\n File \"/home/dufferzafar/dev/mitmproxy/env/lib/python2.7/site-packages/urwid/listbox.py\", line 402, in calculate_visible\n next, pos = self.body.get_next( pos )\n File \"/home/dufferzafar/dev/mitmproxy/mitmproxy/console/select.py\", line 57, in get_next\n return self.options[pos + 1].render(False), pos + 1\n File \"/home/dufferzafar/dev/mitmproxy/mitmproxy/console/select.py\", line 94, in render\n self.getstate(),\n File \"/home/dufferzafar/dev/mitmproxy/mitmproxy/console/options.py\", line 97, in <lambda>\n lambda: master.server.config.ssl_insecure,\nAttributeError: ProxyConfig instance has no attribute 'ssl_insecure'\n\nmitmproxy has crashed!\nPlease lodge a bug report at:\n https://github.com/mitmproxy/mitmproxy\nShutting down...\n```\n\n", "before_files": [{"content": "from __future__ import absolute_import, print_function, division\n\nimport base64\nimport collections\nimport os\nimport re\nfrom netlib import strutils\n\nimport six\nfrom OpenSSL import SSL, crypto\n\nfrom mitmproxy import exceptions\nfrom netlib import certutils\nfrom netlib import tcp\nfrom netlib.http import authentication\nfrom netlib.http import url\n\nCONF_BASENAME = \"mitmproxy\"\n\n\nclass HostMatcher(object):\n\n def __init__(self, patterns=tuple()):\n self.patterns = list(patterns)\n self.regexes = [re.compile(p, re.IGNORECASE) for p in self.patterns]\n\n def __call__(self, address):\n if not address:\n return False\n address = tcp.Address.wrap(address)\n host = \"%s:%s\" % (address.host, address.port)\n if any(rex.search(host) for rex in self.regexes):\n return True\n else:\n return False\n\n def __bool__(self):\n return bool(self.patterns)\n\n if six.PY2:\n __nonzero__ = __bool__\n\n\nServerSpec = collections.namedtuple(\"ServerSpec\", \"scheme address\")\n\n\ndef parse_server_spec(spec):\n try:\n p = url.parse(spec)\n if p[0] not in (b\"http\", b\"https\"):\n raise ValueError()\n except ValueError:\n raise exceptions.OptionsError(\n \"Invalid server specification: %s\" % spec\n )\n host, port = p[1:3]\n address = tcp.Address((host.decode(\"ascii\"), port))\n scheme = p[0].decode(\"ascii\").lower()\n return ServerSpec(scheme, address)\n\n\ndef parse_upstream_auth(auth):\n pattern = re.compile(\".+:\")\n if pattern.search(auth) is None:\n raise 
exceptions.OptionsError(\n \"Invalid upstream auth specification: %s\" % auth\n )\n return b\"Basic\" + b\" \" + base64.b64encode(strutils.always_bytes(auth))\n\n\nclass ProxyConfig:\n\n def __init__(self, options):\n self.options = options\n\n self.authenticator = None\n self.check_ignore = None\n self.check_tcp = None\n self.certstore = None\n self.clientcerts = None\n self.openssl_verification_mode_server = None\n self.configure(options, set(options.keys()))\n options.changed.connect(self.configure)\n\n def configure(self, options, updated):\n # type: (mitmproxy.options.Options, Any) -> None\n if options.add_upstream_certs_to_client_chain and not options.ssl_insecure:\n raise exceptions.OptionsError(\n \"The verify-upstream-cert requires certificate verification to be disabled. \"\n \"If upstream certificates are verified then extra upstream certificates are \"\n \"not available for inclusion to the client chain.\"\n )\n\n if options.ssl_insecure:\n self.openssl_verification_mode_server = SSL.VERIFY_NONE\n else:\n self.openssl_verification_mode_server = SSL.VERIFY_PEER\n\n self.check_ignore = HostMatcher(options.ignore_hosts)\n self.check_tcp = HostMatcher(options.tcp_hosts)\n\n self.openssl_method_client, self.openssl_options_client = \\\n tcp.sslversion_choices[options.ssl_version_client]\n self.openssl_method_server, self.openssl_options_server = \\\n tcp.sslversion_choices[options.ssl_version_server]\n\n certstore_path = os.path.expanduser(options.cadir)\n if not os.path.exists(os.path.dirname(certstore_path)):\n raise exceptions.OptionsError(\n \"Certificate Authority parent directory does not exist: %s\" %\n os.path.dirname(options.cadir)\n )\n self.certstore = certutils.CertStore.from_store(\n certstore_path,\n CONF_BASENAME\n )\n\n if options.clientcerts:\n clientcerts = os.path.expanduser(options.clientcerts)\n if not os.path.exists(clientcerts):\n raise exceptions.OptionsError(\n \"Client certificate path does not exist: %s\" %\n options.clientcerts\n )\n self.clientcerts = clientcerts\n\n for spec, cert in options.certs:\n cert = os.path.expanduser(cert)\n if not os.path.exists(cert):\n raise exceptions.OptionsError(\n \"Certificate file does not exist: %s\" % cert\n )\n try:\n self.certstore.add_cert_file(spec, cert)\n except crypto.Error:\n raise exceptions.OptionsError(\n \"Invalid certificate format: %s\" % cert\n )\n\n self.upstream_server = None\n self.upstream_auth = None\n if options.upstream_server:\n self.upstream_server = parse_server_spec(options.upstream_server)\n if options.upstream_auth:\n self.upstream_auth = parse_upstream_auth(options.upstream_auth)\n\n self.authenticator = authentication.NullProxyAuth(None)\n needsauth = any(\n [\n options.auth_nonanonymous,\n options.auth_singleuser,\n options.auth_htpasswd\n ]\n )\n if needsauth:\n if options.mode == \"transparent\":\n raise exceptions.OptionsError(\n \"Proxy Authentication not supported in transparent mode.\"\n )\n elif options.mode == \"socks5\":\n raise exceptions.OptionsError(\n \"Proxy Authentication not supported in SOCKS mode. \"\n \"https://github.com/mitmproxy/mitmproxy/issues/738\"\n )\n elif options.auth_singleuser:\n parts = options.auth_singleuser.split(':')\n if len(parts) != 2:\n raise exceptions.OptionsError(\n \"Invalid single-user specification. 
\"\n \"Please use the format username:password\"\n )\n password_manager = authentication.PassManSingleUser(*parts)\n elif options.auth_nonanonymous:\n password_manager = authentication.PassManNonAnon()\n elif options.auth_htpasswd:\n try:\n password_manager = authentication.PassManHtpasswd(\n options.auth_htpasswd\n )\n except ValueError as v:\n raise exceptions.OptionsError(str(v))\n self.authenticator = authentication.BasicProxyAuth(\n password_manager,\n \"mitmproxy\"\n )\n", "path": "mitmproxy/proxy/config.py"}], "after_files": [{"content": "from __future__ import absolute_import, print_function, division\n\nimport base64\nimport collections\nimport os\nimport re\nfrom netlib import strutils\n\nimport six\nfrom OpenSSL import SSL, crypto\n\nfrom mitmproxy import exceptions\nfrom netlib import certutils\nfrom netlib import tcp\nfrom netlib.http import authentication\nfrom netlib.http import url\n\nCONF_BASENAME = \"mitmproxy\"\n\n\nclass HostMatcher(object):\n\n def __init__(self, patterns=tuple()):\n self.patterns = list(patterns)\n self.regexes = [re.compile(p, re.IGNORECASE) for p in self.patterns]\n\n def __call__(self, address):\n if not address:\n return False\n address = tcp.Address.wrap(address)\n host = \"%s:%s\" % (address.host, address.port)\n if any(rex.search(host) for rex in self.regexes):\n return True\n else:\n return False\n\n def __bool__(self):\n return bool(self.patterns)\n\n if six.PY2:\n __nonzero__ = __bool__\n\n\nServerSpec = collections.namedtuple(\"ServerSpec\", \"scheme address\")\n\n\ndef parse_server_spec(spec):\n try:\n p = url.parse(spec)\n if p[0] not in (b\"http\", b\"https\"):\n raise ValueError()\n except ValueError:\n raise exceptions.OptionsError(\n \"Invalid server specification: %s\" % spec\n )\n host, port = p[1:3]\n address = tcp.Address((host.decode(\"ascii\"), port))\n scheme = p[0].decode(\"ascii\").lower()\n return ServerSpec(scheme, address)\n\n\ndef parse_upstream_auth(auth):\n pattern = re.compile(\".+:\")\n if pattern.search(auth) is None:\n raise exceptions.OptionsError(\n \"Invalid upstream auth specification: %s\" % auth\n )\n return b\"Basic\" + b\" \" + base64.b64encode(strutils.always_bytes(auth))\n\n\nclass ProxyConfig:\n\n def __init__(self, options):\n self.options = options\n\n self.authenticator = None\n self.check_ignore = None\n self.check_tcp = None\n self.certstore = None\n self.clientcerts = None\n self.ssl_insecure = False\n self.openssl_verification_mode_server = None\n self.configure(options, set(options.keys()))\n options.changed.connect(self.configure)\n\n def configure(self, options, updated):\n # type: (mitmproxy.options.Options, Any) -> None\n if options.add_upstream_certs_to_client_chain and not options.ssl_insecure:\n raise exceptions.OptionsError(\n \"The verify-upstream-cert requires certificate verification to be disabled. 
\"\n \"If upstream certificates are verified then extra upstream certificates are \"\n \"not available for inclusion to the client chain.\"\n )\n\n if options.ssl_insecure:\n self.openssl_verification_mode_server = SSL.VERIFY_NONE\n else:\n self.openssl_verification_mode_server = SSL.VERIFY_PEER\n\n self.check_ignore = HostMatcher(options.ignore_hosts)\n self.check_tcp = HostMatcher(options.tcp_hosts)\n\n self.openssl_method_client, self.openssl_options_client = \\\n tcp.sslversion_choices[options.ssl_version_client]\n self.openssl_method_server, self.openssl_options_server = \\\n tcp.sslversion_choices[options.ssl_version_server]\n\n certstore_path = os.path.expanduser(options.cadir)\n if not os.path.exists(os.path.dirname(certstore_path)):\n raise exceptions.OptionsError(\n \"Certificate Authority parent directory does not exist: %s\" %\n os.path.dirname(options.cadir)\n )\n self.certstore = certutils.CertStore.from_store(\n certstore_path,\n CONF_BASENAME\n )\n\n if options.clientcerts:\n clientcerts = os.path.expanduser(options.clientcerts)\n if not os.path.exists(clientcerts):\n raise exceptions.OptionsError(\n \"Client certificate path does not exist: %s\" %\n options.clientcerts\n )\n self.clientcerts = clientcerts\n\n for spec, cert in options.certs:\n cert = os.path.expanduser(cert)\n if not os.path.exists(cert):\n raise exceptions.OptionsError(\n \"Certificate file does not exist: %s\" % cert\n )\n try:\n self.certstore.add_cert_file(spec, cert)\n except crypto.Error:\n raise exceptions.OptionsError(\n \"Invalid certificate format: %s\" % cert\n )\n\n self.upstream_server = None\n self.upstream_auth = None\n if options.upstream_server:\n self.upstream_server = parse_server_spec(options.upstream_server)\n if options.upstream_auth:\n self.upstream_auth = parse_upstream_auth(options.upstream_auth)\n\n self.authenticator = authentication.NullProxyAuth(None)\n needsauth = any(\n [\n options.auth_nonanonymous,\n options.auth_singleuser,\n options.auth_htpasswd\n ]\n )\n if needsauth:\n if options.mode == \"transparent\":\n raise exceptions.OptionsError(\n \"Proxy Authentication not supported in transparent mode.\"\n )\n elif options.mode == \"socks5\":\n raise exceptions.OptionsError(\n \"Proxy Authentication not supported in SOCKS mode. \"\n \"https://github.com/mitmproxy/mitmproxy/issues/738\"\n )\n elif options.auth_singleuser:\n parts = options.auth_singleuser.split(':')\n if len(parts) != 2:\n raise exceptions.OptionsError(\n \"Invalid single-user specification. \"\n \"Please use the format username:password\"\n )\n password_manager = authentication.PassManSingleUser(*parts)\n elif options.auth_nonanonymous:\n password_manager = authentication.PassManNonAnon()\n elif options.auth_htpasswd:\n try:\n password_manager = authentication.PassManHtpasswd(\n options.auth_htpasswd\n )\n except ValueError as v:\n raise exceptions.OptionsError(str(v))\n self.authenticator = authentication.BasicProxyAuth(\n password_manager,\n \"mitmproxy\"\n )\n", "path": "mitmproxy/proxy/config.py"}]} |
gh_patches_debug_1228 | rasdani/github-patches | git_diff | cisagov__manage.get.gov-963 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Implement (isActive)
### Issue Description
Requires #851
isActive will return true if the domain has a state of CREATED
Note: check the domain status
### Additional Context (optional)
Separate function because we might check if this is live on the internet.


This will be used when checking if an approved domain can be switched to ineligible or rejected.
### Issue Links
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/registrar/models/domain.py`
Content:
```
1 import logging
2
3 from datetime import date
4 from string import digits
5 from django_fsm import FSMField # type: ignore
6
7 from django.db import models
8
9 from epplibwrapper import (
10 CLIENT as registry,
11 commands,
12 common as epp,
13 RegistryError,
14 ErrorCode,
15 )
16
17 from .utility.domain_field import DomainField
18 from .utility.domain_helper import DomainHelper
19 from .utility.time_stamped_model import TimeStampedModel
20
21 from .public_contact import PublicContact
22
23 logger = logging.getLogger(__name__)
24
25
26 class Domain(TimeStampedModel, DomainHelper):
27 """
28 Manage the lifecycle of domain names.
29
30 The registry is the source of truth for this data and this model exists:
31 1. To tie ownership information in the registrar to
32 DNS entries in the registry
33
34 ~~~ HOW TO USE THIS CLASS ~~~
35
36 A) You can create a Domain object with just a name. `Domain(name="something.gov")`.
37 B) Saving the Domain object will not contact the registry, as it may be useful
38 to have Domain objects in an `UNKNOWN` pre-created state.
39 C) Domain properties are lazily loaded. Accessing `my_domain.expiration_date` will
40 contact the registry, if a cached copy does not exist.
41 D) Domain creation is lazy. If `my_domain.expiration_date` finds that `my_domain`
42 does not exist in the registry, it will ask the registry to create it.
43 F) Created is _not_ the same as active aka live on the internet.
44 G) Activation is controlled by the registry. It will happen automatically when the
45 domain meets the required checks.
46 """
47
48 def __init__(self, *args, **kwargs):
49 self._cache = {}
50 super(Domain, self).__init__(*args, **kwargs)
51
52 class Status(models.TextChoices):
53 """
54 The status codes we can receive from the registry.
55
56 These are detailed in RFC 5731 in section 2.3.
57 https://www.rfc-editor.org/std/std69.txt
58 """
59
60 # Requests to delete the object MUST be rejected.
61 CLIENT_DELETE_PROHIBITED = "clientDeleteProhibited"
62 SERVER_DELETE_PROHIBITED = "serverDeleteProhibited"
63
64 # DNS delegation information MUST NOT be published for the object.
65 CLIENT_HOLD = "clientHold"
66 SERVER_HOLD = "serverHold"
67
68 # Requests to renew the object MUST be rejected.
69 CLIENT_RENEW_PROHIBITED = "clientRenewProhibited"
70 SERVER_RENEW_PROHIBITED = "serverRenewProhibited"
71
72 # Requests to transfer the object MUST be rejected.
73 CLIENT_TRANSFER_PROHIBITED = "clientTransferProhibited"
74 SERVER_TRANSFER_PROHIBITED = "serverTransferProhibited"
75
76 # Requests to update the object (other than to remove this status)
77 # MUST be rejected.
78 CLIENT_UPDATE_PROHIBITED = "clientUpdateProhibited"
79 SERVER_UPDATE_PROHIBITED = "serverUpdateProhibited"
80
81 # Delegation information has not been associated with the object.
82 # This is the default status when a domain object is first created
83 # and there are no associated host objects for the DNS delegation.
84 # This status can also be set by the server when all host-object
85 # associations are removed.
86 INACTIVE = "inactive"
87
88 # This is the normal status value for an object that has no pending
89 # operations or prohibitions. This value is set and removed by the
90 # server as other status values are added or removed.
91 OK = "ok"
92
93 # A transform command has been processed for the object, but the
94 # action has not been completed by the server. Server operators can
95 # delay action completion for a variety of reasons, such as to allow
96 # for human review or third-party action. A transform command that
97 # is processed, but whose requested action is pending, is noted with
98 # response code 1001.
99 PENDING_CREATE = "pendingCreate"
100 PENDING_DELETE = "pendingDelete"
101 PENDING_RENEW = "pendingRenew"
102 PENDING_TRANSFER = "pendingTransfer"
103 PENDING_UPDATE = "pendingUpdate"
104
105 class State(models.TextChoices):
106 """These capture (some of) the states a domain object can be in."""
107
108 # the normal state of a domain object -- may or may not be active!
109 CREATED = "created"
110
111 # previously existed but has been deleted from the registry
112 DELETED = "deleted"
113
114 # the state is indeterminate
115 UNKNOWN = "unknown"
116
117 class Cache(property):
118 """
119 Python descriptor to turn class methods into properties.
120
121 The purpose of subclassing `property` rather than using it directly
122 as a decorator (`@Cache`) is to insert generic code to run
123 before or after _all_ properties are accessed, modified, or deleted.
124
125 As an example:
126
127 domain = Domain(name="example.gov")
128 domain.save()
129 <--- insert code here
130 date = domain.creation_date
131 <--- or here
132 (...other stuff...)
133 """
134
135 def __get__(self, obj, objtype=None):
136 """Called during get. Example: `r = domain.registrant`."""
137 return super().__get__(obj, objtype)
138
139 def __set__(self, obj, value):
140 """Called during set. Example: `domain.registrant = 'abc123'`."""
141 super().__set__(obj, value)
142 # always invalidate cache after sending updates to the registry
143 obj._invalidate_cache()
144
145 def __delete__(self, obj):
146 """Called during delete. Example: `del domain.registrant`."""
147 super().__delete__(obj)
148
149 @classmethod
150 def available(cls, domain: str) -> bool:
151 """Check if a domain is available."""
152 if not cls.string_could_be_domain(domain):
153 raise ValueError("Not a valid domain: %s" % str(domain))
154 req = commands.CheckDomain([domain])
155 return registry.send(req, cleaned=True).res_data[0].avail
156
157 @classmethod
158 def registered(cls, domain: str) -> bool:
159 """Check if a domain is _not_ available."""
160 return not cls.available(domain)
161
162 @Cache
163 def contacts(self) -> dict[str, str]:
164 """
165 Get a dictionary of registry IDs for the contacts for this domain.
166
167 IDs are provided as strings, e.g.
168
169 { PublicContact.ContactTypeChoices.REGISTRANT: "jd1234",
170 PublicContact.ContactTypeChoices.ADMINISTRATIVE: "sh8013",...}
171 """
172 raise NotImplementedError()
173
174 @Cache
175 def creation_date(self) -> date:
176 """Get the `cr_date` element from the registry."""
177 return self._get_property("cr_date")
178
179 @Cache
180 def last_transferred_date(self) -> date:
181 """Get the `tr_date` element from the registry."""
182 raise NotImplementedError()
183
184 @Cache
185 def last_updated_date(self) -> date:
186 """Get the `up_date` element from the registry."""
187 return self._get_property("up_date")
188
189 @Cache
190 def expiration_date(self) -> date:
191 """Get or set the `ex_date` element from the registry."""
192 return self._get_property("ex_date")
193
194 @expiration_date.setter # type: ignore
195 def expiration_date(self, ex_date: date):
196 raise NotImplementedError()
197
198 @Cache
199 def password(self) -> str:
200 """
201 Get the `auth_info.pw` element from the registry. Not a real password.
202
203 This `auth_info` element is required by the EPP protocol, but the registry is
204 using a different mechanism to ensure unauthorized clients cannot perform
205 actions on domains they do not own. This field provides no security features.
206 It is not a secret.
207 """
208 raise NotImplementedError()
209
210 @Cache
211 def nameservers(self) -> list[tuple[str]]:
212 """
213 Get or set a complete list of nameservers for this domain.
214
215 Hosts are provided as a list of tuples, e.g.
216
217 [("ns1.example.com",), ("ns1.example.gov", "0.0.0.0")]
218
219 Subordinate hosts (something.your-domain.gov) MUST have IP addresses,
220 while non-subordinate hosts MUST NOT.
221 """
222 # TODO: call EPP to get this info instead of returning fake data.
223 return [
224 ("ns1.example.com",),
225 ("ns2.example.com",),
226 ("ns3.example.com",),
227 ]
228
229 @nameservers.setter # type: ignore
230 def nameservers(self, hosts: list[tuple[str]]):
231 # TODO: call EPP to set this info.
232 pass
233
234 @Cache
235 def statuses(self) -> list[str]:
236 """
237 Get or set the domain `status` elements from the registry.
238
239 A domain's status indicates various properties. See Domain.Status.
240 """
241 # implementation note: the Status object from EPP stores the string in
242 # a dataclass property `state`, not to be confused with the `state` field here
243 raise NotImplementedError()
244
245 @statuses.setter # type: ignore
246 def statuses(self, statuses: list[str]):
247 # TODO: there are a long list of rules in the RFC about which statuses
248 # can be combined; check that here and raise errors for invalid combinations -
249 # some statuses cannot be set by the client at all
250 raise NotImplementedError()
251
252 @Cache
253 def registrant_contact(self) -> PublicContact:
254 """Get or set the registrant for this domain."""
255 raise NotImplementedError()
256
257 @registrant_contact.setter # type: ignore
258 def registrant_contact(self, contact: PublicContact):
259 # get id from PublicContact->.registry_id
260 # call UpdateDomain() command with registrant as parameter
261 raise NotImplementedError()
262
263 @Cache
264 def administrative_contact(self) -> PublicContact:
265 """Get or set the admin contact for this domain."""
266 raise NotImplementedError()
267
268 @administrative_contact.setter # type: ignore
269 def administrative_contact(self, contact: PublicContact):
270 # call CreateContact, if contact doesn't exist yet for domain
271 # call UpdateDomain with contact,
272 # type options are[admin, billing, tech, security]
273 # use admin as type parameter for this contact
274 raise NotImplementedError()
275
276 @Cache
277 def security_contact(self) -> PublicContact:
278 """Get or set the security contact for this domain."""
279 # TODO: replace this with a real implementation
280 contact = PublicContact.get_default_security()
281 contact.domain = self
282 contact.email = "[email protected]"
283 return contact
284
285 @security_contact.setter # type: ignore
286 def security_contact(self, contact: PublicContact):
287 # TODO: replace this with a real implementation
288 pass
289
290 @Cache
291 def technical_contact(self) -> PublicContact:
292 """Get or set the tech contact for this domain."""
293 raise NotImplementedError()
294
295 @technical_contact.setter # type: ignore
296 def technical_contact(self, contact: PublicContact):
297 raise NotImplementedError()
298
299 def is_active(self) -> bool:
300 """Is the domain live on the inter webs?"""
301 # TODO: implement a check -- should be performant so it can be called for
302 # any number of domains on a status page
303 # this is NOT as simple as checking if Domain.Status.OK is in self.statuses
304 return False
305
306 def transfer(self):
307 """Going somewhere. Not implemented."""
308 raise NotImplementedError()
309
310 def renew(self):
311 """Time to renew. Not implemented."""
312 raise NotImplementedError()
313
314 def place_client_hold(self):
315 """This domain should not be active."""
316 raise NotImplementedError("This is not implemented yet.")
317
318 def remove_client_hold(self):
319 """This domain is okay to be active."""
320 raise NotImplementedError()
321
322 def __str__(self) -> str:
323 return self.name
324
325 name = DomainField(
326 max_length=253,
327 blank=False,
328 default=None, # prevent saving without a value
329 unique=True,
330 help_text="Fully qualified domain name",
331 )
332
333 state = FSMField(
334 max_length=21,
335 choices=State.choices,
336 default=State.UNKNOWN,
337 protected=True, # cannot change state directly, particularly in Django admin
338 help_text="Very basic info about the lifecycle of this domain object",
339 )
340
341 # ForeignKey on UserDomainRole creates a "permissions" member for
342 # all of the user-roles that are in place for this domain
343
344 # ManyToManyField on User creates a "users" member for all of the
345 # users who have some role on this domain
346
347 # ForeignKey on DomainInvitation creates an "invitations" member for
348 # all of the invitations that have been sent for this domain
349
350 def _validate_host_tuples(self, hosts: list[tuple[str]]):
351 """
352 Helper function. Validate hostnames and IP addresses.
353
354 Raises:
355 ValueError if hostname or IP address appears invalid or mismatched.
356 """
357 for host in hosts:
358 hostname = host[0].lower()
359 addresses: tuple[str] = host[1:] # type: ignore
360 if not bool(Domain.HOST_REGEX.match(hostname)):
361 raise ValueError("Invalid hostname: %s." % hostname)
362 if len(hostname) > Domain.MAX_LENGTH:
363 raise ValueError("Too long hostname: %s" % hostname)
364
365 is_subordinate = hostname.split(".", 1)[-1] == self.name
366 if is_subordinate and len(addresses) == 0:
367 raise ValueError(
368 "Must supply IP addresses for subordinate host %s" % hostname
369 )
370 if not is_subordinate and len(addresses) > 0:
371 raise ValueError("Must not supply IP addresses for %s" % hostname)
372
373 for address in addresses:
374 allow = set(":." + digits)
375 if any(c not in allow for c in address):
376 raise ValueError("Invalid IP address: %s." % address)
377
378 def _get_or_create_domain(self):
379 """Try to fetch info about this domain. Create it if it does not exist."""
380 already_tried_to_create = False
381 while True:
382 try:
383 req = commands.InfoDomain(name=self.name)
384 return registry.send(req, cleaned=True).res_data[0]
385 except RegistryError as e:
386 if already_tried_to_create:
387 raise e
388 if e.code == ErrorCode.OBJECT_DOES_NOT_EXIST:
389 # avoid infinite loop
390 already_tried_to_create = True
391 registrant = self._get_or_create_contact(
392 PublicContact.get_default_registrant()
393 )
394 req = commands.CreateDomain(
395 name=self.name,
396 registrant=registrant.id,
397 auth_info=epp.DomainAuthInfo(
398 pw="2fooBAR123fooBaz"
399 ), # not a password
400 )
401 registry.send(req, cleaned=True)
402 # no error, so go ahead and update state
403 self.state = Domain.State.CREATED
404 self.save()
405 else:
406 raise e
407
408 def _get_or_create_contact(self, contact: PublicContact):
409 """Try to fetch info about a contact. Create it if it does not exist."""
410 while True:
411 try:
412 req = commands.InfoContact(id=contact.registry_id)
413 return registry.send(req, cleaned=True).res_data[0]
414 except RegistryError as e:
415 if e.code == ErrorCode.OBJECT_DOES_NOT_EXIST:
416 create = commands.CreateContact(
417 id=contact.registry_id,
418 postal_info=epp.PostalInfo( # type: ignore
419 name=contact.name,
420 addr=epp.ContactAddr(
421 street=[
422 getattr(contact, street)
423 for street in ["street1", "street2", "street3"]
424 if hasattr(contact, street)
425 ],
426 city=contact.city,
427 pc=contact.pc,
428 cc=contact.cc,
429 sp=contact.sp,
430 ),
431 org=contact.org,
432 type="loc",
433 ),
434 email=contact.email,
435 voice=contact.voice,
436 fax=contact.fax,
437 auth_info=epp.ContactAuthInfo(pw="2fooBAR123fooBaz"),
438 )
439 # security contacts should only show email addresses, for now
440 if (
441 contact.contact_type
442 == PublicContact.ContactTypeChoices.SECURITY
443 ):
444 DF = epp.DiscloseField
445 create.disclose = epp.Disclose(
446 flag=False,
447 fields={DF.FAX, DF.VOICE, DF.ADDR},
448 types={DF.ADDR: "loc"},
449 )
450 registry.send(create)
451 else:
452 raise e
453
454 def _update_or_create_host(self, host):
455 raise NotImplementedError()
456
457 def _delete_host(self, host):
458 raise NotImplementedError()
459
460 def _fetch_cache(self, fetch_hosts=False, fetch_contacts=False):
461 """Contact registry for info about a domain."""
462 try:
463 # get info from registry
464 data = self._get_or_create_domain()
465 # extract properties from response
466 # (Ellipsis is used to mean "null")
467 cache = {
468 "auth_info": getattr(data, "auth_info", ...),
469 "_contacts": getattr(data, "contacts", ...),
470 "cr_date": getattr(data, "cr_date", ...),
471 "ex_date": getattr(data, "ex_date", ...),
472 "_hosts": getattr(data, "hosts", ...),
473 "name": getattr(data, "name", ...),
474 "registrant": getattr(data, "registrant", ...),
475 "statuses": getattr(data, "statuses", ...),
476 "tr_date": getattr(data, "tr_date", ...),
477 "up_date": getattr(data, "up_date", ...),
478 }
479
480 # remove null properties (to distinguish between "a value of None" and null)
481 cleaned = {k: v for k, v in cache.items() if v is not ...}
482
483 # get contact info, if there are any
484 if (
485 fetch_contacts
486 and "_contacts" in cleaned
487 and isinstance(cleaned["_contacts"], list)
488 and len(cleaned["_contacts"])
489 ):
490 cleaned["contacts"] = []
491 for id in cleaned["_contacts"]:
492 # we do not use _get_or_create_* because we expect the object we
493 # just asked the registry for still exists --
494 # if not, that's a problem
495 req = commands.InfoContact(id=id)
496 data = registry.send(req, cleaned=True).res_data[0]
497
498 # extract properties from response
499 # (Ellipsis is used to mean "null")
500 contact = {
501 "id": id,
502 "auth_info": getattr(data, "auth_info", ...),
503 "cr_date": getattr(data, "cr_date", ...),
504 "disclose": getattr(data, "disclose", ...),
505 "email": getattr(data, "email", ...),
506 "fax": getattr(data, "fax", ...),
507 "postal_info": getattr(data, "postal_info", ...),
508 "statuses": getattr(data, "statuses", ...),
509 "tr_date": getattr(data, "tr_date", ...),
510 "up_date": getattr(data, "up_date", ...),
511 "voice": getattr(data, "voice", ...),
512 }
513
514 cleaned["contacts"].append(
515 {k: v for k, v in contact.items() if v is not ...}
516 )
517
518 # get nameserver info, if there are any
519 if (
520 fetch_hosts
521 and "_hosts" in cleaned
522 and isinstance(cleaned["_hosts"], list)
523 and len(cleaned["_hosts"])
524 ):
525 cleaned["hosts"] = []
526 for name in cleaned["_hosts"]:
527 # we do not use _get_or_create_* because we expect the object we
528 # just asked the registry for still exists --
529 # if not, that's a problem
530 req = commands.InfoHost(name=name)
531 data = registry.send(req, cleaned=True).res_data[0]
532 # extract properties from response
533 # (Ellipsis is used to mean "null")
534 host = {
535 "name": name,
536 "addrs": getattr(data, "addrs", ...),
537 "cr_date": getattr(data, "cr_date", ...),
538 "statuses": getattr(data, "statuses", ...),
539 "tr_date": getattr(data, "tr_date", ...),
540 "up_date": getattr(data, "up_date", ...),
541 }
542 cleaned["hosts"].append(
543 {k: v for k, v in host.items() if v is not ...}
544 )
545
546 # replace the prior cache with new data
547 self._cache = cleaned
548
549 except RegistryError as e:
550 logger.error(e)
551
552 def _invalidate_cache(self):
553 """Remove cache data when updates are made."""
554 self._cache = {}
555
556 def _get_property(self, property):
557 """Get some piece of info about a domain."""
558 if property not in self._cache:
559 self._fetch_cache(
560 fetch_hosts=(property == "hosts"),
561 fetch_contacts=(property == "contacts"),
562 )
563
564 if property in self._cache:
565 return self._cache[property]
566 else:
567 raise KeyError(
568 "Requested key %s was not found in registry cache." % str(property)
569 )
570
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/registrar/models/domain.py b/src/registrar/models/domain.py
--- a/src/registrar/models/domain.py
+++ b/src/registrar/models/domain.py
@@ -338,6 +338,9 @@
help_text="Very basic info about the lifecycle of this domain object",
)
+ def isActive(self):
+ return self.state == Domain.State.CREATED
+
# ForeignKey on UserDomainRole creates a "permissions" member for
# all of the user-roles that are in place for this domain
| {"golden_diff": "diff --git a/src/registrar/models/domain.py b/src/registrar/models/domain.py\n--- a/src/registrar/models/domain.py\n+++ b/src/registrar/models/domain.py\n@@ -338,6 +338,9 @@\n help_text=\"Very basic info about the lifecycle of this domain object\",\n )\n \n+ def isActive(self):\n+ return self.state == Domain.State.CREATED\n+\n # ForeignKey on UserDomainRole creates a \"permissions\" member for\n # all of the user-roles that are in place for this domain\n", "issue": "Implement (isActive)\n### Issue Description\r\n\r\nRequires #851 \r\nisActive will return true if the domain has a state of CREATED\r\nNote: check the domain status\r\n\r\n### Additional Context (optional)\r\n\r\nseparate function b/c we might check if this is live on the internet.\r\n\r\n\r\nThis will be used when check if an approved domain can be switched to ineligible or rejected.\r\n\r\n### Issue Links\r\n\r\n_No response_\n", "before_files": [{"content": "import logging\n\nfrom datetime import date\nfrom string import digits\nfrom django_fsm import FSMField # type: ignore\n\nfrom django.db import models\n\nfrom epplibwrapper import (\n CLIENT as registry,\n commands,\n common as epp,\n RegistryError,\n ErrorCode,\n)\n\nfrom .utility.domain_field import DomainField\nfrom .utility.domain_helper import DomainHelper\nfrom .utility.time_stamped_model import TimeStampedModel\n\nfrom .public_contact import PublicContact\n\nlogger = logging.getLogger(__name__)\n\n\nclass Domain(TimeStampedModel, DomainHelper):\n \"\"\"\n Manage the lifecycle of domain names.\n\n The registry is the source of truth for this data and this model exists:\n 1. To tie ownership information in the registrar to\n DNS entries in the registry\n\n ~~~ HOW TO USE THIS CLASS ~~~\n\n A) You can create a Domain object with just a name. `Domain(name=\"something.gov\")`.\n B) Saving the Domain object will not contact the registry, as it may be useful\n to have Domain objects in an `UNKNOWN` pre-created state.\n C) Domain properties are lazily loaded. Accessing `my_domain.expiration_date` will\n contact the registry, if a cached copy does not exist.\n D) Domain creation is lazy. If `my_domain.expiration_date` finds that `my_domain`\n does not exist in the registry, it will ask the registry to create it.\n F) Created is _not_ the same as active aka live on the internet.\n G) Activation is controlled by the registry. 
It will happen automatically when the\n domain meets the required checks.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self._cache = {}\n super(Domain, self).__init__(*args, **kwargs)\n\n class Status(models.TextChoices):\n \"\"\"\n The status codes we can receive from the registry.\n\n These are detailed in RFC 5731 in section 2.3.\n https://www.rfc-editor.org/std/std69.txt\n \"\"\"\n\n # Requests to delete the object MUST be rejected.\n CLIENT_DELETE_PROHIBITED = \"clientDeleteProhibited\"\n SERVER_DELETE_PROHIBITED = \"serverDeleteProhibited\"\n\n # DNS delegation information MUST NOT be published for the object.\n CLIENT_HOLD = \"clientHold\"\n SERVER_HOLD = \"serverHold\"\n\n # Requests to renew the object MUST be rejected.\n CLIENT_RENEW_PROHIBITED = \"clientRenewProhibited\"\n SERVER_RENEW_PROHIBITED = \"serverRenewProhibited\"\n\n # Requests to transfer the object MUST be rejected.\n CLIENT_TRANSFER_PROHIBITED = \"clientTransferProhibited\"\n SERVER_TRANSFER_PROHIBITED = \"serverTransferProhibited\"\n\n # Requests to update the object (other than to remove this status)\n # MUST be rejected.\n CLIENT_UPDATE_PROHIBITED = \"clientUpdateProhibited\"\n SERVER_UPDATE_PROHIBITED = \"serverUpdateProhibited\"\n\n # Delegation information has not been associated with the object.\n # This is the default status when a domain object is first created\n # and there are no associated host objects for the DNS delegation.\n # This status can also be set by the server when all host-object\n # associations are removed.\n INACTIVE = \"inactive\"\n\n # This is the normal status value for an object that has no pending\n # operations or prohibitions. This value is set and removed by the\n # server as other status values are added or removed.\n OK = \"ok\"\n\n # A transform command has been processed for the object, but the\n # action has not been completed by the server. Server operators can\n # delay action completion for a variety of reasons, such as to allow\n # for human review or third-party action. A transform command that\n # is processed, but whose requested action is pending, is noted with\n # response code 1001.\n PENDING_CREATE = \"pendingCreate\"\n PENDING_DELETE = \"pendingDelete\"\n PENDING_RENEW = \"pendingRenew\"\n PENDING_TRANSFER = \"pendingTransfer\"\n PENDING_UPDATE = \"pendingUpdate\"\n\n class State(models.TextChoices):\n \"\"\"These capture (some of) the states a domain object can be in.\"\"\"\n\n # the normal state of a domain object -- may or may not be active!\n CREATED = \"created\"\n\n # previously existed but has been deleted from the registry\n DELETED = \"deleted\"\n\n # the state is indeterminate\n UNKNOWN = \"unknown\"\n\n class Cache(property):\n \"\"\"\n Python descriptor to turn class methods into properties.\n\n The purpose of subclassing `property` rather than using it directly\n as a decorator (`@Cache`) is to insert generic code to run\n before or after _all_ properties are accessed, modified, or deleted.\n\n As an example:\n\n domain = Domain(name=\"example.gov\")\n domain.save()\n <--- insert code here\n date = domain.creation_date\n <--- or here\n (...other stuff...)\n \"\"\"\n\n def __get__(self, obj, objtype=None):\n \"\"\"Called during get. Example: `r = domain.registrant`.\"\"\"\n return super().__get__(obj, objtype)\n\n def __set__(self, obj, value):\n \"\"\"Called during set. 
Example: `domain.registrant = 'abc123'`.\"\"\"\n super().__set__(obj, value)\n # always invalidate cache after sending updates to the registry\n obj._invalidate_cache()\n\n def __delete__(self, obj):\n \"\"\"Called during delete. Example: `del domain.registrant`.\"\"\"\n super().__delete__(obj)\n\n @classmethod\n def available(cls, domain: str) -> bool:\n \"\"\"Check if a domain is available.\"\"\"\n if not cls.string_could_be_domain(domain):\n raise ValueError(\"Not a valid domain: %s\" % str(domain))\n req = commands.CheckDomain([domain])\n return registry.send(req, cleaned=True).res_data[0].avail\n\n @classmethod\n def registered(cls, domain: str) -> bool:\n \"\"\"Check if a domain is _not_ available.\"\"\"\n return not cls.available(domain)\n\n @Cache\n def contacts(self) -> dict[str, str]:\n \"\"\"\n Get a dictionary of registry IDs for the contacts for this domain.\n\n IDs are provided as strings, e.g.\n\n { PublicContact.ContactTypeChoices.REGISTRANT: \"jd1234\",\n PublicContact.ContactTypeChoices.ADMINISTRATIVE: \"sh8013\",...}\n \"\"\"\n raise NotImplementedError()\n\n @Cache\n def creation_date(self) -> date:\n \"\"\"Get the `cr_date` element from the registry.\"\"\"\n return self._get_property(\"cr_date\")\n\n @Cache\n def last_transferred_date(self) -> date:\n \"\"\"Get the `tr_date` element from the registry.\"\"\"\n raise NotImplementedError()\n\n @Cache\n def last_updated_date(self) -> date:\n \"\"\"Get the `up_date` element from the registry.\"\"\"\n return self._get_property(\"up_date\")\n\n @Cache\n def expiration_date(self) -> date:\n \"\"\"Get or set the `ex_date` element from the registry.\"\"\"\n return self._get_property(\"ex_date\")\n\n @expiration_date.setter # type: ignore\n def expiration_date(self, ex_date: date):\n raise NotImplementedError()\n\n @Cache\n def password(self) -> str:\n \"\"\"\n Get the `auth_info.pw` element from the registry. Not a real password.\n\n This `auth_info` element is required by the EPP protocol, but the registry is\n using a different mechanism to ensure unauthorized clients cannot perform\n actions on domains they do not own. This field provides no security features.\n It is not a secret.\n \"\"\"\n raise NotImplementedError()\n\n @Cache\n def nameservers(self) -> list[tuple[str]]:\n \"\"\"\n Get or set a complete list of nameservers for this domain.\n\n Hosts are provided as a list of tuples, e.g.\n\n [(\"ns1.example.com\",), (\"ns1.example.gov\", \"0.0.0.0\")]\n\n Subordinate hosts (something.your-domain.gov) MUST have IP addresses,\n while non-subordinate hosts MUST NOT.\n \"\"\"\n # TODO: call EPP to get this info instead of returning fake data.\n return [\n (\"ns1.example.com\",),\n (\"ns2.example.com\",),\n (\"ns3.example.com\",),\n ]\n\n @nameservers.setter # type: ignore\n def nameservers(self, hosts: list[tuple[str]]):\n # TODO: call EPP to set this info.\n pass\n\n @Cache\n def statuses(self) -> list[str]:\n \"\"\"\n Get or set the domain `status` elements from the registry.\n\n A domain's status indicates various properties. 
See Domain.Status.\n \"\"\"\n # implementation note: the Status object from EPP stores the string in\n # a dataclass property `state`, not to be confused with the `state` field here\n raise NotImplementedError()\n\n @statuses.setter # type: ignore\n def statuses(self, statuses: list[str]):\n # TODO: there are a long list of rules in the RFC about which statuses\n # can be combined; check that here and raise errors for invalid combinations -\n # some statuses cannot be set by the client at all\n raise NotImplementedError()\n\n @Cache\n def registrant_contact(self) -> PublicContact:\n \"\"\"Get or set the registrant for this domain.\"\"\"\n raise NotImplementedError()\n\n @registrant_contact.setter # type: ignore\n def registrant_contact(self, contact: PublicContact):\n # get id from PublicContact->.registry_id\n # call UpdateDomain() command with registrant as parameter\n raise NotImplementedError()\n\n @Cache\n def administrative_contact(self) -> PublicContact:\n \"\"\"Get or set the admin contact for this domain.\"\"\"\n raise NotImplementedError()\n\n @administrative_contact.setter # type: ignore\n def administrative_contact(self, contact: PublicContact):\n # call CreateContact, if contact doesn't exist yet for domain\n # call UpdateDomain with contact,\n # type options are[admin, billing, tech, security]\n # use admin as type parameter for this contact\n raise NotImplementedError()\n\n @Cache\n def security_contact(self) -> PublicContact:\n \"\"\"Get or set the security contact for this domain.\"\"\"\n # TODO: replace this with a real implementation\n contact = PublicContact.get_default_security()\n contact.domain = self\n contact.email = \"[email protected]\"\n return contact\n\n @security_contact.setter # type: ignore\n def security_contact(self, contact: PublicContact):\n # TODO: replace this with a real implementation\n pass\n\n @Cache\n def technical_contact(self) -> PublicContact:\n \"\"\"Get or set the tech contact for this domain.\"\"\"\n raise NotImplementedError()\n\n @technical_contact.setter # type: ignore\n def technical_contact(self, contact: PublicContact):\n raise NotImplementedError()\n\n def is_active(self) -> bool:\n \"\"\"Is the domain live on the inter webs?\"\"\"\n # TODO: implement a check -- should be performant so it can be called for\n # any number of domains on a status page\n # this is NOT as simple as checking if Domain.Status.OK is in self.statuses\n return False\n\n def transfer(self):\n \"\"\"Going somewhere. Not implemented.\"\"\"\n raise NotImplementedError()\n\n def renew(self):\n \"\"\"Time to renew. 
Not implemented.\"\"\"\n raise NotImplementedError()\n\n def place_client_hold(self):\n \"\"\"This domain should not be active.\"\"\"\n raise NotImplementedError(\"This is not implemented yet.\")\n\n def remove_client_hold(self):\n \"\"\"This domain is okay to be active.\"\"\"\n raise NotImplementedError()\n\n def __str__(self) -> str:\n return self.name\n\n name = DomainField(\n max_length=253,\n blank=False,\n default=None, # prevent saving without a value\n unique=True,\n help_text=\"Fully qualified domain name\",\n )\n\n state = FSMField(\n max_length=21,\n choices=State.choices,\n default=State.UNKNOWN,\n protected=True, # cannot change state directly, particularly in Django admin\n help_text=\"Very basic info about the lifecycle of this domain object\",\n )\n\n # ForeignKey on UserDomainRole creates a \"permissions\" member for\n # all of the user-roles that are in place for this domain\n\n # ManyToManyField on User creates a \"users\" member for all of the\n # users who have some role on this domain\n\n # ForeignKey on DomainInvitation creates an \"invitations\" member for\n # all of the invitations that have been sent for this domain\n\n def _validate_host_tuples(self, hosts: list[tuple[str]]):\n \"\"\"\n Helper function. Validate hostnames and IP addresses.\n\n Raises:\n ValueError if hostname or IP address appears invalid or mismatched.\n \"\"\"\n for host in hosts:\n hostname = host[0].lower()\n addresses: tuple[str] = host[1:] # type: ignore\n if not bool(Domain.HOST_REGEX.match(hostname)):\n raise ValueError(\"Invalid hostname: %s.\" % hostname)\n if len(hostname) > Domain.MAX_LENGTH:\n raise ValueError(\"Too long hostname: %s\" % hostname)\n\n is_subordinate = hostname.split(\".\", 1)[-1] == self.name\n if is_subordinate and len(addresses) == 0:\n raise ValueError(\n \"Must supply IP addresses for subordinate host %s\" % hostname\n )\n if not is_subordinate and len(addresses) > 0:\n raise ValueError(\"Must not supply IP addresses for %s\" % hostname)\n\n for address in addresses:\n allow = set(\":.\" + digits)\n if any(c not in allow for c in address):\n raise ValueError(\"Invalid IP address: %s.\" % address)\n\n def _get_or_create_domain(self):\n \"\"\"Try to fetch info about this domain. Create it if it does not exist.\"\"\"\n already_tried_to_create = False\n while True:\n try:\n req = commands.InfoDomain(name=self.name)\n return registry.send(req, cleaned=True).res_data[0]\n except RegistryError as e:\n if already_tried_to_create:\n raise e\n if e.code == ErrorCode.OBJECT_DOES_NOT_EXIST:\n # avoid infinite loop\n already_tried_to_create = True\n registrant = self._get_or_create_contact(\n PublicContact.get_default_registrant()\n )\n req = commands.CreateDomain(\n name=self.name,\n registrant=registrant.id,\n auth_info=epp.DomainAuthInfo(\n pw=\"2fooBAR123fooBaz\"\n ), # not a password\n )\n registry.send(req, cleaned=True)\n # no error, so go ahead and update state\n self.state = Domain.State.CREATED\n self.save()\n else:\n raise e\n\n def _get_or_create_contact(self, contact: PublicContact):\n \"\"\"Try to fetch info about a contact. 
Create it if it does not exist.\"\"\"\n while True:\n try:\n req = commands.InfoContact(id=contact.registry_id)\n return registry.send(req, cleaned=True).res_data[0]\n except RegistryError as e:\n if e.code == ErrorCode.OBJECT_DOES_NOT_EXIST:\n create = commands.CreateContact(\n id=contact.registry_id,\n postal_info=epp.PostalInfo( # type: ignore\n name=contact.name,\n addr=epp.ContactAddr(\n street=[\n getattr(contact, street)\n for street in [\"street1\", \"street2\", \"street3\"]\n if hasattr(contact, street)\n ],\n city=contact.city,\n pc=contact.pc,\n cc=contact.cc,\n sp=contact.sp,\n ),\n org=contact.org,\n type=\"loc\",\n ),\n email=contact.email,\n voice=contact.voice,\n fax=contact.fax,\n auth_info=epp.ContactAuthInfo(pw=\"2fooBAR123fooBaz\"),\n )\n # security contacts should only show email addresses, for now\n if (\n contact.contact_type\n == PublicContact.ContactTypeChoices.SECURITY\n ):\n DF = epp.DiscloseField\n create.disclose = epp.Disclose(\n flag=False,\n fields={DF.FAX, DF.VOICE, DF.ADDR},\n types={DF.ADDR: \"loc\"},\n )\n registry.send(create)\n else:\n raise e\n\n def _update_or_create_host(self, host):\n raise NotImplementedError()\n\n def _delete_host(self, host):\n raise NotImplementedError()\n\n def _fetch_cache(self, fetch_hosts=False, fetch_contacts=False):\n \"\"\"Contact registry for info about a domain.\"\"\"\n try:\n # get info from registry\n data = self._get_or_create_domain()\n # extract properties from response\n # (Ellipsis is used to mean \"null\")\n cache = {\n \"auth_info\": getattr(data, \"auth_info\", ...),\n \"_contacts\": getattr(data, \"contacts\", ...),\n \"cr_date\": getattr(data, \"cr_date\", ...),\n \"ex_date\": getattr(data, \"ex_date\", ...),\n \"_hosts\": getattr(data, \"hosts\", ...),\n \"name\": getattr(data, \"name\", ...),\n \"registrant\": getattr(data, \"registrant\", ...),\n \"statuses\": getattr(data, \"statuses\", ...),\n \"tr_date\": getattr(data, \"tr_date\", ...),\n \"up_date\": getattr(data, \"up_date\", ...),\n }\n\n # remove null properties (to distinguish between \"a value of None\" and null)\n cleaned = {k: v for k, v in cache.items() if v is not ...}\n\n # get contact info, if there are any\n if (\n fetch_contacts\n and \"_contacts\" in cleaned\n and isinstance(cleaned[\"_contacts\"], list)\n and len(cleaned[\"_contacts\"])\n ):\n cleaned[\"contacts\"] = []\n for id in cleaned[\"_contacts\"]:\n # we do not use _get_or_create_* because we expect the object we\n # just asked the registry for still exists --\n # if not, that's a problem\n req = commands.InfoContact(id=id)\n data = registry.send(req, cleaned=True).res_data[0]\n\n # extract properties from response\n # (Ellipsis is used to mean \"null\")\n contact = {\n \"id\": id,\n \"auth_info\": getattr(data, \"auth_info\", ...),\n \"cr_date\": getattr(data, \"cr_date\", ...),\n \"disclose\": getattr(data, \"disclose\", ...),\n \"email\": getattr(data, \"email\", ...),\n \"fax\": getattr(data, \"fax\", ...),\n \"postal_info\": getattr(data, \"postal_info\", ...),\n \"statuses\": getattr(data, \"statuses\", ...),\n \"tr_date\": getattr(data, \"tr_date\", ...),\n \"up_date\": getattr(data, \"up_date\", ...),\n \"voice\": getattr(data, \"voice\", ...),\n }\n\n cleaned[\"contacts\"].append(\n {k: v for k, v in contact.items() if v is not ...}\n )\n\n # get nameserver info, if there are any\n if (\n fetch_hosts\n and \"_hosts\" in cleaned\n and isinstance(cleaned[\"_hosts\"], list)\n and len(cleaned[\"_hosts\"])\n ):\n cleaned[\"hosts\"] = []\n for name in cleaned[\"_hosts\"]:\n 
# we do not use _get_or_create_* because we expect the object we\n # just asked the registry for still exists --\n # if not, that's a problem\n req = commands.InfoHost(name=name)\n data = registry.send(req, cleaned=True).res_data[0]\n # extract properties from response\n # (Ellipsis is used to mean \"null\")\n host = {\n \"name\": name,\n \"addrs\": getattr(data, \"addrs\", ...),\n \"cr_date\": getattr(data, \"cr_date\", ...),\n \"statuses\": getattr(data, \"statuses\", ...),\n \"tr_date\": getattr(data, \"tr_date\", ...),\n \"up_date\": getattr(data, \"up_date\", ...),\n }\n cleaned[\"hosts\"].append(\n {k: v for k, v in host.items() if v is not ...}\n )\n\n # replace the prior cache with new data\n self._cache = cleaned\n\n except RegistryError as e:\n logger.error(e)\n\n def _invalidate_cache(self):\n \"\"\"Remove cache data when updates are made.\"\"\"\n self._cache = {}\n\n def _get_property(self, property):\n \"\"\"Get some piece of info about a domain.\"\"\"\n if property not in self._cache:\n self._fetch_cache(\n fetch_hosts=(property == \"hosts\"),\n fetch_contacts=(property == \"contacts\"),\n )\n\n if property in self._cache:\n return self._cache[property]\n else:\n raise KeyError(\n \"Requested key %s was not found in registry cache.\" % str(property)\n )\n", "path": "src/registrar/models/domain.py"}], "after_files": [{"content": "import logging\n\nfrom datetime import date\nfrom string import digits\nfrom django_fsm import FSMField # type: ignore\n\nfrom django.db import models\n\nfrom epplibwrapper import (\n CLIENT as registry,\n commands,\n common as epp,\n RegistryError,\n ErrorCode,\n)\n\nfrom .utility.domain_field import DomainField\nfrom .utility.domain_helper import DomainHelper\nfrom .utility.time_stamped_model import TimeStampedModel\n\nfrom .public_contact import PublicContact\n\nlogger = logging.getLogger(__name__)\n\n\nclass Domain(TimeStampedModel, DomainHelper):\n \"\"\"\n Manage the lifecycle of domain names.\n\n The registry is the source of truth for this data and this model exists:\n 1. To tie ownership information in the registrar to\n DNS entries in the registry\n\n ~~~ HOW TO USE THIS CLASS ~~~\n\n A) You can create a Domain object with just a name. `Domain(name=\"something.gov\")`.\n B) Saving the Domain object will not contact the registry, as it may be useful\n to have Domain objects in an `UNKNOWN` pre-created state.\n C) Domain properties are lazily loaded. Accessing `my_domain.expiration_date` will\n contact the registry, if a cached copy does not exist.\n D) Domain creation is lazy. If `my_domain.expiration_date` finds that `my_domain`\n does not exist in the registry, it will ask the registry to create it.\n F) Created is _not_ the same as active aka live on the internet.\n G) Activation is controlled by the registry. 
It will happen automatically when the\n domain meets the required checks.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self._cache = {}\n super(Domain, self).__init__(*args, **kwargs)\n\n class Status(models.TextChoices):\n \"\"\"\n The status codes we can receive from the registry.\n\n These are detailed in RFC 5731 in section 2.3.\n https://www.rfc-editor.org/std/std69.txt\n \"\"\"\n\n # Requests to delete the object MUST be rejected.\n CLIENT_DELETE_PROHIBITED = \"clientDeleteProhibited\"\n SERVER_DELETE_PROHIBITED = \"serverDeleteProhibited\"\n\n # DNS delegation information MUST NOT be published for the object.\n CLIENT_HOLD = \"clientHold\"\n SERVER_HOLD = \"serverHold\"\n\n # Requests to renew the object MUST be rejected.\n CLIENT_RENEW_PROHIBITED = \"clientRenewProhibited\"\n SERVER_RENEW_PROHIBITED = \"serverRenewProhibited\"\n\n # Requests to transfer the object MUST be rejected.\n CLIENT_TRANSFER_PROHIBITED = \"clientTransferProhibited\"\n SERVER_TRANSFER_PROHIBITED = \"serverTransferProhibited\"\n\n # Requests to update the object (other than to remove this status)\n # MUST be rejected.\n CLIENT_UPDATE_PROHIBITED = \"clientUpdateProhibited\"\n SERVER_UPDATE_PROHIBITED = \"serverUpdateProhibited\"\n\n # Delegation information has not been associated with the object.\n # This is the default status when a domain object is first created\n # and there are no associated host objects for the DNS delegation.\n # This status can also be set by the server when all host-object\n # associations are removed.\n INACTIVE = \"inactive\"\n\n # This is the normal status value for an object that has no pending\n # operations or prohibitions. This value is set and removed by the\n # server as other status values are added or removed.\n OK = \"ok\"\n\n # A transform command has been processed for the object, but the\n # action has not been completed by the server. Server operators can\n # delay action completion for a variety of reasons, such as to allow\n # for human review or third-party action. A transform command that\n # is processed, but whose requested action is pending, is noted with\n # response code 1001.\n PENDING_CREATE = \"pendingCreate\"\n PENDING_DELETE = \"pendingDelete\"\n PENDING_RENEW = \"pendingRenew\"\n PENDING_TRANSFER = \"pendingTransfer\"\n PENDING_UPDATE = \"pendingUpdate\"\n\n class State(models.TextChoices):\n \"\"\"These capture (some of) the states a domain object can be in.\"\"\"\n\n # the normal state of a domain object -- may or may not be active!\n CREATED = \"created\"\n\n # previously existed but has been deleted from the registry\n DELETED = \"deleted\"\n\n # the state is indeterminate\n UNKNOWN = \"unknown\"\n\n class Cache(property):\n \"\"\"\n Python descriptor to turn class methods into properties.\n\n The purpose of subclassing `property` rather than using it directly\n as a decorator (`@Cache`) is to insert generic code to run\n before or after _all_ properties are accessed, modified, or deleted.\n\n As an example:\n\n domain = Domain(name=\"example.gov\")\n domain.save()\n <--- insert code here\n date = domain.creation_date\n <--- or here\n (...other stuff...)\n \"\"\"\n\n def __get__(self, obj, objtype=None):\n \"\"\"Called during get. Example: `r = domain.registrant`.\"\"\"\n return super().__get__(obj, objtype)\n\n def __set__(self, obj, value):\n \"\"\"Called during set. 
Example: `domain.registrant = 'abc123'`.\"\"\"\n super().__set__(obj, value)\n # always invalidate cache after sending updates to the registry\n obj._invalidate_cache()\n\n def __delete__(self, obj):\n \"\"\"Called during delete. Example: `del domain.registrant`.\"\"\"\n super().__delete__(obj)\n\n @classmethod\n def available(cls, domain: str) -> bool:\n \"\"\"Check if a domain is available.\"\"\"\n if not cls.string_could_be_domain(domain):\n raise ValueError(\"Not a valid domain: %s\" % str(domain))\n req = commands.CheckDomain([domain])\n return registry.send(req, cleaned=True).res_data[0].avail\n\n @classmethod\n def registered(cls, domain: str) -> bool:\n \"\"\"Check if a domain is _not_ available.\"\"\"\n return not cls.available(domain)\n\n @Cache\n def contacts(self) -> dict[str, str]:\n \"\"\"\n Get a dictionary of registry IDs for the contacts for this domain.\n\n IDs are provided as strings, e.g.\n\n { PublicContact.ContactTypeChoices.REGISTRANT: \"jd1234\",\n PublicContact.ContactTypeChoices.ADMINISTRATIVE: \"sh8013\",...}\n \"\"\"\n raise NotImplementedError()\n\n @Cache\n def creation_date(self) -> date:\n \"\"\"Get the `cr_date` element from the registry.\"\"\"\n return self._get_property(\"cr_date\")\n\n @Cache\n def last_transferred_date(self) -> date:\n \"\"\"Get the `tr_date` element from the registry.\"\"\"\n raise NotImplementedError()\n\n @Cache\n def last_updated_date(self) -> date:\n \"\"\"Get the `up_date` element from the registry.\"\"\"\n return self._get_property(\"up_date\")\n\n @Cache\n def expiration_date(self) -> date:\n \"\"\"Get or set the `ex_date` element from the registry.\"\"\"\n return self._get_property(\"ex_date\")\n\n @expiration_date.setter # type: ignore\n def expiration_date(self, ex_date: date):\n raise NotImplementedError()\n\n @Cache\n def password(self) -> str:\n \"\"\"\n Get the `auth_info.pw` element from the registry. Not a real password.\n\n This `auth_info` element is required by the EPP protocol, but the registry is\n using a different mechanism to ensure unauthorized clients cannot perform\n actions on domains they do not own. This field provides no security features.\n It is not a secret.\n \"\"\"\n raise NotImplementedError()\n\n @Cache\n def nameservers(self) -> list[tuple[str]]:\n \"\"\"\n Get or set a complete list of nameservers for this domain.\n\n Hosts are provided as a list of tuples, e.g.\n\n [(\"ns1.example.com\",), (\"ns1.example.gov\", \"0.0.0.0\")]\n\n Subordinate hosts (something.your-domain.gov) MUST have IP addresses,\n while non-subordinate hosts MUST NOT.\n \"\"\"\n # TODO: call EPP to get this info instead of returning fake data.\n return [\n (\"ns1.example.com\",),\n (\"ns2.example.com\",),\n (\"ns3.example.com\",),\n ]\n\n @nameservers.setter # type: ignore\n def nameservers(self, hosts: list[tuple[str]]):\n # TODO: call EPP to set this info.\n pass\n\n @Cache\n def statuses(self) -> list[str]:\n \"\"\"\n Get or set the domain `status` elements from the registry.\n\n A domain's status indicates various properties. 
See Domain.Status.\n \"\"\"\n # implementation note: the Status object from EPP stores the string in\n # a dataclass property `state`, not to be confused with the `state` field here\n raise NotImplementedError()\n\n @statuses.setter # type: ignore\n def statuses(self, statuses: list[str]):\n # TODO: there are a long list of rules in the RFC about which statuses\n # can be combined; check that here and raise errors for invalid combinations -\n # some statuses cannot be set by the client at all\n raise NotImplementedError()\n\n @Cache\n def registrant_contact(self) -> PublicContact:\n \"\"\"Get or set the registrant for this domain.\"\"\"\n raise NotImplementedError()\n\n @registrant_contact.setter # type: ignore\n def registrant_contact(self, contact: PublicContact):\n # get id from PublicContact->.registry_id\n # call UpdateDomain() command with registrant as parameter\n raise NotImplementedError()\n\n @Cache\n def administrative_contact(self) -> PublicContact:\n \"\"\"Get or set the admin contact for this domain.\"\"\"\n raise NotImplementedError()\n\n @administrative_contact.setter # type: ignore\n def administrative_contact(self, contact: PublicContact):\n # call CreateContact, if contact doesn't exist yet for domain\n # call UpdateDomain with contact,\n # type options are[admin, billing, tech, security]\n # use admin as type parameter for this contact\n raise NotImplementedError()\n\n @Cache\n def security_contact(self) -> PublicContact:\n \"\"\"Get or set the security contact for this domain.\"\"\"\n # TODO: replace this with a real implementation\n contact = PublicContact.get_default_security()\n contact.domain = self\n contact.email = \"[email protected]\"\n return contact\n\n @security_contact.setter # type: ignore\n def security_contact(self, contact: PublicContact):\n # TODO: replace this with a real implementation\n pass\n\n @Cache\n def technical_contact(self) -> PublicContact:\n \"\"\"Get or set the tech contact for this domain.\"\"\"\n raise NotImplementedError()\n\n @technical_contact.setter # type: ignore\n def technical_contact(self, contact: PublicContact):\n raise NotImplementedError()\n\n def is_active(self) -> bool:\n \"\"\"Is the domain live on the inter webs?\"\"\"\n # TODO: implement a check -- should be performant so it can be called for\n # any number of domains on a status page\n # this is NOT as simple as checking if Domain.Status.OK is in self.statuses\n return False\n\n def transfer(self):\n \"\"\"Going somewhere. Not implemented.\"\"\"\n raise NotImplementedError()\n\n def renew(self):\n \"\"\"Time to renew. 
Not implemented.\"\"\"\n raise NotImplementedError()\n\n def place_client_hold(self):\n \"\"\"This domain should not be active.\"\"\"\n raise NotImplementedError(\"This is not implemented yet.\")\n\n def remove_client_hold(self):\n \"\"\"This domain is okay to be active.\"\"\"\n raise NotImplementedError()\n\n def __str__(self) -> str:\n return self.name\n\n name = DomainField(\n max_length=253,\n blank=False,\n default=None, # prevent saving without a value\n unique=True,\n help_text=\"Fully qualified domain name\",\n )\n\n state = FSMField(\n max_length=21,\n choices=State.choices,\n default=State.UNKNOWN,\n protected=True, # cannot change state directly, particularly in Django admin\n help_text=\"Very basic info about the lifecycle of this domain object\",\n )\n\n def isActive(self):\n return self.state == Domain.State.CREATED\n\n # ForeignKey on UserDomainRole creates a \"permissions\" member for\n # all of the user-roles that are in place for this domain\n\n # ManyToManyField on User creates a \"users\" member for all of the\n # users who have some role on this domain\n\n # ForeignKey on DomainInvitation creates an \"invitations\" member for\n # all of the invitations that have been sent for this domain\n\n def _validate_host_tuples(self, hosts: list[tuple[str]]):\n \"\"\"\n Helper function. Validate hostnames and IP addresses.\n\n Raises:\n ValueError if hostname or IP address appears invalid or mismatched.\n \"\"\"\n for host in hosts:\n hostname = host[0].lower()\n addresses: tuple[str] = host[1:] # type: ignore\n if not bool(Domain.HOST_REGEX.match(hostname)):\n raise ValueError(\"Invalid hostname: %s.\" % hostname)\n if len(hostname) > Domain.MAX_LENGTH:\n raise ValueError(\"Too long hostname: %s\" % hostname)\n\n is_subordinate = hostname.split(\".\", 1)[-1] == self.name\n if is_subordinate and len(addresses) == 0:\n raise ValueError(\n \"Must supply IP addresses for subordinate host %s\" % hostname\n )\n if not is_subordinate and len(addresses) > 0:\n raise ValueError(\"Must not supply IP addresses for %s\" % hostname)\n\n for address in addresses:\n allow = set(\":.\" + digits)\n if any(c not in allow for c in address):\n raise ValueError(\"Invalid IP address: %s.\" % address)\n\n def _get_or_create_domain(self):\n \"\"\"Try to fetch info about this domain. Create it if it does not exist.\"\"\"\n already_tried_to_create = False\n while True:\n try:\n req = commands.InfoDomain(name=self.name)\n return registry.send(req, cleaned=True).res_data[0]\n except RegistryError as e:\n if already_tried_to_create:\n raise e\n if e.code == ErrorCode.OBJECT_DOES_NOT_EXIST:\n # avoid infinite loop\n already_tried_to_create = True\n registrant = self._get_or_create_contact(\n PublicContact.get_default_registrant()\n )\n req = commands.CreateDomain(\n name=self.name,\n registrant=registrant.id,\n auth_info=epp.DomainAuthInfo(\n pw=\"2fooBAR123fooBaz\"\n ), # not a password\n )\n registry.send(req, cleaned=True)\n # no error, so go ahead and update state\n self.state = Domain.State.CREATED\n self.save()\n else:\n raise e\n\n def _get_or_create_contact(self, contact: PublicContact):\n \"\"\"Try to fetch info about a contact. 
Create it if it does not exist.\"\"\"\n while True:\n try:\n req = commands.InfoContact(id=contact.registry_id)\n return registry.send(req, cleaned=True).res_data[0]\n except RegistryError as e:\n if e.code == ErrorCode.OBJECT_DOES_NOT_EXIST:\n create = commands.CreateContact(\n id=contact.registry_id,\n postal_info=epp.PostalInfo( # type: ignore\n name=contact.name,\n addr=epp.ContactAddr(\n street=[\n getattr(contact, street)\n for street in [\"street1\", \"street2\", \"street3\"]\n if hasattr(contact, street)\n ],\n city=contact.city,\n pc=contact.pc,\n cc=contact.cc,\n sp=contact.sp,\n ),\n org=contact.org,\n type=\"loc\",\n ),\n email=contact.email,\n voice=contact.voice,\n fax=contact.fax,\n auth_info=epp.ContactAuthInfo(pw=\"2fooBAR123fooBaz\"),\n )\n # security contacts should only show email addresses, for now\n if (\n contact.contact_type\n == PublicContact.ContactTypeChoices.SECURITY\n ):\n DF = epp.DiscloseField\n create.disclose = epp.Disclose(\n flag=False,\n fields={DF.FAX, DF.VOICE, DF.ADDR},\n types={DF.ADDR: \"loc\"},\n )\n registry.send(create)\n else:\n raise e\n\n def _update_or_create_host(self, host):\n raise NotImplementedError()\n\n def _delete_host(self, host):\n raise NotImplementedError()\n\n def _fetch_cache(self, fetch_hosts=False, fetch_contacts=False):\n \"\"\"Contact registry for info about a domain.\"\"\"\n try:\n # get info from registry\n data = self._get_or_create_domain()\n # extract properties from response\n # (Ellipsis is used to mean \"null\")\n cache = {\n \"auth_info\": getattr(data, \"auth_info\", ...),\n \"_contacts\": getattr(data, \"contacts\", ...),\n \"cr_date\": getattr(data, \"cr_date\", ...),\n \"ex_date\": getattr(data, \"ex_date\", ...),\n \"_hosts\": getattr(data, \"hosts\", ...),\n \"name\": getattr(data, \"name\", ...),\n \"registrant\": getattr(data, \"registrant\", ...),\n \"statuses\": getattr(data, \"statuses\", ...),\n \"tr_date\": getattr(data, \"tr_date\", ...),\n \"up_date\": getattr(data, \"up_date\", ...),\n }\n\n # remove null properties (to distinguish between \"a value of None\" and null)\n cleaned = {k: v for k, v in cache.items() if v is not ...}\n\n # get contact info, if there are any\n if (\n fetch_contacts\n and \"_contacts\" in cleaned\n and isinstance(cleaned[\"_contacts\"], list)\n and len(cleaned[\"_contacts\"])\n ):\n cleaned[\"contacts\"] = []\n for id in cleaned[\"_contacts\"]:\n # we do not use _get_or_create_* because we expect the object we\n # just asked the registry for still exists --\n # if not, that's a problem\n req = commands.InfoContact(id=id)\n data = registry.send(req, cleaned=True).res_data[0]\n\n # extract properties from response\n # (Ellipsis is used to mean \"null\")\n contact = {\n \"id\": id,\n \"auth_info\": getattr(data, \"auth_info\", ...),\n \"cr_date\": getattr(data, \"cr_date\", ...),\n \"disclose\": getattr(data, \"disclose\", ...),\n \"email\": getattr(data, \"email\", ...),\n \"fax\": getattr(data, \"fax\", ...),\n \"postal_info\": getattr(data, \"postal_info\", ...),\n \"statuses\": getattr(data, \"statuses\", ...),\n \"tr_date\": getattr(data, \"tr_date\", ...),\n \"up_date\": getattr(data, \"up_date\", ...),\n \"voice\": getattr(data, \"voice\", ...),\n }\n\n cleaned[\"contacts\"].append(\n {k: v for k, v in contact.items() if v is not ...}\n )\n\n # get nameserver info, if there are any\n if (\n fetch_hosts\n and \"_hosts\" in cleaned\n and isinstance(cleaned[\"_hosts\"], list)\n and len(cleaned[\"_hosts\"])\n ):\n cleaned[\"hosts\"] = []\n for name in cleaned[\"_hosts\"]:\n 
# we do not use _get_or_create_* because we expect the object we\n # just asked the registry for still exists --\n # if not, that's a problem\n req = commands.InfoHost(name=name)\n data = registry.send(req, cleaned=True).res_data[0]\n # extract properties from response\n # (Ellipsis is used to mean \"null\")\n host = {\n \"name\": name,\n \"addrs\": getattr(data, \"addrs\", ...),\n \"cr_date\": getattr(data, \"cr_date\", ...),\n \"statuses\": getattr(data, \"statuses\", ...),\n \"tr_date\": getattr(data, \"tr_date\", ...),\n \"up_date\": getattr(data, \"up_date\", ...),\n }\n cleaned[\"hosts\"].append(\n {k: v for k, v in host.items() if v is not ...}\n )\n\n # replace the prior cache with new data\n self._cache = cleaned\n\n except RegistryError as e:\n logger.error(e)\n\n def _invalidate_cache(self):\n \"\"\"Remove cache data when updates are made.\"\"\"\n self._cache = {}\n\n def _get_property(self, property):\n \"\"\"Get some piece of info about a domain.\"\"\"\n if property not in self._cache:\n self._fetch_cache(\n fetch_hosts=(property == \"hosts\"),\n fetch_contacts=(property == \"contacts\"),\n )\n\n if property in self._cache:\n return self._cache[property]\n else:\n raise KeyError(\n \"Requested key %s was not found in registry cache.\" % str(property)\n )\n", "path": "src/registrar/models/domain.py"}]} |
gh_patches_debug_1229 | rasdani/github-patches | git_diff | deepset-ai__haystack-7086 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pipeline drawings in Colab have black background
**Describe the bug**
Since Haystack 2.0-beta8, Pipeline drawings in Colab and other environments (VS Code/Pycharm) randomly
have a black background.

These images are not as nice and are less readable than the previous ones **with a transparent background**:

**To Reproduce**
Run the [first 2.0 tutorial](https://haystack.deepset.ai/tutorials/27_first_rag_pipeline)
**System:**
- Haystack version (commit or version number): 2.0-beta8
--- END ISSUE ---
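A plausible explanation, consistent with the fix recorded later in this entry, is that the hosted mermaid.ink `/img/` endpoint renders a JPEG by default. JPEG has no alpha channel, so a diagram drawn with a transparent background gets flattened onto black in some viewers. Explicitly requesting a PNG keeps the transparency. The snippet below is only an illustrative sketch of how such a request URL can be built; the `?type=png` query parameter is the assumption being exercised here, and the code is not taken verbatim from Haystack.
```python
import base64

# Sketch: encode a Mermaid diagram and build both URL variants for mermaid.ink.
mermaid_text = "graph TD; a --> b;"
encoded = base64.b64encode(mermaid_text.encode("ascii")).decode("ascii")

jpeg_url = f"https://mermaid.ink/img/{encoded}"          # default rendering, may lose transparency
png_url = f"https://mermaid.ink/img/{encoded}?type=png"  # explicitly ask for a PNG
print(jpeg_url)
print(png_url)
```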
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `haystack/core/pipeline/draw.py`
Content:
```
1 # SPDX-FileCopyrightText: 2022-present deepset GmbH <[email protected]>
2 #
3 # SPDX-License-Identifier: Apache-2.0
4 import base64
5 import logging
6
7 import networkx # type:ignore
8 import requests
9
10 from haystack.core.errors import PipelineDrawingError
11 from haystack.core.pipeline.descriptions import find_pipeline_inputs, find_pipeline_outputs
12 from haystack.core.type_utils import _type_name
13
14 logger = logging.getLogger(__name__)
15
16
17 def _prepare_for_drawing(graph: networkx.MultiDiGraph) -> networkx.MultiDiGraph:
18 """
19 Add some extra nodes to show the inputs and outputs of the pipeline.
20 Also adds labels to edges.
21 """
22 # Label the edges
23 for inp, outp, key, data in graph.edges(keys=True, data=True):
24 data[
25 "label"
26 ] = f"{data['from_socket'].name} -> {data['to_socket'].name}{' (opt.)' if not data['mandatory'] else ''}"
27 graph.add_edge(inp, outp, key=key, **data)
28
29 # Add inputs fake node
30 graph.add_node("input")
31 for node, in_sockets in find_pipeline_inputs(graph).items():
32 for in_socket in in_sockets:
33 if not in_socket.senders and in_socket.is_mandatory:
34 # If this socket has no sender it could be a socket that receives input
35 # directly when running the Pipeline. We can't know that for sure, in doubt
36 # we draw it as receiving input directly.
37 graph.add_edge("input", node, label=in_socket.name, conn_type=_type_name(in_socket.type))
38
39 # Add outputs fake node
40 graph.add_node("output")
41 for node, out_sockets in find_pipeline_outputs(graph).items():
42 for out_socket in out_sockets:
43 graph.add_edge(node, "output", label=out_socket.name, conn_type=_type_name(out_socket.type))
44
45 return graph
46
47
48 ARROWTAIL_MANDATORY = "--"
49 ARROWTAIL_OPTIONAL = "-."
50 ARROWHEAD_MANDATORY = "-->"
51 ARROWHEAD_OPTIONAL = ".->"
52 MERMAID_STYLED_TEMPLATE = """
53 %%{{ init: {{'theme': 'neutral' }} }}%%
54
55 graph TD;
56
57 {connections}
58
59 classDef component text-align:center;
60 """
61
62
63 def _to_mermaid_image(graph: networkx.MultiDiGraph):
64 """
65 Renders a pipeline using Mermaid (hosted version at 'https://mermaid.ink'). Requires Internet access.
66 """
67 # Copy the graph to avoid modifying the original
68 graph_styled = _to_mermaid_text(graph.copy())
69
70 graphbytes = graph_styled.encode("ascii")
71 base64_bytes = base64.b64encode(graphbytes)
72 base64_string = base64_bytes.decode("ascii")
73 url = "https://mermaid.ink/img/" + base64_string
74
75 logging.debug("Rendeding graph at %s", url)
76 try:
77 resp = requests.get(url, timeout=10)
78 if resp.status_code >= 400:
79 logger.warning("Failed to draw the pipeline: https://mermaid.ink/img/ returned status %s", resp.status_code)
80 logger.info("Exact URL requested: %s", url)
81 logger.warning("No pipeline diagram will be saved.")
82 resp.raise_for_status()
83
84 except Exception as exc: # pylint: disable=broad-except
85 logger.warning("Failed to draw the pipeline: could not connect to https://mermaid.ink/img/ (%s)", exc)
86 logger.info("Exact URL requested: %s", url)
87 logger.warning("No pipeline diagram will be saved.")
88 raise PipelineDrawingError(
89 "There was an issue with https://mermaid.ink/, see the stacktrace for details."
90 ) from exc
91
92 return resp.content
93
94
95 def _to_mermaid_text(graph: networkx.MultiDiGraph) -> str:
96 """
97 Converts a Networkx graph into Mermaid syntax. The output of this function can be used in the documentation
98 with `mermaid` codeblocks and it will be automatically rendered.
99 """
100 # Copy the graph to avoid modifying the original
101 graph = _prepare_for_drawing(graph.copy())
102 sockets = {
103 comp: "".join(
104 [
105 f"<li>{name} ({_type_name(socket.type)})</li>"
106 for name, socket in data.get("input_sockets", {}).items()
107 if (not socket.is_mandatory and not socket.senders) or socket.is_variadic
108 ]
109 )
110 for comp, data in graph.nodes(data=True)
111 }
112 optional_inputs = {
113 comp: f"<br><br>Optional inputs:<ul style='text-align:left;'>{sockets}</ul>" if sockets else ""
114 for comp, sockets in sockets.items()
115 }
116
117 states = {
118 comp: f"{comp}[\"<b>{comp}</b><br><small><i>{type(data['instance']).__name__}{optional_inputs[comp]}</i></small>\"]:::component"
119 for comp, data in graph.nodes(data=True)
120 if comp not in ["input", "output"]
121 }
122
123 connections_list = []
124 for from_comp, to_comp, conn_data in graph.edges(data=True):
125 if from_comp != "input" and to_comp != "output":
126 arrowtail = ARROWTAIL_MANDATORY if conn_data["mandatory"] else ARROWTAIL_OPTIONAL
127 arrowhead = ARROWHEAD_MANDATORY if conn_data["mandatory"] else ARROWHEAD_OPTIONAL
128 label = f'"{conn_data["label"]}<br><small><i>{conn_data["conn_type"]}</i></small>"'
129 conn_string = f"{states[from_comp]} {arrowtail} {label} {arrowhead} {states[to_comp]}"
130 connections_list.append(conn_string)
131
132 input_connections = [
133 f"i{{*}} -- \"{conn_data['label']}<br><small><i>{conn_data['conn_type']}</i></small>\" --> {states[to_comp]}"
134 for _, to_comp, conn_data in graph.out_edges("input", data=True)
135 ]
136 output_connections = [
137 f"{states[from_comp]} -- \"{conn_data['label']}<br><small><i>{conn_data['conn_type']}</i></small>\"--> o{{*}}"
138 for from_comp, _, conn_data in graph.in_edges("output", data=True)
139 ]
140 connections = "\n".join(connections_list + input_connections + output_connections)
141
142 graph_styled = MERMAID_STYLED_TEMPLATE.format(connections=connections)
143 logger.debug("Mermaid diagram:\n%s", graph_styled)
144
145 return graph_styled
146
```
--- END FILES ---
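Within the file above, the URL construction in `_to_mermaid_image` (line 73 of the listing, `url = "https://mermaid.ink/img/" + base64_string`) is the natural suspect: it is the only place where the request to mermaid.ink is built, and it does not specify an output format, so the service's default is used.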
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/haystack/core/pipeline/draw.py b/haystack/core/pipeline/draw.py
--- a/haystack/core/pipeline/draw.py
+++ b/haystack/core/pipeline/draw.py
@@ -70,7 +70,7 @@
graphbytes = graph_styled.encode("ascii")
base64_bytes = base64.b64encode(graphbytes)
base64_string = base64_bytes.decode("ascii")
- url = "https://mermaid.ink/img/" + base64_string
+ url = f"https://mermaid.ink/img/{base64_string}?type=png"
logging.debug("Rendeding graph at %s", url)
try:
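The one-line change above appends `?type=png` to the mermaid.ink request so the service returns a PNG instead of its default rendering; because PNG keeps the alpha channel, the diagram background stays transparent rather than being flattened to black.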
| {"golden_diff": "diff --git a/haystack/core/pipeline/draw.py b/haystack/core/pipeline/draw.py\n--- a/haystack/core/pipeline/draw.py\n+++ b/haystack/core/pipeline/draw.py\n@@ -70,7 +70,7 @@\n graphbytes = graph_styled.encode(\"ascii\")\n base64_bytes = base64.b64encode(graphbytes)\n base64_string = base64_bytes.decode(\"ascii\")\n- url = \"https://mermaid.ink/img/\" + base64_string\n+ url = f\"https://mermaid.ink/img/{base64_string}?type=png\"\n \n logging.debug(\"Rendeding graph at %s\", url)\n try:\n", "issue": "Pipeline drawings in Colab have black background\n**Describe the bug**\r\nSince Haystack 2.0-beta8, Pipeline drawings in Colab and other environments (VS Code/Pycharm) randomly\r\nhave a black background.\r\n\r\n\r\n\r\nThese images are not nice and less readable than the previous ones **with transparent background**:\r\n\r\n\r\n\r\n**To Reproduce**\r\nRun the [first 2.0 tutorial](https://haystack.deepset.ai/tutorials/27_first_rag_pipeline)\r\n\r\n\r\n**System:**\r\n - Haystack version (commit or version number): 2.0-beta8\r\n\n", "before_files": [{"content": "# SPDX-FileCopyrightText: 2022-present deepset GmbH <[email protected]>\n#\n# SPDX-License-Identifier: Apache-2.0\nimport base64\nimport logging\n\nimport networkx # type:ignore\nimport requests\n\nfrom haystack.core.errors import PipelineDrawingError\nfrom haystack.core.pipeline.descriptions import find_pipeline_inputs, find_pipeline_outputs\nfrom haystack.core.type_utils import _type_name\n\nlogger = logging.getLogger(__name__)\n\n\ndef _prepare_for_drawing(graph: networkx.MultiDiGraph) -> networkx.MultiDiGraph:\n \"\"\"\n Add some extra nodes to show the inputs and outputs of the pipeline.\n Also adds labels to edges.\n \"\"\"\n # Label the edges\n for inp, outp, key, data in graph.edges(keys=True, data=True):\n data[\n \"label\"\n ] = f\"{data['from_socket'].name} -> {data['to_socket'].name}{' (opt.)' if not data['mandatory'] else ''}\"\n graph.add_edge(inp, outp, key=key, **data)\n\n # Add inputs fake node\n graph.add_node(\"input\")\n for node, in_sockets in find_pipeline_inputs(graph).items():\n for in_socket in in_sockets:\n if not in_socket.senders and in_socket.is_mandatory:\n # If this socket has no sender it could be a socket that receives input\n # directly when running the Pipeline. We can't know that for sure, in doubt\n # we draw it as receiving input directly.\n graph.add_edge(\"input\", node, label=in_socket.name, conn_type=_type_name(in_socket.type))\n\n # Add outputs fake node\n graph.add_node(\"output\")\n for node, out_sockets in find_pipeline_outputs(graph).items():\n for out_socket in out_sockets:\n graph.add_edge(node, \"output\", label=out_socket.name, conn_type=_type_name(out_socket.type))\n\n return graph\n\n\nARROWTAIL_MANDATORY = \"--\"\nARROWTAIL_OPTIONAL = \"-.\"\nARROWHEAD_MANDATORY = \"-->\"\nARROWHEAD_OPTIONAL = \".->\"\nMERMAID_STYLED_TEMPLATE = \"\"\"\n%%{{ init: {{'theme': 'neutral' }} }}%%\n\ngraph TD;\n\n{connections}\n\nclassDef component text-align:center;\n\"\"\"\n\n\ndef _to_mermaid_image(graph: networkx.MultiDiGraph):\n \"\"\"\n Renders a pipeline using Mermaid (hosted version at 'https://mermaid.ink'). 
Requires Internet access.\n \"\"\"\n # Copy the graph to avoid modifying the original\n graph_styled = _to_mermaid_text(graph.copy())\n\n graphbytes = graph_styled.encode(\"ascii\")\n base64_bytes = base64.b64encode(graphbytes)\n base64_string = base64_bytes.decode(\"ascii\")\n url = \"https://mermaid.ink/img/\" + base64_string\n\n logging.debug(\"Rendeding graph at %s\", url)\n try:\n resp = requests.get(url, timeout=10)\n if resp.status_code >= 400:\n logger.warning(\"Failed to draw the pipeline: https://mermaid.ink/img/ returned status %s\", resp.status_code)\n logger.info(\"Exact URL requested: %s\", url)\n logger.warning(\"No pipeline diagram will be saved.\")\n resp.raise_for_status()\n\n except Exception as exc: # pylint: disable=broad-except\n logger.warning(\"Failed to draw the pipeline: could not connect to https://mermaid.ink/img/ (%s)\", exc)\n logger.info(\"Exact URL requested: %s\", url)\n logger.warning(\"No pipeline diagram will be saved.\")\n raise PipelineDrawingError(\n \"There was an issue with https://mermaid.ink/, see the stacktrace for details.\"\n ) from exc\n\n return resp.content\n\n\ndef _to_mermaid_text(graph: networkx.MultiDiGraph) -> str:\n \"\"\"\n Converts a Networkx graph into Mermaid syntax. The output of this function can be used in the documentation\n with `mermaid` codeblocks and it will be automatically rendered.\n \"\"\"\n # Copy the graph to avoid modifying the original\n graph = _prepare_for_drawing(graph.copy())\n sockets = {\n comp: \"\".join(\n [\n f\"<li>{name} ({_type_name(socket.type)})</li>\"\n for name, socket in data.get(\"input_sockets\", {}).items()\n if (not socket.is_mandatory and not socket.senders) or socket.is_variadic\n ]\n )\n for comp, data in graph.nodes(data=True)\n }\n optional_inputs = {\n comp: f\"<br><br>Optional inputs:<ul style='text-align:left;'>{sockets}</ul>\" if sockets else \"\"\n for comp, sockets in sockets.items()\n }\n\n states = {\n comp: f\"{comp}[\\\"<b>{comp}</b><br><small><i>{type(data['instance']).__name__}{optional_inputs[comp]}</i></small>\\\"]:::component\"\n for comp, data in graph.nodes(data=True)\n if comp not in [\"input\", \"output\"]\n }\n\n connections_list = []\n for from_comp, to_comp, conn_data in graph.edges(data=True):\n if from_comp != \"input\" and to_comp != \"output\":\n arrowtail = ARROWTAIL_MANDATORY if conn_data[\"mandatory\"] else ARROWTAIL_OPTIONAL\n arrowhead = ARROWHEAD_MANDATORY if conn_data[\"mandatory\"] else ARROWHEAD_OPTIONAL\n label = f'\"{conn_data[\"label\"]}<br><small><i>{conn_data[\"conn_type\"]}</i></small>\"'\n conn_string = f\"{states[from_comp]} {arrowtail} {label} {arrowhead} {states[to_comp]}\"\n connections_list.append(conn_string)\n\n input_connections = [\n f\"i{{*}} -- \\\"{conn_data['label']}<br><small><i>{conn_data['conn_type']}</i></small>\\\" --> {states[to_comp]}\"\n for _, to_comp, conn_data in graph.out_edges(\"input\", data=True)\n ]\n output_connections = [\n f\"{states[from_comp]} -- \\\"{conn_data['label']}<br><small><i>{conn_data['conn_type']}</i></small>\\\"--> o{{*}}\"\n for from_comp, _, conn_data in graph.in_edges(\"output\", data=True)\n ]\n connections = \"\\n\".join(connections_list + input_connections + output_connections)\n\n graph_styled = MERMAID_STYLED_TEMPLATE.format(connections=connections)\n logger.debug(\"Mermaid diagram:\\n%s\", graph_styled)\n\n return graph_styled\n", "path": "haystack/core/pipeline/draw.py"}], "after_files": [{"content": "# SPDX-FileCopyrightText: 2022-present deepset GmbH <[email protected]>\n#\n# 
SPDX-License-Identifier: Apache-2.0\nimport base64\nimport logging\n\nimport networkx # type:ignore\nimport requests\n\nfrom haystack.core.errors import PipelineDrawingError\nfrom haystack.core.pipeline.descriptions import find_pipeline_inputs, find_pipeline_outputs\nfrom haystack.core.type_utils import _type_name\n\nlogger = logging.getLogger(__name__)\n\n\ndef _prepare_for_drawing(graph: networkx.MultiDiGraph) -> networkx.MultiDiGraph:\n \"\"\"\n Add some extra nodes to show the inputs and outputs of the pipeline.\n Also adds labels to edges.\n \"\"\"\n # Label the edges\n for inp, outp, key, data in graph.edges(keys=True, data=True):\n data[\n \"label\"\n ] = f\"{data['from_socket'].name} -> {data['to_socket'].name}{' (opt.)' if not data['mandatory'] else ''}\"\n graph.add_edge(inp, outp, key=key, **data)\n\n # Add inputs fake node\n graph.add_node(\"input\")\n for node, in_sockets in find_pipeline_inputs(graph).items():\n for in_socket in in_sockets:\n if not in_socket.senders and in_socket.is_mandatory:\n # If this socket has no sender it could be a socket that receives input\n # directly when running the Pipeline. We can't know that for sure, in doubt\n # we draw it as receiving input directly.\n graph.add_edge(\"input\", node, label=in_socket.name, conn_type=_type_name(in_socket.type))\n\n # Add outputs fake node\n graph.add_node(\"output\")\n for node, out_sockets in find_pipeline_outputs(graph).items():\n for out_socket in out_sockets:\n graph.add_edge(node, \"output\", label=out_socket.name, conn_type=_type_name(out_socket.type))\n\n return graph\n\n\nARROWTAIL_MANDATORY = \"--\"\nARROWTAIL_OPTIONAL = \"-.\"\nARROWHEAD_MANDATORY = \"-->\"\nARROWHEAD_OPTIONAL = \".->\"\nMERMAID_STYLED_TEMPLATE = \"\"\"\n%%{{ init: {{'theme': 'neutral' }} }}%%\n\ngraph TD;\n\n{connections}\n\nclassDef component text-align:center;\n\"\"\"\n\n\ndef _to_mermaid_image(graph: networkx.MultiDiGraph):\n \"\"\"\n Renders a pipeline using Mermaid (hosted version at 'https://mermaid.ink'). Requires Internet access.\n \"\"\"\n # Copy the graph to avoid modifying the original\n graph_styled = _to_mermaid_text(graph.copy())\n\n graphbytes = graph_styled.encode(\"ascii\")\n base64_bytes = base64.b64encode(graphbytes)\n base64_string = base64_bytes.decode(\"ascii\")\n url = f\"https://mermaid.ink/img/{base64_string}?type=png\"\n\n logging.debug(\"Rendeding graph at %s\", url)\n try:\n resp = requests.get(url, timeout=10)\n if resp.status_code >= 400:\n logger.warning(\"Failed to draw the pipeline: https://mermaid.ink/img/ returned status %s\", resp.status_code)\n logger.info(\"Exact URL requested: %s\", url)\n logger.warning(\"No pipeline diagram will be saved.\")\n resp.raise_for_status()\n\n except Exception as exc: # pylint: disable=broad-except\n logger.warning(\"Failed to draw the pipeline: could not connect to https://mermaid.ink/img/ (%s)\", exc)\n logger.info(\"Exact URL requested: %s\", url)\n logger.warning(\"No pipeline diagram will be saved.\")\n raise PipelineDrawingError(\n \"There was an issue with https://mermaid.ink/, see the stacktrace for details.\"\n ) from exc\n\n return resp.content\n\n\ndef _to_mermaid_text(graph: networkx.MultiDiGraph) -> str:\n \"\"\"\n Converts a Networkx graph into Mermaid syntax. 
The output of this function can be used in the documentation\n with `mermaid` codeblocks and it will be automatically rendered.\n \"\"\"\n # Copy the graph to avoid modifying the original\n graph = _prepare_for_drawing(graph.copy())\n sockets = {\n comp: \"\".join(\n [\n f\"<li>{name} ({_type_name(socket.type)})</li>\"\n for name, socket in data.get(\"input_sockets\", {}).items()\n if (not socket.is_mandatory and not socket.senders) or socket.is_variadic\n ]\n )\n for comp, data in graph.nodes(data=True)\n }\n optional_inputs = {\n comp: f\"<br><br>Optional inputs:<ul style='text-align:left;'>{sockets}</ul>\" if sockets else \"\"\n for comp, sockets in sockets.items()\n }\n\n states = {\n comp: f\"{comp}[\\\"<b>{comp}</b><br><small><i>{type(data['instance']).__name__}{optional_inputs[comp]}</i></small>\\\"]:::component\"\n for comp, data in graph.nodes(data=True)\n if comp not in [\"input\", \"output\"]\n }\n\n connections_list = []\n for from_comp, to_comp, conn_data in graph.edges(data=True):\n if from_comp != \"input\" and to_comp != \"output\":\n arrowtail = ARROWTAIL_MANDATORY if conn_data[\"mandatory\"] else ARROWTAIL_OPTIONAL\n arrowhead = ARROWHEAD_MANDATORY if conn_data[\"mandatory\"] else ARROWHEAD_OPTIONAL\n label = f'\"{conn_data[\"label\"]}<br><small><i>{conn_data[\"conn_type\"]}</i></small>\"'\n conn_string = f\"{states[from_comp]} {arrowtail} {label} {arrowhead} {states[to_comp]}\"\n connections_list.append(conn_string)\n\n input_connections = [\n f\"i{{*}} -- \\\"{conn_data['label']}<br><small><i>{conn_data['conn_type']}</i></small>\\\" --> {states[to_comp]}\"\n for _, to_comp, conn_data in graph.out_edges(\"input\", data=True)\n ]\n output_connections = [\n f\"{states[from_comp]} -- \\\"{conn_data['label']}<br><small><i>{conn_data['conn_type']}</i></small>\\\"--> o{{*}}\"\n for from_comp, _, conn_data in graph.in_edges(\"output\", data=True)\n ]\n connections = \"\\n\".join(connections_list + input_connections + output_connections)\n\n graph_styled = MERMAID_STYLED_TEMPLATE.format(connections=connections)\n logger.debug(\"Mermaid diagram:\\n%s\", graph_styled)\n\n return graph_styled\n", "path": "haystack/core/pipeline/draw.py"}]} |
gh_patches_debug_1230 | rasdani/github-patches | git_diff | facebookresearch__ParlAI-1447 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error when running DrQA PyTorch 1.0.0
When running the basic example on SQUAD
```python examples/train_model.py -m drqa -t squad -bs 32```
Throwing this.
```[ training... ]
/content/DuReader/data/ParlAI/parlai/agents/drqa/layers.py:177: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.
alpha_flat = F.softmax(scores.view(-1, y.size(1)))
/content/DuReader/data/ParlAI/parlai/agents/drqa/layers.py:237: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.
alpha = F.softmax(scores)
/content/DuReader/data/ParlAI/parlai/agents/drqa/layers.py:210: UserWarning: Implicit dimension choice for log_softmax has been deprecated. Change the call to include dim=X as an argument.
alpha = F.log_softmax(xWy)
Traceback (most recent call last):
File "examples/train_model.py", line 16, in <module>
TrainLoop(opt).train()
File "/content/DuReader/data/ParlAI/parlai/scripts/train_model.py", line 500, in train
world.parley()
File "/content/DuReader/data/ParlAI/parlai/core/worlds.py", line 641, in parley
batch_act = self.batch_act(agent_idx, batch_observations[agent_idx])
File "/content/DuReader/data/ParlAI/parlai/core/worlds.py", line 614, in batch_act
batch_actions = a.batch_act(batch_observation)
File "/content/DuReader/data/ParlAI/parlai/agents/drqa/drqa.py", line 227, in batch_act
self.model.update(batch)
File "/content/DuReader/data/ParlAI/parlai/agents/drqa/model.py", line 102, in update
self.train_loss.update(loss.data[0], ex[0].size(0))
IndexError: invalid index of a 0-dim tensor. Use tensor.item() to convert a 0-dim tensor to a Python number
```
I think this is related to the new version of PyTorch and the fix should be fairly trivial. Anyone working on fixing it soon? If not I could probably do a PR.
--- END ISSUE ---
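For context: since PyTorch 0.4, reduction ops such as `F.nll_loss` return a 0-dimensional tensor; indexing it with `loss.data[0]` first produced a deprecation warning and, by 1.0, raises the `IndexError` shown in the traceback. `tensor.item()` is the supported way to extract the Python number. A minimal illustration (not ParlAI code):
```python
import torch
import torch.nn.functional as F

scores = F.log_softmax(torch.randn(4, 10), dim=1)
targets = torch.tensor([1, 2, 3, 4])
loss = F.nll_loss(scores, targets)

# loss is a 0-dim tensor on PyTorch >= 0.4:
#   loss.data[0]  -> IndexError: invalid index of a 0-dim tensor
value = loss.item()  # portable replacement
print(value)
```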
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parlai/agents/drqa/model.py`
Content:
```
1 #!/usr/bin/env python3
2
3 # Copyright (c) Facebook, Inc. and its affiliates.
4 # This source code is licensed under the MIT license found in the
5 # LICENSE file in the root directory of this source tree.
6 import torch
7 import torch.optim as optim
8 import torch.nn.functional as F
9 import numpy as np
10 import logging
11
12 from torch.autograd import Variable
13 from .utils import load_embeddings, AverageMeter
14 from .rnn_reader import RnnDocReader
15
16 logger = logging.getLogger('DrQA')
17
18
19 class DocReaderModel(object):
20 """High level model that handles intializing the underlying network
21 architecture, saving, updating examples, and predicting examples.
22 """
23
24 def __init__(self, opt, word_dict, feature_dict, state_dict=None):
25 # Book-keeping.
26 self.opt = opt
27 self.word_dict = word_dict
28 self.feature_dict = feature_dict
29 self.updates = 0
30 self.train_loss = AverageMeter()
31
32 # Building network.
33 self.network = RnnDocReader(opt)
34 if state_dict:
35 new_state = set(self.network.state_dict().keys())
36 for k in list(state_dict['network'].keys()):
37 if k not in new_state:
38 del state_dict['network'][k]
39 self.network.load_state_dict(state_dict['network'])
40
41 # Building optimizer.
42 parameters = [p for p in self.network.parameters() if p.requires_grad]
43 if opt['optimizer'] == 'sgd':
44 self.optimizer = optim.SGD(parameters, opt['learning_rate'],
45 momentum=opt['momentum'],
46 weight_decay=opt['weight_decay'])
47 elif opt['optimizer'] == 'adamax':
48 self.optimizer = optim.Adamax(parameters,
49 weight_decay=opt['weight_decay'])
50 else:
51 raise RuntimeError('Unsupported optimizer: %s' % opt['optimizer'])
52
53 def set_embeddings(self):
54 # Read word embeddings.
55 if not self.opt.get('embedding_file'):
56 logger.warning('[ WARNING: No embeddings provided. '
57 'Keeping random initialization. ]')
58 return
59 logger.info('[ Loading pre-trained embeddings ]')
60 embeddings = load_embeddings(self.opt, self.word_dict)
61 logger.info('[ Num embeddings = %d ]' % embeddings.size(0))
62
63 # Sanity check dimensions
64 new_size = embeddings.size()
65 old_size = self.network.embedding.weight.size()
66 if new_size[1] != old_size[1]:
67 raise RuntimeError('Embedding dimensions do not match.')
68 if new_size[0] != old_size[0]:
69 logger.warning(
70 '[ WARNING: Number of embeddings changed (%d->%d) ]' %
71 (old_size[0], new_size[0])
72 )
73
74 # Swap weights
75 self.network.embedding.weight.data = embeddings
76
77 # If partially tuning the embeddings, keep the old values
78 if self.opt['tune_partial'] > 0:
79 if self.opt['tune_partial'] + 2 < embeddings.size(0):
80 fixed_embedding = embeddings[self.opt['tune_partial'] + 2:]
81 self.network.fixed_embedding = fixed_embedding
82
83 def update(self, ex):
84 # Train mode
85 self.network.train()
86
87 # Transfer to GPU
88 if self.opt['cuda']:
89 inputs = [Variable(e.cuda(non_blocking=True)) for e in ex[:5]]
90 target_s = Variable(ex[5].cuda(non_blocking=True))
91 target_e = Variable(ex[6].cuda(non_blocking=True))
92 else:
93 inputs = [Variable(e) for e in ex[:5]]
94 target_s = Variable(ex[5])
95 target_e = Variable(ex[6])
96
97 # Run forward
98 score_s, score_e = self.network(*inputs)
99
100 # Compute loss and accuracies
101 loss = F.nll_loss(score_s, target_s) + F.nll_loss(score_e, target_e)
102 self.train_loss.update(loss.data[0], ex[0].size(0))
103
104 # Clear gradients and run backward
105 self.optimizer.zero_grad()
106 loss.backward()
107
108 # Clip gradients
109 torch.nn.utils.clip_grad_norm(self.network.parameters(),
110 self.opt['grad_clipping'])
111
112 # Update parameters
113 self.optimizer.step()
114 self.updates += 1
115
116 # Reset any partially fixed parameters (e.g. rare words)
117 self.reset_parameters()
118
119 def predict(self, ex):
120 # Eval mode
121 self.network.eval()
122
123 # Transfer to GPU
124 if self.opt['cuda']:
125 inputs = [Variable(e.cuda(non_blocking=True), volatile=True)
126 for e in ex[:5]]
127 else:
128 inputs = [Variable(e, volatile=True) for e in ex[:5]]
129
130 # Run forward
131 score_s, score_e = self.network(*inputs)
132
133 # Transfer to CPU/normal tensors for numpy ops
134 score_s = score_s.data.cpu()
135 score_e = score_e.data.cpu()
136
137 # Get argmax text spans
138 text = ex[-2]
139 spans = ex[-1]
140 predictions = []
141 pred_scores = []
142 max_len = self.opt['max_len'] or score_s.size(1)
143 for i in range(score_s.size(0)):
144 scores = torch.ger(score_s[i], score_e[i])
145 scores.triu_().tril_(max_len - 1)
146 scores = scores.numpy()
147 s_idx, e_idx = np.unravel_index(np.argmax(scores), scores.shape)
148 s_offset, e_offset = spans[i][s_idx][0], spans[i][e_idx][1]
149 predictions.append(text[i][s_offset:e_offset])
150 pred_scores.append(np.max(scores))
151
152 return predictions, pred_scores
153
154 def reset_parameters(self):
155 # Reset fixed embeddings to original value
156 if self.opt['tune_partial'] > 0:
157 offset = self.opt['tune_partial'] + 2
158 if offset < self.network.embedding.weight.data.size(0):
159 self.network.embedding.weight.data[offset:] \
160 = self.network.fixed_embedding
161
162 def save(self, filename):
163 params = {
164 'state_dict': {
165 'network': self.network.state_dict(),
166 },
167 'feature_dict': self.feature_dict,
168 'config': self.opt,
169 }
170 try:
171 torch.save(params, filename)
172 except BaseException:
173 logger.warn('[ WARN: Saving failed... continuing anyway. ]')
174
175 def cuda(self):
176 self.network.cuda()
177
```
--- END FILES ---
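The traceback in the issue points at line 102 of the listing above (`self.train_loss.update(loss.data[0], ex[0].size(0))` inside `update`), which is the only spot in this file that indexes a loss tensor, so the fix is expected to be local to that line.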
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parlai/agents/drqa/model.py b/parlai/agents/drqa/model.py
--- a/parlai/agents/drqa/model.py
+++ b/parlai/agents/drqa/model.py
@@ -99,7 +99,7 @@
# Compute loss and accuracies
loss = F.nll_loss(score_s, target_s) + F.nll_loss(score_e, target_e)
- self.train_loss.update(loss.data[0], ex[0].size(0))
+ self.train_loss.update(loss.data.item(), ex[0].size(0))
# Clear gradients and run backward
self.optimizer.zero_grad()
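The patch keeps the existing `loss.data` access and only swaps the `[0]` indexing for `.item()`; calling `loss.item()` directly would behave the same and avoids touching `.data` at all. The rest of the file still uses older idioms (`Variable`, `volatile=True`, `clip_grad_norm`) that PyTorch 1.0 only warns about, which is presumably why this single line was enough to unblock training.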
| {"golden_diff": "diff --git a/parlai/agents/drqa/model.py b/parlai/agents/drqa/model.py\n--- a/parlai/agents/drqa/model.py\n+++ b/parlai/agents/drqa/model.py\n@@ -99,7 +99,7 @@\n \n # Compute loss and accuracies\n loss = F.nll_loss(score_s, target_s) + F.nll_loss(score_e, target_e)\n- self.train_loss.update(loss.data[0], ex[0].size(0))\n+ self.train_loss.update(loss.data.item(), ex[0].size(0))\n \n # Clear gradients and run backward\n self.optimizer.zero_grad()\n", "issue": "Error when running DrQA PyTorch 1.0.0\nWhen running the basic example on SQUAD \r\n```python examples/train_model.py -m drqa -t squad -bs 32```\r\nThrowing this. \r\n```[ training... ]\r\n/content/DuReader/data/ParlAI/parlai/agents/drqa/layers.py:177: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\r\n alpha_flat = F.softmax(scores.view(-1, y.size(1)))\r\n/content/DuReader/data/ParlAI/parlai/agents/drqa/layers.py:237: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\r\n alpha = F.softmax(scores)\r\n/content/DuReader/data/ParlAI/parlai/agents/drqa/layers.py:210: UserWarning: Implicit dimension choice for log_softmax has been deprecated. Change the call to include dim=X as an argument.\r\n alpha = F.log_softmax(xWy)\r\nTraceback (most recent call last):\r\n File \"examples/train_model.py\", line 16, in <module>\r\n TrainLoop(opt).train()\r\n File \"/content/DuReader/data/ParlAI/parlai/scripts/train_model.py\", line 500, in train\r\n world.parley()\r\n File \"/content/DuReader/data/ParlAI/parlai/core/worlds.py\", line 641, in parley\r\n batch_act = self.batch_act(agent_idx, batch_observations[agent_idx])\r\n File \"/content/DuReader/data/ParlAI/parlai/core/worlds.py\", line 614, in batch_act\r\n batch_actions = a.batch_act(batch_observation)\r\n File \"/content/DuReader/data/ParlAI/parlai/agents/drqa/drqa.py\", line 227, in batch_act\r\n self.model.update(batch)\r\n File \"/content/DuReader/data/ParlAI/parlai/agents/drqa/model.py\", line 102, in update\r\n self.train_loss.update(loss.data[0], ex[0].size(0))\r\nIndexError: invalid index of a 0-dim tensor. Use tensor.item() to convert a 0-dim tensor to a Python number\r\n```\r\nI think this is related to the new version of PyTorch and the fix should be fairly trivial. Anyone working on fixing it soon? If not I could probably do a PR. \n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. 
and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nimport torch\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport numpy as np\nimport logging\n\nfrom torch.autograd import Variable\nfrom .utils import load_embeddings, AverageMeter\nfrom .rnn_reader import RnnDocReader\n\nlogger = logging.getLogger('DrQA')\n\n\nclass DocReaderModel(object):\n \"\"\"High level model that handles intializing the underlying network\n architecture, saving, updating examples, and predicting examples.\n \"\"\"\n\n def __init__(self, opt, word_dict, feature_dict, state_dict=None):\n # Book-keeping.\n self.opt = opt\n self.word_dict = word_dict\n self.feature_dict = feature_dict\n self.updates = 0\n self.train_loss = AverageMeter()\n\n # Building network.\n self.network = RnnDocReader(opt)\n if state_dict:\n new_state = set(self.network.state_dict().keys())\n for k in list(state_dict['network'].keys()):\n if k not in new_state:\n del state_dict['network'][k]\n self.network.load_state_dict(state_dict['network'])\n\n # Building optimizer.\n parameters = [p for p in self.network.parameters() if p.requires_grad]\n if opt['optimizer'] == 'sgd':\n self.optimizer = optim.SGD(parameters, opt['learning_rate'],\n momentum=opt['momentum'],\n weight_decay=opt['weight_decay'])\n elif opt['optimizer'] == 'adamax':\n self.optimizer = optim.Adamax(parameters,\n weight_decay=opt['weight_decay'])\n else:\n raise RuntimeError('Unsupported optimizer: %s' % opt['optimizer'])\n\n def set_embeddings(self):\n # Read word embeddings.\n if not self.opt.get('embedding_file'):\n logger.warning('[ WARNING: No embeddings provided. '\n 'Keeping random initialization. ]')\n return\n logger.info('[ Loading pre-trained embeddings ]')\n embeddings = load_embeddings(self.opt, self.word_dict)\n logger.info('[ Num embeddings = %d ]' % embeddings.size(0))\n\n # Sanity check dimensions\n new_size = embeddings.size()\n old_size = self.network.embedding.weight.size()\n if new_size[1] != old_size[1]:\n raise RuntimeError('Embedding dimensions do not match.')\n if new_size[0] != old_size[0]:\n logger.warning(\n '[ WARNING: Number of embeddings changed (%d->%d) ]' %\n (old_size[0], new_size[0])\n )\n\n # Swap weights\n self.network.embedding.weight.data = embeddings\n\n # If partially tuning the embeddings, keep the old values\n if self.opt['tune_partial'] > 0:\n if self.opt['tune_partial'] + 2 < embeddings.size(0):\n fixed_embedding = embeddings[self.opt['tune_partial'] + 2:]\n self.network.fixed_embedding = fixed_embedding\n\n def update(self, ex):\n # Train mode\n self.network.train()\n\n # Transfer to GPU\n if self.opt['cuda']:\n inputs = [Variable(e.cuda(non_blocking=True)) for e in ex[:5]]\n target_s = Variable(ex[5].cuda(non_blocking=True))\n target_e = Variable(ex[6].cuda(non_blocking=True))\n else:\n inputs = [Variable(e) for e in ex[:5]]\n target_s = Variable(ex[5])\n target_e = Variable(ex[6])\n\n # Run forward\n score_s, score_e = self.network(*inputs)\n\n # Compute loss and accuracies\n loss = F.nll_loss(score_s, target_s) + F.nll_loss(score_e, target_e)\n self.train_loss.update(loss.data[0], ex[0].size(0))\n\n # Clear gradients and run backward\n self.optimizer.zero_grad()\n loss.backward()\n\n # Clip gradients\n torch.nn.utils.clip_grad_norm(self.network.parameters(),\n self.opt['grad_clipping'])\n\n # Update parameters\n self.optimizer.step()\n self.updates += 1\n\n # Reset any partially fixed parameters (e.g. 
rare words)\n self.reset_parameters()\n\n def predict(self, ex):\n # Eval mode\n self.network.eval()\n\n # Transfer to GPU\n if self.opt['cuda']:\n inputs = [Variable(e.cuda(non_blocking=True), volatile=True)\n for e in ex[:5]]\n else:\n inputs = [Variable(e, volatile=True) for e in ex[:5]]\n\n # Run forward\n score_s, score_e = self.network(*inputs)\n\n # Transfer to CPU/normal tensors for numpy ops\n score_s = score_s.data.cpu()\n score_e = score_e.data.cpu()\n\n # Get argmax text spans\n text = ex[-2]\n spans = ex[-1]\n predictions = []\n pred_scores = []\n max_len = self.opt['max_len'] or score_s.size(1)\n for i in range(score_s.size(0)):\n scores = torch.ger(score_s[i], score_e[i])\n scores.triu_().tril_(max_len - 1)\n scores = scores.numpy()\n s_idx, e_idx = np.unravel_index(np.argmax(scores), scores.shape)\n s_offset, e_offset = spans[i][s_idx][0], spans[i][e_idx][1]\n predictions.append(text[i][s_offset:e_offset])\n pred_scores.append(np.max(scores))\n\n return predictions, pred_scores\n\n def reset_parameters(self):\n # Reset fixed embeddings to original value\n if self.opt['tune_partial'] > 0:\n offset = self.opt['tune_partial'] + 2\n if offset < self.network.embedding.weight.data.size(0):\n self.network.embedding.weight.data[offset:] \\\n = self.network.fixed_embedding\n\n def save(self, filename):\n params = {\n 'state_dict': {\n 'network': self.network.state_dict(),\n },\n 'feature_dict': self.feature_dict,\n 'config': self.opt,\n }\n try:\n torch.save(params, filename)\n except BaseException:\n logger.warn('[ WARN: Saving failed... continuing anyway. ]')\n\n def cuda(self):\n self.network.cuda()\n", "path": "parlai/agents/drqa/model.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nimport torch\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport numpy as np\nimport logging\n\nfrom torch.autograd import Variable\nfrom .utils import load_embeddings, AverageMeter\nfrom .rnn_reader import RnnDocReader\n\nlogger = logging.getLogger('DrQA')\n\n\nclass DocReaderModel(object):\n \"\"\"High level model that handles intializing the underlying network\n architecture, saving, updating examples, and predicting examples.\n \"\"\"\n\n def __init__(self, opt, word_dict, feature_dict, state_dict=None):\n # Book-keeping.\n self.opt = opt\n self.word_dict = word_dict\n self.feature_dict = feature_dict\n self.updates = 0\n self.train_loss = AverageMeter()\n\n # Building network.\n self.network = RnnDocReader(opt)\n if state_dict:\n new_state = set(self.network.state_dict().keys())\n for k in list(state_dict['network'].keys()):\n if k not in new_state:\n del state_dict['network'][k]\n self.network.load_state_dict(state_dict['network'])\n\n # Building optimizer.\n parameters = [p for p in self.network.parameters() if p.requires_grad]\n if opt['optimizer'] == 'sgd':\n self.optimizer = optim.SGD(parameters, opt['learning_rate'],\n momentum=opt['momentum'],\n weight_decay=opt['weight_decay'])\n elif opt['optimizer'] == 'adamax':\n self.optimizer = optim.Adamax(parameters,\n weight_decay=opt['weight_decay'])\n else:\n raise RuntimeError('Unsupported optimizer: %s' % opt['optimizer'])\n\n def set_embeddings(self):\n # Read word embeddings.\n if not self.opt.get('embedding_file'):\n logger.warning('[ WARNING: No embeddings provided. '\n 'Keeping random initialization. 
]')\n return\n logger.info('[ Loading pre-trained embeddings ]')\n embeddings = load_embeddings(self.opt, self.word_dict)\n logger.info('[ Num embeddings = %d ]' % embeddings.size(0))\n\n # Sanity check dimensions\n new_size = embeddings.size()\n old_size = self.network.embedding.weight.size()\n if new_size[1] != old_size[1]:\n raise RuntimeError('Embedding dimensions do not match.')\n if new_size[0] != old_size[0]:\n logger.warning(\n '[ WARNING: Number of embeddings changed (%d->%d) ]' %\n (old_size[0], new_size[0])\n )\n\n # Swap weights\n self.network.embedding.weight.data = embeddings\n\n # If partially tuning the embeddings, keep the old values\n if self.opt['tune_partial'] > 0:\n if self.opt['tune_partial'] + 2 < embeddings.size(0):\n fixed_embedding = embeddings[self.opt['tune_partial'] + 2:]\n self.network.fixed_embedding = fixed_embedding\n\n def update(self, ex):\n # Train mode\n self.network.train()\n\n # Transfer to GPU\n if self.opt['cuda']:\n inputs = [Variable(e.cuda(non_blocking=True)) for e in ex[:5]]\n target_s = Variable(ex[5].cuda(non_blocking=True))\n target_e = Variable(ex[6].cuda(non_blocking=True))\n else:\n inputs = [Variable(e) for e in ex[:5]]\n target_s = Variable(ex[5])\n target_e = Variable(ex[6])\n\n # Run forward\n score_s, score_e = self.network(*inputs)\n\n # Compute loss and accuracies\n loss = F.nll_loss(score_s, target_s) + F.nll_loss(score_e, target_e)\n self.train_loss.update(loss.data.item(), ex[0].size(0))\n\n # Clear gradients and run backward\n self.optimizer.zero_grad()\n loss.backward()\n\n # Clip gradients\n torch.nn.utils.clip_grad_norm(self.network.parameters(),\n self.opt['grad_clipping'])\n\n # Update parameters\n self.optimizer.step()\n self.updates += 1\n\n # Reset any partially fixed parameters (e.g. rare words)\n self.reset_parameters()\n\n def predict(self, ex):\n # Eval mode\n self.network.eval()\n\n # Transfer to GPU\n if self.opt['cuda']:\n inputs = [Variable(e.cuda(non_blocking=True), volatile=True)\n for e in ex[:5]]\n else:\n inputs = [Variable(e, volatile=True) for e in ex[:5]]\n\n # Run forward\n score_s, score_e = self.network(*inputs)\n\n # Transfer to CPU/normal tensors for numpy ops\n score_s = score_s.data.cpu()\n score_e = score_e.data.cpu()\n\n # Get argmax text spans\n text = ex[-2]\n spans = ex[-1]\n predictions = []\n pred_scores = []\n max_len = self.opt['max_len'] or score_s.size(1)\n for i in range(score_s.size(0)):\n scores = torch.ger(score_s[i], score_e[i])\n scores.triu_().tril_(max_len - 1)\n scores = scores.numpy()\n s_idx, e_idx = np.unravel_index(np.argmax(scores), scores.shape)\n s_offset, e_offset = spans[i][s_idx][0], spans[i][e_idx][1]\n predictions.append(text[i][s_offset:e_offset])\n pred_scores.append(np.max(scores))\n\n return predictions, pred_scores\n\n def reset_parameters(self):\n # Reset fixed embeddings to original value\n if self.opt['tune_partial'] > 0:\n offset = self.opt['tune_partial'] + 2\n if offset < self.network.embedding.weight.data.size(0):\n self.network.embedding.weight.data[offset:] \\\n = self.network.fixed_embedding\n\n def save(self, filename):\n params = {\n 'state_dict': {\n 'network': self.network.state_dict(),\n },\n 'feature_dict': self.feature_dict,\n 'config': self.opt,\n }\n try:\n torch.save(params, filename)\n except BaseException:\n logger.warn('[ WARN: Saving failed... continuing anyway. ]')\n\n def cuda(self):\n self.network.cuda()\n", "path": "parlai/agents/drqa/model.py"}]} |
gh_patches_debug_1231 | rasdani/github-patches | git_diff | svthalia__concrexit-1036 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AttributeError: 'NoneType' object has no attribute 'is_authenticated'
In GitLab by _thaliatechnicie on Mar 11, 2020, 23:02
Sentry Issue: [CONCREXIT-2A](https://sentry.io/organizations/thalia/issues/1560438241/?referrer=gitlab_integration)
```
AttributeError: 'NoneType' object has no attribute 'is_authenticated'
(14 additional frame(s) were not displayed)
...
File "rest_framework/serializers.py", line 529, in to_representation
ret[field.field_name] = field.to_representation(attribute)
File "rest_framework/fields.py", line 1905, in to_representation
return method(value)
File "events/api/serializers.py", line 86, in _class_names
if services.is_user_registered(self.context["member"], instance):
File "events/services.py", line 24, in is_user_registered
if not event.registration_required or not member.is_authenticated:
File "django/utils/functional.py", line 225, in inner
return func(self._wrapped, *args)
```
--- END ISSUE ---
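The failing attribute access is in `events/services.py` (line 24 of the traceback), where `member.is_authenticated` is evaluated even though the serializer can pass `self.context["member"]` as `None` for requests without an associated member. A guard of the following shape would avoid dereferencing `None`; this is a hypothetical sketch for illustration, not the project's actual patch:
```python
class _FakeEvent:
    # stand-in object so the sketch runs on its own
    registration_required = True

def is_user_registered(member, event):
    # Treat a missing member the same as an unauthenticated one.
    if not event.registration_required or member is None or not member.is_authenticated:
        return None
    return True  # placeholder for the real registration lookup

print(is_user_registered(None, _FakeEvent()))  # -> None instead of AttributeError
```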
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/events/api/serializers.py`
Content:
```
1 from django.conf import settings
2 from django.templatetags.static import static
3 from django.urls import reverse
4 from django.utils import timezone
5 from django.utils.html import strip_tags, strip_spaces_between_tags
6 from html import unescape
7 from rest_framework import serializers
8 from rest_framework.fields import empty
9
10 from payments.api.fields import PaymentTypeField
11 from payments.models import Payment
12 from thaliawebsite.api.services import create_image_thumbnail_dict
13 from events import services
14 from events.exceptions import RegistrationError
15 from events.models import Event, Registration, RegistrationInformationField
16 from pizzas.models import PizzaEvent
17 from thaliawebsite.templatetags.bleach_tags import bleach
18 from utils.snippets import create_google_maps_url
19
20
21 class CalenderJSSerializer(serializers.ModelSerializer):
22 """
23 Serializer using the right format for CalendarJS
24 """
25
26 class Meta:
27 fields = (
28 "start",
29 "end",
30 "allDay",
31 "isBirthday",
32 "url",
33 "title",
34 "description",
35 "classNames",
36 "blank",
37 )
38
39 start = serializers.SerializerMethodField("_start")
40 end = serializers.SerializerMethodField("_end")
41 allDay = serializers.SerializerMethodField("_all_day")
42 isBirthday = serializers.SerializerMethodField("_is_birthday")
43 url = serializers.SerializerMethodField("_url")
44 title = serializers.SerializerMethodField("_title")
45 description = serializers.SerializerMethodField("_description")
46 classNames = serializers.SerializerMethodField("_class_names")
47 blank = serializers.SerializerMethodField("_target_blank")
48
49 def _start(self, instance):
50 return timezone.localtime(instance.start)
51
52 def _end(self, instance):
53 return timezone.localtime(instance.end)
54
55 def _all_day(self, instance):
56 return False
57
58 def _is_birthday(self, instance):
59 return False
60
61 def _url(self, instance):
62 raise NotImplementedError
63
64 def _title(self, instance):
65 return instance.title
66
67 def _description(self, instance):
68 return unescape(strip_tags(instance.description))
69
70 def _class_names(self, instance):
71 pass
72
73 def _target_blank(self, instance):
74 return False
75
76
77 class EventCalenderJSSerializer(CalenderJSSerializer):
78 class Meta(CalenderJSSerializer.Meta):
79 model = Event
80
81 def _url(self, instance):
82 return reverse("events:event", kwargs={"pk": instance.id})
83
84 def _class_names(self, instance):
85 class_names = ["regular-event"]
86 if services.is_user_registered(self.context["member"], instance):
87 class_names.append("has-registration")
88 return class_names
89
90
91 class UnpublishedEventSerializer(CalenderJSSerializer):
92 """
93 See CalenderJSSerializer, customised classes
94 """
95
96 class Meta(CalenderJSSerializer.Meta):
97 model = Event
98
99 def _class_names(self, instance):
100 return ["unpublished-event"]
101
102 def _url(self, instance):
103 return reverse("admin:events_event_details", kwargs={"pk": instance.id})
104
105
106 class EventRetrieveSerializer(serializers.ModelSerializer):
107 """
108 Serializer for events
109 """
110
111 class Meta:
112 model = Event
113 fields = (
114 "pk",
115 "title",
116 "description",
117 "start",
118 "end",
119 "organiser",
120 "category",
121 "registration_start",
122 "registration_end",
123 "cancel_deadline",
124 "location",
125 "map_location",
126 "price",
127 "fine",
128 "max_participants",
129 "num_participants",
130 "user_registration",
131 "registration_allowed",
132 "no_registration_message",
133 "has_fields",
134 "is_pizza_event",
135 "google_maps_url",
136 "is_admin",
137 )
138
139 description = serializers.SerializerMethodField("_description")
140 user_registration = serializers.SerializerMethodField("_user_registration")
141 num_participants = serializers.SerializerMethodField("_num_participants")
142 registration_allowed = serializers.SerializerMethodField("_registration_allowed")
143 has_fields = serializers.SerializerMethodField("_has_fields")
144 is_pizza_event = serializers.SerializerMethodField("_is_pizza_event")
145 google_maps_url = serializers.SerializerMethodField("_google_maps_url")
146 is_admin = serializers.SerializerMethodField("_is_admin")
147
148 def _description(self, instance):
149 return strip_spaces_between_tags(bleach(instance.description))
150
151 def _num_participants(self, instance):
152 if (
153 instance.max_participants
154 and instance.participants.count() > instance.max_participants
155 ):
156 return instance.max_participants
157 return instance.participants.count()
158
159 def _user_registration(self, instance):
160 try:
161 if self.context["request"].member:
162 reg = instance.registration_set.get(
163 member=self.context["request"].member
164 )
165 return RegistrationAdminListSerializer(reg, context=self.context).data
166 except Registration.DoesNotExist:
167 pass
168 return None
169
170 def _registration_allowed(self, instance):
171 member = self.context["request"].member
172 return (
173 self.context["request"].user.is_authenticated
174 and member.has_active_membership
175 and member.can_attend_events
176 )
177
178 def _has_fields(self, instance):
179 return instance.has_fields()
180
181 def _is_pizza_event(self, instance):
182 return instance.is_pizza_event()
183
184 def _google_maps_url(self, instance):
185 return create_google_maps_url(instance.map_location, zoom=13, size="450x250")
186
187 def _is_admin(self, instance):
188 member = self.context["request"].member
189 return services.is_organiser(member, instance)
190
191
192 class EventListSerializer(serializers.ModelSerializer):
193 """Custom list serializer for events"""
194
195 class Meta:
196 model = Event
197 fields = (
198 "pk",
199 "title",
200 "description",
201 "start",
202 "end",
203 "location",
204 "price",
205 "registered",
206 "pizza",
207 "registration_allowed",
208 )
209
210 description = serializers.SerializerMethodField("_description")
211 registered = serializers.SerializerMethodField("_registered")
212 pizza = serializers.SerializerMethodField("_pizza")
213
214 def _description(self, instance):
215 return unescape(strip_tags(instance.description))
216
217 def _registered(self, instance):
218 try:
219 registered = services.is_user_registered(
220 self.context["request"].user, instance,
221 )
222 if registered is None:
223 return False
224 return registered
225 except AttributeError:
226 return False
227
228 def _pizza(self, instance):
229 pizza_events = PizzaEvent.objects.filter(event=instance)
230 return pizza_events.exists()
231
232
233 class RegistrationListSerializer(serializers.ModelSerializer):
234 """Custom registration list serializer"""
235
236 class Meta:
237 model = Registration
238 fields = ("pk", "member", "name", "avatar")
239
240 name = serializers.SerializerMethodField("_name")
241 avatar = serializers.SerializerMethodField("_avatar")
242 member = serializers.SerializerMethodField("_member")
243
244 def _member(self, instance):
245 if instance.member:
246 return instance.member.pk
247 return None
248
249 def _name(self, instance):
250 if instance.member:
251 return instance.member.profile.display_name()
252 return instance.name
253
254 def _avatar(self, instance):
255 placeholder = self.context["request"].build_absolute_uri(
256 static("members/images/default-avatar.jpg")
257 )
258 file = None
259 if instance.member and instance.member.profile.photo:
260 file = instance.member.profile.photo
261 return create_image_thumbnail_dict(
262 self.context["request"], file, placeholder=placeholder, size_large="800x800"
263 )
264
265
266 class RegistrationAdminListSerializer(RegistrationListSerializer):
267 """Custom registration admin list serializer"""
268
269 class Meta:
270 model = Registration
271 fields = (
272 "pk",
273 "member",
274 "name",
275 "registered_on",
276 "is_cancelled",
277 "is_late_cancellation",
278 "queue_position",
279 "payment",
280 "present",
281 "avatar",
282 )
283
284 registered_on = serializers.DateTimeField(source="date")
285 is_cancelled = serializers.SerializerMethodField("_is_cancelled")
286 is_late_cancellation = serializers.SerializerMethodField("_is_late_cancellation")
287 queue_position = serializers.SerializerMethodField("_queue_position")
288 payment = PaymentTypeField(source="payment.type", choices=Payment.PAYMENT_TYPE)
289
290 def _is_late_cancellation(self, instance):
291 return instance.is_late_cancellation()
292
293 def _queue_position(self, instance):
294 pos = instance.queue_position
295 return pos if pos > 0 else None
296
297 def _is_cancelled(self, instance):
298 return instance.date_cancelled is not None
299
300 def _name(self, instance):
301 if instance.member:
302 return instance.member.get_full_name()
303 return instance.name
304
305
306 class RegistrationSerializer(serializers.ModelSerializer):
307 """Registration serializer"""
308
309 information_fields = None
310
311 class Meta:
312 model = Registration
313 fields = (
314 "pk",
315 "member",
316 "name",
317 "photo",
318 "avatar",
319 "registered_on",
320 "is_late_cancellation",
321 "is_cancelled",
322 "queue_position",
323 "fields",
324 "payment",
325 "present",
326 )
327
328 name = serializers.SerializerMethodField("_name")
329 photo = serializers.SerializerMethodField("_photo")
330 avatar = serializers.SerializerMethodField("_avatar")
331 member = serializers.SerializerMethodField("_member")
332 payment = PaymentTypeField(source="payment.type", choices=Payment.PAYMENT_TYPE)
333 registered_on = serializers.DateTimeField(source="date", read_only=True)
334 is_cancelled = serializers.SerializerMethodField("_is_cancelled")
335 is_late_cancellation = serializers.SerializerMethodField("_is_late_cancellation")
336 queue_position = serializers.SerializerMethodField(
337 "_queue_position", read_only=False
338 )
339 fields = serializers.HiddenField(default="")
340
341 def _is_late_cancellation(self, instance):
342 val = instance.is_late_cancellation()
343 return False if val is None else val
344
345 def _is_cancelled(self, instance):
346 return instance.date_cancelled is not None
347
348 def _queue_position(self, instance):
349 pos = instance.queue_position
350 return pos if pos > 0 else None
351
352 def _member(self, instance):
353 if instance.member:
354 return instance.member.pk
355 return None
356
357 def _name(self, instance):
358 if instance.member:
359 return instance.member.profile.display_name()
360 return instance.name
361
362 def _photo(self, instance):
363 if instance.member and instance.member.profile.photo:
364 return self.context["request"].build_absolute_uri(
365 "%s%s" % (settings.MEDIA_URL, instance.member.profile.photo)
366 )
367 else:
368 return self.context["request"].build_absolute_uri(
369 static("members/images/default-avatar.jpg")
370 )
371
372 def _avatar(self, instance):
373 placeholder = self.context["request"].build_absolute_uri(
374 static("members/images/default-avatar.jpg")
375 )
376 file = None
377 if instance.member and instance.member.profile.photo:
378 file = instance.member.profile.photo
379 return create_image_thumbnail_dict(
380 self.context["request"], file, placeholder=placeholder, size_large="800x800"
381 )
382
383 def __init__(self, instance=None, data=empty, **kwargs):
384 super().__init__(instance, data, **kwargs)
385 try:
386 if instance:
387 self.information_fields = services.registration_fields(
388 kwargs["context"]["request"], registration=instance
389 )
390 except RegistrationError:
391 pass
392
393 def get_fields(self):
394 fields = super().get_fields()
395
396 if self.information_fields:
397 for key, field in self.information_fields.items():
398 key = "fields[{}]".format(key)
399 field_type = field["type"]
400
401 if field_type == RegistrationInformationField.BOOLEAN_FIELD:
402 fields[key] = serializers.BooleanField(
403 required=False, write_only=True
404 )
405 elif field_type == RegistrationInformationField.INTEGER_FIELD:
406 fields[key] = serializers.IntegerField(
407 required=field["required"],
408 write_only=True,
409 allow_null=not field["required"],
410 )
411 elif field_type == RegistrationInformationField.TEXT_FIELD:
412 fields[key] = serializers.CharField(
413 required=field["required"],
414 write_only=True,
415 allow_blank=not field["required"],
416 allow_null=not field["required"],
417 )
418
419 fields[key].label = field["label"]
420 fields[key].help_text = field["description"]
421 fields[key].initial = field["value"]
422 fields[key].default = field["value"]
423
424 try:
425 if key in self.information_fields:
426 fields[key].initial = self.validated_data[key]
427 except AssertionError:
428 pass
429
430 return fields
431
432 def to_representation(self, instance):
433 data = super().to_representation(instance)
434 data["fields"] = self.information_fields
435 return data
436
437 def field_values(self):
438 return (
439 (name[7 : len(name) - 1], value)
440 for name, value in self.validated_data.items()
441 if "info_field" in name
442 )
443
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/events/api/serializers.py b/website/events/api/serializers.py
--- a/website/events/api/serializers.py
+++ b/website/events/api/serializers.py
@@ -83,7 +83,9 @@
def _class_names(self, instance):
class_names = ["regular-event"]
- if services.is_user_registered(self.context["member"], instance):
+ if self.context["member"] and services.is_user_registered(
+ self.context["member"], instance
+ ):
class_names.append("has-registration")
return class_names
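
For quick orientation, here is a minimal, self-contained sketch of why the one-line guard in the diff above avoids the crash. The `FakeEvent` class and the free functions are hypothetical stand-ins for the project's serializer and service code, not the real implementation; the point is only that the service call dereferences `member.is_authenticated`, so it must be skipped when no member is attached to the request.

```python
# Hypothetical stand-ins for events.services and the serializer method;
# only the control flow from the traceback and the golden diff is kept.

class FakeEvent:
    registration_required = True


def is_user_registered(member, event):
    # Mirrors the failing line in events/services.py:
    # touching member.is_authenticated raises AttributeError when member is None.
    if not event.registration_required or not member.is_authenticated:
        return False
    return True


def class_names(member, event):
    names = ["regular-event"]
    # The guard added by the patch: only consult the service when a member exists.
    if member and is_user_registered(member, event):
        names.append("has-registration")
    return names


print(class_names(None, FakeEvent()))  # ['regular-event'] instead of an AttributeError
```

With the original, unguarded call, `class_names(None, FakeEvent())` would raise the same `AttributeError: 'NoneType' object has no attribute 'is_authenticated'` reported in the Sentry issue.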
| {"golden_diff": "diff --git a/website/events/api/serializers.py b/website/events/api/serializers.py\n--- a/website/events/api/serializers.py\n+++ b/website/events/api/serializers.py\n@@ -83,7 +83,9 @@\n \n def _class_names(self, instance):\n class_names = [\"regular-event\"]\n- if services.is_user_registered(self.context[\"member\"], instance):\n+ if self.context[\"member\"] and services.is_user_registered(\n+ self.context[\"member\"], instance\n+ ):\n class_names.append(\"has-registration\")\n return class_names\n", "issue": "AttributeError: 'NoneType' object has no attribute 'is_authenticated'\nIn GitLab by _thaliatechnicie on Mar 11, 2020, 23:02\n\nSentry Issue: [CONCREXIT-2A](https://sentry.io/organizations/thalia/issues/1560438241/?referrer=gitlab_integration)\n\n```\nAttributeError: 'NoneType' object has no attribute 'is_authenticated'\n(14 additional frame(s) were not displayed)\n...\n File \"rest_framework/serializers.py\", line 529, in to_representation\n ret[field.field_name] = field.to_representation(attribute)\n File \"rest_framework/fields.py\", line 1905, in to_representation\n return method(value)\n File \"events/api/serializers.py\", line 86, in _class_names\n if services.is_user_registered(self.context[\"member\"], instance):\n File \"events/services.py\", line 24, in is_user_registered\n if not event.registration_required or not member.is_authenticated:\n File \"django/utils/functional.py\", line 225, in inner\n return func(self._wrapped, *args)\n```\n", "before_files": [{"content": "from django.conf import settings\nfrom django.templatetags.static import static\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.html import strip_tags, strip_spaces_between_tags\nfrom html import unescape\nfrom rest_framework import serializers\nfrom rest_framework.fields import empty\n\nfrom payments.api.fields import PaymentTypeField\nfrom payments.models import Payment\nfrom thaliawebsite.api.services import create_image_thumbnail_dict\nfrom events import services\nfrom events.exceptions import RegistrationError\nfrom events.models import Event, Registration, RegistrationInformationField\nfrom pizzas.models import PizzaEvent\nfrom thaliawebsite.templatetags.bleach_tags import bleach\nfrom utils.snippets import create_google_maps_url\n\n\nclass CalenderJSSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializer using the right format for CalendarJS\n \"\"\"\n\n class Meta:\n fields = (\n \"start\",\n \"end\",\n \"allDay\",\n \"isBirthday\",\n \"url\",\n \"title\",\n \"description\",\n \"classNames\",\n \"blank\",\n )\n\n start = serializers.SerializerMethodField(\"_start\")\n end = serializers.SerializerMethodField(\"_end\")\n allDay = serializers.SerializerMethodField(\"_all_day\")\n isBirthday = serializers.SerializerMethodField(\"_is_birthday\")\n url = serializers.SerializerMethodField(\"_url\")\n title = serializers.SerializerMethodField(\"_title\")\n description = serializers.SerializerMethodField(\"_description\")\n classNames = serializers.SerializerMethodField(\"_class_names\")\n blank = serializers.SerializerMethodField(\"_target_blank\")\n\n def _start(self, instance):\n return timezone.localtime(instance.start)\n\n def _end(self, instance):\n return timezone.localtime(instance.end)\n\n def _all_day(self, instance):\n return False\n\n def _is_birthday(self, instance):\n return False\n\n def _url(self, instance):\n raise NotImplementedError\n\n def _title(self, instance):\n return instance.title\n\n def _description(self, instance):\n 
return unescape(strip_tags(instance.description))\n\n def _class_names(self, instance):\n pass\n\n def _target_blank(self, instance):\n return False\n\n\nclass EventCalenderJSSerializer(CalenderJSSerializer):\n class Meta(CalenderJSSerializer.Meta):\n model = Event\n\n def _url(self, instance):\n return reverse(\"events:event\", kwargs={\"pk\": instance.id})\n\n def _class_names(self, instance):\n class_names = [\"regular-event\"]\n if services.is_user_registered(self.context[\"member\"], instance):\n class_names.append(\"has-registration\")\n return class_names\n\n\nclass UnpublishedEventSerializer(CalenderJSSerializer):\n \"\"\"\n See CalenderJSSerializer, customised classes\n \"\"\"\n\n class Meta(CalenderJSSerializer.Meta):\n model = Event\n\n def _class_names(self, instance):\n return [\"unpublished-event\"]\n\n def _url(self, instance):\n return reverse(\"admin:events_event_details\", kwargs={\"pk\": instance.id})\n\n\nclass EventRetrieveSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializer for events\n \"\"\"\n\n class Meta:\n model = Event\n fields = (\n \"pk\",\n \"title\",\n \"description\",\n \"start\",\n \"end\",\n \"organiser\",\n \"category\",\n \"registration_start\",\n \"registration_end\",\n \"cancel_deadline\",\n \"location\",\n \"map_location\",\n \"price\",\n \"fine\",\n \"max_participants\",\n \"num_participants\",\n \"user_registration\",\n \"registration_allowed\",\n \"no_registration_message\",\n \"has_fields\",\n \"is_pizza_event\",\n \"google_maps_url\",\n \"is_admin\",\n )\n\n description = serializers.SerializerMethodField(\"_description\")\n user_registration = serializers.SerializerMethodField(\"_user_registration\")\n num_participants = serializers.SerializerMethodField(\"_num_participants\")\n registration_allowed = serializers.SerializerMethodField(\"_registration_allowed\")\n has_fields = serializers.SerializerMethodField(\"_has_fields\")\n is_pizza_event = serializers.SerializerMethodField(\"_is_pizza_event\")\n google_maps_url = serializers.SerializerMethodField(\"_google_maps_url\")\n is_admin = serializers.SerializerMethodField(\"_is_admin\")\n\n def _description(self, instance):\n return strip_spaces_between_tags(bleach(instance.description))\n\n def _num_participants(self, instance):\n if (\n instance.max_participants\n and instance.participants.count() > instance.max_participants\n ):\n return instance.max_participants\n return instance.participants.count()\n\n def _user_registration(self, instance):\n try:\n if self.context[\"request\"].member:\n reg = instance.registration_set.get(\n member=self.context[\"request\"].member\n )\n return RegistrationAdminListSerializer(reg, context=self.context).data\n except Registration.DoesNotExist:\n pass\n return None\n\n def _registration_allowed(self, instance):\n member = self.context[\"request\"].member\n return (\n self.context[\"request\"].user.is_authenticated\n and member.has_active_membership\n and member.can_attend_events\n )\n\n def _has_fields(self, instance):\n return instance.has_fields()\n\n def _is_pizza_event(self, instance):\n return instance.is_pizza_event()\n\n def _google_maps_url(self, instance):\n return create_google_maps_url(instance.map_location, zoom=13, size=\"450x250\")\n\n def _is_admin(self, instance):\n member = self.context[\"request\"].member\n return services.is_organiser(member, instance)\n\n\nclass EventListSerializer(serializers.ModelSerializer):\n \"\"\"Custom list serializer for events\"\"\"\n\n class Meta:\n model = Event\n fields = (\n \"pk\",\n \"title\",\n 
\"description\",\n \"start\",\n \"end\",\n \"location\",\n \"price\",\n \"registered\",\n \"pizza\",\n \"registration_allowed\",\n )\n\n description = serializers.SerializerMethodField(\"_description\")\n registered = serializers.SerializerMethodField(\"_registered\")\n pizza = serializers.SerializerMethodField(\"_pizza\")\n\n def _description(self, instance):\n return unescape(strip_tags(instance.description))\n\n def _registered(self, instance):\n try:\n registered = services.is_user_registered(\n self.context[\"request\"].user, instance,\n )\n if registered is None:\n return False\n return registered\n except AttributeError:\n return False\n\n def _pizza(self, instance):\n pizza_events = PizzaEvent.objects.filter(event=instance)\n return pizza_events.exists()\n\n\nclass RegistrationListSerializer(serializers.ModelSerializer):\n \"\"\"Custom registration list serializer\"\"\"\n\n class Meta:\n model = Registration\n fields = (\"pk\", \"member\", \"name\", \"avatar\")\n\n name = serializers.SerializerMethodField(\"_name\")\n avatar = serializers.SerializerMethodField(\"_avatar\")\n member = serializers.SerializerMethodField(\"_member\")\n\n def _member(self, instance):\n if instance.member:\n return instance.member.pk\n return None\n\n def _name(self, instance):\n if instance.member:\n return instance.member.profile.display_name()\n return instance.name\n\n def _avatar(self, instance):\n placeholder = self.context[\"request\"].build_absolute_uri(\n static(\"members/images/default-avatar.jpg\")\n )\n file = None\n if instance.member and instance.member.profile.photo:\n file = instance.member.profile.photo\n return create_image_thumbnail_dict(\n self.context[\"request\"], file, placeholder=placeholder, size_large=\"800x800\"\n )\n\n\nclass RegistrationAdminListSerializer(RegistrationListSerializer):\n \"\"\"Custom registration admin list serializer\"\"\"\n\n class Meta:\n model = Registration\n fields = (\n \"pk\",\n \"member\",\n \"name\",\n \"registered_on\",\n \"is_cancelled\",\n \"is_late_cancellation\",\n \"queue_position\",\n \"payment\",\n \"present\",\n \"avatar\",\n )\n\n registered_on = serializers.DateTimeField(source=\"date\")\n is_cancelled = serializers.SerializerMethodField(\"_is_cancelled\")\n is_late_cancellation = serializers.SerializerMethodField(\"_is_late_cancellation\")\n queue_position = serializers.SerializerMethodField(\"_queue_position\")\n payment = PaymentTypeField(source=\"payment.type\", choices=Payment.PAYMENT_TYPE)\n\n def _is_late_cancellation(self, instance):\n return instance.is_late_cancellation()\n\n def _queue_position(self, instance):\n pos = instance.queue_position\n return pos if pos > 0 else None\n\n def _is_cancelled(self, instance):\n return instance.date_cancelled is not None\n\n def _name(self, instance):\n if instance.member:\n return instance.member.get_full_name()\n return instance.name\n\n\nclass RegistrationSerializer(serializers.ModelSerializer):\n \"\"\"Registration serializer\"\"\"\n\n information_fields = None\n\n class Meta:\n model = Registration\n fields = (\n \"pk\",\n \"member\",\n \"name\",\n \"photo\",\n \"avatar\",\n \"registered_on\",\n \"is_late_cancellation\",\n \"is_cancelled\",\n \"queue_position\",\n \"fields\",\n \"payment\",\n \"present\",\n )\n\n name = serializers.SerializerMethodField(\"_name\")\n photo = serializers.SerializerMethodField(\"_photo\")\n avatar = serializers.SerializerMethodField(\"_avatar\")\n member = serializers.SerializerMethodField(\"_member\")\n payment = PaymentTypeField(source=\"payment.type\", 
choices=Payment.PAYMENT_TYPE)\n registered_on = serializers.DateTimeField(source=\"date\", read_only=True)\n is_cancelled = serializers.SerializerMethodField(\"_is_cancelled\")\n is_late_cancellation = serializers.SerializerMethodField(\"_is_late_cancellation\")\n queue_position = serializers.SerializerMethodField(\n \"_queue_position\", read_only=False\n )\n fields = serializers.HiddenField(default=\"\")\n\n def _is_late_cancellation(self, instance):\n val = instance.is_late_cancellation()\n return False if val is None else val\n\n def _is_cancelled(self, instance):\n return instance.date_cancelled is not None\n\n def _queue_position(self, instance):\n pos = instance.queue_position\n return pos if pos > 0 else None\n\n def _member(self, instance):\n if instance.member:\n return instance.member.pk\n return None\n\n def _name(self, instance):\n if instance.member:\n return instance.member.profile.display_name()\n return instance.name\n\n def _photo(self, instance):\n if instance.member and instance.member.profile.photo:\n return self.context[\"request\"].build_absolute_uri(\n \"%s%s\" % (settings.MEDIA_URL, instance.member.profile.photo)\n )\n else:\n return self.context[\"request\"].build_absolute_uri(\n static(\"members/images/default-avatar.jpg\")\n )\n\n def _avatar(self, instance):\n placeholder = self.context[\"request\"].build_absolute_uri(\n static(\"members/images/default-avatar.jpg\")\n )\n file = None\n if instance.member and instance.member.profile.photo:\n file = instance.member.profile.photo\n return create_image_thumbnail_dict(\n self.context[\"request\"], file, placeholder=placeholder, size_large=\"800x800\"\n )\n\n def __init__(self, instance=None, data=empty, **kwargs):\n super().__init__(instance, data, **kwargs)\n try:\n if instance:\n self.information_fields = services.registration_fields(\n kwargs[\"context\"][\"request\"], registration=instance\n )\n except RegistrationError:\n pass\n\n def get_fields(self):\n fields = super().get_fields()\n\n if self.information_fields:\n for key, field in self.information_fields.items():\n key = \"fields[{}]\".format(key)\n field_type = field[\"type\"]\n\n if field_type == RegistrationInformationField.BOOLEAN_FIELD:\n fields[key] = serializers.BooleanField(\n required=False, write_only=True\n )\n elif field_type == RegistrationInformationField.INTEGER_FIELD:\n fields[key] = serializers.IntegerField(\n required=field[\"required\"],\n write_only=True,\n allow_null=not field[\"required\"],\n )\n elif field_type == RegistrationInformationField.TEXT_FIELD:\n fields[key] = serializers.CharField(\n required=field[\"required\"],\n write_only=True,\n allow_blank=not field[\"required\"],\n allow_null=not field[\"required\"],\n )\n\n fields[key].label = field[\"label\"]\n fields[key].help_text = field[\"description\"]\n fields[key].initial = field[\"value\"]\n fields[key].default = field[\"value\"]\n\n try:\n if key in self.information_fields:\n fields[key].initial = self.validated_data[key]\n except AssertionError:\n pass\n\n return fields\n\n def to_representation(self, instance):\n data = super().to_representation(instance)\n data[\"fields\"] = self.information_fields\n return data\n\n def field_values(self):\n return (\n (name[7 : len(name) - 1], value)\n for name, value in self.validated_data.items()\n if \"info_field\" in name\n )\n", "path": "website/events/api/serializers.py"}], "after_files": [{"content": "from django.conf import settings\nfrom django.templatetags.static import static\nfrom django.urls import reverse\nfrom 
django.utils import timezone\nfrom django.utils.html import strip_tags, strip_spaces_between_tags\nfrom html import unescape\nfrom rest_framework import serializers\nfrom rest_framework.fields import empty\n\nfrom payments.api.fields import PaymentTypeField\nfrom payments.models import Payment\nfrom thaliawebsite.api.services import create_image_thumbnail_dict\nfrom events import services\nfrom events.exceptions import RegistrationError\nfrom events.models import Event, Registration, RegistrationInformationField\nfrom pizzas.models import PizzaEvent\nfrom thaliawebsite.templatetags.bleach_tags import bleach\nfrom utils.snippets import create_google_maps_url\n\n\nclass CalenderJSSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializer using the right format for CalendarJS\n \"\"\"\n\n class Meta:\n fields = (\n \"start\",\n \"end\",\n \"allDay\",\n \"isBirthday\",\n \"url\",\n \"title\",\n \"description\",\n \"classNames\",\n \"blank\",\n )\n\n start = serializers.SerializerMethodField(\"_start\")\n end = serializers.SerializerMethodField(\"_end\")\n allDay = serializers.SerializerMethodField(\"_all_day\")\n isBirthday = serializers.SerializerMethodField(\"_is_birthday\")\n url = serializers.SerializerMethodField(\"_url\")\n title = serializers.SerializerMethodField(\"_title\")\n description = serializers.SerializerMethodField(\"_description\")\n classNames = serializers.SerializerMethodField(\"_class_names\")\n blank = serializers.SerializerMethodField(\"_target_blank\")\n\n def _start(self, instance):\n return timezone.localtime(instance.start)\n\n def _end(self, instance):\n return timezone.localtime(instance.end)\n\n def _all_day(self, instance):\n return False\n\n def _is_birthday(self, instance):\n return False\n\n def _url(self, instance):\n raise NotImplementedError\n\n def _title(self, instance):\n return instance.title\n\n def _description(self, instance):\n return unescape(strip_tags(instance.description))\n\n def _class_names(self, instance):\n pass\n\n def _target_blank(self, instance):\n return False\n\n\nclass EventCalenderJSSerializer(CalenderJSSerializer):\n class Meta(CalenderJSSerializer.Meta):\n model = Event\n\n def _url(self, instance):\n return reverse(\"events:event\", kwargs={\"pk\": instance.id})\n\n def _class_names(self, instance):\n class_names = [\"regular-event\"]\n if self.context[\"member\"] and services.is_user_registered(\n self.context[\"member\"], instance\n ):\n class_names.append(\"has-registration\")\n return class_names\n\n\nclass UnpublishedEventSerializer(CalenderJSSerializer):\n \"\"\"\n See CalenderJSSerializer, customised classes\n \"\"\"\n\n class Meta(CalenderJSSerializer.Meta):\n model = Event\n\n def _class_names(self, instance):\n return [\"unpublished-event\"]\n\n def _url(self, instance):\n return reverse(\"admin:events_event_details\", kwargs={\"pk\": instance.id})\n\n\nclass EventRetrieveSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializer for events\n \"\"\"\n\n class Meta:\n model = Event\n fields = (\n \"pk\",\n \"title\",\n \"description\",\n \"start\",\n \"end\",\n \"organiser\",\n \"category\",\n \"registration_start\",\n \"registration_end\",\n \"cancel_deadline\",\n \"location\",\n \"map_location\",\n \"price\",\n \"fine\",\n \"max_participants\",\n \"num_participants\",\n \"user_registration\",\n \"registration_allowed\",\n \"no_registration_message\",\n \"has_fields\",\n \"is_pizza_event\",\n \"google_maps_url\",\n \"is_admin\",\n )\n\n description = serializers.SerializerMethodField(\"_description\")\n 
user_registration = serializers.SerializerMethodField(\"_user_registration\")\n num_participants = serializers.SerializerMethodField(\"_num_participants\")\n registration_allowed = serializers.SerializerMethodField(\"_registration_allowed\")\n has_fields = serializers.SerializerMethodField(\"_has_fields\")\n is_pizza_event = serializers.SerializerMethodField(\"_is_pizza_event\")\n google_maps_url = serializers.SerializerMethodField(\"_google_maps_url\")\n is_admin = serializers.SerializerMethodField(\"_is_admin\")\n\n def _description(self, instance):\n return strip_spaces_between_tags(bleach(instance.description))\n\n def _num_participants(self, instance):\n if (\n instance.max_participants\n and instance.participants.count() > instance.max_participants\n ):\n return instance.max_participants\n return instance.participants.count()\n\n def _user_registration(self, instance):\n try:\n if self.context[\"request\"].member:\n reg = instance.registration_set.get(\n member=self.context[\"request\"].member\n )\n return RegistrationAdminListSerializer(reg, context=self.context).data\n except Registration.DoesNotExist:\n pass\n return None\n\n def _registration_allowed(self, instance):\n member = self.context[\"request\"].member\n return (\n self.context[\"request\"].user.is_authenticated\n and member.has_active_membership\n and member.can_attend_events\n )\n\n def _has_fields(self, instance):\n return instance.has_fields()\n\n def _is_pizza_event(self, instance):\n return instance.is_pizza_event()\n\n def _google_maps_url(self, instance):\n return create_google_maps_url(instance.map_location, zoom=13, size=\"450x250\")\n\n def _is_admin(self, instance):\n member = self.context[\"request\"].member\n return services.is_organiser(member, instance)\n\n\nclass EventListSerializer(serializers.ModelSerializer):\n \"\"\"Custom list serializer for events\"\"\"\n\n class Meta:\n model = Event\n fields = (\n \"pk\",\n \"title\",\n \"description\",\n \"start\",\n \"end\",\n \"location\",\n \"price\",\n \"registered\",\n \"pizza\",\n \"registration_allowed\",\n )\n\n description = serializers.SerializerMethodField(\"_description\")\n registered = serializers.SerializerMethodField(\"_registered\")\n pizza = serializers.SerializerMethodField(\"_pizza\")\n\n def _description(self, instance):\n return unescape(strip_tags(instance.description))\n\n def _registered(self, instance):\n try:\n registered = services.is_user_registered(\n self.context[\"request\"].user, instance,\n )\n if registered is None:\n return False\n return registered\n except AttributeError:\n return False\n\n def _pizza(self, instance):\n pizza_events = PizzaEvent.objects.filter(event=instance)\n return pizza_events.exists()\n\n\nclass RegistrationListSerializer(serializers.ModelSerializer):\n \"\"\"Custom registration list serializer\"\"\"\n\n class Meta:\n model = Registration\n fields = (\"pk\", \"member\", \"name\", \"avatar\")\n\n name = serializers.SerializerMethodField(\"_name\")\n avatar = serializers.SerializerMethodField(\"_avatar\")\n member = serializers.SerializerMethodField(\"_member\")\n\n def _member(self, instance):\n if instance.member:\n return instance.member.pk\n return None\n\n def _name(self, instance):\n if instance.member:\n return instance.member.profile.display_name()\n return instance.name\n\n def _avatar(self, instance):\n placeholder = self.context[\"request\"].build_absolute_uri(\n static(\"members/images/default-avatar.jpg\")\n )\n file = None\n if instance.member and instance.member.profile.photo:\n file = 
instance.member.profile.photo\n return create_image_thumbnail_dict(\n self.context[\"request\"], file, placeholder=placeholder, size_large=\"800x800\"\n )\n\n\nclass RegistrationAdminListSerializer(RegistrationListSerializer):\n \"\"\"Custom registration admin list serializer\"\"\"\n\n class Meta:\n model = Registration\n fields = (\n \"pk\",\n \"member\",\n \"name\",\n \"registered_on\",\n \"is_cancelled\",\n \"is_late_cancellation\",\n \"queue_position\",\n \"payment\",\n \"present\",\n \"avatar\",\n )\n\n registered_on = serializers.DateTimeField(source=\"date\")\n is_cancelled = serializers.SerializerMethodField(\"_is_cancelled\")\n is_late_cancellation = serializers.SerializerMethodField(\"_is_late_cancellation\")\n queue_position = serializers.SerializerMethodField(\"_queue_position\")\n payment = PaymentTypeField(source=\"payment.type\", choices=Payment.PAYMENT_TYPE)\n\n def _is_late_cancellation(self, instance):\n return instance.is_late_cancellation()\n\n def _queue_position(self, instance):\n pos = instance.queue_position\n return pos if pos > 0 else None\n\n def _is_cancelled(self, instance):\n return instance.date_cancelled is not None\n\n def _name(self, instance):\n if instance.member:\n return instance.member.get_full_name()\n return instance.name\n\n\nclass RegistrationSerializer(serializers.ModelSerializer):\n \"\"\"Registration serializer\"\"\"\n\n information_fields = None\n\n class Meta:\n model = Registration\n fields = (\n \"pk\",\n \"member\",\n \"name\",\n \"photo\",\n \"avatar\",\n \"registered_on\",\n \"is_late_cancellation\",\n \"is_cancelled\",\n \"queue_position\",\n \"fields\",\n \"payment\",\n \"present\",\n )\n\n name = serializers.SerializerMethodField(\"_name\")\n photo = serializers.SerializerMethodField(\"_photo\")\n avatar = serializers.SerializerMethodField(\"_avatar\")\n member = serializers.SerializerMethodField(\"_member\")\n payment = PaymentTypeField(source=\"payment.type\", choices=Payment.PAYMENT_TYPE)\n registered_on = serializers.DateTimeField(source=\"date\", read_only=True)\n is_cancelled = serializers.SerializerMethodField(\"_is_cancelled\")\n is_late_cancellation = serializers.SerializerMethodField(\"_is_late_cancellation\")\n queue_position = serializers.SerializerMethodField(\n \"_queue_position\", read_only=False\n )\n fields = serializers.HiddenField(default=\"\")\n\n def _is_late_cancellation(self, instance):\n val = instance.is_late_cancellation()\n return False if val is None else val\n\n def _is_cancelled(self, instance):\n return instance.date_cancelled is not None\n\n def _queue_position(self, instance):\n pos = instance.queue_position\n return pos if pos > 0 else None\n\n def _member(self, instance):\n if instance.member:\n return instance.member.pk\n return None\n\n def _name(self, instance):\n if instance.member:\n return instance.member.profile.display_name()\n return instance.name\n\n def _photo(self, instance):\n if instance.member and instance.member.profile.photo:\n return self.context[\"request\"].build_absolute_uri(\n \"%s%s\" % (settings.MEDIA_URL, instance.member.profile.photo)\n )\n else:\n return self.context[\"request\"].build_absolute_uri(\n static(\"members/images/default-avatar.jpg\")\n )\n\n def _avatar(self, instance):\n placeholder = self.context[\"request\"].build_absolute_uri(\n static(\"members/images/default-avatar.jpg\")\n )\n file = None\n if instance.member and instance.member.profile.photo:\n file = instance.member.profile.photo\n return create_image_thumbnail_dict(\n self.context[\"request\"], file, 
placeholder=placeholder, size_large=\"800x800\"\n )\n\n def __init__(self, instance=None, data=empty, **kwargs):\n super().__init__(instance, data, **kwargs)\n try:\n if instance:\n self.information_fields = services.registration_fields(\n kwargs[\"context\"][\"request\"], registration=instance\n )\n except RegistrationError:\n pass\n\n def get_fields(self):\n fields = super().get_fields()\n\n if self.information_fields:\n for key, field in self.information_fields.items():\n key = \"fields[{}]\".format(key)\n field_type = field[\"type\"]\n\n if field_type == RegistrationInformationField.BOOLEAN_FIELD:\n fields[key] = serializers.BooleanField(\n required=False, write_only=True\n )\n elif field_type == RegistrationInformationField.INTEGER_FIELD:\n fields[key] = serializers.IntegerField(\n required=field[\"required\"],\n write_only=True,\n allow_null=not field[\"required\"],\n )\n elif field_type == RegistrationInformationField.TEXT_FIELD:\n fields[key] = serializers.CharField(\n required=field[\"required\"],\n write_only=True,\n allow_blank=not field[\"required\"],\n allow_null=not field[\"required\"],\n )\n\n fields[key].label = field[\"label\"]\n fields[key].help_text = field[\"description\"]\n fields[key].initial = field[\"value\"]\n fields[key].default = field[\"value\"]\n\n try:\n if key in self.information_fields:\n fields[key].initial = self.validated_data[key]\n except AssertionError:\n pass\n\n return fields\n\n def to_representation(self, instance):\n data = super().to_representation(instance)\n data[\"fields\"] = self.information_fields\n return data\n\n def field_values(self):\n return (\n (name[7 : len(name) - 1], value)\n for name, value in self.validated_data.items()\n if \"info_field\" in name\n )\n", "path": "website/events/api/serializers.py"}]} |
gh_patches_debug_1232 | rasdani/github-patches | git_diff | kivy__kivy-2714 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Scatter crash on Windows
I just tested BETA-1 with showcase, and when I tried to move the cat:
```
running "python.exe C:\Users\tito\Desktop\Kivy-1.9.0-dev-py3.3-win32\kivy\exampl
es\demo\showcase\main.py" \n
[INFO ] [Logger ] Record log in C:\Users\tito\.kivy\logs\kivy_
14-09-21_2.txt
[INFO ] Kivy v1.9.0-dev
[INFO ] [Python ] v3.3.3 (v3.3.3:c3896275c0f6, Nov 18 2013, 21
:18:40) [MSC v.1600 32 bit (Intel)]
[INFO ] [Factory ] 170 symbols loaded
[INFO ] [Image ] Providers: img_tex, img_dds, img_pygame, img
_gif (img_sdl2, img_ffpyplayer, img_pil ignored)
[INFO ] [Window ] Provider: pygame(['window_egl_rpi'] ignored)
OpenGL Warning: WGL_SAMPLE_BUFFERS_EXT & WGL_SAMPLES_EXT not supporteed!
OpenGL Warning: WGL_SAMPLE_BUFFERS_EXT & WGL_SAMPLES_EXT not supporteed!
GLEW initialization succeeded
GL: glGenFramebuffers is NULL, try to detect an extension
GL: available extensions: GL_EXT_texture_compression_s3tc GL_EXT_draw_range_elem
ents GL_EXT_framebuffer_object GL_EXT_compiled_vertex_array GL_ARB_depth_texture
GL_ARB_fragment_program GL_ARB_multisample GL_ARB_multitexture GL_ARB_occlusion
_query GL_ARB_point_parameters GL_ARB_point_sprite GL_ARB_shadow GL_ARB_texture_
border_clamp GL_ARB_texture_compression GL_ARB_texture_cube_map GL_ARB_texture_e
nv_add GL_ARB_texture_env_combine GL_EXT_texture_env_combine GL_ARB_texture_env_
crossbar GL_ARB_texture_env_dot3 GL_EXT_texture_env_dot3 GL_ARB_texture_mirrored
_repeat GL_IBM_texture_mirrored_repeat GL_ATI_texture_mirror_once GL_ARB_texture
_non_power_of_two GL_ARB_transpose_matrix GL_ARB_vertex_buffer_object GL_ARB_pix
el_buffer_object GL_ARB_vertex_program GL_ARB_window_pos GL_EXT_blend_color GL_E
XT_blend_minmax GL_EXT_blend_func_separate GL_EXT_blend_subtract GL_EXT_texture_
env_add GL_EXT_fog_coord GL_EXT_multi_draw_arrays GL_EXT_secondary_color GL_EXT_
shadow_funcs GL_EXT_stencil_wrap GL_EXT_texture_cube_map GL_EXT_texture_edge_cla
mp GL_EXT_texture_filter_anisotropic GL_EXT_texture_lod_bias GL_EXT_texture_obje
ct GL_EXT_texture3D GL_IBM_rasterpos_clip GL_NV_fog_distance GL_NV_fragment_prog
ram GL_NV_fragment_program_option GL_NV_fragment_program2 GL_NV_register_combine
rs GL_NV_register_combiners2 GL_NV_texgen_reflection GL_NV_texture_rectangle GL_
ARB_texture_rectangle GL_NV_vertex_program GL_NV_vertex_program1_1 GL_NV_vertex_
program2 GL_NV_vertex_program2_option GL_NV_vertex_program3 GL_SGIS_generate_mip
map GL_ARB_shading_language_100 GL_ARB_shader_objects GL_ARB_vertex_shader GL_AR
B_fragment_shader GL_EXT_texture_sRGB GL_EXT_framebuffer_blit GL_EXT_blend_equat
ion_separate GL_EXT_stencil_two_side GL_CR_state_parameter GL_CR_cursor_position
GL_CR_bounding_box GL_CR_print_string GL_CR_tilesort_info GL_CR_synchronization
GL_CR_head_spu_name GL_CR_performance_info GL_CR_window_size GL_CR_tile_info GL
_CR_saveframe GL_CR_readback_barrier_size GL_CR_server_id_sharing GL_CR_server_m
atrix
GL: EXT_framebuffer_object is supported
[INFO ] [GL ] OpenGL version <b'2.1 Chromium 1.9'>
[INFO ] [GL ] OpenGL vendor <b'Humper'>
[INFO ] [GL ] OpenGL renderer <b'Chromium'>
[INFO ] [GL ] OpenGL parsed version: 2, 1
[INFO ] [GL ] Shading version <b'4.40 NVIDIA via Cg compil
er'>
[INFO ] [GL ] Texture max size <16384>
[INFO ] [GL ] Texture max units <32>
[INFO ] [Window ] virtual keyboard not allowed, single mode, n
ot docked
[INFO ] [Text ] Provider: pygame(['text_sdl2'] ignored)
[INFO ] [GL ] NPOT texture support is available
[INFO ] [OSC ] using <thread> for socket
[INFO ] [Base ] Start application main loop
[WARNING ] [Accordion ] not enough space for displaying all children
[WARNING ] [Accordion ] need 132px, got 100px
[WARNING ] [Accordion ] layout aborted.
[WARNING ] [Accordion ] not enough space for displaying all children
[WARNING ] [Accordion ] need 132px, got 84px
[WARNING ] [Accordion ] layout aborted.
[WARNING ] [Accordion ] not enough space for displaying all children
[WARNING ] [Accordion ] need 132px, got 84px
[WARNING ] [Accordion ] layout aborted.
[ERROR ] unable to access to <\pagefile.sys>
Traceback (most recent call last):
File "C:\Users\tito\Desktop\Kivy-1.9.0-dev-py3.3-win32\kivy\kivy\uix\filechoos
er.py", line 122, in is_hidden
return GetFileAttributesExW(fn)[0] & FILE_ATTRIBUTE_HIDDEN
pywintypes.error: (32, 'GetFileAttributesEx', 'Le processus ne peut pas accéder
au fichier car ce fichier est utilisé par un autre processus.')
[ERROR ] unable to access to <\pagefile.sys>
Traceback (most recent call last):
File "C:\Users\tito\Desktop\Kivy-1.9.0-dev-py3.3-win32\kivy\kivy\uix\filechoos
er.py", line 122, in is_hidden
return GetFileAttributesExW(fn)[0] & FILE_ATTRIBUTE_HIDDEN
pywintypes.error: (32, 'GetFileAttributesEx', 'Le processus ne peut pas accéder
au fichier car ce fichier est utilisé par un autre processus.')
[INFO ] [VideoGstplayer] Using Gstreamer 1.2.1.0
[INFO ] [Video ] Provider: gstplayer
[INFO ] [Base ] Leaving application in progress...
Traceback (most recent call last):
File "C:\Users\tito\Desktop\Kivy-1.9.0-dev-py3.3-win32\kivy\examples\demo\sho
wcase\main.py", line 212, in <module>
ShowcaseApp().run()
File "C:\Users\tito\Desktop\Kivy-1.9.0-dev-py3.3-win32\kivy\kivy\app.py", lin
e 825, in run
runTouchApp()
File "C:\Users\tito\Desktop\Kivy-1.9.0-dev-py3.3-win32\kivy\kivy\base.py", li
ne 484, in runTouchApp
EventLoop.window.mainloop()
File "C:\Users\tito\Desktop\Kivy-1.9.0-dev-py3.3-win32\kivy\kivy\core\window\
window_pygame.py", line 364, in mainloop
self._mainloop()
File "C:\Users\tito\Desktop\Kivy-1.9.0-dev-py3.3-win32\kivy\kivy\core\window\
window_pygame.py", line 268, in _mainloop
EventLoop.idle()
File "C:\Users\tito\Desktop\Kivy-1.9.0-dev-py3.3-win32\kivy\kivy\base.py", li
ne 327, in idle
self.dispatch_input()
File "C:\Users\tito\Desktop\Kivy-1.9.0-dev-py3.3-win32\kivy\kivy\base.py", li
ne 312, in dispatch_input
post_dispatch_input(*pop(0))
File "C:\Users\tito\Desktop\Kivy-1.9.0-dev-py3.3-win32\kivy\kivy\base.py", li
ne 271, in post_dispatch_input
wid.dispatch('on_touch_move', me)
File "_event.pyx", line 392, in kivy._event.EventDispatcher.dispatch (kivy\_e
vent.c:4602)
File "C:\Users\tito\Desktop\Kivy-1.9.0-dev-py3.3-win32\kivy\kivy\uix\scatter.
py", line 547, in on_touch_move
if self.transform_with_touch(touch):
File "C:\Users\tito\Desktop\Kivy-1.9.0-dev-py3.3-win32\kivy\kivy\uix\scatter.
py", line 452, in transform_with_touch
anchor = max(points[:-1], key=lambda p: p.distance(touch.pos))
ValueError: max() arg is an empty sequence
Appuyez sur une touche pour continuer...
```
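
The last frame of the traceback is the actual failure: `max()` is evaluated over `points[:-1]`, which is empty when only one touch is being tracked, and `max()` on an empty sequence raises `ValueError`. A minimal reproduction of that failure mode, with the obvious kind of guard, is sketched below; this is only an illustration of the error class, not the patch Kivy ultimately applied.

```python
# Illustration only: max() over an empty sequence is what aborts the event loop.
points = [(120, 80)]              # with a single tracked touch, points[:-1] == []

try:
    anchor = max(points[:-1], key=lambda p: p[0])
except ValueError:
    anchor = None                 # nothing to anchor against yet, so bail out

print(anchor)                     # None instead of crashing on_touch_move
```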
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kivy/uix/scrollview.py`
Content:
```
1 '''Scroll View
2 ===========
3
4 .. versionadded:: 1.0.4
5
6 The :class:`ScrollView` widget provides a scrollable/pannable viewport that is
7 clipped at the scrollview's bounding box.
8
9
10 Scrolling Behavior
11 ------------------
12
13 The ScrollView accepts only one child and applies a viewport/window to
14 it according to the :attr:`ScrollView.scroll_x` and
15 :attr:`ScrollView.scroll_y` properties. Touches are analyzed to
16 determine if the user wants to scroll or control the child in some
17 other manner - you cannot do both at the same time. To determine if
18 interaction is a scrolling gesture, these properties are used:
19
20 - :attr:`ScrollView.scroll_distance`: the minimum distance to travel,
21 defaults to 20 pixels.
22 - :attr:`ScrollView.scroll_timeout`: the maximum time period, defaults
23 to 250 milliseconds.
24
25 If a touch travels :attr:`~ScrollView.scroll_distance` pixels within the
26 :attr:`~ScrollView.scroll_timeout` period, it is recognized as a scrolling
27 gesture and translation (scroll/pan) will begin. If the timeout occurs, the
28 touch down event is dispatched to the child instead (no translation).
29
30 The default value for those settings can be changed in the configuration file::
31
32 [widgets]
33 scroll_timeout = 250
34 scroll_distance = 20
35
36 .. versionadded:: 1.1.1
37
38 ScrollView now animates scrolling in Y when a mousewheel is used.
39
40
41 Limiting to the X or Y Axis
42 ---------------------------
43
44 By default, the ScrollView allows scrolling in both the X and Y axes. You can
45 explicitly disable scrolling on an axis by setting
46 :attr:`ScrollView.do_scroll_x` or :attr:`ScrollView.do_scroll_y` to False.
47
48
49 Managing the Content Size and Position
50 --------------------------------------
51
52 ScrollView manages the position of its children similarly to a
53 RelativeLayout (see :mod:`~kivy.uix.relativelayout`) but not the size. You must
54 carefully specify the `size_hint` of your content to get the desired
55 scroll/pan effect.
56
57 By default, size_hint is (1, 1), so the content size will fit your ScrollView
58 exactly (you will have nothing to scroll). You must deactivate at least one of
59 the size_hint instructions (x or y) of the child to enable scrolling.
60
61 To scroll a :class:`GridLayout` on Y-axis/vertically, set the child's width
62 identical to that of the ScrollView (size_hint_x=1, default), and set the
63 size_hint_y property to None::
64
65 layout = GridLayout(cols=1, spacing=10, size_hint_y=None)
66 # Make sure the height is such that there is something to scroll.
67 layout.bind(minimum_height=layout.setter('height'))
68 for i in range(30):
69 btn = Button(text=str(i), size_hint_y=None, height=40)
70 layout.add_widget(btn)
71 root = ScrollView(size_hint=(None, None), size=(400, 400))
72 root.add_widget(layout)
73
74
75 Overscroll Effects
76 ------------------
77
78 .. versionadded:: 1.7.0
79
80 When scrolling would exceed the bounds of the :class:`ScrollView`, it
81 uses a :class:`~kivy.effects.scroll.ScrollEffect` to handle the
82 overscroll. These effects can perform actions like bouncing back,
83 changing opacity, or simply preventing scrolling beyond the normal
84 boundaries. Note that complex effects may perform many computations,
85 which can be slow on weaker hardware.
86
87 You can change what effect is being used by setting
88 :attr:`ScrollView.effect_cls` to any effect class. Current options
89 include:
90
91 - :class:`~kivy.effects.scroll.ScrollEffect`: Does not allow
92 scrolling beyond the :class:`ScrollView` boundaries.
93 - :class:`~kivy.effects.dampedscroll.DampedScrollEffect`: The
94 current default. Allows the user to scroll beyond the normal
95 boundaries, but has the content spring back once the
96 touch/click is released.
97 - :class:`~kivy.effects.opacityscroll.OpacityScrollEffect`: Similar
98 to the :class:`~kivy.effect.dampedscroll.DampedScrollEffect`, but
99 also reduces opacity during overscroll.
100
101 You can also create your own scroll effect by subclassing one of these,
102 then pass it as the :attr:`~ScrollView.effect_cls` in the same way.
103
104 Alternatively, you can set :attr:`ScrollView.effect_x` and/or
105 :attr:`ScrollView.effect_y` to an *instance* of the effect you want to
106 use. This will override the default effect set in
107 :attr:`ScrollView.effect_cls`.
108
109 All the effects are located in the :mod:`kivy.effects`.
110
111 '''
112
113 __all__ = ('ScrollView', )
114
115 from functools import partial
116 from kivy.animation import Animation
117 from kivy.compat import string_types
118 from kivy.config import Config
119 from kivy.clock import Clock
120 from kivy.factory import Factory
121 from kivy.uix.stencilview import StencilView
122 from kivy.metrics import sp
123 from kivy.effects.dampedscroll import DampedScrollEffect
124 from kivy.properties import NumericProperty, BooleanProperty, AliasProperty, \
125 ObjectProperty, ListProperty, ReferenceListProperty, OptionProperty
126
127
128 # When we are generating documentation, Config doesn't exist
129 _scroll_timeout = _scroll_distance = 0
130 if Config:
131 _scroll_timeout = Config.getint('widgets', 'scroll_timeout')
132 _scroll_distance = sp(Config.getint('widgets', 'scroll_distance'))
133
134
135 class ScrollView(StencilView):
136 '''ScrollView class. See module documentation for more information.
137
138 .. versionchanged:: 1.7.0
139 `auto_scroll`, `scroll_friction`, `scroll_moves`, `scroll_stoptime' has
140 been deprecated, use :attr:`effect_cls` instead.
141 '''
142
143 scroll_distance = NumericProperty(_scroll_distance)
144 '''Distance to move before scrolling the :class:`ScrollView`, in pixels. As
145 soon as the distance has been traveled, the :class:`ScrollView` will start
146 to scroll, and no touch event will go to children.
147 It is advisable that you base this value on the dpi of your target device's
148 screen.
149
150 :attr:`scroll_distance` is a :class:`~kivy.properties.NumericProperty` and
151 defaults to 20 (pixels), according to the default value in user
152 configuration.
153 '''
154
155 scroll_wheel_distance = NumericProperty(20)
156 '''Distance to move when scrolling with a mouse wheel.
157 It is advisable that you base this value on the dpi of your target device's
158 screen.
159
160 .. versionadded:: 1.8.0
161
162 :attr:`scroll_wheel_distance` is a
163 :class:`~kivy.properties.NumericProperty` , defaults to 20 pixels.
164 '''
165
166 scroll_timeout = NumericProperty(_scroll_timeout)
167 '''Timeout allowed to trigger the :attr:`scroll_distance`, in milliseconds.
168 If the user has not moved :attr:`scroll_distance` within the timeout,
169 the scrolling will be disabled, and the touch event will go to the
170 children.
171
172 :attr:`scroll_timeout` is a :class:`~kivy.properties.NumericProperty` and
173 defaults to 55 (milliseconds) according to the default value in user
174 configuration.
175
176 .. versionchanged:: 1.5.0
177 Default value changed from 250 to 55.
178 '''
179
180 scroll_x = NumericProperty(0.)
181 '''X scrolling value, between 0 and 1. If 0, the content's left side will
182 touch the left side of the ScrollView. If 1, the content's right side will
183 touch the right side.
184
185 This property is controled by :class:`ScrollView` only if
186 :attr:`do_scroll_x` is True.
187
188 :attr:`scroll_x` is a :class:`~kivy.properties.NumericProperty` and
189 defaults to 0.
190 '''
191
192 scroll_y = NumericProperty(1.)
193 '''Y scrolling value, between 0 and 1. If 0, the content's bottom side will
194 touch the bottom side of the ScrollView. If 1, the content's top side will
195 touch the top side.
196
197 This property is controled by :class:`ScrollView` only if
198 :attr:`do_scroll_y` is True.
199
200 :attr:`scroll_y` is a :class:`~kivy.properties.NumericProperty` and
201 defaults to 1.
202 '''
203
204 do_scroll_x = BooleanProperty(True)
205 '''Allow scroll on X axis.
206
207 :attr:`do_scroll_x` is a :class:`~kivy.properties.BooleanProperty` and
208 defaults to True.
209 '''
210
211 do_scroll_y = BooleanProperty(True)
212 '''Allow scroll on Y axis.
213
214 :attr:`do_scroll_y` is a :class:`~kivy.properties.BooleanProperty` and
215 defaults to True.
216 '''
217
218 def _get_do_scroll(self):
219 return (self.do_scroll_x, self.do_scroll_y)
220
221 def _set_do_scroll(self, value):
222 if type(value) in (list, tuple):
223 self.do_scroll_x, self.do_scroll_y = value
224 else:
225 self.do_scroll_x = self.do_scroll_y = bool(value)
226 do_scroll = AliasProperty(_get_do_scroll, _set_do_scroll,
227 bind=('do_scroll_x', 'do_scroll_y'))
228 '''Allow scroll on X or Y axis.
229
230 :attr:`do_scroll` is a :class:`~kivy.properties.AliasProperty` of
231 (:attr:`do_scroll_x` + :attr:`do_scroll_y`)
232 '''
233
234 def _get_vbar(self):
235 # must return (y, height) in %
236 # calculate the viewport size / scrollview size %
237 if self._viewport is None:
238 return 0, 1.
239 vh = self._viewport.height
240 h = self.height
241 if vh < h or vh == 0:
242 return 0, 1.
243 ph = max(0.01, h / float(vh))
244 sy = min(1.0, max(0.0, self.scroll_y))
245 py = (1. - ph) * sy
246 return (py, ph)
247
248 vbar = AliasProperty(_get_vbar, None, bind=(
249 'scroll_y', '_viewport', 'viewport_size'))
250 '''Return a tuple of (position, size) of the vertical scrolling bar.
251
252 .. versionadded:: 1.2.0
253
254 The position and size are normalized between 0-1, and represent a
255 percentage of the current scrollview height. This property is used
256 internally for drawing the little vertical bar when you're scrolling.
257
258 :attr:`vbar` is a :class:`~kivy.properties.AliasProperty`, readonly.
259 '''
260
261 def _get_hbar(self):
262 # must return (x, width) in %
263 # calculate the viewport size / scrollview size %
264 if self._viewport is None:
265 return 0, 1.
266 vw = self._viewport.width
267 w = self.width
268 if vw < w or vw == 0:
269 return 0, 1.
270 pw = max(0.01, w / float(vw))
271 sx = min(1.0, max(0.0, self.scroll_x))
272 px = (1. - pw) * sx
273 return (px, pw)
274
275 hbar = AliasProperty(_get_hbar, None, bind=(
276 'scroll_x', '_viewport', 'viewport_size'))
277 '''Return a tuple of (position, size) of the horizontal scrolling bar.
278
279 .. versionadded:: 1.2.0
280
281 The position and size are normalized between 0-1, and represent a
282 percentage of the current scrollview height. This property is used
283 internally for drawing the little horizontal bar when you're scrolling.
284
285 :attr:`vbar` is a :class:`~kivy.properties.AliasProperty`, readonly.
286 '''
287
288 bar_color = ListProperty([.7, .7, .7, .9])
289 '''Color of horizontal / vertical scroll bar, in RGBA format.
290
291 .. versionadded:: 1.2.0
292
293 :attr:`bar_color` is a :class:`~kivy.properties.ListProperty` and defaults
294 to [.7, .7, .7, .9].
295 '''
296
297 bar_inactive_color = ListProperty([.7, .7, .7, .2])
298 '''Color of horizontal / vertical scroll bar (in RGBA format), when no
299 scroll is happening.
300
301 .. versionadded:: 1.9.0
302
303 :attr:`bar_inactive_color` is a
304 :class:`~kivy.properties.ListProperty` and defaults to [.7, .7, .7, .2].
305 '''
306
307 bar_width = NumericProperty('2dp')
308 '''Width of the horizontal / vertical scroll bar. The width is interpreted
309 as a height for the horizontal bar.
310
311 .. versionadded:: 1.2.0
312
313 :attr:`bar_width` is a :class:`~kivy.properties.NumericProperty` and
314 defaults to 2.
315 '''
316
317 bar_pos_x = OptionProperty('bottom', options=('top', 'bottom'))
318 '''Which side of the ScrollView the horizontal scroll bar should go
319 on. Possible values are 'top' and 'bottom'.
320
321 .. versionadded:: 1.8.0
322
323 :attr:`bar_pos_x` is an :class:`~kivy.properties.OptionProperty`,
324 default to 'bottom'
325
326 '''
327
328 bar_pos_y = OptionProperty('right', options=('left', 'right'))
329 '''Which side of the ScrollView the vertical scroll bar should go
330 on. Possible values are 'left' and 'right'.
331
332 .. versionadded:: 1.8.0
333
334 :attr:`bar_pos_y` is an :class:`~kivy.properties.OptionProperty`,
335 default to 'right'
336
337 '''
338
339 bar_pos = ReferenceListProperty(bar_pos_x, bar_pos_y)
340 '''Which side of the scroll view to place each of the bars on.
341
342 :attr:`bar_pos` is a :class:`~kivy.properties.ReferenceListProperty` of
343 (:attr:`bar_pos_x`, :attr:`bar_pos_y`)
344 '''
345
346 bar_margin = NumericProperty(0)
347 '''Margin between the bottom / right side of the scrollview when drawing
348 the horizontal / vertical scroll bar.
349
350 .. versionadded:: 1.2.0
351
352     :attr:`bar_margin` is a :class:`~kivy.properties.NumericProperty`, defaults
353 to 0
354 '''
355
356 effect_cls = ObjectProperty(DampedScrollEffect, allownone=True)
357     '''Class effect to instantiate for the X and Y axes.
358
359 .. versionadded:: 1.7.0
360
361 :attr:`effect_cls` is an :class:`~kivy.properties.ObjectProperty` and
362 defaults to :class:`DampedScrollEffect`.
363
364 .. versionchanged:: 1.8.0
365 If you set a string, the :class:`~kivy.factory.Factory` will be used to
366 resolve the class.
367
368 '''
369
370 effect_x = ObjectProperty(None, allownone=True)
371 '''Effect to apply for the X axis. If None is set, an instance of
372 :attr:`effect_cls` will be created.
373
374 .. versionadded:: 1.7.0
375
376 :attr:`effect_x` is an :class:`~kivy.properties.ObjectProperty` and
377 defaults to None.
378 '''
379
380 effect_y = ObjectProperty(None, allownone=True)
381 '''Effect to apply for the Y axis. If None is set, an instance of
382 :attr:`effect_cls` will be created.
383
384 .. versionadded:: 1.7.0
385
386 :attr:`effect_y` is an :class:`~kivy.properties.ObjectProperty` and
387 defaults to None, read-only.
388 '''
389
390 viewport_size = ListProperty([0, 0])
391 '''(internal) Size of the internal viewport. This is the size of your only
392 child in the scrollview.
393 '''
394
395 scroll_type = OptionProperty(['content'], options=(['content'], ['bars'],
396 ['bars', 'content'], ['content', 'bars']))
397 '''Sets the type of scrolling to use for the content of the scrollview.
398     Available options are: ['content'], ['bars'], ['bars', 'content'] and ['content', 'bars'].
399
400 .. versionadded:: 1.8.0
401
402 :attr:`scroll_type` is a :class:`~kivy.properties.OptionProperty`, defaults
403 to ['content'].
404 '''
405
406 # private, for internal use only
407
408 _viewport = ObjectProperty(None, allownone=True)
409 _bar_color = ListProperty([0, 0, 0, 0])
410
411 def _set_viewport_size(self, instance, value):
412 self.viewport_size = value
413
414 def on__viewport(self, instance, value):
415 if value:
416 value.bind(size=self._set_viewport_size)
417 self.viewport_size = value.size
418
419 def __init__(self, **kwargs):
420 self._touch = None
421 self._trigger_update_from_scroll = Clock.create_trigger(
422 self.update_from_scroll, -1)
423 # create a specific canvas for the viewport
424 from kivy.graphics import PushMatrix, Translate, PopMatrix, Canvas
425 self.canvas_viewport = Canvas()
426 self.canvas = Canvas()
427 with self.canvas_viewport.before:
428 PushMatrix()
429 self.g_translate = Translate(0, 0)
430 with self.canvas_viewport.after:
431 PopMatrix()
432
433 super(ScrollView, self).__init__(**kwargs)
434
435 self.register_event_type('on_scroll_start')
436 self.register_event_type('on_scroll_move')
437 self.register_event_type('on_scroll_stop')
438
439 # now add the viewport canvas to our canvas
440 self.canvas.add(self.canvas_viewport)
441
442 effect_cls = self.effect_cls
443 if isinstance(effect_cls, string_types):
444 effect_cls = Factory.get(effect_cls)
445 if self.effect_x is None and effect_cls is not None:
446 self.effect_x = effect_cls(target_widget=self._viewport)
447 if self.effect_y is None and effect_cls is not None:
448 self.effect_y = effect_cls(target_widget=self._viewport)
449 self.bind(
450 width=self._update_effect_x_bounds,
451 height=self._update_effect_y_bounds,
452 viewport_size=self._update_effect_bounds,
453 _viewport=self._update_effect_widget,
454 scroll_x=self._trigger_update_from_scroll,
455 scroll_y=self._trigger_update_from_scroll,
456 pos=self._trigger_update_from_scroll,
457 size=self._trigger_update_from_scroll)
458
459 self._update_effect_widget()
460 self._update_effect_x_bounds()
461 self._update_effect_y_bounds()
462
463 def on_effect_x(self, instance, value):
464 if value:
465 value.bind(scroll=self._update_effect_x)
466 value.target_widget = self._viewport
467
468 def on_effect_y(self, instance, value):
469 if value:
470 value.bind(scroll=self._update_effect_y)
471 value.target_widget = self._viewport
472
473 def on_effect_cls(self, instance, cls):
474 if isinstance(cls, string_types):
475 cls = Factory.get(cls)
476 self.effect_x = cls(target_widget=self._viewport)
477 self.effect_x.bind(scroll=self._update_effect_x)
478 self.effect_y = cls(target_widget=self._viewport)
479 self.effect_y.bind(scroll=self._update_effect_y)
480
481 def _update_effect_widget(self, *args):
482 if self.effect_x:
483 self.effect_x.target_widget = self._viewport
484 if self.effect_y:
485 self.effect_y.target_widget = self._viewport
486
487 def _update_effect_x_bounds(self, *args):
488 if not self._viewport or not self.effect_x:
489 return
490 self.effect_x.min = -(self.viewport_size[0] - self.width)
491 self.effect_x.max = 0
492 self.effect_x.value = self.effect_x.min * self.scroll_x
493
494 def _update_effect_y_bounds(self, *args):
495 if not self._viewport or not self.effect_y:
496 return
497 self.effect_y.min = -(self.viewport_size[1] - self.height)
498 self.effect_y.max = 0
499 self.effect_y.value = self.effect_y.min * self.scroll_y
500
501 def _update_effect_bounds(self, *args):
502 if not self._viewport:
503 return
504 if self.effect_x:
505 self._update_effect_x_bounds()
506 if self.effect_y:
507 self._update_effect_y_bounds()
508
509 def _update_effect_x(self, *args):
510 vp = self._viewport
511 if not vp or not self.effect_x:
512 return
513 sw = vp.width - self.width
514 if sw < 1:
515 return
516 sx = self.effect_x.scroll / float(sw)
517 self.scroll_x = -sx
518 self._trigger_update_from_scroll()
519
520 def _update_effect_y(self, *args):
521 vp = self._viewport
522 if not vp or not self.effect_y:
523 return
524 sh = vp.height - self.height
525 if sh < 1:
526 return
527 sy = self.effect_y.scroll / float(sh)
528 self.scroll_y = -sy
529 self._trigger_update_from_scroll()
530
531 def to_local(self, x, y, **k):
532 tx, ty = self.g_translate.xy
533 return x - tx, y - ty
534
535 def to_parent(self, x, y, **k):
536 tx, ty = self.g_translate.xy
537 return x + tx, y + ty
538
539 def simulate_touch_down(self, touch):
540 # at this point the touch is in parent coords
541 touch.push()
542 touch.apply_transform_2d(self.to_local)
543 ret = super(ScrollView, self).on_touch_down(touch)
544 touch.pop()
545 return ret
546
547 def on_touch_down(self, touch):
548 if self.dispatch('on_scroll_start', touch):
549 self._touch = touch
550 touch.grab(self)
551 return True
552
553 def on_scroll_start(self, touch, check_children=True):
554 if check_children:
555 touch.push()
556 touch.apply_transform_2d(self.to_local)
557 if self.dispatch_children('on_scroll_start', touch):
558 return True
559 touch.pop()
560
561 if not self.collide_point(*touch.pos):
562 touch.ud[self._get_uid('svavoid')] = True
563 return
564 if self.disabled:
565 return True
566 if self._touch or (not (self.do_scroll_x or self.do_scroll_y)):
567 return self.simulate_touch_down(touch)
568
569 # handle mouse scrolling, only if the viewport size is bigger than the
570         # scrollview size, and if the user has allowed it
571 vp = self._viewport
572 if not vp:
573 return True
574 scroll_type = self.scroll_type
575 ud = touch.ud
576 scroll_bar = 'bars' in scroll_type
577
578         # check if touch is in bar_x (horizontal) or bar_y (vertical)
579 ud['in_bar_x'] = ud['in_bar_y'] = False
580 width_scrollable = vp.width > self.width
581 height_scrollable = vp.height > self.height
582 bar_pos_x = self.bar_pos_x[0]
583 bar_pos_y = self.bar_pos_y[0]
584
585 d = {'b': True if touch.y < self.y + self.bar_width else False,
586 't': True if touch.y > self.top - self.bar_width else False,
587 'l': True if touch.x < self.x + self.bar_width else False,
588 'r': True if touch.x > self.right - self.bar_width else False}
589 if scroll_bar:
590 if (width_scrollable and d[bar_pos_x]):
591 ud['in_bar_x'] = True
592 if (height_scrollable and d[bar_pos_y]):
593 ud['in_bar_y'] = True
594
595 if vp and 'button' in touch.profile and \
596 touch.button.startswith('scroll'):
597 btn = touch.button
598 m = sp(self.scroll_wheel_distance)
599 e = None
600
601 if ((btn == 'scrolldown' and self.scroll_y >= 1) or
602 (btn == 'scrollup' and self.scroll_y <= 0) or
603 (btn == 'scrollleft' and self.scroll_x >= 1) or
604 (btn == 'scrollright' and self.scroll_x <= 0)):
605 return False
606
607 if (self.effect_x and self.do_scroll_y and height_scrollable
608 and btn in ('scrolldown', 'scrollup')):
609 e = self.effect_x if ud['in_bar_x'] else self.effect_y
610
611 elif (self.effect_y and self.do_scroll_x and width_scrollable
612 and btn in ('scrollleft', 'scrollright')):
613 e = self.effect_y if ud['in_bar_y'] else self.effect_x
614
615 if e:
616 if btn in ('scrolldown', 'scrollleft'):
617 e.value = max(e.value - m, e.min)
618 e.velocity = 0
619 elif btn in ('scrollup', 'scrollright'):
620 e.value = min(e.value + m, e.max)
621 e.velocity = 0
622 touch.ud[self._get_uid('svavoid')] = True
623 e.trigger_velocity_update()
624 return True
625
626 # no mouse scrolling, so the user is going to drag the scrollview with
627 # this touch.
628 self._touch = touch
629 uid = self._get_uid()
630
631 ud[uid] = {
632 'mode': 'unknown',
633 'dx': 0,
634 'dy': 0,
635 'user_stopped': False,
636 'frames': Clock.frames,
637 'time': touch.time_start}
638
639 if self.do_scroll_x and self.effect_x and not ud['in_bar_x']:
640 self.effect_x.start(touch.x)
641 self._scroll_x_mouse = self.scroll_x
642 if self.do_scroll_y and self.effect_y and not ud['in_bar_y']:
643 self.effect_y.start(touch.y)
644 self._scroll_y_mouse = self.scroll_y
645
646 if (ud.get('in_bar_x', False) or ud.get('in_bar_y', False)):
647 return True
648 if scroll_type == ['bars']:
649 # touch is in parent, but _change_touch_mode expects window coords
650 touch.push()
651 touch.apply_transform_2d(self.to_local)
652 touch.apply_transform_2d(self.to_window)
653 self._change_touch_mode()
654 touch.pop()
655 return False
656 else:
657 Clock.schedule_once(self._change_touch_mode,
658 self.scroll_timeout / 1000.)
659 return True
660
661 def on_touch_move(self, touch):
662 if self._touch is not touch:
663 # touch is in parent
664 touch.push()
665 touch.apply_transform_2d(self.to_local)
666 super(ScrollView, self).on_touch_move(touch)
667 touch.pop()
668 return self._get_uid() in touch.ud
669 if touch.grab_current is not self:
670 return True
671
672 touch.ud['sv.handled'] = {'x': False, 'y': False}
673 if self.dispatch('on_scroll_move', touch):
674 return True
675
676 def on_scroll_move(self, touch):
677 if self._get_uid('svavoid') in touch.ud:
678 return False
679
680 touch.push()
681 touch.apply_transform_2d(self.to_local)
682 if self.dispatch_children('on_scroll_move', touch):
683 return True
684 touch.pop()
685
686 rv = True
687
688 uid = self._get_uid()
689 if not uid in touch.ud:
690 self._touch = False
691 return self.on_scroll_start(touch, False)
692 ud = touch.ud[uid]
693 mode = ud['mode']
694
695 # check if the minimum distance has been travelled
696 if mode == 'unknown' or mode == 'scroll':
697 if not touch.ud['sv.handled']['x'] and self.do_scroll_x \
698 and self.effect_x:
699 width = self.width
700 if touch.ud.get('in_bar_x', False):
701 dx = touch.dx / float(width - width * self.hbar[1])
702 self.scroll_x = min(max(self.scroll_x + dx, 0.), 1.)
703 self._trigger_update_from_scroll()
704 else:
705 if self.scroll_type != ['bars']:
706 self.effect_x.update(touch.x)
707 if self.scroll_x < 0 or self.scroll_x > 1:
708 rv = False
709 else:
710 touch.ud['sv.handled']['x'] = True
711 if not touch.ud['sv.handled']['y'] and self.do_scroll_y \
712 and self.effect_y:
713 height = self.height
714 if touch.ud.get('in_bar_y', False):
715 dy = touch.dy / float(height - height * self.vbar[1])
716 self.scroll_y = min(max(self.scroll_y + dy, 0.), 1.)
717 self._trigger_update_from_scroll()
718 else:
719 if self.scroll_type != ['bars']:
720 self.effect_y.update(touch.y)
721 if self.scroll_y < 0 or self.scroll_y > 1:
722 rv = False
723 else:
724 touch.ud['sv.handled']['y'] = True
725
726 if mode == 'unknown':
727 ud['dx'] += abs(touch.dx)
728 ud['dy'] += abs(touch.dy)
729 if ud['dx'] > self.scroll_distance:
730 if not self.do_scroll_x:
731 # touch is in parent, but _change expects window coords
732 touch.push()
733 touch.apply_transform_2d(self.to_local)
734 touch.apply_transform_2d(self.to_window)
735 self._change_touch_mode()
736 touch.pop()
737 return
738 mode = 'scroll'
739
740 if ud['dy'] > self.scroll_distance:
741 if not self.do_scroll_y:
742 # touch is in parent, but _change expects window coords
743 touch.push()
744 touch.apply_transform_2d(self.to_local)
745 touch.apply_transform_2d(self.to_window)
746 self._change_touch_mode()
747 touch.pop()
748 return
749 mode = 'scroll'
750 ud['mode'] = mode
751
752 if mode == 'scroll':
753 ud['dt'] = touch.time_update - ud['time']
754 ud['time'] = touch.time_update
755 ud['user_stopped'] = True
756
757 return rv
758
759 def on_touch_up(self, touch):
760 if self._touch is not touch and self.uid not in touch.ud:
761 # touch is in parents
762 touch.push()
763 touch.apply_transform_2d(self.to_local)
764 if super(ScrollView, self).on_touch_up(touch):
765 return True
766 touch.pop()
767 return False
768
769 if self.dispatch('on_scroll_stop', touch):
770 touch.ungrab(self)
771 return True
772
773 def on_scroll_stop(self, touch, check_children=True):
774 self._touch = None
775
776 if check_children:
777 touch.push()
778 touch.apply_transform_2d(self.to_local)
779 if self.dispatch_children('on_scroll_stop', touch):
780 return True
781 touch.pop()
782
783 if self._get_uid('svavoid') in touch.ud:
784 return
785 if self._get_uid() not in touch.ud:
786 return False
787
788 self._touch = None
789 uid = self._get_uid()
790 ud = touch.ud[uid]
791 if self.do_scroll_x and self.effect_x:
792 if not touch.ud.get('in_bar_x', False) and\
793 self.scroll_type != ['bars']:
794 self.effect_x.stop(touch.x)
795 if self.do_scroll_y and self.effect_y and\
796 self.scroll_type != ['bars']:
797 if not touch.ud.get('in_bar_y', False):
798 self.effect_y.stop(touch.y)
799 if ud['mode'] == 'unknown':
800 # we must do the click at least..
801 # only send the click if it was not a click to stop
802 # autoscrolling
803 if not ud['user_stopped']:
804 self.simulate_touch_down(touch)
805 Clock.schedule_once(partial(self._do_touch_up, touch), .2)
806 Clock.unschedule(self._update_effect_bounds)
807 Clock.schedule_once(self._update_effect_bounds)
808
809 # if we do mouse scrolling, always accept it
810 if 'button' in touch.profile and touch.button.startswith('scroll'):
811 return True
812
813 return self._get_uid() in touch.ud
814
815 def convert_distance_to_scroll(self, dx, dy):
816 '''Convert a distance in pixels to a scroll distance, depending on the
817 content size and the scrollview size.
818
819 The result will be a tuple of scroll distance that can be added to
820 :data:`scroll_x` and :data:`scroll_y`
821 '''
822 if not self._viewport:
823 return 0, 0
824 vp = self._viewport
825 if vp.width > self.width:
826 sw = vp.width - self.width
827 sx = dx / float(sw)
828 else:
829 sx = 0
830 if vp.height > self.height:
831 sh = vp.height - self.height
832 sy = dy / float(sh)
833 else:
834 sy = 1
835 return sx, sy
836
837 def update_from_scroll(self, *largs):
838 '''Force the reposition of the content, according to current value of
839 :attr:`scroll_x` and :attr:`scroll_y`.
840
841 This method is automatically called when one of the :attr:`scroll_x`,
842 :attr:`scroll_y`, :attr:`pos` or :attr:`size` properties change, or
843 if the size of the content changes.
844 '''
845 if not self._viewport:
846 return
847 vp = self._viewport
848
849 # update from size_hint
850 if vp.size_hint_x is not None:
851 vp.width = vp.size_hint_x * self.width
852 if vp.size_hint_y is not None:
853 vp.height = vp.size_hint_y * self.height
854
855 if vp.width > self.width:
856 sw = vp.width - self.width
857 x = self.x - self.scroll_x * sw
858 else:
859 x = self.x
860 if vp.height > self.height:
861 sh = vp.height - self.height
862 y = self.y - self.scroll_y * sh
863 else:
864 y = self.top - vp.height
865
866         # from 1.8.0, we now use a matrix by default, instead of moving the
867         # widget position directly. We set it here, but it will be a no-op most
868         # of the time.
869 vp.pos = 0, 0
870 self.g_translate.xy = x, y
871
872 # New in 1.2.0, show bar when scrolling happens and (changed in 1.9.0)
873 # fade to bar_inactive_color when no scroll is happening.
874 Clock.unschedule(self._bind_inactive_bar_color)
875 self.unbind(bar_inactive_color=self._change_bar_color)
876 Animation.stop_all(self, '_bar_color')
877 self.bind(bar_color=self._change_bar_color)
878 self._bar_color = self.bar_color
879 Clock.schedule_once(self._bind_inactive_bar_color, .5)
880
881 def _bind_inactive_bar_color(self, *l):
882 self.unbind(bar_color=self._change_bar_color)
883 self.bind(bar_inactive_color=self._change_bar_color)
884 Animation(
885 _bar_color=self.bar_inactive_color, d=.5, t='out_quart').start(self)
886
887 def _change_bar_color(self, inst, value):
888 self._bar_color = value
889
890 #
891 # Private
892 #
893 def add_widget(self, widget, index=0):
894 if self._viewport:
895             raise Exception('ScrollView accepts only one widget')
896 canvas = self.canvas
897 self.canvas = self.canvas_viewport
898 super(ScrollView, self).add_widget(widget, index)
899 self.canvas = canvas
900 self._viewport = widget
901 widget.bind(size=self._trigger_update_from_scroll)
902 self._trigger_update_from_scroll()
903
904 def remove_widget(self, widget):
905 canvas = self.canvas
906 self.canvas = self.canvas_viewport
907 super(ScrollView, self).remove_widget(widget)
908 self.canvas = canvas
909 if widget is self._viewport:
910 self._viewport = None
911
912 def _get_uid(self, prefix='sv'):
913 return '{0}.{1}'.format(prefix, self.uid)
914
915 def _change_touch_mode(self, *largs):
916 if not self._touch:
917 return
918 uid = self._get_uid()
919 touch = self._touch
920 ud = touch.ud[uid]
921 if ud['mode'] != 'unknown' or ud['user_stopped']:
922 return
923 diff_frames = Clock.frames - ud['frames']
924
925         # in order to be able to scroll on very slow devices, let at least 3
926         # frames be displayed to accumulate some velocity before changing the
927         # touch mode. Otherwise, we might never be able to compute the velocity
928         # and would have no way to scroll. See #1464 and #1499
929 if diff_frames < 3:
930 Clock.schedule_once(self._change_touch_mode, 0)
931 return
932
933 if self.do_scroll_x and self.effect_x:
934 self.effect_x.cancel()
935 if self.do_scroll_y and self.effect_y:
936 self.effect_y.cancel()
937         # XXX the next line used to be part of the condition below, but that
938         # prevented the possibility to "drag" an object out of the scrollview
939         # in the unused direction: with a horizontal scrollview, a vertical
940         # gesture would not stop the scroll view from looking for a horizontal
941         # gesture until the timeout expired.
942 # and touch.dx + touch.dy == 0:
943 touch.ungrab(self)
944 self._touch = None
945 # touch is in window coords
946 touch.push()
947 touch.apply_transform_2d(self.to_widget)
948 touch.apply_transform_2d(self.to_parent)
949 self.simulate_touch_down(touch)
950 touch.pop()
951 return
952
953 def _do_touch_up(self, touch, *largs):
954 # touch is in window coords
955 touch.push()
956 touch.apply_transform_2d(self.to_widget)
957 super(ScrollView, self).on_touch_up(touch)
958 touch.pop()
959 # don't forget about grab event!
960 for x in touch.grab_list[:]:
961 touch.grab_list.remove(x)
962 x = x()
963 if not x:
964 continue
965 touch.grab_current = x
966 # touch is in window coords
967 touch.push()
968 touch.apply_transform_2d(self.to_widget)
969 super(ScrollView, self).on_touch_up(touch)
970 touch.pop()
971 touch.grab_current = None
972
973
974 if __name__ == '__main__':
975 from kivy.app import App
976
977 from kivy.uix.gridlayout import GridLayout
978 from kivy.uix.button import Button
979
980 class ScrollViewApp(App):
981
982 def build(self):
983 layout1 = GridLayout(cols=4, spacing=10, size_hint=(None, None))
984 layout1.bind(minimum_height=layout1.setter('height'),
985 minimum_width=layout1.setter('width'))
986 for i in range(40):
987 btn = Button(text=str(i), size_hint=(None, None),
988 size=(200, 100))
989 layout1.add_widget(btn)
990 scrollview1 = ScrollView(bar_width='2dp')
991 scrollview1.add_widget(layout1)
992
993 layout2 = GridLayout(cols=4, spacing=10, size_hint=(None, None))
994 layout2.bind(minimum_height=layout2.setter('height'),
995 minimum_width=layout2.setter('width'))
996 for i in range(40):
997 btn = Button(text=str(i), size_hint=(None, None),
998 size=(200, 100))
999 layout2.add_widget(btn)
1000 scrollview2 = ScrollView(scroll_type=['bars'],
1001 bar_width='9dp',
1002 scroll_wheel_distance=100)
1003 scrollview2.add_widget(layout2)
1004
1005 root = GridLayout(cols=2)
1006 root.add_widget(scrollview1)
1007 root.add_widget(scrollview2)
1008 return root
1009
1010 ScrollViewApp().run()
1011
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
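For reference, the hunk header in the example, `@@ -313,4 +313,4 @@`, follows the unified diff convention: `-313,4` means the hunk starts at line 313 of the original file and covers 4 lines there, and `+313,4` gives the corresponding start line and length in the patched file; the two counts differ when lines are added or removed.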
| diff --git a/kivy/uix/scrollview.py b/kivy/uix/scrollview.py
--- a/kivy/uix/scrollview.py
+++ b/kivy/uix/scrollview.py
@@ -669,6 +669,9 @@
if touch.grab_current is not self:
return True
+ if not (self.do_scroll_y or self.do_scroll_x):
+ return super(ScrollView, self).on_touch_move(touch)
+
touch.ud['sv.handled'] = {'x': False, 'y': False}
if self.dispatch('on_scroll_move', touch):
return True
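
For context on the hunk above: it makes `ScrollView.on_touch_move` fall back to the standard widget dispatch when scrolling is disabled on both axes, instead of running the scroll-gesture path, so move events can reach an inner widget such as a `Scatter`. Below is a minimal sketch of the kind of configuration involved; the widget tree is illustrative only and is not taken from the showcase demo referenced in the issue.

```python
# Minimal sketch (assumed setup, not from the repository): a ScrollView with
# both scroll axes disabled wrapping a Scatter. With nothing to scroll, move
# events should be handled by the normal Widget dispatch and reach the Scatter.
from kivy.app import App
from kivy.uix.scrollview import ScrollView
from kivy.uix.scatter import Scatter
from kivy.uix.button import Button


class NoScrollApp(App):
    def build(self):
        root = ScrollView(do_scroll_x=False, do_scroll_y=False)
        scatter = Scatter(size_hint=(None, None), size=(200, 200))
        scatter.add_widget(Button(text='drag me'))
        root.add_widget(scatter)  # ScrollView accepts a single child only
        return root


if __name__ == '__main__':
    NoScrollApp().run()
```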
| {"golden_diff": "diff --git a/kivy/uix/scrollview.py b/kivy/uix/scrollview.py\n--- a/kivy/uix/scrollview.py\n+++ b/kivy/uix/scrollview.py\n@@ -669,6 +669,9 @@\n if touch.grab_current is not self:\n return True\n \n+ if not (self.do_scroll_y or self.do_scroll_x):\n+ return super(ScrollView, self).on_touch_move(touch)\n+\n touch.ud['sv.handled'] = {'x': False, 'y': False}\n if self.dispatch('on_scroll_move', touch):\n return True\n", "issue": "Scatter crash on windows\nI just tested BETA-1 with showcase, and when i tried to move the cat:\n\n```\nrunning \"python.exe C:\\Users\\tito\\Desktop\\Kivy-1.9.0-dev-py3.3-win32\\kivy\\exampl\nes\\demo\\showcase\\main.py\" \\n\n[INFO ] [Logger ] Record log in C:\\Users\\tito\\.kivy\\logs\\kivy_\n14-09-21_2.txt\n[INFO ] Kivy v1.9.0-dev\n[INFO ] [Python ] v3.3.3 (v3.3.3:c3896275c0f6, Nov 18 2013, 21\n:18:40) [MSC v.1600 32 bit (Intel)]\n[INFO ] [Factory ] 170 symbols loaded\n[INFO ] [Image ] Providers: img_tex, img_dds, img_pygame, img\n_gif (img_sdl2, img_ffpyplayer, img_pil ignored)\n[INFO ] [Window ] Provider: pygame(['window_egl_rpi'] ignored)\n\nOpenGL Warning: WGL_SAMPLE_BUFFERS_EXT & WGL_SAMPLES_EXT not supporteed!\nOpenGL Warning: WGL_SAMPLE_BUFFERS_EXT & WGL_SAMPLES_EXT not supporteed!\nGLEW initialization succeeded\nGL: glGenFramebuffers is NULL, try to detect an extension\nGL: available extensions: GL_EXT_texture_compression_s3tc GL_EXT_draw_range_elem\nents GL_EXT_framebuffer_object GL_EXT_compiled_vertex_array GL_ARB_depth_texture\n GL_ARB_fragment_program GL_ARB_multisample GL_ARB_multitexture GL_ARB_occlusion\n_query GL_ARB_point_parameters GL_ARB_point_sprite GL_ARB_shadow GL_ARB_texture_\nborder_clamp GL_ARB_texture_compression GL_ARB_texture_cube_map GL_ARB_texture_e\nnv_add GL_ARB_texture_env_combine GL_EXT_texture_env_combine GL_ARB_texture_env_\ncrossbar GL_ARB_texture_env_dot3 GL_EXT_texture_env_dot3 GL_ARB_texture_mirrored\n_repeat GL_IBM_texture_mirrored_repeat GL_ATI_texture_mirror_once GL_ARB_texture\n_non_power_of_two GL_ARB_transpose_matrix GL_ARB_vertex_buffer_object GL_ARB_pix\nel_buffer_object GL_ARB_vertex_program GL_ARB_window_pos GL_EXT_blend_color GL_E\nXT_blend_minmax GL_EXT_blend_func_separate GL_EXT_blend_subtract GL_EXT_texture_\nenv_add GL_EXT_fog_coord GL_EXT_multi_draw_arrays GL_EXT_secondary_color GL_EXT_\nshadow_funcs GL_EXT_stencil_wrap GL_EXT_texture_cube_map GL_EXT_texture_edge_cla\nmp GL_EXT_texture_filter_anisotropic GL_EXT_texture_lod_bias GL_EXT_texture_obje\nct GL_EXT_texture3D GL_IBM_rasterpos_clip GL_NV_fog_distance GL_NV_fragment_prog\nram GL_NV_fragment_program_option GL_NV_fragment_program2 GL_NV_register_combine\nrs GL_NV_register_combiners2 GL_NV_texgen_reflection GL_NV_texture_rectangle GL_\nARB_texture_rectangle GL_NV_vertex_program GL_NV_vertex_program1_1 GL_NV_vertex_\nprogram2 GL_NV_vertex_program2_option GL_NV_vertex_program3 GL_SGIS_generate_mip\nmap GL_ARB_shading_language_100 GL_ARB_shader_objects GL_ARB_vertex_shader GL_AR\nB_fragment_shader GL_EXT_texture_sRGB GL_EXT_framebuffer_blit GL_EXT_blend_equat\nion_separate GL_EXT_stencil_two_side GL_CR_state_parameter GL_CR_cursor_position\n GL_CR_bounding_box GL_CR_print_string GL_CR_tilesort_info GL_CR_synchronization\n GL_CR_head_spu_name GL_CR_performance_info GL_CR_window_size GL_CR_tile_info GL\n_CR_saveframe GL_CR_readback_barrier_size GL_CR_server_id_sharing GL_CR_server_m\natrix\nGL: EXT_framebuffer_object is supported\n[INFO ] [GL ] OpenGL version <b'2.1 Chromium 1.9'>\n[INFO ] [GL ] OpenGL vendor <b'Humper'>\n[INFO ] [GL ] OpenGL 
renderer <b'Chromium'>\n[INFO ] [GL ] OpenGL parsed version: 2, 1\n[INFO ] [GL ] Shading version <b'4.40 NVIDIA via Cg compil\ner'>\n[INFO ] [GL ] Texture max size <16384>\n[INFO ] [GL ] Texture max units <32>\n[INFO ] [Window ] virtual keyboard not allowed, single mode, n\not docked\n[INFO ] [Text ] Provider: pygame(['text_sdl2'] ignored)\n[INFO ] [GL ] NPOT texture support is available\n[INFO ] [OSC ] using <thread> for socket\n[INFO ] [Base ] Start application main loop\n[WARNING ] [Accordion ] not enough space for displaying all children\n\n[WARNING ] [Accordion ] need 132px, got 100px\n[WARNING ] [Accordion ] layout aborted.\n[WARNING ] [Accordion ] not enough space for displaying all children\n\n[WARNING ] [Accordion ] need 132px, got 84px\n[WARNING ] [Accordion ] layout aborted.\n[WARNING ] [Accordion ] not enough space for displaying all children\n\n[WARNING ] [Accordion ] need 132px, got 84px\n[WARNING ] [Accordion ] layout aborted.\n[ERROR ] unable to access to <\\pagefile.sys>\nTraceback (most recent call last):\n File \"C:\\Users\\tito\\Desktop\\Kivy-1.9.0-dev-py3.3-win32\\kivy\\kivy\\uix\\filechoos\ner.py\", line 122, in is_hidden\n return GetFileAttributesExW(fn)[0] & FILE_ATTRIBUTE_HIDDEN\npywintypes.error: (32, 'GetFileAttributesEx', 'Le processus ne peut pas acc\u00e9der\nau fichier car ce fichier est utilis\u00e9 par un autre processus.')\n[ERROR ] unable to access to <\\pagefile.sys>\nTraceback (most recent call last):\n File \"C:\\Users\\tito\\Desktop\\Kivy-1.9.0-dev-py3.3-win32\\kivy\\kivy\\uix\\filechoos\ner.py\", line 122, in is_hidden\n return GetFileAttributesExW(fn)[0] & FILE_ATTRIBUTE_HIDDEN\npywintypes.error: (32, 'GetFileAttributesEx', 'Le processus ne peut pas acc\u00e9der\nau fichier car ce fichier est utilis\u00e9 par un autre processus.')\n[INFO ] [VideoGstplayer] Using Gstreamer 1.2.1.0\n[INFO ] [Video ] Provider: gstplayer\n[INFO ] [Base ] Leaving application in progress...\n Traceback (most recent call last):\n File \"C:\\Users\\tito\\Desktop\\Kivy-1.9.0-dev-py3.3-win32\\kivy\\examples\\demo\\sho\nwcase\\main.py\", line 212, in <module>\n ShowcaseApp().run()\n File \"C:\\Users\\tito\\Desktop\\Kivy-1.9.0-dev-py3.3-win32\\kivy\\kivy\\app.py\", lin\ne 825, in run\n runTouchApp()\n File \"C:\\Users\\tito\\Desktop\\Kivy-1.9.0-dev-py3.3-win32\\kivy\\kivy\\base.py\", li\nne 484, in runTouchApp\n EventLoop.window.mainloop()\n File \"C:\\Users\\tito\\Desktop\\Kivy-1.9.0-dev-py3.3-win32\\kivy\\kivy\\core\\window\\\nwindow_pygame.py\", line 364, in mainloop\n self._mainloop()\n File \"C:\\Users\\tito\\Desktop\\Kivy-1.9.0-dev-py3.3-win32\\kivy\\kivy\\core\\window\\\nwindow_pygame.py\", line 268, in _mainloop\n EventLoop.idle()\n File \"C:\\Users\\tito\\Desktop\\Kivy-1.9.0-dev-py3.3-win32\\kivy\\kivy\\base.py\", li\nne 327, in idle\n self.dispatch_input()\n File \"C:\\Users\\tito\\Desktop\\Kivy-1.9.0-dev-py3.3-win32\\kivy\\kivy\\base.py\", li\nne 312, in dispatch_input\n post_dispatch_input(*pop(0))\n File \"C:\\Users\\tito\\Desktop\\Kivy-1.9.0-dev-py3.3-win32\\kivy\\kivy\\base.py\", li\nne 271, in post_dispatch_input\n wid.dispatch('on_touch_move', me)\n File \"_event.pyx\", line 392, in kivy._event.EventDispatcher.dispatch (kivy\\_e\nvent.c:4602)\n File \"C:\\Users\\tito\\Desktop\\Kivy-1.9.0-dev-py3.3-win32\\kivy\\kivy\\uix\\scatter.\npy\", line 547, in on_touch_move\n if self.transform_with_touch(touch):\n File \"C:\\Users\\tito\\Desktop\\Kivy-1.9.0-dev-py3.3-win32\\kivy\\kivy\\uix\\scatter.\npy\", line 452, in transform_with_touch\n anchor = max(points[:-1], 
key=lambda p: p.distance(touch.pos))\n ValueError: max() arg is an empty sequence\nAppuyez sur une touche pour continuer...\n```\n\n", "before_files": [{"content": "'''Scroll View\n===========\n\n.. versionadded:: 1.0.4\n\nThe :class:`ScrollView` widget provides a scrollable/pannable viewport that is\nclipped at the scrollview's bounding box.\n\n\nScrolling Behavior\n------------------\n\nThe ScrollView accepts only one child and applies a viewport/window to\nit according to the :attr:`ScrollView.scroll_x` and\n:attr:`ScrollView.scroll_y` properties. Touches are analyzed to\ndetermine if the user wants to scroll or control the child in some\nother manner - you cannot do both at the same time. To determine if\ninteraction is a scrolling gesture, these properties are used:\n\n - :attr:`ScrollView.scroll_distance`: the minimum distance to travel,\n defaults to 20 pixels.\n - :attr:`ScrollView.scroll_timeout`: the maximum time period, defaults\n to 250 milliseconds.\n\nIf a touch travels :attr:`~ScrollView.scroll_distance` pixels within the\n:attr:`~ScrollView.scroll_timeout` period, it is recognized as a scrolling\ngesture and translation (scroll/pan) will begin. If the timeout occurs, the\ntouch down event is dispatched to the child instead (no translation).\n\nThe default value for those settings can be changed in the configuration file::\n\n [widgets]\n scroll_timeout = 250\n scroll_distance = 20\n\n.. versionadded:: 1.1.1\n\n ScrollView now animates scrolling in Y when a mousewheel is used.\n\n\nLimiting to the X or Y Axis\n---------------------------\n\nBy default, the ScrollView allows scrolling in both the X and Y axes. You can\nexplicitly disable scrolling on an axis by setting\n:attr:`ScrollView.do_scroll_x` or :attr:`ScrollView.do_scroll_y` to False.\n\n\nManaging the Content Size and Position\n--------------------------------------\n\nScrollView manages the position of its children similarly to a\nRelativeLayout (see :mod:`~kivy.uix.relativelayout`) but not the size. You must\ncarefully specify the `size_hint` of your content to get the desired\nscroll/pan effect.\n\nBy default, size_hint is (1, 1), so the content size will fit your ScrollView\nexactly (you will have nothing to scroll). You must deactivate at least one of\nthe size_hint instructions (x or y) of the child to enable scrolling.\n\nTo scroll a :class:`GridLayout` on Y-axis/vertically, set the child's width\nidentical to that of the ScrollView (size_hint_x=1, default), and set the\nsize_hint_y property to None::\n\n layout = GridLayout(cols=1, spacing=10, size_hint_y=None)\n # Make sure the height is such that there is something to scroll.\n layout.bind(minimum_height=layout.setter('height'))\n for i in range(30):\n btn = Button(text=str(i), size_hint_y=None, height=40)\n layout.add_widget(btn)\n root = ScrollView(size_hint=(None, None), size=(400, 400))\n root.add_widget(layout)\n\n\nOverscroll Effects\n------------------\n\n.. versionadded:: 1.7.0\n\nWhen scrolling would exceed the bounds of the :class:`ScrollView`, it\nuses a :class:`~kivy.effects.scroll.ScrollEffect` to handle the\noverscroll. These effects can perform actions like bouncing back,\nchanging opacity, or simply preventing scrolling beyond the normal\nboundaries. Note that complex effects may perform many computations,\nwhich can be slow on weaker hardware.\n\nYou can change what effect is being used by setting\n:attr:`ScrollView.effect_cls` to any effect class. 
Current options\ninclude:\n\n - :class:`~kivy.effects.scroll.ScrollEffect`: Does not allow\n scrolling beyond the :class:`ScrollView` boundaries.\n - :class:`~kivy.effects.dampedscroll.DampedScrollEffect`: The\n current default. Allows the user to scroll beyond the normal\n boundaries, but has the content spring back once the\n touch/click is released.\n - :class:`~kivy.effects.opacityscroll.OpacityScrollEffect`: Similar\n to the :class:`~kivy.effect.dampedscroll.DampedScrollEffect`, but\n also reduces opacity during overscroll.\n\nYou can also create your own scroll effect by subclassing one of these,\nthen pass it as the :attr:`~ScrollView.effect_cls` in the same way.\n\nAlternatively, you can set :attr:`ScrollView.effect_x` and/or\n:attr:`ScrollView.effect_y` to an *instance* of the effect you want to\nuse. This will override the default effect set in\n:attr:`ScrollView.effect_cls`.\n\nAll the effects are located in the :mod:`kivy.effects`.\n\n'''\n\n__all__ = ('ScrollView', )\n\nfrom functools import partial\nfrom kivy.animation import Animation\nfrom kivy.compat import string_types\nfrom kivy.config import Config\nfrom kivy.clock import Clock\nfrom kivy.factory import Factory\nfrom kivy.uix.stencilview import StencilView\nfrom kivy.metrics import sp\nfrom kivy.effects.dampedscroll import DampedScrollEffect\nfrom kivy.properties import NumericProperty, BooleanProperty, AliasProperty, \\\n ObjectProperty, ListProperty, ReferenceListProperty, OptionProperty\n\n\n# When we are generating documentation, Config doesn't exist\n_scroll_timeout = _scroll_distance = 0\nif Config:\n _scroll_timeout = Config.getint('widgets', 'scroll_timeout')\n _scroll_distance = sp(Config.getint('widgets', 'scroll_distance'))\n\n\nclass ScrollView(StencilView):\n '''ScrollView class. See module documentation for more information.\n\n .. versionchanged:: 1.7.0\n `auto_scroll`, `scroll_friction`, `scroll_moves`, `scroll_stoptime' has\n been deprecated, use :attr:`effect_cls` instead.\n '''\n\n scroll_distance = NumericProperty(_scroll_distance)\n '''Distance to move before scrolling the :class:`ScrollView`, in pixels. As\n soon as the distance has been traveled, the :class:`ScrollView` will start\n to scroll, and no touch event will go to children.\n It is advisable that you base this value on the dpi of your target device's\n screen.\n\n :attr:`scroll_distance` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 20 (pixels), according to the default value in user\n configuration.\n '''\n\n scroll_wheel_distance = NumericProperty(20)\n '''Distance to move when scrolling with a mouse wheel.\n It is advisable that you base this value on the dpi of your target device's\n screen.\n\n .. versionadded:: 1.8.0\n\n :attr:`scroll_wheel_distance` is a\n :class:`~kivy.properties.NumericProperty` , defaults to 20 pixels.\n '''\n\n scroll_timeout = NumericProperty(_scroll_timeout)\n '''Timeout allowed to trigger the :attr:`scroll_distance`, in milliseconds.\n If the user has not moved :attr:`scroll_distance` within the timeout,\n the scrolling will be disabled, and the touch event will go to the\n children.\n\n :attr:`scroll_timeout` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 55 (milliseconds) according to the default value in user\n configuration.\n\n .. versionchanged:: 1.5.0\n Default value changed from 250 to 55.\n '''\n\n scroll_x = NumericProperty(0.)\n '''X scrolling value, between 0 and 1. If 0, the content's left side will\n touch the left side of the ScrollView. 
If 1, the content's right side will\n touch the right side.\n\n This property is controled by :class:`ScrollView` only if\n :attr:`do_scroll_x` is True.\n\n :attr:`scroll_x` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 0.\n '''\n\n scroll_y = NumericProperty(1.)\n '''Y scrolling value, between 0 and 1. If 0, the content's bottom side will\n touch the bottom side of the ScrollView. If 1, the content's top side will\n touch the top side.\n\n This property is controled by :class:`ScrollView` only if\n :attr:`do_scroll_y` is True.\n\n :attr:`scroll_y` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 1.\n '''\n\n do_scroll_x = BooleanProperty(True)\n '''Allow scroll on X axis.\n\n :attr:`do_scroll_x` is a :class:`~kivy.properties.BooleanProperty` and\n defaults to True.\n '''\n\n do_scroll_y = BooleanProperty(True)\n '''Allow scroll on Y axis.\n\n :attr:`do_scroll_y` is a :class:`~kivy.properties.BooleanProperty` and\n defaults to True.\n '''\n\n def _get_do_scroll(self):\n return (self.do_scroll_x, self.do_scroll_y)\n\n def _set_do_scroll(self, value):\n if type(value) in (list, tuple):\n self.do_scroll_x, self.do_scroll_y = value\n else:\n self.do_scroll_x = self.do_scroll_y = bool(value)\n do_scroll = AliasProperty(_get_do_scroll, _set_do_scroll,\n bind=('do_scroll_x', 'do_scroll_y'))\n '''Allow scroll on X or Y axis.\n\n :attr:`do_scroll` is a :class:`~kivy.properties.AliasProperty` of\n (:attr:`do_scroll_x` + :attr:`do_scroll_y`)\n '''\n\n def _get_vbar(self):\n # must return (y, height) in %\n # calculate the viewport size / scrollview size %\n if self._viewport is None:\n return 0, 1.\n vh = self._viewport.height\n h = self.height\n if vh < h or vh == 0:\n return 0, 1.\n ph = max(0.01, h / float(vh))\n sy = min(1.0, max(0.0, self.scroll_y))\n py = (1. - ph) * sy\n return (py, ph)\n\n vbar = AliasProperty(_get_vbar, None, bind=(\n 'scroll_y', '_viewport', 'viewport_size'))\n '''Return a tuple of (position, size) of the vertical scrolling bar.\n\n .. versionadded:: 1.2.0\n\n The position and size are normalized between 0-1, and represent a\n percentage of the current scrollview height. This property is used\n internally for drawing the little vertical bar when you're scrolling.\n\n :attr:`vbar` is a :class:`~kivy.properties.AliasProperty`, readonly.\n '''\n\n def _get_hbar(self):\n # must return (x, width) in %\n # calculate the viewport size / scrollview size %\n if self._viewport is None:\n return 0, 1.\n vw = self._viewport.width\n w = self.width\n if vw < w or vw == 0:\n return 0, 1.\n pw = max(0.01, w / float(vw))\n sx = min(1.0, max(0.0, self.scroll_x))\n px = (1. - pw) * sx\n return (px, pw)\n\n hbar = AliasProperty(_get_hbar, None, bind=(\n 'scroll_x', '_viewport', 'viewport_size'))\n '''Return a tuple of (position, size) of the horizontal scrolling bar.\n\n .. versionadded:: 1.2.0\n\n The position and size are normalized between 0-1, and represent a\n percentage of the current scrollview height. This property is used\n internally for drawing the little horizontal bar when you're scrolling.\n\n :attr:`vbar` is a :class:`~kivy.properties.AliasProperty`, readonly.\n '''\n\n bar_color = ListProperty([.7, .7, .7, .9])\n '''Color of horizontal / vertical scroll bar, in RGBA format.\n\n .. 
versionadded:: 1.2.0\n\n :attr:`bar_color` is a :class:`~kivy.properties.ListProperty` and defaults\n to [.7, .7, .7, .9].\n '''\n\n bar_inactive_color = ListProperty([.7, .7, .7, .2])\n '''Color of horizontal / vertical scroll bar (in RGBA format), when no\n scroll is happening.\n\n .. versionadded:: 1.9.0\n\n :attr:`bar_inactive_color` is a\n :class:`~kivy.properties.ListProperty` and defaults to [.7, .7, .7, .2].\n '''\n\n bar_width = NumericProperty('2dp')\n '''Width of the horizontal / vertical scroll bar. The width is interpreted\n as a height for the horizontal bar.\n\n .. versionadded:: 1.2.0\n\n :attr:`bar_width` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 2.\n '''\n\n bar_pos_x = OptionProperty('bottom', options=('top', 'bottom'))\n '''Which side of the ScrollView the horizontal scroll bar should go\n on. Possible values are 'top' and 'bottom'.\n\n .. versionadded:: 1.8.0\n\n :attr:`bar_pos_x` is an :class:`~kivy.properties.OptionProperty`,\n default to 'bottom'\n\n '''\n\n bar_pos_y = OptionProperty('right', options=('left', 'right'))\n '''Which side of the ScrollView the vertical scroll bar should go\n on. Possible values are 'left' and 'right'.\n\n .. versionadded:: 1.8.0\n\n :attr:`bar_pos_y` is an :class:`~kivy.properties.OptionProperty`,\n default to 'right'\n\n '''\n\n bar_pos = ReferenceListProperty(bar_pos_x, bar_pos_y)\n '''Which side of the scroll view to place each of the bars on.\n\n :attr:`bar_pos` is a :class:`~kivy.properties.ReferenceListProperty` of\n (:attr:`bar_pos_x`, :attr:`bar_pos_y`)\n '''\n\n bar_margin = NumericProperty(0)\n '''Margin between the bottom / right side of the scrollview when drawing\n the horizontal / vertical scroll bar.\n\n .. versionadded:: 1.2.0\n\n :attr:`bar_margin` is a :class:`~kivy.properties.NumericProperty`, default\n to 0\n '''\n\n effect_cls = ObjectProperty(DampedScrollEffect, allownone=True)\n '''Class effect to instanciate for X and Y axis.\n\n .. versionadded:: 1.7.0\n\n :attr:`effect_cls` is an :class:`~kivy.properties.ObjectProperty` and\n defaults to :class:`DampedScrollEffect`.\n\n .. versionchanged:: 1.8.0\n If you set a string, the :class:`~kivy.factory.Factory` will be used to\n resolve the class.\n\n '''\n\n effect_x = ObjectProperty(None, allownone=True)\n '''Effect to apply for the X axis. If None is set, an instance of\n :attr:`effect_cls` will be created.\n\n .. versionadded:: 1.7.0\n\n :attr:`effect_x` is an :class:`~kivy.properties.ObjectProperty` and\n defaults to None.\n '''\n\n effect_y = ObjectProperty(None, allownone=True)\n '''Effect to apply for the Y axis. If None is set, an instance of\n :attr:`effect_cls` will be created.\n\n .. versionadded:: 1.7.0\n\n :attr:`effect_y` is an :class:`~kivy.properties.ObjectProperty` and\n defaults to None, read-only.\n '''\n\n viewport_size = ListProperty([0, 0])\n '''(internal) Size of the internal viewport. This is the size of your only\n child in the scrollview.\n '''\n\n scroll_type = OptionProperty(['content'], options=(['content'], ['bars'],\n ['bars', 'content'], ['content', 'bars']))\n '''Sets the type of scrolling to use for the content of the scrollview.\n Available options are: ['content'], ['bars'], ['bars', 'content'].\n\n .. 
versionadded:: 1.8.0\n\n :attr:`scroll_type` is a :class:`~kivy.properties.OptionProperty`, defaults\n to ['content'].\n '''\n\n # private, for internal use only\n\n _viewport = ObjectProperty(None, allownone=True)\n _bar_color = ListProperty([0, 0, 0, 0])\n\n def _set_viewport_size(self, instance, value):\n self.viewport_size = value\n\n def on__viewport(self, instance, value):\n if value:\n value.bind(size=self._set_viewport_size)\n self.viewport_size = value.size\n\n def __init__(self, **kwargs):\n self._touch = None\n self._trigger_update_from_scroll = Clock.create_trigger(\n self.update_from_scroll, -1)\n # create a specific canvas for the viewport\n from kivy.graphics import PushMatrix, Translate, PopMatrix, Canvas\n self.canvas_viewport = Canvas()\n self.canvas = Canvas()\n with self.canvas_viewport.before:\n PushMatrix()\n self.g_translate = Translate(0, 0)\n with self.canvas_viewport.after:\n PopMatrix()\n\n super(ScrollView, self).__init__(**kwargs)\n\n self.register_event_type('on_scroll_start')\n self.register_event_type('on_scroll_move')\n self.register_event_type('on_scroll_stop')\n\n # now add the viewport canvas to our canvas\n self.canvas.add(self.canvas_viewport)\n\n effect_cls = self.effect_cls\n if isinstance(effect_cls, string_types):\n effect_cls = Factory.get(effect_cls)\n if self.effect_x is None and effect_cls is not None:\n self.effect_x = effect_cls(target_widget=self._viewport)\n if self.effect_y is None and effect_cls is not None:\n self.effect_y = effect_cls(target_widget=self._viewport)\n self.bind(\n width=self._update_effect_x_bounds,\n height=self._update_effect_y_bounds,\n viewport_size=self._update_effect_bounds,\n _viewport=self._update_effect_widget,\n scroll_x=self._trigger_update_from_scroll,\n scroll_y=self._trigger_update_from_scroll,\n pos=self._trigger_update_from_scroll,\n size=self._trigger_update_from_scroll)\n\n self._update_effect_widget()\n self._update_effect_x_bounds()\n self._update_effect_y_bounds()\n\n def on_effect_x(self, instance, value):\n if value:\n value.bind(scroll=self._update_effect_x)\n value.target_widget = self._viewport\n\n def on_effect_y(self, instance, value):\n if value:\n value.bind(scroll=self._update_effect_y)\n value.target_widget = self._viewport\n\n def on_effect_cls(self, instance, cls):\n if isinstance(cls, string_types):\n cls = Factory.get(cls)\n self.effect_x = cls(target_widget=self._viewport)\n self.effect_x.bind(scroll=self._update_effect_x)\n self.effect_y = cls(target_widget=self._viewport)\n self.effect_y.bind(scroll=self._update_effect_y)\n\n def _update_effect_widget(self, *args):\n if self.effect_x:\n self.effect_x.target_widget = self._viewport\n if self.effect_y:\n self.effect_y.target_widget = self._viewport\n\n def _update_effect_x_bounds(self, *args):\n if not self._viewport or not self.effect_x:\n return\n self.effect_x.min = -(self.viewport_size[0] - self.width)\n self.effect_x.max = 0\n self.effect_x.value = self.effect_x.min * self.scroll_x\n\n def _update_effect_y_bounds(self, *args):\n if not self._viewport or not self.effect_y:\n return\n self.effect_y.min = -(self.viewport_size[1] - self.height)\n self.effect_y.max = 0\n self.effect_y.value = self.effect_y.min * self.scroll_y\n\n def _update_effect_bounds(self, *args):\n if not self._viewport:\n return\n if self.effect_x:\n self._update_effect_x_bounds()\n if self.effect_y:\n self._update_effect_y_bounds()\n\n def _update_effect_x(self, *args):\n vp = self._viewport\n if not vp or not self.effect_x:\n return\n sw = vp.width - 
self.width\n if sw < 1:\n return\n sx = self.effect_x.scroll / float(sw)\n self.scroll_x = -sx\n self._trigger_update_from_scroll()\n\n def _update_effect_y(self, *args):\n vp = self._viewport\n if not vp or not self.effect_y:\n return\n sh = vp.height - self.height\n if sh < 1:\n return\n sy = self.effect_y.scroll / float(sh)\n self.scroll_y = -sy\n self._trigger_update_from_scroll()\n\n def to_local(self, x, y, **k):\n tx, ty = self.g_translate.xy\n return x - tx, y - ty\n\n def to_parent(self, x, y, **k):\n tx, ty = self.g_translate.xy\n return x + tx, y + ty\n\n def simulate_touch_down(self, touch):\n # at this point the touch is in parent coords\n touch.push()\n touch.apply_transform_2d(self.to_local)\n ret = super(ScrollView, self).on_touch_down(touch)\n touch.pop()\n return ret\n\n def on_touch_down(self, touch):\n if self.dispatch('on_scroll_start', touch):\n self._touch = touch\n touch.grab(self)\n return True\n\n def on_scroll_start(self, touch, check_children=True):\n if check_children:\n touch.push()\n touch.apply_transform_2d(self.to_local)\n if self.dispatch_children('on_scroll_start', touch):\n return True\n touch.pop()\n\n if not self.collide_point(*touch.pos):\n touch.ud[self._get_uid('svavoid')] = True\n return\n if self.disabled:\n return True\n if self._touch or (not (self.do_scroll_x or self.do_scroll_y)):\n return self.simulate_touch_down(touch)\n\n # handle mouse scrolling, only if the viewport size is bigger than the\n # scrollview size, and if the user allowed to do it\n vp = self._viewport\n if not vp:\n return True\n scroll_type = self.scroll_type\n ud = touch.ud\n scroll_bar = 'bars' in scroll_type\n\n # check if touch is in bar_x(horizontal) or bay_y(bertical)\n ud['in_bar_x'] = ud['in_bar_y'] = False\n width_scrollable = vp.width > self.width\n height_scrollable = vp.height > self.height\n bar_pos_x = self.bar_pos_x[0]\n bar_pos_y = self.bar_pos_y[0]\n\n d = {'b': True if touch.y < self.y + self.bar_width else False,\n 't': True if touch.y > self.top - self.bar_width else False,\n 'l': True if touch.x < self.x + self.bar_width else False,\n 'r': True if touch.x > self.right - self.bar_width else False}\n if scroll_bar:\n if (width_scrollable and d[bar_pos_x]):\n ud['in_bar_x'] = True\n if (height_scrollable and d[bar_pos_y]):\n ud['in_bar_y'] = True\n\n if vp and 'button' in touch.profile and \\\n touch.button.startswith('scroll'):\n btn = touch.button\n m = sp(self.scroll_wheel_distance)\n e = None\n\n if ((btn == 'scrolldown' and self.scroll_y >= 1) or\n (btn == 'scrollup' and self.scroll_y <= 0) or\n (btn == 'scrollleft' and self.scroll_x >= 1) or\n (btn == 'scrollright' and self.scroll_x <= 0)):\n return False\n\n if (self.effect_x and self.do_scroll_y and height_scrollable\n and btn in ('scrolldown', 'scrollup')):\n e = self.effect_x if ud['in_bar_x'] else self.effect_y\n\n elif (self.effect_y and self.do_scroll_x and width_scrollable\n and btn in ('scrollleft', 'scrollright')):\n e = self.effect_y if ud['in_bar_y'] else self.effect_x\n\n if e:\n if btn in ('scrolldown', 'scrollleft'):\n e.value = max(e.value - m, e.min)\n e.velocity = 0\n elif btn in ('scrollup', 'scrollright'):\n e.value = min(e.value + m, e.max)\n e.velocity = 0\n touch.ud[self._get_uid('svavoid')] = True\n e.trigger_velocity_update()\n return True\n\n # no mouse scrolling, so the user is going to drag the scrollview with\n # this touch.\n self._touch = touch\n uid = self._get_uid()\n\n ud[uid] = {\n 'mode': 'unknown',\n 'dx': 0,\n 'dy': 0,\n 'user_stopped': False,\n 'frames': 
Clock.frames,\n 'time': touch.time_start}\n\n if self.do_scroll_x and self.effect_x and not ud['in_bar_x']:\n self.effect_x.start(touch.x)\n self._scroll_x_mouse = self.scroll_x\n if self.do_scroll_y and self.effect_y and not ud['in_bar_y']:\n self.effect_y.start(touch.y)\n self._scroll_y_mouse = self.scroll_y\n\n if (ud.get('in_bar_x', False) or ud.get('in_bar_y', False)):\n return True\n if scroll_type == ['bars']:\n # touch is in parent, but _change_touch_mode expects window coords\n touch.push()\n touch.apply_transform_2d(self.to_local)\n touch.apply_transform_2d(self.to_window)\n self._change_touch_mode()\n touch.pop()\n return False\n else:\n Clock.schedule_once(self._change_touch_mode,\n self.scroll_timeout / 1000.)\n return True\n\n def on_touch_move(self, touch):\n if self._touch is not touch:\n # touch is in parent\n touch.push()\n touch.apply_transform_2d(self.to_local)\n super(ScrollView, self).on_touch_move(touch)\n touch.pop()\n return self._get_uid() in touch.ud\n if touch.grab_current is not self:\n return True\n\n touch.ud['sv.handled'] = {'x': False, 'y': False}\n if self.dispatch('on_scroll_move', touch):\n return True\n\n def on_scroll_move(self, touch):\n if self._get_uid('svavoid') in touch.ud:\n return False\n\n touch.push()\n touch.apply_transform_2d(self.to_local)\n if self.dispatch_children('on_scroll_move', touch):\n return True\n touch.pop()\n\n rv = True\n\n uid = self._get_uid()\n if not uid in touch.ud:\n self._touch = False\n return self.on_scroll_start(touch, False)\n ud = touch.ud[uid]\n mode = ud['mode']\n\n # check if the minimum distance has been travelled\n if mode == 'unknown' or mode == 'scroll':\n if not touch.ud['sv.handled']['x'] and self.do_scroll_x \\\n and self.effect_x:\n width = self.width\n if touch.ud.get('in_bar_x', False):\n dx = touch.dx / float(width - width * self.hbar[1])\n self.scroll_x = min(max(self.scroll_x + dx, 0.), 1.)\n self._trigger_update_from_scroll()\n else:\n if self.scroll_type != ['bars']:\n self.effect_x.update(touch.x)\n if self.scroll_x < 0 or self.scroll_x > 1:\n rv = False\n else:\n touch.ud['sv.handled']['x'] = True\n if not touch.ud['sv.handled']['y'] and self.do_scroll_y \\\n and self.effect_y:\n height = self.height\n if touch.ud.get('in_bar_y', False):\n dy = touch.dy / float(height - height * self.vbar[1])\n self.scroll_y = min(max(self.scroll_y + dy, 0.), 1.)\n self._trigger_update_from_scroll()\n else:\n if self.scroll_type != ['bars']:\n self.effect_y.update(touch.y)\n if self.scroll_y < 0 or self.scroll_y > 1:\n rv = False\n else:\n touch.ud['sv.handled']['y'] = True\n\n if mode == 'unknown':\n ud['dx'] += abs(touch.dx)\n ud['dy'] += abs(touch.dy)\n if ud['dx'] > self.scroll_distance:\n if not self.do_scroll_x:\n # touch is in parent, but _change expects window coords\n touch.push()\n touch.apply_transform_2d(self.to_local)\n touch.apply_transform_2d(self.to_window)\n self._change_touch_mode()\n touch.pop()\n return\n mode = 'scroll'\n\n if ud['dy'] > self.scroll_distance:\n if not self.do_scroll_y:\n # touch is in parent, but _change expects window coords\n touch.push()\n touch.apply_transform_2d(self.to_local)\n touch.apply_transform_2d(self.to_window)\n self._change_touch_mode()\n touch.pop()\n return\n mode = 'scroll'\n ud['mode'] = mode\n\n if mode == 'scroll':\n ud['dt'] = touch.time_update - ud['time']\n ud['time'] = touch.time_update\n ud['user_stopped'] = True\n\n return rv\n\n def on_touch_up(self, touch):\n if self._touch is not touch and self.uid not in touch.ud:\n # touch is in parents\n 
touch.push()\n touch.apply_transform_2d(self.to_local)\n if super(ScrollView, self).on_touch_up(touch):\n return True\n touch.pop()\n return False\n\n if self.dispatch('on_scroll_stop', touch):\n touch.ungrab(self)\n return True\n\n def on_scroll_stop(self, touch, check_children=True):\n self._touch = None\n\n if check_children:\n touch.push()\n touch.apply_transform_2d(self.to_local)\n if self.dispatch_children('on_scroll_stop', touch):\n return True\n touch.pop()\n\n if self._get_uid('svavoid') in touch.ud:\n return\n if self._get_uid() not in touch.ud:\n return False\n\n self._touch = None\n uid = self._get_uid()\n ud = touch.ud[uid]\n if self.do_scroll_x and self.effect_x:\n if not touch.ud.get('in_bar_x', False) and\\\n self.scroll_type != ['bars']:\n self.effect_x.stop(touch.x)\n if self.do_scroll_y and self.effect_y and\\\n self.scroll_type != ['bars']:\n if not touch.ud.get('in_bar_y', False):\n self.effect_y.stop(touch.y)\n if ud['mode'] == 'unknown':\n # we must do the click at least..\n # only send the click if it was not a click to stop\n # autoscrolling\n if not ud['user_stopped']:\n self.simulate_touch_down(touch)\n Clock.schedule_once(partial(self._do_touch_up, touch), .2)\n Clock.unschedule(self._update_effect_bounds)\n Clock.schedule_once(self._update_effect_bounds)\n\n # if we do mouse scrolling, always accept it\n if 'button' in touch.profile and touch.button.startswith('scroll'):\n return True\n\n return self._get_uid() in touch.ud\n\n def convert_distance_to_scroll(self, dx, dy):\n '''Convert a distance in pixels to a scroll distance, depending on the\n content size and the scrollview size.\n\n The result will be a tuple of scroll distance that can be added to\n :data:`scroll_x` and :data:`scroll_y`\n '''\n if not self._viewport:\n return 0, 0\n vp = self._viewport\n if vp.width > self.width:\n sw = vp.width - self.width\n sx = dx / float(sw)\n else:\n sx = 0\n if vp.height > self.height:\n sh = vp.height - self.height\n sy = dy / float(sh)\n else:\n sy = 1\n return sx, sy\n\n def update_from_scroll(self, *largs):\n '''Force the reposition of the content, according to current value of\n :attr:`scroll_x` and :attr:`scroll_y`.\n\n This method is automatically called when one of the :attr:`scroll_x`,\n :attr:`scroll_y`, :attr:`pos` or :attr:`size` properties change, or\n if the size of the content changes.\n '''\n if not self._viewport:\n return\n vp = self._viewport\n\n # update from size_hint\n if vp.size_hint_x is not None:\n vp.width = vp.size_hint_x * self.width\n if vp.size_hint_y is not None:\n vp.height = vp.size_hint_y * self.height\n\n if vp.width > self.width:\n sw = vp.width - self.width\n x = self.x - self.scroll_x * sw\n else:\n x = self.x\n if vp.height > self.height:\n sh = vp.height - self.height\n y = self.y - self.scroll_y * sh\n else:\n y = self.top - vp.height\n\n # from 1.8.0, we now use a matrix by default, instead of moving the\n # widget position behind. 
We set it here, but it will be a no-op most of\n # the time.\n vp.pos = 0, 0\n self.g_translate.xy = x, y\n\n # New in 1.2.0, show bar when scrolling happens and (changed in 1.9.0)\n # fade to bar_inactive_color when no scroll is happening.\n Clock.unschedule(self._bind_inactive_bar_color)\n self.unbind(bar_inactive_color=self._change_bar_color)\n Animation.stop_all(self, '_bar_color')\n self.bind(bar_color=self._change_bar_color)\n self._bar_color = self.bar_color\n Clock.schedule_once(self._bind_inactive_bar_color, .5)\n\n def _bind_inactive_bar_color(self, *l):\n self.unbind(bar_color=self._change_bar_color)\n self.bind(bar_inactive_color=self._change_bar_color)\n Animation(\n _bar_color=self.bar_inactive_color, d=.5, t='out_quart').start(self)\n\n def _change_bar_color(self, inst, value):\n self._bar_color = value\n\n #\n # Private\n #\n def add_widget(self, widget, index=0):\n if self._viewport:\n raise Exception('ScrollView accept only one widget')\n canvas = self.canvas\n self.canvas = self.canvas_viewport\n super(ScrollView, self).add_widget(widget, index)\n self.canvas = canvas\n self._viewport = widget\n widget.bind(size=self._trigger_update_from_scroll)\n self._trigger_update_from_scroll()\n\n def remove_widget(self, widget):\n canvas = self.canvas\n self.canvas = self.canvas_viewport\n super(ScrollView, self).remove_widget(widget)\n self.canvas = canvas\n if widget is self._viewport:\n self._viewport = None\n\n def _get_uid(self, prefix='sv'):\n return '{0}.{1}'.format(prefix, self.uid)\n\n def _change_touch_mode(self, *largs):\n if not self._touch:\n return\n uid = self._get_uid()\n touch = self._touch\n ud = touch.ud[uid]\n if ud['mode'] != 'unknown' or ud['user_stopped']:\n return\n diff_frames = Clock.frames - ud['frames']\n\n # in order to be able to scroll on very slow devices, let at least 3\n # frames displayed to accumulate some velocity. And then, change the\n # touch mode. Otherwise, we might never be able to compute velocity, and\n # no way to scroll it. See #1464 and #1499\n if diff_frames < 3:\n Clock.schedule_once(self._change_touch_mode, 0)\n return\n\n if self.do_scroll_x and self.effect_x:\n self.effect_x.cancel()\n if self.do_scroll_y and self.effect_y:\n self.effect_y.cancel()\n # XXX the next line was in the condition. 
But this stop\n # the possibily to \"drag\" an object out of the scrollview in the\n # non-used direction: if you have an horizontal scrollview, a\n # vertical gesture will not \"stop\" the scroll view to look for an\n # horizontal gesture, until the timeout is done.\n # and touch.dx + touch.dy == 0:\n touch.ungrab(self)\n self._touch = None\n # touch is in window coords\n touch.push()\n touch.apply_transform_2d(self.to_widget)\n touch.apply_transform_2d(self.to_parent)\n self.simulate_touch_down(touch)\n touch.pop()\n return\n\n def _do_touch_up(self, touch, *largs):\n # touch is in window coords\n touch.push()\n touch.apply_transform_2d(self.to_widget)\n super(ScrollView, self).on_touch_up(touch)\n touch.pop()\n # don't forget about grab event!\n for x in touch.grab_list[:]:\n touch.grab_list.remove(x)\n x = x()\n if not x:\n continue\n touch.grab_current = x\n # touch is in window coords\n touch.push()\n touch.apply_transform_2d(self.to_widget)\n super(ScrollView, self).on_touch_up(touch)\n touch.pop()\n touch.grab_current = None\n\n\nif __name__ == '__main__':\n from kivy.app import App\n\n from kivy.uix.gridlayout import GridLayout\n from kivy.uix.button import Button\n\n class ScrollViewApp(App):\n\n def build(self):\n layout1 = GridLayout(cols=4, spacing=10, size_hint=(None, None))\n layout1.bind(minimum_height=layout1.setter('height'),\n minimum_width=layout1.setter('width'))\n for i in range(40):\n btn = Button(text=str(i), size_hint=(None, None),\n size=(200, 100))\n layout1.add_widget(btn)\n scrollview1 = ScrollView(bar_width='2dp')\n scrollview1.add_widget(layout1)\n\n layout2 = GridLayout(cols=4, spacing=10, size_hint=(None, None))\n layout2.bind(minimum_height=layout2.setter('height'),\n minimum_width=layout2.setter('width'))\n for i in range(40):\n btn = Button(text=str(i), size_hint=(None, None),\n size=(200, 100))\n layout2.add_widget(btn)\n scrollview2 = ScrollView(scroll_type=['bars'],\n bar_width='9dp',\n scroll_wheel_distance=100)\n scrollview2.add_widget(layout2)\n\n root = GridLayout(cols=2)\n root.add_widget(scrollview1)\n root.add_widget(scrollview2)\n return root\n\n ScrollViewApp().run()\n", "path": "kivy/uix/scrollview.py"}], "after_files": [{"content": "'''Scroll View\n===========\n\n.. versionadded:: 1.0.4\n\nThe :class:`ScrollView` widget provides a scrollable/pannable viewport that is\nclipped at the scrollview's bounding box.\n\n\nScrolling Behavior\n------------------\n\nThe ScrollView accepts only one child and applies a viewport/window to\nit according to the :attr:`ScrollView.scroll_x` and\n:attr:`ScrollView.scroll_y` properties. Touches are analyzed to\ndetermine if the user wants to scroll or control the child in some\nother manner - you cannot do both at the same time. To determine if\ninteraction is a scrolling gesture, these properties are used:\n\n - :attr:`ScrollView.scroll_distance`: the minimum distance to travel,\n defaults to 20 pixels.\n - :attr:`ScrollView.scroll_timeout`: the maximum time period, defaults\n to 250 milliseconds.\n\nIf a touch travels :attr:`~ScrollView.scroll_distance` pixels within the\n:attr:`~ScrollView.scroll_timeout` period, it is recognized as a scrolling\ngesture and translation (scroll/pan) will begin. If the timeout occurs, the\ntouch down event is dispatched to the child instead (no translation).\n\nThe default value for those settings can be changed in the configuration file::\n\n [widgets]\n scroll_timeout = 250\n scroll_distance = 20\n\n.. 
versionadded:: 1.1.1\n\n ScrollView now animates scrolling in Y when a mousewheel is used.\n\n\nLimiting to the X or Y Axis\n---------------------------\n\nBy default, the ScrollView allows scrolling in both the X and Y axes. You can\nexplicitly disable scrolling on an axis by setting\n:attr:`ScrollView.do_scroll_x` or :attr:`ScrollView.do_scroll_y` to False.\n\n\nManaging the Content Size and Position\n--------------------------------------\n\nScrollView manages the position of its children similarly to a\nRelativeLayout (see :mod:`~kivy.uix.relativelayout`) but not the size. You must\ncarefully specify the `size_hint` of your content to get the desired\nscroll/pan effect.\n\nBy default, size_hint is (1, 1), so the content size will fit your ScrollView\nexactly (you will have nothing to scroll). You must deactivate at least one of\nthe size_hint instructions (x or y) of the child to enable scrolling.\n\nTo scroll a :class:`GridLayout` on Y-axis/vertically, set the child's width\nidentical to that of the ScrollView (size_hint_x=1, default), and set the\nsize_hint_y property to None::\n\n layout = GridLayout(cols=1, spacing=10, size_hint_y=None)\n # Make sure the height is such that there is something to scroll.\n layout.bind(minimum_height=layout.setter('height'))\n for i in range(30):\n btn = Button(text=str(i), size_hint_y=None, height=40)\n layout.add_widget(btn)\n root = ScrollView(size_hint=(None, None), size=(400, 400))\n root.add_widget(layout)\n\n\nOverscroll Effects\n------------------\n\n.. versionadded:: 1.7.0\n\nWhen scrolling would exceed the bounds of the :class:`ScrollView`, it\nuses a :class:`~kivy.effects.scroll.ScrollEffect` to handle the\noverscroll. These effects can perform actions like bouncing back,\nchanging opacity, or simply preventing scrolling beyond the normal\nboundaries. Note that complex effects may perform many computations,\nwhich can be slow on weaker hardware.\n\nYou can change what effect is being used by setting\n:attr:`ScrollView.effect_cls` to any effect class. Current options\ninclude:\n\n - :class:`~kivy.effects.scroll.ScrollEffect`: Does not allow\n scrolling beyond the :class:`ScrollView` boundaries.\n - :class:`~kivy.effects.dampedscroll.DampedScrollEffect`: The\n current default. Allows the user to scroll beyond the normal\n boundaries, but has the content spring back once the\n touch/click is released.\n - :class:`~kivy.effects.opacityscroll.OpacityScrollEffect`: Similar\n to the :class:`~kivy.effect.dampedscroll.DampedScrollEffect`, but\n also reduces opacity during overscroll.\n\nYou can also create your own scroll effect by subclassing one of these,\nthen pass it as the :attr:`~ScrollView.effect_cls` in the same way.\n\nAlternatively, you can set :attr:`ScrollView.effect_x` and/or\n:attr:`ScrollView.effect_y` to an *instance* of the effect you want to\nuse. 
This will override the default effect set in\n:attr:`ScrollView.effect_cls`.\n\nAll the effects are located in the :mod:`kivy.effects`.\n\n'''\n\n__all__ = ('ScrollView', )\n\nfrom functools import partial\nfrom kivy.animation import Animation\nfrom kivy.compat import string_types\nfrom kivy.config import Config\nfrom kivy.clock import Clock\nfrom kivy.factory import Factory\nfrom kivy.uix.stencilview import StencilView\nfrom kivy.metrics import sp\nfrom kivy.effects.dampedscroll import DampedScrollEffect\nfrom kivy.properties import NumericProperty, BooleanProperty, AliasProperty, \\\n ObjectProperty, ListProperty, ReferenceListProperty, OptionProperty\n\n\n# When we are generating documentation, Config doesn't exist\n_scroll_timeout = _scroll_distance = 0\nif Config:\n _scroll_timeout = Config.getint('widgets', 'scroll_timeout')\n _scroll_distance = sp(Config.getint('widgets', 'scroll_distance'))\n\n\nclass ScrollView(StencilView):\n '''ScrollView class. See module documentation for more information.\n\n .. versionchanged:: 1.7.0\n `auto_scroll`, `scroll_friction`, `scroll_moves`, `scroll_stoptime' has\n been deprecated, use :attr:`effect_cls` instead.\n '''\n\n scroll_distance = NumericProperty(_scroll_distance)\n '''Distance to move before scrolling the :class:`ScrollView`, in pixels. As\n soon as the distance has been traveled, the :class:`ScrollView` will start\n to scroll, and no touch event will go to children.\n It is advisable that you base this value on the dpi of your target device's\n screen.\n\n :attr:`scroll_distance` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 20 (pixels), according to the default value in user\n configuration.\n '''\n\n scroll_wheel_distance = NumericProperty(20)\n '''Distance to move when scrolling with a mouse wheel.\n It is advisable that you base this value on the dpi of your target device's\n screen.\n\n .. versionadded:: 1.8.0\n\n :attr:`scroll_wheel_distance` is a\n :class:`~kivy.properties.NumericProperty` , defaults to 20 pixels.\n '''\n\n scroll_timeout = NumericProperty(_scroll_timeout)\n '''Timeout allowed to trigger the :attr:`scroll_distance`, in milliseconds.\n If the user has not moved :attr:`scroll_distance` within the timeout,\n the scrolling will be disabled, and the touch event will go to the\n children.\n\n :attr:`scroll_timeout` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 55 (milliseconds) according to the default value in user\n configuration.\n\n .. versionchanged:: 1.5.0\n Default value changed from 250 to 55.\n '''\n\n scroll_x = NumericProperty(0.)\n '''X scrolling value, between 0 and 1. If 0, the content's left side will\n touch the left side of the ScrollView. If 1, the content's right side will\n touch the right side.\n\n This property is controled by :class:`ScrollView` only if\n :attr:`do_scroll_x` is True.\n\n :attr:`scroll_x` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 0.\n '''\n\n scroll_y = NumericProperty(1.)\n '''Y scrolling value, between 0 and 1. If 0, the content's bottom side will\n touch the bottom side of the ScrollView. 
If 1, the content's top side will\n touch the top side.\n\n This property is controled by :class:`ScrollView` only if\n :attr:`do_scroll_y` is True.\n\n :attr:`scroll_y` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 1.\n '''\n\n do_scroll_x = BooleanProperty(True)\n '''Allow scroll on X axis.\n\n :attr:`do_scroll_x` is a :class:`~kivy.properties.BooleanProperty` and\n defaults to True.\n '''\n\n do_scroll_y = BooleanProperty(True)\n '''Allow scroll on Y axis.\n\n :attr:`do_scroll_y` is a :class:`~kivy.properties.BooleanProperty` and\n defaults to True.\n '''\n\n def _get_do_scroll(self):\n return (self.do_scroll_x, self.do_scroll_y)\n\n def _set_do_scroll(self, value):\n if type(value) in (list, tuple):\n self.do_scroll_x, self.do_scroll_y = value\n else:\n self.do_scroll_x = self.do_scroll_y = bool(value)\n do_scroll = AliasProperty(_get_do_scroll, _set_do_scroll,\n bind=('do_scroll_x', 'do_scroll_y'))\n '''Allow scroll on X or Y axis.\n\n :attr:`do_scroll` is a :class:`~kivy.properties.AliasProperty` of\n (:attr:`do_scroll_x` + :attr:`do_scroll_y`)\n '''\n\n def _get_vbar(self):\n # must return (y, height) in %\n # calculate the viewport size / scrollview size %\n if self._viewport is None:\n return 0, 1.\n vh = self._viewport.height\n h = self.height\n if vh < h or vh == 0:\n return 0, 1.\n ph = max(0.01, h / float(vh))\n sy = min(1.0, max(0.0, self.scroll_y))\n py = (1. - ph) * sy\n return (py, ph)\n\n vbar = AliasProperty(_get_vbar, None, bind=(\n 'scroll_y', '_viewport', 'viewport_size'))\n '''Return a tuple of (position, size) of the vertical scrolling bar.\n\n .. versionadded:: 1.2.0\n\n The position and size are normalized between 0-1, and represent a\n percentage of the current scrollview height. This property is used\n internally for drawing the little vertical bar when you're scrolling.\n\n :attr:`vbar` is a :class:`~kivy.properties.AliasProperty`, readonly.\n '''\n\n def _get_hbar(self):\n # must return (x, width) in %\n # calculate the viewport size / scrollview size %\n if self._viewport is None:\n return 0, 1.\n vw = self._viewport.width\n w = self.width\n if vw < w or vw == 0:\n return 0, 1.\n pw = max(0.01, w / float(vw))\n sx = min(1.0, max(0.0, self.scroll_x))\n px = (1. - pw) * sx\n return (px, pw)\n\n hbar = AliasProperty(_get_hbar, None, bind=(\n 'scroll_x', '_viewport', 'viewport_size'))\n '''Return a tuple of (position, size) of the horizontal scrolling bar.\n\n .. versionadded:: 1.2.0\n\n The position and size are normalized between 0-1, and represent a\n percentage of the current scrollview height. This property is used\n internally for drawing the little horizontal bar when you're scrolling.\n\n :attr:`vbar` is a :class:`~kivy.properties.AliasProperty`, readonly.\n '''\n\n bar_color = ListProperty([.7, .7, .7, .9])\n '''Color of horizontal / vertical scroll bar, in RGBA format.\n\n .. versionadded:: 1.2.0\n\n :attr:`bar_color` is a :class:`~kivy.properties.ListProperty` and defaults\n to [.7, .7, .7, .9].\n '''\n\n bar_inactive_color = ListProperty([.7, .7, .7, .2])\n '''Color of horizontal / vertical scroll bar (in RGBA format), when no\n scroll is happening.\n\n .. versionadded:: 1.9.0\n\n :attr:`bar_inactive_color` is a\n :class:`~kivy.properties.ListProperty` and defaults to [.7, .7, .7, .2].\n '''\n\n bar_width = NumericProperty('2dp')\n '''Width of the horizontal / vertical scroll bar. The width is interpreted\n as a height for the horizontal bar.\n\n .. 
versionadded:: 1.2.0\n\n :attr:`bar_width` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 2.\n '''\n\n bar_pos_x = OptionProperty('bottom', options=('top', 'bottom'))\n '''Which side of the ScrollView the horizontal scroll bar should go\n on. Possible values are 'top' and 'bottom'.\n\n .. versionadded:: 1.8.0\n\n :attr:`bar_pos_x` is an :class:`~kivy.properties.OptionProperty`,\n default to 'bottom'\n\n '''\n\n bar_pos_y = OptionProperty('right', options=('left', 'right'))\n '''Which side of the ScrollView the vertical scroll bar should go\n on. Possible values are 'left' and 'right'.\n\n .. versionadded:: 1.8.0\n\n :attr:`bar_pos_y` is an :class:`~kivy.properties.OptionProperty`,\n default to 'right'\n\n '''\n\n bar_pos = ReferenceListProperty(bar_pos_x, bar_pos_y)\n '''Which side of the scroll view to place each of the bars on.\n\n :attr:`bar_pos` is a :class:`~kivy.properties.ReferenceListProperty` of\n (:attr:`bar_pos_x`, :attr:`bar_pos_y`)\n '''\n\n bar_margin = NumericProperty(0)\n '''Margin between the bottom / right side of the scrollview when drawing\n the horizontal / vertical scroll bar.\n\n .. versionadded:: 1.2.0\n\n :attr:`bar_margin` is a :class:`~kivy.properties.NumericProperty`, default\n to 0\n '''\n\n effect_cls = ObjectProperty(DampedScrollEffect, allownone=True)\n '''Class effect to instanciate for X and Y axis.\n\n .. versionadded:: 1.7.0\n\n :attr:`effect_cls` is an :class:`~kivy.properties.ObjectProperty` and\n defaults to :class:`DampedScrollEffect`.\n\n .. versionchanged:: 1.8.0\n If you set a string, the :class:`~kivy.factory.Factory` will be used to\n resolve the class.\n\n '''\n\n effect_x = ObjectProperty(None, allownone=True)\n '''Effect to apply for the X axis. If None is set, an instance of\n :attr:`effect_cls` will be created.\n\n .. versionadded:: 1.7.0\n\n :attr:`effect_x` is an :class:`~kivy.properties.ObjectProperty` and\n defaults to None.\n '''\n\n effect_y = ObjectProperty(None, allownone=True)\n '''Effect to apply for the Y axis. If None is set, an instance of\n :attr:`effect_cls` will be created.\n\n .. versionadded:: 1.7.0\n\n :attr:`effect_y` is an :class:`~kivy.properties.ObjectProperty` and\n defaults to None, read-only.\n '''\n\n viewport_size = ListProperty([0, 0])\n '''(internal) Size of the internal viewport. This is the size of your only\n child in the scrollview.\n '''\n\n scroll_type = OptionProperty(['content'], options=(['content'], ['bars'],\n ['bars', 'content'], ['content', 'bars']))\n '''Sets the type of scrolling to use for the content of the scrollview.\n Available options are: ['content'], ['bars'], ['bars', 'content'].\n\n .. 
versionadded:: 1.8.0\n\n :attr:`scroll_type` is a :class:`~kivy.properties.OptionProperty`, defaults\n to ['content'].\n '''\n\n # private, for internal use only\n\n _viewport = ObjectProperty(None, allownone=True)\n _bar_color = ListProperty([0, 0, 0, 0])\n\n def _set_viewport_size(self, instance, value):\n self.viewport_size = value\n\n def on__viewport(self, instance, value):\n if value:\n value.bind(size=self._set_viewport_size)\n self.viewport_size = value.size\n\n def __init__(self, **kwargs):\n self._touch = None\n self._trigger_update_from_scroll = Clock.create_trigger(\n self.update_from_scroll, -1)\n # create a specific canvas for the viewport\n from kivy.graphics import PushMatrix, Translate, PopMatrix, Canvas\n self.canvas_viewport = Canvas()\n self.canvas = Canvas()\n with self.canvas_viewport.before:\n PushMatrix()\n self.g_translate = Translate(0, 0)\n with self.canvas_viewport.after:\n PopMatrix()\n\n super(ScrollView, self).__init__(**kwargs)\n\n self.register_event_type('on_scroll_start')\n self.register_event_type('on_scroll_move')\n self.register_event_type('on_scroll_stop')\n\n # now add the viewport canvas to our canvas\n self.canvas.add(self.canvas_viewport)\n\n effect_cls = self.effect_cls\n if isinstance(effect_cls, string_types):\n effect_cls = Factory.get(effect_cls)\n if self.effect_x is None and effect_cls is not None:\n self.effect_x = effect_cls(target_widget=self._viewport)\n if self.effect_y is None and effect_cls is not None:\n self.effect_y = effect_cls(target_widget=self._viewport)\n self.bind(\n width=self._update_effect_x_bounds,\n height=self._update_effect_y_bounds,\n viewport_size=self._update_effect_bounds,\n _viewport=self._update_effect_widget,\n scroll_x=self._trigger_update_from_scroll,\n scroll_y=self._trigger_update_from_scroll,\n pos=self._trigger_update_from_scroll,\n size=self._trigger_update_from_scroll)\n\n self._update_effect_widget()\n self._update_effect_x_bounds()\n self._update_effect_y_bounds()\n\n def on_effect_x(self, instance, value):\n if value:\n value.bind(scroll=self._update_effect_x)\n value.target_widget = self._viewport\n\n def on_effect_y(self, instance, value):\n if value:\n value.bind(scroll=self._update_effect_y)\n value.target_widget = self._viewport\n\n def on_effect_cls(self, instance, cls):\n if isinstance(cls, string_types):\n cls = Factory.get(cls)\n self.effect_x = cls(target_widget=self._viewport)\n self.effect_x.bind(scroll=self._update_effect_x)\n self.effect_y = cls(target_widget=self._viewport)\n self.effect_y.bind(scroll=self._update_effect_y)\n\n def _update_effect_widget(self, *args):\n if self.effect_x:\n self.effect_x.target_widget = self._viewport\n if self.effect_y:\n self.effect_y.target_widget = self._viewport\n\n def _update_effect_x_bounds(self, *args):\n if not self._viewport or not self.effect_x:\n return\n self.effect_x.min = -(self.viewport_size[0] - self.width)\n self.effect_x.max = 0\n self.effect_x.value = self.effect_x.min * self.scroll_x\n\n def _update_effect_y_bounds(self, *args):\n if not self._viewport or not self.effect_y:\n return\n self.effect_y.min = -(self.viewport_size[1] - self.height)\n self.effect_y.max = 0\n self.effect_y.value = self.effect_y.min * self.scroll_y\n\n def _update_effect_bounds(self, *args):\n if not self._viewport:\n return\n if self.effect_x:\n self._update_effect_x_bounds()\n if self.effect_y:\n self._update_effect_y_bounds()\n\n def _update_effect_x(self, *args):\n vp = self._viewport\n if not vp or not self.effect_x:\n return\n sw = vp.width - 
self.width\n if sw < 1:\n return\n sx = self.effect_x.scroll / float(sw)\n self.scroll_x = -sx\n self._trigger_update_from_scroll()\n\n def _update_effect_y(self, *args):\n vp = self._viewport\n if not vp or not self.effect_y:\n return\n sh = vp.height - self.height\n if sh < 1:\n return\n sy = self.effect_y.scroll / float(sh)\n self.scroll_y = -sy\n self._trigger_update_from_scroll()\n\n def to_local(self, x, y, **k):\n tx, ty = self.g_translate.xy\n return x - tx, y - ty\n\n def to_parent(self, x, y, **k):\n tx, ty = self.g_translate.xy\n return x + tx, y + ty\n\n def simulate_touch_down(self, touch):\n # at this point the touch is in parent coords\n touch.push()\n touch.apply_transform_2d(self.to_local)\n ret = super(ScrollView, self).on_touch_down(touch)\n touch.pop()\n return ret\n\n def on_touch_down(self, touch):\n if self.dispatch('on_scroll_start', touch):\n self._touch = touch\n touch.grab(self)\n return True\n\n def on_scroll_start(self, touch, check_children=True):\n if check_children:\n touch.push()\n touch.apply_transform_2d(self.to_local)\n if self.dispatch_children('on_scroll_start', touch):\n return True\n touch.pop()\n\n if not self.collide_point(*touch.pos):\n touch.ud[self._get_uid('svavoid')] = True\n return\n if self.disabled:\n return True\n if self._touch or (not (self.do_scroll_x or self.do_scroll_y)):\n return self.simulate_touch_down(touch)\n\n # handle mouse scrolling, only if the viewport size is bigger than the\n # scrollview size, and if the user allowed to do it\n vp = self._viewport\n if not vp:\n return True\n scroll_type = self.scroll_type\n ud = touch.ud\n scroll_bar = 'bars' in scroll_type\n\n # check if touch is in bar_x(horizontal) or bay_y(bertical)\n ud['in_bar_x'] = ud['in_bar_y'] = False\n width_scrollable = vp.width > self.width\n height_scrollable = vp.height > self.height\n bar_pos_x = self.bar_pos_x[0]\n bar_pos_y = self.bar_pos_y[0]\n\n d = {'b': True if touch.y < self.y + self.bar_width else False,\n 't': True if touch.y > self.top - self.bar_width else False,\n 'l': True if touch.x < self.x + self.bar_width else False,\n 'r': True if touch.x > self.right - self.bar_width else False}\n if scroll_bar:\n if (width_scrollable and d[bar_pos_x]):\n ud['in_bar_x'] = True\n if (height_scrollable and d[bar_pos_y]):\n ud['in_bar_y'] = True\n\n if vp and 'button' in touch.profile and \\\n touch.button.startswith('scroll'):\n btn = touch.button\n m = sp(self.scroll_wheel_distance)\n e = None\n\n if ((btn == 'scrolldown' and self.scroll_y >= 1) or\n (btn == 'scrollup' and self.scroll_y <= 0) or\n (btn == 'scrollleft' and self.scroll_x >= 1) or\n (btn == 'scrollright' and self.scroll_x <= 0)):\n return False\n\n if (self.effect_x and self.do_scroll_y and height_scrollable\n and btn in ('scrolldown', 'scrollup')):\n e = self.effect_x if ud['in_bar_x'] else self.effect_y\n\n elif (self.effect_y and self.do_scroll_x and width_scrollable\n and btn in ('scrollleft', 'scrollright')):\n e = self.effect_y if ud['in_bar_y'] else self.effect_x\n\n if e:\n if btn in ('scrolldown', 'scrollleft'):\n e.value = max(e.value - m, e.min)\n e.velocity = 0\n elif btn in ('scrollup', 'scrollright'):\n e.value = min(e.value + m, e.max)\n e.velocity = 0\n touch.ud[self._get_uid('svavoid')] = True\n e.trigger_velocity_update()\n return True\n\n # no mouse scrolling, so the user is going to drag the scrollview with\n # this touch.\n self._touch = touch\n uid = self._get_uid()\n\n ud[uid] = {\n 'mode': 'unknown',\n 'dx': 0,\n 'dy': 0,\n 'user_stopped': False,\n 'frames': 
Clock.frames,\n 'time': touch.time_start}\n\n if self.do_scroll_x and self.effect_x and not ud['in_bar_x']:\n self.effect_x.start(touch.x)\n self._scroll_x_mouse = self.scroll_x\n if self.do_scroll_y and self.effect_y and not ud['in_bar_y']:\n self.effect_y.start(touch.y)\n self._scroll_y_mouse = self.scroll_y\n\n if (ud.get('in_bar_x', False) or ud.get('in_bar_y', False)):\n return True\n if scroll_type == ['bars']:\n # touch is in parent, but _change_touch_mode expects window coords\n touch.push()\n touch.apply_transform_2d(self.to_local)\n touch.apply_transform_2d(self.to_window)\n self._change_touch_mode()\n touch.pop()\n return False\n else:\n Clock.schedule_once(self._change_touch_mode,\n self.scroll_timeout / 1000.)\n return True\n\n def on_touch_move(self, touch):\n if self._touch is not touch:\n # touch is in parent\n touch.push()\n touch.apply_transform_2d(self.to_local)\n super(ScrollView, self).on_touch_move(touch)\n touch.pop()\n return self._get_uid() in touch.ud\n if touch.grab_current is not self:\n return True\n\n if not (self.do_scroll_y or self.do_scroll_x):\n return super(ScrollView, self).on_touch_move(touch)\n\n touch.ud['sv.handled'] = {'x': False, 'y': False}\n if self.dispatch('on_scroll_move', touch):\n return True\n\n def on_scroll_move(self, touch):\n if self._get_uid('svavoid') in touch.ud:\n return False\n\n touch.push()\n touch.apply_transform_2d(self.to_local)\n if self.dispatch_children('on_scroll_move', touch):\n return True\n touch.pop()\n\n rv = True\n\n uid = self._get_uid()\n if not uid in touch.ud:\n self._touch = False\n return self.on_scroll_start(touch, False)\n ud = touch.ud[uid]\n mode = ud['mode']\n\n # check if the minimum distance has been travelled\n if mode == 'unknown' or mode == 'scroll':\n if not touch.ud['sv.handled']['x'] and self.do_scroll_x \\\n and self.effect_x:\n width = self.width\n if touch.ud.get('in_bar_x', False):\n dx = touch.dx / float(width - width * self.hbar[1])\n self.scroll_x = min(max(self.scroll_x + dx, 0.), 1.)\n self._trigger_update_from_scroll()\n else:\n if self.scroll_type != ['bars']:\n self.effect_x.update(touch.x)\n if self.scroll_x < 0 or self.scroll_x > 1:\n rv = False\n else:\n touch.ud['sv.handled']['x'] = True\n if not touch.ud['sv.handled']['y'] and self.do_scroll_y \\\n and self.effect_y:\n height = self.height\n if touch.ud.get('in_bar_y', False):\n dy = touch.dy / float(height - height * self.vbar[1])\n self.scroll_y = min(max(self.scroll_y + dy, 0.), 1.)\n self._trigger_update_from_scroll()\n else:\n if self.scroll_type != ['bars']:\n self.effect_y.update(touch.y)\n if self.scroll_y < 0 or self.scroll_y > 1:\n rv = False\n else:\n touch.ud['sv.handled']['y'] = True\n\n if mode == 'unknown':\n ud['dx'] += abs(touch.dx)\n ud['dy'] += abs(touch.dy)\n if ud['dx'] > self.scroll_distance:\n if not self.do_scroll_x:\n # touch is in parent, but _change expects window coords\n touch.push()\n touch.apply_transform_2d(self.to_local)\n touch.apply_transform_2d(self.to_window)\n self._change_touch_mode()\n touch.pop()\n return\n mode = 'scroll'\n\n if ud['dy'] > self.scroll_distance:\n if not self.do_scroll_y:\n # touch is in parent, but _change expects window coords\n touch.push()\n touch.apply_transform_2d(self.to_local)\n touch.apply_transform_2d(self.to_window)\n self._change_touch_mode()\n touch.pop()\n return\n mode = 'scroll'\n ud['mode'] = mode\n\n if mode == 'scroll':\n ud['dt'] = touch.time_update - ud['time']\n ud['time'] = touch.time_update\n ud['user_stopped'] = True\n\n return rv\n\n def 
on_touch_up(self, touch):\n if self._touch is not touch and self.uid not in touch.ud:\n # touch is in parents\n touch.push()\n touch.apply_transform_2d(self.to_local)\n if super(ScrollView, self).on_touch_up(touch):\n return True\n touch.pop()\n return False\n\n if self.dispatch('on_scroll_stop', touch):\n touch.ungrab(self)\n return True\n\n def on_scroll_stop(self, touch, check_children=True):\n self._touch = None\n\n if check_children:\n touch.push()\n touch.apply_transform_2d(self.to_local)\n if self.dispatch_children('on_scroll_stop', touch):\n return True\n touch.pop()\n\n if self._get_uid('svavoid') in touch.ud:\n return\n if self._get_uid() not in touch.ud:\n return False\n\n self._touch = None\n uid = self._get_uid()\n ud = touch.ud[uid]\n if self.do_scroll_x and self.effect_x:\n if not touch.ud.get('in_bar_x', False) and\\\n self.scroll_type != ['bars']:\n self.effect_x.stop(touch.x)\n if self.do_scroll_y and self.effect_y and\\\n self.scroll_type != ['bars']:\n if not touch.ud.get('in_bar_y', False):\n self.effect_y.stop(touch.y)\n if ud['mode'] == 'unknown':\n # we must do the click at least..\n # only send the click if it was not a click to stop\n # autoscrolling\n if not ud['user_stopped']:\n self.simulate_touch_down(touch)\n Clock.schedule_once(partial(self._do_touch_up, touch), .2)\n Clock.unschedule(self._update_effect_bounds)\n Clock.schedule_once(self._update_effect_bounds)\n\n # if we do mouse scrolling, always accept it\n if 'button' in touch.profile and touch.button.startswith('scroll'):\n return True\n\n return self._get_uid() in touch.ud\n\n def convert_distance_to_scroll(self, dx, dy):\n '''Convert a distance in pixels to a scroll distance, depending on the\n content size and the scrollview size.\n\n The result will be a tuple of scroll distance that can be added to\n :data:`scroll_x` and :data:`scroll_y`\n '''\n if not self._viewport:\n return 0, 0\n vp = self._viewport\n if vp.width > self.width:\n sw = vp.width - self.width\n sx = dx / float(sw)\n else:\n sx = 0\n if vp.height > self.height:\n sh = vp.height - self.height\n sy = dy / float(sh)\n else:\n sy = 1\n return sx, sy\n\n def update_from_scroll(self, *largs):\n '''Force the reposition of the content, according to current value of\n :attr:`scroll_x` and :attr:`scroll_y`.\n\n This method is automatically called when one of the :attr:`scroll_x`,\n :attr:`scroll_y`, :attr:`pos` or :attr:`size` properties change, or\n if the size of the content changes.\n '''\n if not self._viewport:\n return\n vp = self._viewport\n\n # update from size_hint\n if vp.size_hint_x is not None:\n vp.width = vp.size_hint_x * self.width\n if vp.size_hint_y is not None:\n vp.height = vp.size_hint_y * self.height\n\n if vp.width > self.width:\n sw = vp.width - self.width\n x = self.x - self.scroll_x * sw\n else:\n x = self.x\n if vp.height > self.height:\n sh = vp.height - self.height\n y = self.y - self.scroll_y * sh\n else:\n y = self.top - vp.height\n\n # from 1.8.0, we now use a matrix by default, instead of moving the\n # widget position behind. 
We set it here, but it will be a no-op most of\n # the time.\n vp.pos = 0, 0\n self.g_translate.xy = x, y\n\n # New in 1.2.0, show bar when scrolling happens and (changed in 1.9.0)\n # fade to bar_inactive_color when no scroll is happening.\n Clock.unschedule(self._bind_inactive_bar_color)\n self.unbind(bar_inactive_color=self._change_bar_color)\n Animation.stop_all(self, '_bar_color')\n self.bind(bar_color=self._change_bar_color)\n self._bar_color = self.bar_color\n Clock.schedule_once(self._bind_inactive_bar_color, .5)\n\n def _bind_inactive_bar_color(self, *l):\n self.unbind(bar_color=self._change_bar_color)\n self.bind(bar_inactive_color=self._change_bar_color)\n Animation(\n _bar_color=self.bar_inactive_color, d=.5, t='out_quart').start(self)\n\n def _change_bar_color(self, inst, value):\n self._bar_color = value\n\n #\n # Private\n #\n def add_widget(self, widget, index=0):\n if self._viewport:\n raise Exception('ScrollView accept only one widget')\n canvas = self.canvas\n self.canvas = self.canvas_viewport\n super(ScrollView, self).add_widget(widget, index)\n self.canvas = canvas\n self._viewport = widget\n widget.bind(size=self._trigger_update_from_scroll)\n self._trigger_update_from_scroll()\n\n def remove_widget(self, widget):\n canvas = self.canvas\n self.canvas = self.canvas_viewport\n super(ScrollView, self).remove_widget(widget)\n self.canvas = canvas\n if widget is self._viewport:\n self._viewport = None\n\n def _get_uid(self, prefix='sv'):\n return '{0}.{1}'.format(prefix, self.uid)\n\n def _change_touch_mode(self, *largs):\n if not self._touch:\n return\n uid = self._get_uid()\n touch = self._touch\n ud = touch.ud[uid]\n if ud['mode'] != 'unknown' or ud['user_stopped']:\n return\n diff_frames = Clock.frames - ud['frames']\n\n # in order to be able to scroll on very slow devices, let at least 3\n # frames displayed to accumulate some velocity. And then, change the\n # touch mode. Otherwise, we might never be able to compute velocity, and\n # no way to scroll it. See #1464 and #1499\n if diff_frames < 3:\n Clock.schedule_once(self._change_touch_mode, 0)\n return\n\n if self.do_scroll_x and self.effect_x:\n self.effect_x.cancel()\n if self.do_scroll_y and self.effect_y:\n self.effect_y.cancel()\n # XXX the next line was in the condition. 
But this stop\n # the possibily to \"drag\" an object out of the scrollview in the\n # non-used direction: if you have an horizontal scrollview, a\n # vertical gesture will not \"stop\" the scroll view to look for an\n # horizontal gesture, until the timeout is done.\n # and touch.dx + touch.dy == 0:\n touch.ungrab(self)\n self._touch = None\n # touch is in window coords\n touch.push()\n touch.apply_transform_2d(self.to_widget)\n touch.apply_transform_2d(self.to_parent)\n self.simulate_touch_down(touch)\n touch.pop()\n return\n\n def _do_touch_up(self, touch, *largs):\n # touch is in window coords\n touch.push()\n touch.apply_transform_2d(self.to_widget)\n super(ScrollView, self).on_touch_up(touch)\n touch.pop()\n # don't forget about grab event!\n for x in touch.grab_list[:]:\n touch.grab_list.remove(x)\n x = x()\n if not x:\n continue\n touch.grab_current = x\n # touch is in window coords\n touch.push()\n touch.apply_transform_2d(self.to_widget)\n super(ScrollView, self).on_touch_up(touch)\n touch.pop()\n touch.grab_current = None\n\n\nif __name__ == '__main__':\n from kivy.app import App\n\n from kivy.uix.gridlayout import GridLayout\n from kivy.uix.button import Button\n\n class ScrollViewApp(App):\n\n def build(self):\n layout1 = GridLayout(cols=4, spacing=10, size_hint=(None, None))\n layout1.bind(minimum_height=layout1.setter('height'),\n minimum_width=layout1.setter('width'))\n for i in range(40):\n btn = Button(text=str(i), size_hint=(None, None),\n size=(200, 100))\n layout1.add_widget(btn)\n scrollview1 = ScrollView(bar_width='2dp')\n scrollview1.add_widget(layout1)\n\n layout2 = GridLayout(cols=4, spacing=10, size_hint=(None, None))\n layout2.bind(minimum_height=layout2.setter('height'),\n minimum_width=layout2.setter('width'))\n for i in range(40):\n btn = Button(text=str(i), size_hint=(None, None),\n size=(200, 100))\n layout2.add_widget(btn)\n scrollview2 = ScrollView(scroll_type=['bars'],\n bar_width='9dp',\n scroll_wheel_distance=100)\n scrollview2.add_widget(layout2)\n\n root = GridLayout(cols=2)\n root.add_widget(scrollview1)\n root.add_widget(scrollview2)\n return root\n\n ScrollViewApp().run()\n", "path": "kivy/uix/scrollview.py"}]} |
gh_patches_debug_1233 | rasdani/github-patches | git_diff | kivy__kivy-7383 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Dropdown opening from data item in RecycleView is jumping down at refresh of RV-data/-layout
**Software Versions**
* Python: 3.6.9
* OS: Ubuntu 18.04
* Kivy: 2.0.0
* Kivy installation method: pip
A `Dropdown` attached to a `data` item of a `RecycleView` (RV) jumps down and up whenever the `data` of the `RecycleView` gets updated/refreshed.
**Expected behavior**
The `Dropdown`'s `y` position should not be recalculated if the `parent` of the item widget to which the `Dropdown` is attached is `None`.
**To Reproduce**
Create a `RecycleView` with `Button` items that display a `Dropdown` when the user clicks on the `Button` item. Create a `Clock` timer to periodically update the `data` property of the RV. Now click on an item to open the `Dropdown`. You can see that the `Dropdown` does not stay fixed under the attaching item/button widget - instead, it jumps down and up again every time the `data` gets updated.
**Code and Logs and screenshots**

As you can see in the animation above, the `y` position of the item gets wrongly calculated if `parent` is `None`.
As a workaround, override the `_reposition` method of the `Dropdown` widget to prevent the recalculation of the `y` position when the `parent` is None:
```python
def _reposition(self, *largs):
if self.attach_to and not self.attach_to.parent:
return
super()._reposition(*largs)
```
**Additional context**
On refresh of the RV's `data` property, the items are temporarily removed from the RV, which sets the `parent` property of the item to `None`. This breaks the calculation of the item's screen position (the y-offset of the `ScrollView` is not added) and the `Dropdown` jumps down. As soon as the `parent` of the item is set again, the screen position gets calculated correctly and the `Dropdown` jumps back up (to its original/correct position, underneath the attaching item).
I will create a PR to fix this within `Dropdown._reposition`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kivy/uix/dropdown.py`
Content:
```
1 '''
2 Drop-Down List
3 ==============
4
5 .. image:: images/dropdown.gif
6 :align: right
7
8 .. versionadded:: 1.4.0
9
10 A versatile drop-down list that can be used with custom widgets. It allows you
11 to display a list of widgets under a displayed widget. Unlike other toolkits,
12 the list of widgets can contain any type of widget: simple buttons,
13 images etc.
14
15 The positioning of the drop-down list is fully automatic: we will always try to
16 place the dropdown list in a way that the user can select an item in the list.
17
18 Basic example
19 -------------
20
21 A button with a dropdown list of 10 possible values. All the buttons within the
22 dropdown list will trigger the dropdown :meth:`DropDown.select` method. After
23 being called, the main button text will display the selection of the
24 dropdown. ::
25
26 from kivy.uix.dropdown import DropDown
27 from kivy.uix.button import Button
28 from kivy.base import runTouchApp
29
30 # create a dropdown with 10 buttons
31 dropdown = DropDown()
32 for index in range(10):
33 # When adding widgets, we need to specify the height manually
34 # (disabling the size_hint_y) so the dropdown can calculate
35 # the area it needs.
36
37 btn = Button(text='Value %d' % index, size_hint_y=None, height=44)
38
39 # for each button, attach a callback that will call the select() method
40 # on the dropdown. We'll pass the text of the button as the data of the
41 # selection.
42 btn.bind(on_release=lambda btn: dropdown.select(btn.text))
43
44 # then add the button inside the dropdown
45 dropdown.add_widget(btn)
46
47 # create a big main button
48 mainbutton = Button(text='Hello', size_hint=(None, None))
49
50 # show the dropdown menu when the main button is released
51 # note: all the bind() calls pass the instance of the caller (here, the
52 # mainbutton instance) as the first argument of the callback (here,
53 # dropdown.open.).
54 mainbutton.bind(on_release=dropdown.open)
55
56 # one last thing, listen for the selection in the dropdown list and
57 # assign the data to the button text.
58 dropdown.bind(on_select=lambda instance, x: setattr(mainbutton, 'text', x))
59
60 runTouchApp(mainbutton)
61
62 Extending dropdown in Kv
63 ------------------------
64
65 You could create a dropdown directly from your kv::
66
67 #:kivy 1.4.0
68 <CustomDropDown>:
69 Button:
70 text: 'My first Item'
71 size_hint_y: None
72 height: 44
73 on_release: root.select('item1')
74 Label:
75 text: 'Unselectable item'
76 size_hint_y: None
77 height: 44
78 Button:
79 text: 'My second Item'
80 size_hint_y: None
81 height: 44
82 on_release: root.select('item2')
83
84 And then, create the associated python class and use it::
85
86 class CustomDropDown(DropDown):
87 pass
88
89 dropdown = CustomDropDown()
90 mainbutton = Button(text='Hello', size_hint=(None, None))
91 mainbutton.bind(on_release=dropdown.open)
92 dropdown.bind(on_select=lambda instance, x: setattr(mainbutton, 'text', x))
93 '''
94
95 __all__ = ('DropDown', )
96
97 from kivy.uix.scrollview import ScrollView
98 from kivy.properties import ObjectProperty, NumericProperty, BooleanProperty
99 from kivy.core.window import Window
100 from kivy.lang import Builder
101 from kivy.clock import Clock
102 from kivy.config import Config
103
104 _grid_kv = '''
105 GridLayout:
106 size_hint_y: None
107 height: self.minimum_size[1]
108 cols: 1
109 '''
110
111
112 class DropDownException(Exception):
113 '''DropDownException class.
114 '''
115 pass
116
117
118 class DropDown(ScrollView):
119 '''DropDown class. See module documentation for more information.
120
121 :Events:
122 `on_select`: data
123 Fired when a selection is done. The data of the selection is passed
124 in as the first argument and is what you pass in the :meth:`select`
125 method as the first argument.
126 `on_dismiss`:
127 .. versionadded:: 1.8.0
128
129 Fired when the DropDown is dismissed, either on selection or on
130 touching outside the widget.
131 '''
132
133 auto_width = BooleanProperty(True)
134 '''By default, the width of the dropdown will be the same as the width of
135 the attached widget. Set to False if you want to provide your own width.
136
137 :attr:`auto_width` is a :class:`~kivy.properties.BooleanProperty`
138 and defaults to True.
139 '''
140
141 max_height = NumericProperty(None, allownone=True)
142 '''Indicate the maximum height that the dropdown can take. If None, it will
143 take the maximum height available until the top or bottom of the screen
144 is reached.
145
146 :attr:`max_height` is a :class:`~kivy.properties.NumericProperty` and
147 defaults to None.
148 '''
149
150 dismiss_on_select = BooleanProperty(True)
151 '''By default, the dropdown will be automatically dismissed when a
152 selection has been done. Set to False to prevent the dismiss.
153
154 :attr:`dismiss_on_select` is a :class:`~kivy.properties.BooleanProperty`
155 and defaults to True.
156 '''
157
158 auto_dismiss = BooleanProperty(True)
159 '''By default, the dropdown will be automatically dismissed when a
160 touch happens outside of it, this option allows to disable this
161 feature
162
163 :attr:`auto_dismiss` is a :class:`~kivy.properties.BooleanProperty`
164 and defaults to True.
165
166 .. versionadded:: 1.8.0
167 '''
168
169 min_state_time = NumericProperty(0)
170 '''Minimum time before the :class:`~kivy.uix.DropDown` is dismissed.
171 This is used to allow for the widget inside the dropdown to display
172 a down state or for the :class:`~kivy.uix.DropDown` itself to
173 display a animation for closing.
174
175 :attr:`min_state_time` is a :class:`~kivy.properties.NumericProperty`
176 and defaults to the `Config` value `min_state_time`.
177
178 .. versionadded:: 1.10.0
179 '''
180
181 attach_to = ObjectProperty(allownone=True)
182 '''(internal) Property that will be set to the widget to which the
183 drop down list is attached.
184
185 The :meth:`open` method will automatically set this property whilst
186 :meth:`dismiss` will set it back to None.
187 '''
188
189 container = ObjectProperty()
190 '''(internal) Property that will be set to the container of the dropdown
191 list. It is a :class:`~kivy.uix.gridlayout.GridLayout` by default.
192 '''
193
194 _touch_started_inside = None
195
196 __events__ = ('on_select', 'on_dismiss')
197
198 def __init__(self, **kwargs):
199 self._win = None
200 if 'min_state_time' not in kwargs:
201 self.min_state_time = float(
202 Config.get('graphics', 'min_state_time'))
203 if 'container' not in kwargs:
204 c = self.container = Builder.load_string(_grid_kv)
205 else:
206 c = None
207 if 'do_scroll_x' not in kwargs:
208 self.do_scroll_x = False
209 if 'size_hint' not in kwargs:
210 if 'size_hint_x' not in kwargs:
211 self.size_hint_x = None
212 if 'size_hint_y' not in kwargs:
213 self.size_hint_y = None
214 super(DropDown, self).__init__(**kwargs)
215 if c is not None:
216 super(DropDown, self).add_widget(c)
217 self.on_container(self, c)
218 Window.bind(
219 on_key_down=self.on_key_down,
220 size=self._reposition)
221 self.fbind('size', self._reposition)
222
223 def on_key_down(self, instance, key, scancode, codepoint, modifiers):
224 if key == 27 and self.get_parent_window():
225 self.dismiss()
226 return True
227
228 def on_container(self, instance, value):
229 if value is not None:
230 self.container.bind(minimum_size=self._reposition)
231
232 def open(self, widget):
233 '''Open the dropdown list and attach it to a specific widget.
234 Depending on the position of the widget within the window and
235 the height of the dropdown, the dropdown might be above or below
236 that widget.
237 '''
238 # ensure we are not already attached
239 if self.attach_to is not None:
240 self.dismiss()
241
242 # we will attach ourself to the main window, so ensure the
243 # widget we are looking for have a window
244 self._win = widget.get_parent_window()
245 if self._win is None:
246 raise DropDownException(
247 'Cannot open a dropdown list on a hidden widget')
248
249 self.attach_to = widget
250 widget.bind(pos=self._reposition, size=self._reposition)
251 self._reposition()
252
253 # attach ourself to the main window
254 self._win.add_widget(self)
255
256 def dismiss(self, *largs):
257 '''Remove the dropdown widget from the window and detach it from
258 the attached widget.
259 '''
260 Clock.schedule_once(self._real_dismiss, self.min_state_time)
261
262 def _real_dismiss(self, *largs):
263 if self.parent:
264 self.parent.remove_widget(self)
265 if self.attach_to:
266 self.attach_to.unbind(pos=self._reposition, size=self._reposition)
267 self.attach_to = None
268 self.dispatch('on_dismiss')
269
270 def on_dismiss(self):
271 pass
272
273 def select(self, data):
274 '''Call this method to trigger the `on_select` event with the `data`
275 selection. The `data` can be anything you want.
276 '''
277 self.dispatch('on_select', data)
278 if self.dismiss_on_select:
279 self.dismiss()
280
281 def on_select(self, data):
282 pass
283
284 def add_widget(self, *args, **kwargs):
285 if self.container:
286 return self.container.add_widget(*args, **kwargs)
287 return super(DropDown, self).add_widget(*args, **kwargs)
288
289 def remove_widget(self, *args, **kwargs):
290 if self.container:
291 return self.container.remove_widget(*args, **kwargs)
292 return super(DropDown, self).remove_widget(*args, **kwargs)
293
294 def clear_widgets(self, *args, **kwargs):
295 if self.container:
296 return self.container.clear_widgets(*args, **kwargs)
297 return super(DropDown, self).clear_widgets(*args, **kwargs)
298
299 def on_touch_down(self, touch):
300 self._touch_started_inside = self.collide_point(*touch.pos)
301 if not self.auto_dismiss or self._touch_started_inside:
302 super(DropDown, self).on_touch_down(touch)
303 return True
304
305 def on_touch_move(self, touch):
306 if not self.auto_dismiss or self._touch_started_inside:
307 super(DropDown, self).on_touch_move(touch)
308 return True
309
310 def on_touch_up(self, touch):
311 # Explicitly test for False as None occurs when shown by on_touch_down
312 if self.auto_dismiss and self._touch_started_inside is False:
313 self.dismiss()
314 else:
315 super(DropDown, self).on_touch_up(touch)
316 self._touch_started_inside = None
317 return True
318
319 def _reposition(self, *largs):
320 # calculate the coordinate of the attached widget in the window
321 # coordinate system
322 win = self._win
323 widget = self.attach_to
324 if not widget or not win:
325 return
326 wx, wy = widget.to_window(*widget.pos)
327 wright, wtop = widget.to_window(widget.right, widget.top)
328
329 # set width and x
330 if self.auto_width:
331 self.width = wright - wx
332
333 # ensure the dropdown list doesn't get out on the X axis, with a
334 # preference to 0 in case the list is too wide.
335 x = wx
336 if x + self.width > win.width:
337 x = win.width - self.width
338 if x < 0:
339 x = 0
340 self.x = x
341
342 # determine if we display the dropdown upper or lower to the widget
343 if self.max_height is not None:
344 height = min(self.max_height, self.container.minimum_height)
345 else:
346 height = self.container.minimum_height
347
348 h_bottom = wy - height
349 h_top = win.height - (wtop + height)
350 if h_bottom > 0:
351 self.top = wy
352 self.height = height
353 elif h_top > 0:
354 self.y = wtop
355 self.height = height
356 else:
357 # none of both top/bottom have enough place to display the
358 # widget at the current size. Take the best side, and fit to
359 # it.
360
361 if h_top < h_bottom:
362 self.top = self.height = wy
363 else:
364 self.y = wtop
365 self.height = win.height - wtop
366
367
368 if __name__ == '__main__':
369 from kivy.uix.button import Button
370 from kivy.base import runTouchApp
371
372 def show_dropdown(button, *largs):
373 dp = DropDown()
374 dp.bind(on_select=lambda instance, x: setattr(button, 'text', x))
375 for i in range(10):
376 item = Button(text='hello %d' % i, size_hint_y=None, height=44)
377 item.bind(on_release=lambda btn: dp.select(btn.text))
378 dp.add_widget(item)
379 dp.open(button)
380
381 def touch_move(instance, touch):
382 instance.center = touch.pos
383
384 btn = Button(text='SHOW', size_hint=(None, None), pos=(300, 200))
385 btn.bind(on_release=show_dropdown, on_touch_move=touch_move)
386
387 runTouchApp(btn)
388
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kivy/uix/dropdown.py b/kivy/uix/dropdown.py
--- a/kivy/uix/dropdown.py
+++ b/kivy/uix/dropdown.py
@@ -321,7 +321,7 @@
# coordinate system
win = self._win
widget = self.attach_to
- if not widget or not win:
+ if not widget or not widget.parent or not win:
return
wx, wy = widget.to_window(*widget.pos)
wright, wtop = widget.to_window(widget.right, widget.top)
| {"golden_diff": "diff --git a/kivy/uix/dropdown.py b/kivy/uix/dropdown.py\n--- a/kivy/uix/dropdown.py\n+++ b/kivy/uix/dropdown.py\n@@ -321,7 +321,7 @@\n # coordinate system\n win = self._win\n widget = self.attach_to\n- if not widget or not win:\n+ if not widget or not widget.parent or not win:\n return\n wx, wy = widget.to_window(*widget.pos)\n wright, wtop = widget.to_window(widget.right, widget.top)\n", "issue": "Dropdown opening from data item in RecycleView is jumping down at refresh of RV-data/-layout\n**Software Versions**\r\n* Python: 3.6.9\r\n* OS: Ubuntu 18.04\r\n* Kivy: 2.0.0\r\n* Kivy installation method: pip\r\n\r\nA `Dropdown` attached to a `data` item of a `RecycleView` (RV) is jumping down and up if `data` of the `RecycleView` gets updated/refreshed.\r\n\r\n**Expected behavior**\r\nThe `Dropdown` `y` position should not be recalculated if the `parent` of the item widget where the `Dropdown` is attached is `None`.\r\n\r\n**To Reproduce**\r\nCreate a `RecycleView` with `Button` items, which are displaying a `Dropdown` when the user clicks on the `Button` item. Create a `Clock` timer to periodically update the `data` property of the RV. Now click on an item to open the `Dropdown`. Now you can see that the `Dropdown` is not staying fixed under the attaching item/button-widget - instead, it is jumping down and up again every time the `data` gets updated.\r\n\r\n**Code and Logs and screenshots**\r\n\r\n\r\n\r\nAs you can see in the animation above, the `y` position of the item gets wrongly calculated if `parent` is `None`.\r\n\r\nFor to workaround overwrite the `_reposition` method of the `Dropdown` widget to prevent the recalculation of the `y` position if the `parent` is None:\r\n\r\n```python\r\n def _reposition(self, *largs):\r\n if self.attach_to and not self.attach_to.parent:\r\n return\r\n super()._reposition(*largs)\r\n```\r\n\r\n**Additional context**\r\nOn refresh of RV's `data` property, the items will temporarily be removed from the RV, which sets the `parent` property of the item to `None`. This breaks the calculation of the item screen position (not adding the y-offset of the `ScrollView`) and the `Dropdown` is jumping down. As soon as the `parent` of the item is getting reset, the screen position gets calculated again and the `Dropdown` is jumping back up (to its original/correct position, underneath the attaching item).\r\n\r\nI will create PR to fix this within `Dropdown._reposition`.\r\n\n", "before_files": [{"content": "'''\nDrop-Down List\n==============\n\n.. image:: images/dropdown.gif\n :align: right\n\n.. versionadded:: 1.4.0\n\nA versatile drop-down list that can be used with custom widgets. It allows you\nto display a list of widgets under a displayed widget. Unlike other toolkits,\nthe list of widgets can contain any type of widget: simple buttons,\nimages etc.\n\nThe positioning of the drop-down list is fully automatic: we will always try to\nplace the dropdown list in a way that the user can select an item in the list.\n\nBasic example\n-------------\n\nA button with a dropdown list of 10 possible values. All the buttons within the\ndropdown list will trigger the dropdown :meth:`DropDown.select` method. After\nbeing called, the main button text will display the selection of the\ndropdown. 
::\n\n from kivy.uix.dropdown import DropDown\n from kivy.uix.button import Button\n from kivy.base import runTouchApp\n\n # create a dropdown with 10 buttons\n dropdown = DropDown()\n for index in range(10):\n # When adding widgets, we need to specify the height manually\n # (disabling the size_hint_y) so the dropdown can calculate\n # the area it needs.\n\n btn = Button(text='Value %d' % index, size_hint_y=None, height=44)\n\n # for each button, attach a callback that will call the select() method\n # on the dropdown. We'll pass the text of the button as the data of the\n # selection.\n btn.bind(on_release=lambda btn: dropdown.select(btn.text))\n\n # then add the button inside the dropdown\n dropdown.add_widget(btn)\n\n # create a big main button\n mainbutton = Button(text='Hello', size_hint=(None, None))\n\n # show the dropdown menu when the main button is released\n # note: all the bind() calls pass the instance of the caller (here, the\n # mainbutton instance) as the first argument of the callback (here,\n # dropdown.open.).\n mainbutton.bind(on_release=dropdown.open)\n\n # one last thing, listen for the selection in the dropdown list and\n # assign the data to the button text.\n dropdown.bind(on_select=lambda instance, x: setattr(mainbutton, 'text', x))\n\n runTouchApp(mainbutton)\n\nExtending dropdown in Kv\n------------------------\n\nYou could create a dropdown directly from your kv::\n\n #:kivy 1.4.0\n <CustomDropDown>:\n Button:\n text: 'My first Item'\n size_hint_y: None\n height: 44\n on_release: root.select('item1')\n Label:\n text: 'Unselectable item'\n size_hint_y: None\n height: 44\n Button:\n text: 'My second Item'\n size_hint_y: None\n height: 44\n on_release: root.select('item2')\n\nAnd then, create the associated python class and use it::\n\n class CustomDropDown(DropDown):\n pass\n\n dropdown = CustomDropDown()\n mainbutton = Button(text='Hello', size_hint=(None, None))\n mainbutton.bind(on_release=dropdown.open)\n dropdown.bind(on_select=lambda instance, x: setattr(mainbutton, 'text', x))\n'''\n\n__all__ = ('DropDown', )\n\nfrom kivy.uix.scrollview import ScrollView\nfrom kivy.properties import ObjectProperty, NumericProperty, BooleanProperty\nfrom kivy.core.window import Window\nfrom kivy.lang import Builder\nfrom kivy.clock import Clock\nfrom kivy.config import Config\n\n_grid_kv = '''\nGridLayout:\n size_hint_y: None\n height: self.minimum_size[1]\n cols: 1\n'''\n\n\nclass DropDownException(Exception):\n '''DropDownException class.\n '''\n pass\n\n\nclass DropDown(ScrollView):\n '''DropDown class. See module documentation for more information.\n\n :Events:\n `on_select`: data\n Fired when a selection is done. The data of the selection is passed\n in as the first argument and is what you pass in the :meth:`select`\n method as the first argument.\n `on_dismiss`:\n .. versionadded:: 1.8.0\n\n Fired when the DropDown is dismissed, either on selection or on\n touching outside the widget.\n '''\n\n auto_width = BooleanProperty(True)\n '''By default, the width of the dropdown will be the same as the width of\n the attached widget. Set to False if you want to provide your own width.\n\n :attr:`auto_width` is a :class:`~kivy.properties.BooleanProperty`\n and defaults to True.\n '''\n\n max_height = NumericProperty(None, allownone=True)\n '''Indicate the maximum height that the dropdown can take. 
If None, it will\n take the maximum height available until the top or bottom of the screen\n is reached.\n\n :attr:`max_height` is a :class:`~kivy.properties.NumericProperty` and\n defaults to None.\n '''\n\n dismiss_on_select = BooleanProperty(True)\n '''By default, the dropdown will be automatically dismissed when a\n selection has been done. Set to False to prevent the dismiss.\n\n :attr:`dismiss_on_select` is a :class:`~kivy.properties.BooleanProperty`\n and defaults to True.\n '''\n\n auto_dismiss = BooleanProperty(True)\n '''By default, the dropdown will be automatically dismissed when a\n touch happens outside of it, this option allows to disable this\n feature\n\n :attr:`auto_dismiss` is a :class:`~kivy.properties.BooleanProperty`\n and defaults to True.\n\n .. versionadded:: 1.8.0\n '''\n\n min_state_time = NumericProperty(0)\n '''Minimum time before the :class:`~kivy.uix.DropDown` is dismissed.\n This is used to allow for the widget inside the dropdown to display\n a down state or for the :class:`~kivy.uix.DropDown` itself to\n display a animation for closing.\n\n :attr:`min_state_time` is a :class:`~kivy.properties.NumericProperty`\n and defaults to the `Config` value `min_state_time`.\n\n .. versionadded:: 1.10.0\n '''\n\n attach_to = ObjectProperty(allownone=True)\n '''(internal) Property that will be set to the widget to which the\n drop down list is attached.\n\n The :meth:`open` method will automatically set this property whilst\n :meth:`dismiss` will set it back to None.\n '''\n\n container = ObjectProperty()\n '''(internal) Property that will be set to the container of the dropdown\n list. It is a :class:`~kivy.uix.gridlayout.GridLayout` by default.\n '''\n\n _touch_started_inside = None\n\n __events__ = ('on_select', 'on_dismiss')\n\n def __init__(self, **kwargs):\n self._win = None\n if 'min_state_time' not in kwargs:\n self.min_state_time = float(\n Config.get('graphics', 'min_state_time'))\n if 'container' not in kwargs:\n c = self.container = Builder.load_string(_grid_kv)\n else:\n c = None\n if 'do_scroll_x' not in kwargs:\n self.do_scroll_x = False\n if 'size_hint' not in kwargs:\n if 'size_hint_x' not in kwargs:\n self.size_hint_x = None\n if 'size_hint_y' not in kwargs:\n self.size_hint_y = None\n super(DropDown, self).__init__(**kwargs)\n if c is not None:\n super(DropDown, self).add_widget(c)\n self.on_container(self, c)\n Window.bind(\n on_key_down=self.on_key_down,\n size=self._reposition)\n self.fbind('size', self._reposition)\n\n def on_key_down(self, instance, key, scancode, codepoint, modifiers):\n if key == 27 and self.get_parent_window():\n self.dismiss()\n return True\n\n def on_container(self, instance, value):\n if value is not None:\n self.container.bind(minimum_size=self._reposition)\n\n def open(self, widget):\n '''Open the dropdown list and attach it to a specific widget.\n Depending on the position of the widget within the window and\n the height of the dropdown, the dropdown might be above or below\n that widget.\n '''\n # ensure we are not already attached\n if self.attach_to is not None:\n self.dismiss()\n\n # we will attach ourself to the main window, so ensure the\n # widget we are looking for have a window\n self._win = widget.get_parent_window()\n if self._win is None:\n raise DropDownException(\n 'Cannot open a dropdown list on a hidden widget')\n\n self.attach_to = widget\n widget.bind(pos=self._reposition, size=self._reposition)\n self._reposition()\n\n # attach ourself to the main window\n self._win.add_widget(self)\n\n def 
dismiss(self, *largs):\n '''Remove the dropdown widget from the window and detach it from\n the attached widget.\n '''\n Clock.schedule_once(self._real_dismiss, self.min_state_time)\n\n def _real_dismiss(self, *largs):\n if self.parent:\n self.parent.remove_widget(self)\n if self.attach_to:\n self.attach_to.unbind(pos=self._reposition, size=self._reposition)\n self.attach_to = None\n self.dispatch('on_dismiss')\n\n def on_dismiss(self):\n pass\n\n def select(self, data):\n '''Call this method to trigger the `on_select` event with the `data`\n selection. The `data` can be anything you want.\n '''\n self.dispatch('on_select', data)\n if self.dismiss_on_select:\n self.dismiss()\n\n def on_select(self, data):\n pass\n\n def add_widget(self, *args, **kwargs):\n if self.container:\n return self.container.add_widget(*args, **kwargs)\n return super(DropDown, self).add_widget(*args, **kwargs)\n\n def remove_widget(self, *args, **kwargs):\n if self.container:\n return self.container.remove_widget(*args, **kwargs)\n return super(DropDown, self).remove_widget(*args, **kwargs)\n\n def clear_widgets(self, *args, **kwargs):\n if self.container:\n return self.container.clear_widgets(*args, **kwargs)\n return super(DropDown, self).clear_widgets(*args, **kwargs)\n\n def on_touch_down(self, touch):\n self._touch_started_inside = self.collide_point(*touch.pos)\n if not self.auto_dismiss or self._touch_started_inside:\n super(DropDown, self).on_touch_down(touch)\n return True\n\n def on_touch_move(self, touch):\n if not self.auto_dismiss or self._touch_started_inside:\n super(DropDown, self).on_touch_move(touch)\n return True\n\n def on_touch_up(self, touch):\n # Explicitly test for False as None occurs when shown by on_touch_down\n if self.auto_dismiss and self._touch_started_inside is False:\n self.dismiss()\n else:\n super(DropDown, self).on_touch_up(touch)\n self._touch_started_inside = None\n return True\n\n def _reposition(self, *largs):\n # calculate the coordinate of the attached widget in the window\n # coordinate system\n win = self._win\n widget = self.attach_to\n if not widget or not win:\n return\n wx, wy = widget.to_window(*widget.pos)\n wright, wtop = widget.to_window(widget.right, widget.top)\n\n # set width and x\n if self.auto_width:\n self.width = wright - wx\n\n # ensure the dropdown list doesn't get out on the X axis, with a\n # preference to 0 in case the list is too wide.\n x = wx\n if x + self.width > win.width:\n x = win.width - self.width\n if x < 0:\n x = 0\n self.x = x\n\n # determine if we display the dropdown upper or lower to the widget\n if self.max_height is not None:\n height = min(self.max_height, self.container.minimum_height)\n else:\n height = self.container.minimum_height\n\n h_bottom = wy - height\n h_top = win.height - (wtop + height)\n if h_bottom > 0:\n self.top = wy\n self.height = height\n elif h_top > 0:\n self.y = wtop\n self.height = height\n else:\n # none of both top/bottom have enough place to display the\n # widget at the current size. 
Take the best side, and fit to\n # it.\n\n if h_top < h_bottom:\n self.top = self.height = wy\n else:\n self.y = wtop\n self.height = win.height - wtop\n\n\nif __name__ == '__main__':\n from kivy.uix.button import Button\n from kivy.base import runTouchApp\n\n def show_dropdown(button, *largs):\n dp = DropDown()\n dp.bind(on_select=lambda instance, x: setattr(button, 'text', x))\n for i in range(10):\n item = Button(text='hello %d' % i, size_hint_y=None, height=44)\n item.bind(on_release=lambda btn: dp.select(btn.text))\n dp.add_widget(item)\n dp.open(button)\n\n def touch_move(instance, touch):\n instance.center = touch.pos\n\n btn = Button(text='SHOW', size_hint=(None, None), pos=(300, 200))\n btn.bind(on_release=show_dropdown, on_touch_move=touch_move)\n\n runTouchApp(btn)\n", "path": "kivy/uix/dropdown.py"}], "after_files": [{"content": "'''\nDrop-Down List\n==============\n\n.. image:: images/dropdown.gif\n :align: right\n\n.. versionadded:: 1.4.0\n\nA versatile drop-down list that can be used with custom widgets. It allows you\nto display a list of widgets under a displayed widget. Unlike other toolkits,\nthe list of widgets can contain any type of widget: simple buttons,\nimages etc.\n\nThe positioning of the drop-down list is fully automatic: we will always try to\nplace the dropdown list in a way that the user can select an item in the list.\n\nBasic example\n-------------\n\nA button with a dropdown list of 10 possible values. All the buttons within the\ndropdown list will trigger the dropdown :meth:`DropDown.select` method. After\nbeing called, the main button text will display the selection of the\ndropdown. ::\n\n from kivy.uix.dropdown import DropDown\n from kivy.uix.button import Button\n from kivy.base import runTouchApp\n\n # create a dropdown with 10 buttons\n dropdown = DropDown()\n for index in range(10):\n # When adding widgets, we need to specify the height manually\n # (disabling the size_hint_y) so the dropdown can calculate\n # the area it needs.\n\n btn = Button(text='Value %d' % index, size_hint_y=None, height=44)\n\n # for each button, attach a callback that will call the select() method\n # on the dropdown. 
We'll pass the text of the button as the data of the\n # selection.\n btn.bind(on_release=lambda btn: dropdown.select(btn.text))\n\n # then add the button inside the dropdown\n dropdown.add_widget(btn)\n\n # create a big main button\n mainbutton = Button(text='Hello', size_hint=(None, None))\n\n # show the dropdown menu when the main button is released\n # note: all the bind() calls pass the instance of the caller (here, the\n # mainbutton instance) as the first argument of the callback (here,\n # dropdown.open.).\n mainbutton.bind(on_release=dropdown.open)\n\n # one last thing, listen for the selection in the dropdown list and\n # assign the data to the button text.\n dropdown.bind(on_select=lambda instance, x: setattr(mainbutton, 'text', x))\n\n runTouchApp(mainbutton)\n\nExtending dropdown in Kv\n------------------------\n\nYou could create a dropdown directly from your kv::\n\n #:kivy 1.4.0\n <CustomDropDown>:\n Button:\n text: 'My first Item'\n size_hint_y: None\n height: 44\n on_release: root.select('item1')\n Label:\n text: 'Unselectable item'\n size_hint_y: None\n height: 44\n Button:\n text: 'My second Item'\n size_hint_y: None\n height: 44\n on_release: root.select('item2')\n\nAnd then, create the associated python class and use it::\n\n class CustomDropDown(DropDown):\n pass\n\n dropdown = CustomDropDown()\n mainbutton = Button(text='Hello', size_hint=(None, None))\n mainbutton.bind(on_release=dropdown.open)\n dropdown.bind(on_select=lambda instance, x: setattr(mainbutton, 'text', x))\n'''\n\n__all__ = ('DropDown', )\n\nfrom kivy.uix.scrollview import ScrollView\nfrom kivy.properties import ObjectProperty, NumericProperty, BooleanProperty\nfrom kivy.core.window import Window\nfrom kivy.lang import Builder\nfrom kivy.clock import Clock\nfrom kivy.config import Config\n\n_grid_kv = '''\nGridLayout:\n size_hint_y: None\n height: self.minimum_size[1]\n cols: 1\n'''\n\n\nclass DropDownException(Exception):\n '''DropDownException class.\n '''\n pass\n\n\nclass DropDown(ScrollView):\n '''DropDown class. See module documentation for more information.\n\n :Events:\n `on_select`: data\n Fired when a selection is done. The data of the selection is passed\n in as the first argument and is what you pass in the :meth:`select`\n method as the first argument.\n `on_dismiss`:\n .. versionadded:: 1.8.0\n\n Fired when the DropDown is dismissed, either on selection or on\n touching outside the widget.\n '''\n\n auto_width = BooleanProperty(True)\n '''By default, the width of the dropdown will be the same as the width of\n the attached widget. Set to False if you want to provide your own width.\n\n :attr:`auto_width` is a :class:`~kivy.properties.BooleanProperty`\n and defaults to True.\n '''\n\n max_height = NumericProperty(None, allownone=True)\n '''Indicate the maximum height that the dropdown can take. If None, it will\n take the maximum height available until the top or bottom of the screen\n is reached.\n\n :attr:`max_height` is a :class:`~kivy.properties.NumericProperty` and\n defaults to None.\n '''\n\n dismiss_on_select = BooleanProperty(True)\n '''By default, the dropdown will be automatically dismissed when a\n selection has been done. 
Set to False to prevent the dismiss.\n\n :attr:`dismiss_on_select` is a :class:`~kivy.properties.BooleanProperty`\n and defaults to True.\n '''\n\n auto_dismiss = BooleanProperty(True)\n '''By default, the dropdown will be automatically dismissed when a\n touch happens outside of it, this option allows to disable this\n feature\n\n :attr:`auto_dismiss` is a :class:`~kivy.properties.BooleanProperty`\n and defaults to True.\n\n .. versionadded:: 1.8.0\n '''\n\n min_state_time = NumericProperty(0)\n '''Minimum time before the :class:`~kivy.uix.DropDown` is dismissed.\n This is used to allow for the widget inside the dropdown to display\n a down state or for the :class:`~kivy.uix.DropDown` itself to\n display a animation for closing.\n\n :attr:`min_state_time` is a :class:`~kivy.properties.NumericProperty`\n and defaults to the `Config` value `min_state_time`.\n\n .. versionadded:: 1.10.0\n '''\n\n attach_to = ObjectProperty(allownone=True)\n '''(internal) Property that will be set to the widget to which the\n drop down list is attached.\n\n The :meth:`open` method will automatically set this property whilst\n :meth:`dismiss` will set it back to None.\n '''\n\n container = ObjectProperty()\n '''(internal) Property that will be set to the container of the dropdown\n list. It is a :class:`~kivy.uix.gridlayout.GridLayout` by default.\n '''\n\n _touch_started_inside = None\n\n __events__ = ('on_select', 'on_dismiss')\n\n def __init__(self, **kwargs):\n self._win = None\n if 'min_state_time' not in kwargs:\n self.min_state_time = float(\n Config.get('graphics', 'min_state_time'))\n if 'container' not in kwargs:\n c = self.container = Builder.load_string(_grid_kv)\n else:\n c = None\n if 'do_scroll_x' not in kwargs:\n self.do_scroll_x = False\n if 'size_hint' not in kwargs:\n if 'size_hint_x' not in kwargs:\n self.size_hint_x = None\n if 'size_hint_y' not in kwargs:\n self.size_hint_y = None\n super(DropDown, self).__init__(**kwargs)\n if c is not None:\n super(DropDown, self).add_widget(c)\n self.on_container(self, c)\n Window.bind(\n on_key_down=self.on_key_down,\n size=self._reposition)\n self.fbind('size', self._reposition)\n\n def on_key_down(self, instance, key, scancode, codepoint, modifiers):\n if key == 27 and self.get_parent_window():\n self.dismiss()\n return True\n\n def on_container(self, instance, value):\n if value is not None:\n self.container.bind(minimum_size=self._reposition)\n\n def open(self, widget):\n '''Open the dropdown list and attach it to a specific widget.\n Depending on the position of the widget within the window and\n the height of the dropdown, the dropdown might be above or below\n that widget.\n '''\n # ensure we are not already attached\n if self.attach_to is not None:\n self.dismiss()\n\n # we will attach ourself to the main window, so ensure the\n # widget we are looking for have a window\n self._win = widget.get_parent_window()\n if self._win is None:\n raise DropDownException(\n 'Cannot open a dropdown list on a hidden widget')\n\n self.attach_to = widget\n widget.bind(pos=self._reposition, size=self._reposition)\n self._reposition()\n\n # attach ourself to the main window\n self._win.add_widget(self)\n\n def dismiss(self, *largs):\n '''Remove the dropdown widget from the window and detach it from\n the attached widget.\n '''\n Clock.schedule_once(self._real_dismiss, self.min_state_time)\n\n def _real_dismiss(self, *largs):\n if self.parent:\n self.parent.remove_widget(self)\n if self.attach_to:\n self.attach_to.unbind(pos=self._reposition, 
size=self._reposition)\n self.attach_to = None\n self.dispatch('on_dismiss')\n\n def on_dismiss(self):\n pass\n\n def select(self, data):\n '''Call this method to trigger the `on_select` event with the `data`\n selection. The `data` can be anything you want.\n '''\n self.dispatch('on_select', data)\n if self.dismiss_on_select:\n self.dismiss()\n\n def on_select(self, data):\n pass\n\n def add_widget(self, *args, **kwargs):\n if self.container:\n return self.container.add_widget(*args, **kwargs)\n return super(DropDown, self).add_widget(*args, **kwargs)\n\n def remove_widget(self, *args, **kwargs):\n if self.container:\n return self.container.remove_widget(*args, **kwargs)\n return super(DropDown, self).remove_widget(*args, **kwargs)\n\n def clear_widgets(self, *args, **kwargs):\n if self.container:\n return self.container.clear_widgets(*args, **kwargs)\n return super(DropDown, self).clear_widgets(*args, **kwargs)\n\n def on_touch_down(self, touch):\n self._touch_started_inside = self.collide_point(*touch.pos)\n if not self.auto_dismiss or self._touch_started_inside:\n super(DropDown, self).on_touch_down(touch)\n return True\n\n def on_touch_move(self, touch):\n if not self.auto_dismiss or self._touch_started_inside:\n super(DropDown, self).on_touch_move(touch)\n return True\n\n def on_touch_up(self, touch):\n # Explicitly test for False as None occurs when shown by on_touch_down\n if self.auto_dismiss and self._touch_started_inside is False:\n self.dismiss()\n else:\n super(DropDown, self).on_touch_up(touch)\n self._touch_started_inside = None\n return True\n\n def _reposition(self, *largs):\n # calculate the coordinate of the attached widget in the window\n # coordinate system\n win = self._win\n widget = self.attach_to\n if not widget or not widget.parent or not win:\n return\n wx, wy = widget.to_window(*widget.pos)\n wright, wtop = widget.to_window(widget.right, widget.top)\n\n # set width and x\n if self.auto_width:\n self.width = wright - wx\n\n # ensure the dropdown list doesn't get out on the X axis, with a\n # preference to 0 in case the list is too wide.\n x = wx\n if x + self.width > win.width:\n x = win.width - self.width\n if x < 0:\n x = 0\n self.x = x\n\n # determine if we display the dropdown upper or lower to the widget\n if self.max_height is not None:\n height = min(self.max_height, self.container.minimum_height)\n else:\n height = self.container.minimum_height\n\n h_bottom = wy - height\n h_top = win.height - (wtop + height)\n if h_bottom > 0:\n self.top = wy\n self.height = height\n elif h_top > 0:\n self.y = wtop\n self.height = height\n else:\n # none of both top/bottom have enough place to display the\n # widget at the current size. Take the best side, and fit to\n # it.\n\n if h_top < h_bottom:\n self.top = self.height = wy\n else:\n self.y = wtop\n self.height = win.height - wtop\n\n\nif __name__ == '__main__':\n from kivy.uix.button import Button\n from kivy.base import runTouchApp\n\n def show_dropdown(button, *largs):\n dp = DropDown()\n dp.bind(on_select=lambda instance, x: setattr(button, 'text', x))\n for i in range(10):\n item = Button(text='hello %d' % i, size_hint_y=None, height=44)\n item.bind(on_release=lambda btn: dp.select(btn.text))\n dp.add_widget(item)\n dp.open(button)\n\n def touch_move(instance, touch):\n instance.center = touch.pos\n\n btn = Button(text='SHOW', size_hint=(None, None), pos=(300, 200))\n btn.bind(on_release=show_dropdown, on_touch_move=touch_move)\n\n runTouchApp(btn)\n", "path": "kivy/uix/dropdown.py"}]} |
gh_patches_debug_1234 | rasdani/github-patches | git_diff | ipython__ipython-3901 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
under Windows, "ipython3 nbconvert "C:/blabla/first_try.ipynb" --to latex --post PDF" POST processing action fails because of a bad parameter
Hello,
The "one single step" option to create a ".pdf" from a .ipynb" fails on my windows python3 pc
Nbconvert apparently tries compile ".TEX" result with
"pdflatex .\first_try.tex"
==> It generates a bad behaviour of pdflatex, which picks "pdfTex" option instead of "PdfLatex".
The working option, on my Windows PC and when I do it by hand, is not to put the ".\"
"pdflatex first_try.tex"
UPDATE : replacing ".\" per "./" seems also to be a solution.
"pdflatex ./first_try.tex"
Hint to the problem comes from here
http://tex.stackexchange.com/questions/78178/miktex-how-to-run-pdflatex-from-cmd-prompt-on-windows-7-compared-to-windows-xp
Details below.
Sheers
*\* instruction *\*
ipython3 nbconvert "C:/blabla/first_try.ipynb" --to latex --post PDF"
**\* (start of the output ) ***
C:\Users\parent\Desktop\winpython\WinPython-32bit-3.3.2.1rc1\python-3.3.2>ipytho
n3 nbconvert "C:/blabla//first_try.ipynb" --to latex --po
st PDF
[NbConvertApp] Using existing profile dir: 'C:\Users\parent\Desktop\winpytho
n\WinPython-32bit-3.3.2.1rc1\settings\.ipython\profile_default'
[NbConvertApp] Converting notebook C:/blabla/first_try.ipynb to latex
[NbConvertApp] Support files will be in first_try_files\
[NbConvertApp] Loaded template latex_article.tplx
[NbConvertApp] Writing 53680 bytes to .\first_try.tex
[NbConvertApp] Building PDF: `pdflatex .\first_try.tex`
This is pdfTeX, Version 3.1415926-2.4-1.40.13 (TeX Live 2012/W32TeX)
restricted \write18 enabled.
entering extended mode
! Undefined control sequence.
<_> .\first
_try.tex
?
*_\* (end of the output ) ***
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `IPython/nbconvert/writers/files.py`
Content:
```
1 """
2 Contains writer for writing nbconvert output to filesystem.
3 """
4 #-----------------------------------------------------------------------------
5 #Copyright (c) 2013, the IPython Development Team.
6 #
7 #Distributed under the terms of the Modified BSD License.
8 #
9 #The full license is in the file COPYING.txt, distributed with this software.
10 #-----------------------------------------------------------------------------
11
12 #-----------------------------------------------------------------------------
13 # Imports
14 #-----------------------------------------------------------------------------
15
16 import io
17 import os
18 import glob
19
20 from IPython.utils.traitlets import Unicode
21 from IPython.utils.path import link_or_copy
22
23 from .base import WriterBase
24
25 #-----------------------------------------------------------------------------
26 # Classes
27 #-----------------------------------------------------------------------------
28
29 class FilesWriter(WriterBase):
30 """Consumes nbconvert output and produces files."""
31
32
33 build_directory = Unicode(".", config=True,
34 help="""Directory to write output to. Leave blank
35 to output to the current directory""")
36
37
38 # Make sure that the output directory exists.
39 def _build_directory_changed(self, name, old, new):
40 if new and not os.path.isdir(new):
41 os.makedirs(new)
42
43
44 def __init__(self, **kw):
45 super(FilesWriter, self).__init__(**kw)
46 self._build_directory_changed('build_directory', self.build_directory,
47 self.build_directory)
48
49 def _makedir(self, path):
50 """Make a directory if it doesn't already exist"""
51 if not os.path.isdir(path):
52 self.log.info("Making directory %s", path)
53 os.makedirs(path)
54
55 def write(self, output, resources, notebook_name=None, **kw):
56 """
57 Consume and write Jinja output to the file system. Output directory
58 is set via the 'build_directory' variable of this instance (a
59 configurable).
60
61 See base for more...
62 """
63
64 # Pull the extension and subdir from the resources dict.
65 output_extension = resources['output_extension']
66
67 # Write all of the extracted resources to the destination directory.
68 # NOTE: WE WRITE EVERYTHING AS-IF IT'S BINARY. THE EXTRACT FIG
69 # TRANSFORMER SHOULD HANDLE UNIX/WINDOWS LINE ENDINGS...
70 for filename, data in resources.get('outputs', {}).items():
71
72 # Determine where to write the file to
73 dest = os.path.join(self.build_directory, filename)
74 path = os.path.dirname(dest)
75 self._makedir(path)
76
77 # Write file
78 self.log.debug("Writing %i bytes to support file %s", len(data), dest)
79 with io.open(dest, 'wb') as f:
80 f.write(data)
81
82 # Copy referenced files to output directory
83 if self.build_directory:
84 for filename in self.files:
85
86 # Copy files that match search pattern
87 for matching_filename in glob.glob(filename):
88
89 # Make sure folder exists.
90 dest = os.path.join(self.build_directory, filename)
91 path = os.path.dirname(dest)
92 self._makedir(path)
93
94 # Copy if destination is different.
95 if not os.path.normpath(dest) == os.path.normpath(matching_filename):
96 self.log.info("Linking %s -> %s", matching_filename, dest)
97 link_or_copy(matching_filename, dest)
98
99 # Determine where to write conversion results.
100 dest = notebook_name + '.' + output_extension
101 if self.build_directory:
102 dest = os.path.join(self.build_directory, dest)
103
104 # Write conversion results.
105 self.log.info("Writing %i bytes to %s", len(output), dest)
106 with io.open(dest, 'w', encoding='utf-8') as f:
107 f.write(output)
108 return dest
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/IPython/nbconvert/writers/files.py b/IPython/nbconvert/writers/files.py
--- a/IPython/nbconvert/writers/files.py
+++ b/IPython/nbconvert/writers/files.py
@@ -30,7 +30,7 @@
"""Consumes nbconvert output and produces files."""
- build_directory = Unicode(".", config=True,
+ build_directory = Unicode("", config=True,
help="""Directory to write output to. Leave blank
to output to the current directory""")
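
The one-character change above works because `FilesWriter.write` only prefixes the output name with the build directory when `build_directory` is truthy: with the old default of `"."`, `os.path.join` yields `.\first_try.tex` on Windows, the exact argument the issue reports as confusing pdflatex, while the empty default leaves the bare filename. Below is a minimal sketch of that path behaviour — it uses `ntpath` so the Windows semantics can be reproduced on any platform, and `first_try` is only an illustrative placeholder taken from the issue report, not something the writer hard-codes.

```python
import ntpath  # Windows flavour of os.path, importable on any platform


def build_dest(build_directory, notebook_name="first_try", ext="tex"):
    """Mirror the dest-building logic in FilesWriter.write."""
    dest = notebook_name + "." + ext
    if build_directory:  # only join when a build directory is configured
        dest = ntpath.join(build_directory, dest)
    return dest


print(build_dest("."))  # .\first_try.tex  -> the argument reported as failing
print(build_dest(""))   # first_try.tex    -> the form the reporter confirmed works
```

With the empty-string default the conversion result is still written to the current working directory, so nothing changes for users who never set `build_directory`, but the PDF post-processor ends up calling `pdflatex first_try.tex` without the problematic `.\` prefix.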
| {"golden_diff": "diff --git a/IPython/nbconvert/writers/files.py b/IPython/nbconvert/writers/files.py\n--- a/IPython/nbconvert/writers/files.py\n+++ b/IPython/nbconvert/writers/files.py\n@@ -30,7 +30,7 @@\n \"\"\"Consumes nbconvert output and produces files.\"\"\"\n \n \n- build_directory = Unicode(\".\", config=True, \n+ build_directory = Unicode(\"\", config=True,\n help=\"\"\"Directory to write output to. Leave blank\n to output to the current directory\"\"\")\n", "issue": "under Windows, \"ipython3 nbconvert \"C:/blabla/first_try.ipynb\" --to latex --post PDF\" POST processing action fails because of a bad parameter\nHello,\n\nThe \"one single step\" option to create a \".pdf\" from a .ipynb\" fails on my windows python3 pc \n\nNbconvert apparently tries compile \".TEX\" result with \n\n\"pdflatex .\\first_try.tex\" \n\n==> It generates a bad behaviour of pdflatex, which picks \"pdfTex\" option instead of \"PdfLatex\".\n\nThe working option, on my Windows PC and when I do it by hand, is not to put the \".\\\" \n\n\"pdflatex first_try.tex\" \n\nUPDATE : replacing \".\\\" per \"./\" seems also to be a solution.\n\"pdflatex ./first_try.tex\" \n\nHint to the problem comes from here \nhttp://tex.stackexchange.com/questions/78178/miktex-how-to-run-pdflatex-from-cmd-prompt-on-windows-7-compared-to-windows-xp \n\nDetails below.\n\nSheers\n\n*\\* instruction *\\* \nipython3 nbconvert \"C:/blabla/first_try.ipynb\" --to latex --post PDF\"\n\n**\\* (start of the output ) ***\nC:\\Users\\parent\\Desktop\\winpython\\WinPython-32bit-3.3.2.1rc1\\python-3.3.2>ipytho\nn3 nbconvert \"C:/blabla//first_try.ipynb\" --to latex --po\nst PDF\n[NbConvertApp] Using existing profile dir: 'C:\\Users\\parent\\Desktop\\winpytho\nn\\WinPython-32bit-3.3.2.1rc1\\settings\\.ipython\\profile_default'\n[NbConvertApp] Converting notebook C:/blabla/first_try.ipynb to latex\n[NbConvertApp] Support files will be in first_try_files\\\n[NbConvertApp] Loaded template latex_article.tplx\n[NbConvertApp] Writing 53680 bytes to .\\first_try.tex\n[NbConvertApp] Building PDF: `pdflatex .\\first_try.tex`\nThis is pdfTeX, Version 3.1415926-2.4-1.40.13 (TeX Live 2012/W32TeX)\n restricted \\write18 enabled.\nentering extended mode\n! Undefined control sequence.\n<_> .\\first\n _try.tex\n?\n*_\\* (end of the output ) ***\n\n", "before_files": [{"content": "\"\"\"\nContains writer for writing nbconvert output to filesystem.\n\"\"\"\n#-----------------------------------------------------------------------------\n#Copyright (c) 2013, the IPython Development Team.\n#\n#Distributed under the terms of the Modified BSD License.\n#\n#The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\nimport io\nimport os\nimport glob\n\nfrom IPython.utils.traitlets import Unicode\nfrom IPython.utils.path import link_or_copy\n\nfrom .base import WriterBase\n\n#-----------------------------------------------------------------------------\n# Classes\n#-----------------------------------------------------------------------------\n\nclass FilesWriter(WriterBase):\n \"\"\"Consumes nbconvert output and produces files.\"\"\"\n\n\n build_directory = Unicode(\".\", config=True, \n help=\"\"\"Directory to write output to. 
Leave blank\n to output to the current directory\"\"\")\n\n\n # Make sure that the output directory exists.\n def _build_directory_changed(self, name, old, new):\n if new and not os.path.isdir(new):\n os.makedirs(new)\n\n\n def __init__(self, **kw):\n super(FilesWriter, self).__init__(**kw)\n self._build_directory_changed('build_directory', self.build_directory, \n self.build_directory)\n \n def _makedir(self, path):\n \"\"\"Make a directory if it doesn't already exist\"\"\"\n if not os.path.isdir(path):\n self.log.info(\"Making directory %s\", path)\n os.makedirs(path)\n\n def write(self, output, resources, notebook_name=None, **kw):\n \"\"\"\n Consume and write Jinja output to the file system. Output directory\n is set via the 'build_directory' variable of this instance (a \n configurable).\n\n See base for more...\n \"\"\"\n\n # Pull the extension and subdir from the resources dict.\n output_extension = resources['output_extension']\n\n # Write all of the extracted resources to the destination directory.\n # NOTE: WE WRITE EVERYTHING AS-IF IT'S BINARY. THE EXTRACT FIG\n # TRANSFORMER SHOULD HANDLE UNIX/WINDOWS LINE ENDINGS...\n for filename, data in resources.get('outputs', {}).items():\n\n # Determine where to write the file to\n dest = os.path.join(self.build_directory, filename)\n path = os.path.dirname(dest)\n self._makedir(path)\n\n # Write file\n self.log.debug(\"Writing %i bytes to support file %s\", len(data), dest)\n with io.open(dest, 'wb') as f:\n f.write(data)\n\n # Copy referenced files to output directory\n if self.build_directory:\n for filename in self.files:\n\n # Copy files that match search pattern\n for matching_filename in glob.glob(filename):\n\n # Make sure folder exists.\n dest = os.path.join(self.build_directory, filename)\n path = os.path.dirname(dest)\n self._makedir(path)\n\n # Copy if destination is different.\n if not os.path.normpath(dest) == os.path.normpath(matching_filename):\n self.log.info(\"Linking %s -> %s\", matching_filename, dest)\n link_or_copy(matching_filename, dest)\n\n # Determine where to write conversion results.\n dest = notebook_name + '.' 
+ output_extension\n if self.build_directory:\n dest = os.path.join(self.build_directory, dest)\n\n # Write conversion results.\n self.log.info(\"Writing %i bytes to %s\", len(output), dest)\n with io.open(dest, 'w', encoding='utf-8') as f:\n f.write(output)\n return dest", "path": "IPython/nbconvert/writers/files.py"}], "after_files": [{"content": "\"\"\"\nContains writer for writing nbconvert output to filesystem.\n\"\"\"\n#-----------------------------------------------------------------------------\n#Copyright (c) 2013, the IPython Development Team.\n#\n#Distributed under the terms of the Modified BSD License.\n#\n#The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\nimport io\nimport os\nimport glob\n\nfrom IPython.utils.traitlets import Unicode\nfrom IPython.utils.path import link_or_copy\n\nfrom .base import WriterBase\n\n#-----------------------------------------------------------------------------\n# Classes\n#-----------------------------------------------------------------------------\n\nclass FilesWriter(WriterBase):\n \"\"\"Consumes nbconvert output and produces files.\"\"\"\n\n\n build_directory = Unicode(\"\", config=True,\n help=\"\"\"Directory to write output to. Leave blank\n to output to the current directory\"\"\")\n\n\n # Make sure that the output directory exists.\n def _build_directory_changed(self, name, old, new):\n if new and not os.path.isdir(new):\n os.makedirs(new)\n\n\n def __init__(self, **kw):\n super(FilesWriter, self).__init__(**kw)\n self._build_directory_changed('build_directory', self.build_directory, \n self.build_directory)\n \n def _makedir(self, path):\n \"\"\"Make a directory if it doesn't already exist\"\"\"\n if not os.path.isdir(path):\n self.log.info(\"Making directory %s\", path)\n os.makedirs(path)\n\n def write(self, output, resources, notebook_name=None, **kw):\n \"\"\"\n Consume and write Jinja output to the file system. Output directory\n is set via the 'build_directory' variable of this instance (a \n configurable).\n\n See base for more...\n \"\"\"\n\n # Pull the extension and subdir from the resources dict.\n output_extension = resources['output_extension']\n\n # Write all of the extracted resources to the destination directory.\n # NOTE: WE WRITE EVERYTHING AS-IF IT'S BINARY. 
THE EXTRACT FIG\n # TRANSFORMER SHOULD HANDLE UNIX/WINDOWS LINE ENDINGS...\n for filename, data in resources.get('outputs', {}).items():\n\n # Determine where to write the file to\n dest = os.path.join(self.build_directory, filename)\n path = os.path.dirname(dest)\n self._makedir(path)\n\n # Write file\n self.log.debug(\"Writing %i bytes to support file %s\", len(data), dest)\n with io.open(dest, 'wb') as f:\n f.write(data)\n\n # Copy referenced files to output directory\n if self.build_directory:\n for filename in self.files:\n\n # Copy files that match search pattern\n for matching_filename in glob.glob(filename):\n\n # Make sure folder exists.\n dest = os.path.join(self.build_directory, filename)\n path = os.path.dirname(dest)\n self._makedir(path)\n\n # Copy if destination is different.\n if not os.path.normpath(dest) == os.path.normpath(matching_filename):\n self.log.info(\"Linking %s -> %s\", matching_filename, dest)\n link_or_copy(matching_filename, dest)\n\n # Determine where to write conversion results.\n dest = notebook_name + '.' + output_extension\n if self.build_directory:\n dest = os.path.join(self.build_directory, dest)\n\n # Write conversion results.\n self.log.info(\"Writing %i bytes to %s\", len(output), dest)\n with io.open(dest, 'w', encoding='utf-8') as f:\n f.write(output)\n return dest", "path": "IPython/nbconvert/writers/files.py"}]} |
gh_patches_debug_1235 | rasdani/github-patches | git_diff | pymedusa__Medusa-3813 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unnecessary warning for "Multiple posters at highest weighted score"
I get these "Multiple posters at highest weighted score" warnings too often, and they are a nuisance since they are not important enough to worry about.
Should be demoted to a level where it does not show up in the GUI as a warning.
2018-02-26 14:06:30 WARNING SHOWQUEUE-REFRESH :: [ccbc0ce] Multiple posters at highest weighted score for series 328599:
Score Rating Votes Resolution URL
4.355 10.000 1 680x1000 http://thetvdb.com/banners/posters/328599-6.jpg
4.355 10.000 1 680x1000 http://thetvdb.com/banners/posters/328599-8.jpg
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `medusa/indexers/indexer_base.py`
Content:
```
1 # coding=utf-8
2
3 """Base class for indexer api's."""
4
5 from __future__ import division
6
7 import getpass
8 import logging
9 import os
10 import tempfile
11 import time
12 import warnings
13 from operator import itemgetter
14
15 from medusa import statistics as stats
16 from medusa.helpers.utils import gen_values_by_key
17 from medusa.indexers.indexer_exceptions import (
18 IndexerAttributeNotFound,
19 IndexerEpisodeNotFound,
20 IndexerSeasonNotFound,
21 IndexerSeasonUpdatesNotSupported,
22 IndexerShowNotFound,
23 )
24 from medusa.indexers.indexer_ui import BaseUI, ConsoleUI
25 from medusa.logger.adapters.style import BraceAdapter
26 from medusa.statistics import weights
27
28 import requests
29 from six import integer_types
30
31
32 log = BraceAdapter(logging.getLogger(__name__))
33 log.logger.addHandler(logging.NullHandler())
34
35
36 class BaseIndexer(object):
37 """Base class for indexer api's."""
38
39 def __init__(self,
40 interactive=False,
41 select_first=False,
42 debug=False,
43 cache=True,
44 episodes=True,
45 banners=False,
46 actors=False,
47 custom_ui=None,
48 language=None,
49 search_all_languages=False,
50 apikey=None,
51 force_connect=False,
52 use_zip=False,
53 dvdorder=False,
54 proxy=None,
55 session=None,
56 image_type=None): # pylint: disable=too-many-locals,too-many-arguments
57 """Pass these arguments on as args from the subclass."""
58 self.shows = ShowContainer() # Holds all Show classes
59 self.corrections = {} # Holds show-name to show_id mapping
60
61 self.config = {}
62
63 self.config['debug_enabled'] = debug # show debugging messages
64
65 self.config['custom_ui'] = custom_ui
66
67 self.config['interactive'] = interactive # prompt for correct series?
68
69 self.config['select_first'] = select_first
70
71 self.config['search_all_languages'] = search_all_languages
72
73 self.config['use_zip'] = use_zip
74
75 self.config['dvdorder'] = dvdorder
76
77 self.config['proxy'] = proxy
78
79 if cache is True:
80 self.config['cache_enabled'] = True
81 self.config['cache_location'] = self._get_temp_dir()
82 elif cache is False:
83 self.config['cache_enabled'] = False
84 elif isinstance(cache, basestring):
85 self.config['cache_enabled'] = True
86 self.config['cache_location'] = cache
87 else:
88 raise ValueError('Invalid value for Cache {0!r} (type was {1})'.format(cache, type(cache)))
89
90 self.config['session'] = session if session else requests.Session()
91
92 self.config['episodes_enabled'] = episodes
93 self.config['banners_enabled'] = banners
94 self.config['image_type'] = image_type
95 self.config['actors_enabled'] = actors
96
97 if self.config['debug_enabled']:
98 warnings.warn('The debug argument to tvdbv2_api.__init__ will be removed in the next version. '
99 'To enable debug messages, use the following code before importing: '
100 'import logging; logging.basicConfig(level=logging.DEBUG)')
101 logging.basicConfig(level=logging.DEBUG)
102
103 # List of language from http://thetvdbv2.com/api/0629B785CE550C8D/languages.xml
104 # Hard-coded here as it is realtively static, and saves another HTTP request, as
105 # recommended on http://thetvdbv2.com/wiki/index.php/API:languages.xml
106 self.config['valid_languages'] = [
107 'da', 'fi', 'nl', 'de', 'it', 'es', 'fr', 'pl', 'hu', 'el', 'tr',
108 'ru', 'he', 'ja', 'pt', 'zh', 'cs', 'sl', 'hr', 'ko', 'en', 'sv', 'no'
109 ]
110
111 # thetvdb.com should be based around numeric language codes,
112 # but to link to a series like http://thetvdb.com/?tab=series&id=79349&lid=16
113 # requires the language ID, thus this mapping is required (mainly
114 # for usage in tvdb_ui - internally tvdb_api will use the language abbreviations)
115 self.config['langabbv_to_id'] = {'el': 20, 'en': 7, 'zh': 27,
116 'it': 15, 'cs': 28, 'es': 16, 'ru': 22, 'nl': 13, 'pt': 26, 'no': 9,
117 'tr': 21, 'pl': 18, 'fr': 17, 'hr': 31, 'de': 14, 'da': 10, 'fi': 11,
118 'hu': 19, 'ja': 25, 'he': 24, 'ko': 32, 'sv': 8, 'sl': 30}
119
120 if language is None:
121 self.config['language'] = 'en'
122 else:
123 if language not in self.config['valid_languages']:
124 raise ValueError('Invalid language {0}, options are: {1}'.format(
125 language, self.config['valid_languages']
126 ))
127 else:
128 self.config['language'] = language
129
130 def _get_temp_dir(self): # pylint: disable=no-self-use
131 """Return the [system temp dir]/tvdb_api-u501 (or tvdb_api-myuser)."""
132 if hasattr(os, 'getuid'):
133 uid = 'u{0}'.format(os.getuid()) # pylint: disable=no-member
134 else:
135 # For Windows
136 try:
137 uid = getpass.getuser()
138 except ImportError:
139 return os.path.join(tempfile.gettempdir(), 'tvdbv2_api')
140
141 return os.path.join(tempfile.gettempdir(), 'tvdbv2_api-{0}'.format(uid))
142
143 def _get_show_data(self, sid, language):
144 """Return dummy _get_show_data method."""
145 return None
146
147 def _get_series(self, series):
148 """Search themoviedb.org for the series name.
149
150 If a custom_ui UI is configured, it uses this to select the correct
151 series. If not, and interactive == True, ConsoleUI is used, if not
152 BaseUI is used to select the first result.
153
154 :param series: the query for the series name
155 :return: A list of series mapped to a UI (for example: a BaseUi or CustomUI).
156 """
157 all_series = self.search(series)
158 if not all_series:
159 log.debug('Series result returned zero')
160 IndexerShowNotFound('Show search returned zero results (cannot find show on Indexer)')
161
162 if not isinstance(all_series, list):
163 all_series = [all_series]
164
165 if self.config['custom_ui'] is not None:
166 log.debug('Using custom UI {0!r}', self.config['custom_ui'])
167 custom_ui = self.config['custom_ui']
168 ui = custom_ui(config=self.config)
169 else:
170 if not self.config['interactive']:
171 log.debug('Auto-selecting first search result using BaseUI')
172 ui = BaseUI(config=self.config)
173 else:
174 log.debug('Interactively selecting show using ConsoleUI')
175 ui = ConsoleUI(config=self.config) # pylint: disable=redefined-variable-type
176
177 return ui.select_series(all_series)
178
179 def _set_show_data(self, sid, key, value):
180 """Set self.shows[sid] to a new Show instance, or sets the data."""
181 if sid not in self.shows:
182 self.shows[sid] = Show()
183 self.shows[sid].data[key] = value
184
185 def __repr__(self):
186 """Indexer representation, returning representation of all shows indexed."""
187 return str(self.shows)
188
189 def _set_item(self, sid, seas, ep, attrib, value): # pylint: disable=too-many-arguments
190 """Create a new episode, creating Show(), Season() and Episode()s as required.
191
192 Called by _get_show_data to populate show.
193 Since the nice-to-use tvdb[1][24]['name] interface
194 makes it impossible to do tvdb[1][24]['name] = "name"
195 and still be capable of checking if an episode exists
196 so we can raise tvdb_shownotfound, we have a slightly
197 less pretty method of setting items.. but since the API
198 is supposed to be read-only, this is the best way to
199 do it!
200 The problem is that calling tvdb[1][24]['episodename'] = "name"
201 calls __getitem__ on tvdb[1], there is no way to check if
202 tvdb.__dict__ should have a key "1" before we auto-create it
203 """
204 if sid not in self.shows:
205 self.shows[sid] = Show()
206 if seas not in self.shows[sid]:
207 self.shows[sid][seas] = Season(show=self.shows[sid])
208 if ep not in self.shows[sid][seas]:
209 self.shows[sid][seas][ep] = Episode(season=self.shows[sid][seas])
210 self.shows[sid][seas][ep][attrib] = value
211
212 def _save_images_by_type(self, image_type, series_id, images):
213 """
214 Save the highest rated images for a show by image type.
215
216 :param image_type: Image type being processed (e.g. `fanart`)
217 :param series: ID of series being processed
218 :param images: Images to be processed
219 """
220 def pop_stats(it, key):
221 """Get the population statistics for a key."""
222 values = list(gen_values_by_key(it, key))
223 num_values = len(values)
224 total = sum(values)
225 mean = total / num_values
226 std_dev = stats.population_standard_deviation(values)
227 return mean, std_dev, values
228
229 def result(item, threshold, mean):
230 """Calculate a score given a threshold and population mean."""
231 if not threshold:
232 threshold = 1 # Prevent division by zero
233 value = item['rating']
234 weight = item['ratingcount']
235 res_index = item['res_index']
236 score_rated = weights.bayesian(weight, value, threshold, mean)
237 weight_score = .5
238 weight_res = .5
239 score_weighted = weight_score * score_rated + weight_res * res_index
240 item['score_rated'] = score_rated
241 item['score_weighted'] = score_weighted
242 return score_weighted, value, weight, item
243
244 def format_result(item):
245 """Format result row for logging output."""
246 row = '{score:>10.3f} {rating:>10.3f} {votes:>6} {res:>15}\t{url}'
247 return row.format(
248 score=item[0],
249 rating=item[1],
250 votes=item[2],
251 res=item[3]['resolution'],
252 url=item[3]['_bannerpath'],
253 )
254
255 # Header for display of format results
256 column_header = '{:>10} {:>10} {:>6} {:>15}\t{}'.format(
257 'Score', 'Rating', 'Votes', 'Resolution', 'URL'
258 )
259
260 available_res = sorted(images.keys(), key=lambda x: int(x.split('x')[0]) * int(x.split('x')[1]))
261
262 # add resolution information to each image and flatten dict
263 merged_images = []
264 for resolution in images:
265 images_by_resolution = images[resolution]
266 for image in images_by_resolution.values():
267 image['resolution'] = resolution
268 image['res_index'] = available_res.index(resolution) + 1
269 # add all current resolution images to the merged list
270 merged_images.extend(images_by_resolution.values())
271 log.debug(
272 u'Found {x} {image}s at {res} ({res_index}) resolution for series {id}', {
273 'x': len(images_by_resolution),
274 'image': image_type,
275 'res': image['resolution'],
276 'res_index': image['res_index'],
277 'id': series_id,
278 }
279 )
280
281 # Get population statistics
282 num_items = len(merged_images)
283 log.debug(
284 u'Found {x} total {image}s for series {id}', {
285 'x': num_items,
286 'image': image_type,
287 'id': series_id,
288 }
289 )
290
291 # Get population rating statistics
292 rating_mean, rating_dev, ratings = pop_stats(merged_images, 'rating')
293
294 # Get population rating statistics
295 vote_mean, vote_dev, votes = pop_stats(merged_images, 'ratingcount')
296
297 # Set vote threshold to one standard deviation above the mean
298 # This would be the 84th percentile in a normal distribution
299 vote_threshold = vote_mean + vote_dev
300 log.debug(u'{image} threshold set to {x} votes',
301 {'image': image_type.capitalize(), 'x': vote_threshold})
302
303 # create a list of results
304 rated_images = (
305 result(image, vote_threshold, rating_mean)
306 for image in merged_images
307 )
308 # sort results by score
309 sorted_results = sorted(rated_images, key=itemgetter(0), reverse=True)
310 log.debug(
311 u'Weighted {image} results for series {id}:'
312 u'\n{header}'
313 u'\n{items}', {
314 'image': image_type,
315 'id': series_id,
316 'header': column_header,
317 'items': '\n'.join(
318 format_result(item)
319 for item in sorted_results
320 )
321 }
322 )
323 # filter only highest rated results
324 best_result = sorted_results[0]
325 best_results = [
326 item for item in sorted_results
327 if item[0] >= best_result[0]
328 ]
329 if len(best_results) > 1:
330 log.warning(
331 u'Multiple {image}s at highest weighted score for series {id}:'
332 u'\n{header}'
333 u'\n{results}', {
334 'image': image_type,
335 'id': series_id,
336 'header': column_header,
337 'results': '\n'.join(
338 format_result(item)
339 for item in best_results
340 )
341 }
342 )
343 img_score, img_rating, img_votes, img = best_result
344 img_url = img['_bannerpath']
345 img_res = img['resolution']
346 img_bay_score = img['score_rated']
347 log.info(
348 u'Selected {image} for series {id}'
349 u' (score={x}, score_bay={b}, rating={y}, votes={z}, res={r}): {url}', {
350 'image': image_type,
351 'id': series_id,
352 'x': img_score,
353 'b': img_bay_score,
354 'y': img_rating,
355 'z': img_votes,
356 'r': img_res,
357 'url': img_url,
358 }
359 )
360 log.debug(u'Full info for best {image} for series {id}: {info}',
361 {'image': image_type, 'id': series_id, 'info': img})
362
363 self._set_show_data(series_id, image_type, img_url)
364
365 def _save_images(self, series_id, images):
366 """
367 Save the highest rated images for the show.
368
369 :param series_id: The series ID
370 :param images: A nested mapping of image info
371 images[type][res][id] = image_info_mapping
372 type: image type such as `banner`, `poster`, etc
373 res: resolution such as `1024x768`, `original`, etc
374 id: the image id
375 """
376 image_types = 'banner', 'fanart', 'poster'
377
378 # Iterate through desired image types
379 for img_type in image_types:
380 try:
381 images_by_type = images[img_type]
382 except KeyError:
383 log.debug(
384 u'No {image}s found for {series}', {
385 'image': img_type,
386 'series': series_id,
387 }
388 )
389 continue
390
391 self._save_images_by_type(img_type, series_id, images_by_type)
392
393 def __getitem__(self, key):
394 """Handle tvdbv2_instance['seriesname'] calls. The dict index should be the show id."""
395 if isinstance(key, (integer_types, long)):
396 # Item is integer, treat as show id
397 if key not in self.shows:
398 self._get_show_data(key, self.config['language'])
399 return self.shows[key]
400
401 key = str(key).lower()
402 self.config['searchterm'] = key
403 selected_series = self._get_series(key)
404 if isinstance(selected_series, dict):
405 selected_series = [selected_series]
406
407 for show in selected_series:
408 for k, v in show.items():
409 self._set_show_data(show['id'], k, v)
410 return selected_series
411
412 def get_last_updated_series(self, from_time, weeks=1, filter_show_list=None):
413 """Retrieve a list with updated shows.
414
415 :param from_time: epoch timestamp, with the start date/time
416 :param weeks: number of weeks to get updates for.
417 :param filter_show_list: Optional list of show objects, to use for filtering the returned list.
418 """
419 raise IndexerSeasonUpdatesNotSupported("Method get_last_updated_series not implemented by this indexer")
420
421 def get_episodes_for_season(self, show_id, *args, **kwargs):
422 self._get_episodes(show_id, *args, **kwargs)
423 return self.shows[show_id]
424
425
426 class ShowContainer(dict):
427 """Simple dict that holds a series of Show instances."""
428
429 def __init__(self):
430 """Init for ShowContainer."""
431 dict.__init__(self)
432 self._stack = []
433 self._lastgc = time.time()
434
435 def __setitem__(self, key, value):
436 """Set ShowContainer attribut."""
437 self._stack.append(key)
438
439 # keep only the 100th latest results
440 if time.time() - self._lastgc > 20:
441 for o in self._stack[:-100]:
442 del self[o]
443
444 self._stack = self._stack[-100:]
445
446 self._lastgc = time.time()
447
448 super(ShowContainer, self).__setitem__(key, value)
449
450
451 class Show(dict):
452 """Hold a dict of seasons, and show data."""
453
454 def __init__(self):
455 """Init method of show dict."""
456 dict.__init__(self)
457 self.data = {}
458
459 def __repr__(self):
460 """Represent a Show object."""
461 return '<Show {0} (containing {1} seasons)>'.format(
462 self.data.get(u'seriesname', 'instance'),
463 len(self)
464 )
465
466 def __getattr__(self, key):
467 """Return Episode or Show-data."""
468 if key in self:
469 # Key is an episode, return it
470 return self[key]
471
472 if key in self.data:
473 # Non-numeric request is for show-data
474 return self.data[key]
475
476 raise AttributeError
477
478 def __getitem__(self, key):
479 """Return Episode or Show-data."""
480 if key in self:
481 # Key is an episode, return it
482 return dict.__getitem__(self, key)
483
484 if key in self.data:
485 # Non-numeric request is for show-data
486 return dict.__getitem__(self.data, key)
487
488 # Data wasn't found, raise appropriate error
489 if isinstance(key, integer_types) or key.isdigit():
490 # Episode number x was not found
491 raise IndexerSeasonNotFound('Could not find season {0!r}'.format(key))
492 else:
493 # If it's not numeric, it must be an attribute name, which
494 # doesn't exist, so attribute error.
495 raise IndexerAttributeNotFound('Cannot find attribute {0!r}'.format(key))
496
497 def aired_on(self, date):
498 """Search and return a list of episodes with the airdates."""
499 ret = self.search(str(date), 'firstaired')
500 if len(ret) == 0:
501 raise IndexerEpisodeNotFound('Could not find any episodes that aired on {0}'.format(date))
502 return ret
503
504 def search(self, term=None, key=None):
505 """Search all episodes in show.
506
507 Can search all data, or a specific key (for
508 example, episodename).
509 Always returns an array (can be empty). First index contains the first
510 match, and so on.
511
512 Each array index is an Episode() instance, so doing
513 search_results[0]['episodename'] will retrieve the episode name of the
514 first match.
515 """
516 results = []
517 for cur_season in self.values():
518 searchresult = cur_season.search(term=term, key=key)
519 if len(searchresult) != 0:
520 results.extend(searchresult)
521
522 return results
523
524
525 class Season(dict):
526 """Hold all Seasons instances for a show."""
527
528 def __init__(self, show=None): # pylint: disable=super-init-not-called
529 """Show attribute points to the parent show."""
530 self.show = show
531
532 def __repr__(self):
533 """Representation of a season object."""
534 return '<Season instance (containing {0} episodes)>'.format(
535 len(self.keys())
536 )
537
538 def __getattr__(self, episode_number):
539 """Get an attribute by passing it as episode number."""
540 if episode_number in self:
541 return self[episode_number]
542 raise AttributeError
543
544 def __getitem__(self, episode_number):
545 """Get the episode dict by passing it as a dict key."""
546 if episode_number not in self:
547 raise IndexerEpisodeNotFound('Could not find episode {0!r}'.format(episode_number))
548 else:
549 return dict.__getitem__(self, episode_number)
550
551 def search(self, term=None, key=None):
552 """Search all episodes in season, returns a list of matching Episode instances.
553
554 >>> indexer_api = Tvdb()
555 >>> indexer_api['scrubs'][1].search('first day')
556 [<Episode 01x01 - My First Day>]
557 >>>
558
559 See Show.search documentation for further information on search
560
561 """
562 results = []
563 for ep in self.values():
564 searchresult = ep.search(term=term, key=key)
565 if searchresult is not None:
566 results.append(
567 searchresult
568 )
569 return results
570
571
572 class Episode(dict):
573 """Hold all episodes instances of a show."""
574
575 def __init__(self, season=None):
576 """Initialize class with season attribute that points to the parent season."""
577 self.season = season
578
579 def __repr__(self):
580 """Representation of an episode object."""
581 seasno = int(self.get(u'seasonnumber', 0))
582 epno = int(self.get(u'episodenumber', 0))
583 epname = self.get(u'episodename')
584 if epname:
585 return '<Episode {0:0>2}x{1:0>2} - {2}>'.format(seasno, epno, epname)
586 else:
587 return '<Episode {0:0>2}x{1:0>2}>'.format(seasno, epno)
588
589 def __getattr__(self, key):
590 """Get an attribute."""
591 if key in self:
592 return self[key]
593 raise AttributeError
594
595 def __getitem__(self, key):
596 """Get an attribute, by passing it as a key."""
597 try:
598 return dict.__getitem__(self, key)
599 except KeyError:
600 raise IndexerAttributeNotFound('Cannot find attribute {0!r}'.format(key))
601
602 def search(self, term=None, key=None):
603 """Search episode data for term, if it matches, return the Episode (self).
604
605 The key parameter can be used to limit the search to a specific element,
606 for example, episodename.
607
608 This primarily for use use by Show.search and Season.search. See
609 Show.search for further information on search
610
611 Simple example:
612
613 >>> e = Episode()
614 >>> e['episodename'] = "An Example"
615 >>> e.search("examp")
616 <Episode 00x00 - An Example>
617 >>>
618
619 Limiting by key:
620
621 >>> e.search("examp", key = "episodename")
622 <Episode 00x00 - An Example>
623 >>>
624 """
625 if term is None:
626 raise TypeError('must supply string to search for (contents)')
627
628 term = unicode(term).lower()
629 for cur_key, cur_value in self.items():
630 cur_key, cur_value = unicode(cur_key).lower(), unicode(cur_value).lower()
631 if key is not None and cur_key != key:
632 # Do not search this key
633 continue
634 if cur_value.find(unicode(term).lower()) > -1:
635 return self
636
637
638 class Actors(list):
639 """Hold all Actor instances for a show."""
640
641 pass
642
643
644 class Actor(dict):
645 """Represent a single actor.
646
647 Should contain:
648 id,
649 image,
650 name,
651 role,
652 sortorder
653 """
654
655 def __repr__(self):
656 """Representation of actor name."""
657 return '<Actor {0!r}>'.format(self.get('name'))
658
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/medusa/indexers/indexer_base.py b/medusa/indexers/indexer_base.py
--- a/medusa/indexers/indexer_base.py
+++ b/medusa/indexers/indexer_base.py
@@ -327,7 +327,7 @@
if item[0] >= best_result[0]
]
if len(best_results) > 1:
- log.warning(
+ log.debug(
u'Multiple {image}s at highest weighted score for series {id}:'
u'\n{header}'
u'\n{results}', {
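
The patch only lowers the severity of the existing log call, which is enough because — as the issue implies — warning-level records are surfaced as GUI notifications while debug-level records stay in the log. The snippet below is a generic stand-in built on the standard `logging` module (the real code goes through a `BraceAdapter`, and the GUI handler here is only an assumed stand-in for Medusa's notification handler), showing why the demoted call no longer produces an alert:

```python
import logging

log = logging.getLogger("SHOWQUEUE-REFRESH")
log.setLevel(logging.DEBUG)

# Stand-in for the normal application log: keeps everything.
app_log = logging.StreamHandler()
app_log.setLevel(logging.DEBUG)

# Assumed stand-in for the GUI notifier: only surfaces WARNING and above.
gui_notifier = logging.StreamHandler()
gui_notifier.setLevel(logging.WARNING)
gui_notifier.setFormatter(logging.Formatter("GUI ALERT: %(message)s"))

log.addHandler(app_log)
log.addHandler(gui_notifier)

msg = "Multiple posters at highest weighted score for series 328599"
log.warning(msg)  # pre-patch level: reaches both handlers, so the GUI nags
log.debug(msg)    # post-patch level: still logged, never reaches the notifier
```

The diagnostic table of tied posters therefore remains available when debug logging is enabled, but routine ties no longer raise a user-facing warning.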
| {"golden_diff": "diff --git a/medusa/indexers/indexer_base.py b/medusa/indexers/indexer_base.py\n--- a/medusa/indexers/indexer_base.py\n+++ b/medusa/indexers/indexer_base.py\n@@ -327,7 +327,7 @@\n if item[0] >= best_result[0]\n ]\n if len(best_results) > 1:\n- log.warning(\n+ log.debug(\n u'Multiple {image}s at highest weighted score for series {id}:'\n u'\\n{header}'\n u'\\n{results}', {\n", "issue": "Unnecessary warning for \"Multiple posters at highest weighted score\"\nI get these \"Multiple posters at highest weighted score\" warnings too often, and they are a nuisance since they are not important enough to worry about.\r\n\r\nShould be demoted to a level where it does not show up in the GUI as a warning.\r\n\r\n2018-02-26 14:06:30 WARNING SHOWQUEUE-REFRESH :: [ccbc0ce] Multiple posters at highest weighted score for series 328599:\r\n Score Rating Votes Resolution\tURL\r\n 4.355 10.000 1 680x1000\thttp://thetvdb.com/banners/posters/328599-6.jpg\r\n 4.355 10.000 1 680x1000\thttp://thetvdb.com/banners/posters/328599-8.jpg\r\n\n", "before_files": [{"content": "# coding=utf-8\n\n\"\"\"Base class for indexer api's.\"\"\"\n\nfrom __future__ import division\n\nimport getpass\nimport logging\nimport os\nimport tempfile\nimport time\nimport warnings\nfrom operator import itemgetter\n\nfrom medusa import statistics as stats\nfrom medusa.helpers.utils import gen_values_by_key\nfrom medusa.indexers.indexer_exceptions import (\n IndexerAttributeNotFound,\n IndexerEpisodeNotFound,\n IndexerSeasonNotFound,\n IndexerSeasonUpdatesNotSupported,\n IndexerShowNotFound,\n)\nfrom medusa.indexers.indexer_ui import BaseUI, ConsoleUI\nfrom medusa.logger.adapters.style import BraceAdapter\nfrom medusa.statistics import weights\n\nimport requests\nfrom six import integer_types\n\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass BaseIndexer(object):\n \"\"\"Base class for indexer api's.\"\"\"\n\n def __init__(self,\n interactive=False,\n select_first=False,\n debug=False,\n cache=True,\n episodes=True,\n banners=False,\n actors=False,\n custom_ui=None,\n language=None,\n search_all_languages=False,\n apikey=None,\n force_connect=False,\n use_zip=False,\n dvdorder=False,\n proxy=None,\n session=None,\n image_type=None): # pylint: disable=too-many-locals,too-many-arguments\n \"\"\"Pass these arguments on as args from the subclass.\"\"\"\n self.shows = ShowContainer() # Holds all Show classes\n self.corrections = {} # Holds show-name to show_id mapping\n\n self.config = {}\n\n self.config['debug_enabled'] = debug # show debugging messages\n\n self.config['custom_ui'] = custom_ui\n\n self.config['interactive'] = interactive # prompt for correct series?\n\n self.config['select_first'] = select_first\n\n self.config['search_all_languages'] = search_all_languages\n\n self.config['use_zip'] = use_zip\n\n self.config['dvdorder'] = dvdorder\n\n self.config['proxy'] = proxy\n\n if cache is True:\n self.config['cache_enabled'] = True\n self.config['cache_location'] = self._get_temp_dir()\n elif cache is False:\n self.config['cache_enabled'] = False\n elif isinstance(cache, basestring):\n self.config['cache_enabled'] = True\n self.config['cache_location'] = cache\n else:\n raise ValueError('Invalid value for Cache {0!r} (type was {1})'.format(cache, type(cache)))\n\n self.config['session'] = session if session else requests.Session()\n\n self.config['episodes_enabled'] = episodes\n self.config['banners_enabled'] = banners\n self.config['image_type'] = image_type\n 
self.config['actors_enabled'] = actors\n\n if self.config['debug_enabled']:\n warnings.warn('The debug argument to tvdbv2_api.__init__ will be removed in the next version. '\n 'To enable debug messages, use the following code before importing: '\n 'import logging; logging.basicConfig(level=logging.DEBUG)')\n logging.basicConfig(level=logging.DEBUG)\n\n # List of language from http://thetvdbv2.com/api/0629B785CE550C8D/languages.xml\n # Hard-coded here as it is realtively static, and saves another HTTP request, as\n # recommended on http://thetvdbv2.com/wiki/index.php/API:languages.xml\n self.config['valid_languages'] = [\n 'da', 'fi', 'nl', 'de', 'it', 'es', 'fr', 'pl', 'hu', 'el', 'tr',\n 'ru', 'he', 'ja', 'pt', 'zh', 'cs', 'sl', 'hr', 'ko', 'en', 'sv', 'no'\n ]\n\n # thetvdb.com should be based around numeric language codes,\n # but to link to a series like http://thetvdb.com/?tab=series&id=79349&lid=16\n # requires the language ID, thus this mapping is required (mainly\n # for usage in tvdb_ui - internally tvdb_api will use the language abbreviations)\n self.config['langabbv_to_id'] = {'el': 20, 'en': 7, 'zh': 27,\n 'it': 15, 'cs': 28, 'es': 16, 'ru': 22, 'nl': 13, 'pt': 26, 'no': 9,\n 'tr': 21, 'pl': 18, 'fr': 17, 'hr': 31, 'de': 14, 'da': 10, 'fi': 11,\n 'hu': 19, 'ja': 25, 'he': 24, 'ko': 32, 'sv': 8, 'sl': 30}\n\n if language is None:\n self.config['language'] = 'en'\n else:\n if language not in self.config['valid_languages']:\n raise ValueError('Invalid language {0}, options are: {1}'.format(\n language, self.config['valid_languages']\n ))\n else:\n self.config['language'] = language\n\n def _get_temp_dir(self): # pylint: disable=no-self-use\n \"\"\"Return the [system temp dir]/tvdb_api-u501 (or tvdb_api-myuser).\"\"\"\n if hasattr(os, 'getuid'):\n uid = 'u{0}'.format(os.getuid()) # pylint: disable=no-member\n else:\n # For Windows\n try:\n uid = getpass.getuser()\n except ImportError:\n return os.path.join(tempfile.gettempdir(), 'tvdbv2_api')\n\n return os.path.join(tempfile.gettempdir(), 'tvdbv2_api-{0}'.format(uid))\n\n def _get_show_data(self, sid, language):\n \"\"\"Return dummy _get_show_data method.\"\"\"\n return None\n\n def _get_series(self, series):\n \"\"\"Search themoviedb.org for the series name.\n\n If a custom_ui UI is configured, it uses this to select the correct\n series. 
If not, and interactive == True, ConsoleUI is used, if not\n BaseUI is used to select the first result.\n\n :param series: the query for the series name\n :return: A list of series mapped to a UI (for example: a BaseUi or CustomUI).\n \"\"\"\n all_series = self.search(series)\n if not all_series:\n log.debug('Series result returned zero')\n IndexerShowNotFound('Show search returned zero results (cannot find show on Indexer)')\n\n if not isinstance(all_series, list):\n all_series = [all_series]\n\n if self.config['custom_ui'] is not None:\n log.debug('Using custom UI {0!r}', self.config['custom_ui'])\n custom_ui = self.config['custom_ui']\n ui = custom_ui(config=self.config)\n else:\n if not self.config['interactive']:\n log.debug('Auto-selecting first search result using BaseUI')\n ui = BaseUI(config=self.config)\n else:\n log.debug('Interactively selecting show using ConsoleUI')\n ui = ConsoleUI(config=self.config) # pylint: disable=redefined-variable-type\n\n return ui.select_series(all_series)\n\n def _set_show_data(self, sid, key, value):\n \"\"\"Set self.shows[sid] to a new Show instance, or sets the data.\"\"\"\n if sid not in self.shows:\n self.shows[sid] = Show()\n self.shows[sid].data[key] = value\n\n def __repr__(self):\n \"\"\"Indexer representation, returning representation of all shows indexed.\"\"\"\n return str(self.shows)\n\n def _set_item(self, sid, seas, ep, attrib, value): # pylint: disable=too-many-arguments\n \"\"\"Create a new episode, creating Show(), Season() and Episode()s as required.\n\n Called by _get_show_data to populate show.\n Since the nice-to-use tvdb[1][24]['name] interface\n makes it impossible to do tvdb[1][24]['name] = \"name\"\n and still be capable of checking if an episode exists\n so we can raise tvdb_shownotfound, we have a slightly\n less pretty method of setting items.. but since the API\n is supposed to be read-only, this is the best way to\n do it!\n The problem is that calling tvdb[1][24]['episodename'] = \"name\"\n calls __getitem__ on tvdb[1], there is no way to check if\n tvdb.__dict__ should have a key \"1\" before we auto-create it\n \"\"\"\n if sid not in self.shows:\n self.shows[sid] = Show()\n if seas not in self.shows[sid]:\n self.shows[sid][seas] = Season(show=self.shows[sid])\n if ep not in self.shows[sid][seas]:\n self.shows[sid][seas][ep] = Episode(season=self.shows[sid][seas])\n self.shows[sid][seas][ep][attrib] = value\n\n def _save_images_by_type(self, image_type, series_id, images):\n \"\"\"\n Save the highest rated images for a show by image type.\n\n :param image_type: Image type being processed (e.g. 
`fanart`)\n :param series: ID of series being processed\n :param images: Images to be processed\n \"\"\"\n def pop_stats(it, key):\n \"\"\"Get the population statistics for a key.\"\"\"\n values = list(gen_values_by_key(it, key))\n num_values = len(values)\n total = sum(values)\n mean = total / num_values\n std_dev = stats.population_standard_deviation(values)\n return mean, std_dev, values\n\n def result(item, threshold, mean):\n \"\"\"Calculate a score given a threshold and population mean.\"\"\"\n if not threshold:\n threshold = 1 # Prevent division by zero\n value = item['rating']\n weight = item['ratingcount']\n res_index = item['res_index']\n score_rated = weights.bayesian(weight, value, threshold, mean)\n weight_score = .5\n weight_res = .5\n score_weighted = weight_score * score_rated + weight_res * res_index\n item['score_rated'] = score_rated\n item['score_weighted'] = score_weighted\n return score_weighted, value, weight, item\n\n def format_result(item):\n \"\"\"Format result row for logging output.\"\"\"\n row = '{score:>10.3f} {rating:>10.3f} {votes:>6} {res:>15}\\t{url}'\n return row.format(\n score=item[0],\n rating=item[1],\n votes=item[2],\n res=item[3]['resolution'],\n url=item[3]['_bannerpath'],\n )\n\n # Header for display of format results\n column_header = '{:>10} {:>10} {:>6} {:>15}\\t{}'.format(\n 'Score', 'Rating', 'Votes', 'Resolution', 'URL'\n )\n\n available_res = sorted(images.keys(), key=lambda x: int(x.split('x')[0]) * int(x.split('x')[1]))\n\n # add resolution information to each image and flatten dict\n merged_images = []\n for resolution in images:\n images_by_resolution = images[resolution]\n for image in images_by_resolution.values():\n image['resolution'] = resolution\n image['res_index'] = available_res.index(resolution) + 1\n # add all current resolution images to the merged list\n merged_images.extend(images_by_resolution.values())\n log.debug(\n u'Found {x} {image}s at {res} ({res_index}) resolution for series {id}', {\n 'x': len(images_by_resolution),\n 'image': image_type,\n 'res': image['resolution'],\n 'res_index': image['res_index'],\n 'id': series_id,\n }\n )\n\n # Get population statistics\n num_items = len(merged_images)\n log.debug(\n u'Found {x} total {image}s for series {id}', {\n 'x': num_items,\n 'image': image_type,\n 'id': series_id,\n }\n )\n\n # Get population rating statistics\n rating_mean, rating_dev, ratings = pop_stats(merged_images, 'rating')\n\n # Get population rating statistics\n vote_mean, vote_dev, votes = pop_stats(merged_images, 'ratingcount')\n\n # Set vote threshold to one standard deviation above the mean\n # This would be the 84th percentile in a normal distribution\n vote_threshold = vote_mean + vote_dev\n log.debug(u'{image} threshold set to {x} votes',\n {'image': image_type.capitalize(), 'x': vote_threshold})\n\n # create a list of results\n rated_images = (\n result(image, vote_threshold, rating_mean)\n for image in merged_images\n )\n # sort results by score\n sorted_results = sorted(rated_images, key=itemgetter(0), reverse=True)\n log.debug(\n u'Weighted {image} results for series {id}:'\n u'\\n{header}'\n u'\\n{items}', {\n 'image': image_type,\n 'id': series_id,\n 'header': column_header,\n 'items': '\\n'.join(\n format_result(item)\n for item in sorted_results\n )\n }\n )\n # filter only highest rated results\n best_result = sorted_results[0]\n best_results = [\n item for item in sorted_results\n if item[0] >= best_result[0]\n ]\n if len(best_results) > 1:\n log.warning(\n u'Multiple {image}s at highest 
weighted score for series {id}:'\n u'\\n{header}'\n u'\\n{results}', {\n 'image': image_type,\n 'id': series_id,\n 'header': column_header,\n 'results': '\\n'.join(\n format_result(item)\n for item in best_results\n )\n }\n )\n img_score, img_rating, img_votes, img = best_result\n img_url = img['_bannerpath']\n img_res = img['resolution']\n img_bay_score = img['score_rated']\n log.info(\n u'Selected {image} for series {id}'\n u' (score={x}, score_bay={b}, rating={y}, votes={z}, res={r}): {url}', {\n 'image': image_type,\n 'id': series_id,\n 'x': img_score,\n 'b': img_bay_score,\n 'y': img_rating,\n 'z': img_votes,\n 'r': img_res,\n 'url': img_url,\n }\n )\n log.debug(u'Full info for best {image} for series {id}: {info}',\n {'image': image_type, 'id': series_id, 'info': img})\n\n self._set_show_data(series_id, image_type, img_url)\n\n def _save_images(self, series_id, images):\n \"\"\"\n Save the highest rated images for the show.\n\n :param series_id: The series ID\n :param images: A nested mapping of image info\n images[type][res][id] = image_info_mapping\n type: image type such as `banner`, `poster`, etc\n res: resolution such as `1024x768`, `original`, etc\n id: the image id\n \"\"\"\n image_types = 'banner', 'fanart', 'poster'\n\n # Iterate through desired image types\n for img_type in image_types:\n try:\n images_by_type = images[img_type]\n except KeyError:\n log.debug(\n u'No {image}s found for {series}', {\n 'image': img_type,\n 'series': series_id,\n }\n )\n continue\n\n self._save_images_by_type(img_type, series_id, images_by_type)\n\n def __getitem__(self, key):\n \"\"\"Handle tvdbv2_instance['seriesname'] calls. The dict index should be the show id.\"\"\"\n if isinstance(key, (integer_types, long)):\n # Item is integer, treat as show id\n if key not in self.shows:\n self._get_show_data(key, self.config['language'])\n return self.shows[key]\n\n key = str(key).lower()\n self.config['searchterm'] = key\n selected_series = self._get_series(key)\n if isinstance(selected_series, dict):\n selected_series = [selected_series]\n\n for show in selected_series:\n for k, v in show.items():\n self._set_show_data(show['id'], k, v)\n return selected_series\n\n def get_last_updated_series(self, from_time, weeks=1, filter_show_list=None):\n \"\"\"Retrieve a list with updated shows.\n\n :param from_time: epoch timestamp, with the start date/time\n :param weeks: number of weeks to get updates for.\n :param filter_show_list: Optional list of show objects, to use for filtering the returned list.\n \"\"\"\n raise IndexerSeasonUpdatesNotSupported(\"Method get_last_updated_series not implemented by this indexer\")\n\n def get_episodes_for_season(self, show_id, *args, **kwargs):\n self._get_episodes(show_id, *args, **kwargs)\n return self.shows[show_id]\n\n\nclass ShowContainer(dict):\n \"\"\"Simple dict that holds a series of Show instances.\"\"\"\n\n def __init__(self):\n \"\"\"Init for ShowContainer.\"\"\"\n dict.__init__(self)\n self._stack = []\n self._lastgc = time.time()\n\n def __setitem__(self, key, value):\n \"\"\"Set ShowContainer attribut.\"\"\"\n self._stack.append(key)\n\n # keep only the 100th latest results\n if time.time() - self._lastgc > 20:\n for o in self._stack[:-100]:\n del self[o]\n\n self._stack = self._stack[-100:]\n\n self._lastgc = time.time()\n\n super(ShowContainer, self).__setitem__(key, value)\n\n\nclass Show(dict):\n \"\"\"Hold a dict of seasons, and show data.\"\"\"\n\n def __init__(self):\n \"\"\"Init method of show dict.\"\"\"\n dict.__init__(self)\n self.data = 
{}\n\n def __repr__(self):\n \"\"\"Represent a Show object.\"\"\"\n return '<Show {0} (containing {1} seasons)>'.format(\n self.data.get(u'seriesname', 'instance'),\n len(self)\n )\n\n def __getattr__(self, key):\n \"\"\"Return Episode or Show-data.\"\"\"\n if key in self:\n # Key is an episode, return it\n return self[key]\n\n if key in self.data:\n # Non-numeric request is for show-data\n return self.data[key]\n\n raise AttributeError\n\n def __getitem__(self, key):\n \"\"\"Return Episode or Show-data.\"\"\"\n if key in self:\n # Key is an episode, return it\n return dict.__getitem__(self, key)\n\n if key in self.data:\n # Non-numeric request is for show-data\n return dict.__getitem__(self.data, key)\n\n # Data wasn't found, raise appropriate error\n if isinstance(key, integer_types) or key.isdigit():\n # Episode number x was not found\n raise IndexerSeasonNotFound('Could not find season {0!r}'.format(key))\n else:\n # If it's not numeric, it must be an attribute name, which\n # doesn't exist, so attribute error.\n raise IndexerAttributeNotFound('Cannot find attribute {0!r}'.format(key))\n\n def aired_on(self, date):\n \"\"\"Search and return a list of episodes with the airdates.\"\"\"\n ret = self.search(str(date), 'firstaired')\n if len(ret) == 0:\n raise IndexerEpisodeNotFound('Could not find any episodes that aired on {0}'.format(date))\n return ret\n\n def search(self, term=None, key=None):\n \"\"\"Search all episodes in show.\n\n Can search all data, or a specific key (for\n example, episodename).\n Always returns an array (can be empty). First index contains the first\n match, and so on.\n\n Each array index is an Episode() instance, so doing\n search_results[0]['episodename'] will retrieve the episode name of the\n first match.\n \"\"\"\n results = []\n for cur_season in self.values():\n searchresult = cur_season.search(term=term, key=key)\n if len(searchresult) != 0:\n results.extend(searchresult)\n\n return results\n\n\nclass Season(dict):\n \"\"\"Hold all Seasons instances for a show.\"\"\"\n\n def __init__(self, show=None): # pylint: disable=super-init-not-called\n \"\"\"Show attribute points to the parent show.\"\"\"\n self.show = show\n\n def __repr__(self):\n \"\"\"Representation of a season object.\"\"\"\n return '<Season instance (containing {0} episodes)>'.format(\n len(self.keys())\n )\n\n def __getattr__(self, episode_number):\n \"\"\"Get an attribute by passing it as episode number.\"\"\"\n if episode_number in self:\n return self[episode_number]\n raise AttributeError\n\n def __getitem__(self, episode_number):\n \"\"\"Get the episode dict by passing it as a dict key.\"\"\"\n if episode_number not in self:\n raise IndexerEpisodeNotFound('Could not find episode {0!r}'.format(episode_number))\n else:\n return dict.__getitem__(self, episode_number)\n\n def search(self, term=None, key=None):\n \"\"\"Search all episodes in season, returns a list of matching Episode instances.\n\n >>> indexer_api = Tvdb()\n >>> indexer_api['scrubs'][1].search('first day')\n [<Episode 01x01 - My First Day>]\n >>>\n\n See Show.search documentation for further information on search\n\n \"\"\"\n results = []\n for ep in self.values():\n searchresult = ep.search(term=term, key=key)\n if searchresult is not None:\n results.append(\n searchresult\n )\n return results\n\n\nclass Episode(dict):\n \"\"\"Hold all episodes instances of a show.\"\"\"\n\n def __init__(self, season=None):\n \"\"\"Initialize class with season attribute that points to the parent season.\"\"\"\n self.season = season\n\n def 
__repr__(self):\n \"\"\"Representation of an episode object.\"\"\"\n seasno = int(self.get(u'seasonnumber', 0))\n epno = int(self.get(u'episodenumber', 0))\n epname = self.get(u'episodename')\n if epname:\n return '<Episode {0:0>2}x{1:0>2} - {2}>'.format(seasno, epno, epname)\n else:\n return '<Episode {0:0>2}x{1:0>2}>'.format(seasno, epno)\n\n def __getattr__(self, key):\n \"\"\"Get an attribute.\"\"\"\n if key in self:\n return self[key]\n raise AttributeError\n\n def __getitem__(self, key):\n \"\"\"Get an attribute, by passing it as a key.\"\"\"\n try:\n return dict.__getitem__(self, key)\n except KeyError:\n raise IndexerAttributeNotFound('Cannot find attribute {0!r}'.format(key))\n\n def search(self, term=None, key=None):\n \"\"\"Search episode data for term, if it matches, return the Episode (self).\n\n The key parameter can be used to limit the search to a specific element,\n for example, episodename.\n\n This primarily for use use by Show.search and Season.search. See\n Show.search for further information on search\n\n Simple example:\n\n >>> e = Episode()\n >>> e['episodename'] = \"An Example\"\n >>> e.search(\"examp\")\n <Episode 00x00 - An Example>\n >>>\n\n Limiting by key:\n\n >>> e.search(\"examp\", key = \"episodename\")\n <Episode 00x00 - An Example>\n >>>\n \"\"\"\n if term is None:\n raise TypeError('must supply string to search for (contents)')\n\n term = unicode(term).lower()\n for cur_key, cur_value in self.items():\n cur_key, cur_value = unicode(cur_key).lower(), unicode(cur_value).lower()\n if key is not None and cur_key != key:\n # Do not search this key\n continue\n if cur_value.find(unicode(term).lower()) > -1:\n return self\n\n\nclass Actors(list):\n \"\"\"Hold all Actor instances for a show.\"\"\"\n\n pass\n\n\nclass Actor(dict):\n \"\"\"Represent a single actor.\n\n Should contain:\n id,\n image,\n name,\n role,\n sortorder\n \"\"\"\n\n def __repr__(self):\n \"\"\"Representation of actor name.\"\"\"\n return '<Actor {0!r}>'.format(self.get('name'))\n", "path": "medusa/indexers/indexer_base.py"}], "after_files": [{"content": "# coding=utf-8\n\n\"\"\"Base class for indexer api's.\"\"\"\n\nfrom __future__ import division\n\nimport getpass\nimport logging\nimport os\nimport tempfile\nimport time\nimport warnings\nfrom operator import itemgetter\n\nfrom medusa import statistics as stats\nfrom medusa.helpers.utils import gen_values_by_key\nfrom medusa.indexers.indexer_exceptions import (\n IndexerAttributeNotFound,\n IndexerEpisodeNotFound,\n IndexerSeasonNotFound,\n IndexerSeasonUpdatesNotSupported,\n IndexerShowNotFound,\n)\nfrom medusa.indexers.indexer_ui import BaseUI, ConsoleUI\nfrom medusa.logger.adapters.style import BraceAdapter\nfrom medusa.statistics import weights\n\nimport requests\nfrom six import integer_types\n\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass BaseIndexer(object):\n \"\"\"Base class for indexer api's.\"\"\"\n\n def __init__(self,\n interactive=False,\n select_first=False,\n debug=False,\n cache=True,\n episodes=True,\n banners=False,\n actors=False,\n custom_ui=None,\n language=None,\n search_all_languages=False,\n apikey=None,\n force_connect=False,\n use_zip=False,\n dvdorder=False,\n proxy=None,\n session=None,\n image_type=None): # pylint: disable=too-many-locals,too-many-arguments\n \"\"\"Pass these arguments on as args from the subclass.\"\"\"\n self.shows = ShowContainer() # Holds all Show classes\n self.corrections = {} # Holds show-name to show_id mapping\n\n 
self.config = {}\n\n self.config['debug_enabled'] = debug # show debugging messages\n\n self.config['custom_ui'] = custom_ui\n\n self.config['interactive'] = interactive # prompt for correct series?\n\n self.config['select_first'] = select_first\n\n self.config['search_all_languages'] = search_all_languages\n\n self.config['use_zip'] = use_zip\n\n self.config['dvdorder'] = dvdorder\n\n self.config['proxy'] = proxy\n\n if cache is True:\n self.config['cache_enabled'] = True\n self.config['cache_location'] = self._get_temp_dir()\n elif cache is False:\n self.config['cache_enabled'] = False\n elif isinstance(cache, basestring):\n self.config['cache_enabled'] = True\n self.config['cache_location'] = cache\n else:\n raise ValueError('Invalid value for Cache {0!r} (type was {1})'.format(cache, type(cache)))\n\n self.config['session'] = session if session else requests.Session()\n\n self.config['episodes_enabled'] = episodes\n self.config['banners_enabled'] = banners\n self.config['image_type'] = image_type\n self.config['actors_enabled'] = actors\n\n if self.config['debug_enabled']:\n warnings.warn('The debug argument to tvdbv2_api.__init__ will be removed in the next version. '\n 'To enable debug messages, use the following code before importing: '\n 'import logging; logging.basicConfig(level=logging.DEBUG)')\n logging.basicConfig(level=logging.DEBUG)\n\n # List of language from http://thetvdbv2.com/api/0629B785CE550C8D/languages.xml\n # Hard-coded here as it is realtively static, and saves another HTTP request, as\n # recommended on http://thetvdbv2.com/wiki/index.php/API:languages.xml\n self.config['valid_languages'] = [\n 'da', 'fi', 'nl', 'de', 'it', 'es', 'fr', 'pl', 'hu', 'el', 'tr',\n 'ru', 'he', 'ja', 'pt', 'zh', 'cs', 'sl', 'hr', 'ko', 'en', 'sv', 'no'\n ]\n\n # thetvdb.com should be based around numeric language codes,\n # but to link to a series like http://thetvdb.com/?tab=series&id=79349&lid=16\n # requires the language ID, thus this mapping is required (mainly\n # for usage in tvdb_ui - internally tvdb_api will use the language abbreviations)\n self.config['langabbv_to_id'] = {'el': 20, 'en': 7, 'zh': 27,\n 'it': 15, 'cs': 28, 'es': 16, 'ru': 22, 'nl': 13, 'pt': 26, 'no': 9,\n 'tr': 21, 'pl': 18, 'fr': 17, 'hr': 31, 'de': 14, 'da': 10, 'fi': 11,\n 'hu': 19, 'ja': 25, 'he': 24, 'ko': 32, 'sv': 8, 'sl': 30}\n\n if language is None:\n self.config['language'] = 'en'\n else:\n if language not in self.config['valid_languages']:\n raise ValueError('Invalid language {0}, options are: {1}'.format(\n language, self.config['valid_languages']\n ))\n else:\n self.config['language'] = language\n\n def _get_temp_dir(self): # pylint: disable=no-self-use\n \"\"\"Return the [system temp dir]/tvdb_api-u501 (or tvdb_api-myuser).\"\"\"\n if hasattr(os, 'getuid'):\n uid = 'u{0}'.format(os.getuid()) # pylint: disable=no-member\n else:\n # For Windows\n try:\n uid = getpass.getuser()\n except ImportError:\n return os.path.join(tempfile.gettempdir(), 'tvdbv2_api')\n\n return os.path.join(tempfile.gettempdir(), 'tvdbv2_api-{0}'.format(uid))\n\n def _get_show_data(self, sid, language):\n \"\"\"Return dummy _get_show_data method.\"\"\"\n return None\n\n def _get_series(self, series):\n \"\"\"Search themoviedb.org for the series name.\n\n If a custom_ui UI is configured, it uses this to select the correct\n series. 
If not, and interactive == True, ConsoleUI is used, if not\n BaseUI is used to select the first result.\n\n :param series: the query for the series name\n :return: A list of series mapped to a UI (for example: a BaseUi or CustomUI).\n \"\"\"\n all_series = self.search(series)\n if not all_series:\n log.debug('Series result returned zero')\n IndexerShowNotFound('Show search returned zero results (cannot find show on Indexer)')\n\n if not isinstance(all_series, list):\n all_series = [all_series]\n\n if self.config['custom_ui'] is not None:\n log.debug('Using custom UI {0!r}', self.config['custom_ui'])\n custom_ui = self.config['custom_ui']\n ui = custom_ui(config=self.config)\n else:\n if not self.config['interactive']:\n log.debug('Auto-selecting first search result using BaseUI')\n ui = BaseUI(config=self.config)\n else:\n log.debug('Interactively selecting show using ConsoleUI')\n ui = ConsoleUI(config=self.config) # pylint: disable=redefined-variable-type\n\n return ui.select_series(all_series)\n\n def _set_show_data(self, sid, key, value):\n \"\"\"Set self.shows[sid] to a new Show instance, or sets the data.\"\"\"\n if sid not in self.shows:\n self.shows[sid] = Show()\n self.shows[sid].data[key] = value\n\n def __repr__(self):\n \"\"\"Indexer representation, returning representation of all shows indexed.\"\"\"\n return str(self.shows)\n\n def _set_item(self, sid, seas, ep, attrib, value): # pylint: disable=too-many-arguments\n \"\"\"Create a new episode, creating Show(), Season() and Episode()s as required.\n\n Called by _get_show_data to populate show.\n Since the nice-to-use tvdb[1][24]['name] interface\n makes it impossible to do tvdb[1][24]['name] = \"name\"\n and still be capable of checking if an episode exists\n so we can raise tvdb_shownotfound, we have a slightly\n less pretty method of setting items.. but since the API\n is supposed to be read-only, this is the best way to\n do it!\n The problem is that calling tvdb[1][24]['episodename'] = \"name\"\n calls __getitem__ on tvdb[1], there is no way to check if\n tvdb.__dict__ should have a key \"1\" before we auto-create it\n \"\"\"\n if sid not in self.shows:\n self.shows[sid] = Show()\n if seas not in self.shows[sid]:\n self.shows[sid][seas] = Season(show=self.shows[sid])\n if ep not in self.shows[sid][seas]:\n self.shows[sid][seas][ep] = Episode(season=self.shows[sid][seas])\n self.shows[sid][seas][ep][attrib] = value\n\n def _save_images_by_type(self, image_type, series_id, images):\n \"\"\"\n Save the highest rated images for a show by image type.\n\n :param image_type: Image type being processed (e.g. 
`fanart`)\n :param series: ID of series being processed\n :param images: Images to be processed\n \"\"\"\n def pop_stats(it, key):\n \"\"\"Get the population statistics for a key.\"\"\"\n values = list(gen_values_by_key(it, key))\n num_values = len(values)\n total = sum(values)\n mean = total / num_values\n std_dev = stats.population_standard_deviation(values)\n return mean, std_dev, values\n\n def result(item, threshold, mean):\n \"\"\"Calculate a score given a threshold and population mean.\"\"\"\n if not threshold:\n threshold = 1 # Prevent division by zero\n value = item['rating']\n weight = item['ratingcount']\n res_index = item['res_index']\n score_rated = weights.bayesian(weight, value, threshold, mean)\n weight_score = .5\n weight_res = .5\n score_weighted = weight_score * score_rated + weight_res * res_index\n item['score_rated'] = score_rated\n item['score_weighted'] = score_weighted\n return score_weighted, value, weight, item\n\n def format_result(item):\n \"\"\"Format result row for logging output.\"\"\"\n row = '{score:>10.3f} {rating:>10.3f} {votes:>6} {res:>15}\\t{url}'\n return row.format(\n score=item[0],\n rating=item[1],\n votes=item[2],\n res=item[3]['resolution'],\n url=item[3]['_bannerpath'],\n )\n\n # Header for display of format results\n column_header = '{:>10} {:>10} {:>6} {:>15}\\t{}'.format(\n 'Score', 'Rating', 'Votes', 'Resolution', 'URL'\n )\n\n available_res = sorted(images.keys(), key=lambda x: int(x.split('x')[0]) * int(x.split('x')[1]))\n\n # add resolution information to each image and flatten dict\n merged_images = []\n for resolution in images:\n images_by_resolution = images[resolution]\n for image in images_by_resolution.values():\n image['resolution'] = resolution\n image['res_index'] = available_res.index(resolution) + 1\n # add all current resolution images to the merged list\n merged_images.extend(images_by_resolution.values())\n log.debug(\n u'Found {x} {image}s at {res} ({res_index}) resolution for series {id}', {\n 'x': len(images_by_resolution),\n 'image': image_type,\n 'res': image['resolution'],\n 'res_index': image['res_index'],\n 'id': series_id,\n }\n )\n\n # Get population statistics\n num_items = len(merged_images)\n log.debug(\n u'Found {x} total {image}s for series {id}', {\n 'x': num_items,\n 'image': image_type,\n 'id': series_id,\n }\n )\n\n # Get population rating statistics\n rating_mean, rating_dev, ratings = pop_stats(merged_images, 'rating')\n\n # Get population rating statistics\n vote_mean, vote_dev, votes = pop_stats(merged_images, 'ratingcount')\n\n # Set vote threshold to one standard deviation above the mean\n # This would be the 84th percentile in a normal distribution\n vote_threshold = vote_mean + vote_dev\n log.debug(u'{image} threshold set to {x} votes',\n {'image': image_type.capitalize(), 'x': vote_threshold})\n\n # create a list of results\n rated_images = (\n result(image, vote_threshold, rating_mean)\n for image in merged_images\n )\n # sort results by score\n sorted_results = sorted(rated_images, key=itemgetter(0), reverse=True)\n log.debug(\n u'Weighted {image} results for series {id}:'\n u'\\n{header}'\n u'\\n{items}', {\n 'image': image_type,\n 'id': series_id,\n 'header': column_header,\n 'items': '\\n'.join(\n format_result(item)\n for item in sorted_results\n )\n }\n )\n # filter only highest rated results\n best_result = sorted_results[0]\n best_results = [\n item for item in sorted_results\n if item[0] >= best_result[0]\n ]\n if len(best_results) > 1:\n log.debug(\n u'Multiple {image}s at highest 
weighted score for series {id}:'\n u'\\n{header}'\n u'\\n{results}', {\n 'image': image_type,\n 'id': series_id,\n 'header': column_header,\n 'results': '\\n'.join(\n format_result(item)\n for item in best_results\n )\n }\n )\n img_score, img_rating, img_votes, img = best_result\n img_url = img['_bannerpath']\n img_res = img['resolution']\n img_bay_score = img['score_rated']\n log.info(\n u'Selected {image} for series {id}'\n u' (score={x}, score_bay={b}, rating={y}, votes={z}, res={r}): {url}', {\n 'image': image_type,\n 'id': series_id,\n 'x': img_score,\n 'b': img_bay_score,\n 'y': img_rating,\n 'z': img_votes,\n 'r': img_res,\n 'url': img_url,\n }\n )\n log.debug(u'Full info for best {image} for series {id}: {info}',\n {'image': image_type, 'id': series_id, 'info': img})\n\n self._set_show_data(series_id, image_type, img_url)\n\n def _save_images(self, series_id, images):\n \"\"\"\n Save the highest rated images for the show.\n\n :param series_id: The series ID\n :param images: A nested mapping of image info\n images[type][res][id] = image_info_mapping\n type: image type such as `banner`, `poster`, etc\n res: resolution such as `1024x768`, `original`, etc\n id: the image id\n \"\"\"\n image_types = 'banner', 'fanart', 'poster'\n\n # Iterate through desired image types\n for img_type in image_types:\n try:\n images_by_type = images[img_type]\n except KeyError:\n log.debug(\n u'No {image}s found for {series}', {\n 'image': img_type,\n 'series': series_id,\n }\n )\n continue\n\n self._save_images_by_type(img_type, series_id, images_by_type)\n\n def __getitem__(self, key):\n \"\"\"Handle tvdbv2_instance['seriesname'] calls. The dict index should be the show id.\"\"\"\n if isinstance(key, (integer_types, long)):\n # Item is integer, treat as show id\n if key not in self.shows:\n self._get_show_data(key, self.config['language'])\n return self.shows[key]\n\n key = str(key).lower()\n self.config['searchterm'] = key\n selected_series = self._get_series(key)\n if isinstance(selected_series, dict):\n selected_series = [selected_series]\n\n for show in selected_series:\n for k, v in show.items():\n self._set_show_data(show['id'], k, v)\n return selected_series\n\n def get_last_updated_series(self, from_time, weeks=1, filter_show_list=None):\n \"\"\"Retrieve a list with updated shows.\n\n :param from_time: epoch timestamp, with the start date/time\n :param weeks: number of weeks to get updates for.\n :param filter_show_list: Optional list of show objects, to use for filtering the returned list.\n \"\"\"\n raise IndexerSeasonUpdatesNotSupported(\"Method get_last_updated_series not implemented by this indexer\")\n\n def get_episodes_for_season(self, show_id, *args, **kwargs):\n self._get_episodes(show_id, *args, **kwargs)\n return self.shows[show_id]\n\n\nclass ShowContainer(dict):\n \"\"\"Simple dict that holds a series of Show instances.\"\"\"\n\n def __init__(self):\n \"\"\"Init for ShowContainer.\"\"\"\n dict.__init__(self)\n self._stack = []\n self._lastgc = time.time()\n\n def __setitem__(self, key, value):\n \"\"\"Set ShowContainer attribut.\"\"\"\n self._stack.append(key)\n\n # keep only the 100th latest results\n if time.time() - self._lastgc > 20:\n for o in self._stack[:-100]:\n del self[o]\n\n self._stack = self._stack[-100:]\n\n self._lastgc = time.time()\n\n super(ShowContainer, self).__setitem__(key, value)\n\n\nclass Show(dict):\n \"\"\"Hold a dict of seasons, and show data.\"\"\"\n\n def __init__(self):\n \"\"\"Init method of show dict.\"\"\"\n dict.__init__(self)\n self.data = 
{}\n\n def __repr__(self):\n \"\"\"Represent a Show object.\"\"\"\n return '<Show {0} (containing {1} seasons)>'.format(\n self.data.get(u'seriesname', 'instance'),\n len(self)\n )\n\n def __getattr__(self, key):\n \"\"\"Return Episode or Show-data.\"\"\"\n if key in self:\n # Key is an episode, return it\n return self[key]\n\n if key in self.data:\n # Non-numeric request is for show-data\n return self.data[key]\n\n raise AttributeError\n\n def __getitem__(self, key):\n \"\"\"Return Episode or Show-data.\"\"\"\n if key in self:\n # Key is an episode, return it\n return dict.__getitem__(self, key)\n\n if key in self.data:\n # Non-numeric request is for show-data\n return dict.__getitem__(self.data, key)\n\n # Data wasn't found, raise appropriate error\n if isinstance(key, integer_types) or key.isdigit():\n # Episode number x was not found\n raise IndexerSeasonNotFound('Could not find season {0!r}'.format(key))\n else:\n # If it's not numeric, it must be an attribute name, which\n # doesn't exist, so attribute error.\n raise IndexerAttributeNotFound('Cannot find attribute {0!r}'.format(key))\n\n def aired_on(self, date):\n \"\"\"Search and return a list of episodes with the airdates.\"\"\"\n ret = self.search(str(date), 'firstaired')\n if len(ret) == 0:\n raise IndexerEpisodeNotFound('Could not find any episodes that aired on {0}'.format(date))\n return ret\n\n def search(self, term=None, key=None):\n \"\"\"Search all episodes in show.\n\n Can search all data, or a specific key (for\n example, episodename).\n Always returns an array (can be empty). First index contains the first\n match, and so on.\n\n Each array index is an Episode() instance, so doing\n search_results[0]['episodename'] will retrieve the episode name of the\n first match.\n \"\"\"\n results = []\n for cur_season in self.values():\n searchresult = cur_season.search(term=term, key=key)\n if len(searchresult) != 0:\n results.extend(searchresult)\n\n return results\n\n\nclass Season(dict):\n \"\"\"Hold all Seasons instances for a show.\"\"\"\n\n def __init__(self, show=None): # pylint: disable=super-init-not-called\n \"\"\"Show attribute points to the parent show.\"\"\"\n self.show = show\n\n def __repr__(self):\n \"\"\"Representation of a season object.\"\"\"\n return '<Season instance (containing {0} episodes)>'.format(\n len(self.keys())\n )\n\n def __getattr__(self, episode_number):\n \"\"\"Get an attribute by passing it as episode number.\"\"\"\n if episode_number in self:\n return self[episode_number]\n raise AttributeError\n\n def __getitem__(self, episode_number):\n \"\"\"Get the episode dict by passing it as a dict key.\"\"\"\n if episode_number not in self:\n raise IndexerEpisodeNotFound('Could not find episode {0!r}'.format(episode_number))\n else:\n return dict.__getitem__(self, episode_number)\n\n def search(self, term=None, key=None):\n \"\"\"Search all episodes in season, returns a list of matching Episode instances.\n\n >>> indexer_api = Tvdb()\n >>> indexer_api['scrubs'][1].search('first day')\n [<Episode 01x01 - My First Day>]\n >>>\n\n See Show.search documentation for further information on search\n\n \"\"\"\n results = []\n for ep in self.values():\n searchresult = ep.search(term=term, key=key)\n if searchresult is not None:\n results.append(\n searchresult\n )\n return results\n\n\nclass Episode(dict):\n \"\"\"Hold all episodes instances of a show.\"\"\"\n\n def __init__(self, season=None):\n \"\"\"Initialize class with season attribute that points to the parent season.\"\"\"\n self.season = season\n\n def 
__repr__(self):\n \"\"\"Representation of an episode object.\"\"\"\n seasno = int(self.get(u'seasonnumber', 0))\n epno = int(self.get(u'episodenumber', 0))\n epname = self.get(u'episodename')\n if epname:\n return '<Episode {0:0>2}x{1:0>2} - {2}>'.format(seasno, epno, epname)\n else:\n return '<Episode {0:0>2}x{1:0>2}>'.format(seasno, epno)\n\n def __getattr__(self, key):\n \"\"\"Get an attribute.\"\"\"\n if key in self:\n return self[key]\n raise AttributeError\n\n def __getitem__(self, key):\n \"\"\"Get an attribute, by passing it as a key.\"\"\"\n try:\n return dict.__getitem__(self, key)\n except KeyError:\n raise IndexerAttributeNotFound('Cannot find attribute {0!r}'.format(key))\n\n def search(self, term=None, key=None):\n \"\"\"Search episode data for term, if it matches, return the Episode (self).\n\n The key parameter can be used to limit the search to a specific element,\n for example, episodename.\n\n This primarily for use use by Show.search and Season.search. See\n Show.search for further information on search\n\n Simple example:\n\n >>> e = Episode()\n >>> e['episodename'] = \"An Example\"\n >>> e.search(\"examp\")\n <Episode 00x00 - An Example>\n >>>\n\n Limiting by key:\n\n >>> e.search(\"examp\", key = \"episodename\")\n <Episode 00x00 - An Example>\n >>>\n \"\"\"\n if term is None:\n raise TypeError('must supply string to search for (contents)')\n\n term = unicode(term).lower()\n for cur_key, cur_value in self.items():\n cur_key, cur_value = unicode(cur_key).lower(), unicode(cur_value).lower()\n if key is not None and cur_key != key:\n # Do not search this key\n continue\n if cur_value.find(unicode(term).lower()) > -1:\n return self\n\n\nclass Actors(list):\n \"\"\"Hold all Actor instances for a show.\"\"\"\n\n pass\n\n\nclass Actor(dict):\n \"\"\"Represent a single actor.\n\n Should contain:\n id,\n image,\n name,\n role,\n sortorder\n \"\"\"\n\n def __repr__(self):\n \"\"\"Representation of actor name.\"\"\"\n return '<Actor {0!r}>'.format(self.get('name'))\n", "path": "medusa/indexers/indexer_base.py"}]} |
gh_patches_debug_1236 | rasdani/github-patches | git_diff | PennyLaneAI__pennylane-3856 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug] Inconsistent ordering in datasets for lists of parameters
When a list of parameter values is passed, the returned list of datasets follows no canonical ordering and instead appears to be random:
```python
import pennylane as qml

bondlengths = ['0.5', '0.54', '0.58', '0.62']
data = qml.data.load("qchem", molname="H2", basis="STO-3G", bondlength=bondlengths)
print(data)
```
I would expect the following output:
```pycon
[<Dataset = description: qchem/H2/STO-3G/0.5, attributes: ['molecule', 'hamiltonian', ...]>,
<Dataset = description: qchem/H2/STO-3G/0.54, attributes: ['molecule', 'hamiltonian', ...]>,
<Dataset = description: qchem/H2/STO-3G/0.58, attributes: ['molecule', 'hamiltonian', ...]>,
<Dataset = description: qchem/H2/STO-3G/0.62, attributes: ['molecule', 'hamiltonian', ...]>]
```
but I actually obtain:
```pycon
[<Dataset = description: qchem/H2/STO-3G/0.54, attributes: ['molecule', 'hamiltonian', ...]>,
<Dataset = description: qchem/H2/STO-3G/0.5, attributes: ['molecule', 'hamiltonian', ...]>,
<Dataset = description: qchem/H2/STO-3G/0.58, attributes: ['molecule', 'hamiltonian', ...]>,
<Dataset = description: qchem/H2/STO-3G/0.62, attributes: ['molecule', 'hamiltonian', ...]>]
```
--- END ISSUE ---
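Editor's note (not part of the original issue): the symptom is consistent with the requested values being resolved through Python `set` operations in `_generate_folders` (see the file listing below), and set iteration order bears no relation to the order of the caller's list. A minimal, self-contained sketch of the effect and of an order-preserving alternative follows; the variable names are illustrative only and do not come from the PennyLane source:
```python
requested = ["0.5", "0.54", "0.58", "0.62"]          # caller-supplied order
available = {"0.5", "0.54", "0.58", "0.62", "1.0"}   # values known to the foldermap

# What the buggy path effectively does: the intersection is a set, so any
# iteration over it yields an arbitrary order relative to `requested`.
matched = set(requested).intersection(available)
print(list(matched))   # e.g. ['0.54', '0.5', '0.58', '0.62'] -- order not guaranteed

# Order-preserving alternative: filter the caller's list instead of iterating the set.
ordered = [value for value in requested if value in matched]
print(ordered)         # ['0.5', '0.54', '0.58', '0.62']
```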
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pennylane/data/data_manager.py`
Content:
```
1 # Copyright 2018-2022 Xanadu Quantum Technologies Inc.
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """
15 Contains the Dataset utility functions.
16 """
17 # pylint:disable=too-many-arguments,global-statement
18 from collections.abc import Iterable
19 from concurrent.futures import ThreadPoolExecutor, wait, FIRST_EXCEPTION
20 import os
21 from os.path import sep as pathsep
22 from time import sleep
23 from urllib.parse import quote
24
25 import requests
26 from pennylane.data.dataset import Dataset
27
28 S3_URL = "https://xanadu-quantum-datasets.s3.amazonaws.com"
29 FOLDERMAP_URL = f"{S3_URL}/foldermap.json"
30 DATA_STRUCT_URL = f"{S3_URL}/data_struct.json"
31
32 _foldermap = {}
33 _data_struct = {}
34
35
36 # pylint:disable=too-many-branches
37 def _format_details(param, details):
38 """Ensures each user-inputted parameter is a properly typed list.
39 Also provides custom support for certain parameters."""
40 if not isinstance(details, list):
41 details = [details]
42 if param == "layout":
43 # if a user inputs layout=[1,2], they wanted "1x2"
44 # note that the above conversion to a list of details wouldn't work as expected here
45 if all(isinstance(dim, int) for dim in details):
46 return ["x".join(map(str, details))]
47 # will turn [(1,2), [3,4], "5x6"] into ["1x2", "3x4", "5x6"]
48 for i, detail in enumerate(details):
49 if isinstance(detail, Iterable) and all(isinstance(dim, int) for dim in detail):
50 details[i] = "x".join(map(str, detail))
51 elif not isinstance(detail, str):
52 raise TypeError(
53 f"Invalid layout value of '{detail}'. Must be a string or a tuple of ints."
54 )
55 elif param == "bondlength":
56 for i, detail in enumerate(details):
57 if isinstance(detail, float):
58 details[i] = str(detail)
59 elif isinstance(detail, int):
60 details[i] = f"{detail:.1f}"
61 elif not isinstance(detail, str):
62 raise TypeError(f"Invalid bondlength '{detail}'. Must be a string, int or float.")
63 for detail in details:
64 if not isinstance(detail, str):
65 raise TypeError(f"Invalid type '{type(detail).__name__}' for parameter '{param}'")
66 return details
67
68
69 def _validate_params(data_name, description, attributes):
70 """Validate parameters for loading the data."""
71
72 data = _data_struct.get(data_name)
73 if not data:
74 raise ValueError(
75 f"Currently the hosted datasets are of types: {list(_data_struct)}, but got {data_name}."
76 )
77
78 if not isinstance(attributes, list):
79 raise TypeError(f"Arg 'attributes' should be a list, but got {type(attributes).__name__}.")
80
81 all_attributes = data["attributes"]
82 if not set(attributes).issubset(set(all_attributes)):
83 raise ValueError(
84 f"Supported key values for {data_name} are {all_attributes}, but got {attributes}."
85 )
86
87 params_needed = data["params"]
88 if set(description) != set(params_needed):
89 raise ValueError(
90 f"Supported parameter values for {data_name} are {params_needed}, but got {list(description)}."
91 )
92
93 def validate_structure(node, params_left):
94 """Recursively validates that all values in `description` exist in the dataset."""
95 param = params_left[0]
96 params_left = params_left[1:]
97 for detail in description[param]:
98 exc = None
99 if detail == "full":
100 if not params_left:
101 return None
102 for child in node.values():
103 exc = validate_structure(child, params_left)
104 elif detail not in node: # error: return the error message to be raised
105 return ValueError(
106 f"{param} value of '{detail}' is not available. Available values are {list(node)}"
107 )
108 elif params_left:
109 exc = validate_structure(node[detail], params_left)
110 if exc is not None:
111 return exc
112 return None
113
114 exc = validate_structure(_foldermap[data_name], params_needed)
115 if isinstance(exc, Exception):
116 raise exc # pylint:disable=raising-bad-type
117
118
119 def _refresh_foldermap():
120 """Refresh the foldermap from S3."""
121 global _foldermap
122 if _foldermap:
123 return
124 response = requests.get(FOLDERMAP_URL, timeout=5.0)
125 response.raise_for_status()
126 _foldermap = response.json()
127
128
129 def _refresh_data_struct():
130 """Refresh the data struct from S3."""
131 global _data_struct
132 if _data_struct:
133 return
134 response = requests.get(DATA_STRUCT_URL, timeout=5.0)
135 response.raise_for_status()
136 _data_struct = response.json()
137
138
139 def _fetch_and_save(filename, dest_folder):
140 """Download a single file from S3 and save it locally."""
141 webfile = filename if pathsep == "/" else filename.replace(pathsep, "/")
142 response = requests.get(f"{S3_URL}/{quote(webfile)}", timeout=5.0)
143 response.raise_for_status()
144 with open(os.path.join(dest_folder, filename), "wb") as f:
145 f.write(response.content)
146
147
148 def _s3_download(data_name, folders, attributes, dest_folder, force, num_threads):
149 """Download a file for each attribute from each folder to the specified destination.
150
151 Args:
152 data_name (str) : The type of the data required
153 folders (list) : A list of folders corresponding to S3 object prefixes
154 attributes (list) : A list to specify individual data elements that are required
155 dest_folder (str) : Path to the root folder where files should be saved
156 force (bool) : Whether data has to be downloaded even if it is still present
157 num_threads (int) : The maximum number of threads to spawn while downloading files
158 (1 thread per file)
159 """
160 files = []
161 for folder in folders:
162 local_folder = os.path.join(dest_folder, data_name, folder)
163 if not os.path.exists(local_folder):
164 os.makedirs(local_folder)
165
166 prefix = os.path.join(data_name, folder, f"{folder.replace(pathsep, '_')}_")
167 # TODO: consider combining files within a folder (switch to append)
168 files.extend([f"{prefix}{attr}.dat" for attr in attributes])
169
170 if not force:
171 start = len(dest_folder.rstrip(pathsep)) + 1
172 existing_files = {
173 os.path.join(path, name)[start:]
174 for path, _, local_files in os.walk(dest_folder)
175 for name in local_files
176 }
177 files = list(set(files) - existing_files)
178
179 with ThreadPoolExecutor(num_threads) as pool:
180 futures = [pool.submit(_fetch_and_save, f, dest_folder) for f in files]
181 results = wait(futures, return_when=FIRST_EXCEPTION)
182 for result in results.done:
183 if result.exception():
184 raise result.exception()
185
186
187 def _generate_folders(node, folders):
188 """Recursively generate and return a tree of all folder names below a node.
189
190 Args:
191 node (dict) : A sub-dict of the foldermap for which a list of sub-folders is generated
192 folders (list[list[str]]) : The ordered list of folder names requested.
193 The value ``["full"]`` will expand to all possible folders at that depth
194
195 Returns:
196 list[str]: The paths of files that should be fetched from S3
197 """
198
199 next_folders = folders[1:]
200 folders = set(node) if folders[0] == ["full"] else set(folders[0]).intersection(set(node))
201 return (
202 [
203 os.path.join(folder, child)
204 for folder in folders
205 for child in _generate_folders(node[folder], next_folders)
206 ]
207 if next_folders
208 else folders
209 )
210
211
212 def load(
213 data_name, attributes=None, lazy=False, folder_path="", force=False, num_threads=50, **params
214 ):
215 r"""Downloads the data if it is not already present in the directory and return it to user as a
216 :class:`~pennylane.data.Dataset` object. For the full list of available datasets, please see the
217 `datasets website <https://pennylane.ai/qml/datasets.html>`_.
218
219 Args:
220 data_name (str) : A string representing the type of data required such as `qchem`, `qpsin`, etc.
221 attributes (list) : An optional list to specify individual data element that are required
222 folder_path (str) : Path to the root folder where download takes place.
223 By default dataset folder will be created in the working directory
224 force (Bool) : Bool representing whether data has to be downloaded even if it is still present
225 num_threads (int) : The maximum number of threads to spawn while downloading files (1 thread per file)
226 params (kwargs) : Keyword arguments exactly matching the parameters required for the data type.
227 Note that these are not optional
228
229 Returns:
230 list[:class:`~pennylane.data.Dataset`]
231
232 .. warning::
233
234 PennyLane datasets use the ``dill`` module to compress, store, and read data. Since ``dill``
235 is built on the ``pickle`` module, we reproduce an important warning from the ``pickle``
236 module: it is possible to construct malicious pickle data which will execute arbitrary code
237 during unpickling. Never unpickle data that could have come from an untrusted source, or
238 that could have been tampered with.
239 """
240
241 _ = lazy
242
243 _refresh_foldermap()
244 _refresh_data_struct()
245 if not attributes:
246 attributes = ["full"]
247
248 description = {param: _format_details(param, details) for param, details in params.items()}
249 _validate_params(data_name, description, attributes)
250 if len(attributes) > 1 and "full" in attributes:
251 attributes = ["full"]
252 for key, val in description.items():
253 if len(val) > 1 and "full" in val:
254 description[key] = ["full"]
255
256 data = _data_struct[data_name]
257 directory_path = os.path.join(folder_path, "datasets")
258
259 folders = [description[param] for param in data["params"]]
260 all_folders = _generate_folders(_foldermap[data_name], folders)
261 _s3_download(data_name, all_folders, attributes, directory_path, force, num_threads)
262
263 data_files = []
264 docstring = data["docstr"]
265 for folder in all_folders:
266 real_folder = os.path.join(directory_path, data_name, folder)
267 data_files.append(
268 Dataset(data_name, real_folder, folder.replace(pathsep, "_"), docstring, standard=True)
269 )
270
271 return data_files
272
273
274 def _direc_to_dict(path):
275 r"""Helper function to create dictionary structure from directory path"""
276 for root, dirs, _ in os.walk(path):
277 if not dirs:
278 return None
279 tree = {x: _direc_to_dict(os.path.join(root, x)) for x in dirs}
280 return list(dirs) if all(x is None for x in tree.values()) else tree
281
282
283 def list_datasets(path=None):
284 r"""Returns a dictionary of the available datasets.
285
286 Return:
287 dict: Nested dictionary representing the directory structure of the hosted datasets.
288
289 **Example:**
290
291 Note that the results of calling this function may differ from this example as more datasets
292 are added. For updates on available data see the `datasets website <https://pennylane.ai/qml/datasets.html>`_.
293
294 .. code-block :: pycon
295
296 >>> qml.data.list_datasets()
297 {'qchem': {'H2': {'6-31G': ['0.5', '0.54', '0.58', ... '2.02', '2.06', '2.1'],
298 'STO-3G': ['0.5', '0.54', '0.58', ... '2.02', '2.06', '2.1']},
299 'HeH+': {'6-31G': ['0.5', '0.54', '0.58', ... '2.02', '2.06', '2.1'],
300 'STO-3G': ['0.5', '0.54', '0.58', ... '2.02', '2.06', '2.1']},
301 'LiH': {'STO-3G': ['0.5', '0.54', '0.58', ... '2.02', '2.06', '2.1']},
302 'OH-': {'STO-3G': ['0.5', '0.54', '0.58', ... '0.94', '0.98', '1.02']}},
303 'qspin': {'Heisenberg': {'closed': {'chain': ['1x16', '1x4', '1x8'],
304 'rectangular': ['2x2', '2x4', '2x8', '4x4']},
305 'open': {'chain': ['1x16', '1x4', '1x8'],
306 'rectangular': ['2x2', '2x4', '2x8', '4x4']}},
307 'Ising': {'closed': {'chain': ['1x16', '1x4', '1x8'],
308 'rectangular': ['2x2', '2x4', '2x8', '4x4']},
309 'open': {'chain': ['1x16', '1x4', '1x8'],
310 'rectangular': ['2x2', '2x4', '2x8', '4x4']}}}}
311 """
312
313 if path:
314 return _direc_to_dict(path)
315 _refresh_foldermap()
316 return _foldermap.copy()
317
318
319 def list_attributes(data_name):
320 r"""List the attributes that exist for a specific ``data_name``.
321
322 Args:
323 data_name (str): The type of the desired data
324
325 Returns:
326 list (str): A list of accepted attributes for a given data name
327 """
328 _refresh_data_struct()
329 if data_name not in _data_struct:
330 raise ValueError(
331 f"Currently the hosted datasets are of types: {list(_data_struct)}, but got {data_name}."
332 )
333 return _data_struct[data_name]["attributes"]
334
335
336 def _interactive_request_attributes(options):
337 """Prompt the user to select a list of attributes."""
338 prompt = "Please select attributes:"
339 for i, option in enumerate(options):
340 if option == "full":
341 option = "full (all attributes)"
342 prompt += f"\n\t{i+1}) {option}"
343 print(prompt)
344 choices = input(f"Choice (comma-separated list of options) [1-{len(options)}]: ").split(",")
345 try:
346 choices = list(map(int, choices))
347 except ValueError as e:
348 raise ValueError(f"Must enter a list of integers between 1 and {len(options)}") from e
349 if any(choice < 1 or choice > len(options) for choice in choices):
350 raise ValueError(f"Must enter a list of integers between 1 and {len(options)}")
351 return [options[choice - 1] for choice in choices]
352
353
354 def _interactive_request_single(node, param):
355 """Prompt the user to select a single option from a list."""
356 options = list(node)
357 if len(options) == 1:
358 print(f"Using {options[0]} as it is the only {param} available.")
359 sleep(1)
360 return options[0]
361 print(f"Please select a {param}:")
362 print("\n".join(f"\t{i+1}) {option}" for i, option in enumerate(options)))
363 try:
364 choice = int(input(f"Choice [1-{len(options)}]: "))
365 except ValueError as e:
366 raise ValueError(f"Must enter an integer between 1 and {len(options)}") from e
367 if choice < 1 or choice > len(options):
368 raise ValueError(f"Must enter an integer between 1 and {len(options)}")
369 return options[choice - 1]
370
371
372 def load_interactive():
373 r"""Download a dataset using an interactive load prompt.
374
375 Returns:
376 :class:`~pennylane.data.Dataset`
377
378 **Example**
379
380 .. code-block :: pycon
381
382 >>> qml.data.load_interactive()
383 Please select a data name:
384 1) qspin
385 2) qchem
386 Choice [1-2]: 1
387 Please select a sysname:
388 ...
389 Please select a periodicity:
390 ...
391 Please select a lattice:
392 ...
393 Please select a layout:
394 ...
395 Please select attributes:
396 ...
397 Force download files? (Default is no) [y/N]: N
398 Folder to download to? (Default is pwd, will download to /datasets subdirectory):
399
400 Please confirm your choices:
401 dataset: qspin/Ising/open/rectangular/4x4
402 attributes: ['parameters', 'ground_states']
403 force: False
404 dest folder: /Users/jovyan/Downloads/datasets
405 Would you like to continue? (Default is yes) [Y/n]:
406 <Dataset = description: qspin/Ising/open/rectangular/4x4, attributes: ['parameters', 'ground_states']>
407 """
408
409 _refresh_foldermap()
410 _refresh_data_struct()
411
412 node = _foldermap
413 data_name = _interactive_request_single(node, "data name")
414
415 description = {}
416 value = data_name
417
418 params = _data_struct[data_name]["params"]
419 for param in params:
420 node = node[value]
421 value = _interactive_request_single(node, param)
422 description[param] = value
423
424 attributes = _interactive_request_attributes(_data_struct[data_name]["attributes"])
425 force = input("Force download files? (Default is no) [y/N]: ") in ["y", "Y"]
426 dest_folder = input(
427 "Folder to download to? (Default is pwd, will download to /datasets subdirectory): "
428 )
429
430 print("\nPlease confirm your choices:")
431 print("dataset:", "/".join([data_name] + [description[param] for param in params]))
432 print("attributes:", attributes)
433 print("force:", force)
434 print("dest folder:", os.path.join(dest_folder, "datasets"))
435
436 approve = input("Would you like to continue? (Default is yes) [Y/n]: ")
437 if approve not in ["Y", "", "y"]:
438 print("Aborting and not downloading!")
439 return None
440 return load(
441 data_name, attributes=attributes, folder_path=dest_folder, force=force, **description
442 )[0]
443
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pennylane/data/data_manager.py b/pennylane/data/data_manager.py
--- a/pennylane/data/data_manager.py
+++ b/pennylane/data/data_manager.py
@@ -197,7 +197,11 @@
"""
next_folders = folders[1:]
- folders = set(node) if folders[0] == ["full"] else set(folders[0]).intersection(set(node))
+ if folders[0] == ["full"]:
+ folders = node
+ else:
+ values_for_this_node = set(folders[0]).intersection(set(node))
+ folders = [f for f in folders[0] if f in values_for_this_node]
return (
[
os.path.join(folder, child)
| {"golden_diff": "diff --git a/pennylane/data/data_manager.py b/pennylane/data/data_manager.py\n--- a/pennylane/data/data_manager.py\n+++ b/pennylane/data/data_manager.py\n@@ -197,7 +197,11 @@\n \"\"\"\n \n next_folders = folders[1:]\n- folders = set(node) if folders[0] == [\"full\"] else set(folders[0]).intersection(set(node))\n+ if folders[0] == [\"full\"]:\n+ folders = node\n+ else:\n+ values_for_this_node = set(folders[0]).intersection(set(node))\n+ folders = [f for f in folders[0] if f in values_for_this_node]\n return (\n [\n os.path.join(folder, child)\n", "issue": "[Bug] Inconsistent ordering in datasets for lists of parameters\nThe ordering of lists of datasets when passed lists of parameters is of no canonical ordering, but appears to be random:\r\n\r\n```python\r\nbondlengths = ['0.5', '0.54', '0.58', '0.62']\r\ndata = qml.data.load(\"qchem\", molname=\"H2\", basis=\"STO-3G\", bondlength=bondlengths)\r\nprint(data)\r\n```\r\nwould expect output:\r\n```pycon\r\n[<Dataset = description: qchem/H2/STO-3G/0.5, attributes: ['molecule', 'hamiltonian', ...]>, \r\n<Dataset = description: qchem/H2/STO-3G/0.54, attributes: ['molecule', 'hamiltonian', ...]>, \r\n<Dataset = description: qchem/H2/STO-3G/0.58, attributes: ['molecule', 'hamiltonian', ...]>, \r\n<Dataset = description: qchem/H2/STO-3G/0.62, attributes: ['molecule', 'hamiltonian', ...]>]\r\n```\r\nbut actually obtain:\r\n```pycon\r\n[<Dataset = description: qchem/H2/STO-3G/0.54, attributes: ['molecule', 'hamiltonian', ...]>, \r\n<Dataset = description: qchem/H2/STO-3G/0.5, attributes: ['molecule', 'hamiltonian', ...]>, \r\n<Dataset = description: qchem/H2/STO-3G/0.58, attributes: ['molecule', 'hamiltonian', ...]>, \r\n<Dataset = description: qchem/H2/STO-3G/0.62, attributes: ['molecule', 'hamiltonian', ...]>]\r\n```\n", "before_files": [{"content": "# Copyright 2018-2022 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nContains the Dataset utility functions.\n\"\"\"\n# pylint:disable=too-many-arguments,global-statement\nfrom collections.abc import Iterable\nfrom concurrent.futures import ThreadPoolExecutor, wait, FIRST_EXCEPTION\nimport os\nfrom os.path import sep as pathsep\nfrom time import sleep\nfrom urllib.parse import quote\n\nimport requests\nfrom pennylane.data.dataset import Dataset\n\nS3_URL = \"https://xanadu-quantum-datasets.s3.amazonaws.com\"\nFOLDERMAP_URL = f\"{S3_URL}/foldermap.json\"\nDATA_STRUCT_URL = f\"{S3_URL}/data_struct.json\"\n\n_foldermap = {}\n_data_struct = {}\n\n\n# pylint:disable=too-many-branches\ndef _format_details(param, details):\n \"\"\"Ensures each user-inputted parameter is a properly typed list.\n Also provides custom support for certain parameters.\"\"\"\n if not isinstance(details, list):\n details = [details]\n if param == \"layout\":\n # if a user inputs layout=[1,2], they wanted \"1x2\"\n # note that the above conversion to a list of details wouldn't work as expected here\n if all(isinstance(dim, int) for dim in details):\n return 
[\"x\".join(map(str, details))]\n # will turn [(1,2), [3,4], \"5x6\"] into [\"1x2\", \"3x4\", \"5x6\"]\n for i, detail in enumerate(details):\n if isinstance(detail, Iterable) and all(isinstance(dim, int) for dim in detail):\n details[i] = \"x\".join(map(str, detail))\n elif not isinstance(detail, str):\n raise TypeError(\n f\"Invalid layout value of '{detail}'. Must be a string or a tuple of ints.\"\n )\n elif param == \"bondlength\":\n for i, detail in enumerate(details):\n if isinstance(detail, float):\n details[i] = str(detail)\n elif isinstance(detail, int):\n details[i] = f\"{detail:.1f}\"\n elif not isinstance(detail, str):\n raise TypeError(f\"Invalid bondlength '{detail}'. Must be a string, int or float.\")\n for detail in details:\n if not isinstance(detail, str):\n raise TypeError(f\"Invalid type '{type(detail).__name__}' for parameter '{param}'\")\n return details\n\n\ndef _validate_params(data_name, description, attributes):\n \"\"\"Validate parameters for loading the data.\"\"\"\n\n data = _data_struct.get(data_name)\n if not data:\n raise ValueError(\n f\"Currently the hosted datasets are of types: {list(_data_struct)}, but got {data_name}.\"\n )\n\n if not isinstance(attributes, list):\n raise TypeError(f\"Arg 'attributes' should be a list, but got {type(attributes).__name__}.\")\n\n all_attributes = data[\"attributes\"]\n if not set(attributes).issubset(set(all_attributes)):\n raise ValueError(\n f\"Supported key values for {data_name} are {all_attributes}, but got {attributes}.\"\n )\n\n params_needed = data[\"params\"]\n if set(description) != set(params_needed):\n raise ValueError(\n f\"Supported parameter values for {data_name} are {params_needed}, but got {list(description)}.\"\n )\n\n def validate_structure(node, params_left):\n \"\"\"Recursively validates that all values in `description` exist in the dataset.\"\"\"\n param = params_left[0]\n params_left = params_left[1:]\n for detail in description[param]:\n exc = None\n if detail == \"full\":\n if not params_left:\n return None\n for child in node.values():\n exc = validate_structure(child, params_left)\n elif detail not in node: # error: return the error message to be raised\n return ValueError(\n f\"{param} value of '{detail}' is not available. 
Available values are {list(node)}\"\n )\n elif params_left:\n exc = validate_structure(node[detail], params_left)\n if exc is not None:\n return exc\n return None\n\n exc = validate_structure(_foldermap[data_name], params_needed)\n if isinstance(exc, Exception):\n raise exc # pylint:disable=raising-bad-type\n\n\ndef _refresh_foldermap():\n \"\"\"Refresh the foldermap from S3.\"\"\"\n global _foldermap\n if _foldermap:\n return\n response = requests.get(FOLDERMAP_URL, timeout=5.0)\n response.raise_for_status()\n _foldermap = response.json()\n\n\ndef _refresh_data_struct():\n \"\"\"Refresh the data struct from S3.\"\"\"\n global _data_struct\n if _data_struct:\n return\n response = requests.get(DATA_STRUCT_URL, timeout=5.0)\n response.raise_for_status()\n _data_struct = response.json()\n\n\ndef _fetch_and_save(filename, dest_folder):\n \"\"\"Download a single file from S3 and save it locally.\"\"\"\n webfile = filename if pathsep == \"/\" else filename.replace(pathsep, \"/\")\n response = requests.get(f\"{S3_URL}/{quote(webfile)}\", timeout=5.0)\n response.raise_for_status()\n with open(os.path.join(dest_folder, filename), \"wb\") as f:\n f.write(response.content)\n\n\ndef _s3_download(data_name, folders, attributes, dest_folder, force, num_threads):\n \"\"\"Download a file for each attribute from each folder to the specified destination.\n\n Args:\n data_name (str) : The type of the data required\n folders (list) : A list of folders corresponding to S3 object prefixes\n attributes (list) : A list to specify individual data elements that are required\n dest_folder (str) : Path to the root folder where files should be saved\n force (bool) : Whether data has to be downloaded even if it is still present\n num_threads (int) : The maximum number of threads to spawn while downloading files\n (1 thread per file)\n \"\"\"\n files = []\n for folder in folders:\n local_folder = os.path.join(dest_folder, data_name, folder)\n if not os.path.exists(local_folder):\n os.makedirs(local_folder)\n\n prefix = os.path.join(data_name, folder, f\"{folder.replace(pathsep, '_')}_\")\n # TODO: consider combining files within a folder (switch to append)\n files.extend([f\"{prefix}{attr}.dat\" for attr in attributes])\n\n if not force:\n start = len(dest_folder.rstrip(pathsep)) + 1\n existing_files = {\n os.path.join(path, name)[start:]\n for path, _, local_files in os.walk(dest_folder)\n for name in local_files\n }\n files = list(set(files) - existing_files)\n\n with ThreadPoolExecutor(num_threads) as pool:\n futures = [pool.submit(_fetch_and_save, f, dest_folder) for f in files]\n results = wait(futures, return_when=FIRST_EXCEPTION)\n for result in results.done:\n if result.exception():\n raise result.exception()\n\n\ndef _generate_folders(node, folders):\n \"\"\"Recursively generate and return a tree of all folder names below a node.\n\n Args:\n node (dict) : A sub-dict of the foldermap for which a list of sub-folders is generated\n folders (list[list[str]]) : The ordered list of folder names requested.\n The value ``[\"full\"]`` will expand to all possible folders at that depth\n\n Returns:\n list[str]: The paths of files that should be fetched from S3\n \"\"\"\n\n next_folders = folders[1:]\n folders = set(node) if folders[0] == [\"full\"] else set(folders[0]).intersection(set(node))\n return (\n [\n os.path.join(folder, child)\n for folder in folders\n for child in _generate_folders(node[folder], next_folders)\n ]\n if next_folders\n else folders\n )\n\n\ndef load(\n data_name, attributes=None, lazy=False, 
folder_path=\"\", force=False, num_threads=50, **params\n):\n r\"\"\"Downloads the data if it is not already present in the directory and return it to user as a\n :class:`~pennylane.data.Dataset` object. For the full list of available datasets, please see the\n `datasets website <https://pennylane.ai/qml/datasets.html>`_.\n\n Args:\n data_name (str) : A string representing the type of data required such as `qchem`, `qpsin`, etc.\n attributes (list) : An optional list to specify individual data element that are required\n folder_path (str) : Path to the root folder where download takes place.\n By default dataset folder will be created in the working directory\n force (Bool) : Bool representing whether data has to be downloaded even if it is still present\n num_threads (int) : The maximum number of threads to spawn while downloading files (1 thread per file)\n params (kwargs) : Keyword arguments exactly matching the parameters required for the data type.\n Note that these are not optional\n\n Returns:\n list[:class:`~pennylane.data.Dataset`]\n\n .. warning::\n\n PennyLane datasets use the ``dill`` module to compress, store, and read data. Since ``dill``\n is built on the ``pickle`` module, we reproduce an important warning from the ``pickle``\n module: it is possible to construct malicious pickle data which will execute arbitrary code\n during unpickling. Never unpickle data that could have come from an untrusted source, or\n that could have been tampered with.\n \"\"\"\n\n _ = lazy\n\n _refresh_foldermap()\n _refresh_data_struct()\n if not attributes:\n attributes = [\"full\"]\n\n description = {param: _format_details(param, details) for param, details in params.items()}\n _validate_params(data_name, description, attributes)\n if len(attributes) > 1 and \"full\" in attributes:\n attributes = [\"full\"]\n for key, val in description.items():\n if len(val) > 1 and \"full\" in val:\n description[key] = [\"full\"]\n\n data = _data_struct[data_name]\n directory_path = os.path.join(folder_path, \"datasets\")\n\n folders = [description[param] for param in data[\"params\"]]\n all_folders = _generate_folders(_foldermap[data_name], folders)\n _s3_download(data_name, all_folders, attributes, directory_path, force, num_threads)\n\n data_files = []\n docstring = data[\"docstr\"]\n for folder in all_folders:\n real_folder = os.path.join(directory_path, data_name, folder)\n data_files.append(\n Dataset(data_name, real_folder, folder.replace(pathsep, \"_\"), docstring, standard=True)\n )\n\n return data_files\n\n\ndef _direc_to_dict(path):\n r\"\"\"Helper function to create dictionary structure from directory path\"\"\"\n for root, dirs, _ in os.walk(path):\n if not dirs:\n return None\n tree = {x: _direc_to_dict(os.path.join(root, x)) for x in dirs}\n return list(dirs) if all(x is None for x in tree.values()) else tree\n\n\ndef list_datasets(path=None):\n r\"\"\"Returns a dictionary of the available datasets.\n\n Return:\n dict: Nested dictionary representing the directory structure of the hosted datasets.\n\n **Example:**\n\n Note that the results of calling this function may differ from this example as more datasets\n are added. For updates on available data see the `datasets website <https://pennylane.ai/qml/datasets.html>`_.\n\n .. code-block :: pycon\n\n >>> qml.data.list_datasets()\n {'qchem': {'H2': {'6-31G': ['0.5', '0.54', '0.58', ... '2.02', '2.06', '2.1'],\n 'STO-3G': ['0.5', '0.54', '0.58', ... '2.02', '2.06', '2.1']},\n 'HeH+': {'6-31G': ['0.5', '0.54', '0.58', ... 
'2.02', '2.06', '2.1'],\n 'STO-3G': ['0.5', '0.54', '0.58', ... '2.02', '2.06', '2.1']},\n 'LiH': {'STO-3G': ['0.5', '0.54', '0.58', ... '2.02', '2.06', '2.1']},\n 'OH-': {'STO-3G': ['0.5', '0.54', '0.58', ... '0.94', '0.98', '1.02']}},\n 'qspin': {'Heisenberg': {'closed': {'chain': ['1x16', '1x4', '1x8'],\n 'rectangular': ['2x2', '2x4', '2x8', '4x4']},\n 'open': {'chain': ['1x16', '1x4', '1x8'],\n 'rectangular': ['2x2', '2x4', '2x8', '4x4']}},\n 'Ising': {'closed': {'chain': ['1x16', '1x4', '1x8'],\n 'rectangular': ['2x2', '2x4', '2x8', '4x4']},\n 'open': {'chain': ['1x16', '1x4', '1x8'],\n 'rectangular': ['2x2', '2x4', '2x8', '4x4']}}}}\n \"\"\"\n\n if path:\n return _direc_to_dict(path)\n _refresh_foldermap()\n return _foldermap.copy()\n\n\ndef list_attributes(data_name):\n r\"\"\"List the attributes that exist for a specific ``data_name``.\n\n Args:\n data_name (str): The type of the desired data\n\n Returns:\n list (str): A list of accepted attributes for a given data name\n \"\"\"\n _refresh_data_struct()\n if data_name not in _data_struct:\n raise ValueError(\n f\"Currently the hosted datasets are of types: {list(_data_struct)}, but got {data_name}.\"\n )\n return _data_struct[data_name][\"attributes\"]\n\n\ndef _interactive_request_attributes(options):\n \"\"\"Prompt the user to select a list of attributes.\"\"\"\n prompt = \"Please select attributes:\"\n for i, option in enumerate(options):\n if option == \"full\":\n option = \"full (all attributes)\"\n prompt += f\"\\n\\t{i+1}) {option}\"\n print(prompt)\n choices = input(f\"Choice (comma-separated list of options) [1-{len(options)}]: \").split(\",\")\n try:\n choices = list(map(int, choices))\n except ValueError as e:\n raise ValueError(f\"Must enter a list of integers between 1 and {len(options)}\") from e\n if any(choice < 1 or choice > len(options) for choice in choices):\n raise ValueError(f\"Must enter a list of integers between 1 and {len(options)}\")\n return [options[choice - 1] for choice in choices]\n\n\ndef _interactive_request_single(node, param):\n \"\"\"Prompt the user to select a single option from a list.\"\"\"\n options = list(node)\n if len(options) == 1:\n print(f\"Using {options[0]} as it is the only {param} available.\")\n sleep(1)\n return options[0]\n print(f\"Please select a {param}:\")\n print(\"\\n\".join(f\"\\t{i+1}) {option}\" for i, option in enumerate(options)))\n try:\n choice = int(input(f\"Choice [1-{len(options)}]: \"))\n except ValueError as e:\n raise ValueError(f\"Must enter an integer between 1 and {len(options)}\") from e\n if choice < 1 or choice > len(options):\n raise ValueError(f\"Must enter an integer between 1 and {len(options)}\")\n return options[choice - 1]\n\n\ndef load_interactive():\n r\"\"\"Download a dataset using an interactive load prompt.\n\n Returns:\n :class:`~pennylane.data.Dataset`\n\n **Example**\n\n .. code-block :: pycon\n\n >>> qml.data.load_interactive()\n Please select a data name:\n 1) qspin\n 2) qchem\n Choice [1-2]: 1\n Please select a sysname:\n ...\n Please select a periodicity:\n ...\n Please select a lattice:\n ...\n Please select a layout:\n ...\n Please select attributes:\n ...\n Force download files? (Default is no) [y/N]: N\n Folder to download to? (Default is pwd, will download to /datasets subdirectory):\n\n Please confirm your choices:\n dataset: qspin/Ising/open/rectangular/4x4\n attributes: ['parameters', 'ground_states']\n force: False\n dest folder: /Users/jovyan/Downloads/datasets\n Would you like to continue? 
(Default is yes) [Y/n]:\n <Dataset = description: qspin/Ising/open/rectangular/4x4, attributes: ['parameters', 'ground_states']>\n \"\"\"\n\n _refresh_foldermap()\n _refresh_data_struct()\n\n node = _foldermap\n data_name = _interactive_request_single(node, \"data name\")\n\n description = {}\n value = data_name\n\n params = _data_struct[data_name][\"params\"]\n for param in params:\n node = node[value]\n value = _interactive_request_single(node, param)\n description[param] = value\n\n attributes = _interactive_request_attributes(_data_struct[data_name][\"attributes\"])\n force = input(\"Force download files? (Default is no) [y/N]: \") in [\"y\", \"Y\"]\n dest_folder = input(\n \"Folder to download to? (Default is pwd, will download to /datasets subdirectory): \"\n )\n\n print(\"\\nPlease confirm your choices:\")\n print(\"dataset:\", \"/\".join([data_name] + [description[param] for param in params]))\n print(\"attributes:\", attributes)\n print(\"force:\", force)\n print(\"dest folder:\", os.path.join(dest_folder, \"datasets\"))\n\n approve = input(\"Would you like to continue? (Default is yes) [Y/n]: \")\n if approve not in [\"Y\", \"\", \"y\"]:\n print(\"Aborting and not downloading!\")\n return None\n return load(\n data_name, attributes=attributes, folder_path=dest_folder, force=force, **description\n )[0]\n", "path": "pennylane/data/data_manager.py"}], "after_files": [{"content": "# Copyright 2018-2022 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nContains the Dataset utility functions.\n\"\"\"\n# pylint:disable=too-many-arguments,global-statement\nfrom collections.abc import Iterable\nfrom concurrent.futures import ThreadPoolExecutor, wait, FIRST_EXCEPTION\nimport os\nfrom os.path import sep as pathsep\nfrom time import sleep\nfrom urllib.parse import quote\n\nimport requests\nfrom pennylane.data.dataset import Dataset\n\nS3_URL = \"https://xanadu-quantum-datasets.s3.amazonaws.com\"\nFOLDERMAP_URL = f\"{S3_URL}/foldermap.json\"\nDATA_STRUCT_URL = f\"{S3_URL}/data_struct.json\"\n\n_foldermap = {}\n_data_struct = {}\n\n\n# pylint:disable=too-many-branches\ndef _format_details(param, details):\n \"\"\"Ensures each user-inputted parameter is a properly typed list.\n Also provides custom support for certain parameters.\"\"\"\n if not isinstance(details, list):\n details = [details]\n if param == \"layout\":\n # if a user inputs layout=[1,2], they wanted \"1x2\"\n # note that the above conversion to a list of details wouldn't work as expected here\n if all(isinstance(dim, int) for dim in details):\n return [\"x\".join(map(str, details))]\n # will turn [(1,2), [3,4], \"5x6\"] into [\"1x2\", \"3x4\", \"5x6\"]\n for i, detail in enumerate(details):\n if isinstance(detail, Iterable) and all(isinstance(dim, int) for dim in detail):\n details[i] = \"x\".join(map(str, detail))\n elif not isinstance(detail, str):\n raise TypeError(\n f\"Invalid layout value of '{detail}'. 
Must be a string or a tuple of ints.\"\n )\n elif param == \"bondlength\":\n for i, detail in enumerate(details):\n if isinstance(detail, float):\n details[i] = str(detail)\n elif isinstance(detail, int):\n details[i] = f\"{detail:.1f}\"\n elif not isinstance(detail, str):\n raise TypeError(f\"Invalid bondlength '{detail}'. Must be a string, int or float.\")\n for detail in details:\n if not isinstance(detail, str):\n raise TypeError(f\"Invalid type '{type(detail).__name__}' for parameter '{param}'\")\n return details\n\n\ndef _validate_params(data_name, description, attributes):\n \"\"\"Validate parameters for loading the data.\"\"\"\n\n data = _data_struct.get(data_name)\n if not data:\n raise ValueError(\n f\"Currently the hosted datasets are of types: {list(_data_struct)}, but got {data_name}.\"\n )\n\n if not isinstance(attributes, list):\n raise TypeError(f\"Arg 'attributes' should be a list, but got {type(attributes).__name__}.\")\n\n all_attributes = data[\"attributes\"]\n if not set(attributes).issubset(set(all_attributes)):\n raise ValueError(\n f\"Supported key values for {data_name} are {all_attributes}, but got {attributes}.\"\n )\n\n params_needed = data[\"params\"]\n if set(description) != set(params_needed):\n raise ValueError(\n f\"Supported parameter values for {data_name} are {params_needed}, but got {list(description)}.\"\n )\n\n def validate_structure(node, params_left):\n \"\"\"Recursively validates that all values in `description` exist in the dataset.\"\"\"\n param = params_left[0]\n params_left = params_left[1:]\n for detail in description[param]:\n exc = None\n if detail == \"full\":\n if not params_left:\n return None\n for child in node.values():\n exc = validate_structure(child, params_left)\n elif detail not in node: # error: return the error message to be raised\n return ValueError(\n f\"{param} value of '{detail}' is not available. 
Available values are {list(node)}\"\n )\n elif params_left:\n exc = validate_structure(node[detail], params_left)\n if exc is not None:\n return exc\n return None\n\n exc = validate_structure(_foldermap[data_name], params_needed)\n if isinstance(exc, Exception):\n raise exc # pylint:disable=raising-bad-type\n\n\ndef _refresh_foldermap():\n \"\"\"Refresh the foldermap from S3.\"\"\"\n global _foldermap\n if _foldermap:\n return\n response = requests.get(FOLDERMAP_URL, timeout=5.0)\n response.raise_for_status()\n _foldermap = response.json()\n\n\ndef _refresh_data_struct():\n \"\"\"Refresh the data struct from S3.\"\"\"\n global _data_struct\n if _data_struct:\n return\n response = requests.get(DATA_STRUCT_URL, timeout=5.0)\n response.raise_for_status()\n _data_struct = response.json()\n\n\ndef _fetch_and_save(filename, dest_folder):\n \"\"\"Download a single file from S3 and save it locally.\"\"\"\n webfile = filename if pathsep == \"/\" else filename.replace(pathsep, \"/\")\n response = requests.get(f\"{S3_URL}/{quote(webfile)}\", timeout=5.0)\n response.raise_for_status()\n with open(os.path.join(dest_folder, filename), \"wb\") as f:\n f.write(response.content)\n\n\ndef _s3_download(data_name, folders, attributes, dest_folder, force, num_threads):\n \"\"\"Download a file for each attribute from each folder to the specified destination.\n\n Args:\n data_name (str) : The type of the data required\n folders (list) : A list of folders corresponding to S3 object prefixes\n attributes (list) : A list to specify individual data elements that are required\n dest_folder (str) : Path to the root folder where files should be saved\n force (bool) : Whether data has to be downloaded even if it is still present\n num_threads (int) : The maximum number of threads to spawn while downloading files\n (1 thread per file)\n \"\"\"\n files = []\n for folder in folders:\n local_folder = os.path.join(dest_folder, data_name, folder)\n if not os.path.exists(local_folder):\n os.makedirs(local_folder)\n\n prefix = os.path.join(data_name, folder, f\"{folder.replace(pathsep, '_')}_\")\n # TODO: consider combining files within a folder (switch to append)\n files.extend([f\"{prefix}{attr}.dat\" for attr in attributes])\n\n if not force:\n start = len(dest_folder.rstrip(pathsep)) + 1\n existing_files = {\n os.path.join(path, name)[start:]\n for path, _, local_files in os.walk(dest_folder)\n for name in local_files\n }\n files = list(set(files) - existing_files)\n\n with ThreadPoolExecutor(num_threads) as pool:\n futures = [pool.submit(_fetch_and_save, f, dest_folder) for f in files]\n results = wait(futures, return_when=FIRST_EXCEPTION)\n for result in results.done:\n if result.exception():\n raise result.exception()\n\n\ndef _generate_folders(node, folders):\n \"\"\"Recursively generate and return a tree of all folder names below a node.\n\n Args:\n node (dict) : A sub-dict of the foldermap for which a list of sub-folders is generated\n folders (list[list[str]]) : The ordered list of folder names requested.\n The value ``[\"full\"]`` will expand to all possible folders at that depth\n\n Returns:\n list[str]: The paths of files that should be fetched from S3\n \"\"\"\n\n next_folders = folders[1:]\n if folders[0] == [\"full\"]:\n folders = node\n else:\n values_for_this_node = set(folders[0]).intersection(set(node))\n folders = [f for f in folders[0] if f in values_for_this_node]\n return (\n [\n os.path.join(folder, child)\n for folder in folders\n for child in _generate_folders(node[folder], next_folders)\n ]\n if 
next_folders\n else folders\n )\n\n\ndef load(\n data_name, attributes=None, lazy=False, folder_path=\"\", force=False, num_threads=50, **params\n):\n r\"\"\"Downloads the data if it is not already present in the directory and return it to user as a\n :class:`~pennylane.data.Dataset` object. For the full list of available datasets, please see the\n `datasets website <https://pennylane.ai/qml/datasets.html>`_.\n\n Args:\n data_name (str) : A string representing the type of data required such as `qchem`, `qpsin`, etc.\n attributes (list) : An optional list to specify individual data element that are required\n folder_path (str) : Path to the root folder where download takes place.\n By default dataset folder will be created in the working directory\n force (Bool) : Bool representing whether data has to be downloaded even if it is still present\n num_threads (int) : The maximum number of threads to spawn while downloading files (1 thread per file)\n params (kwargs) : Keyword arguments exactly matching the parameters required for the data type.\n Note that these are not optional\n\n Returns:\n list[:class:`~pennylane.data.Dataset`]\n\n .. warning::\n\n PennyLane datasets use the ``dill`` module to compress, store, and read data. Since ``dill``\n is built on the ``pickle`` module, we reproduce an important warning from the ``pickle``\n module: it is possible to construct malicious pickle data which will execute arbitrary code\n during unpickling. Never unpickle data that could have come from an untrusted source, or\n that could have been tampered with.\n \"\"\"\n\n _ = lazy\n\n _refresh_foldermap()\n _refresh_data_struct()\n if not attributes:\n attributes = [\"full\"]\n\n description = {param: _format_details(param, details) for param, details in params.items()}\n _validate_params(data_name, description, attributes)\n if len(attributes) > 1 and \"full\" in attributes:\n attributes = [\"full\"]\n for key, val in description.items():\n if len(val) > 1 and \"full\" in val:\n description[key] = [\"full\"]\n\n data = _data_struct[data_name]\n directory_path = os.path.join(folder_path, \"datasets\")\n\n folders = [description[param] for param in data[\"params\"]]\n all_folders = _generate_folders(_foldermap[data_name], folders)\n _s3_download(data_name, all_folders, attributes, directory_path, force, num_threads)\n\n data_files = []\n docstring = data[\"docstr\"]\n for folder in all_folders:\n real_folder = os.path.join(directory_path, data_name, folder)\n data_files.append(\n Dataset(data_name, real_folder, folder.replace(pathsep, \"_\"), docstring, standard=True)\n )\n\n return data_files\n\n\ndef _direc_to_dict(path):\n r\"\"\"Helper function to create dictionary structure from directory path\"\"\"\n for root, dirs, _ in os.walk(path):\n if not dirs:\n return None\n tree = {x: _direc_to_dict(os.path.join(root, x)) for x in dirs}\n return list(dirs) if all(x is None for x in tree.values()) else tree\n\n\ndef list_datasets(path=None):\n r\"\"\"Returns a dictionary of the available datasets.\n\n Return:\n dict: Nested dictionary representing the directory structure of the hosted datasets.\n\n **Example:**\n\n Note that the results of calling this function may differ from this example as more datasets\n are added. For updates on available data see the `datasets website <https://pennylane.ai/qml/datasets.html>`_.\n\n .. code-block :: pycon\n\n >>> qml.data.list_datasets()\n {'qchem': {'H2': {'6-31G': ['0.5', '0.54', '0.58', ... '2.02', '2.06', '2.1'],\n 'STO-3G': ['0.5', '0.54', '0.58', ... 
'2.02', '2.06', '2.1']},\n 'HeH+': {'6-31G': ['0.5', '0.54', '0.58', ... '2.02', '2.06', '2.1'],\n 'STO-3G': ['0.5', '0.54', '0.58', ... '2.02', '2.06', '2.1']},\n 'LiH': {'STO-3G': ['0.5', '0.54', '0.58', ... '2.02', '2.06', '2.1']},\n 'OH-': {'STO-3G': ['0.5', '0.54', '0.58', ... '0.94', '0.98', '1.02']}},\n 'qspin': {'Heisenberg': {'closed': {'chain': ['1x16', '1x4', '1x8'],\n 'rectangular': ['2x2', '2x4', '2x8', '4x4']},\n 'open': {'chain': ['1x16', '1x4', '1x8'],\n 'rectangular': ['2x2', '2x4', '2x8', '4x4']}},\n 'Ising': {'closed': {'chain': ['1x16', '1x4', '1x8'],\n 'rectangular': ['2x2', '2x4', '2x8', '4x4']},\n 'open': {'chain': ['1x16', '1x4', '1x8'],\n 'rectangular': ['2x2', '2x4', '2x8', '4x4']}}}}\n \"\"\"\n\n if path:\n return _direc_to_dict(path)\n _refresh_foldermap()\n return _foldermap.copy()\n\n\ndef list_attributes(data_name):\n r\"\"\"List the attributes that exist for a specific ``data_name``.\n\n Args:\n data_name (str): The type of the desired data\n\n Returns:\n list (str): A list of accepted attributes for a given data name\n \"\"\"\n _refresh_data_struct()\n if data_name not in _data_struct:\n raise ValueError(\n f\"Currently the hosted datasets are of types: {list(_data_struct)}, but got {data_name}.\"\n )\n return _data_struct[data_name][\"attributes\"]\n\n\ndef _interactive_request_attributes(options):\n \"\"\"Prompt the user to select a list of attributes.\"\"\"\n prompt = \"Please select attributes:\"\n for i, option in enumerate(options):\n if option == \"full\":\n option = \"full (all attributes)\"\n prompt += f\"\\n\\t{i+1}) {option}\"\n print(prompt)\n choices = input(f\"Choice (comma-separated list of options) [1-{len(options)}]: \").split(\",\")\n try:\n choices = list(map(int, choices))\n except ValueError as e:\n raise ValueError(f\"Must enter a list of integers between 1 and {len(options)}\") from e\n if any(choice < 1 or choice > len(options) for choice in choices):\n raise ValueError(f\"Must enter a list of integers between 1 and {len(options)}\")\n return [options[choice - 1] for choice in choices]\n\n\ndef _interactive_request_single(node, param):\n \"\"\"Prompt the user to select a single option from a list.\"\"\"\n options = list(node)\n if len(options) == 1:\n print(f\"Using {options[0]} as it is the only {param} available.\")\n sleep(1)\n return options[0]\n print(f\"Please select a {param}:\")\n print(\"\\n\".join(f\"\\t{i+1}) {option}\" for i, option in enumerate(options)))\n try:\n choice = int(input(f\"Choice [1-{len(options)}]: \"))\n except ValueError as e:\n raise ValueError(f\"Must enter an integer between 1 and {len(options)}\") from e\n if choice < 1 or choice > len(options):\n raise ValueError(f\"Must enter an integer between 1 and {len(options)}\")\n return options[choice - 1]\n\n\ndef load_interactive():\n r\"\"\"Download a dataset using an interactive load prompt.\n\n Returns:\n :class:`~pennylane.data.Dataset`\n\n **Example**\n\n .. code-block :: pycon\n\n >>> qml.data.load_interactive()\n Please select a data name:\n 1) qspin\n 2) qchem\n Choice [1-2]: 1\n Please select a sysname:\n ...\n Please select a periodicity:\n ...\n Please select a lattice:\n ...\n Please select a layout:\n ...\n Please select attributes:\n ...\n Force download files? (Default is no) [y/N]: N\n Folder to download to? 
(Default is pwd, will download to /datasets subdirectory):\n\n Please confirm your choices:\n dataset: qspin/Ising/open/rectangular/4x4\n attributes: ['parameters', 'ground_states']\n force: False\n dest folder: /Users/jovyan/Downloads/datasets\n Would you like to continue? (Default is yes) [Y/n]:\n <Dataset = description: qspin/Ising/open/rectangular/4x4, attributes: ['parameters', 'ground_states']>\n \"\"\"\n\n _refresh_foldermap()\n _refresh_data_struct()\n\n node = _foldermap\n data_name = _interactive_request_single(node, \"data name\")\n\n description = {}\n value = data_name\n\n params = _data_struct[data_name][\"params\"]\n for param in params:\n node = node[value]\n value = _interactive_request_single(node, param)\n description[param] = value\n\n attributes = _interactive_request_attributes(_data_struct[data_name][\"attributes\"])\n force = input(\"Force download files? (Default is no) [y/N]: \") in [\"y\", \"Y\"]\n dest_folder = input(\n \"Folder to download to? (Default is pwd, will download to /datasets subdirectory): \"\n )\n\n print(\"\\nPlease confirm your choices:\")\n print(\"dataset:\", \"/\".join([data_name] + [description[param] for param in params]))\n print(\"attributes:\", attributes)\n print(\"force:\", force)\n print(\"dest folder:\", os.path.join(dest_folder, \"datasets\"))\n\n approve = input(\"Would you like to continue? (Default is yes) [Y/n]: \")\n if approve not in [\"Y\", \"\", \"y\"]:\n print(\"Aborting and not downloading!\")\n return None\n return load(\n data_name, attributes=attributes, folder_path=dest_folder, force=force, **description\n )[0]\n", "path": "pennylane/data/data_manager.py"}]} |
gh_patches_debug_1237 | rasdani/github-patches | git_diff | ivy-llc__ivy-18274 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
diff
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/paddle/tensor/math.py`
Content:
```
1 # global
2 import ivy
3 from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes
4 from ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back
5
6
7 @with_unsupported_dtypes({"2.5.1 and below": ("float16", "bfloat16")}, "paddle")
8 @to_ivy_arrays_and_back
9 def sin(x, name=None):
10 return ivy.sin(x)
11
12
13 @with_unsupported_dtypes({"2.5.1 and below": ("float16", "bfloat16")}, "paddle")
14 @to_ivy_arrays_and_back
15 def cos(x, name=None):
16 return ivy.cos(x)
17
18
19 @with_unsupported_dtypes({"2.5.1 and below": ("float16", "bfloat16")}, "paddle")
20 @to_ivy_arrays_and_back
21 def acos(x, name=None):
22 return ivy.acos(x)
23
24
25 @with_unsupported_dtypes({"2.5.1 and below": ("float16", "bfloat16")}, "paddle")
26 @to_ivy_arrays_and_back
27 def cosh(x, name=None):
28 return ivy.cosh(x)
29
30
31 @with_supported_dtypes({"2.5.1 and below": ("float32", "float64")}, "paddle")
32 @to_ivy_arrays_and_back
33 def tanh(x, name=None):
34 return ivy.tanh(x)
35
36
37 @with_unsupported_dtypes({"2.5.1 and below": ("float16", "bfloat16")}, "paddle")
38 @to_ivy_arrays_and_back
39 def acosh(x, name=None):
40 return ivy.acosh(x)
41
42
43 @with_supported_dtypes({"2.5.1 and below": ("float32", "float64")}, "paddle")
44 @to_ivy_arrays_and_back
45 def asin(x, name=None):
46 return ivy.asin(x)
47
48
49 @with_unsupported_dtypes({"2.5.1 and below": ("float16", "bfloat16")}, "paddle")
50 @to_ivy_arrays_and_back
51 def log(x, name=None):
52 return ivy.log(x)
53
54
55 @with_unsupported_dtypes({"2.5.1 and below": ("float16", "bfloat16")}, "paddle")
56 @to_ivy_arrays_and_back
57 def divide(x, y, name=None):
58 return ivy.divide(x, y)
59
60
61 @with_unsupported_dtypes({"2.5.1 and below": ("float16", "bfloat16")}, "paddle")
62 @to_ivy_arrays_and_back
63 def abs(x, name=None):
64 return ivy.abs(x)
65
66
67 @with_unsupported_dtypes({"2.5.1 and below": ("float16", "bfloat16")}, "paddle")
68 @to_ivy_arrays_and_back
69 def multiply(x, y, name=None):
70 return ivy.multiply(x, y)
71
72
73 @with_unsupported_dtypes(
74 {"2.5.1 and below": ("bool", "unsigned", "int8", "float16", "bfloat16")}, "paddle"
75 )
76 @to_ivy_arrays_and_back
77 def add(x, y, name=None):
78 return ivy.add(x, y)
79
80
81 @with_unsupported_dtypes({"2.5.1 and below": ("float16", "bfloat16")}, "paddle")
82 @to_ivy_arrays_and_back
83 def subtract(x, y, name=None):
84 return ivy.subtract(x, y)
85
86
87 @with_supported_dtypes({"2.5.1 and below": ("float32", "float64")}, "paddle")
88 @to_ivy_arrays_and_back
89 def sqrt(x, name=None):
90 return ivy.sqrt(x)
91
92
93 @with_unsupported_dtypes({"2.5.1 and below": ("float16", "bfloat16")}, "paddle")
94 @to_ivy_arrays_and_back
95 def atanh(x, name=None):
96 return ivy.atanh(x)
97
98
99 @with_unsupported_dtypes({"2.5.1 and below": ("float16", "bfloat16")}, "paddle")
100 @to_ivy_arrays_and_back
101 def atan(x, name=None):
102 return ivy.atan(x)
103
104
105 @with_unsupported_dtypes({"2.5.1 and below": ("float16", "bfloat16")}, "paddle")
106 @to_ivy_arrays_and_back
107 def round(x, name=None):
108 return ivy.round(x)
109
110
111 @with_unsupported_dtypes({"2.5.1 and below": ("float16", "bfloat16")}, "paddle")
112 @to_ivy_arrays_and_back
113 def ceil(x, name=None):
114 return ivy.ceil(x)
115
116
117 @with_supported_dtypes({"2.5.1 and below": ("float32", "float64")}, "paddle")
118 @to_ivy_arrays_and_back
119 def sinh(x, name=None):
120 return ivy.sinh(x)
121
122
123 @with_unsupported_dtypes({"2.5.1 and below": ("float16", "bfloat16")}, "paddle")
124 @to_ivy_arrays_and_back
125 def pow(x, y, name=None):
126 return ivy.pow(x, y)
127
128
129 @with_unsupported_dtypes({"2.4.2 and below": ("int16", "float16")}, "paddle")
130 @to_ivy_arrays_and_back
131 def conj(x, name=None):
132 return ivy.conj(x)
133
134
135 @with_supported_dtypes(
136 {"2.5.1 and below": ("bfloat16", "float32", "float64")}, "paddle"
137 )
138 @to_ivy_arrays_and_back
139 def floor(x, name=None):
140 return ivy.floor(x)
141
142
143 @with_unsupported_dtypes({"2.5.1 and below": ("float16", "bfloat16")}, "paddle")
144 @to_ivy_arrays_and_back
145 def remainder(x, y, name=None):
146 return ivy.remainder(x, y)
147
148
149 @with_supported_dtypes({"2.5.1 and below": ("float32", "float64")}, "paddle")
150 @to_ivy_arrays_and_back
151 def log2(x, name=None):
152 return ivy.log2(x)
153
154
155 @with_supported_dtypes({"2.5.1 and below": ("float32", "float64")}, "paddle")
156 @to_ivy_arrays_and_back
157 def log1p(x, name=None):
158 return ivy.log1p(x)
159
160
161 @with_unsupported_dtypes({"2.5.1 and below": ("float16", "bfloat16")}, "paddle")
162 @to_ivy_arrays_and_back
163 def rad2deg(x, name=None):
164 return ivy.rad2deg(x)
165
166
167 @with_unsupported_dtypes({"2.5.1 and below": ("float16", "bfloat16")}, "paddle")
168 @to_ivy_arrays_and_back
169 def deg2rad(x, name=None):
170 return ivy.deg2rad(x)
171
172
173 @with_supported_dtypes({"2.5.1 and below": ("int32", "int64")}, "paddle")
174 @to_ivy_arrays_and_back
175 def gcd(x, y, name=None):
176 return ivy.gcd(x, y)
177
178
179 @with_unsupported_dtypes({"2.5.1 and below": ("float16", "bfloat16")}, "paddle")
180 @to_ivy_arrays_and_back
181 def tan(x, name=None):
182 return ivy.tan(x)
183
184
185 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
186 @to_ivy_arrays_and_back
187 def atan2(x, y, name=None):
188 return ivy.atan2(x, y)
189
190
191 @with_supported_dtypes({"2.5.1 and below": ("float32", "float64")}, "paddle")
192 @to_ivy_arrays_and_back
193 def square(x, name=None):
194 return ivy.square(x)
195
196
197 @with_unsupported_dtypes({"2.5.1 and below": ("float16", "bfloat16")}, "paddle")
198 @to_ivy_arrays_and_back
199 def sign(x, name=None):
200 return ivy.sign(x, np_variant=False)
201
202
203 @with_supported_dtypes(
204 {"2.5.1 and below": ("float32", "float64", "int8", "int16", "int32", "int64")},
205 "paddle",
206 )
207 @to_ivy_arrays_and_back
208 def neg(x, name=None):
209 return ivy.negative(x)
210
211
212 @with_supported_dtypes({"2.5.1 and below": ("float32", "float64")}, "paddle")
213 @to_ivy_arrays_and_back
214 def exp(x, name=None):
215 return ivy.exp(x)
216
217
218 @with_supported_dtypes({"2.5.1 and below": ("float16", "float32", "float64")}, "paddle")
219 @to_ivy_arrays_and_back
220 def expm1(x, name=None):
221 return ivy.expm1(x)
222
223
224 @with_supported_dtypes({"2.5.1 and below": ("float32", "float64")}, "paddle")
225 @to_ivy_arrays_and_back
226 def erf(x, name=None):
227 return ivy.erf(x)
228
229
230 @with_supported_dtypes(
231 {
232 "2.5.1 and below": (
233 "int32",
234 "int64",
235 "float32",
236 "float64",
237 "complex64",
238 "complex128",
239 )
240 },
241 "paddle",
242 )
243 @to_ivy_arrays_and_back
244 def cumprod(x, dim=None, dtype=None, name=None):
245 return ivy.cumprod(x, axis=dim, dtype=dtype)
246
247
248 @with_unsupported_dtypes({"2.5.1 and below": ("float16", "bfloat16")}, "paddle")
249 @to_ivy_arrays_and_back
250 def reciprocal(x, name=None):
251 return ivy.reciprocal(x)
252
253
254 @with_supported_dtypes({"2.5.1 and below": ("int32", "int64")}, "paddle")
255 @to_ivy_arrays_and_back
256 def lcm(x, y, name=None):
257 return ivy.lcm(x, y)
258
259
260 @with_supported_dtypes(
261 {"2.5.1 and below": ("float16", "float32", "float64", "int32", "int64")}, "paddle"
262 )
263 @to_ivy_arrays_and_back
264 def isnan(x, name=None):
265 return ivy.isnan(x)
266
267
268 @with_supported_dtypes(
269 {"2.5.1 and below": ("float16", "float32", "float64", "int32", "int64")}, "paddle"
270 )
271 @to_ivy_arrays_and_back
272 def isfinite(x, name=None):
273 return ivy.isfinite(x)
274
275
276 @with_supported_dtypes(
277 {"2.5.1 and below": ("float16", "float32", "float64", "int32", "int64")}, "paddle"
278 )
279 @to_ivy_arrays_and_back
280 def isinf(x, name=None):
281 return ivy.isinf(x)
282
283
284 @with_supported_dtypes(
285 {"2.5.1 and below": ("complex64", "complex128", "float32", "float64")},
286 "paddle",
287 )
288 @to_ivy_arrays_and_back
289 def angle(x, name=None):
290 return ivy.angle(x)
291
292
293 @with_unsupported_dtypes({"2.5.1 and below": "bfloat16"}, "paddle")
294 @to_ivy_arrays_and_back
295 def fmin(x, y, name=None):
296 return ivy.fmin(x, y)
297
298
299 @with_unsupported_dtypes({"2.5.1 and below": ("float16", "bfloat16")}, "paddle")
300 @to_ivy_arrays_and_back
301 def logit(x, eps=None, name=None):
302 return ivy.logit(x, eps=eps)
303
304
305 @with_unsupported_dtypes({"2.5.1 and below": "bfloat16"}, "paddle")
306 @to_ivy_arrays_and_back
307 def fmax(x, y, name=None):
308 return ivy.fmax(x, y)
309
310
311 @with_supported_dtypes(
312 {"2.5.1 and below": ("float32", "float64", "int32", "int64")}, "paddle"
313 )
314 @to_ivy_arrays_and_back
315 def minimum(x, y, name=None):
316 return ivy.minimum(x, y)
317
318
319 @with_supported_dtypes(
320 {"2.4.2 and below": ("float32", "float64", "int32", "int64")}, "paddle"
321 )
322 @to_ivy_arrays_and_back
323 def trunc(x, name=None):
324 return ivy.trunc(x)
325
326
327 @with_unsupported_dtypes({"2.5.1 and below": ("float16", "bfloat16")}, "paddle")
328 @to_ivy_arrays_and_back
329 def sgn(x, name=None):
330 return ivy.sign(x, np_variant=True)
331
332
333 @with_supported_dtypes({"2.5.1 and below": ("float32", "float64")}, "paddle")
334 @to_ivy_arrays_and_back
335 def outer(x, y, name=None):
336 return ivy.outer(x, y)
337
338
339 # maximum
340 @with_unsupported_dtypes({"2.5.1 and below": ("float16", "bfloat16")}, "paddle")
341 @to_ivy_arrays_and_back
342 def maximum(x, y, name=None):
343 return ivy.maximum(x, y)
344
345
346 @with_supported_dtypes(
347 {"2.5.1 and below": ("float32", "float64", "int32", "int64")}, "paddle"
348 )
349 @to_ivy_arrays_and_back
350 def frac(x, name=None):
351 y = ivy.trunc(x)
352 return ivy.subtract(x, y)
353
354
355 @with_unsupported_dtypes({"2.5.1 and below": ("float16", "bfloat16")}, "paddle")
356 @to_ivy_arrays_and_back
357 def asinh(x, name=None):
358 return ivy.asinh(x)
359
360
361 @with_supported_dtypes(
362 {"2.5.1 and below": ("float32", "float64", "int32", "int64")}, "paddle"
363 )
364 @to_ivy_arrays_and_back
365 def max(x, axis=None, keepdim=False, name=None):
366 return ivy.max(x, axis=axis, keepdims=keepdim)
367
368
369 @with_supported_dtypes(
370 {"2.5.1 and below": ("float16", "float32", "float64", "int32", "int64")}, "paddle"
371 )
372 @to_ivy_arrays_and_back
373 def heaviside(x, y, name=None):
374 return ivy.heaviside(x, y)
375
376
377 @with_supported_dtypes({"2.5.1 and below": ("float32", "float64")}, "paddle")
378 @to_ivy_arrays_and_back
379 def lerp(x, y, weight, name=None):
380 return ivy.lerp(x, y, weight)
381
382
383 @with_supported_dtypes({"2.5.1 and below": ("float32", "float64")}, "paddle")
384 @to_ivy_arrays_and_back
385 def rsqrt(x, name=None):
386 return 1 / ivy.sqrt(x)
387
388
389 @with_supported_dtypes(
390 {"2.5.1 and below": ("float32", "float64", "int32", "int64")}, "paddle"
391 )
392 @to_ivy_arrays_and_back
393 def prod(x, axis=None, keepdim=False, dtype=None, name=None):
394 return ivy.prod(x, axis=axis, keepdims=keepdim, dtype=dtype)
395
396
397 @with_supported_dtypes({"2.5.0 and below": "bool"}, "paddle")
398 @to_ivy_arrays_and_back
399 def any(x, axis=None, keepdim=False, name=None):
400 return ivy.any(x, axis=axis, keepdims=keepdim)
401
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ivy/functional/frontends/paddle/tensor/math.py b/ivy/functional/frontends/paddle/tensor/math.py
--- a/ivy/functional/frontends/paddle/tensor/math.py
+++ b/ivy/functional/frontends/paddle/tensor/math.py
@@ -398,3 +398,11 @@
@to_ivy_arrays_and_back
def any(x, axis=None, keepdim=False, name=None):
return ivy.any(x, axis=axis, keepdims=keepdim)
+
+
+@with_supported_dtypes(
+ {"2.5.1 and below": ("float32", "float64", "int32", "int64")}, "paddle"
+)
+@to_ivy_arrays_and_back
+def diff(x, n=1, axis=-1, prepend=None, append=None, name=None):
+ return ivy.diff(x, n=n, axis=axis, prepend=prepend, append=append)
| {"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/math.py b/ivy/functional/frontends/paddle/tensor/math.py\n--- a/ivy/functional/frontends/paddle/tensor/math.py\n+++ b/ivy/functional/frontends/paddle/tensor/math.py\n@@ -398,3 +398,11 @@\n @to_ivy_arrays_and_back\n def any(x, axis=None, keepdim=False, name=None):\n return ivy.any(x, axis=axis, keepdims=keepdim)\n+\n+\n+@with_supported_dtypes(\n+ {\"2.5.1 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n+)\n+@to_ivy_arrays_and_back\n+def diff(x, n=1, axis=-1, prepend=None, append=None, name=None):\n+ return ivy.diff(x, n=n, axis=axis, prepend=prepend, append=append)\n", "issue": "diff\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sin(x, name=None):\n return ivy.sin(x)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cos(x, name=None):\n return ivy.cos(x)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef acos(x, name=None):\n return ivy.acos(x)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cosh(x, name=None):\n return ivy.cosh(x)\n\n\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef tanh(x, name=None):\n return ivy.tanh(x)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef acosh(x, name=None):\n return ivy.acosh(x)\n\n\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef asin(x, name=None):\n return ivy.asin(x)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log(x, name=None):\n return ivy.log(x)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef divide(x, y, name=None):\n return ivy.divide(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef abs(x, name=None):\n return ivy.abs(x)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef multiply(x, y, name=None):\n return ivy.multiply(x, y)\n\n\n@with_unsupported_dtypes(\n {\"2.5.1 and below\": (\"bool\", \"unsigned\", \"int8\", \"float16\", \"bfloat16\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef add(x, y, name=None):\n return ivy.add(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef subtract(x, y, name=None):\n return ivy.subtract(x, y)\n\n\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sqrt(x, name=None):\n return ivy.sqrt(x)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef atanh(x, name=None):\n return ivy.atanh(x)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, 
\"paddle\")\n@to_ivy_arrays_and_back\ndef atan(x, name=None):\n return ivy.atan(x)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef round(x, name=None):\n return ivy.round(x)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef ceil(x, name=None):\n return ivy.ceil(x)\n\n\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sinh(x, name=None):\n return ivy.sinh(x)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef pow(x, y, name=None):\n return ivy.pow(x, y)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"int16\", \"float16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef conj(x, name=None):\n return ivy.conj(x)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"bfloat16\", \"float32\", \"float64\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef floor(x, name=None):\n return ivy.floor(x)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef remainder(x, y, name=None):\n return ivy.remainder(x, y)\n\n\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log2(x, name=None):\n return ivy.log2(x)\n\n\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log1p(x, name=None):\n return ivy.log1p(x)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef rad2deg(x, name=None):\n return ivy.rad2deg(x)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef deg2rad(x, name=None):\n return ivy.deg2rad(x)\n\n\n@with_supported_dtypes({\"2.5.1 and below\": (\"int32\", \"int64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef gcd(x, y, name=None):\n return ivy.gcd(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef tan(x, name=None):\n return ivy.tan(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef atan2(x, y, name=None):\n return ivy.atan2(x, y)\n\n\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef square(x, name=None):\n return ivy.square(x)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sign(x, name=None):\n return ivy.sign(x, np_variant=False)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"float32\", \"float64\", \"int8\", \"int16\", \"int32\", \"int64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef neg(x, name=None):\n return ivy.negative(x)\n\n\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef exp(x, name=None):\n return ivy.exp(x)\n\n\n@with_supported_dtypes({\"2.5.1 and below\": (\"float16\", \"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef expm1(x, name=None):\n return ivy.expm1(x)\n\n\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef erf(x, name=None):\n return ivy.erf(x)\n\n\n@with_supported_dtypes(\n {\n \"2.5.1 and 
below\": (\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n \"complex64\",\n \"complex128\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef cumprod(x, dim=None, dtype=None, name=None):\n return ivy.cumprod(x, axis=dim, dtype=dtype)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef reciprocal(x, name=None):\n return ivy.reciprocal(x)\n\n\n@with_supported_dtypes({\"2.5.1 and below\": (\"int32\", \"int64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef lcm(x, y, name=None):\n return ivy.lcm(x, y)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"float16\", \"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef isnan(x, name=None):\n return ivy.isnan(x)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"float16\", \"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef isfinite(x, name=None):\n return ivy.isfinite(x)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"float16\", \"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef isinf(x, name=None):\n return ivy.isinf(x)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"complex64\", \"complex128\", \"float32\", \"float64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef angle(x, name=None):\n return ivy.angle(x)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": \"bfloat16\"}, \"paddle\")\n@to_ivy_arrays_and_back\ndef fmin(x, y, name=None):\n return ivy.fmin(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef logit(x, eps=None, name=None):\n return ivy.logit(x, eps=eps)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": \"bfloat16\"}, \"paddle\")\n@to_ivy_arrays_and_back\ndef fmax(x, y, name=None):\n return ivy.fmax(x, y)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef minimum(x, y, name=None):\n return ivy.minimum(x, y)\n\n\n@with_supported_dtypes(\n {\"2.4.2 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef trunc(x, name=None):\n return ivy.trunc(x)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sgn(x, name=None):\n return ivy.sign(x, np_variant=True)\n\n\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef outer(x, y, name=None):\n return ivy.outer(x, y)\n\n\n# maximum\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef maximum(x, y, name=None):\n return ivy.maximum(x, y)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef frac(x, name=None):\n y = ivy.trunc(x)\n return ivy.subtract(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef asinh(x, name=None):\n return ivy.asinh(x)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef max(x, axis=None, keepdim=False, name=None):\n return ivy.max(x, axis=axis, keepdims=keepdim)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"float16\", \"float32\", 
\"float64\", \"int32\", \"int64\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef heaviside(x, y, name=None):\n return ivy.heaviside(x, y)\n\n\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef lerp(x, y, weight, name=None):\n return ivy.lerp(x, y, weight)\n\n\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef rsqrt(x, name=None):\n return 1 / ivy.sqrt(x)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef prod(x, axis=None, keepdim=False, dtype=None, name=None):\n return ivy.prod(x, axis=axis, keepdims=keepdim, dtype=dtype)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": \"bool\"}, \"paddle\")\n@to_ivy_arrays_and_back\ndef any(x, axis=None, keepdim=False, name=None):\n return ivy.any(x, axis=axis, keepdims=keepdim)\n", "path": "ivy/functional/frontends/paddle/tensor/math.py"}], "after_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sin(x, name=None):\n return ivy.sin(x)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cos(x, name=None):\n return ivy.cos(x)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef acos(x, name=None):\n return ivy.acos(x)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cosh(x, name=None):\n return ivy.cosh(x)\n\n\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef tanh(x, name=None):\n return ivy.tanh(x)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef acosh(x, name=None):\n return ivy.acosh(x)\n\n\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef asin(x, name=None):\n return ivy.asin(x)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log(x, name=None):\n return ivy.log(x)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef divide(x, y, name=None):\n return ivy.divide(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef abs(x, name=None):\n return ivy.abs(x)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef multiply(x, y, name=None):\n return ivy.multiply(x, y)\n\n\n@with_unsupported_dtypes(\n {\"2.5.1 and below\": (\"bool\", \"unsigned\", \"int8\", \"float16\", \"bfloat16\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef add(x, y, name=None):\n return ivy.add(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef subtract(x, y, name=None):\n return ivy.subtract(x, y)\n\n\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, 
\"paddle\")\n@to_ivy_arrays_and_back\ndef sqrt(x, name=None):\n return ivy.sqrt(x)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef atanh(x, name=None):\n return ivy.atanh(x)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef atan(x, name=None):\n return ivy.atan(x)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef round(x, name=None):\n return ivy.round(x)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef ceil(x, name=None):\n return ivy.ceil(x)\n\n\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sinh(x, name=None):\n return ivy.sinh(x)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef pow(x, y, name=None):\n return ivy.pow(x, y)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"int16\", \"float16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef conj(x, name=None):\n return ivy.conj(x)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"bfloat16\", \"float32\", \"float64\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef floor(x, name=None):\n return ivy.floor(x)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef remainder(x, y, name=None):\n return ivy.remainder(x, y)\n\n\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log2(x, name=None):\n return ivy.log2(x)\n\n\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log1p(x, name=None):\n return ivy.log1p(x)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef rad2deg(x, name=None):\n return ivy.rad2deg(x)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef deg2rad(x, name=None):\n return ivy.deg2rad(x)\n\n\n@with_supported_dtypes({\"2.5.1 and below\": (\"int32\", \"int64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef gcd(x, y, name=None):\n return ivy.gcd(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef tan(x, name=None):\n return ivy.tan(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef atan2(x, y, name=None):\n return ivy.atan2(x, y)\n\n\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef square(x, name=None):\n return ivy.square(x)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sign(x, name=None):\n return ivy.sign(x, np_variant=False)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"float32\", \"float64\", \"int8\", \"int16\", \"int32\", \"int64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef neg(x, name=None):\n return ivy.negative(x)\n\n\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef exp(x, name=None):\n return ivy.exp(x)\n\n\n@with_supported_dtypes({\"2.5.1 and below\": 
(\"float16\", \"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef expm1(x, name=None):\n return ivy.expm1(x)\n\n\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef erf(x, name=None):\n return ivy.erf(x)\n\n\n@with_supported_dtypes(\n {\n \"2.5.1 and below\": (\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n \"complex64\",\n \"complex128\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef cumprod(x, dim=None, dtype=None, name=None):\n return ivy.cumprod(x, axis=dim, dtype=dtype)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef reciprocal(x, name=None):\n return ivy.reciprocal(x)\n\n\n@with_supported_dtypes({\"2.5.1 and below\": (\"int32\", \"int64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef lcm(x, y, name=None):\n return ivy.lcm(x, y)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"float16\", \"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef isnan(x, name=None):\n return ivy.isnan(x)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"float16\", \"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef isfinite(x, name=None):\n return ivy.isfinite(x)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"float16\", \"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef isinf(x, name=None):\n return ivy.isinf(x)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"complex64\", \"complex128\", \"float32\", \"float64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef angle(x, name=None):\n return ivy.angle(x)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": \"bfloat16\"}, \"paddle\")\n@to_ivy_arrays_and_back\ndef fmin(x, y, name=None):\n return ivy.fmin(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef logit(x, eps=None, name=None):\n return ivy.logit(x, eps=eps)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": \"bfloat16\"}, \"paddle\")\n@to_ivy_arrays_and_back\ndef fmax(x, y, name=None):\n return ivy.fmax(x, y)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef minimum(x, y, name=None):\n return ivy.minimum(x, y)\n\n\n@with_supported_dtypes(\n {\"2.4.2 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef trunc(x, name=None):\n return ivy.trunc(x)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sgn(x, name=None):\n return ivy.sign(x, np_variant=True)\n\n\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef outer(x, y, name=None):\n return ivy.outer(x, y)\n\n\n# maximum\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef maximum(x, y, name=None):\n return ivy.maximum(x, y)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef frac(x, name=None):\n y = ivy.trunc(x)\n return ivy.subtract(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef asinh(x, name=None):\n return 
ivy.asinh(x)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef max(x, axis=None, keepdim=False, name=None):\n return ivy.max(x, axis=axis, keepdims=keepdim)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"float16\", \"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef heaviside(x, y, name=None):\n return ivy.heaviside(x, y)\n\n\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef lerp(x, y, weight, name=None):\n return ivy.lerp(x, y, weight)\n\n\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef rsqrt(x, name=None):\n return 1 / ivy.sqrt(x)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef prod(x, axis=None, keepdim=False, dtype=None, name=None):\n return ivy.prod(x, axis=axis, keepdims=keepdim, dtype=dtype)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": \"bool\"}, \"paddle\")\n@to_ivy_arrays_and_back\ndef any(x, axis=None, keepdim=False, name=None):\n return ivy.any(x, axis=axis, keepdims=keepdim)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef diff(x, n=1, axis=-1, prepend=None, append=None, name=None):\n return ivy.diff(x, n=n, axis=axis, prepend=prepend, append=append)\n", "path": "ivy/functional/frontends/paddle/tensor/math.py"}]} |
gh_patches_debug_1238 | rasdani/github-patches | git_diff | bokeh__bokeh-1923 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
app_reveal fails importing old plotting stuff
```
(py34devel)[damian@damian-S400CA][slideshow](master)$ python app_reveal.py
Traceback (most recent call last):
File "app_reveal.py", line 19, in <module>
from bokeh.plotting import (annular_wedge, cursession, figure, hold, legend,
ImportError: cannot import name 'annular_wedge'
```
--- END ISSUE ---
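
For quick confirmation that only the import line is at fault, the failure can be reproduced outside the Flask app. The sketch below is an illustration under an assumption: it presumes the installed Bokeh release matches the traceback, where `figure`, `cursession`, `output_server` and `push` are still importable but the module-level glyph functions are gone.

```python
# Reproduction sketch (assumes a Bokeh release matching the traceback above).
from bokeh.plotting import cursession, figure, output_server, push  # still importable

try:
    from bokeh.plotting import annular_wedge  # noqa: F401
except ImportError as err:
    print(err)  # -> cannot import name 'annular_wedge'
```
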
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/embed/slideshow/app_reveal.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 In this example, we want to show you how you can take isolated blocks of code
4 (featuring different kinds of Bokeh visualizations) and rearrange them in a
5 bigger (encompassing) flask-based application without losing the independence
6 of each example. This is the reason of some weirdness through the code.
7 We are using this "building blocks" approach here because we believe it has some
8 conceptual advantages for people trying to quickly understand, and more
9 importantly, use the embed API, in a more complex way than just a simple script.
10 """
11 import time
12 from threading import Thread
13
14 import numpy as np
15 import scipy.special
16
17 from bokeh.embed import autoload_server
18 from bokeh.models import GlyphRenderer
19 from bokeh.plotting import (annular_wedge, cursession, figure, hold, legend,
20 line, output_server, push, quad, xgrid, ygrid)
21
22 from flask import Flask, render_template
23 app = Flask(__name__)
24
25 @app.route('/')
26 def render_plot():
27 """
28 Get the script tags from each plot object and "insert" them into the template.
29
30 This also starts different threads for each update function, so you can have
31 a non-blocking update.
32 """
33 dist_plot, dist_session = distribution()
34 dist_tag = autoload_server(dist_plot, dist_session)
35
36 anim_plot, anim_session = animated()
37 anim_tag = autoload_server(anim_plot, anim_session)
38 # for update_animation as target we need to pass the anim_plot and anim_session as args
39 thread = Thread(target=update_animation, args=(anim_plot, anim_session))
40 thread.start()
41
42 pop = Population()
43 pop_tag = autoload_server(pop.layout, pop.session)
44 # for update_population as target we need to pass the pop instance as args
45 thread = Thread(target=update_population, args=(pop,))
46 thread.start()
47
48 return render_template('app_plot.html', tag1=dist_tag, tag2=anim_tag, tag3=pop_tag)
49
50
51 def distribution():
52
53 mu, sigma = 0, 0.5
54
55 measured = np.random.normal(mu, sigma, 1000)
56 hist, edges = np.histogram(measured, density=True, bins=20)
57
58 x = np.linspace(-2, 2, 1000)
59 pdf = 1 / (sigma * np.sqrt(2 * np.pi)) * np.exp(-(x - mu) ** 2 / (2 * sigma ** 2))
60 cdf = (1 + scipy.special.erf((x - mu) / np.sqrt(2 * sigma ** 2))) / 2
61
62 output_server("distribution_reveal")
63
64 p = figure(title="Interactive plots",
65 background_fill="#E5E5E5")
66 p.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:],
67 fill_color="#333333", line_color="#E5E5E5", line_width=3)
68
69 # Use `line` renderers to display the PDF and CDF
70 p.line(x, pdf, line_color="#348abd", line_width=8, alpha=0.7, legend="PDF")
71 p.line(x, cdf, line_color="#7a68a6", line_width=8, alpha=0.7, legend="CDF")
72
73 p.legend.orientation = "top_left"
74
75 p.xaxis.axis_label = 'x'
76 p.xgrid[0].grid_line_color = "white"
77 p.xgrid[0].grid_line_width = 3
78
79 p.yaxis.axis_label = 'Pr(x)'
80 p.ygrid[0].grid_line_color = "white"
81 p.ygrid[0].grid_line_width = 3
82
83 push()
84
85 return p, cursession()
86
87
88 def animated():
89
90 from numpy import pi, cos, sin, linspace
91
92 N = 50 + 1
93 r_base = 8
94 theta = linspace(0, 2 * pi, N)
95 r_x = linspace(0, 6 * pi, N - 1)
96 rmin = r_base - cos(r_x) - 1
97 rmax = r_base + sin(r_x) + 1
98
99 colors = ["FFFFCC", "#C7E9B4", "#7FCDBB", "#41B6C4", "#2C7FB8",
100 "#253494", "#2C7FB8", "#41B6C4", "#7FCDBB", "#C7E9B4"] * 5
101
102 output_server("animated_reveal")
103
104 p = figure(title="Animations", x_range=[-11, 11], y_range=[-11, 11])
105
106 p.annular_wedge(
107 0, 0, rmin, rmax, theta[:-1], theta[1:],
108 inner_radius_units="data",
109 outer_radius_units="data",
110 fill_color=colors,
111 line_color="black",
112 )
113
114 push()
115
116 return p, cursession()
117
118
119 def update_animation(plot, session):
120
121 from numpy import roll
122
123 renderer = plot.select(dict(type=GlyphRenderer))
124 ds = renderer[0].data_source
125
126 while True:
127
128 rmin = ds.data["inner_radius"]
129 rmin = roll(rmin, 1)
130 ds.data["inner_radius"] = rmin
131
132 rmax = ds.data["outer_radius"]
133 rmax = roll(rmax, -1)
134 ds.data["outer_radius"] = rmax
135
136 cursession().store_objects(ds)
137 time.sleep(0.1)
138
139
140 class Population(object):
141
142 year = 2010
143 location = "World"
144
145 def __init__(self):
146 from bokeh.models import ColumnDataSource
147 from bokeh.document import Document
148 from bokeh.session import Session
149 from bokeh.sampledata.population import load_population
150
151 self.document = Document()
152 self.session = Session()
153 self.session.use_doc('population_reveal')
154 self.session.load_document(self.document)
155
156 self.df = load_population()
157 self.source_pyramid = ColumnDataSource(data=dict())
158
159 # just render at the initialization
160 self._render()
161
162 def _render(self):
163 self.pyramid_plot()
164 self.create_layout()
165 self.document.add(self.layout)
166 self.update_pyramid()
167
168 def pyramid_plot(self):
169 from bokeh.models import (Plot, DataRange1d, LinearAxis, Grid,
170 Legend, SingleIntervalTicker)
171 from bokeh.models.glyphs import Quad
172
173 xdr = DataRange1d(sources=[self.source_pyramid.columns("male"),
174 self.source_pyramid.columns("female")])
175 ydr = DataRange1d(sources=[self.source_pyramid.columns("groups")])
176
177 self.plot = Plot(title="Widgets", x_range=xdr, y_range=ydr,
178 plot_width=600, plot_height=600)
179
180 xaxis = LinearAxis()
181 self.plot.add_layout(xaxis, 'below')
182 yaxis = LinearAxis(ticker=SingleIntervalTicker(interval=5))
183 self.plot.add_layout(yaxis, 'left')
184
185 self.plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))
186 self.plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))
187
188 male_quad = Quad(left="male", right=0, bottom="groups", top="shifted",
189 fill_color="#3B8686")
190 male_quad_glyph = self.plot.add_glyph(self.source_pyramid, male_quad)
191
192 female_quad = Quad(left=0, right="female", bottom="groups", top="shifted",
193 fill_color="#CFF09E")
194 female_quad_glyph = self.plot.add_glyph(self.source_pyramid, female_quad)
195
196 self.plot.add_layout(Legend(legends=dict(Male=[male_quad_glyph],
197 Female=[female_quad_glyph])))
198
199 def on_year_change(self, obj, attr, old, new):
200 self.year = int(new)
201 self.update_pyramid()
202
203 def on_location_change(self, obj, attr, old, new):
204 self.location = new
205 self.update_pyramid()
206
207 def create_layout(self):
208 from bokeh.models.widgets import Select, HBox, VBox
209
210 years = list(map(str, sorted(self.df.Year.unique())))
211 locations = sorted(self.df.Location.unique())
212
213 year_select = Select(title="Year:", value="2010", options=years)
214 location_select = Select(title="Location:", value="World", options=locations)
215
216 year_select.on_change('value', self.on_year_change)
217 location_select.on_change('value', self.on_location_change)
218
219 controls = HBox(year_select, location_select)
220 self.layout = VBox(controls, self.plot)
221
222 def update_pyramid(self):
223 pyramid = self.df[(self.df.Location == self.location) & (self.df.Year == self.year)]
224
225 male = pyramid[pyramid.Sex == "Male"]
226 female = pyramid[pyramid.Sex == "Female"]
227
228 total = male.Value.sum() + female.Value.sum()
229
230 male_percent = -male.Value / total
231 female_percent = female.Value / total
232
233 groups = male.AgeGrpStart.tolist()
234 shifted = groups[1:] + [groups[-1] + 5]
235
236 self.source_pyramid.data = dict(
237 groups=groups,
238 shifted=shifted,
239 male=male_percent,
240 female=female_percent,
241 )
242 self.session.store_document(self.document)
243
244
245 def update_population(plot):
246 while True:
247 plot.session.load_document(plot.document)
248 time.sleep(0.1)
249
250 if __name__ == '__main__':
251 app.run(debug=True)
252
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/embed/slideshow/app_reveal.py b/examples/embed/slideshow/app_reveal.py
--- a/examples/embed/slideshow/app_reveal.py
+++ b/examples/embed/slideshow/app_reveal.py
@@ -16,8 +16,7 @@
from bokeh.embed import autoload_server
from bokeh.models import GlyphRenderer
-from bokeh.plotting import (annular_wedge, cursession, figure, hold, legend,
- line, output_server, push, quad, xgrid, ygrid)
+from bokeh.plotting import cursession, figure, output_server, push
from flask import Flask, render_template
app = Flask(__name__)
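
The patch is minimal because the body of the example already uses the figure-method API (`p.quad(...)`, `p.line(...)`, `p.annular_wedge(...)`); only the stale module-level imports had to be trimmed down to the names that still exist. The sketch below illustrates the retained import style; it assumes a running Bokeh server, as in the example, and the document name is illustrative rather than taken from the source.

```python
# Sketch of the import style the patch keeps; glyph functions are figure methods.
from bokeh.plotting import cursession, figure, output_server, push

output_server("annular_demo")  # illustrative document name, not from the example

p = figure(title="Annular wedge demo", x_range=[-11, 11], y_range=[-11, 11])
p.annular_wedge(0, 0, 6, 8, 0.6, 4.1,  # x, y, inner/outer radius, start/end angle
                fill_color="#43a2ca", line_color="black")

push()                  # send the current document to the server
session = cursession()  # keep the session for later data-source updates
```
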
| {"golden_diff": "diff --git a/examples/embed/slideshow/app_reveal.py b/examples/embed/slideshow/app_reveal.py\n--- a/examples/embed/slideshow/app_reveal.py\n+++ b/examples/embed/slideshow/app_reveal.py\n@@ -16,8 +16,7 @@\n \n from bokeh.embed import autoload_server\n from bokeh.models import GlyphRenderer\n-from bokeh.plotting import (annular_wedge, cursession, figure, hold, legend,\n- line, output_server, push, quad, xgrid, ygrid)\n+from bokeh.plotting import cursession, figure, output_server, push\n \n from flask import Flask, render_template\n app = Flask(__name__)\n", "issue": "app_reveal fails importing old plotting stuff\n```\n(py34devel)[damian@damian-S400CA][slideshow](master)$ python app_reveal.py \nTraceback (most recent call last):\n File \"app_reveal.py\", line 19, in <module>\n from bokeh.plotting import (annular_wedge, cursession, figure, hold, legend,\nImportError: cannot import name 'annular_wedge'\n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nIn this example, we want to show you how you can take isolated blocks of code\n(featuring different kinds of Bokeh visualizations) and rearrange them in a\nbigger (encompassing) flask-based application without losing the independence\nof each example. This is the reason of some weirdness through the code.\nWe are using this \"building blocks\" approach here because we believe it has some\nconceptual advantages for people trying to quickly understand, and more\nimportantly, use the embed API, in a more complex way than just a simple script.\n\"\"\"\nimport time\nfrom threading import Thread\n\nimport numpy as np\nimport scipy.special\n\nfrom bokeh.embed import autoload_server\nfrom bokeh.models import GlyphRenderer\nfrom bokeh.plotting import (annular_wedge, cursession, figure, hold, legend,\n line, output_server, push, quad, xgrid, ygrid)\n\nfrom flask import Flask, render_template\napp = Flask(__name__)\n\[email protected]('/')\ndef render_plot():\n \"\"\"\n Get the script tags from each plot object and \"insert\" them into the template.\n\n This also starts different threads for each update function, so you can have\n a non-blocking update.\n \"\"\"\n dist_plot, dist_session = distribution()\n dist_tag = autoload_server(dist_plot, dist_session)\n\n anim_plot, anim_session = animated()\n anim_tag = autoload_server(anim_plot, anim_session)\n # for update_animation as target we need to pass the anim_plot and anim_session as args\n thread = Thread(target=update_animation, args=(anim_plot, anim_session))\n thread.start()\n\n pop = Population()\n pop_tag = autoload_server(pop.layout, pop.session)\n # for update_population as target we need to pass the pop instance as args\n thread = Thread(target=update_population, args=(pop,))\n thread.start()\n\n return render_template('app_plot.html', tag1=dist_tag, tag2=anim_tag, tag3=pop_tag)\n\n\ndef distribution():\n\n mu, sigma = 0, 0.5\n\n measured = np.random.normal(mu, sigma, 1000)\n hist, edges = np.histogram(measured, density=True, bins=20)\n\n x = np.linspace(-2, 2, 1000)\n pdf = 1 / (sigma * np.sqrt(2 * np.pi)) * np.exp(-(x - mu) ** 2 / (2 * sigma ** 2))\n cdf = (1 + scipy.special.erf((x - mu) / np.sqrt(2 * sigma ** 2))) / 2\n\n output_server(\"distribution_reveal\")\n\n p = figure(title=\"Interactive plots\",\n background_fill=\"#E5E5E5\")\n p.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:],\n fill_color=\"#333333\", line_color=\"#E5E5E5\", line_width=3)\n\n # Use `line` renderers to display the PDF and CDF\n p.line(x, pdf, line_color=\"#348abd\", 
line_width=8, alpha=0.7, legend=\"PDF\")\n p.line(x, cdf, line_color=\"#7a68a6\", line_width=8, alpha=0.7, legend=\"CDF\")\n\n p.legend.orientation = \"top_left\"\n\n p.xaxis.axis_label = 'x'\n p.xgrid[0].grid_line_color = \"white\"\n p.xgrid[0].grid_line_width = 3\n\n p.yaxis.axis_label = 'Pr(x)'\n p.ygrid[0].grid_line_color = \"white\"\n p.ygrid[0].grid_line_width = 3\n\n push()\n\n return p, cursession()\n\n\ndef animated():\n\n from numpy import pi, cos, sin, linspace\n\n N = 50 + 1\n r_base = 8\n theta = linspace(0, 2 * pi, N)\n r_x = linspace(0, 6 * pi, N - 1)\n rmin = r_base - cos(r_x) - 1\n rmax = r_base + sin(r_x) + 1\n\n colors = [\"FFFFCC\", \"#C7E9B4\", \"#7FCDBB\", \"#41B6C4\", \"#2C7FB8\",\n \"#253494\", \"#2C7FB8\", \"#41B6C4\", \"#7FCDBB\", \"#C7E9B4\"] * 5\n\n output_server(\"animated_reveal\")\n\n p = figure(title=\"Animations\", x_range=[-11, 11], y_range=[-11, 11])\n\n p.annular_wedge(\n 0, 0, rmin, rmax, theta[:-1], theta[1:],\n inner_radius_units=\"data\",\n outer_radius_units=\"data\",\n fill_color=colors,\n line_color=\"black\",\n )\n\n push()\n\n return p, cursession()\n\n\ndef update_animation(plot, session):\n\n from numpy import roll\n\n renderer = plot.select(dict(type=GlyphRenderer))\n ds = renderer[0].data_source\n\n while True:\n\n rmin = ds.data[\"inner_radius\"]\n rmin = roll(rmin, 1)\n ds.data[\"inner_radius\"] = rmin\n\n rmax = ds.data[\"outer_radius\"]\n rmax = roll(rmax, -1)\n ds.data[\"outer_radius\"] = rmax\n\n cursession().store_objects(ds)\n time.sleep(0.1)\n\n\nclass Population(object):\n\n year = 2010\n location = \"World\"\n\n def __init__(self):\n from bokeh.models import ColumnDataSource\n from bokeh.document import Document\n from bokeh.session import Session\n from bokeh.sampledata.population import load_population\n\n self.document = Document()\n self.session = Session()\n self.session.use_doc('population_reveal')\n self.session.load_document(self.document)\n\n self.df = load_population()\n self.source_pyramid = ColumnDataSource(data=dict())\n\n # just render at the initialization\n self._render()\n\n def _render(self):\n self.pyramid_plot()\n self.create_layout()\n self.document.add(self.layout)\n self.update_pyramid()\n\n def pyramid_plot(self):\n from bokeh.models import (Plot, DataRange1d, LinearAxis, Grid,\n Legend, SingleIntervalTicker)\n from bokeh.models.glyphs import Quad\n\n xdr = DataRange1d(sources=[self.source_pyramid.columns(\"male\"),\n self.source_pyramid.columns(\"female\")])\n ydr = DataRange1d(sources=[self.source_pyramid.columns(\"groups\")])\n\n self.plot = Plot(title=\"Widgets\", x_range=xdr, y_range=ydr,\n plot_width=600, plot_height=600)\n\n xaxis = LinearAxis()\n self.plot.add_layout(xaxis, 'below')\n yaxis = LinearAxis(ticker=SingleIntervalTicker(interval=5))\n self.plot.add_layout(yaxis, 'left')\n\n self.plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))\n self.plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))\n\n male_quad = Quad(left=\"male\", right=0, bottom=\"groups\", top=\"shifted\",\n fill_color=\"#3B8686\")\n male_quad_glyph = self.plot.add_glyph(self.source_pyramid, male_quad)\n\n female_quad = Quad(left=0, right=\"female\", bottom=\"groups\", top=\"shifted\",\n fill_color=\"#CFF09E\")\n female_quad_glyph = self.plot.add_glyph(self.source_pyramid, female_quad)\n\n self.plot.add_layout(Legend(legends=dict(Male=[male_quad_glyph],\n Female=[female_quad_glyph])))\n\n def on_year_change(self, obj, attr, old, new):\n self.year = int(new)\n self.update_pyramid()\n\n def on_location_change(self, obj, 
attr, old, new):\n self.location = new\n self.update_pyramid()\n\n def create_layout(self):\n from bokeh.models.widgets import Select, HBox, VBox\n\n years = list(map(str, sorted(self.df.Year.unique())))\n locations = sorted(self.df.Location.unique())\n\n year_select = Select(title=\"Year:\", value=\"2010\", options=years)\n location_select = Select(title=\"Location:\", value=\"World\", options=locations)\n\n year_select.on_change('value', self.on_year_change)\n location_select.on_change('value', self.on_location_change)\n\n controls = HBox(year_select, location_select)\n self.layout = VBox(controls, self.plot)\n\n def update_pyramid(self):\n pyramid = self.df[(self.df.Location == self.location) & (self.df.Year == self.year)]\n\n male = pyramid[pyramid.Sex == \"Male\"]\n female = pyramid[pyramid.Sex == \"Female\"]\n\n total = male.Value.sum() + female.Value.sum()\n\n male_percent = -male.Value / total\n female_percent = female.Value / total\n\n groups = male.AgeGrpStart.tolist()\n shifted = groups[1:] + [groups[-1] + 5]\n\n self.source_pyramid.data = dict(\n groups=groups,\n shifted=shifted,\n male=male_percent,\n female=female_percent,\n )\n self.session.store_document(self.document)\n\n\ndef update_population(plot):\n while True:\n plot.session.load_document(plot.document)\n time.sleep(0.1)\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "path": "examples/embed/slideshow/app_reveal.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nIn this example, we want to show you how you can take isolated blocks of code\n(featuring different kinds of Bokeh visualizations) and rearrange them in a\nbigger (encompassing) flask-based application without losing the independence\nof each example. This is the reason of some weirdness through the code.\nWe are using this \"building blocks\" approach here because we believe it has some\nconceptual advantages for people trying to quickly understand, and more\nimportantly, use the embed API, in a more complex way than just a simple script.\n\"\"\"\nimport time\nfrom threading import Thread\n\nimport numpy as np\nimport scipy.special\n\nfrom bokeh.embed import autoload_server\nfrom bokeh.models import GlyphRenderer\nfrom bokeh.plotting import cursession, figure, output_server, push\n\nfrom flask import Flask, render_template\napp = Flask(__name__)\n\[email protected]('/')\ndef render_plot():\n \"\"\"\n Get the script tags from each plot object and \"insert\" them into the template.\n\n This also starts different threads for each update function, so you can have\n a non-blocking update.\n \"\"\"\n dist_plot, dist_session = distribution()\n dist_tag = autoload_server(dist_plot, dist_session)\n\n anim_plot, anim_session = animated()\n anim_tag = autoload_server(anim_plot, anim_session)\n # for update_animation as target we need to pass the anim_plot and anim_session as args\n thread = Thread(target=update_animation, args=(anim_plot, anim_session))\n thread.start()\n\n pop = Population()\n pop_tag = autoload_server(pop.layout, pop.session)\n # for update_population as target we need to pass the pop instance as args\n thread = Thread(target=update_population, args=(pop,))\n thread.start()\n\n return render_template('app_plot.html', tag1=dist_tag, tag2=anim_tag, tag3=pop_tag)\n\n\ndef distribution():\n\n mu, sigma = 0, 0.5\n\n measured = np.random.normal(mu, sigma, 1000)\n hist, edges = np.histogram(measured, density=True, bins=20)\n\n x = np.linspace(-2, 2, 1000)\n pdf = 1 / (sigma * np.sqrt(2 * np.pi)) * np.exp(-(x - mu) ** 2 / (2 * sigma 
** 2))\n cdf = (1 + scipy.special.erf((x - mu) / np.sqrt(2 * sigma ** 2))) / 2\n\n output_server(\"distribution_reveal\")\n\n p = figure(title=\"Interactive plots\",\n background_fill=\"#E5E5E5\")\n p.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:],\n fill_color=\"#333333\", line_color=\"#E5E5E5\", line_width=3)\n\n # Use `line` renderers to display the PDF and CDF\n p.line(x, pdf, line_color=\"#348abd\", line_width=8, alpha=0.7, legend=\"PDF\")\n p.line(x, cdf, line_color=\"#7a68a6\", line_width=8, alpha=0.7, legend=\"CDF\")\n\n p.legend.orientation = \"top_left\"\n\n p.xaxis.axis_label = 'x'\n p.xgrid[0].grid_line_color = \"white\"\n p.xgrid[0].grid_line_width = 3\n\n p.yaxis.axis_label = 'Pr(x)'\n p.ygrid[0].grid_line_color = \"white\"\n p.ygrid[0].grid_line_width = 3\n\n push()\n\n return p, cursession()\n\n\ndef animated():\n\n from numpy import pi, cos, sin, linspace\n\n N = 50 + 1\n r_base = 8\n theta = linspace(0, 2 * pi, N)\n r_x = linspace(0, 6 * pi, N - 1)\n rmin = r_base - cos(r_x) - 1\n rmax = r_base + sin(r_x) + 1\n\n colors = [\"FFFFCC\", \"#C7E9B4\", \"#7FCDBB\", \"#41B6C4\", \"#2C7FB8\",\n \"#253494\", \"#2C7FB8\", \"#41B6C4\", \"#7FCDBB\", \"#C7E9B4\"] * 5\n\n output_server(\"animated_reveal\")\n\n p = figure(title=\"Animations\", x_range=[-11, 11], y_range=[-11, 11])\n\n p.annular_wedge(\n 0, 0, rmin, rmax, theta[:-1], theta[1:],\n inner_radius_units=\"data\",\n outer_radius_units=\"data\",\n fill_color=colors,\n line_color=\"black\",\n )\n\n push()\n\n return p, cursession()\n\n\ndef update_animation(plot, session):\n\n from numpy import roll\n\n renderer = plot.select(dict(type=GlyphRenderer))\n ds = renderer[0].data_source\n\n while True:\n\n rmin = ds.data[\"inner_radius\"]\n rmin = roll(rmin, 1)\n ds.data[\"inner_radius\"] = rmin\n\n rmax = ds.data[\"outer_radius\"]\n rmax = roll(rmax, -1)\n ds.data[\"outer_radius\"] = rmax\n\n cursession().store_objects(ds)\n time.sleep(0.1)\n\n\nclass Population(object):\n\n year = 2010\n location = \"World\"\n\n def __init__(self):\n from bokeh.models import ColumnDataSource\n from bokeh.document import Document\n from bokeh.session import Session\n from bokeh.sampledata.population import load_population\n\n self.document = Document()\n self.session = Session()\n self.session.use_doc('population_reveal')\n self.session.load_document(self.document)\n\n self.df = load_population()\n self.source_pyramid = ColumnDataSource(data=dict())\n\n # just render at the initialization\n self._render()\n\n def _render(self):\n self.pyramid_plot()\n self.create_layout()\n self.document.add(self.layout)\n self.update_pyramid()\n\n def pyramid_plot(self):\n from bokeh.models import (Plot, DataRange1d, LinearAxis, Grid,\n Legend, SingleIntervalTicker)\n from bokeh.models.glyphs import Quad\n\n xdr = DataRange1d(sources=[self.source_pyramid.columns(\"male\"),\n self.source_pyramid.columns(\"female\")])\n ydr = DataRange1d(sources=[self.source_pyramid.columns(\"groups\")])\n\n self.plot = Plot(title=\"Widgets\", x_range=xdr, y_range=ydr,\n plot_width=600, plot_height=600)\n\n xaxis = LinearAxis()\n self.plot.add_layout(xaxis, 'below')\n yaxis = LinearAxis(ticker=SingleIntervalTicker(interval=5))\n self.plot.add_layout(yaxis, 'left')\n\n self.plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))\n self.plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))\n\n male_quad = Quad(left=\"male\", right=0, bottom=\"groups\", top=\"shifted\",\n fill_color=\"#3B8686\")\n male_quad_glyph = self.plot.add_glyph(self.source_pyramid, male_quad)\n\n 
female_quad = Quad(left=0, right=\"female\", bottom=\"groups\", top=\"shifted\",\n fill_color=\"#CFF09E\")\n female_quad_glyph = self.plot.add_glyph(self.source_pyramid, female_quad)\n\n self.plot.add_layout(Legend(legends=dict(Male=[male_quad_glyph],\n Female=[female_quad_glyph])))\n\n def on_year_change(self, obj, attr, old, new):\n self.year = int(new)\n self.update_pyramid()\n\n def on_location_change(self, obj, attr, old, new):\n self.location = new\n self.update_pyramid()\n\n def create_layout(self):\n from bokeh.models.widgets import Select, HBox, VBox\n\n years = list(map(str, sorted(self.df.Year.unique())))\n locations = sorted(self.df.Location.unique())\n\n year_select = Select(title=\"Year:\", value=\"2010\", options=years)\n location_select = Select(title=\"Location:\", value=\"World\", options=locations)\n\n year_select.on_change('value', self.on_year_change)\n location_select.on_change('value', self.on_location_change)\n\n controls = HBox(year_select, location_select)\n self.layout = VBox(controls, self.plot)\n\n def update_pyramid(self):\n pyramid = self.df[(self.df.Location == self.location) & (self.df.Year == self.year)]\n\n male = pyramid[pyramid.Sex == \"Male\"]\n female = pyramid[pyramid.Sex == \"Female\"]\n\n total = male.Value.sum() + female.Value.sum()\n\n male_percent = -male.Value / total\n female_percent = female.Value / total\n\n groups = male.AgeGrpStart.tolist()\n shifted = groups[1:] + [groups[-1] + 5]\n\n self.source_pyramid.data = dict(\n groups=groups,\n shifted=shifted,\n male=male_percent,\n female=female_percent,\n )\n self.session.store_document(self.document)\n\n\ndef update_population(plot):\n while True:\n plot.session.load_document(plot.document)\n time.sleep(0.1)\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "path": "examples/embed/slideshow/app_reveal.py"}]} |
gh_patches_debug_1239 | rasdani/github-patches | git_diff | liqd__a4-product-66 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Internal server error when editing poll question
An internal server error occurs when editing a poll question while creating a poll in the dashboard.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `liqd_product/config/settings/base.py`
Content:
```
1 """Django settings for _LIQD_PRODUCT_."""
2
3 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
4 import os
5
6 CONFIG_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
7 PROJECT_DIR = os.path.dirname(CONFIG_DIR)
8 BASE_DIR = os.path.dirname(PROJECT_DIR)
9
10 # Application definition
11
12 INSTALLED_APPS = (
13 'liqd_product.apps.django_overwrites.apps.Config',
14 'django.contrib.sites',
15 'django.contrib.admin',
16 'django.contrib.auth',
17 'django.contrib.contenttypes',
18 'django.contrib.sessions',
19 'django.contrib.messages',
20 'django.contrib.staticfiles',
21 'django.contrib.humanize',
22
23 'widget_tweaks',
24 'rest_framework',
25 'allauth',
26 'allauth.account',
27 'rules.apps.AutodiscoverRulesConfig',
28 'easy_thumbnails',
29 'ckeditor',
30 'ckeditor_uploader',
31 'capture_tag',
32 'background_task',
33
34 # Temporary Compatibility layer for a4-meinberlin
35 'liqd_product.apps.compatibility.apps.Config',
36
37 # General adhocracy 4 components
38 'adhocracy4.actions.apps.ActionsConfig',
39 'adhocracy4.categories.apps.CategoriesConfig',
40 'adhocracy4.comments.apps.CommentsConfig',
41 'adhocracy4.filters.apps.FiltersConfig',
42 'adhocracy4.follows.apps.FollowsConfig',
43 'adhocracy4.forms.apps.FormsConfig',
44 'adhocracy4.images.apps.ImagesConfig',
45 'adhocracy4.maps.apps.MapsConfig',
46 'adhocracy4.modules.apps.ModulesConfig',
47 'adhocracy4.organisations.apps.OrganisationsConfig',
48 'adhocracy4.phases.apps.PhasesConfig',
49 'adhocracy4.projects.apps.ProjectsConfig',
50 'adhocracy4.ratings.apps.RatingsConfig',
51 'adhocracy4.reports.apps.ReportsConfig',
52
53 # General components that define models or helpers
54 'liqd_product.apps.contrib.apps.Config',
55 'liqd_product.apps.organisations.apps.Config',
56 'liqd_product.apps.partners.apps.Config',
57 'liqd_product.apps.users.apps.Config',
58 'meinberlin.apps.contrib.apps.Config',
59 'meinberlin.apps.actions.apps.Config',
60 'meinberlin.apps.moderatorfeedback.apps.Config',
61 'meinberlin.apps.maps.apps.Config',
62
63 # General apps containing views
64 'liqd_product.apps.account.apps.Config',
65 'meinberlin.apps.embed.apps.Config',
66 'meinberlin.apps.exports.apps.Config',
67 'meinberlin.apps.offlineevents.apps.Config',
68 'meinberlin.apps.projects.apps.Config',
69 'meinberlin.apps.dashboard2.apps.Config',
70
71 # Apps defining phases
72 'meinberlin.apps.documents.apps.Config',
73 'meinberlin.apps.ideas.apps.Config',
74 'meinberlin.apps.mapideas.apps.Config',
75 'meinberlin.apps.polls.apps.Config',
76 )
77
78 MIDDLEWARE = (
79 'django.contrib.sessions.middleware.SessionMiddleware',
80 'django.middleware.common.CommonMiddleware',
81 'django.middleware.csrf.CsrfViewMiddleware',
82 'django.contrib.auth.middleware.AuthenticationMiddleware',
83 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
84 'django.contrib.messages.middleware.MessageMiddleware',
85 'django.middleware.clickjacking.XFrameOptionsMiddleware',
86 'django.middleware.security.SecurityMiddleware',
87 'django.middleware.locale.LocaleMiddleware',
88
89 'liqd_product.apps.partners.middleware.PartnerMiddleware',
90 'meinberlin.apps.embed.middleware.AjaxPathMiddleware',
91 )
92
93 SITE_ID = 1
94
95 ROOT_URLCONF = 'liqd_product.config.urls'
96
97 LOCALE_PATHS = [os.path.join(BASE_DIR, 'locale')]
98
99 TEMPLATES = [
100 {
101 'BACKEND': 'django.template.backends.django.DjangoTemplates',
102 'DIRS': [
103 os.path.join(PROJECT_DIR, 'templates'),
104 ],
105 'APP_DIRS': True,
106 'OPTIONS': {
107 'context_processors': [
108 'django.template.context_processors.debug',
109 'django.template.context_processors.request',
110 'django.contrib.auth.context_processors.auth',
111 'django.contrib.messages.context_processors.messages',
112 ],
113 },
114 },
115 ]
116
117 WSGI_APPLICATION = 'liqd_product.config.wsgi.application'
118
119 REVERSE_METHOD = 'liqd_product.apps.partners.urlresolvers.reverse'
120
121
122 # Database
123 # https://docs.djangoproject.com/en/1.8/ref/settings/#databases
124
125 DATABASES = {
126 'default': {
127 'ENGINE': 'django.db.backends.sqlite3',
128 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
129 'TEST': {
130 'NAME': os.path.join(BASE_DIR, 'test_db.sqlite3'),
131 }
132 }
133 }
134
135
136 # Internationalization
137 # https://docs.djangoproject.com/en/1.8/topics/i18n/
138
139 LANGUAGE_CODE = 'en-us'
140
141 TIME_ZONE = 'Europe/Berlin'
142
143 USE_I18N = True
144
145 USE_L10N = True
146
147 USE_TZ = True
148
149
150 # Static files (CSS, JavaScript, Images)
151 # https://docs.djangoproject.com/en/1.8/howto/static-files/
152
153 STATICFILES_DIRS = [
154 os.path.join(PROJECT_DIR, 'static'),
155 ]
156
157 STATIC_ROOT = os.path.join(BASE_DIR, 'static')
158 STATIC_URL = '/static/'
159
160 MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
161 MEDIA_URL = '/media/'
162
163 IMAGE_ALIASES = {
164 '*': {
165 'max_size': 5*10**6,
166 'fileformats': ('image/png', 'image/jpeg', 'image/gif')
167 },
168 'heroimage': {'min_resolution': (1500, 500)},
169 'tileimage': {'min_resolution': (500, 300)},
170 'logo': {'min_resolution': (200, 200), 'aspect_ratio': (1, 1)},
171 'avatar': {'min_resolution': (200, 200)},
172 'idea_image': {'min_resolution': (800, 200)},
173 }
174
175 THUMBNAIL_ALIASES = {
176 '': {
177 'heroimage': {'size': (1500, 500), 'crop': 'smart'},
178 'heroimage_preview': {'size': (880, 220), 'crop': 'smart'},
179 'project_thumbnail': {'size': (520, 330), 'crop': 'smart'},
180 'idea_image': {'size': (800, 0), 'crop': 'scale'},
181 'idea_thumbnail': {'size': (240, 240), 'crop': 'smart'},
182 }
183 }
184
185 ALLOWED_UPLOAD_IMAGES = ('png', 'jpeg', 'gif')
186
187
188 # Authentication
189
190 AUTH_USER_MODEL = 'liqd_product_users.User'
191
192 AUTHENTICATION_BACKENDS = (
193 'rules.permissions.ObjectPermissionBackend',
194 'django.contrib.auth.backends.ModelBackend',
195 'allauth.account.auth_backends.AuthenticationBackend',
196 )
197
198 ACCOUNT_ADAPTER = 'liqd_product.apps.users.adapters.AccountAdapter'
199 ACCOUNT_AUTHENTICATION_METHOD = 'username_email'
200 ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 3
201 ACCOUNT_EMAIL_REQUIRED = True
202 ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
203 ACCOUNT_USERNAME_REQUIRED = True
204 ACCOUNT_LOGIN_ATTEMPTS_LIMIT = 10
205 ACCOUNT_LOGIN_ATTEMPTS_TIMEOUT = 300 # seconds
206 ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
207 ACCOUNT_LOGIN_ON_PASSWORD_RESET = True
208 ACCOUNT_SIGNUP_FORM_CLASS = 'liqd_product.apps.users.forms.TermsSignupForm'
209 SOCIALACCOUNT_EMAIL_VERIFICATION = 'none'
210
211 LOGIN_URL = 'account_login'
212 LOGIN_REDIRECT_URL = '/'
213
214 EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
215
216
217 # CKEditor
218
219 CKEDITOR_UPLOAD_PATH = "uploads/"
220 CKEDITOR_RESTRICT_BY_USER = 'username'
221 CKEDITOR_ALLOW_NONIMAGE_FILES = True
222
223 CKEDITOR_CONFIGS = {
224 'default': {
225 'width': '100%',
226 'toolbar': 'Custom',
227 'toolbar_Custom': [
228 ['Bold', 'Italic', 'Underline'],
229 ['NumberedList', 'BulletedList'],
230 ['Link', 'Unlink']
231 ]
232 },
233 'image-editor': {
234 'width': '100%',
235 'toolbar': 'Custom',
236 'toolbar_Custom': [
237 ['Bold', 'Italic', 'Underline'],
238 ['Image'],
239 ['NumberedList', 'BulletedList'],
240 ['Link', 'Unlink']
241 ]
242 }
243 }
244
245 BLEACH_LIST = {
246 'default' : {
247 'tags': ['p','strong','em','u','ol','li','ul','a'],
248 'attributes': {
249 'a': ['href', 'rel'],
250 },
251 },
252 'image-editor': {
253 'tags': ['p','strong','em','u','ol','li','ul','a','img'],
254 'attributes': {
255 'a': ['href', 'rel'],
256 'img': ['src', 'alt', 'style']
257 },
258 'styles': [
259 'float',
260 'margin',
261 'padding',
262 'width',
263 'height',
264 'margin-bottom',
265 'margin-top',
266 'margin-left',
267 'margin-right',
268 ],
269 }
270 }
271
272
273 # adhocracy4
274
275 A4_ORGANISATIONS_MODEL = 'liqd_product_organisations.Organisation'
276
277 A4_RATEABLES = (
278 ('a4comments', 'comment'),
279 ('meinberlin_ideas', 'idea'),
280 ('meinberlin_mapideas', 'mapidea'),
281 )
282
283 A4_COMMENTABLES = (
284 ('a4comments', 'comment'),
285 ('meinberlin_ideas', 'idea'),
286 ('meinberlin_documents', 'chapter'),
287 ('meinberlin_documents', 'paragraph'),
288 ('meinberlin_mapideas', 'mapidea'),
289 ('meinberlin_polls', 'poll'),
290 )
291
292 A4_REPORTABLES = (
293 ('a4comments', 'comment'),
294 ('meinberlin_ideas', 'idea'),
295 ('meinberlin_mapideas', 'mapidea'),
296 )
297
298 A4_ACTIONABLES = (
299 ('a4comments', 'comment'),
300 ('meinberlin_ideas', 'idea'),
301 ('meinberlin_mapideas', 'mapidea'),
302 )
303
304 A4_AUTO_FOLLOWABLES = (
305 ('a4comments', 'comment'),
306 ('meinberlin_ideas', 'idea'),
307 ('meinberlin_mapideas', 'mapidea'),
308 )
309
310
311 A4_CATEGORIZABLE = (
312 ('meinberlin_ideas', 'idea'),
313 ('meinberlin_mapideas', 'mapidea'),
314 )
315
316
317 A4_MAP_BASEURL = 'https://{s}.tile.openstreetmap.org/'
318 A4_MAP_ATTRIBUTION = '© <a href="http://openstreetmap.org/copyright">OpenStreetMap</a> contributors'
319 A4_MAP_BOUNDING_BOX = ([[52.3517, 13.8229], [52.6839, 12.9543]])
320
321 A4_DASHBOARD = {
322 'PROJECT_DASHBOARD_CLASS': 'meinberlin.apps.dashboard2.ProjectDashboard',
323 'BLUEPRINTS': 'liqd_product.apps.dashboard.blueprints.blueprints'
324 }
325
326 CONTACT_EMAIL = '[email protected]'
327 EMAIL_DEFAULT_LANGUAGE = 'de'
328
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/liqd_product/config/settings/base.py b/liqd_product/config/settings/base.py
--- a/liqd_product/config/settings/base.py
+++ b/liqd_product/config/settings/base.py
@@ -49,6 +49,7 @@
'adhocracy4.projects.apps.ProjectsConfig',
'adhocracy4.ratings.apps.RatingsConfig',
'adhocracy4.reports.apps.ReportsConfig',
+ 'adhocracy4.rules.apps.RulesConfig',
# General components that define models or helpers
'liqd_product.apps.contrib.apps.Config',
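
The one-line addition registers adhocracy4's rules integration in INSTALLED_APPS. The dashboard's poll editing forms go through rule-based permission checks, so loading this app config is presumably what wires those rules up; the issue itself gives no traceback, so treat that mechanism as an assumption. A small sanity check, sketched under the assumption that a dev settings module sits next to `base.py`:

```python
# Sanity-check sketch: confirm the app registry picks up the new entry.
import os

import django
from django.apps import apps

# Assumption: the project exposes a dev settings module alongside base.py.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "liqd_product.config.settings.dev")
django.setup()

print(apps.is_installed("adhocracy4.rules"))  # True once RulesConfig is in INSTALLED_APPS
```
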
| {"golden_diff": "diff --git a/liqd_product/config/settings/base.py b/liqd_product/config/settings/base.py\n--- a/liqd_product/config/settings/base.py\n+++ b/liqd_product/config/settings/base.py\n@@ -49,6 +49,7 @@\n 'adhocracy4.projects.apps.ProjectsConfig',\n 'adhocracy4.ratings.apps.RatingsConfig',\n 'adhocracy4.reports.apps.ReportsConfig',\n+ 'adhocracy4.rules.apps.RulesConfig',\n \n # General components that define models or helpers\n 'liqd_product.apps.contrib.apps.Config',\n", "issue": "Internal server error when editing poll question\nInternal server error when editing poll question in creating poll in dashboard\n", "before_files": [{"content": "\"\"\"Django settings for _LIQD_PRODUCT_.\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\n\nCONFIG_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nPROJECT_DIR = os.path.dirname(CONFIG_DIR)\nBASE_DIR = os.path.dirname(PROJECT_DIR)\n\n# Application definition\n\nINSTALLED_APPS = (\n 'liqd_product.apps.django_overwrites.apps.Config',\n 'django.contrib.sites',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.humanize',\n\n 'widget_tweaks',\n 'rest_framework',\n 'allauth',\n 'allauth.account',\n 'rules.apps.AutodiscoverRulesConfig',\n 'easy_thumbnails',\n 'ckeditor',\n 'ckeditor_uploader',\n 'capture_tag',\n 'background_task',\n\n # Temporary Compatibility layer for a4-meinberlin\n 'liqd_product.apps.compatibility.apps.Config',\n\n # General adhocracy 4 components\n 'adhocracy4.actions.apps.ActionsConfig',\n 'adhocracy4.categories.apps.CategoriesConfig',\n 'adhocracy4.comments.apps.CommentsConfig',\n 'adhocracy4.filters.apps.FiltersConfig',\n 'adhocracy4.follows.apps.FollowsConfig',\n 'adhocracy4.forms.apps.FormsConfig',\n 'adhocracy4.images.apps.ImagesConfig',\n 'adhocracy4.maps.apps.MapsConfig',\n 'adhocracy4.modules.apps.ModulesConfig',\n 'adhocracy4.organisations.apps.OrganisationsConfig',\n 'adhocracy4.phases.apps.PhasesConfig',\n 'adhocracy4.projects.apps.ProjectsConfig',\n 'adhocracy4.ratings.apps.RatingsConfig',\n 'adhocracy4.reports.apps.ReportsConfig',\n\n # General components that define models or helpers\n 'liqd_product.apps.contrib.apps.Config',\n 'liqd_product.apps.organisations.apps.Config',\n 'liqd_product.apps.partners.apps.Config',\n 'liqd_product.apps.users.apps.Config',\n 'meinberlin.apps.contrib.apps.Config',\n 'meinberlin.apps.actions.apps.Config',\n 'meinberlin.apps.moderatorfeedback.apps.Config',\n 'meinberlin.apps.maps.apps.Config',\n\n # General apps containing views\n 'liqd_product.apps.account.apps.Config',\n 'meinberlin.apps.embed.apps.Config',\n 'meinberlin.apps.exports.apps.Config',\n 'meinberlin.apps.offlineevents.apps.Config',\n 'meinberlin.apps.projects.apps.Config',\n 'meinberlin.apps.dashboard2.apps.Config',\n\n # Apps defining phases\n 'meinberlin.apps.documents.apps.Config',\n 'meinberlin.apps.ideas.apps.Config',\n 'meinberlin.apps.mapideas.apps.Config',\n 'meinberlin.apps.polls.apps.Config',\n)\n\nMIDDLEWARE = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 
'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n\n 'liqd_product.apps.partners.middleware.PartnerMiddleware',\n 'meinberlin.apps.embed.middleware.AjaxPathMiddleware',\n)\n\nSITE_ID = 1\n\nROOT_URLCONF = 'liqd_product.config.urls'\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, 'locale')]\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(PROJECT_DIR, 'templates'),\n ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'liqd_product.config.wsgi.application'\n\nREVERSE_METHOD = 'liqd_product.apps.partners.urlresolvers.reverse'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.8/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n 'TEST': {\n 'NAME': os.path.join(BASE_DIR, 'test_db.sqlite3'),\n }\n }\n}\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.8/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'Europe/Berlin'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.8/howto/static-files/\n\nSTATICFILES_DIRS = [\n os.path.join(PROJECT_DIR, 'static'),\n]\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nSTATIC_URL = '/static/'\n\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nMEDIA_URL = '/media/'\n\nIMAGE_ALIASES = {\n '*': {\n 'max_size': 5*10**6,\n 'fileformats': ('image/png', 'image/jpeg', 'image/gif')\n },\n 'heroimage': {'min_resolution': (1500, 500)},\n 'tileimage': {'min_resolution': (500, 300)},\n 'logo': {'min_resolution': (200, 200), 'aspect_ratio': (1, 1)},\n 'avatar': {'min_resolution': (200, 200)},\n 'idea_image': {'min_resolution': (800, 200)},\n}\n\nTHUMBNAIL_ALIASES = {\n '': {\n 'heroimage': {'size': (1500, 500), 'crop': 'smart'},\n 'heroimage_preview': {'size': (880, 220), 'crop': 'smart'},\n 'project_thumbnail': {'size': (520, 330), 'crop': 'smart'},\n 'idea_image': {'size': (800, 0), 'crop': 'scale'},\n 'idea_thumbnail': {'size': (240, 240), 'crop': 'smart'},\n }\n}\n\nALLOWED_UPLOAD_IMAGES = ('png', 'jpeg', 'gif')\n\n\n# Authentication\n\nAUTH_USER_MODEL = 'liqd_product_users.User'\n\nAUTHENTICATION_BACKENDS = (\n 'rules.permissions.ObjectPermissionBackend',\n 'django.contrib.auth.backends.ModelBackend',\n 'allauth.account.auth_backends.AuthenticationBackend',\n)\n\nACCOUNT_ADAPTER = 'liqd_product.apps.users.adapters.AccountAdapter'\nACCOUNT_AUTHENTICATION_METHOD = 'username_email'\nACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 3\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_EMAIL_VERIFICATION = 'mandatory'\nACCOUNT_USERNAME_REQUIRED = True\nACCOUNT_LOGIN_ATTEMPTS_LIMIT = 10\nACCOUNT_LOGIN_ATTEMPTS_TIMEOUT = 300 # seconds\nACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True\nACCOUNT_LOGIN_ON_PASSWORD_RESET = True\nACCOUNT_SIGNUP_FORM_CLASS = 'liqd_product.apps.users.forms.TermsSignupForm'\nSOCIALACCOUNT_EMAIL_VERIFICATION = 'none'\n\nLOGIN_URL = 'account_login'\nLOGIN_REDIRECT_URL = '/'\n\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\n\n# CKEditor\n\nCKEDITOR_UPLOAD_PATH = \"uploads/\"\nCKEDITOR_RESTRICT_BY_USER = 
'username'\nCKEDITOR_ALLOW_NONIMAGE_FILES = True\n\nCKEDITOR_CONFIGS = {\n 'default': {\n 'width': '100%',\n 'toolbar': 'Custom',\n 'toolbar_Custom': [\n ['Bold', 'Italic', 'Underline'],\n ['NumberedList', 'BulletedList'],\n ['Link', 'Unlink']\n ]\n },\n 'image-editor': {\n 'width': '100%',\n 'toolbar': 'Custom',\n 'toolbar_Custom': [\n ['Bold', 'Italic', 'Underline'],\n ['Image'],\n ['NumberedList', 'BulletedList'],\n ['Link', 'Unlink']\n ]\n }\n}\n\nBLEACH_LIST = {\n 'default' : {\n 'tags': ['p','strong','em','u','ol','li','ul','a'],\n 'attributes': {\n 'a': ['href', 'rel'],\n },\n },\n 'image-editor': {\n 'tags': ['p','strong','em','u','ol','li','ul','a','img'],\n 'attributes': {\n 'a': ['href', 'rel'],\n 'img': ['src', 'alt', 'style']\n },\n 'styles': [\n 'float',\n 'margin',\n 'padding',\n 'width',\n 'height',\n 'margin-bottom',\n 'margin-top',\n 'margin-left',\n 'margin-right',\n ],\n }\n}\n\n\n# adhocracy4\n\nA4_ORGANISATIONS_MODEL = 'liqd_product_organisations.Organisation'\n\nA4_RATEABLES = (\n ('a4comments', 'comment'),\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_mapideas', 'mapidea'),\n)\n\nA4_COMMENTABLES = (\n ('a4comments', 'comment'),\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_documents', 'chapter'),\n ('meinberlin_documents', 'paragraph'),\n ('meinberlin_mapideas', 'mapidea'),\n ('meinberlin_polls', 'poll'),\n)\n\nA4_REPORTABLES = (\n ('a4comments', 'comment'),\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_mapideas', 'mapidea'),\n)\n\nA4_ACTIONABLES = (\n ('a4comments', 'comment'),\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_mapideas', 'mapidea'),\n)\n\nA4_AUTO_FOLLOWABLES = (\n ('a4comments', 'comment'),\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_mapideas', 'mapidea'),\n)\n\n\nA4_CATEGORIZABLE = (\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_mapideas', 'mapidea'),\n)\n\n\nA4_MAP_BASEURL = 'https://{s}.tile.openstreetmap.org/'\nA4_MAP_ATTRIBUTION = '© <a href=\"http://openstreetmap.org/copyright\">OpenStreetMap</a> contributors'\nA4_MAP_BOUNDING_BOX = ([[52.3517, 13.8229], [52.6839, 12.9543]])\n\nA4_DASHBOARD = {\n 'PROJECT_DASHBOARD_CLASS': 'meinberlin.apps.dashboard2.ProjectDashboard',\n 'BLUEPRINTS': 'liqd_product.apps.dashboard.blueprints.blueprints'\n}\n\nCONTACT_EMAIL = '[email protected]'\nEMAIL_DEFAULT_LANGUAGE = 'de'\n", "path": "liqd_product/config/settings/base.py"}], "after_files": [{"content": "\"\"\"Django settings for _LIQD_PRODUCT_.\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\n\nCONFIG_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nPROJECT_DIR = os.path.dirname(CONFIG_DIR)\nBASE_DIR = os.path.dirname(PROJECT_DIR)\n\n# Application definition\n\nINSTALLED_APPS = (\n 'liqd_product.apps.django_overwrites.apps.Config',\n 'django.contrib.sites',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.humanize',\n\n 'widget_tweaks',\n 'rest_framework',\n 'allauth',\n 'allauth.account',\n 'rules.apps.AutodiscoverRulesConfig',\n 'easy_thumbnails',\n 'ckeditor',\n 'ckeditor_uploader',\n 'capture_tag',\n 'background_task',\n\n # Temporary Compatibility layer for a4-meinberlin\n 'liqd_product.apps.compatibility.apps.Config',\n\n # General adhocracy 4 components\n 'adhocracy4.actions.apps.ActionsConfig',\n 'adhocracy4.categories.apps.CategoriesConfig',\n 'adhocracy4.comments.apps.CommentsConfig',\n 'adhocracy4.filters.apps.FiltersConfig',\n 
'adhocracy4.follows.apps.FollowsConfig',\n 'adhocracy4.forms.apps.FormsConfig',\n 'adhocracy4.images.apps.ImagesConfig',\n 'adhocracy4.maps.apps.MapsConfig',\n 'adhocracy4.modules.apps.ModulesConfig',\n 'adhocracy4.organisations.apps.OrganisationsConfig',\n 'adhocracy4.phases.apps.PhasesConfig',\n 'adhocracy4.projects.apps.ProjectsConfig',\n 'adhocracy4.ratings.apps.RatingsConfig',\n 'adhocracy4.reports.apps.ReportsConfig',\n 'adhocracy4.rules.apps.RulesConfig',\n\n # General components that define models or helpers\n 'liqd_product.apps.contrib.apps.Config',\n 'liqd_product.apps.organisations.apps.Config',\n 'liqd_product.apps.partners.apps.Config',\n 'liqd_product.apps.users.apps.Config',\n 'meinberlin.apps.contrib.apps.Config',\n 'meinberlin.apps.actions.apps.Config',\n 'meinberlin.apps.moderatorfeedback.apps.Config',\n 'meinberlin.apps.maps.apps.Config',\n\n # General apps containing views\n 'liqd_product.apps.account.apps.Config',\n 'meinberlin.apps.embed.apps.Config',\n 'meinberlin.apps.exports.apps.Config',\n 'meinberlin.apps.offlineevents.apps.Config',\n 'meinberlin.apps.projects.apps.Config',\n 'meinberlin.apps.dashboard2.apps.Config',\n\n # Apps defining phases\n 'meinberlin.apps.documents.apps.Config',\n 'meinberlin.apps.ideas.apps.Config',\n 'meinberlin.apps.mapideas.apps.Config',\n 'meinberlin.apps.polls.apps.Config',\n)\n\nMIDDLEWARE = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n\n 'liqd_product.apps.partners.middleware.PartnerMiddleware',\n 'meinberlin.apps.embed.middleware.AjaxPathMiddleware',\n)\n\nSITE_ID = 1\n\nROOT_URLCONF = 'liqd_product.config.urls'\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, 'locale')]\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(PROJECT_DIR, 'templates'),\n ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'liqd_product.config.wsgi.application'\n\nREVERSE_METHOD = 'liqd_product.apps.partners.urlresolvers.reverse'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.8/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n 'TEST': {\n 'NAME': os.path.join(BASE_DIR, 'test_db.sqlite3'),\n }\n }\n}\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.8/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'Europe/Berlin'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.8/howto/static-files/\n\nSTATICFILES_DIRS = [\n os.path.join(PROJECT_DIR, 'static'),\n]\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nSTATIC_URL = '/static/'\n\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nMEDIA_URL = '/media/'\n\nIMAGE_ALIASES = {\n '*': {\n 'max_size': 5*10**6,\n 'fileformats': ('image/png', 'image/jpeg', 
'image/gif')\n },\n 'heroimage': {'min_resolution': (1500, 500)},\n 'tileimage': {'min_resolution': (500, 300)},\n 'logo': {'min_resolution': (200, 200), 'aspect_ratio': (1, 1)},\n 'avatar': {'min_resolution': (200, 200)},\n 'idea_image': {'min_resolution': (800, 200)},\n}\n\nTHUMBNAIL_ALIASES = {\n '': {\n 'heroimage': {'size': (1500, 500), 'crop': 'smart'},\n 'heroimage_preview': {'size': (880, 220), 'crop': 'smart'},\n 'project_thumbnail': {'size': (520, 330), 'crop': 'smart'},\n 'idea_image': {'size': (800, 0), 'crop': 'scale'},\n 'idea_thumbnail': {'size': (240, 240), 'crop': 'smart'},\n }\n}\n\nALLOWED_UPLOAD_IMAGES = ('png', 'jpeg', 'gif')\n\n\n# Authentication\n\nAUTH_USER_MODEL = 'liqd_product_users.User'\n\nAUTHENTICATION_BACKENDS = (\n 'rules.permissions.ObjectPermissionBackend',\n 'django.contrib.auth.backends.ModelBackend',\n 'allauth.account.auth_backends.AuthenticationBackend',\n)\n\nACCOUNT_ADAPTER = 'liqd_product.apps.users.adapters.AccountAdapter'\nACCOUNT_AUTHENTICATION_METHOD = 'username_email'\nACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 3\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_EMAIL_VERIFICATION = 'mandatory'\nACCOUNT_USERNAME_REQUIRED = True\nACCOUNT_LOGIN_ATTEMPTS_LIMIT = 10\nACCOUNT_LOGIN_ATTEMPTS_TIMEOUT = 300 # seconds\nACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True\nACCOUNT_LOGIN_ON_PASSWORD_RESET = True\nACCOUNT_SIGNUP_FORM_CLASS = 'liqd_product.apps.users.forms.TermsSignupForm'\nSOCIALACCOUNT_EMAIL_VERIFICATION = 'none'\n\nLOGIN_URL = 'account_login'\nLOGIN_REDIRECT_URL = '/'\n\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\n\n# CKEditor\n\nCKEDITOR_UPLOAD_PATH = \"uploads/\"\nCKEDITOR_RESTRICT_BY_USER = 'username'\nCKEDITOR_ALLOW_NONIMAGE_FILES = True\n\nCKEDITOR_CONFIGS = {\n 'default': {\n 'width': '100%',\n 'toolbar': 'Custom',\n 'toolbar_Custom': [\n ['Bold', 'Italic', 'Underline'],\n ['NumberedList', 'BulletedList'],\n ['Link', 'Unlink']\n ]\n },\n 'image-editor': {\n 'width': '100%',\n 'toolbar': 'Custom',\n 'toolbar_Custom': [\n ['Bold', 'Italic', 'Underline'],\n ['Image'],\n ['NumberedList', 'BulletedList'],\n ['Link', 'Unlink']\n ]\n }\n}\n\nBLEACH_LIST = {\n 'default' : {\n 'tags': ['p','strong','em','u','ol','li','ul','a'],\n 'attributes': {\n 'a': ['href', 'rel'],\n },\n },\n 'image-editor': {\n 'tags': ['p','strong','em','u','ol','li','ul','a','img'],\n 'attributes': {\n 'a': ['href', 'rel'],\n 'img': ['src', 'alt', 'style']\n },\n 'styles': [\n 'float',\n 'margin',\n 'padding',\n 'width',\n 'height',\n 'margin-bottom',\n 'margin-top',\n 'margin-left',\n 'margin-right',\n ],\n }\n}\n\n\n# adhocracy4\n\nA4_ORGANISATIONS_MODEL = 'liqd_product_organisations.Organisation'\n\nA4_RATEABLES = (\n ('a4comments', 'comment'),\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_mapideas', 'mapidea'),\n)\n\nA4_COMMENTABLES = (\n ('a4comments', 'comment'),\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_documents', 'chapter'),\n ('meinberlin_documents', 'paragraph'),\n ('meinberlin_mapideas', 'mapidea'),\n ('meinberlin_polls', 'poll'),\n)\n\nA4_REPORTABLES = (\n ('a4comments', 'comment'),\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_mapideas', 'mapidea'),\n)\n\nA4_ACTIONABLES = (\n ('a4comments', 'comment'),\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_mapideas', 'mapidea'),\n)\n\nA4_AUTO_FOLLOWABLES = (\n ('a4comments', 'comment'),\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_mapideas', 'mapidea'),\n)\n\n\nA4_CATEGORIZABLE = (\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_mapideas', 'mapidea'),\n)\n\n\nA4_MAP_BASEURL = 
'https://{s}.tile.openstreetmap.org/'\nA4_MAP_ATTRIBUTION = '© <a href=\"http://openstreetmap.org/copyright\">OpenStreetMap</a> contributors'\nA4_MAP_BOUNDING_BOX = ([[52.3517, 13.8229], [52.6839, 12.9543]])\n\nA4_DASHBOARD = {\n 'PROJECT_DASHBOARD_CLASS': 'meinberlin.apps.dashboard2.ProjectDashboard',\n 'BLUEPRINTS': 'liqd_product.apps.dashboard.blueprints.blueprints'\n}\n\nCONTACT_EMAIL = '[email protected]'\nEMAIL_DEFAULT_LANGUAGE = 'de'\n", "path": "liqd_product/config/settings/base.py"}]} |
gh_patches_debug_1240 | rasdani/github-patches | git_diff | ckan__ckan-4657 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
exception after upgrade to 2.8.1 - Popped wrong app context
After upgrading from 2.7 to 2.8.1 I'm seeing some exceptions in the production server log; I couldn't reproduce the error locally
### CKAN Version if known (or site URL)
2.8.1
### Please describe the expected behaviour
No exception (or at least a less cryptic exception).
### Please describe the actual behaviour
exception -
```
-------------------------------------------------------------
Error - <type 'exceptions.AssertionError'>: Popped wrong app context. (<flask.ctx.AppContext object at 0x7f3ac4b90b10> instead of <flask.ctx.AppContext object at 0x7f3ac4b90690>)
URL: http://www.odata.org.il/organization/hatzlacha?license_id=cc-by&tags=%D7%94%D7%AA%D7%A4%D7%A8%D7%A6%D7%95%D7%AA+%D7%9C%D7%9E%D7%A7%D7%95%D7%9D+%D7%9E%D7%92%D7%95%D7%A8%D7%99%D7%9D&organization=hatzlacha&res_format=DOC&tags=%D7%A2%D7%99%D7%9B%D7%95%D7%91+%D7%94%D7%9C%D7%99%D7%9B%D7%99%D7%9D
File '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/weberror/errormiddleware.py', line 171 in __call__
app_iter = self.application(environ, sr_checker)
File '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/webob/dec.py', line 147 in __call__
resp = self.call_func(req, *args, **self.kwargs)
File '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/webob/dec.py', line 208 in call_func
return self.func(req, *args, **kwargs)
File '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/fanstatic/publisher.py', line 234 in __call__
return request.get_response(self.app)
File '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/webob/request.py', line 1053 in get_response
application, catch_exc_info=False)
File '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/webob/request.py', line 1022 in call_application
app_iter = application(self.environ, start_response)
File '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/webob/dec.py', line 147 in __call__
resp = self.call_func(req, *args, **self.kwargs)
File '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/webob/dec.py', line 208 in call_func
return self.func(req, *args, **kwargs)
File '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/fanstatic/injector.py', line 54 in __call__
response = request.get_response(self.app)
File '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/webob/request.py', line 1053 in get_response
application, catch_exc_info=False)
File '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/webob/request.py', line 1022 in call_application
app_iter = application(self.environ, start_response)
File '/usr/lib/ckan/venv/src/ckan/ckan/config/middleware/pylons_app.py', line 265 in inner
result = application(environ, start_response)
File '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/beaker/middleware.py', line 73 in __call__
return self.app(environ, start_response)
File '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/beaker/middleware.py', line 156 in __call__
return self.wrap_app(environ, session_start_response)
File '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/routes/middleware.py', line 131 in __call__
response = self.app(environ, start_response)
File '/usr/lib/ckan/venv/src/ckan/ckan/config/middleware/common_middleware.py', line 30 in __call__
return self.app(environ, start_response)
File '/usr/lib/ckan/venv/src/ckan/ckan/config/middleware/common_middleware.py', line 56 in __call__
return self.app(environ, start_response)
File '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/pylons/wsgiapp.py', line 125 in __call__
response = self.dispatch(controller, environ, start_response)
File '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/pylons/wsgiapp.py', line 324 in dispatch
return controller(environ, start_response)
File '/usr/lib/ckan/venv/src/ckan/ckan/lib/base.py', line 240 in __call__
res = WSGIController.__call__(self, environ, start_response)
File '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/pylons/controllers/core.py', line 221 in __call__
response = self._dispatch_call()
File '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/pylons/controllers/core.py', line 172 in _dispatch_call
response = self._inspect_call(func)
File '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/pylons/controllers/core.py', line 107 in _inspect_call
result = self._perform_call(func, args)
File '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/pylons/controllers/core.py', line 60 in _perform_call
return func(**args)
File '/usr/lib/ckan/venv/src/ckan/ckan/controllers/group.py', line 230 in read
extra_vars={'group_type': group_type})
File '/usr/lib/ckan/venv/src/ckan/ckan/lib/base.py', line 125 in render
return cached_template(template_name, renderer)
File '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/pylons/templating.py', line 249 in cached_template
return render_func()
File '/usr/lib/ckan/venv/src/ckan/ckan/lib/base.py', line 162 in render_template
return render_jinja2(template_name, globs)
File '/usr/lib/ckan/venv/src/ckan/ckan/lib/base.py', line 94 in render_jinja2
return template.render(**extra_vars)
File '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/jinja2/environment.py', line 989 in render
return self.environment.handle_exception(exc_info, True)
File '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/jinja2/environment.py', line 754 in handle_exception
reraise(exc_type, exc_value, tb)
File '/usr/lib/ckan/venv/src/ckan/ckan/templates/organization/read.html', line 1 in top-level template code
{% extends "organization/read_base.html" %}
File '/usr/lib/ckan/venv/src/ckan/ckan/templates/organization/read_base.html', line 1 in top-level template code
{% extends "page.html" %}
File '/usr/lib/ckan/venv/src/ckan/ckan/templates/page.html', line 1 in top-level template code
{% extends "base.html" %}
File '/ckanext-odata_org_il/ckanext/odata_org_il/templates/base.html', line 1 in top-level template code
{% ckan_extends %}
File '/usr/lib/ckan/venv/src/ckan/ckan/templates/base.html', line 101 in top-level template code
{%- block page %}{% endblock -%}
File '/usr/lib/ckan/venv/src/ckan/ckan/templates/page.html', line 125 in block "page"
{%- block footer %}
File '/usr/lib/ckan/venv/src/ckan/ckan/templates/page.html', line 126 in block "footer"
{% include "footer.html" %}
File '/ckanext-odata_org_il/ckanext/odata_org_il/templates/footer.html', line 3 in top-level template code
{% block footer_content %}
File '/ckanext-odata_org_il/ckanext/odata_org_il/templates/footer.html', line 32 in block "footer_content"
{% block footer_lang %}
File '/ckanext-odata_org_il/ckanext/odata_org_il/templates/footer.html', line 33 in block "footer_lang"
{% snippet "snippets/language_selector.html" %}
File '/usr/lib/ckan/venv/src/ckan/ckan/lib/jinja_extensions.py', line 268 in _call
return base.render_snippet(args[0], **kwargs)
File '/usr/lib/ckan/venv/src/ckan/ckan/lib/base.py', line 84 in render_snippet
output = render(template_name, extra_vars=kw)
File '/usr/lib/ckan/venv/src/ckan/ckan/lib/base.py', line 125 in render
return cached_template(template_name, renderer)
File '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/pylons/templating.py', line 249 in cached_template
return render_func()
File '/usr/lib/ckan/venv/src/ckan/ckan/lib/base.py', line 162 in render_template
return render_jinja2(template_name, globs)
File '/usr/lib/ckan/venv/src/ckan/ckan/lib/base.py', line 94 in render_jinja2
return template.render(**extra_vars)
File '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/jinja2/environment.py', line 989 in render
return self.environment.handle_exception(exc_info, True)
File '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/jinja2/environment.py', line 754 in handle_exception
reraise(exc_type, exc_value, tb)
File '/usr/lib/ckan/venv/src/ckan/ckan/templates/snippets/language_selector.html', line 2 in top-level template code
<form class="form-inline form-select lang-select" action="{% url_for controller='util', action='redirect' %}" data-module="select-switch" method="POST">
File '/usr/lib/ckan/venv/src/ckan/ckan/lib/jinja_extensions.py', line 297 in _call
return h.url_for(*args, **kwargs)
File '/usr/lib/ckan/venv/src/ckan/ckan/lib/helpers.py', line 326 in url_for
_auto_flask_context.pop()
File '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/flask/ctx.py', line 376 in pop
app_ctx.pop(exc)
File '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/flask/ctx.py', line 193 in pop
% (rv, self)
AssertionError: Popped wrong app context. (<flask.ctx.AppContext object at 0x7f3ac4b90b10> instead of <flask.ctx.AppContext object at 0x7f3ac4b90690>)
CGI Variables
-------------
CKAN_CURRENT_URL: '/organization/hatzlacha?license_id%3Dcc-by%26tags%3D%25D7%2594%25D7%25AA%25D7%25A4%25D7%25A8%25D7%25A6%25D7%2595%25D7%25AA%2B%25D7%259C%25D7%259E%25D7%25A7%25D7%2595%25D7%259D%2B%25D7%259E%25D7%2592%25D7%2595%25D7%25A8%25D7%2599%25D7%259D%26organization%3Dhatzlacha%26res_format%3DDOC%26tags%3D%25D7%25A2%25D7%2599%25D7%259B%25D7%2595%25D7%2591%2B%25D7%2594%25D7%259C%25D7%2599%25D7%259B%25D7%2599%25D7%259D'
CKAN_LANG: 'he'
CKAN_LANG_IS_DEFAULT: True
CONTENT_LENGTH: '0'
HTTP_ACCEPT: '*/*'
HTTP_ACCEPT_ENCODING: 'gzip'
HTTP_CF_CONNECTING_IP: ' --- '
HTTP_CF_IPCOUNTRY: 'US'
HTTP_CF_RAY: '450eef3ef9fa5afd-HEL'
HTTP_CF_VISITOR: '{"scheme":"https"}'
HTTP_HOST: 'www.odata.org.il'
HTTP_USER_AGENT: ' --- '
HTTP_X_FORWARDED_FOR: ' --- '
HTTP_X_FORWARDED_HOST: 'www.odata.org.il'
HTTP_X_FORWARDED_PORT: '443'
HTTP_X_FORWARDED_PROTO: 'https'
HTTP_X_FORWARDED_SERVER: 'traefik-77ccf967dc-4dfwt'
HTTP_X_REAL_IP: '10.132.0.5'
PATH_INFO: '/organization/hatzlacha'
QUERY_STRING: 'license_id=cc-by&tags=%D7%94%D7%AA%D7%A4%D7%A8%D7%A6%D7%95%D7%AA+%D7%9C%D7%9E%D7%A7%D7%95%D7%9D+%D7%9E%D7%92%D7%95%D7%A8%D7%99%D7%9D&organization=hatzlacha&res_format=DOC&tags=%D7%A2%D7%99%D7%9B%D7%95%D7%91+%D7%94%D7%9C%D7%99%D7%9B%D7%99%D7%9D'
REMOTE_ADDR: '10.20.1.228'
REQUEST_METHOD: 'GET'
SERVER_NAME: '0.0.0.0'
SERVER_PORT: '5000'
SERVER_PROTOCOL: 'HTTP/1.1'
WSGI Variables
--------------
__no_cache__: True
application: <fanstatic.publisher.Delegator object at 0x7f3ac38876d0>
beaker.cache: <beaker.cache.CacheManager object at 0x7f3ac3887690>
beaker.get_session: <bound method SessionMiddleware._get_session of <beaker.middleware.SessionMiddleware object at 0x7f3ac38875d0>>
beaker.session: {'_accessed_time': 1535377065.228329, '_creation_time': 1535377065.228329}
ckan.app: 'pylons_app'
fanstatic.needed: <fanstatic.core.NeededResources object at 0x7f3aa9ff7450>
paste.cookies: (<SimpleCookie: >, '')
paste.httpserver.thread_pool: <paste.httpserver.ThreadPool object at 0x7f3ac4b8e910>
paste.parsed_dict_querystring: (MultiDict([('license_id', 'cc-by'), ('tags', '\xd7\x94\xd7\xaa\xd7\xa4\xd7\xa8\xd7\xa6\xd7\x95\xd7\xaa \xd7\x9c\xd7\x9e\xd7\xa7\xd7\x95\xd7\x9d \xd7\x9e\xd7\x92\xd7\x95\xd7\xa8\xd7\x99\xd7\x9d'), ('organization', 'hatzlacha'), ('res_format', 'DOC'), ('tags', '\xd7\xa2\xd7\x99\xd7\x9b\xd7\x95\xd7\x91 \xd7\x94\xd7\x9c\xd7\x99\xd7\x9b\xd7\x99\xd7\x9d')]), 'license_id=cc-by&tags=%D7%94%D7%AA%D7%A4%D7%A8%D7%A6%D7%95%D7%AA+%D7%9C%D7%9E%D7%A7%D7%95%D7%9D+%D7%9E%D7%92%D7%95%D7%A8%D7%99%D7%9D&organization=hatzlacha&res_format=DOC&tags=%D7%A2%D7%99%D7%9B%D7%95%D7%91+%D7%94%D7%9C%D7%99%D7%9B%D7%99%D7%9D')
paste.parsed_querystring: ([('license_id', 'cc-by'), ('tags', '\xd7\x94\xd7\xaa\xd7\xa4\xd7\xa8\xd7\xa6\xd7\x95\xd7\xaa \xd7\x9c\xd7\x9e\xd7\xa7\xd7\x95\xd7\x9d \xd7\x9e\xd7\x92\xd7\x95\xd7\xa8\xd7\x99\xd7\x9d'), ('organization', 'hatzlacha'), ('res_format', 'DOC'), ('tags', '\xd7\xa2\xd7\x99\xd7\x9b\xd7\x95\xd7\x91 \xd7\x94\xd7\x9c\xd7\x99\xd7\x9b\xd7\x99\xd7\x9d')], 'license_id=cc-by&tags=%D7%94%D7%AA%D7%A4%D7%A8%D7%A6%D7%95%D7%AA+%D7%9C%D7%9E%D7%A7%D7%95%D7%9D+%D7%9E%D7%92%D7%95%D7%A8%D7%99%D7%9D&organization=hatzlacha&res_format=DOC&tags=%D7%A2%D7%99%D7%9B%D7%95%D7%91+%D7%94%D7%9C%D7%99%D7%9B%D7%99%D7%9D')
paste.registry: <paste.registry.Registry object at 0x7f3ac3f14450>
paste.throw_errors: True
pylons.action_method: <bound method OrganizationController.read of <ckan.controllers.organization.OrganizationController object at 0x7f3aa8826b10>>
pylons.controller: <ckan.controllers.organization.OrganizationController object at 0x7f3aa8826b10>
pylons.environ_config: {'session': 'beaker.session', 'cache': 'beaker.cache'}
pylons.pylons: <pylons.util.PylonsContext object at 0x7f3aa8826d50>
pylons.routes_dict: {'action': u'read', 'controller': u'organization', 'id': u'hatzlacha'}
repoze.who.api: <repoze.who.api.API object at 0x7f3ac37da0d0>
repoze.who.logger: <logging.Logger object at 0x7f3ac3887890>
repoze.who.plugins: {'ckan.lib.authenticator:UsernamePasswordAuthenticator': <ckan.lib.authenticator.UsernamePasswordAuthenticator object at 0x7f3ac3acd090>, 'friendlyform': <FriendlyFormPlugin 139890367716112>, 'auth_tkt': <CkanAuthTktCookiePlugin 139890367715984>}
routes.route: <routes.route.Route object at 0x7f3ac4b13f10>
routes.url: <routes.util.URLGenerator object at 0x7f3ac40d9690>
webob._parsed_query_vars: (GET([('license_id', 'cc-by'), ('tags', '\xd7\x94\xd7\xaa\xd7\xa4\xd7\xa8\xd7\xa6\xd7\x95\xd7\xaa \xd7\x9c\xd7\x9e\xd7\xa7\xd7\x95\xd7\x9d \xd7\x9e\xd7\x92\xd7\x95\xd7\xa8\xd7\x99\xd7\x9d'), ('organization', 'hatzlacha'), ('res_format', 'DOC'), ('tags', '\xd7\xa2\xd7\x99\xd7\x9b\xd7\x95\xd7\x91 \xd7\x94\xd7\x9c\xd7\x99\xd7\x9b\xd7\x99\xd7\x9d')]), 'license_id=cc-by&tags=%D7%94%D7%AA%D7%A4%D7%A8%D7%A6%D7%95%D7%AA+%D7%9C%D7%9E%D7%A7%D7%95%D7%9D+%D7%9E%D7%92%D7%95%D7%A8%D7%99%D7%9D&organization=hatzlacha&res_format=DOC&tags=%D7%A2%D7%99%D7%9B%D7%95%D7%91+%D7%94%D7%9C%D7%99%D7%9B%D7%99%D7%9D')
webob.adhoc_attrs: {'response': <Response at 0x7f3aa9ff7990 200 OK>, 'language': 'en-us'}
wsgi process: 'Multithreaded'
wsgiorg.routing_args: (<routes.util.URLGenerator object at 0x7f3ac40d9690>, {'action': u'read', 'controller': u'organization', 'id': u'hatzlacha'})
------------------------------------------------------------
```
### What steps can be taken to reproduce the issue?
I'm just seeing those exceptions in the server logs; I couldn't reproduce the issue locally.
--- END ISSUE ---
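
As background for the traceback above: Flask keeps application contexts on a thread-local stack, and `AppContext.pop()` asserts that the context being popped is the one currently on top. The sketch below is illustrative only (not CKAN code) and shows the LIFO invariant whose violation produces the `Popped wrong app context` assertion seen when `h.url_for()` pops `_auto_flask_context`.

```python
# Minimal sketch of the Flask app-context invariant behind the AssertionError.
# Assumes Flask is installed; this is not CKAN code.
from flask import Flask

app = Flask(__name__)

outer = app.app_context()
inner = app.app_context()

# Contexts live on a thread-local stack and must be popped in LIFO order.
outer.push()
inner.push()
inner.pop()   # the most recently pushed context comes off first
outer.pop()

# Calling outer.pop() while `inner` is still on top of the stack would raise:
#   AssertionError: Popped wrong app context. (<AppContext ...> instead of <AppContext ...>)
# In the traceback above, h.url_for() pushes a temporary app context and the
# matching pop() finds a different context on top of the stack -- the same
# LIFO violation, presumably caused by request state leaking between requests.
```
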
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ckan/config/middleware/pylons_app.py`
Content:
```
1 # encoding: utf-8
2
3 import os
4 import re
5
6 from pylons.wsgiapp import PylonsApp
7
8 from beaker.middleware import CacheMiddleware, SessionMiddleware
9 from paste.cascade import Cascade
10 from paste.registry import RegistryManager
11 from paste.urlparser import StaticURLParser
12 from paste.deploy.converters import asbool
13 from paste.fileapp import _FileIter
14 from pylons.middleware import ErrorHandler, StatusCodeRedirect
15 from routes.middleware import RoutesMiddleware
16 from repoze.who.config import WhoConfig
17 from repoze.who.middleware import PluggableAuthenticationMiddleware
18 from fanstatic import Fanstatic
19
20 from ckan.plugins import PluginImplementations
21 from ckan.plugins.interfaces import IMiddleware
22 import ckan.lib.uploader as uploader
23 from ckan.config.middleware import common_middleware
24 from ckan.common import config
25
26 import logging
27 log = logging.getLogger(__name__)
28
29
30 def make_pylons_stack(conf, full_stack=True, static_files=True,
31 **app_conf):
32 """Create a Pylons WSGI application and return it
33
34 ``conf``
35 The inherited configuration for this application. Normally from
36 the [DEFAULT] section of the Paste ini file.
37
38 ``full_stack``
39 Whether this application provides a full WSGI stack (by default,
40 meaning it handles its own exceptions and errors). Disable
41 full_stack when this application is "managed" by another WSGI
42 middleware.
43
44 ``static_files``
45 Whether this application serves its own static files; disable
46 when another web server is responsible for serving them.
47
48 ``app_conf``
49 The application's local configuration. Normally specified in
50 the [app:<name>] section of the Paste ini file (where <name>
51 defaults to main).
52
53 """
54 # The Pylons WSGI app
55 app = pylons_app = CKANPylonsApp()
56
57 for plugin in PluginImplementations(IMiddleware):
58 app = plugin.make_middleware(app, config)
59
60 app = common_middleware.CloseWSGIInputMiddleware(app, config)
61 app = common_middleware.RootPathMiddleware(app, config)
62 # Routing/Session/Cache Middleware
63 app = RoutesMiddleware(app, config['routes.map'])
64 # we want to be able to retrieve the routes middleware to be able to update
65 # the mapper. We store it in the pylons config to allow this.
66 config['routes.middleware'] = app
67 app = SessionMiddleware(app, config)
68 app = CacheMiddleware(app, config)
69
70 # CUSTOM MIDDLEWARE HERE (filtered by error handling middlewares)
71 # app = QueueLogMiddleware(app)
72 if asbool(config.get('ckan.use_pylons_response_cleanup_middleware',
73 True)):
74 app = execute_on_completion(app, config,
75 cleanup_pylons_response_string)
76
77 # Fanstatic
78 fanstatic_enable_rollup = asbool(app_conf.get('fanstatic_enable_rollup',
79 False))
80 if asbool(config.get('debug', False)):
81 fanstatic_config = {
82 'versioning': True,
83 'recompute_hashes': True,
84 'minified': False,
85 'bottom': True,
86 'bundle': False,
87 'rollup': fanstatic_enable_rollup,
88 }
89 else:
90 fanstatic_config = {
91 'versioning': True,
92 'recompute_hashes': False,
93 'minified': True,
94 'bottom': True,
95 'bundle': True,
96 'rollup': fanstatic_enable_rollup,
97 }
98 root_path = config.get('ckan.root_path', None)
99 if root_path:
100 root_path = re.sub('/{{LANG}}', '', root_path)
101 fanstatic_config['base_url'] = root_path
102 app = Fanstatic(app, **fanstatic_config)
103
104 for plugin in PluginImplementations(IMiddleware):
105 try:
106 app = plugin.make_error_log_middleware(app, config)
107 except AttributeError:
108 log.critical('Middleware class {0} is missing the method'
109 'make_error_log_middleware.'
110 .format(plugin.__class__.__name__))
111
112 if asbool(full_stack):
113 # Handle Python exceptions
114 app = ErrorHandler(app, conf, **config['pylons.errorware'])
115
116 # Display error documents for 400, 403, 404 status codes (and
117 # 500 when debug is disabled)
118 if asbool(config['debug']):
119 app = StatusCodeRedirect(app, [400, 403, 404])
120 else:
121 app = StatusCodeRedirect(app, [400, 403, 404, 500])
122
123 # Initialize repoze.who
124 who_parser = WhoConfig(conf['here'])
125 who_parser.parse(open(app_conf['who.config_file']))
126
127 app = PluggableAuthenticationMiddleware(
128 app,
129 who_parser.identifiers,
130 who_parser.authenticators,
131 who_parser.challengers,
132 who_parser.mdproviders,
133 who_parser.request_classifier,
134 who_parser.challenge_decider,
135 logging.getLogger('repoze.who'),
136 logging.WARN, # ignored
137 who_parser.remote_user_key
138 )
139
140 # Establish the Registry for this application
141 # The RegistryManager includes code to pop
142 # registry values after the stream has completed,
143 # so we need to prevent this with `streaming` set to True.
144 app = RegistryManager(app, streaming=True)
145
146 if asbool(static_files):
147 # Serve static files
148 static_max_age = None if not asbool(
149 config.get('ckan.cache_enabled')) \
150 else int(config.get('ckan.static_max_age', 3600))
151
152 static_app = StaticURLParser(
153 config['pylons.paths']['static_files'],
154 cache_max_age=static_max_age)
155 static_parsers = [static_app, app]
156
157 storage_directory = uploader.get_storage_path()
158 if storage_directory:
159 path = os.path.join(storage_directory, 'storage')
160 try:
161 os.makedirs(path)
162 except OSError as e:
163 # errno 17 is file already exists
164 if e.errno != 17:
165 raise
166
167 storage_app = StaticURLParser(path, cache_max_age=static_max_age)
168 static_parsers.insert(0, storage_app)
169
170 # Configurable extra static file paths
171 extra_static_parsers = []
172 for public_path in config.get(
173 'extra_public_paths', '').split(','):
174 if public_path.strip():
175 extra_static_parsers.append(
176 StaticURLParser(public_path.strip(),
177 cache_max_age=static_max_age)
178 )
179 app = Cascade(extra_static_parsers + static_parsers)
180
181 # Tracking
182 if asbool(config.get('ckan.tracking_enabled', 'false')):
183 app = common_middleware.TrackingMiddleware(app, config)
184
185 # Add a reference to the actual Pylons app so it's easier to access
186 app._wsgi_app = pylons_app
187
188 return app
189
190
191 class CKANPylonsApp(PylonsApp):
192
193 app_name = 'pylons_app'
194
195 def can_handle_request(self, environ):
196 '''
197 Decides whether it can handle a request with the Pylons app by
198 matching the request environ against the route mapper
199
200 Returns (True, 'pylons_app', origin) if this is the case.
201
202 origin can be either 'core' or 'extension' depending on where
203 the route was defined.
204
205 NOTE: There is currently a catch all route for GET requests to
206 point arbitrary urls to templates with the same name:
207
208 map.connect('/*url', controller='template', action='view')
209
210 This means that this function will match all GET requests. This
211 does not cause issues as the Pylons core routes are the last to
212 take precedence so the current behaviour is kept, but it's worth
213 keeping in mind.
214 '''
215
216 pylons_mapper = config['routes.map']
217 match_route = pylons_mapper.routematch(environ=environ)
218 if match_route:
219 match, route = match_route
220 origin = 'core'
221 if hasattr(route, '_ckan_core') and not route._ckan_core:
222 origin = 'extension'
223 log.debug('Pylons route match: {0} Origin: {1}'.format(
224 match, origin))
225 return (True, self.app_name, origin)
226 else:
227 return (False, self.app_name)
228
229
230 class CloseCallbackWrapper(object):
231 def __init__(self, iterable, callback, environ):
232 # pylons.fileapp expects app_iter to have `file` attribute.
233 self.file = iterable
234 self.callback = callback
235 self.environ = environ
236
237 def __iter__(self):
238 """
239 return a generator that passes through items from iterable
240 then calls callback(environ).
241 """
242 try:
243 for item in self.file:
244 yield item
245 except GeneratorExit:
246 if hasattr(self.file, 'close'):
247 self.file.close()
248 raise
249 finally:
250 self.callback(self.environ)
251
252
253 class FileIterWrapper(CloseCallbackWrapper, _FileIter):
254 """Same CloseCallbackWrapper, just with _FileIter mixin.
255
256 That will prevent pylons from converting file responses into
257 in-memori lists.
258 """
259 pass
260
261
262 def execute_on_completion(application, config, callback):
263 """
264 Call callback(environ) once complete response is sent
265 """
266
267 def inner(environ, start_response):
268 try:
269 result = application(environ, start_response)
270 except:
271 callback(environ)
272 raise
273 # paste.fileapp converts non-file responses into list
274 # In order to avoid interception of OOM Killer
275 # file responses wrapped into generator with
276 # _FileIter in parent tree.
277 klass = CloseCallbackWrapper
278 if isinstance(result, _FileIter):
279 klass = FileIterWrapper
280 return klass(result, callback, environ)
281
282 return inner
283
284
285 def cleanup_pylons_response_string(environ):
286 try:
287 msg = 'response cleared by pylons response cleanup middleware'
288 environ['pylons.controller']._py_object.response._body = msg
289 except (KeyError, AttributeError):
290 pass
291
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ckan/config/middleware/pylons_app.py b/ckan/config/middleware/pylons_app.py
--- a/ckan/config/middleware/pylons_app.py
+++ b/ckan/config/middleware/pylons_app.py
@@ -138,10 +138,7 @@
)
# Establish the Registry for this application
- # The RegistryManager includes code to pop
- # registry values after the stream has completed,
- # so we need to prevent this with `streaming` set to True.
- app = RegistryManager(app, streaming=True)
+ app = RegistryManager(app, streaming=False)
if asbool(static_files):
# Serve static files
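
A hedged note on the fix above: `paste.registry.RegistryManager` accepts a `streaming` flag; with `streaming=True` the registry is only popped once the response iterable has been fully consumed, while `streaming=False` pops it as soon as the WSGI call returns. The sketch below illustrates the two modes with a toy WSGI app; it is not the actual CKAN middleware stack.

```python
# Illustrative only -- a toy WSGI app wrapped both ways, not CKAN's middleware.
from paste.registry import RegistryManager

def wsgi_app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'ok']

# streaming=True: registry values are popped only after the response iterator
# is exhausted, so per-request state can outlive the request handler.
deferred_cleanup = RegistryManager(wsgi_app, streaming=True)

# streaming=False (the patched behaviour): registry values are popped as soon
# as the call returns, keeping request-scoped state strictly nested.
eager_cleanup = RegistryManager(wsgi_app, streaming=False)
```
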
| {"golden_diff": "diff --git a/ckan/config/middleware/pylons_app.py b/ckan/config/middleware/pylons_app.py\n--- a/ckan/config/middleware/pylons_app.py\n+++ b/ckan/config/middleware/pylons_app.py\n@@ -138,10 +138,7 @@\n )\n \n # Establish the Registry for this application\n- # The RegistryManager includes code to pop\n- # registry values after the stream has completed,\n- # so we need to prevent this with `streaming` set to True.\n- app = RegistryManager(app, streaming=True)\n+ app = RegistryManager(app, streaming=False)\n \n if asbool(static_files):\n # Serve static files\n", "issue": "exception after upgrade to 2.8.1 - Popped wrong app context\nAfter upgrade from 2.7 to 2.8.1 - I'm seeing some exceptions in a production server log, couldn't reproduce the error locally\r\n\r\n### CKAN Version if known (or site URL)\r\n2.8.1\r\n\r\n### Please describe the expected behaviour\r\nno exception (or less cryptic exception..)\r\n\r\n### Please describe the actual behaviour\r\nexception - \r\n```\r\n-------------------------------------------------------------\r\nError - <type 'exceptions.AssertionError'>: Popped wrong app context. (<flask.ctx.AppContext object at 0x7f3ac4b90b10> instead of <flask.ctx.AppContext object at 0x7f3ac4b90690>)\r\nURL: http://www.odata.org.il/organization/hatzlacha?license_id=cc-by&tags=%D7%94%D7%AA%D7%A4%D7%A8%D7%A6%D7%95%D7%AA+%D7%9C%D7%9E%D7%A7%D7%95%D7%9D+%D7%9E%D7%92%D7%95%D7%A8%D7%99%D7%9D&organization=hatzlacha&res_format=DOC&tags=%D7%A2%D7%99%D7%9B%D7%95%D7%91+%D7%94%D7%9C%D7%99%D7%9B%D7%99%D7%9D\r\nFile '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/weberror/errormiddleware.py', line 171 in __call__\r\n app_iter = self.application(environ, sr_checker)\r\nFile '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/webob/dec.py', line 147 in __call__\r\n resp = self.call_func(req, *args, **self.kwargs)\r\nFile '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/webob/dec.py', line 208 in call_func\r\n return self.func(req, *args, **kwargs)\r\nFile '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/fanstatic/publisher.py', line 234 in __call__\r\n return request.get_response(self.app)\r\nFile '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/webob/request.py', line 1053 in get_response\r\n application, catch_exc_info=False)\r\nFile '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/webob/request.py', line 1022 in call_application\r\n app_iter = application(self.environ, start_response)\r\nFile '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/webob/dec.py', line 147 in __call__\r\n resp = self.call_func(req, *args, **self.kwargs)\r\nFile '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/webob/dec.py', line 208 in call_func\r\n return self.func(req, *args, **kwargs)\r\nFile '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/fanstatic/injector.py', line 54 in __call__\r\n response = request.get_response(self.app)\r\nFile '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/webob/request.py', line 1053 in get_response\r\n application, catch_exc_info=False)\r\nFile '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/webob/request.py', line 1022 in call_application\r\n app_iter = application(self.environ, start_response)\r\nFile '/usr/lib/ckan/venv/src/ckan/ckan/config/middleware/pylons_app.py', line 265 in inner\r\n result = application(environ, start_response)\r\nFile '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/beaker/middleware.py', line 73 in __call__\r\n return self.app(environ, start_response)\r\nFile 
'/usr/lib/ckan/venv/local/lib/python2.7/site-packages/beaker/middleware.py', line 156 in __call__\r\n return self.wrap_app(environ, session_start_response)\r\nFile '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/routes/middleware.py', line 131 in __call__\r\n response = self.app(environ, start_response)\r\nFile '/usr/lib/ckan/venv/src/ckan/ckan/config/middleware/common_middleware.py', line 30 in __call__\r\n return self.app(environ, start_response)\r\nFile '/usr/lib/ckan/venv/src/ckan/ckan/config/middleware/common_middleware.py', line 56 in __call__\r\n return self.app(environ, start_response)\r\nFile '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/pylons/wsgiapp.py', line 125 in __call__\r\n response = self.dispatch(controller, environ, start_response)\r\nFile '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/pylons/wsgiapp.py', line 324 in dispatch\r\n return controller(environ, start_response)\r\nFile '/usr/lib/ckan/venv/src/ckan/ckan/lib/base.py', line 240 in __call__\r\n res = WSGIController.__call__(self, environ, start_response)\r\nFile '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/pylons/controllers/core.py', line 221 in __call__\r\n response = self._dispatch_call()\r\nFile '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/pylons/controllers/core.py', line 172 in _dispatch_call\r\n response = self._inspect_call(func)\r\nFile '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/pylons/controllers/core.py', line 107 in _inspect_call\r\n result = self._perform_call(func, args)\r\nFile '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/pylons/controllers/core.py', line 60 in _perform_call\r\n return func(**args)\r\nFile '/usr/lib/ckan/venv/src/ckan/ckan/controllers/group.py', line 230 in read\r\n extra_vars={'group_type': group_type})\r\nFile '/usr/lib/ckan/venv/src/ckan/ckan/lib/base.py', line 125 in render\r\n return cached_template(template_name, renderer)\r\nFile '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/pylons/templating.py', line 249 in cached_template\r\n return render_func()\r\nFile '/usr/lib/ckan/venv/src/ckan/ckan/lib/base.py', line 162 in render_template\r\n return render_jinja2(template_name, globs)\r\nFile '/usr/lib/ckan/venv/src/ckan/ckan/lib/base.py', line 94 in render_jinja2\r\n return template.render(**extra_vars)\r\nFile '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/jinja2/environment.py', line 989 in render\r\n return self.environment.handle_exception(exc_info, True)\r\nFile '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/jinja2/environment.py', line 754 in handle_exception\r\n reraise(exc_type, exc_value, tb)\r\nFile '/usr/lib/ckan/venv/src/ckan/ckan/templates/organization/read.html', line 1 in top-level template code\r\n {% extends \"organization/read_base.html\" %}\r\nFile '/usr/lib/ckan/venv/src/ckan/ckan/templates/organization/read_base.html', line 1 in top-level template code\r\n {% extends \"page.html\" %}\r\nFile '/usr/lib/ckan/venv/src/ckan/ckan/templates/page.html', line 1 in top-level template code\r\n {% extends \"base.html\" %}\r\nFile '/ckanext-odata_org_il/ckanext/odata_org_il/templates/base.html', line 1 in top-level template code\r\n {% ckan_extends %}\r\nFile '/usr/lib/ckan/venv/src/ckan/ckan/templates/base.html', line 101 in top-level template code\r\n {%- block page %}{% endblock -%}\r\nFile '/usr/lib/ckan/venv/src/ckan/ckan/templates/page.html', line 125 in block \"page\"\r\n {%- block footer %}\r\nFile '/usr/lib/ckan/venv/src/ckan/ckan/templates/page.html', line 126 in block 
\"footer\"\r\n {% include \"footer.html\" %}\r\nFile '/ckanext-odata_org_il/ckanext/odata_org_il/templates/footer.html', line 3 in top-level template code\r\n {% block footer_content %}\r\nFile '/ckanext-odata_org_il/ckanext/odata_org_il/templates/footer.html', line 32 in block \"footer_content\"\r\n {% block footer_lang %}\r\nFile '/ckanext-odata_org_il/ckanext/odata_org_il/templates/footer.html', line 33 in block \"footer_lang\"\r\n {% snippet \"snippets/language_selector.html\" %}\r\nFile '/usr/lib/ckan/venv/src/ckan/ckan/lib/jinja_extensions.py', line 268 in _call\r\n return base.render_snippet(args[0], **kwargs)\r\nFile '/usr/lib/ckan/venv/src/ckan/ckan/lib/base.py', line 84 in render_snippet\r\n output = render(template_name, extra_vars=kw)\r\nFile '/usr/lib/ckan/venv/src/ckan/ckan/lib/base.py', line 125 in render\r\n return cached_template(template_name, renderer)\r\nFile '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/pylons/templating.py', line 249 in cached_template\r\n return render_func()\r\nFile '/usr/lib/ckan/venv/src/ckan/ckan/lib/base.py', line 162 in render_template\r\n return render_jinja2(template_name, globs)\r\nFile '/usr/lib/ckan/venv/src/ckan/ckan/lib/base.py', line 94 in render_jinja2\r\n return template.render(**extra_vars)\r\nFile '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/jinja2/environment.py', line 989 in render\r\n return self.environment.handle_exception(exc_info, True)\r\nFile '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/jinja2/environment.py', line 754 in handle_exception\r\n reraise(exc_type, exc_value, tb)\r\nFile '/usr/lib/ckan/venv/src/ckan/ckan/templates/snippets/language_selector.html', line 2 in top-level template code\r\n <form class=\"form-inline form-select lang-select\" action=\"{% url_for controller='util', action='redirect' %}\" data-module=\"select-switch\" method=\"POST\">\r\nFile '/usr/lib/ckan/venv/src/ckan/ckan/lib/jinja_extensions.py', line 297 in _call\r\n return h.url_for(*args, **kwargs)\r\nFile '/usr/lib/ckan/venv/src/ckan/ckan/lib/helpers.py', line 326 in url_for\r\n _auto_flask_context.pop()\r\nFile '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/flask/ctx.py', line 376 in pop\r\n app_ctx.pop(exc)\r\nFile '/usr/lib/ckan/venv/local/lib/python2.7/site-packages/flask/ctx.py', line 193 in pop\r\n % (rv, self)\r\nAssertionError: Popped wrong app context. 
(<flask.ctx.AppContext object at 0x7f3ac4b90b10> instead of <flask.ctx.AppContext object at 0x7f3ac4b90690>)\r\n\r\n\r\nCGI Variables\r\n-------------\r\n CKAN_CURRENT_URL: '/organization/hatzlacha?license_id%3Dcc-by%26tags%3D%25D7%2594%25D7%25AA%25D7%25A4%25D7%25A8%25D7%25A6%25D7%2595%25D7%25AA%2B%25D7%259C%25D7%259E%25D7%25A7%25D7%2595%25D7%259D%2B%25D7%259E%25D7%2592%25D7%2595%25D7%25A8%25D7%2599%25D7%259D%26organization%3Dhatzlacha%26res_format%3DDOC%26tags%3D%25D7%25A2%25D7%2599%25D7%259B%25D7%2595%25D7%2591%2B%25D7%2594%25D7%259C%25D7%2599%25D7%259B%25D7%2599%25D7%259D'\r\n CKAN_LANG: 'he'\r\n CKAN_LANG_IS_DEFAULT: True\r\n CONTENT_LENGTH: '0'\r\n HTTP_ACCEPT: '*/*'\r\n HTTP_ACCEPT_ENCODING: 'gzip'\r\n HTTP_CF_CONNECTING_IP: ' --- '\r\n HTTP_CF_IPCOUNTRY: 'US'\r\n HTTP_CF_RAY: '450eef3ef9fa5afd-HEL'\r\n HTTP_CF_VISITOR: '{\"scheme\":\"https\"}'\r\n HTTP_HOST: 'www.odata.org.il'\r\n HTTP_USER_AGENT: ' --- '\r\n HTTP_X_FORWARDED_FOR: ' --- '\r\n HTTP_X_FORWARDED_HOST: 'www.odata.org.il'\r\n HTTP_X_FORWARDED_PORT: '443'\r\n HTTP_X_FORWARDED_PROTO: 'https'\r\n HTTP_X_FORWARDED_SERVER: 'traefik-77ccf967dc-4dfwt'\r\n HTTP_X_REAL_IP: '10.132.0.5'\r\n PATH_INFO: '/organization/hatzlacha'\r\n QUERY_STRING: 'license_id=cc-by&tags=%D7%94%D7%AA%D7%A4%D7%A8%D7%A6%D7%95%D7%AA+%D7%9C%D7%9E%D7%A7%D7%95%D7%9D+%D7%9E%D7%92%D7%95%D7%A8%D7%99%D7%9D&organization=hatzlacha&res_format=DOC&tags=%D7%A2%D7%99%D7%9B%D7%95%D7%91+%D7%94%D7%9C%D7%99%D7%9B%D7%99%D7%9D'\r\n REMOTE_ADDR: '10.20.1.228'\r\n REQUEST_METHOD: 'GET'\r\n SERVER_NAME: '0.0.0.0'\r\n SERVER_PORT: '5000'\r\n SERVER_PROTOCOL: 'HTTP/1.1'\r\n\r\n\r\nWSGI Variables\r\n--------------\r\n __no_cache__: True\r\n application: <fanstatic.publisher.Delegator object at 0x7f3ac38876d0>\r\n beaker.cache: <beaker.cache.CacheManager object at 0x7f3ac3887690>\r\n beaker.get_session: <bound method SessionMiddleware._get_session of <beaker.middleware.SessionMiddleware object at 0x7f3ac38875d0>>\r\n beaker.session: {'_accessed_time': 1535377065.228329, '_creation_time': 1535377065.228329}\r\n ckan.app: 'pylons_app'\r\n fanstatic.needed: <fanstatic.core.NeededResources object at 0x7f3aa9ff7450>\r\n paste.cookies: (<SimpleCookie: >, '')\r\n paste.httpserver.thread_pool: <paste.httpserver.ThreadPool object at 0x7f3ac4b8e910>\r\n paste.parsed_dict_querystring: (MultiDict([('license_id', 'cc-by'), ('tags', '\\xd7\\x94\\xd7\\xaa\\xd7\\xa4\\xd7\\xa8\\xd7\\xa6\\xd7\\x95\\xd7\\xaa \\xd7\\x9c\\xd7\\x9e\\xd7\\xa7\\xd7\\x95\\xd7\\x9d \\xd7\\x9e\\xd7\\x92\\xd7\\x95\\xd7\\xa8\\xd7\\x99\\xd7\\x9d'), ('organization', 'hatzlacha'), ('res_format', 'DOC'), ('tags', '\\xd7\\xa2\\xd7\\x99\\xd7\\x9b\\xd7\\x95\\xd7\\x91 \\xd7\\x94\\xd7\\x9c\\xd7\\x99\\xd7\\x9b\\xd7\\x99\\xd7\\x9d')]), 'license_id=cc-by&tags=%D7%94%D7%AA%D7%A4%D7%A8%D7%A6%D7%95%D7%AA+%D7%9C%D7%9E%D7%A7%D7%95%D7%9D+%D7%9E%D7%92%D7%95%D7%A8%D7%99%D7%9D&organization=hatzlacha&res_format=DOC&tags=%D7%A2%D7%99%D7%9B%D7%95%D7%91+%D7%94%D7%9C%D7%99%D7%9B%D7%99%D7%9D')\r\n paste.parsed_querystring: ([('license_id', 'cc-by'), ('tags', '\\xd7\\x94\\xd7\\xaa\\xd7\\xa4\\xd7\\xa8\\xd7\\xa6\\xd7\\x95\\xd7\\xaa \\xd7\\x9c\\xd7\\x9e\\xd7\\xa7\\xd7\\x95\\xd7\\x9d \\xd7\\x9e\\xd7\\x92\\xd7\\x95\\xd7\\xa8\\xd7\\x99\\xd7\\x9d'), ('organization', 'hatzlacha'), ('res_format', 'DOC'), ('tags', '\\xd7\\xa2\\xd7\\x99\\xd7\\x9b\\xd7\\x95\\xd7\\x91 \\xd7\\x94\\xd7\\x9c\\xd7\\x99\\xd7\\x9b\\xd7\\x99\\xd7\\x9d')], 
'license_id=cc-by&tags=%D7%94%D7%AA%D7%A4%D7%A8%D7%A6%D7%95%D7%AA+%D7%9C%D7%9E%D7%A7%D7%95%D7%9D+%D7%9E%D7%92%D7%95%D7%A8%D7%99%D7%9D&organization=hatzlacha&res_format=DOC&tags=%D7%A2%D7%99%D7%9B%D7%95%D7%91+%D7%94%D7%9C%D7%99%D7%9B%D7%99%D7%9D')\r\n paste.registry: <paste.registry.Registry object at 0x7f3ac3f14450>\r\n paste.throw_errors: True\r\n pylons.action_method: <bound method OrganizationController.read of <ckan.controllers.organization.OrganizationController object at 0x7f3aa8826b10>>\r\n pylons.controller: <ckan.controllers.organization.OrganizationController object at 0x7f3aa8826b10>\r\n pylons.environ_config: {'session': 'beaker.session', 'cache': 'beaker.cache'}\r\n pylons.pylons: <pylons.util.PylonsContext object at 0x7f3aa8826d50>\r\n pylons.routes_dict: {'action': u'read', 'controller': u'organization', 'id': u'hatzlacha'}\r\n repoze.who.api: <repoze.who.api.API object at 0x7f3ac37da0d0>\r\n repoze.who.logger: <logging.Logger object at 0x7f3ac3887890>\r\n repoze.who.plugins: {'ckan.lib.authenticator:UsernamePasswordAuthenticator': <ckan.lib.authenticator.UsernamePasswordAuthenticator object at 0x7f3ac3acd090>, 'friendlyform': <FriendlyFormPlugin 139890367716112>, 'auth_tkt': <CkanAuthTktCookiePlugin 139890367715984>}\r\n routes.route: <routes.route.Route object at 0x7f3ac4b13f10>\r\n routes.url: <routes.util.URLGenerator object at 0x7f3ac40d9690>\r\n webob._parsed_query_vars: (GET([('license_id', 'cc-by'), ('tags', '\\xd7\\x94\\xd7\\xaa\\xd7\\xa4\\xd7\\xa8\\xd7\\xa6\\xd7\\x95\\xd7\\xaa \\xd7\\x9c\\xd7\\x9e\\xd7\\xa7\\xd7\\x95\\xd7\\x9d \\xd7\\x9e\\xd7\\x92\\xd7\\x95\\xd7\\xa8\\xd7\\x99\\xd7\\x9d'), ('organization', 'hatzlacha'), ('res_format', 'DOC'), ('tags', '\\xd7\\xa2\\xd7\\x99\\xd7\\x9b\\xd7\\x95\\xd7\\x91 \\xd7\\x94\\xd7\\x9c\\xd7\\x99\\xd7\\x9b\\xd7\\x99\\xd7\\x9d')]), 'license_id=cc-by&tags=%D7%94%D7%AA%D7%A4%D7%A8%D7%A6%D7%95%D7%AA+%D7%9C%D7%9E%D7%A7%D7%95%D7%9D+%D7%9E%D7%92%D7%95%D7%A8%D7%99%D7%9D&organization=hatzlacha&res_format=DOC&tags=%D7%A2%D7%99%D7%9B%D7%95%D7%91+%D7%94%D7%9C%D7%99%D7%9B%D7%99%D7%9D')\r\n webob.adhoc_attrs: {'response': <Response at 0x7f3aa9ff7990 200 OK>, 'language': 'en-us'}\r\n wsgi process: 'Multithreaded'\r\n wsgiorg.routing_args: (<routes.util.URLGenerator object at 0x7f3ac40d9690>, {'action': u'read', 'controller': u'organization', 'id': u'hatzlacha'})\r\n------------------------------------------------------------\r\n```\r\n\r\n### What steps can be taken to reproduce the issue? 
\r\nI'm just seeing those exception in the server logs, couldn't reproduce locally\n", "before_files": [{"content": "# encoding: utf-8\n\nimport os\nimport re\n\nfrom pylons.wsgiapp import PylonsApp\n\nfrom beaker.middleware import CacheMiddleware, SessionMiddleware\nfrom paste.cascade import Cascade\nfrom paste.registry import RegistryManager\nfrom paste.urlparser import StaticURLParser\nfrom paste.deploy.converters import asbool\nfrom paste.fileapp import _FileIter\nfrom pylons.middleware import ErrorHandler, StatusCodeRedirect\nfrom routes.middleware import RoutesMiddleware\nfrom repoze.who.config import WhoConfig\nfrom repoze.who.middleware import PluggableAuthenticationMiddleware\nfrom fanstatic import Fanstatic\n\nfrom ckan.plugins import PluginImplementations\nfrom ckan.plugins.interfaces import IMiddleware\nimport ckan.lib.uploader as uploader\nfrom ckan.config.middleware import common_middleware\nfrom ckan.common import config\n\nimport logging\nlog = logging.getLogger(__name__)\n\n\ndef make_pylons_stack(conf, full_stack=True, static_files=True,\n **app_conf):\n \"\"\"Create a Pylons WSGI application and return it\n\n ``conf``\n The inherited configuration for this application. Normally from\n the [DEFAULT] section of the Paste ini file.\n\n ``full_stack``\n Whether this application provides a full WSGI stack (by default,\n meaning it handles its own exceptions and errors). Disable\n full_stack when this application is \"managed\" by another WSGI\n middleware.\n\n ``static_files``\n Whether this application serves its own static files; disable\n when another web server is responsible for serving them.\n\n ``app_conf``\n The application's local configuration. Normally specified in\n the [app:<name>] section of the Paste ini file (where <name>\n defaults to main).\n\n \"\"\"\n # The Pylons WSGI app\n app = pylons_app = CKANPylonsApp()\n\n for plugin in PluginImplementations(IMiddleware):\n app = plugin.make_middleware(app, config)\n\n app = common_middleware.CloseWSGIInputMiddleware(app, config)\n app = common_middleware.RootPathMiddleware(app, config)\n # Routing/Session/Cache Middleware\n app = RoutesMiddleware(app, config['routes.map'])\n # we want to be able to retrieve the routes middleware to be able to update\n # the mapper. 
We store it in the pylons config to allow this.\n config['routes.middleware'] = app\n app = SessionMiddleware(app, config)\n app = CacheMiddleware(app, config)\n\n # CUSTOM MIDDLEWARE HERE (filtered by error handling middlewares)\n # app = QueueLogMiddleware(app)\n if asbool(config.get('ckan.use_pylons_response_cleanup_middleware',\n True)):\n app = execute_on_completion(app, config,\n cleanup_pylons_response_string)\n\n # Fanstatic\n fanstatic_enable_rollup = asbool(app_conf.get('fanstatic_enable_rollup',\n False))\n if asbool(config.get('debug', False)):\n fanstatic_config = {\n 'versioning': True,\n 'recompute_hashes': True,\n 'minified': False,\n 'bottom': True,\n 'bundle': False,\n 'rollup': fanstatic_enable_rollup,\n }\n else:\n fanstatic_config = {\n 'versioning': True,\n 'recompute_hashes': False,\n 'minified': True,\n 'bottom': True,\n 'bundle': True,\n 'rollup': fanstatic_enable_rollup,\n }\n root_path = config.get('ckan.root_path', None)\n if root_path:\n root_path = re.sub('/{{LANG}}', '', root_path)\n fanstatic_config['base_url'] = root_path\n app = Fanstatic(app, **fanstatic_config)\n\n for plugin in PluginImplementations(IMiddleware):\n try:\n app = plugin.make_error_log_middleware(app, config)\n except AttributeError:\n log.critical('Middleware class {0} is missing the method'\n 'make_error_log_middleware.'\n .format(plugin.__class__.__name__))\n\n if asbool(full_stack):\n # Handle Python exceptions\n app = ErrorHandler(app, conf, **config['pylons.errorware'])\n\n # Display error documents for 400, 403, 404 status codes (and\n # 500 when debug is disabled)\n if asbool(config['debug']):\n app = StatusCodeRedirect(app, [400, 403, 404])\n else:\n app = StatusCodeRedirect(app, [400, 403, 404, 500])\n\n # Initialize repoze.who\n who_parser = WhoConfig(conf['here'])\n who_parser.parse(open(app_conf['who.config_file']))\n\n app = PluggableAuthenticationMiddleware(\n app,\n who_parser.identifiers,\n who_parser.authenticators,\n who_parser.challengers,\n who_parser.mdproviders,\n who_parser.request_classifier,\n who_parser.challenge_decider,\n logging.getLogger('repoze.who'),\n logging.WARN, # ignored\n who_parser.remote_user_key\n )\n\n # Establish the Registry for this application\n # The RegistryManager includes code to pop\n # registry values after the stream has completed,\n # so we need to prevent this with `streaming` set to True.\n app = RegistryManager(app, streaming=True)\n\n if asbool(static_files):\n # Serve static files\n static_max_age = None if not asbool(\n config.get('ckan.cache_enabled')) \\\n else int(config.get('ckan.static_max_age', 3600))\n\n static_app = StaticURLParser(\n config['pylons.paths']['static_files'],\n cache_max_age=static_max_age)\n static_parsers = [static_app, app]\n\n storage_directory = uploader.get_storage_path()\n if storage_directory:\n path = os.path.join(storage_directory, 'storage')\n try:\n os.makedirs(path)\n except OSError as e:\n # errno 17 is file already exists\n if e.errno != 17:\n raise\n\n storage_app = StaticURLParser(path, cache_max_age=static_max_age)\n static_parsers.insert(0, storage_app)\n\n # Configurable extra static file paths\n extra_static_parsers = []\n for public_path in config.get(\n 'extra_public_paths', '').split(','):\n if public_path.strip():\n extra_static_parsers.append(\n StaticURLParser(public_path.strip(),\n cache_max_age=static_max_age)\n )\n app = Cascade(extra_static_parsers + static_parsers)\n\n # Tracking\n if asbool(config.get('ckan.tracking_enabled', 'false')):\n app = 
common_middleware.TrackingMiddleware(app, config)\n\n # Add a reference to the actual Pylons app so it's easier to access\n app._wsgi_app = pylons_app\n\n return app\n\n\nclass CKANPylonsApp(PylonsApp):\n\n app_name = 'pylons_app'\n\n def can_handle_request(self, environ):\n '''\n Decides whether it can handle a request with the Pylons app by\n matching the request environ against the route mapper\n\n Returns (True, 'pylons_app', origin) if this is the case.\n\n origin can be either 'core' or 'extension' depending on where\n the route was defined.\n\n NOTE: There is currently a catch all route for GET requests to\n point arbitrary urls to templates with the same name:\n\n map.connect('/*url', controller='template', action='view')\n\n This means that this function will match all GET requests. This\n does not cause issues as the Pylons core routes are the last to\n take precedence so the current behaviour is kept, but it's worth\n keeping in mind.\n '''\n\n pylons_mapper = config['routes.map']\n match_route = pylons_mapper.routematch(environ=environ)\n if match_route:\n match, route = match_route\n origin = 'core'\n if hasattr(route, '_ckan_core') and not route._ckan_core:\n origin = 'extension'\n log.debug('Pylons route match: {0} Origin: {1}'.format(\n match, origin))\n return (True, self.app_name, origin)\n else:\n return (False, self.app_name)\n\n\nclass CloseCallbackWrapper(object):\n def __init__(self, iterable, callback, environ):\n # pylons.fileapp expects app_iter to have `file` attribute.\n self.file = iterable\n self.callback = callback\n self.environ = environ\n\n def __iter__(self):\n \"\"\"\n return a generator that passes through items from iterable\n then calls callback(environ).\n \"\"\"\n try:\n for item in self.file:\n yield item\n except GeneratorExit:\n if hasattr(self.file, 'close'):\n self.file.close()\n raise\n finally:\n self.callback(self.environ)\n\n\nclass FileIterWrapper(CloseCallbackWrapper, _FileIter):\n \"\"\"Same CloseCallbackWrapper, just with _FileIter mixin.\n\n That will prevent pylons from converting file responses into\n in-memori lists.\n \"\"\"\n pass\n\n\ndef execute_on_completion(application, config, callback):\n \"\"\"\n Call callback(environ) once complete response is sent\n \"\"\"\n\n def inner(environ, start_response):\n try:\n result = application(environ, start_response)\n except:\n callback(environ)\n raise\n # paste.fileapp converts non-file responses into list\n # In order to avoid interception of OOM Killer\n # file responses wrapped into generator with\n # _FileIter in parent tree.\n klass = CloseCallbackWrapper\n if isinstance(result, _FileIter):\n klass = FileIterWrapper\n return klass(result, callback, environ)\n\n return inner\n\n\ndef cleanup_pylons_response_string(environ):\n try:\n msg = 'response cleared by pylons response cleanup middleware'\n environ['pylons.controller']._py_object.response._body = msg\n except (KeyError, AttributeError):\n pass\n", "path": "ckan/config/middleware/pylons_app.py"}], "after_files": [{"content": "# encoding: utf-8\n\nimport os\nimport re\n\nfrom pylons.wsgiapp import PylonsApp\n\nfrom beaker.middleware import CacheMiddleware, SessionMiddleware\nfrom paste.cascade import Cascade\nfrom paste.registry import RegistryManager\nfrom paste.urlparser import StaticURLParser\nfrom paste.deploy.converters import asbool\nfrom paste.fileapp import _FileIter\nfrom pylons.middleware import ErrorHandler, StatusCodeRedirect\nfrom routes.middleware import RoutesMiddleware\nfrom repoze.who.config import 
WhoConfig\nfrom repoze.who.middleware import PluggableAuthenticationMiddleware\nfrom fanstatic import Fanstatic\n\nfrom ckan.plugins import PluginImplementations\nfrom ckan.plugins.interfaces import IMiddleware\nimport ckan.lib.uploader as uploader\nfrom ckan.config.middleware import common_middleware\nfrom ckan.common import config\n\nimport logging\nlog = logging.getLogger(__name__)\n\n\ndef make_pylons_stack(conf, full_stack=True, static_files=True,\n **app_conf):\n \"\"\"Create a Pylons WSGI application and return it\n\n ``conf``\n The inherited configuration for this application. Normally from\n the [DEFAULT] section of the Paste ini file.\n\n ``full_stack``\n Whether this application provides a full WSGI stack (by default,\n meaning it handles its own exceptions and errors). Disable\n full_stack when this application is \"managed\" by another WSGI\n middleware.\n\n ``static_files``\n Whether this application serves its own static files; disable\n when another web server is responsible for serving them.\n\n ``app_conf``\n The application's local configuration. Normally specified in\n the [app:<name>] section of the Paste ini file (where <name>\n defaults to main).\n\n \"\"\"\n # The Pylons WSGI app\n app = pylons_app = CKANPylonsApp()\n\n for plugin in PluginImplementations(IMiddleware):\n app = plugin.make_middleware(app, config)\n\n app = common_middleware.CloseWSGIInputMiddleware(app, config)\n app = common_middleware.RootPathMiddleware(app, config)\n # Routing/Session/Cache Middleware\n app = RoutesMiddleware(app, config['routes.map'])\n # we want to be able to retrieve the routes middleware to be able to update\n # the mapper. We store it in the pylons config to allow this.\n config['routes.middleware'] = app\n app = SessionMiddleware(app, config)\n app = CacheMiddleware(app, config)\n\n # CUSTOM MIDDLEWARE HERE (filtered by error handling middlewares)\n # app = QueueLogMiddleware(app)\n if asbool(config.get('ckan.use_pylons_response_cleanup_middleware',\n True)):\n app = execute_on_completion(app, config,\n cleanup_pylons_response_string)\n\n # Fanstatic\n fanstatic_enable_rollup = asbool(app_conf.get('fanstatic_enable_rollup',\n False))\n if asbool(config.get('debug', False)):\n fanstatic_config = {\n 'versioning': True,\n 'recompute_hashes': True,\n 'minified': False,\n 'bottom': True,\n 'bundle': False,\n 'rollup': fanstatic_enable_rollup,\n }\n else:\n fanstatic_config = {\n 'versioning': True,\n 'recompute_hashes': False,\n 'minified': True,\n 'bottom': True,\n 'bundle': True,\n 'rollup': fanstatic_enable_rollup,\n }\n root_path = config.get('ckan.root_path', None)\n if root_path:\n root_path = re.sub('/{{LANG}}', '', root_path)\n fanstatic_config['base_url'] = root_path\n app = Fanstatic(app, **fanstatic_config)\n\n for plugin in PluginImplementations(IMiddleware):\n try:\n app = plugin.make_error_log_middleware(app, config)\n except AttributeError:\n log.critical('Middleware class {0} is missing the method'\n 'make_error_log_middleware.'\n .format(plugin.__class__.__name__))\n\n if asbool(full_stack):\n # Handle Python exceptions\n app = ErrorHandler(app, conf, **config['pylons.errorware'])\n\n # Display error documents for 400, 403, 404 status codes (and\n # 500 when debug is disabled)\n if asbool(config['debug']):\n app = StatusCodeRedirect(app, [400, 403, 404])\n else:\n app = StatusCodeRedirect(app, [400, 403, 404, 500])\n\n # Initialize repoze.who\n who_parser = WhoConfig(conf['here'])\n who_parser.parse(open(app_conf['who.config_file']))\n\n app = 
PluggableAuthenticationMiddleware(\n app,\n who_parser.identifiers,\n who_parser.authenticators,\n who_parser.challengers,\n who_parser.mdproviders,\n who_parser.request_classifier,\n who_parser.challenge_decider,\n logging.getLogger('repoze.who'),\n logging.WARN, # ignored\n who_parser.remote_user_key\n )\n\n # Establish the Registry for this application\n app = RegistryManager(app, streaming=False)\n\n if asbool(static_files):\n # Serve static files\n static_max_age = None if not asbool(\n config.get('ckan.cache_enabled')) \\\n else int(config.get('ckan.static_max_age', 3600))\n\n static_app = StaticURLParser(\n config['pylons.paths']['static_files'],\n cache_max_age=static_max_age)\n static_parsers = [static_app, app]\n\n storage_directory = uploader.get_storage_path()\n if storage_directory:\n path = os.path.join(storage_directory, 'storage')\n try:\n os.makedirs(path)\n except OSError as e:\n # errno 17 is file already exists\n if e.errno != 17:\n raise\n\n storage_app = StaticURLParser(path, cache_max_age=static_max_age)\n static_parsers.insert(0, storage_app)\n\n # Configurable extra static file paths\n extra_static_parsers = []\n for public_path in config.get(\n 'extra_public_paths', '').split(','):\n if public_path.strip():\n extra_static_parsers.append(\n StaticURLParser(public_path.strip(),\n cache_max_age=static_max_age)\n )\n app = Cascade(extra_static_parsers + static_parsers)\n\n # Tracking\n if asbool(config.get('ckan.tracking_enabled', 'false')):\n app = common_middleware.TrackingMiddleware(app, config)\n\n # Add a reference to the actual Pylons app so it's easier to access\n app._wsgi_app = pylons_app\n\n return app\n\n\nclass CKANPylonsApp(PylonsApp):\n\n app_name = 'pylons_app'\n\n def can_handle_request(self, environ):\n '''\n Decides whether it can handle a request with the Pylons app by\n matching the request environ against the route mapper\n\n Returns (True, 'pylons_app', origin) if this is the case.\n\n origin can be either 'core' or 'extension' depending on where\n the route was defined.\n\n NOTE: There is currently a catch all route for GET requests to\n point arbitrary urls to templates with the same name:\n\n map.connect('/*url', controller='template', action='view')\n\n This means that this function will match all GET requests. 
This\n does not cause issues as the Pylons core routes are the last to\n take precedence so the current behaviour is kept, but it's worth\n keeping in mind.\n '''\n\n pylons_mapper = config['routes.map']\n match_route = pylons_mapper.routematch(environ=environ)\n if match_route:\n match, route = match_route\n origin = 'core'\n if hasattr(route, '_ckan_core') and not route._ckan_core:\n origin = 'extension'\n log.debug('Pylons route match: {0} Origin: {1}'.format(\n match, origin))\n return (True, self.app_name, origin)\n else:\n return (False, self.app_name)\n\n\nclass CloseCallbackWrapper(object):\n def __init__(self, iterable, callback, environ):\n # pylons.fileapp expects app_iter to have `file` attribute.\n self.file = iterable\n self.callback = callback\n self.environ = environ\n\n def __iter__(self):\n \"\"\"\n return a generator that passes through items from iterable\n then calls callback(environ).\n \"\"\"\n try:\n for item in self.file:\n yield item\n except GeneratorExit:\n if hasattr(self.file, 'close'):\n self.file.close()\n raise\n finally:\n self.callback(self.environ)\n\n\nclass FileIterWrapper(CloseCallbackWrapper, _FileIter):\n \"\"\"Same CloseCallbackWrapper, just with _FileIter mixin.\n\n That will prevent pylons from converting file responses into\n in-memori lists.\n \"\"\"\n pass\n\n\ndef execute_on_completion(application, config, callback):\n \"\"\"\n Call callback(environ) once complete response is sent\n \"\"\"\n\n def inner(environ, start_response):\n try:\n result = application(environ, start_response)\n except:\n callback(environ)\n raise\n # paste.fileapp converts non-file responses into list\n # In order to avoid interception of OOM Killer\n # file responses wrapped into generator with\n # _FileIter in parent tree.\n klass = CloseCallbackWrapper\n if isinstance(result, _FileIter):\n klass = FileIterWrapper\n return klass(result, callback, environ)\n\n return inner\n\n\ndef cleanup_pylons_response_string(environ):\n try:\n msg = 'response cleared by pylons response cleanup middleware'\n environ['pylons.controller']._py_object.response._body = msg\n except (KeyError, AttributeError):\n pass\n", "path": "ckan/config/middleware/pylons_app.py"}]} |
gh_patches_debug_1241 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-2094 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Logging system tests: gRPC/GaxError for 'NotFound' in tearDown / logger.delete
From: https://travis-ci.org/GoogleCloudPlatform/gcloud-python/builds/151551907#L647-L675
``` python
======================================================================
ERROR: test_log_struct_w_metadata (logging_.TestLogging)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/travis/build/GoogleCloudPlatform/gcloud-python/system_tests/logging_.py", line 69, in tearDown
retry(doomed.delete)()
File "/home/travis/build/GoogleCloudPlatform/gcloud-python/system_tests/retry.py", line 77, in wrapped_function
return to_wrap(*args, **kwargs)
File "/home/travis/build/GoogleCloudPlatform/gcloud-python/.tox/system-tests/lib/python2.7/site-packages/gcloud/logging/logger.py", line 268, in delete
client.logging_api.logger_delete(self.project, self.name)
File "/home/travis/build/GoogleCloudPlatform/gcloud-python/.tox/system-tests/lib/python2.7/site-packages/gcloud/logging/_gax.py", line 123, in logger_delete
self._gax_api.delete_log(path, options)
File "/home/travis/build/GoogleCloudPlatform/gcloud-python/.tox/system-tests/lib/python2.7/site-packages/google/cloud/logging/v2/logging_service_v2_api.py", line 216, in delete_log
self._delete_log(request, options)
File "/home/travis/build/GoogleCloudPlatform/gcloud-python/.tox/system-tests/lib/python2.7/site-packages/google/gax/api_callable.py", line 480, in inner
return api_caller(api_call, this_settings, request)
File "/home/travis/build/GoogleCloudPlatform/gcloud-python/.tox/system-tests/lib/python2.7/site-packages/google/gax/api_callable.py", line 468, in base_caller
return api_call(*args)
File "/home/travis/build/GoogleCloudPlatform/gcloud-python/.tox/system-tests/lib/python2.7/site-packages/google/gax/api_callable.py", line 433, in inner
raise_with_traceback(GaxError('RPC failed', cause=exception))
File "/home/travis/build/GoogleCloudPlatform/gcloud-python/.tox/system-tests/lib/python2.7/site-packages/google/gax/api_callable.py", line 430, in inner
return a_func(*args, **kwargs)
File "/home/travis/build/GoogleCloudPlatform/gcloud-python/.tox/system-tests/lib/python2.7/site-packages/google/gax/api_callable.py", line 64, in inner
return a_func(*updated_args, **kwargs)
File "/home/travis/build/GoogleCloudPlatform/gcloud-python/.tox/system-tests/lib/python2.7/site-packages/grpc/beta/_client_adaptations.py", line 305, in __call__
self._request_serializer, self._response_deserializer)
File "/home/travis/build/GoogleCloudPlatform/gcloud-python/.tox/system-tests/lib/python2.7/site-packages/grpc/beta/_client_adaptations.py", line 203, in _blocking_unary_unary
raise _abortion_error(rpc_error_call)
GaxError: GaxError(RPC failed, caused by AbortionError(code=StatusCode.NOT_FOUND, details="Requested entity was not found."))
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gcloud/logging/_gax.py`
Content:
```
1 # Copyright 2016 Google Inc. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """GAX wrapper for Logging API requests."""
16
17 import json
18
19 # pylint: disable=import-error
20 from google.gax import CallOptions
21 from google.gax import INITIAL_PAGE
22 from google.gax.errors import GaxError
23 from google.gax.grpc import exc_to_code
24 from google.logging.type.log_severity_pb2 import LogSeverity
25 from google.logging.v2.logging_config_pb2 import LogSink
26 from google.logging.v2.logging_metrics_pb2 import LogMetric
27 from google.logging.v2.log_entry_pb2 import LogEntry
28 from google.protobuf.json_format import Parse
29 from grpc.beta.interfaces import StatusCode
30 # pylint: enable=import-error
31
32 from gcloud.exceptions import Conflict
33 from gcloud.exceptions import NotFound
34 from gcloud._helpers import _datetime_to_pb_timestamp
35 from gcloud._helpers import _datetime_to_rfc3339
36 from gcloud._helpers import _pb_timestamp_to_datetime
37
38
39 class _LoggingAPI(object):
40 """Helper mapping logging-related APIs.
41
42 :type gax_api:
43 :class:`google.logging.v2.logging_service_v2_api.LoggingServiceV2Api`
44 :param gax_api: API object used to make GAX requests.
45 """
46 def __init__(self, gax_api):
47 self._gax_api = gax_api
48
49 def list_entries(self, projects, filter_='', order_by='',
50 page_size=0, page_token=None):
51 """Return a page of log entry resources.
52
53 :type projects: list of strings
54 :param projects: project IDs to include. If not passed,
55 defaults to the project bound to the API's client.
56
57 :type filter_: str
58 :param filter_: a filter expression. See:
59 https://cloud.google.com/logging/docs/view/advanced_filters
60
61 :type order_by: str
62 :param order_by: One of :data:`gcloud.logging.ASCENDING` or
63 :data:`gcloud.logging.DESCENDING`.
64
65 :type page_size: int
66 :param page_size: maximum number of entries to return, If not passed,
67 defaults to a value set by the API.
68
69 :type page_token: str
70 :param page_token: opaque marker for the next "page" of entries. If not
71 passed, the API will return the first page of
72 entries.
73
74 :rtype: tuple, (list, str)
75 :returns: list of mappings, plus a "next page token" string:
76 if not None, indicates that more entries can be retrieved
77 with another call (pass that value as ``page_token``).
78 """
79 options = _build_paging_options(page_token)
80 page_iter = self._gax_api.list_log_entries(
81 projects, filter_, order_by, page_size, options)
82 entries = [_log_entry_pb_to_mapping(entry_pb)
83 for entry_pb in page_iter.next()]
84 token = page_iter.page_token or None
85 return entries, token
86
87 def write_entries(self, entries, logger_name=None, resource=None,
88 labels=None):
89 """API call: log an entry resource via a POST request
90
91 :type entries: sequence of mapping
92 :param entries: the log entry resources to log.
93
94 :type logger_name: string
95 :param logger_name: name of default logger to which to log the entries;
96 individual entries may override.
97
98 :type resource: mapping
99 :param resource: default resource to associate with entries;
100 individual entries may override.
101
102 :type labels: mapping
103 :param labels: default labels to associate with entries;
104 individual entries may override.
105 """
106 options = None
107 partial_success = False
108 entry_pbs = [_log_entry_mapping_to_pb(entry) for entry in entries]
109 self._gax_api.write_log_entries(entry_pbs, logger_name, resource,
110 labels, partial_success, options)
111
112 def logger_delete(self, project, logger_name):
113 """API call: delete all entries in a logger via a DELETE request
114
115 :type project: string
116 :param project: ID of project containing the log entries to delete
117
118 :type logger_name: string
119 :param logger_name: name of logger containing the log entries to delete
120 """
121 options = None
122 path = 'projects/%s/logs/%s' % (project, logger_name)
123 self._gax_api.delete_log(path, options)
124
125
126 class _SinksAPI(object):
127 """Helper mapping sink-related APIs.
128
129 :type gax_api:
130 :class:`google.logging.v2.config_service_v2_api.ConfigServiceV2Api`
131 :param gax_api: API object used to make GAX requests.
132 """
133 def __init__(self, gax_api):
134 self._gax_api = gax_api
135
136 def list_sinks(self, project, page_size=0, page_token=None):
137 """List sinks for the project associated with this client.
138
139 :type project: string
140 :param project: ID of the project whose sinks are to be listed.
141
142 :type page_size: int
143 :param page_size: maximum number of sinks to return, If not passed,
144 defaults to a value set by the API.
145
146 :type page_token: str
147 :param page_token: opaque marker for the next "page" of sinks. If not
148 passed, the API will return the first page of
149 sinks.
150
151 :rtype: tuple, (list, str)
152 :returns: list of mappings, plus a "next page token" string:
153 if not None, indicates that more sinks can be retrieved
154 with another call (pass that value as ``page_token``).
155 """
156 options = _build_paging_options(page_token)
157 path = 'projects/%s' % (project,)
158 page_iter = self._gax_api.list_sinks(path, page_size, options)
159 sinks = [_log_sink_pb_to_mapping(log_sink_pb)
160 for log_sink_pb in page_iter.next()]
161 token = page_iter.page_token or None
162 return sinks, token
163
164 def sink_create(self, project, sink_name, filter_, destination):
165 """API call: create a sink resource.
166
167 See:
168 https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.sinks/create
169
170 :type project: string
171 :param project: ID of the project in which to create the sink.
172
173 :type sink_name: string
174 :param sink_name: the name of the sink
175
176 :type filter_: string
177 :param filter_: the advanced logs filter expression defining the
178 entries exported by the sink.
179
180 :type destination: string
181 :param destination: destination URI for the entries exported by
182 the sink.
183 """
184 options = None
185 parent = 'projects/%s' % (project,)
186 sink_pb = LogSink(name=sink_name, filter=filter_,
187 destination=destination)
188 try:
189 self._gax_api.create_sink(parent, sink_pb, options)
190 except GaxError as exc:
191 if exc_to_code(exc.cause) == StatusCode.FAILED_PRECONDITION:
192 path = 'projects/%s/sinks/%s' % (project, sink_name)
193 raise Conflict(path)
194 raise
195
196 def sink_get(self, project, sink_name):
197 """API call: retrieve a sink resource.
198
199 :type project: string
200 :param project: ID of the project containing the sink.
201
202 :type sink_name: string
203 :param sink_name: the name of the sink
204
205 :rtype: dict
206 :returns: The sink object returned from the API (converted from a
207 protobuf to a dictionary).
208 """
209 options = None
210 path = 'projects/%s/sinks/%s' % (project, sink_name)
211 try:
212 sink_pb = self._gax_api.get_sink(path, options)
213 except GaxError as exc:
214 if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
215 raise NotFound(path)
216 raise
217 return _log_sink_pb_to_mapping(sink_pb)
218
219 def sink_update(self, project, sink_name, filter_, destination):
220 """API call: update a sink resource.
221
222 :type project: string
223 :param project: ID of the project containing the sink.
224
225 :type sink_name: string
226 :param sink_name: the name of the sink
227
228 :type filter_: string
229 :param filter_: the advanced logs filter expression defining the
230 entries exported by the sink.
231
232 :type destination: string
233 :param destination: destination URI for the entries exported by
234 the sink.
235
236 :rtype: dict
237 :returns: The sink object returned from the API (converted from a
238 protobuf to a dictionary).
239 """
240 options = None
241 path = 'projects/%s/sinks/%s' % (project, sink_name)
242 sink_pb = LogSink(name=path, filter=filter_, destination=destination)
243 try:
244 self._gax_api.update_sink(path, sink_pb, options)
245 except GaxError as exc:
246 if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
247 raise NotFound(path)
248 raise
249 return _log_sink_pb_to_mapping(sink_pb)
250
251 def sink_delete(self, project, sink_name):
252 """API call: delete a sink resource.
253
254 :type project: string
255 :param project: ID of the project containing the sink.
256
257 :type sink_name: string
258 :param sink_name: the name of the sink
259 """
260 options = None
261 path = 'projects/%s/sinks/%s' % (project, sink_name)
262 try:
263 self._gax_api.delete_sink(path, options)
264 except GaxError as exc:
265 if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
266 raise NotFound(path)
267 raise
268
269
270 class _MetricsAPI(object):
271 """Helper mapping sink-related APIs.
272
273 :type gax_api:
274 :class:`google.logging.v2.metrics_service_v2_api.MetricsServiceV2Api`
275 :param gax_api: API object used to make GAX requests.
276 """
277 def __init__(self, gax_api):
278 self._gax_api = gax_api
279
280 def list_metrics(self, project, page_size=0, page_token=None):
281 """List metrics for the project associated with this client.
282
283 :type project: string
284 :param project: ID of the project whose metrics are to be listed.
285
286 :type page_size: int
287 :param page_size: maximum number of metrics to return, If not passed,
288 defaults to a value set by the API.
289
290 :type page_token: str
291 :param page_token: opaque marker for the next "page" of metrics. If not
292 passed, the API will return the first page of
293 metrics.
294
295 :rtype: tuple, (list, str)
296 :returns: list of mappings, plus a "next page token" string:
297 if not None, indicates that more metrics can be retrieved
298 with another call (pass that value as ``page_token``).
299 """
300 options = _build_paging_options(page_token)
301 path = 'projects/%s' % (project,)
302 page_iter = self._gax_api.list_log_metrics(path, page_size, options)
303 metrics = [_log_metric_pb_to_mapping(log_metric_pb)
304 for log_metric_pb in page_iter.next()]
305 token = page_iter.page_token or None
306 return metrics, token
307
308 def metric_create(self, project, metric_name, filter_, description):
309 """API call: create a metric resource.
310
311 See:
312 https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.metrics/create
313
314 :type project: string
315 :param project: ID of the project in which to create the metric.
316
317 :type metric_name: string
318 :param metric_name: the name of the metric
319
320 :type filter_: string
321 :param filter_: the advanced logs filter expression defining the
322 entries exported by the metric.
323
324 :type description: string
325 :param description: description of the metric.
326 """
327 options = None
328 parent = 'projects/%s' % (project,)
329 metric_pb = LogMetric(name=metric_name, filter=filter_,
330 description=description)
331 try:
332 self._gax_api.create_log_metric(parent, metric_pb, options)
333 except GaxError as exc:
334 if exc_to_code(exc.cause) == StatusCode.FAILED_PRECONDITION:
335 path = 'projects/%s/metrics/%s' % (project, metric_name)
336 raise Conflict(path)
337 raise
338
339 def metric_get(self, project, metric_name):
340 """API call: retrieve a metric resource.
341
342 :type project: string
343 :param project: ID of the project containing the metric.
344
345 :type metric_name: string
346 :param metric_name: the name of the metric
347
348 :rtype: dict
349 :returns: The metric object returned from the API (converted from a
350 protobuf to a dictionary).
351 """
352 options = None
353 path = 'projects/%s/metrics/%s' % (project, metric_name)
354 try:
355 metric_pb = self._gax_api.get_log_metric(path, options)
356 except GaxError as exc:
357 if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
358 raise NotFound(path)
359 raise
360 return _log_metric_pb_to_mapping(metric_pb)
361
362 def metric_update(self, project, metric_name, filter_, description):
363 """API call: update a metric resource.
364
365 :type project: string
366 :param project: ID of the project containing the metric.
367
368 :type metric_name: string
369 :param metric_name: the name of the metric
370
371 :type filter_: string
372 :param filter_: the advanced logs filter expression defining the
373 entries exported by the metric.
374
375 :type description: string
376 :param description: description of the metric.
377
378 :rtype: dict
379 :returns: The metric object returned from the API (converted from a
380 protobuf to a dictionary).
381 """
382 options = None
383 path = 'projects/%s/metrics/%s' % (project, metric_name)
384 metric_pb = LogMetric(name=path, filter=filter_,
385 description=description)
386 try:
387 self._gax_api.update_log_metric(path, metric_pb, options)
388 except GaxError as exc:
389 if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
390 raise NotFound(path)
391 raise
392 return _log_metric_pb_to_mapping(metric_pb)
393
394 def metric_delete(self, project, metric_name):
395 """API call: delete a metric resource.
396
397 :type project: string
398 :param project: ID of the project containing the metric.
399
400 :type metric_name: string
401 :param metric_name: the name of the metric
402 """
403 options = None
404 path = 'projects/%s/metrics/%s' % (project, metric_name)
405 try:
406 self._gax_api.delete_log_metric(path, options)
407 except GaxError as exc:
408 if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
409 raise NotFound(path)
410 raise
411
412
413 def _build_paging_options(page_token=None):
414 """Helper for :meth:'_PublisherAPI.list_topics' et aliae."""
415 if page_token is None:
416 page_token = INITIAL_PAGE
417 options = {'page_token': page_token}
418 return CallOptions(**options)
419
420
421 def _mon_resource_pb_to_mapping(resource_pb):
422 """Helper for :func:_log_entry_pb_to_mapping"""
423 mapping = {
424 'type': resource_pb.type,
425 }
426 if resource_pb.labels:
427 mapping['labels'] = resource_pb.labels
428 return mapping
429
430
431 def _pb_timestamp_to_rfc3339(timestamp_pb):
432 """Helper for :func:_log_entry_pb_to_mapping"""
433 timestamp = _pb_timestamp_to_datetime(timestamp_pb)
434 return _datetime_to_rfc3339(timestamp)
435
436
437 def _value_pb_to_value(value_pb):
438 """Helper for :func:`_log_entry_pb_to_mapping`."""
439 kind = value_pb.WhichOneof('kind')
440
441 if kind is None:
442 result = None
443
444 elif kind == 'string_value':
445 result = value_pb.string_value
446
447 elif kind == 'bool_value':
448 result = value_pb.bool_value
449
450 elif kind == 'number_value':
451 result = value_pb.number_value
452
453 elif kind == 'list_value':
454 result = [_value_pb_to_value(element)
455 for element in value_pb.list_value.values]
456
457 elif kind == 'struct_value':
458 result = _struct_pb_to_mapping(value_pb.struct_value)
459
460 else:
461 raise ValueError('Value protobuf had unknown kind: %s' % (kind,))
462
463 return result
464
465
466 def _struct_pb_to_mapping(struct_pb):
467 """Helper for :func:`_log_entry_pb_to_mapping`."""
468 return dict([(key, _value_pb_to_value(struct_pb.fields[key]))
469 for key in struct_pb.fields])
470
471
472 def _log_entry_pb_to_mapping(entry_pb):
473 """Helper for :meth:`list_entries`, et aliae
474
475 Ideally, would use a function from :mod:`protobuf.json_format`, but
476 the right one isn't public. See:
477 https://github.com/google/protobuf/issues/1351
478 """
479 mapping = {
480 'logName': entry_pb.log_name,
481 'resource': _mon_resource_pb_to_mapping(entry_pb.resource),
482 'severity': LogSeverity.Name(entry_pb.severity),
483 'insertId': entry_pb.insert_id,
484 'timestamp': _pb_timestamp_to_rfc3339(entry_pb.timestamp),
485 'labels': entry_pb.labels,
486 }
487 if entry_pb.HasField('text_payload'):
488 mapping['textPayload'] = entry_pb.text_payload
489
490 if entry_pb.HasField('json_payload'):
491 mapping['jsonPayload'] = _struct_pb_to_mapping(entry_pb.json_payload)
492
493 if entry_pb.HasField('proto_payload'):
494 mapping['protoPayload'] = entry_pb.proto_payload
495
496 if entry_pb.http_request:
497 request = entry_pb.http_request
498 mapping['httpRequest'] = {
499 'requestMethod': request.request_method,
500 'requestUrl': request.request_url,
501 'status': request.status,
502 'referer': request.referer,
503 'userAgent': request.user_agent,
504 'cacheHit': request.cache_hit,
505 'requestSize': request.request_size,
506 'responseSize': request.response_size,
507 'remoteIp': request.remote_ip,
508 }
509
510 if entry_pb.operation:
511 operation = entry_pb.operation
512 mapping['operation'] = {
513 'producer': operation.producer,
514 'id': operation.id,
515 'first': operation.first,
516 'last': operation.last,
517 }
518
519 return mapping
520
521
522 def _http_request_mapping_to_pb(info, request):
523 """Helper for _log_entry_mapping_to_pb"""
524 optional_request_keys = {
525 'requestMethod': 'request_method',
526 'requestUrl': 'request_url',
527 'status': 'status',
528 'referer': 'referer',
529 'userAgent': 'user_agent',
530 'cacheHit': 'cache_hit',
531 'requestSize': 'request_size',
532 'responseSize': 'response_size',
533 'remoteIp': 'remote_ip',
534 }
535 for key, pb_name in optional_request_keys.items():
536 if key in info:
537 setattr(request, pb_name, info[key])
538
539
540 def _log_operation_mapping_to_pb(info, operation):
541 """Helper for _log_entry_mapping_to_pb"""
542 operation.producer = info['producer']
543 operation.id = info['id']
544
545 if 'first' in info:
546 operation.first = info['first']
547
548 if 'last' in info:
549 operation.last = info['last']
550
551
552 def _log_entry_mapping_to_pb(mapping):
553 """Helper for :meth:`write_entries`, et aliae
554
555 Ideally, would use a function from :mod:`protobuf.json_format`, but
556 the right one isn't public. See:
557 https://github.com/google/protobuf/issues/1351
558 """
559 # pylint: disable=too-many-branches
560 entry_pb = LogEntry()
561
562 optional_scalar_keys = {
563 'logName': 'log_name',
564 'insertId': 'insert_id',
565 'textPayload': 'text_payload',
566 }
567
568 for key, pb_name in optional_scalar_keys.items():
569 if key in mapping:
570 setattr(entry_pb, pb_name, mapping[key])
571
572 if 'resource' in mapping:
573 entry_pb.resource.type = mapping['resource']['type']
574
575 if 'severity' in mapping:
576 severity = mapping['severity']
577 if isinstance(severity, str):
578 severity = LogSeverity.Value(severity)
579 entry_pb.severity = severity
580
581 if 'timestamp' in mapping:
582 timestamp = _datetime_to_pb_timestamp(mapping['timestamp'])
583 entry_pb.timestamp.CopyFrom(timestamp)
584
585 if 'labels' in mapping:
586 for key, value in mapping['labels'].items():
587 entry_pb.labels[key] = value
588
589 if 'jsonPayload' in mapping:
590 for key, value in mapping['jsonPayload'].items():
591 entry_pb.json_payload[key] = value
592
593 if 'protoPayload' in mapping:
594 Parse(json.dumps(mapping['protoPayload']), entry_pb.proto_payload)
595
596 if 'httpRequest' in mapping:
597 _http_request_mapping_to_pb(
598 mapping['httpRequest'], entry_pb.http_request)
599
600 if 'operation' in mapping:
601 _log_operation_mapping_to_pb(
602 mapping['operation'], entry_pb.operation)
603
604 return entry_pb
605 # pylint: enable=too-many-branches
606
607
608 def _log_sink_pb_to_mapping(sink_pb):
609 """Helper for :meth:`list_sinks`, et aliae
610
611 Ideally, would use a function from :mod:`protobuf.json_format`, but
612 the right one isn't public. See:
613 https://github.com/google/protobuf/issues/1351
614 """
615 return {
616 'name': sink_pb.name,
617 'destination': sink_pb.destination,
618 'filter': sink_pb.filter,
619 }
620
621
622 def _log_metric_pb_to_mapping(metric_pb):
623 """Helper for :meth:`list_metrics`, et aliae
624
625 Ideally, would use a function from :mod:`protobuf.json_format`, but
626 the right one isn't public. See:
627 https://github.com/google/protobuf/issues/1351
628 """
629 return {
630 'name': metric_pb.name,
631 'description': metric_pb.description,
632 'filter': metric_pb.filter,
633 }
634
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
 
 
 if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
 
 
 if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gcloud/logging/_gax.py b/gcloud/logging/_gax.py
--- a/gcloud/logging/_gax.py
+++ b/gcloud/logging/_gax.py
@@ -120,7 +120,12 @@
"""
options = None
path = 'projects/%s/logs/%s' % (project, logger_name)
- self._gax_api.delete_log(path, options)
+ try:
+ self._gax_api.delete_log(path, options)
+ except GaxError as exc:
+ if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
+ raise NotFound(path)
+ raise
 
 
 class _SinksAPI(object):
| {"golden_diff": "diff --git a/gcloud/logging/_gax.py b/gcloud/logging/_gax.py\n--- a/gcloud/logging/_gax.py\n+++ b/gcloud/logging/_gax.py\n@@ -120,7 +120,12 @@\n \"\"\"\n options = None\n path = 'projects/%s/logs/%s' % (project, logger_name)\n- self._gax_api.delete_log(path, options)\n+ try:\n+ self._gax_api.delete_log(path, options)\n+ except GaxError as exc:\n+ if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:\n+ raise NotFound(path)\n+ raise\n \n \n class _SinksAPI(object):\n", "issue": "Logging system tests: gRPC/GaxError for 'NotFound' in tearDown / logger.delete\nFrom: https://travis-ci.org/GoogleCloudPlatform/gcloud-python/builds/151551907#L647-L675\n\n``` python\n======================================================================\nERROR: test_log_struct_w_metadata (logging_.TestLogging)\n----------------------------------------------------------------------\nTraceback (most recent call last):\n File \"/home/travis/build/GoogleCloudPlatform/gcloud-python/system_tests/logging_.py\", line 69, in tearDown\n retry(doomed.delete)()\n File \"/home/travis/build/GoogleCloudPlatform/gcloud-python/system_tests/retry.py\", line 77, in wrapped_function\n return to_wrap(*args, **kwargs)\n File \"/home/travis/build/GoogleCloudPlatform/gcloud-python/.tox/system-tests/lib/python2.7/site-packages/gcloud/logging/logger.py\", line 268, in delete\n client.logging_api.logger_delete(self.project, self.name)\n File \"/home/travis/build/GoogleCloudPlatform/gcloud-python/.tox/system-tests/lib/python2.7/site-packages/gcloud/logging/_gax.py\", line 123, in logger_delete\n self._gax_api.delete_log(path, options)\n File \"/home/travis/build/GoogleCloudPlatform/gcloud-python/.tox/system-tests/lib/python2.7/site-packages/google/cloud/logging/v2/logging_service_v2_api.py\", line 216, in delete_log\n self._delete_log(request, options)\n File \"/home/travis/build/GoogleCloudPlatform/gcloud-python/.tox/system-tests/lib/python2.7/site-packages/google/gax/api_callable.py\", line 480, in inner\n return api_caller(api_call, this_settings, request)\n File \"/home/travis/build/GoogleCloudPlatform/gcloud-python/.tox/system-tests/lib/python2.7/site-packages/google/gax/api_callable.py\", line 468, in base_caller\n return api_call(*args)\n File \"/home/travis/build/GoogleCloudPlatform/gcloud-python/.tox/system-tests/lib/python2.7/site-packages/google/gax/api_callable.py\", line 433, in inner\n raise_with_traceback(GaxError('RPC failed', cause=exception))\n File \"/home/travis/build/GoogleCloudPlatform/gcloud-python/.tox/system-tests/lib/python2.7/site-packages/google/gax/api_callable.py\", line 430, in inner\n return a_func(*args, **kwargs)\n File \"/home/travis/build/GoogleCloudPlatform/gcloud-python/.tox/system-tests/lib/python2.7/site-packages/google/gax/api_callable.py\", line 64, in inner\n return a_func(*updated_args, **kwargs)\n File \"/home/travis/build/GoogleCloudPlatform/gcloud-python/.tox/system-tests/lib/python2.7/site-packages/grpc/beta/_client_adaptations.py\", line 305, in __call__\n self._request_serializer, self._response_deserializer)\n File \"/home/travis/build/GoogleCloudPlatform/gcloud-python/.tox/system-tests/lib/python2.7/site-packages/grpc/beta/_client_adaptations.py\", line 203, in _blocking_unary_unary\n raise _abortion_error(rpc_error_call)\nGaxError: GaxError(RPC failed, caused by AbortionError(code=StatusCode.NOT_FOUND, details=\"Requested entity was not found.\"))\n```\n\n", "before_files": [{"content": "# Copyright 2016 Google Inc. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"GAX wrapper for Logging API requests.\"\"\"\n\nimport json\n\n# pylint: disable=import-error\nfrom google.gax import CallOptions\nfrom google.gax import INITIAL_PAGE\nfrom google.gax.errors import GaxError\nfrom google.gax.grpc import exc_to_code\nfrom google.logging.type.log_severity_pb2 import LogSeverity\nfrom google.logging.v2.logging_config_pb2 import LogSink\nfrom google.logging.v2.logging_metrics_pb2 import LogMetric\nfrom google.logging.v2.log_entry_pb2 import LogEntry\nfrom google.protobuf.json_format import Parse\nfrom grpc.beta.interfaces import StatusCode\n# pylint: enable=import-error\n\nfrom gcloud.exceptions import Conflict\nfrom gcloud.exceptions import NotFound\nfrom gcloud._helpers import _datetime_to_pb_timestamp\nfrom gcloud._helpers import _datetime_to_rfc3339\nfrom gcloud._helpers import _pb_timestamp_to_datetime\n\n\nclass _LoggingAPI(object):\n \"\"\"Helper mapping logging-related APIs.\n\n :type gax_api:\n :class:`google.logging.v2.logging_service_v2_api.LoggingServiceV2Api`\n :param gax_api: API object used to make GAX requests.\n \"\"\"\n def __init__(self, gax_api):\n self._gax_api = gax_api\n\n def list_entries(self, projects, filter_='', order_by='',\n page_size=0, page_token=None):\n \"\"\"Return a page of log entry resources.\n\n :type projects: list of strings\n :param projects: project IDs to include. If not passed,\n defaults to the project bound to the API's client.\n\n :type filter_: str\n :param filter_: a filter expression. See:\n https://cloud.google.com/logging/docs/view/advanced_filters\n\n :type order_by: str\n :param order_by: One of :data:`gcloud.logging.ASCENDING` or\n :data:`gcloud.logging.DESCENDING`.\n\n :type page_size: int\n :param page_size: maximum number of entries to return, If not passed,\n defaults to a value set by the API.\n\n :type page_token: str\n :param page_token: opaque marker for the next \"page\" of entries. 
If not\n passed, the API will return the first page of\n entries.\n\n :rtype: tuple, (list, str)\n :returns: list of mappings, plus a \"next page token\" string:\n if not None, indicates that more entries can be retrieved\n with another call (pass that value as ``page_token``).\n \"\"\"\n options = _build_paging_options(page_token)\n page_iter = self._gax_api.list_log_entries(\n projects, filter_, order_by, page_size, options)\n entries = [_log_entry_pb_to_mapping(entry_pb)\n for entry_pb in page_iter.next()]\n token = page_iter.page_token or None\n return entries, token\n\n def write_entries(self, entries, logger_name=None, resource=None,\n labels=None):\n \"\"\"API call: log an entry resource via a POST request\n\n :type entries: sequence of mapping\n :param entries: the log entry resources to log.\n\n :type logger_name: string\n :param logger_name: name of default logger to which to log the entries;\n individual entries may override.\n\n :type resource: mapping\n :param resource: default resource to associate with entries;\n individual entries may override.\n\n :type labels: mapping\n :param labels: default labels to associate with entries;\n individual entries may override.\n \"\"\"\n options = None\n partial_success = False\n entry_pbs = [_log_entry_mapping_to_pb(entry) for entry in entries]\n self._gax_api.write_log_entries(entry_pbs, logger_name, resource,\n labels, partial_success, options)\n\n def logger_delete(self, project, logger_name):\n \"\"\"API call: delete all entries in a logger via a DELETE request\n\n :type project: string\n :param project: ID of project containing the log entries to delete\n\n :type logger_name: string\n :param logger_name: name of logger containing the log entries to delete\n \"\"\"\n options = None\n path = 'projects/%s/logs/%s' % (project, logger_name)\n self._gax_api.delete_log(path, options)\n\n\nclass _SinksAPI(object):\n \"\"\"Helper mapping sink-related APIs.\n\n :type gax_api:\n :class:`google.logging.v2.config_service_v2_api.ConfigServiceV2Api`\n :param gax_api: API object used to make GAX requests.\n \"\"\"\n def __init__(self, gax_api):\n self._gax_api = gax_api\n\n def list_sinks(self, project, page_size=0, page_token=None):\n \"\"\"List sinks for the project associated with this client.\n\n :type project: string\n :param project: ID of the project whose sinks are to be listed.\n\n :type page_size: int\n :param page_size: maximum number of sinks to return, If not passed,\n defaults to a value set by the API.\n\n :type page_token: str\n :param page_token: opaque marker for the next \"page\" of sinks. 
If not\n passed, the API will return the first page of\n sinks.\n\n :rtype: tuple, (list, str)\n :returns: list of mappings, plus a \"next page token\" string:\n if not None, indicates that more sinks can be retrieved\n with another call (pass that value as ``page_token``).\n \"\"\"\n options = _build_paging_options(page_token)\n path = 'projects/%s' % (project,)\n page_iter = self._gax_api.list_sinks(path, page_size, options)\n sinks = [_log_sink_pb_to_mapping(log_sink_pb)\n for log_sink_pb in page_iter.next()]\n token = page_iter.page_token or None\n return sinks, token\n\n def sink_create(self, project, sink_name, filter_, destination):\n \"\"\"API call: create a sink resource.\n\n See:\n https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.sinks/create\n\n :type project: string\n :param project: ID of the project in which to create the sink.\n\n :type sink_name: string\n :param sink_name: the name of the sink\n\n :type filter_: string\n :param filter_: the advanced logs filter expression defining the\n entries exported by the sink.\n\n :type destination: string\n :param destination: destination URI for the entries exported by\n the sink.\n \"\"\"\n options = None\n parent = 'projects/%s' % (project,)\n sink_pb = LogSink(name=sink_name, filter=filter_,\n destination=destination)\n try:\n self._gax_api.create_sink(parent, sink_pb, options)\n except GaxError as exc:\n if exc_to_code(exc.cause) == StatusCode.FAILED_PRECONDITION:\n path = 'projects/%s/sinks/%s' % (project, sink_name)\n raise Conflict(path)\n raise\n\n def sink_get(self, project, sink_name):\n \"\"\"API call: retrieve a sink resource.\n\n :type project: string\n :param project: ID of the project containing the sink.\n\n :type sink_name: string\n :param sink_name: the name of the sink\n\n :rtype: dict\n :returns: The sink object returned from the API (converted from a\n protobuf to a dictionary).\n \"\"\"\n options = None\n path = 'projects/%s/sinks/%s' % (project, sink_name)\n try:\n sink_pb = self._gax_api.get_sink(path, options)\n except GaxError as exc:\n if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:\n raise NotFound(path)\n raise\n return _log_sink_pb_to_mapping(sink_pb)\n\n def sink_update(self, project, sink_name, filter_, destination):\n \"\"\"API call: update a sink resource.\n\n :type project: string\n :param project: ID of the project containing the sink.\n\n :type sink_name: string\n :param sink_name: the name of the sink\n\n :type filter_: string\n :param filter_: the advanced logs filter expression defining the\n entries exported by the sink.\n\n :type destination: string\n :param destination: destination URI for the entries exported by\n the sink.\n\n :rtype: dict\n :returns: The sink object returned from the API (converted from a\n protobuf to a dictionary).\n \"\"\"\n options = None\n path = 'projects/%s/sinks/%s' % (project, sink_name)\n sink_pb = LogSink(name=path, filter=filter_, destination=destination)\n try:\n self._gax_api.update_sink(path, sink_pb, options)\n except GaxError as exc:\n if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:\n raise NotFound(path)\n raise\n return _log_sink_pb_to_mapping(sink_pb)\n\n def sink_delete(self, project, sink_name):\n \"\"\"API call: delete a sink resource.\n\n :type project: string\n :param project: ID of the project containing the sink.\n\n :type sink_name: string\n :param sink_name: the name of the sink\n \"\"\"\n options = None\n path = 'projects/%s/sinks/%s' % (project, sink_name)\n try:\n self._gax_api.delete_sink(path, options)\n 
except GaxError as exc:\n if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:\n raise NotFound(path)\n raise\n\n\nclass _MetricsAPI(object):\n \"\"\"Helper mapping sink-related APIs.\n\n :type gax_api:\n :class:`google.logging.v2.metrics_service_v2_api.MetricsServiceV2Api`\n :param gax_api: API object used to make GAX requests.\n \"\"\"\n def __init__(self, gax_api):\n self._gax_api = gax_api\n\n def list_metrics(self, project, page_size=0, page_token=None):\n \"\"\"List metrics for the project associated with this client.\n\n :type project: string\n :param project: ID of the project whose metrics are to be listed.\n\n :type page_size: int\n :param page_size: maximum number of metrics to return, If not passed,\n defaults to a value set by the API.\n\n :type page_token: str\n :param page_token: opaque marker for the next \"page\" of metrics. If not\n passed, the API will return the first page of\n metrics.\n\n :rtype: tuple, (list, str)\n :returns: list of mappings, plus a \"next page token\" string:\n if not None, indicates that more metrics can be retrieved\n with another call (pass that value as ``page_token``).\n \"\"\"\n options = _build_paging_options(page_token)\n path = 'projects/%s' % (project,)\n page_iter = self._gax_api.list_log_metrics(path, page_size, options)\n metrics = [_log_metric_pb_to_mapping(log_metric_pb)\n for log_metric_pb in page_iter.next()]\n token = page_iter.page_token or None\n return metrics, token\n\n def metric_create(self, project, metric_name, filter_, description):\n \"\"\"API call: create a metric resource.\n\n See:\n https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.metrics/create\n\n :type project: string\n :param project: ID of the project in which to create the metric.\n\n :type metric_name: string\n :param metric_name: the name of the metric\n\n :type filter_: string\n :param filter_: the advanced logs filter expression defining the\n entries exported by the metric.\n\n :type description: string\n :param description: description of the metric.\n \"\"\"\n options = None\n parent = 'projects/%s' % (project,)\n metric_pb = LogMetric(name=metric_name, filter=filter_,\n description=description)\n try:\n self._gax_api.create_log_metric(parent, metric_pb, options)\n except GaxError as exc:\n if exc_to_code(exc.cause) == StatusCode.FAILED_PRECONDITION:\n path = 'projects/%s/metrics/%s' % (project, metric_name)\n raise Conflict(path)\n raise\n\n def metric_get(self, project, metric_name):\n \"\"\"API call: retrieve a metric resource.\n\n :type project: string\n :param project: ID of the project containing the metric.\n\n :type metric_name: string\n :param metric_name: the name of the metric\n\n :rtype: dict\n :returns: The metric object returned from the API (converted from a\n protobuf to a dictionary).\n \"\"\"\n options = None\n path = 'projects/%s/metrics/%s' % (project, metric_name)\n try:\n metric_pb = self._gax_api.get_log_metric(path, options)\n except GaxError as exc:\n if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:\n raise NotFound(path)\n raise\n return _log_metric_pb_to_mapping(metric_pb)\n\n def metric_update(self, project, metric_name, filter_, description):\n \"\"\"API call: update a metric resource.\n\n :type project: string\n :param project: ID of the project containing the metric.\n\n :type metric_name: string\n :param metric_name: the name of the metric\n\n :type filter_: string\n :param filter_: the advanced logs filter expression defining the\n entries exported by the metric.\n\n :type description: string\n 
:param description: description of the metric.\n\n :rtype: dict\n :returns: The metric object returned from the API (converted from a\n protobuf to a dictionary).\n \"\"\"\n options = None\n path = 'projects/%s/metrics/%s' % (project, metric_name)\n metric_pb = LogMetric(name=path, filter=filter_,\n description=description)\n try:\n self._gax_api.update_log_metric(path, metric_pb, options)\n except GaxError as exc:\n if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:\n raise NotFound(path)\n raise\n return _log_metric_pb_to_mapping(metric_pb)\n\n def metric_delete(self, project, metric_name):\n \"\"\"API call: delete a metric resource.\n\n :type project: string\n :param project: ID of the project containing the metric.\n\n :type metric_name: string\n :param metric_name: the name of the metric\n \"\"\"\n options = None\n path = 'projects/%s/metrics/%s' % (project, metric_name)\n try:\n self._gax_api.delete_log_metric(path, options)\n except GaxError as exc:\n if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:\n raise NotFound(path)\n raise\n\n\ndef _build_paging_options(page_token=None):\n \"\"\"Helper for :meth:'_PublisherAPI.list_topics' et aliae.\"\"\"\n if page_token is None:\n page_token = INITIAL_PAGE\n options = {'page_token': page_token}\n return CallOptions(**options)\n\n\ndef _mon_resource_pb_to_mapping(resource_pb):\n \"\"\"Helper for :func:_log_entry_pb_to_mapping\"\"\"\n mapping = {\n 'type': resource_pb.type,\n }\n if resource_pb.labels:\n mapping['labels'] = resource_pb.labels\n return mapping\n\n\ndef _pb_timestamp_to_rfc3339(timestamp_pb):\n \"\"\"Helper for :func:_log_entry_pb_to_mapping\"\"\"\n timestamp = _pb_timestamp_to_datetime(timestamp_pb)\n return _datetime_to_rfc3339(timestamp)\n\n\ndef _value_pb_to_value(value_pb):\n \"\"\"Helper for :func:`_log_entry_pb_to_mapping`.\"\"\"\n kind = value_pb.WhichOneof('kind')\n\n if kind is None:\n result = None\n\n elif kind == 'string_value':\n result = value_pb.string_value\n\n elif kind == 'bool_value':\n result = value_pb.bool_value\n\n elif kind == 'number_value':\n result = value_pb.number_value\n\n elif kind == 'list_value':\n result = [_value_pb_to_value(element)\n for element in value_pb.list_value.values]\n\n elif kind == 'struct_value':\n result = _struct_pb_to_mapping(value_pb.struct_value)\n\n else:\n raise ValueError('Value protobuf had unknown kind: %s' % (kind,))\n\n return result\n\n\ndef _struct_pb_to_mapping(struct_pb):\n \"\"\"Helper for :func:`_log_entry_pb_to_mapping`.\"\"\"\n return dict([(key, _value_pb_to_value(struct_pb.fields[key]))\n for key in struct_pb.fields])\n\n\ndef _log_entry_pb_to_mapping(entry_pb):\n \"\"\"Helper for :meth:`list_entries`, et aliae\n\n Ideally, would use a function from :mod:`protobuf.json_format`, but\n the right one isn't public. 
See:\n https://github.com/google/protobuf/issues/1351\n \"\"\"\n mapping = {\n 'logName': entry_pb.log_name,\n 'resource': _mon_resource_pb_to_mapping(entry_pb.resource),\n 'severity': LogSeverity.Name(entry_pb.severity),\n 'insertId': entry_pb.insert_id,\n 'timestamp': _pb_timestamp_to_rfc3339(entry_pb.timestamp),\n 'labels': entry_pb.labels,\n }\n if entry_pb.HasField('text_payload'):\n mapping['textPayload'] = entry_pb.text_payload\n\n if entry_pb.HasField('json_payload'):\n mapping['jsonPayload'] = _struct_pb_to_mapping(entry_pb.json_payload)\n\n if entry_pb.HasField('proto_payload'):\n mapping['protoPayload'] = entry_pb.proto_payload\n\n if entry_pb.http_request:\n request = entry_pb.http_request\n mapping['httpRequest'] = {\n 'requestMethod': request.request_method,\n 'requestUrl': request.request_url,\n 'status': request.status,\n 'referer': request.referer,\n 'userAgent': request.user_agent,\n 'cacheHit': request.cache_hit,\n 'requestSize': request.request_size,\n 'responseSize': request.response_size,\n 'remoteIp': request.remote_ip,\n }\n\n if entry_pb.operation:\n operation = entry_pb.operation\n mapping['operation'] = {\n 'producer': operation.producer,\n 'id': operation.id,\n 'first': operation.first,\n 'last': operation.last,\n }\n\n return mapping\n\n\ndef _http_request_mapping_to_pb(info, request):\n \"\"\"Helper for _log_entry_mapping_to_pb\"\"\"\n optional_request_keys = {\n 'requestMethod': 'request_method',\n 'requestUrl': 'request_url',\n 'status': 'status',\n 'referer': 'referer',\n 'userAgent': 'user_agent',\n 'cacheHit': 'cache_hit',\n 'requestSize': 'request_size',\n 'responseSize': 'response_size',\n 'remoteIp': 'remote_ip',\n }\n for key, pb_name in optional_request_keys.items():\n if key in info:\n setattr(request, pb_name, info[key])\n\n\ndef _log_operation_mapping_to_pb(info, operation):\n \"\"\"Helper for _log_entry_mapping_to_pb\"\"\"\n operation.producer = info['producer']\n operation.id = info['id']\n\n if 'first' in info:\n operation.first = info['first']\n\n if 'last' in info:\n operation.last = info['last']\n\n\ndef _log_entry_mapping_to_pb(mapping):\n \"\"\"Helper for :meth:`write_entries`, et aliae\n\n Ideally, would use a function from :mod:`protobuf.json_format`, but\n the right one isn't public. 
See:\n https://github.com/google/protobuf/issues/1351\n \"\"\"\n # pylint: disable=too-many-branches\n entry_pb = LogEntry()\n\n optional_scalar_keys = {\n 'logName': 'log_name',\n 'insertId': 'insert_id',\n 'textPayload': 'text_payload',\n }\n\n for key, pb_name in optional_scalar_keys.items():\n if key in mapping:\n setattr(entry_pb, pb_name, mapping[key])\n\n if 'resource' in mapping:\n entry_pb.resource.type = mapping['resource']['type']\n\n if 'severity' in mapping:\n severity = mapping['severity']\n if isinstance(severity, str):\n severity = LogSeverity.Value(severity)\n entry_pb.severity = severity\n\n if 'timestamp' in mapping:\n timestamp = _datetime_to_pb_timestamp(mapping['timestamp'])\n entry_pb.timestamp.CopyFrom(timestamp)\n\n if 'labels' in mapping:\n for key, value in mapping['labels'].items():\n entry_pb.labels[key] = value\n\n if 'jsonPayload' in mapping:\n for key, value in mapping['jsonPayload'].items():\n entry_pb.json_payload[key] = value\n\n if 'protoPayload' in mapping:\n Parse(json.dumps(mapping['protoPayload']), entry_pb.proto_payload)\n\n if 'httpRequest' in mapping:\n _http_request_mapping_to_pb(\n mapping['httpRequest'], entry_pb.http_request)\n\n if 'operation' in mapping:\n _log_operation_mapping_to_pb(\n mapping['operation'], entry_pb.operation)\n\n return entry_pb\n # pylint: enable=too-many-branches\n\n\ndef _log_sink_pb_to_mapping(sink_pb):\n \"\"\"Helper for :meth:`list_sinks`, et aliae\n\n Ideally, would use a function from :mod:`protobuf.json_format`, but\n the right one isn't public. See:\n https://github.com/google/protobuf/issues/1351\n \"\"\"\n return {\n 'name': sink_pb.name,\n 'destination': sink_pb.destination,\n 'filter': sink_pb.filter,\n }\n\n\ndef _log_metric_pb_to_mapping(metric_pb):\n \"\"\"Helper for :meth:`list_metrics`, et aliae\n\n Ideally, would use a function from :mod:`protobuf.json_format`, but\n the right one isn't public. See:\n https://github.com/google/protobuf/issues/1351\n \"\"\"\n return {\n 'name': metric_pb.name,\n 'description': metric_pb.description,\n 'filter': metric_pb.filter,\n }\n", "path": "gcloud/logging/_gax.py"}], "after_files": [{"content": "# Copyright 2016 Google Inc. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"GAX wrapper for Logging API requests.\"\"\"\n\nimport json\n\n# pylint: disable=import-error\nfrom google.gax import CallOptions\nfrom google.gax import INITIAL_PAGE\nfrom google.gax.errors import GaxError\nfrom google.gax.grpc import exc_to_code\nfrom google.logging.type.log_severity_pb2 import LogSeverity\nfrom google.logging.v2.logging_config_pb2 import LogSink\nfrom google.logging.v2.logging_metrics_pb2 import LogMetric\nfrom google.logging.v2.log_entry_pb2 import LogEntry\nfrom google.protobuf.json_format import Parse\nfrom grpc.beta.interfaces import StatusCode\n# pylint: enable=import-error\n\nfrom gcloud.exceptions import Conflict\nfrom gcloud.exceptions import NotFound\nfrom gcloud._helpers import _datetime_to_pb_timestamp\nfrom gcloud._helpers import _datetime_to_rfc3339\nfrom gcloud._helpers import _pb_timestamp_to_datetime\n\n\nclass _LoggingAPI(object):\n \"\"\"Helper mapping logging-related APIs.\n\n :type gax_api:\n :class:`google.logging.v2.logging_service_v2_api.LoggingServiceV2Api`\n :param gax_api: API object used to make GAX requests.\n \"\"\"\n def __init__(self, gax_api):\n self._gax_api = gax_api\n\n def list_entries(self, projects, filter_='', order_by='',\n page_size=0, page_token=None):\n \"\"\"Return a page of log entry resources.\n\n :type projects: list of strings\n :param projects: project IDs to include. If not passed,\n defaults to the project bound to the API's client.\n\n :type filter_: str\n :param filter_: a filter expression. See:\n https://cloud.google.com/logging/docs/view/advanced_filters\n\n :type order_by: str\n :param order_by: One of :data:`gcloud.logging.ASCENDING` or\n :data:`gcloud.logging.DESCENDING`.\n\n :type page_size: int\n :param page_size: maximum number of entries to return, If not passed,\n defaults to a value set by the API.\n\n :type page_token: str\n :param page_token: opaque marker for the next \"page\" of entries. 
If not\n passed, the API will return the first page of\n entries.\n\n :rtype: tuple, (list, str)\n :returns: list of mappings, plus a \"next page token\" string:\n if not None, indicates that more entries can be retrieved\n with another call (pass that value as ``page_token``).\n \"\"\"\n options = _build_paging_options(page_token)\n page_iter = self._gax_api.list_log_entries(\n projects, filter_, order_by, page_size, options)\n entries = [_log_entry_pb_to_mapping(entry_pb)\n for entry_pb in page_iter.next()]\n token = page_iter.page_token or None\n return entries, token\n\n def write_entries(self, entries, logger_name=None, resource=None,\n labels=None):\n \"\"\"API call: log an entry resource via a POST request\n\n :type entries: sequence of mapping\n :param entries: the log entry resources to log.\n\n :type logger_name: string\n :param logger_name: name of default logger to which to log the entries;\n individual entries may override.\n\n :type resource: mapping\n :param resource: default resource to associate with entries;\n individual entries may override.\n\n :type labels: mapping\n :param labels: default labels to associate with entries;\n individual entries may override.\n \"\"\"\n options = None\n partial_success = False\n entry_pbs = [_log_entry_mapping_to_pb(entry) for entry in entries]\n self._gax_api.write_log_entries(entry_pbs, logger_name, resource,\n labels, partial_success, options)\n\n def logger_delete(self, project, logger_name):\n \"\"\"API call: delete all entries in a logger via a DELETE request\n\n :type project: string\n :param project: ID of project containing the log entries to delete\n\n :type logger_name: string\n :param logger_name: name of logger containing the log entries to delete\n \"\"\"\n options = None\n path = 'projects/%s/logs/%s' % (project, logger_name)\n try:\n self._gax_api.delete_log(path, options)\n except GaxError as exc:\n if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:\n raise NotFound(path)\n raise\n\n\nclass _SinksAPI(object):\n \"\"\"Helper mapping sink-related APIs.\n\n :type gax_api:\n :class:`google.logging.v2.config_service_v2_api.ConfigServiceV2Api`\n :param gax_api: API object used to make GAX requests.\n \"\"\"\n def __init__(self, gax_api):\n self._gax_api = gax_api\n\n def list_sinks(self, project, page_size=0, page_token=None):\n \"\"\"List sinks for the project associated with this client.\n\n :type project: string\n :param project: ID of the project whose sinks are to be listed.\n\n :type page_size: int\n :param page_size: maximum number of sinks to return, If not passed,\n defaults to a value set by the API.\n\n :type page_token: str\n :param page_token: opaque marker for the next \"page\" of sinks. 
If not\n passed, the API will return the first page of\n sinks.\n\n :rtype: tuple, (list, str)\n :returns: list of mappings, plus a \"next page token\" string:\n if not None, indicates that more sinks can be retrieved\n with another call (pass that value as ``page_token``).\n \"\"\"\n options = _build_paging_options(page_token)\n path = 'projects/%s' % (project,)\n page_iter = self._gax_api.list_sinks(path, page_size, options)\n sinks = [_log_sink_pb_to_mapping(log_sink_pb)\n for log_sink_pb in page_iter.next()]\n token = page_iter.page_token or None\n return sinks, token\n\n def sink_create(self, project, sink_name, filter_, destination):\n \"\"\"API call: create a sink resource.\n\n See:\n https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.sinks/create\n\n :type project: string\n :param project: ID of the project in which to create the sink.\n\n :type sink_name: string\n :param sink_name: the name of the sink\n\n :type filter_: string\n :param filter_: the advanced logs filter expression defining the\n entries exported by the sink.\n\n :type destination: string\n :param destination: destination URI for the entries exported by\n the sink.\n \"\"\"\n options = None\n parent = 'projects/%s' % (project,)\n sink_pb = LogSink(name=sink_name, filter=filter_,\n destination=destination)\n try:\n self._gax_api.create_sink(parent, sink_pb, options)\n except GaxError as exc:\n if exc_to_code(exc.cause) == StatusCode.FAILED_PRECONDITION:\n path = 'projects/%s/sinks/%s' % (project, sink_name)\n raise Conflict(path)\n raise\n\n def sink_get(self, project, sink_name):\n \"\"\"API call: retrieve a sink resource.\n\n :type project: string\n :param project: ID of the project containing the sink.\n\n :type sink_name: string\n :param sink_name: the name of the sink\n\n :rtype: dict\n :returns: The sink object returned from the API (converted from a\n protobuf to a dictionary).\n \"\"\"\n options = None\n path = 'projects/%s/sinks/%s' % (project, sink_name)\n try:\n sink_pb = self._gax_api.get_sink(path, options)\n except GaxError as exc:\n if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:\n raise NotFound(path)\n raise\n return _log_sink_pb_to_mapping(sink_pb)\n\n def sink_update(self, project, sink_name, filter_, destination):\n \"\"\"API call: update a sink resource.\n\n :type project: string\n :param project: ID of the project containing the sink.\n\n :type sink_name: string\n :param sink_name: the name of the sink\n\n :type filter_: string\n :param filter_: the advanced logs filter expression defining the\n entries exported by the sink.\n\n :type destination: string\n :param destination: destination URI for the entries exported by\n the sink.\n\n :rtype: dict\n :returns: The sink object returned from the API (converted from a\n protobuf to a dictionary).\n \"\"\"\n options = None\n path = 'projects/%s/sinks/%s' % (project, sink_name)\n sink_pb = LogSink(name=path, filter=filter_, destination=destination)\n try:\n self._gax_api.update_sink(path, sink_pb, options)\n except GaxError as exc:\n if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:\n raise NotFound(path)\n raise\n return _log_sink_pb_to_mapping(sink_pb)\n\n def sink_delete(self, project, sink_name):\n \"\"\"API call: delete a sink resource.\n\n :type project: string\n :param project: ID of the project containing the sink.\n\n :type sink_name: string\n :param sink_name: the name of the sink\n \"\"\"\n options = None\n path = 'projects/%s/sinks/%s' % (project, sink_name)\n try:\n self._gax_api.delete_sink(path, options)\n 
except GaxError as exc:\n if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:\n raise NotFound(path)\n raise\n\n\nclass _MetricsAPI(object):\n \"\"\"Helper mapping sink-related APIs.\n\n :type gax_api:\n :class:`google.logging.v2.metrics_service_v2_api.MetricsServiceV2Api`\n :param gax_api: API object used to make GAX requests.\n \"\"\"\n def __init__(self, gax_api):\n self._gax_api = gax_api\n\n def list_metrics(self, project, page_size=0, page_token=None):\n \"\"\"List metrics for the project associated with this client.\n\n :type project: string\n :param project: ID of the project whose metrics are to be listed.\n\n :type page_size: int\n :param page_size: maximum number of metrics to return, If not passed,\n defaults to a value set by the API.\n\n :type page_token: str\n :param page_token: opaque marker for the next \"page\" of metrics. If not\n passed, the API will return the first page of\n metrics.\n\n :rtype: tuple, (list, str)\n :returns: list of mappings, plus a \"next page token\" string:\n if not None, indicates that more metrics can be retrieved\n with another call (pass that value as ``page_token``).\n \"\"\"\n options = _build_paging_options(page_token)\n path = 'projects/%s' % (project,)\n page_iter = self._gax_api.list_log_metrics(path, page_size, options)\n metrics = [_log_metric_pb_to_mapping(log_metric_pb)\n for log_metric_pb in page_iter.next()]\n token = page_iter.page_token or None\n return metrics, token\n\n def metric_create(self, project, metric_name, filter_, description):\n \"\"\"API call: create a metric resource.\n\n See:\n https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.metrics/create\n\n :type project: string\n :param project: ID of the project in which to create the metric.\n\n :type metric_name: string\n :param metric_name: the name of the metric\n\n :type filter_: string\n :param filter_: the advanced logs filter expression defining the\n entries exported by the metric.\n\n :type description: string\n :param description: description of the metric.\n \"\"\"\n options = None\n parent = 'projects/%s' % (project,)\n metric_pb = LogMetric(name=metric_name, filter=filter_,\n description=description)\n try:\n self._gax_api.create_log_metric(parent, metric_pb, options)\n except GaxError as exc:\n if exc_to_code(exc.cause) == StatusCode.FAILED_PRECONDITION:\n path = 'projects/%s/metrics/%s' % (project, metric_name)\n raise Conflict(path)\n raise\n\n def metric_get(self, project, metric_name):\n \"\"\"API call: retrieve a metric resource.\n\n :type project: string\n :param project: ID of the project containing the metric.\n\n :type metric_name: string\n :param metric_name: the name of the metric\n\n :rtype: dict\n :returns: The metric object returned from the API (converted from a\n protobuf to a dictionary).\n \"\"\"\n options = None\n path = 'projects/%s/metrics/%s' % (project, metric_name)\n try:\n metric_pb = self._gax_api.get_log_metric(path, options)\n except GaxError as exc:\n if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:\n raise NotFound(path)\n raise\n return _log_metric_pb_to_mapping(metric_pb)\n\n def metric_update(self, project, metric_name, filter_, description):\n \"\"\"API call: update a metric resource.\n\n :type project: string\n :param project: ID of the project containing the metric.\n\n :type metric_name: string\n :param metric_name: the name of the metric\n\n :type filter_: string\n :param filter_: the advanced logs filter expression defining the\n entries exported by the metric.\n\n :type description: string\n 
:param description: description of the metric.\n\n :rtype: dict\n :returns: The metric object returned from the API (converted from a\n protobuf to a dictionary).\n \"\"\"\n options = None\n path = 'projects/%s/metrics/%s' % (project, metric_name)\n metric_pb = LogMetric(name=path, filter=filter_,\n description=description)\n try:\n self._gax_api.update_log_metric(path, metric_pb, options)\n except GaxError as exc:\n if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:\n raise NotFound(path)\n raise\n return _log_metric_pb_to_mapping(metric_pb)\n\n def metric_delete(self, project, metric_name):\n \"\"\"API call: delete a metric resource.\n\n :type project: string\n :param project: ID of the project containing the metric.\n\n :type metric_name: string\n :param metric_name: the name of the metric\n \"\"\"\n options = None\n path = 'projects/%s/metrics/%s' % (project, metric_name)\n try:\n self._gax_api.delete_log_metric(path, options)\n except GaxError as exc:\n if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:\n raise NotFound(path)\n raise\n\n\ndef _build_paging_options(page_token=None):\n \"\"\"Helper for :meth:'_PublisherAPI.list_topics' et aliae.\"\"\"\n if page_token is None:\n page_token = INITIAL_PAGE\n options = {'page_token': page_token}\n return CallOptions(**options)\n\n\ndef _mon_resource_pb_to_mapping(resource_pb):\n \"\"\"Helper for :func:_log_entry_pb_to_mapping\"\"\"\n mapping = {\n 'type': resource_pb.type,\n }\n if resource_pb.labels:\n mapping['labels'] = resource_pb.labels\n return mapping\n\n\ndef _pb_timestamp_to_rfc3339(timestamp_pb):\n \"\"\"Helper for :func:_log_entry_pb_to_mapping\"\"\"\n timestamp = _pb_timestamp_to_datetime(timestamp_pb)\n return _datetime_to_rfc3339(timestamp)\n\n\ndef _value_pb_to_value(value_pb):\n \"\"\"Helper for :func:`_log_entry_pb_to_mapping`.\"\"\"\n kind = value_pb.WhichOneof('kind')\n\n if kind is None:\n result = None\n\n elif kind == 'string_value':\n result = value_pb.string_value\n\n elif kind == 'bool_value':\n result = value_pb.bool_value\n\n elif kind == 'number_value':\n result = value_pb.number_value\n\n elif kind == 'list_value':\n result = [_value_pb_to_value(element)\n for element in value_pb.list_value.values]\n\n elif kind == 'struct_value':\n result = _struct_pb_to_mapping(value_pb.struct_value)\n\n else:\n raise ValueError('Value protobuf had unknown kind: %s' % (kind,))\n\n return result\n\n\ndef _struct_pb_to_mapping(struct_pb):\n \"\"\"Helper for :func:`_log_entry_pb_to_mapping`.\"\"\"\n return dict([(key, _value_pb_to_value(struct_pb.fields[key]))\n for key in struct_pb.fields])\n\n\ndef _log_entry_pb_to_mapping(entry_pb):\n \"\"\"Helper for :meth:`list_entries`, et aliae\n\n Ideally, would use a function from :mod:`protobuf.json_format`, but\n the right one isn't public. 
See:\n https://github.com/google/protobuf/issues/1351\n \"\"\"\n mapping = {\n 'logName': entry_pb.log_name,\n 'resource': _mon_resource_pb_to_mapping(entry_pb.resource),\n 'severity': LogSeverity.Name(entry_pb.severity),\n 'insertId': entry_pb.insert_id,\n 'timestamp': _pb_timestamp_to_rfc3339(entry_pb.timestamp),\n 'labels': entry_pb.labels,\n }\n if entry_pb.HasField('text_payload'):\n mapping['textPayload'] = entry_pb.text_payload\n\n if entry_pb.HasField('json_payload'):\n mapping['jsonPayload'] = _struct_pb_to_mapping(entry_pb.json_payload)\n\n if entry_pb.HasField('proto_payload'):\n mapping['protoPayload'] = entry_pb.proto_payload\n\n if entry_pb.http_request:\n request = entry_pb.http_request\n mapping['httpRequest'] = {\n 'requestMethod': request.request_method,\n 'requestUrl': request.request_url,\n 'status': request.status,\n 'referer': request.referer,\n 'userAgent': request.user_agent,\n 'cacheHit': request.cache_hit,\n 'requestSize': request.request_size,\n 'responseSize': request.response_size,\n 'remoteIp': request.remote_ip,\n }\n\n if entry_pb.operation:\n operation = entry_pb.operation\n mapping['operation'] = {\n 'producer': operation.producer,\n 'id': operation.id,\n 'first': operation.first,\n 'last': operation.last,\n }\n\n return mapping\n\n\ndef _http_request_mapping_to_pb(info, request):\n \"\"\"Helper for _log_entry_mapping_to_pb\"\"\"\n optional_request_keys = {\n 'requestMethod': 'request_method',\n 'requestUrl': 'request_url',\n 'status': 'status',\n 'referer': 'referer',\n 'userAgent': 'user_agent',\n 'cacheHit': 'cache_hit',\n 'requestSize': 'request_size',\n 'responseSize': 'response_size',\n 'remoteIp': 'remote_ip',\n }\n for key, pb_name in optional_request_keys.items():\n if key in info:\n setattr(request, pb_name, info[key])\n\n\ndef _log_operation_mapping_to_pb(info, operation):\n \"\"\"Helper for _log_entry_mapping_to_pb\"\"\"\n operation.producer = info['producer']\n operation.id = info['id']\n\n if 'first' in info:\n operation.first = info['first']\n\n if 'last' in info:\n operation.last = info['last']\n\n\ndef _log_entry_mapping_to_pb(mapping):\n \"\"\"Helper for :meth:`write_entries`, et aliae\n\n Ideally, would use a function from :mod:`protobuf.json_format`, but\n the right one isn't public. 
See:\n https://github.com/google/protobuf/issues/1351\n \"\"\"\n # pylint: disable=too-many-branches\n entry_pb = LogEntry()\n\n optional_scalar_keys = {\n 'logName': 'log_name',\n 'insertId': 'insert_id',\n 'textPayload': 'text_payload',\n }\n\n for key, pb_name in optional_scalar_keys.items():\n if key in mapping:\n setattr(entry_pb, pb_name, mapping[key])\n\n if 'resource' in mapping:\n entry_pb.resource.type = mapping['resource']['type']\n\n if 'severity' in mapping:\n severity = mapping['severity']\n if isinstance(severity, str):\n severity = LogSeverity.Value(severity)\n entry_pb.severity = severity\n\n if 'timestamp' in mapping:\n timestamp = _datetime_to_pb_timestamp(mapping['timestamp'])\n entry_pb.timestamp.CopyFrom(timestamp)\n\n if 'labels' in mapping:\n for key, value in mapping['labels'].items():\n entry_pb.labels[key] = value\n\n if 'jsonPayload' in mapping:\n for key, value in mapping['jsonPayload'].items():\n entry_pb.json_payload[key] = value\n\n if 'protoPayload' in mapping:\n Parse(json.dumps(mapping['protoPayload']), entry_pb.proto_payload)\n\n if 'httpRequest' in mapping:\n _http_request_mapping_to_pb(\n mapping['httpRequest'], entry_pb.http_request)\n\n if 'operation' in mapping:\n _log_operation_mapping_to_pb(\n mapping['operation'], entry_pb.operation)\n\n return entry_pb\n # pylint: enable=too-many-branches\n\n\ndef _log_sink_pb_to_mapping(sink_pb):\n \"\"\"Helper for :meth:`list_sinks`, et aliae\n\n Ideally, would use a function from :mod:`protobuf.json_format`, but\n the right one isn't public. See:\n https://github.com/google/protobuf/issues/1351\n \"\"\"\n return {\n 'name': sink_pb.name,\n 'destination': sink_pb.destination,\n 'filter': sink_pb.filter,\n }\n\n\ndef _log_metric_pb_to_mapping(metric_pb):\n \"\"\"Helper for :meth:`list_metrics`, et aliae\n\n Ideally, would use a function from :mod:`protobuf.json_format`, but\n the right one isn't public. See:\n https://github.com/google/protobuf/issues/1351\n \"\"\"\n return {\n 'name': metric_pb.name,\n 'description': metric_pb.description,\n 'filter': metric_pb.filter,\n }\n", "path": "gcloud/logging/_gax.py"}]} |
gh_patches_debug_1242 | rasdani/github-patches | git_diff | joke2k__faker-1743 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Missing return in faker-master\faker\providers\address\en_US\__init__.py
* Faker version: current
* OS: Windows 10 x64
Brief summary: a `return` statement is missing on row 496 (`def state_abbr(self, include_territories: bool = True) -> str:`), so the territories branch computes a value but never returns it.
### Steps to reproduce
1. Invoke `state_abbr()`
2. Check the output
### Expected behavior
Should return an element from the `states_and_territories_abbr` list
### Actual behavior
Returns an element from the `states_abbr` list only
--- END ISSUE ---
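For context, a minimal reproduction sketch of the reported behaviour (assuming a stock Faker install with the `en_US` locale; the sample count is arbitrary):
```python
from faker import Faker

fake = Faker("en_US")
# Because the territories branch discards its result, abbreviations such as
# "PR", "GU" or "VI" are never produced even with include_territories=True.
seen = {fake.state_abbr(include_territories=True) for _ in range(5000)}
print(seen & {"AS", "FM", "GU", "MH", "MP", "PW", "PR", "VI"})  # empty set under the bug
```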
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `faker/providers/address/en_US/__init__.py`
Content:
```
1 from collections import OrderedDict
2 from typing import Optional
3
4 from ..en import Provider as AddressProvider
5
6
7 class Provider(AddressProvider):
8 city_prefixes = ("North", "East", "West", "South", "New", "Lake", "Port")
9
10 city_suffixes = (
11 "town",
12 "ton",
13 "land",
14 "ville",
15 "berg",
16 "burgh",
17 "borough",
18 "bury",
19 "view",
20 "port",
21 "mouth",
22 "stad",
23 "furt",
24 "chester",
25 "mouth",
26 "fort",
27 "haven",
28 "side",
29 "shire",
30 )
31
32 building_number_formats = ("#####", "####", "###")
33
34 street_suffixes = (
35 "Alley",
36 "Avenue",
37 "Branch",
38 "Bridge",
39 "Brook",
40 "Brooks",
41 "Burg",
42 "Burgs",
43 "Bypass",
44 "Camp",
45 "Canyon",
46 "Cape",
47 "Causeway",
48 "Center",
49 "Centers",
50 "Circle",
51 "Circles",
52 "Cliff",
53 "Cliffs",
54 "Club",
55 "Common",
56 "Corner",
57 "Corners",
58 "Course",
59 "Court",
60 "Courts",
61 "Cove",
62 "Coves",
63 "Creek",
64 "Crescent",
65 "Crest",
66 "Crossing",
67 "Crossroad",
68 "Curve",
69 "Dale",
70 "Dam",
71 "Divide",
72 "Drive",
73 "Drive",
74 "Drives",
75 "Estate",
76 "Estates",
77 "Expressway",
78 "Extension",
79 "Extensions",
80 "Fall",
81 "Falls",
82 "Ferry",
83 "Field",
84 "Fields",
85 "Flat",
86 "Flats",
87 "Ford",
88 "Fords",
89 "Forest",
90 "Forge",
91 "Forges",
92 "Fork",
93 "Forks",
94 "Fort",
95 "Freeway",
96 "Garden",
97 "Gardens",
98 "Gateway",
99 "Glen",
100 "Glens",
101 "Green",
102 "Greens",
103 "Grove",
104 "Groves",
105 "Harbor",
106 "Harbors",
107 "Haven",
108 "Heights",
109 "Highway",
110 "Hill",
111 "Hills",
112 "Hollow",
113 "Inlet",
114 "Inlet",
115 "Island",
116 "Island",
117 "Islands",
118 "Islands",
119 "Isle",
120 "Isle",
121 "Junction",
122 "Junctions",
123 "Key",
124 "Keys",
125 "Knoll",
126 "Knolls",
127 "Lake",
128 "Lakes",
129 "Land",
130 "Landing",
131 "Lane",
132 "Light",
133 "Lights",
134 "Loaf",
135 "Lock",
136 "Locks",
137 "Locks",
138 "Lodge",
139 "Lodge",
140 "Loop",
141 "Mall",
142 "Manor",
143 "Manors",
144 "Meadow",
145 "Meadows",
146 "Mews",
147 "Mill",
148 "Mills",
149 "Mission",
150 "Mission",
151 "Motorway",
152 "Mount",
153 "Mountain",
154 "Mountain",
155 "Mountains",
156 "Mountains",
157 "Neck",
158 "Orchard",
159 "Oval",
160 "Overpass",
161 "Park",
162 "Parks",
163 "Parkway",
164 "Parkways",
165 "Pass",
166 "Passage",
167 "Path",
168 "Pike",
169 "Pine",
170 "Pines",
171 "Place",
172 "Plain",
173 "Plains",
174 "Plains",
175 "Plaza",
176 "Plaza",
177 "Point",
178 "Points",
179 "Port",
180 "Port",
181 "Ports",
182 "Ports",
183 "Prairie",
184 "Prairie",
185 "Radial",
186 "Ramp",
187 "Ranch",
188 "Rapid",
189 "Rapids",
190 "Rest",
191 "Ridge",
192 "Ridges",
193 "River",
194 "Road",
195 "Road",
196 "Roads",
197 "Roads",
198 "Route",
199 "Row",
200 "Rue",
201 "Run",
202 "Shoal",
203 "Shoals",
204 "Shore",
205 "Shores",
206 "Skyway",
207 "Spring",
208 "Springs",
209 "Springs",
210 "Spur",
211 "Spurs",
212 "Square",
213 "Square",
214 "Squares",
215 "Squares",
216 "Station",
217 "Station",
218 "Stravenue",
219 "Stravenue",
220 "Stream",
221 "Stream",
222 "Street",
223 "Street",
224 "Streets",
225 "Summit",
226 "Summit",
227 "Terrace",
228 "Throughway",
229 "Trace",
230 "Track",
231 "Trafficway",
232 "Trail",
233 "Trail",
234 "Tunnel",
235 "Tunnel",
236 "Turnpike",
237 "Turnpike",
238 "Underpass",
239 "Union",
240 "Unions",
241 "Valley",
242 "Valleys",
243 "Via",
244 "Viaduct",
245 "View",
246 "Views",
247 "Village",
248 "Village",
249 "Villages",
250 "Ville",
251 "Vista",
252 "Vista",
253 "Walk",
254 "Walks",
255 "Wall",
256 "Way",
257 "Ways",
258 "Well",
259 "Wells",
260 )
261
262 postcode_formats = ("#####", "#####-####")
263
264 states = (
265 "Alabama",
266 "Alaska",
267 "Arizona",
268 "Arkansas",
269 "California",
270 "Colorado",
271 "Connecticut",
272 "Delaware",
273 "Florida",
274 "Georgia",
275 "Hawaii",
276 "Idaho",
277 "Illinois",
278 "Indiana",
279 "Iowa",
280 "Kansas",
281 "Kentucky",
282 "Louisiana",
283 "Maine",
284 "Maryland",
285 "Massachusetts",
286 "Michigan",
287 "Minnesota",
288 "Mississippi",
289 "Missouri",
290 "Montana",
291 "Nebraska",
292 "Nevada",
293 "New Hampshire",
294 "New Jersey",
295 "New Mexico",
296 "New York",
297 "North Carolina",
298 "North Dakota",
299 "Ohio",
300 "Oklahoma",
301 "Oregon",
302 "Pennsylvania",
303 "Rhode Island",
304 "South Carolina",
305 "South Dakota",
306 "Tennessee",
307 "Texas",
308 "Utah",
309 "Vermont",
310 "Virginia",
311 "Washington",
312 "West Virginia",
313 "Wisconsin",
314 "Wyoming",
315 )
316 states_abbr = (
317 "AL",
318 "AK",
319 "AZ",
320 "AR",
321 "CA",
322 "CO",
323 "CT",
324 "DE",
325 "DC",
326 "FL",
327 "GA",
328 "HI",
329 "ID",
330 "IL",
331 "IN",
332 "IA",
333 "KS",
334 "KY",
335 "LA",
336 "ME",
337 "MD",
338 "MA",
339 "MI",
340 "MN",
341 "MS",
342 "MO",
343 "MT",
344 "NE",
345 "NV",
346 "NH",
347 "NJ",
348 "NM",
349 "NY",
350 "NC",
351 "ND",
352 "OH",
353 "OK",
354 "OR",
355 "PA",
356 "RI",
357 "SC",
358 "SD",
359 "TN",
360 "TX",
361 "UT",
362 "VT",
363 "VA",
364 "WA",
365 "WV",
366 "WI",
367 "WY",
368 )
369
370 states_postcode = {
371 "AL": (35004, 36925),
372 "AK": (99501, 99950),
373 "AZ": (85001, 86556),
374 "AR": (71601, 72959),
375 "CA": (90001, 96162),
376 "CO": (80001, 81658),
377 "CT": (6001, 6389),
378 "DE": (19701, 19980),
379 "DC": (20001, 20039),
380 "FL": (32004, 34997),
381 "GA": (30001, 31999),
382 "HI": (96701, 96898),
383 "ID": (83201, 83876),
384 "IL": (60001, 62999),
385 "IN": (46001, 47997),
386 "IA": (50001, 52809),
387 "KS": (66002, 67954),
388 "KY": (40003, 42788),
389 "LA": (70001, 71232),
390 "ME": (3901, 4992),
391 "MD": (20812, 21930),
392 "MA": (1001, 2791),
393 "MI": (48001, 49971),
394 "MN": (55001, 56763),
395 "MS": (38601, 39776),
396 "MO": (63001, 65899),
397 "MT": (59001, 59937),
398 "NE": (68001, 68118),
399 "NV": (88901, 89883),
400 "NH": (3031, 3897),
401 "NJ": (7001, 8989),
402 "NM": (87001, 88441),
403 "NY": (10001, 14905),
404 "NC": (27006, 28909),
405 "ND": (58001, 58856),
406 "OH": (43001, 45999),
407 "OK": (73001, 73199),
408 "OR": (97001, 97920),
409 "PA": (15001, 19640),
410 "RI": (2801, 2940),
411 "SC": (29001, 29948),
412 "SD": (57001, 57799),
413 "TN": (37010, 38589),
414 "TX": (75503, 79999),
415 "UT": (84001, 84784),
416 "VT": (5001, 5495),
417 "VA": (22001, 24658),
418 "WA": (98001, 99403),
419 "WV": (24701, 26886),
420 "WI": (53001, 54990),
421 "WY": (82001, 83128),
422 }
423
424 territories_abbr = (
425 "AS",
426 "FM",
427 "GU",
428 "MH",
429 "MP",
430 "PW",
431 "PR",
432 "VI",
433 )
434
435 states_and_territories_abbr = states_abbr + territories_abbr
436
437 military_state_abbr = ("AE", "AA", "AP")
438
439 military_ship_prefix = ("USS", "USNS", "USNV", "USCGC")
440
441 military_apo_format = "PSC ####, Box ####"
442
443 military_dpo_format = "Unit #### Box ####"
444
445 city_formats = (
446 "{{city_prefix}} {{first_name}}{{city_suffix}}",
447 "{{city_prefix}} {{first_name}}",
448 "{{first_name}}{{city_suffix}}",
449 "{{last_name}}{{city_suffix}}",
450 )
451
452 street_name_formats = (
453 "{{first_name}} {{street_suffix}}",
454 "{{last_name}} {{street_suffix}}",
455 )
456
457 street_address_formats = (
458 "{{building_number}} {{street_name}}",
459 "{{building_number}} {{street_name}} {{secondary_address}}",
460 )
461
462 address_formats = OrderedDict(
463 (
464 ("{{street_address}}\n{{city}}, {{state_abbr}} {{postcode}}", 25.0),
465 # military address formatting.
466 ("{{military_apo}}\nAPO {{military_state}} {{postcode}}", 1.0),
467 (
468 "{{military_ship}} {{last_name}}\nFPO {{military_state}} {{postcode}}",
469 1.0,
470 ),
471 ("{{military_dpo}}\nDPO {{military_state}} {{postcode}}", 1.0),
472 )
473 )
474
475 secondary_address_formats = ("Apt. ###", "Suite ###")
476
477 def city_prefix(self) -> str:
478 return self.random_element(self.city_prefixes)
479
480 def secondary_address(self) -> str:
481 return self.numerify(self.random_element(self.secondary_address_formats))
482
483 def administrative_unit(self) -> str:
484 return self.random_element(self.states)
485
486 state = administrative_unit
487
488 def state_abbr(self, include_territories: bool = True) -> str:
489 """
490 :returns: A random state or territory abbreviation.
491
492 :param include_territories: If True, territories will be included.
493 If False, only states will be returned.
494 """
495 if include_territories:
496 self.random_element(self.states_and_territories_abbr)
497 return self.random_element(self.states_abbr)
498
499 def postcode(self) -> str:
500 return "%05d" % self.generator.random.randint(501, 99950)
501
502 def zipcode_plus4(self) -> str:
503 return "%s-%04d" % (self.zipcode(), self.generator.random.randint(1, 9999))
504
505 def postcode_in_state(self, state_abbr: Optional[str] = None) -> str:
506 """
507 :returns: A random postcode within the provided state abbreviation
508
509 :param state_abbr: A state abbreviation
510 """
511 if state_abbr is None:
512 state_abbr = self.random_element(self.states_abbr)
513
514 if state_abbr in self.states_abbr:
515 postcode = "%d" % (
516 self.generator.random.randint(
517 self.states_postcode[state_abbr][0],
518 self.states_postcode[state_abbr][1],
519 )
520 )
521
522 if len(postcode) == 4:
523 postcode = "0%s" % postcode
524
525 return postcode
526
527 else:
528 raise Exception("State Abbreviation not found in list")
529
530 def military_ship(self) -> str:
531 """
532 :example: 'USS'
533 """
534 return self.random_element(self.military_ship_prefix)
535
536 def military_state(self) -> str:
537 """
538 :example: 'APO'
539 """
540 return self.random_element(self.military_state_abbr)
541
542 def military_apo(self) -> str:
543 """
544 :example: 'PSC 5394 Box 3492
545 """
546 return self.numerify(self.military_apo_format)
547
548 def military_dpo(self) -> str:
549 """
550 :example: 'Unit 3333 Box 9342'
551 """
552 return self.numerify(self.military_dpo_format)
553
554 # Aliases
555 def zipcode(self) -> str:
556 return self.postcode()
557
558 def zipcode_in_state(self, state_abbr: Optional[str] = None) -> str:
559 return self.postcode_in_state(state_abbr)
560
561 def postalcode(self) -> str:
562 return self.postcode()
563
564 def postalcode_in_state(self, state_abbr: Optional[str] = None) -> str:
565 return self.postcode_in_state(state_abbr)
566
567 def postalcode_plus4(self) -> str:
568 return self.zipcode_plus4()
569
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/faker/providers/address/en_US/__init__.py b/faker/providers/address/en_US/__init__.py
--- a/faker/providers/address/en_US/__init__.py
+++ b/faker/providers/address/en_US/__init__.py
@@ -493,7 +493,7 @@
If False, only states will be returned.
"""
if include_territories:
- self.random_element(self.states_and_territories_abbr)
+ return self.random_element(self.states_and_territories_abbr)
return self.random_element(self.states_abbr)
def postcode(self) -> str:
| {"golden_diff": "diff --git a/faker/providers/address/en_US/__init__.py b/faker/providers/address/en_US/__init__.py\n--- a/faker/providers/address/en_US/__init__.py\n+++ b/faker/providers/address/en_US/__init__.py\n@@ -493,7 +493,7 @@\n If False, only states will be returned.\n \"\"\"\n if include_territories:\n- self.random_element(self.states_and_territories_abbr)\n+ return self.random_element(self.states_and_territories_abbr)\n return self.random_element(self.states_abbr)\n \n def postcode(self) -> str:\n", "issue": "Missing return in faker-master\\faker\\providers\\address\\en_US\\__init__.py\n* Faker version: current\r\n* OS: Windows 10 x64\r\n\r\nBrief summary of the issue goes here.\r\nMissing return on row 496 (def state_abbr(self, include_territories: bool = True) -> str:)\r\n\r\n### Steps to reproduce\r\n\r\n1. invoke state_abbr()\r\n2. check the output \r\n\r\n### Expected behavior\r\nShould return an element from states_and_territories_abbr list\r\n\r\n### Actual behavior\r\nReturns an element from states_abbr list only\r\n\n", "before_files": [{"content": "from collections import OrderedDict\nfrom typing import Optional\n\nfrom ..en import Provider as AddressProvider\n\n\nclass Provider(AddressProvider):\n city_prefixes = (\"North\", \"East\", \"West\", \"South\", \"New\", \"Lake\", \"Port\")\n\n city_suffixes = (\n \"town\",\n \"ton\",\n \"land\",\n \"ville\",\n \"berg\",\n \"burgh\",\n \"borough\",\n \"bury\",\n \"view\",\n \"port\",\n \"mouth\",\n \"stad\",\n \"furt\",\n \"chester\",\n \"mouth\",\n \"fort\",\n \"haven\",\n \"side\",\n \"shire\",\n )\n\n building_number_formats = (\"#####\", \"####\", \"###\")\n\n street_suffixes = (\n \"Alley\",\n \"Avenue\",\n \"Branch\",\n \"Bridge\",\n \"Brook\",\n \"Brooks\",\n \"Burg\",\n \"Burgs\",\n \"Bypass\",\n \"Camp\",\n \"Canyon\",\n \"Cape\",\n \"Causeway\",\n \"Center\",\n \"Centers\",\n \"Circle\",\n \"Circles\",\n \"Cliff\",\n \"Cliffs\",\n \"Club\",\n \"Common\",\n \"Corner\",\n \"Corners\",\n \"Course\",\n \"Court\",\n \"Courts\",\n \"Cove\",\n \"Coves\",\n \"Creek\",\n \"Crescent\",\n \"Crest\",\n \"Crossing\",\n \"Crossroad\",\n \"Curve\",\n \"Dale\",\n \"Dam\",\n \"Divide\",\n \"Drive\",\n \"Drive\",\n \"Drives\",\n \"Estate\",\n \"Estates\",\n \"Expressway\",\n \"Extension\",\n \"Extensions\",\n \"Fall\",\n \"Falls\",\n \"Ferry\",\n \"Field\",\n \"Fields\",\n \"Flat\",\n \"Flats\",\n \"Ford\",\n \"Fords\",\n \"Forest\",\n \"Forge\",\n \"Forges\",\n \"Fork\",\n \"Forks\",\n \"Fort\",\n \"Freeway\",\n \"Garden\",\n \"Gardens\",\n \"Gateway\",\n \"Glen\",\n \"Glens\",\n \"Green\",\n \"Greens\",\n \"Grove\",\n \"Groves\",\n \"Harbor\",\n \"Harbors\",\n \"Haven\",\n \"Heights\",\n \"Highway\",\n \"Hill\",\n \"Hills\",\n \"Hollow\",\n \"Inlet\",\n \"Inlet\",\n \"Island\",\n \"Island\",\n \"Islands\",\n \"Islands\",\n \"Isle\",\n \"Isle\",\n \"Junction\",\n \"Junctions\",\n \"Key\",\n \"Keys\",\n \"Knoll\",\n \"Knolls\",\n \"Lake\",\n \"Lakes\",\n \"Land\",\n \"Landing\",\n \"Lane\",\n \"Light\",\n \"Lights\",\n \"Loaf\",\n \"Lock\",\n \"Locks\",\n \"Locks\",\n \"Lodge\",\n \"Lodge\",\n \"Loop\",\n \"Mall\",\n \"Manor\",\n \"Manors\",\n \"Meadow\",\n \"Meadows\",\n \"Mews\",\n \"Mill\",\n \"Mills\",\n \"Mission\",\n \"Mission\",\n \"Motorway\",\n \"Mount\",\n \"Mountain\",\n \"Mountain\",\n \"Mountains\",\n \"Mountains\",\n \"Neck\",\n \"Orchard\",\n \"Oval\",\n \"Overpass\",\n \"Park\",\n \"Parks\",\n \"Parkway\",\n \"Parkways\",\n \"Pass\",\n \"Passage\",\n \"Path\",\n \"Pike\",\n \"Pine\",\n \"Pines\",\n \"Place\",\n 
\"Plain\",\n \"Plains\",\n \"Plains\",\n \"Plaza\",\n \"Plaza\",\n \"Point\",\n \"Points\",\n \"Port\",\n \"Port\",\n \"Ports\",\n \"Ports\",\n \"Prairie\",\n \"Prairie\",\n \"Radial\",\n \"Ramp\",\n \"Ranch\",\n \"Rapid\",\n \"Rapids\",\n \"Rest\",\n \"Ridge\",\n \"Ridges\",\n \"River\",\n \"Road\",\n \"Road\",\n \"Roads\",\n \"Roads\",\n \"Route\",\n \"Row\",\n \"Rue\",\n \"Run\",\n \"Shoal\",\n \"Shoals\",\n \"Shore\",\n \"Shores\",\n \"Skyway\",\n \"Spring\",\n \"Springs\",\n \"Springs\",\n \"Spur\",\n \"Spurs\",\n \"Square\",\n \"Square\",\n \"Squares\",\n \"Squares\",\n \"Station\",\n \"Station\",\n \"Stravenue\",\n \"Stravenue\",\n \"Stream\",\n \"Stream\",\n \"Street\",\n \"Street\",\n \"Streets\",\n \"Summit\",\n \"Summit\",\n \"Terrace\",\n \"Throughway\",\n \"Trace\",\n \"Track\",\n \"Trafficway\",\n \"Trail\",\n \"Trail\",\n \"Tunnel\",\n \"Tunnel\",\n \"Turnpike\",\n \"Turnpike\",\n \"Underpass\",\n \"Union\",\n \"Unions\",\n \"Valley\",\n \"Valleys\",\n \"Via\",\n \"Viaduct\",\n \"View\",\n \"Views\",\n \"Village\",\n \"Village\",\n \"Villages\",\n \"Ville\",\n \"Vista\",\n \"Vista\",\n \"Walk\",\n \"Walks\",\n \"Wall\",\n \"Way\",\n \"Ways\",\n \"Well\",\n \"Wells\",\n )\n\n postcode_formats = (\"#####\", \"#####-####\")\n\n states = (\n \"Alabama\",\n \"Alaska\",\n \"Arizona\",\n \"Arkansas\",\n \"California\",\n \"Colorado\",\n \"Connecticut\",\n \"Delaware\",\n \"Florida\",\n \"Georgia\",\n \"Hawaii\",\n \"Idaho\",\n \"Illinois\",\n \"Indiana\",\n \"Iowa\",\n \"Kansas\",\n \"Kentucky\",\n \"Louisiana\",\n \"Maine\",\n \"Maryland\",\n \"Massachusetts\",\n \"Michigan\",\n \"Minnesota\",\n \"Mississippi\",\n \"Missouri\",\n \"Montana\",\n \"Nebraska\",\n \"Nevada\",\n \"New Hampshire\",\n \"New Jersey\",\n \"New Mexico\",\n \"New York\",\n \"North Carolina\",\n \"North Dakota\",\n \"Ohio\",\n \"Oklahoma\",\n \"Oregon\",\n \"Pennsylvania\",\n \"Rhode Island\",\n \"South Carolina\",\n \"South Dakota\",\n \"Tennessee\",\n \"Texas\",\n \"Utah\",\n \"Vermont\",\n \"Virginia\",\n \"Washington\",\n \"West Virginia\",\n \"Wisconsin\",\n \"Wyoming\",\n )\n states_abbr = (\n \"AL\",\n \"AK\",\n \"AZ\",\n \"AR\",\n \"CA\",\n \"CO\",\n \"CT\",\n \"DE\",\n \"DC\",\n \"FL\",\n \"GA\",\n \"HI\",\n \"ID\",\n \"IL\",\n \"IN\",\n \"IA\",\n \"KS\",\n \"KY\",\n \"LA\",\n \"ME\",\n \"MD\",\n \"MA\",\n \"MI\",\n \"MN\",\n \"MS\",\n \"MO\",\n \"MT\",\n \"NE\",\n \"NV\",\n \"NH\",\n \"NJ\",\n \"NM\",\n \"NY\",\n \"NC\",\n \"ND\",\n \"OH\",\n \"OK\",\n \"OR\",\n \"PA\",\n \"RI\",\n \"SC\",\n \"SD\",\n \"TN\",\n \"TX\",\n \"UT\",\n \"VT\",\n \"VA\",\n \"WA\",\n \"WV\",\n \"WI\",\n \"WY\",\n )\n\n states_postcode = {\n \"AL\": (35004, 36925),\n \"AK\": (99501, 99950),\n \"AZ\": (85001, 86556),\n \"AR\": (71601, 72959),\n \"CA\": (90001, 96162),\n \"CO\": (80001, 81658),\n \"CT\": (6001, 6389),\n \"DE\": (19701, 19980),\n \"DC\": (20001, 20039),\n \"FL\": (32004, 34997),\n \"GA\": (30001, 31999),\n \"HI\": (96701, 96898),\n \"ID\": (83201, 83876),\n \"IL\": (60001, 62999),\n \"IN\": (46001, 47997),\n \"IA\": (50001, 52809),\n \"KS\": (66002, 67954),\n \"KY\": (40003, 42788),\n \"LA\": (70001, 71232),\n \"ME\": (3901, 4992),\n \"MD\": (20812, 21930),\n \"MA\": (1001, 2791),\n \"MI\": (48001, 49971),\n \"MN\": (55001, 56763),\n \"MS\": (38601, 39776),\n \"MO\": (63001, 65899),\n \"MT\": (59001, 59937),\n \"NE\": (68001, 68118),\n \"NV\": (88901, 89883),\n \"NH\": (3031, 3897),\n \"NJ\": (7001, 8989),\n \"NM\": (87001, 88441),\n \"NY\": (10001, 14905),\n \"NC\": (27006, 28909),\n \"ND\": (58001, 58856),\n 
\"OH\": (43001, 45999),\n \"OK\": (73001, 73199),\n \"OR\": (97001, 97920),\n \"PA\": (15001, 19640),\n \"RI\": (2801, 2940),\n \"SC\": (29001, 29948),\n \"SD\": (57001, 57799),\n \"TN\": (37010, 38589),\n \"TX\": (75503, 79999),\n \"UT\": (84001, 84784),\n \"VT\": (5001, 5495),\n \"VA\": (22001, 24658),\n \"WA\": (98001, 99403),\n \"WV\": (24701, 26886),\n \"WI\": (53001, 54990),\n \"WY\": (82001, 83128),\n }\n\n territories_abbr = (\n \"AS\",\n \"FM\",\n \"GU\",\n \"MH\",\n \"MP\",\n \"PW\",\n \"PR\",\n \"VI\",\n )\n\n states_and_territories_abbr = states_abbr + territories_abbr\n\n military_state_abbr = (\"AE\", \"AA\", \"AP\")\n\n military_ship_prefix = (\"USS\", \"USNS\", \"USNV\", \"USCGC\")\n\n military_apo_format = \"PSC ####, Box ####\"\n\n military_dpo_format = \"Unit #### Box ####\"\n\n city_formats = (\n \"{{city_prefix}} {{first_name}}{{city_suffix}}\",\n \"{{city_prefix}} {{first_name}}\",\n \"{{first_name}}{{city_suffix}}\",\n \"{{last_name}}{{city_suffix}}\",\n )\n\n street_name_formats = (\n \"{{first_name}} {{street_suffix}}\",\n \"{{last_name}} {{street_suffix}}\",\n )\n\n street_address_formats = (\n \"{{building_number}} {{street_name}}\",\n \"{{building_number}} {{street_name}} {{secondary_address}}\",\n )\n\n address_formats = OrderedDict(\n (\n (\"{{street_address}}\\n{{city}}, {{state_abbr}} {{postcode}}\", 25.0),\n # military address formatting.\n (\"{{military_apo}}\\nAPO {{military_state}} {{postcode}}\", 1.0),\n (\n \"{{military_ship}} {{last_name}}\\nFPO {{military_state}} {{postcode}}\",\n 1.0,\n ),\n (\"{{military_dpo}}\\nDPO {{military_state}} {{postcode}}\", 1.0),\n )\n )\n\n secondary_address_formats = (\"Apt. ###\", \"Suite ###\")\n\n def city_prefix(self) -> str:\n return self.random_element(self.city_prefixes)\n\n def secondary_address(self) -> str:\n return self.numerify(self.random_element(self.secondary_address_formats))\n\n def administrative_unit(self) -> str:\n return self.random_element(self.states)\n\n state = administrative_unit\n\n def state_abbr(self, include_territories: bool = True) -> str:\n \"\"\"\n :returns: A random state or territory abbreviation.\n\n :param include_territories: If True, territories will be included.\n If False, only states will be returned.\n \"\"\"\n if include_territories:\n self.random_element(self.states_and_territories_abbr)\n return self.random_element(self.states_abbr)\n\n def postcode(self) -> str:\n return \"%05d\" % self.generator.random.randint(501, 99950)\n\n def zipcode_plus4(self) -> str:\n return \"%s-%04d\" % (self.zipcode(), self.generator.random.randint(1, 9999))\n\n def postcode_in_state(self, state_abbr: Optional[str] = None) -> str:\n \"\"\"\n :returns: A random postcode within the provided state abbreviation\n\n :param state_abbr: A state abbreviation\n \"\"\"\n if state_abbr is None:\n state_abbr = self.random_element(self.states_abbr)\n\n if state_abbr in self.states_abbr:\n postcode = \"%d\" % (\n self.generator.random.randint(\n self.states_postcode[state_abbr][0],\n self.states_postcode[state_abbr][1],\n )\n )\n\n if len(postcode) == 4:\n postcode = \"0%s\" % postcode\n\n return postcode\n\n else:\n raise Exception(\"State Abbreviation not found in list\")\n\n def military_ship(self) -> str:\n \"\"\"\n :example: 'USS'\n \"\"\"\n return self.random_element(self.military_ship_prefix)\n\n def military_state(self) -> str:\n \"\"\"\n :example: 'APO'\n \"\"\"\n return self.random_element(self.military_state_abbr)\n\n def military_apo(self) -> str:\n \"\"\"\n :example: 'PSC 5394 Box 3492\n 
\"\"\"\n return self.numerify(self.military_apo_format)\n\n def military_dpo(self) -> str:\n \"\"\"\n :example: 'Unit 3333 Box 9342'\n \"\"\"\n return self.numerify(self.military_dpo_format)\n\n # Aliases\n def zipcode(self) -> str:\n return self.postcode()\n\n def zipcode_in_state(self, state_abbr: Optional[str] = None) -> str:\n return self.postcode_in_state(state_abbr)\n\n def postalcode(self) -> str:\n return self.postcode()\n\n def postalcode_in_state(self, state_abbr: Optional[str] = None) -> str:\n return self.postcode_in_state(state_abbr)\n\n def postalcode_plus4(self) -> str:\n return self.zipcode_plus4()\n", "path": "faker/providers/address/en_US/__init__.py"}], "after_files": [{"content": "from collections import OrderedDict\nfrom typing import Optional\n\nfrom ..en import Provider as AddressProvider\n\n\nclass Provider(AddressProvider):\n city_prefixes = (\"North\", \"East\", \"West\", \"South\", \"New\", \"Lake\", \"Port\")\n\n city_suffixes = (\n \"town\",\n \"ton\",\n \"land\",\n \"ville\",\n \"berg\",\n \"burgh\",\n \"borough\",\n \"bury\",\n \"view\",\n \"port\",\n \"mouth\",\n \"stad\",\n \"furt\",\n \"chester\",\n \"mouth\",\n \"fort\",\n \"haven\",\n \"side\",\n \"shire\",\n )\n\n building_number_formats = (\"#####\", \"####\", \"###\")\n\n street_suffixes = (\n \"Alley\",\n \"Avenue\",\n \"Branch\",\n \"Bridge\",\n \"Brook\",\n \"Brooks\",\n \"Burg\",\n \"Burgs\",\n \"Bypass\",\n \"Camp\",\n \"Canyon\",\n \"Cape\",\n \"Causeway\",\n \"Center\",\n \"Centers\",\n \"Circle\",\n \"Circles\",\n \"Cliff\",\n \"Cliffs\",\n \"Club\",\n \"Common\",\n \"Corner\",\n \"Corners\",\n \"Course\",\n \"Court\",\n \"Courts\",\n \"Cove\",\n \"Coves\",\n \"Creek\",\n \"Crescent\",\n \"Crest\",\n \"Crossing\",\n \"Crossroad\",\n \"Curve\",\n \"Dale\",\n \"Dam\",\n \"Divide\",\n \"Drive\",\n \"Drive\",\n \"Drives\",\n \"Estate\",\n \"Estates\",\n \"Expressway\",\n \"Extension\",\n \"Extensions\",\n \"Fall\",\n \"Falls\",\n \"Ferry\",\n \"Field\",\n \"Fields\",\n \"Flat\",\n \"Flats\",\n \"Ford\",\n \"Fords\",\n \"Forest\",\n \"Forge\",\n \"Forges\",\n \"Fork\",\n \"Forks\",\n \"Fort\",\n \"Freeway\",\n \"Garden\",\n \"Gardens\",\n \"Gateway\",\n \"Glen\",\n \"Glens\",\n \"Green\",\n \"Greens\",\n \"Grove\",\n \"Groves\",\n \"Harbor\",\n \"Harbors\",\n \"Haven\",\n \"Heights\",\n \"Highway\",\n \"Hill\",\n \"Hills\",\n \"Hollow\",\n \"Inlet\",\n \"Inlet\",\n \"Island\",\n \"Island\",\n \"Islands\",\n \"Islands\",\n \"Isle\",\n \"Isle\",\n \"Junction\",\n \"Junctions\",\n \"Key\",\n \"Keys\",\n \"Knoll\",\n \"Knolls\",\n \"Lake\",\n \"Lakes\",\n \"Land\",\n \"Landing\",\n \"Lane\",\n \"Light\",\n \"Lights\",\n \"Loaf\",\n \"Lock\",\n \"Locks\",\n \"Locks\",\n \"Lodge\",\n \"Lodge\",\n \"Loop\",\n \"Mall\",\n \"Manor\",\n \"Manors\",\n \"Meadow\",\n \"Meadows\",\n \"Mews\",\n \"Mill\",\n \"Mills\",\n \"Mission\",\n \"Mission\",\n \"Motorway\",\n \"Mount\",\n \"Mountain\",\n \"Mountain\",\n \"Mountains\",\n \"Mountains\",\n \"Neck\",\n \"Orchard\",\n \"Oval\",\n \"Overpass\",\n \"Park\",\n \"Parks\",\n \"Parkway\",\n \"Parkways\",\n \"Pass\",\n \"Passage\",\n \"Path\",\n \"Pike\",\n \"Pine\",\n \"Pines\",\n \"Place\",\n \"Plain\",\n \"Plains\",\n \"Plains\",\n \"Plaza\",\n \"Plaza\",\n \"Point\",\n \"Points\",\n \"Port\",\n \"Port\",\n \"Ports\",\n \"Ports\",\n \"Prairie\",\n \"Prairie\",\n \"Radial\",\n \"Ramp\",\n \"Ranch\",\n \"Rapid\",\n \"Rapids\",\n \"Rest\",\n \"Ridge\",\n \"Ridges\",\n \"River\",\n \"Road\",\n \"Road\",\n \"Roads\",\n \"Roads\",\n \"Route\",\n \"Row\",\n \"Rue\",\n 
\"Run\",\n \"Shoal\",\n \"Shoals\",\n \"Shore\",\n \"Shores\",\n \"Skyway\",\n \"Spring\",\n \"Springs\",\n \"Springs\",\n \"Spur\",\n \"Spurs\",\n \"Square\",\n \"Square\",\n \"Squares\",\n \"Squares\",\n \"Station\",\n \"Station\",\n \"Stravenue\",\n \"Stravenue\",\n \"Stream\",\n \"Stream\",\n \"Street\",\n \"Street\",\n \"Streets\",\n \"Summit\",\n \"Summit\",\n \"Terrace\",\n \"Throughway\",\n \"Trace\",\n \"Track\",\n \"Trafficway\",\n \"Trail\",\n \"Trail\",\n \"Tunnel\",\n \"Tunnel\",\n \"Turnpike\",\n \"Turnpike\",\n \"Underpass\",\n \"Union\",\n \"Unions\",\n \"Valley\",\n \"Valleys\",\n \"Via\",\n \"Viaduct\",\n \"View\",\n \"Views\",\n \"Village\",\n \"Village\",\n \"Villages\",\n \"Ville\",\n \"Vista\",\n \"Vista\",\n \"Walk\",\n \"Walks\",\n \"Wall\",\n \"Way\",\n \"Ways\",\n \"Well\",\n \"Wells\",\n )\n\n postcode_formats = (\"#####\", \"#####-####\")\n\n states = (\n \"Alabama\",\n \"Alaska\",\n \"Arizona\",\n \"Arkansas\",\n \"California\",\n \"Colorado\",\n \"Connecticut\",\n \"Delaware\",\n \"Florida\",\n \"Georgia\",\n \"Hawaii\",\n \"Idaho\",\n \"Illinois\",\n \"Indiana\",\n \"Iowa\",\n \"Kansas\",\n \"Kentucky\",\n \"Louisiana\",\n \"Maine\",\n \"Maryland\",\n \"Massachusetts\",\n \"Michigan\",\n \"Minnesota\",\n \"Mississippi\",\n \"Missouri\",\n \"Montana\",\n \"Nebraska\",\n \"Nevada\",\n \"New Hampshire\",\n \"New Jersey\",\n \"New Mexico\",\n \"New York\",\n \"North Carolina\",\n \"North Dakota\",\n \"Ohio\",\n \"Oklahoma\",\n \"Oregon\",\n \"Pennsylvania\",\n \"Rhode Island\",\n \"South Carolina\",\n \"South Dakota\",\n \"Tennessee\",\n \"Texas\",\n \"Utah\",\n \"Vermont\",\n \"Virginia\",\n \"Washington\",\n \"West Virginia\",\n \"Wisconsin\",\n \"Wyoming\",\n )\n states_abbr = (\n \"AL\",\n \"AK\",\n \"AZ\",\n \"AR\",\n \"CA\",\n \"CO\",\n \"CT\",\n \"DE\",\n \"DC\",\n \"FL\",\n \"GA\",\n \"HI\",\n \"ID\",\n \"IL\",\n \"IN\",\n \"IA\",\n \"KS\",\n \"KY\",\n \"LA\",\n \"ME\",\n \"MD\",\n \"MA\",\n \"MI\",\n \"MN\",\n \"MS\",\n \"MO\",\n \"MT\",\n \"NE\",\n \"NV\",\n \"NH\",\n \"NJ\",\n \"NM\",\n \"NY\",\n \"NC\",\n \"ND\",\n \"OH\",\n \"OK\",\n \"OR\",\n \"PA\",\n \"RI\",\n \"SC\",\n \"SD\",\n \"TN\",\n \"TX\",\n \"UT\",\n \"VT\",\n \"VA\",\n \"WA\",\n \"WV\",\n \"WI\",\n \"WY\",\n )\n\n states_postcode = {\n \"AL\": (35004, 36925),\n \"AK\": (99501, 99950),\n \"AZ\": (85001, 86556),\n \"AR\": (71601, 72959),\n \"CA\": (90001, 96162),\n \"CO\": (80001, 81658),\n \"CT\": (6001, 6389),\n \"DE\": (19701, 19980),\n \"DC\": (20001, 20039),\n \"FL\": (32004, 34997),\n \"GA\": (30001, 31999),\n \"HI\": (96701, 96898),\n \"ID\": (83201, 83876),\n \"IL\": (60001, 62999),\n \"IN\": (46001, 47997),\n \"IA\": (50001, 52809),\n \"KS\": (66002, 67954),\n \"KY\": (40003, 42788),\n \"LA\": (70001, 71232),\n \"ME\": (3901, 4992),\n \"MD\": (20812, 21930),\n \"MA\": (1001, 2791),\n \"MI\": (48001, 49971),\n \"MN\": (55001, 56763),\n \"MS\": (38601, 39776),\n \"MO\": (63001, 65899),\n \"MT\": (59001, 59937),\n \"NE\": (68001, 68118),\n \"NV\": (88901, 89883),\n \"NH\": (3031, 3897),\n \"NJ\": (7001, 8989),\n \"NM\": (87001, 88441),\n \"NY\": (10001, 14905),\n \"NC\": (27006, 28909),\n \"ND\": (58001, 58856),\n \"OH\": (43001, 45999),\n \"OK\": (73001, 73199),\n \"OR\": (97001, 97920),\n \"PA\": (15001, 19640),\n \"RI\": (2801, 2940),\n \"SC\": (29001, 29948),\n \"SD\": (57001, 57799),\n \"TN\": (37010, 38589),\n \"TX\": (75503, 79999),\n \"UT\": (84001, 84784),\n \"VT\": (5001, 5495),\n \"VA\": (22001, 24658),\n \"WA\": (98001, 99403),\n \"WV\": (24701, 26886),\n \"WI\": (53001, 
54990),\n \"WY\": (82001, 83128),\n }\n\n territories_abbr = (\n \"AS\",\n \"FM\",\n \"GU\",\n \"MH\",\n \"MP\",\n \"PW\",\n \"PR\",\n \"VI\",\n )\n\n states_and_territories_abbr = states_abbr + territories_abbr\n\n military_state_abbr = (\"AE\", \"AA\", \"AP\")\n\n military_ship_prefix = (\"USS\", \"USNS\", \"USNV\", \"USCGC\")\n\n military_apo_format = \"PSC ####, Box ####\"\n\n military_dpo_format = \"Unit #### Box ####\"\n\n city_formats = (\n \"{{city_prefix}} {{first_name}}{{city_suffix}}\",\n \"{{city_prefix}} {{first_name}}\",\n \"{{first_name}}{{city_suffix}}\",\n \"{{last_name}}{{city_suffix}}\",\n )\n\n street_name_formats = (\n \"{{first_name}} {{street_suffix}}\",\n \"{{last_name}} {{street_suffix}}\",\n )\n\n street_address_formats = (\n \"{{building_number}} {{street_name}}\",\n \"{{building_number}} {{street_name}} {{secondary_address}}\",\n )\n\n address_formats = OrderedDict(\n (\n (\"{{street_address}}\\n{{city}}, {{state_abbr}} {{postcode}}\", 25.0),\n # military address formatting.\n (\"{{military_apo}}\\nAPO {{military_state}} {{postcode}}\", 1.0),\n (\n \"{{military_ship}} {{last_name}}\\nFPO {{military_state}} {{postcode}}\",\n 1.0,\n ),\n (\"{{military_dpo}}\\nDPO {{military_state}} {{postcode}}\", 1.0),\n )\n )\n\n secondary_address_formats = (\"Apt. ###\", \"Suite ###\")\n\n def city_prefix(self) -> str:\n return self.random_element(self.city_prefixes)\n\n def secondary_address(self) -> str:\n return self.numerify(self.random_element(self.secondary_address_formats))\n\n def administrative_unit(self) -> str:\n return self.random_element(self.states)\n\n state = administrative_unit\n\n def state_abbr(self, include_territories: bool = True) -> str:\n \"\"\"\n :returns: A random state or territory abbreviation.\n\n :param include_territories: If True, territories will be included.\n If False, only states will be returned.\n \"\"\"\n if include_territories:\n return self.random_element(self.states_and_territories_abbr)\n return self.random_element(self.states_abbr)\n\n def postcode(self) -> str:\n return \"%05d\" % self.generator.random.randint(501, 99950)\n\n def zipcode_plus4(self) -> str:\n return \"%s-%04d\" % (self.zipcode(), self.generator.random.randint(1, 9999))\n\n def postcode_in_state(self, state_abbr: Optional[str] = None) -> str:\n \"\"\"\n :returns: A random postcode within the provided state abbreviation\n\n :param state_abbr: A state abbreviation\n \"\"\"\n if state_abbr is None:\n state_abbr = self.random_element(self.states_abbr)\n\n if state_abbr in self.states_abbr:\n postcode = \"%d\" % (\n self.generator.random.randint(\n self.states_postcode[state_abbr][0],\n self.states_postcode[state_abbr][1],\n )\n )\n\n if len(postcode) == 4:\n postcode = \"0%s\" % postcode\n\n return postcode\n\n else:\n raise Exception(\"State Abbreviation not found in list\")\n\n def military_ship(self) -> str:\n \"\"\"\n :example: 'USS'\n \"\"\"\n return self.random_element(self.military_ship_prefix)\n\n def military_state(self) -> str:\n \"\"\"\n :example: 'APO'\n \"\"\"\n return self.random_element(self.military_state_abbr)\n\n def military_apo(self) -> str:\n \"\"\"\n :example: 'PSC 5394 Box 3492\n \"\"\"\n return self.numerify(self.military_apo_format)\n\n def military_dpo(self) -> str:\n \"\"\"\n :example: 'Unit 3333 Box 9342'\n \"\"\"\n return self.numerify(self.military_dpo_format)\n\n # Aliases\n def zipcode(self) -> str:\n return self.postcode()\n\n def zipcode_in_state(self, state_abbr: Optional[str] = None) -> str:\n return 
self.postcode_in_state(state_abbr)\n\n def postalcode(self) -> str:\n return self.postcode()\n\n def postalcode_in_state(self, state_abbr: Optional[str] = None) -> str:\n return self.postcode_in_state(state_abbr)\n\n def postalcode_plus4(self) -> str:\n return self.zipcode_plus4()\n", "path": "faker/providers/address/en_US/__init__.py"}]} |
gh_patches_debug_1243 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-5886 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
name=Bankomat should not be added for amenity=atm
"Bankomat" is simply the Polish word for an ATM, so it is equivalent to tagging `name=ATM`
The same goes for `name=Wpłatomat` (an ATM that accepts cash deposits)
https://www.alltheplaces.xyz/map/#16.82/50.072257/20.036549

--- END ISSUE ---
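To make the requested behaviour concrete, here is a minimal sketch of how the spider can drop such generic names (it mirrors the kind of change being asked for; `category`, `Categories` and `item` refer to the spider code shown below):
```python
# "Bankomat"/"Wpłatomat" merely restate that the feature is an ATM, so keep the
# brand but emit no name for ATM items.
if category == Categories.ATM:
    item["name"] = None
```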
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/santander_pl.py`
Content:
```
1 import chompjs
2 from scrapy import Spider
3
4 from locations.categories import Categories, apply_category, apply_yes_no
5 from locations.dict_parser import DictParser
6 from locations.hours import DAYS, OpeningHours
7 from locations.items import Feature
8
9
10 class SantanderPLSpider(Spider):
11 name = "santander_pl"
12 item_attributes = {"brand": "Santander", "brand_wikidata": "Q806653"}
13 # The "20000000000000" needs to be a valid date time, but it seems it's just there to stop the page being cached by
14 # the CDN. We always get the same data.
15 start_urls = ["https://www.santander.pl/_js_places/time20000000000000/places.js"]
16
17 def parse(self, response, **kwargs):
18 data = chompjs.parse_js_object(response.text)
19 for ref, branch in data["atm"].items():
20 yield self.parse_item(ref, branch, Categories.ATM)
21 for ref, branch in data["branch"].items():
22 yield self.parse_item(ref, branch, Categories.BANK)
23 for ref, branch in data["cashin"].items():
24 item = self.parse_item(ref, branch, Categories.ATM)
25 apply_yes_no("cash_in", item, True)
26 yield item
27
28 @staticmethod
29 def parse_item(ref: str, data: dict, category) -> Feature:
30 data["basicParameters"]["street_address"] = data["basicParameters"].pop("street")
31 item = DictParser.parse(data["basicParameters"])
32 item["ref"] = ref
33
34 if data["open_24h"]:
35 item["opening_hours"] = "24/7"
36 else:
37 item["opening_hours"] = OpeningHours()
38 for day, hours in data["basicParameters"]["opening_hours"].items():
39 start_time, end_time = hours.split("-")
40 item["opening_hours"].add_range(DAYS[int(day) - 2], start_time.strip(), end_time.strip())
41
42 apply_category(category, item)
43
44 return item
45
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/santander_pl.py b/locations/spiders/santander_pl.py
--- a/locations/spiders/santander_pl.py
+++ b/locations/spiders/santander_pl.py
@@ -39,6 +39,9 @@
start_time, end_time = hours.split("-")
item["opening_hours"].add_range(DAYS[int(day) - 2], start_time.strip(), end_time.strip())
+ if category == Categories.ATM:
+ item["name"] = None
+
apply_category(category, item)
return item
| {"golden_diff": "diff --git a/locations/spiders/santander_pl.py b/locations/spiders/santander_pl.py\n--- a/locations/spiders/santander_pl.py\n+++ b/locations/spiders/santander_pl.py\n@@ -39,6 +39,9 @@\n start_time, end_time = hours.split(\"-\")\n item[\"opening_hours\"].add_range(DAYS[int(day) - 2], start_time.strip(), end_time.strip())\n \n+ if category == Categories.ATM:\n+ item[\"name\"] = None\n+\n apply_category(category, item)\n \n return item\n", "issue": "name=Bankomat should not be added for amenity=atm\nIt is like `name=ATM`\r\n\r\nThe same goes for `name=Wp\u0142atomat` (for ATM accepting cash)\r\n\r\nhttps://www.alltheplaces.xyz/map/#16.82/50.072257/20.036549\r\n\r\n\r\n\n", "before_files": [{"content": "import chompjs\nfrom scrapy import Spider\n\nfrom locations.categories import Categories, apply_category, apply_yes_no\nfrom locations.dict_parser import DictParser\nfrom locations.hours import DAYS, OpeningHours\nfrom locations.items import Feature\n\n\nclass SantanderPLSpider(Spider):\n name = \"santander_pl\"\n item_attributes = {\"brand\": \"Santander\", \"brand_wikidata\": \"Q806653\"}\n # The \"20000000000000\" needs to be a valid date time, but it seems it's just there to stop the page being cached by\n # the CDN. We always get the same data.\n start_urls = [\"https://www.santander.pl/_js_places/time20000000000000/places.js\"]\n\n def parse(self, response, **kwargs):\n data = chompjs.parse_js_object(response.text)\n for ref, branch in data[\"atm\"].items():\n yield self.parse_item(ref, branch, Categories.ATM)\n for ref, branch in data[\"branch\"].items():\n yield self.parse_item(ref, branch, Categories.BANK)\n for ref, branch in data[\"cashin\"].items():\n item = self.parse_item(ref, branch, Categories.ATM)\n apply_yes_no(\"cash_in\", item, True)\n yield item\n\n @staticmethod\n def parse_item(ref: str, data: dict, category) -> Feature:\n data[\"basicParameters\"][\"street_address\"] = data[\"basicParameters\"].pop(\"street\")\n item = DictParser.parse(data[\"basicParameters\"])\n item[\"ref\"] = ref\n\n if data[\"open_24h\"]:\n item[\"opening_hours\"] = \"24/7\"\n else:\n item[\"opening_hours\"] = OpeningHours()\n for day, hours in data[\"basicParameters\"][\"opening_hours\"].items():\n start_time, end_time = hours.split(\"-\")\n item[\"opening_hours\"].add_range(DAYS[int(day) - 2], start_time.strip(), end_time.strip())\n\n apply_category(category, item)\n\n return item\n", "path": "locations/spiders/santander_pl.py"}], "after_files": [{"content": "import chompjs\nfrom scrapy import Spider\n\nfrom locations.categories import Categories, apply_category, apply_yes_no\nfrom locations.dict_parser import DictParser\nfrom locations.hours import DAYS, OpeningHours\nfrom locations.items import Feature\n\n\nclass SantanderPLSpider(Spider):\n name = \"santander_pl\"\n item_attributes = {\"brand\": \"Santander\", \"brand_wikidata\": \"Q806653\"}\n # The \"20000000000000\" needs to be a valid date time, but it seems it's just there to stop the page being cached by\n # the CDN. 
We always get the same data.\n start_urls = [\"https://www.santander.pl/_js_places/time20000000000000/places.js\"]\n\n def parse(self, response, **kwargs):\n data = chompjs.parse_js_object(response.text)\n for ref, branch in data[\"atm\"].items():\n yield self.parse_item(ref, branch, Categories.ATM)\n for ref, branch in data[\"branch\"].items():\n yield self.parse_item(ref, branch, Categories.BANK)\n for ref, branch in data[\"cashin\"].items():\n item = self.parse_item(ref, branch, Categories.ATM)\n apply_yes_no(\"cash_in\", item, True)\n yield item\n\n @staticmethod\n def parse_item(ref: str, data: dict, category) -> Feature:\n data[\"basicParameters\"][\"street_address\"] = data[\"basicParameters\"].pop(\"street\")\n item = DictParser.parse(data[\"basicParameters\"])\n item[\"ref\"] = ref\n\n if data[\"open_24h\"]:\n item[\"opening_hours\"] = \"24/7\"\n else:\n item[\"opening_hours\"] = OpeningHours()\n for day, hours in data[\"basicParameters\"][\"opening_hours\"].items():\n start_time, end_time = hours.split(\"-\")\n item[\"opening_hours\"].add_range(DAYS[int(day) - 2], start_time.strip(), end_time.strip())\n\n if category == Categories.ATM:\n item[\"name\"] = None\n\n apply_category(category, item)\n\n return item\n", "path": "locations/spiders/santander_pl.py"}]} |
gh_patches_debug_1244 | rasdani/github-patches | git_diff | pantsbuild__pants-16113 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pants poetry-based lockfiles fail to include hashes.
This was detected in a unit test in the Pants repo, but is a wider problem for all versions of Pants that support generating lockfiles using Poetry.
The proximate cause is this announcement from PyPI:
https://discuss.python.org/t/backwards-incompatible-change-to-pypi-json-api/17154
And the root cause is this Poetry code:
https://github.com/python-poetry/poetry/blob/bce13c14f73060b3abbb791dea585d8fde26eaef/poetry/repositories/pypi_repository.py#L272-L283
There was a Poetry fix released and backported to the 1.1 branch here:
https://github.com/python-poetry/poetry/pull/5973
Users can fix with 2 steps:
1. Update Pants config
```toml
[poetry]
# N.B.: Works around issue described at https://github.com/pantsbuild/pants/issues/16111
# Undo once on a Pants with this version or greater as the default.
version = "poetry==1.1.14"
```
2. Clear Poetry caches with `rm -rf ~/.cache/pypoetry` on Linux and `rm -rf ~/Library/Caches/pypoetry` on Mac.
This issue tracks bumping Pants default to this fixed Poetry version.
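As a quick way to confirm the workaround took effect, the regenerated lockfile can be scanned for hash markers. The snippet below is only an illustrative sketch: the lockfile path is made up, and it assumes a requirements.txt-style lockfile with `--hash=sha256:` entries, which is the format the Poetry-based generation is expected to emit.

```python
from pathlib import Path


def lockfile_has_hashes(path: str) -> bool:
    """Return True only if every pinned requirement carries at least one hash."""
    text = Path(path).read_text()
    # Fold backslash-continued lines so a requirement and its hashes form one record.
    records = text.replace("\\\n", " ").splitlines()
    for record in records:
        record = record.strip()
        # Skip blanks, comments, and option-only lines.
        if not record or record.startswith(("#", "-")):
            continue
        if "--hash=sha256:" not in record:
            return False
    return True


print(lockfile_has_hashes("lockfiles/python-default.txt"))  # hypothetical path
```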
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/python/pants/backend/python/subsystems/poetry.py`
Content:
```
1 # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 from __future__ import annotations
5
6 from collections import defaultdict
7 from dataclasses import dataclass
8 from textwrap import dedent
9 from typing import Any, Iterable, Sequence
10
11 import toml
12 from pkg_resources import Requirement
13
14 from pants.backend.python.subsystems.python_tool_base import PythonToolRequirementsBase
15 from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
16 from pants.engine.fs import FileContent
17
18 # ----------------------------------------------------------------------------------------
19 # Subsystem
20 # ----------------------------------------------------------------------------------------
21
22
23 class PoetrySubsystem(PythonToolRequirementsBase):
24 options_scope = "poetry"
25 help = "Used to generate lockfiles for third-party Python dependencies."
26
27 default_version = "poetry==1.1.8"
28
29 register_interpreter_constraints = True
30 default_interpreter_constraints = ["CPython>=3.7,<4"]
31
32
33 # We must monkeypatch Poetry to include `setuptools` and `wheel` in the lockfile. This was fixed
34 # in Poetry 1.2. See https://github.com/python-poetry/poetry/issues/1584.
35 # WONTFIX(#12314): only use this custom launcher if using Poetry 1.1..
36 POETRY_LAUNCHER = FileContent(
37 "__pants_poetry_launcher.py",
38 dedent(
39 """\
40 from poetry.console import main
41 from poetry.puzzle.provider import Provider
42
43 Provider.UNSAFE_PACKAGES = set()
44 main()
45 """
46 ).encode(),
47 )
48
49
50 # ----------------------------------------------------------------------------------------
51 # Parsing
52 # ----------------------------------------------------------------------------------------
53
54 _HEADER = {
55 "name": "pants-lockfile-generation",
56 "version": "0.1.0",
57 "description": "",
58 "authors": ["pantsbuild"],
59 }
60
61
62 def create_pyproject_toml(
63 requirements: Iterable[str], interpreter_constraints: InterpreterConstraints
64 ) -> str:
65 return toml.dumps(create_pyproject_toml_as_dict(requirements, interpreter_constraints))
66
67
68 def create_pyproject_toml_as_dict(
69 raw_requirements: Iterable[str], interpreter_constraints: InterpreterConstraints
70 ) -> dict:
71 python_constraint = {"python": interpreter_constraints.to_poetry_constraint()}
72 project_name_to_poetry_deps = defaultdict(list)
73 for raw_req in raw_requirements:
74 # WONTFIX(#12314): add error handling.
75 req = Requirement.parse(raw_req)
76 poetry_dep = PoetryDependency.from_requirement(req)
77 project_name_to_poetry_deps[req.project_name].append(poetry_dep)
78
79 deps = {
80 project_name: PoetryDependency.to_pyproject_toml_metadata(poetry_deps)
81 for project_name, poetry_deps in project_name_to_poetry_deps.items()
82 }
83 return {"tool": {"poetry": {**_HEADER, "dependencies": {**python_constraint, **deps}}}}
84
85
86 @dataclass(frozen=True)
87 class PoetryDependency:
88 name: str
89 version: str | None
90 extras: tuple[str, ...] = ()
91 markers: str | None = None
92
93 @classmethod
94 def from_requirement(cls, requirement: Requirement) -> PoetryDependency:
95 return PoetryDependency(
96 requirement.project_name,
97 version=str(requirement.specifier) or None, # type: ignore[attr-defined]
98 extras=tuple(sorted(requirement.extras)),
99 markers=str(requirement.marker) if requirement.marker else None,
100 )
101
102 @classmethod
103 def to_pyproject_toml_metadata(
104 cls, deps: Sequence[PoetryDependency]
105 ) -> dict[str, Any] | list[dict[str, Any]]:
106 def convert_dep(dep: PoetryDependency) -> dict[str, Any]:
107 metadata: dict[str, Any] = {"version": dep.version or "*"}
108 if dep.extras:
109 metadata["extras"] = dep.extras
110 if dep.markers:
111 metadata["markers"] = dep.markers
112 return metadata
113
114 if not deps:
115 raise AssertionError("Must have at least one element!")
116 if len(deps) == 1:
117 return convert_dep(deps[0])
118
119 entries = []
120 name = deps[0].name
121 for dep in deps:
122 if dep.name != name:
123 raise AssertionError(f"All elements must have the same project name. Given: {deps}")
124 entries.append(convert_dep(dep))
125 return entries
126
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/python/pants/backend/python/subsystems/poetry.py b/src/python/pants/backend/python/subsystems/poetry.py
--- a/src/python/pants/backend/python/subsystems/poetry.py
+++ b/src/python/pants/backend/python/subsystems/poetry.py
@@ -24,7 +24,7 @@
options_scope = "poetry"
help = "Used to generate lockfiles for third-party Python dependencies."
- default_version = "poetry==1.1.8"
+ default_version = "poetry==1.1.14"
register_interpreter_constraints = True
default_interpreter_constraints = ["CPython>=3.7,<4"]
| {"golden_diff": "diff --git a/src/python/pants/backend/python/subsystems/poetry.py b/src/python/pants/backend/python/subsystems/poetry.py\n--- a/src/python/pants/backend/python/subsystems/poetry.py\n+++ b/src/python/pants/backend/python/subsystems/poetry.py\n@@ -24,7 +24,7 @@\n options_scope = \"poetry\"\n help = \"Used to generate lockfiles for third-party Python dependencies.\"\n \n- default_version = \"poetry==1.1.8\"\n+ default_version = \"poetry==1.1.14\"\n \n register_interpreter_constraints = True\n default_interpreter_constraints = [\"CPython>=3.7,<4\"]\n", "issue": "Pants poetry-based lockfiles fail to include hashes.\nThis was detected in a unit test in the Pants repo, but is a wider problem for all versions of Pants that support generating lockfiles using Poetry.\r\n\r\nThe proximal cause is this announcement from PyPI:\r\n https://discuss.python.org/t/backwards-incompatible-change-to-pypi-json-api/17154\r\n\r\nAnd the root cause is this Poetry code:\r\n https://github.com/python-poetry/poetry/blob/bce13c14f73060b3abbb791dea585d8fde26eaef/poetry/repositories/pypi_repository.py#L272-L283\r\n\r\nThere was a Poetry fix released and backported to the 1.1. branch here:\r\n https://github.com/python-poetry/poetry/pull/5973\r\n\r\nUsers can fix with 2 steps:\r\n1. Update Pants config\r\n```toml\r\n[poetry]\r\n# N.B.: Works around issue described at https://github.com/pantsbuild/pants/issues/16111\r\n# Undo once on a Pants with this version or greater as the default.\r\nversion = \"poetry==1.1.14\"\r\n```\r\n2. Clear Poetry caches with `rm -rf ~/.cache/pypoetry` on Linux and `rm -rf ~/Library/Caches/pypoetry` on Mac.\r\n\r\nThis issue tracks bumping Pants default to this fixed Poetry version.\r\n\n", "before_files": [{"content": "# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nfrom collections import defaultdict\nfrom dataclasses import dataclass\nfrom textwrap import dedent\nfrom typing import Any, Iterable, Sequence\n\nimport toml\nfrom pkg_resources import Requirement\n\nfrom pants.backend.python.subsystems.python_tool_base import PythonToolRequirementsBase\nfrom pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints\nfrom pants.engine.fs import FileContent\n\n# ----------------------------------------------------------------------------------------\n# Subsystem\n# ----------------------------------------------------------------------------------------\n\n\nclass PoetrySubsystem(PythonToolRequirementsBase):\n options_scope = \"poetry\"\n help = \"Used to generate lockfiles for third-party Python dependencies.\"\n\n default_version = \"poetry==1.1.8\"\n\n register_interpreter_constraints = True\n default_interpreter_constraints = [\"CPython>=3.7,<4\"]\n\n\n# We must monkeypatch Poetry to include `setuptools` and `wheel` in the lockfile. This was fixed\n# in Poetry 1.2. 
See https://github.com/python-poetry/poetry/issues/1584.\n# WONTFIX(#12314): only use this custom launcher if using Poetry 1.1..\nPOETRY_LAUNCHER = FileContent(\n \"__pants_poetry_launcher.py\",\n dedent(\n \"\"\"\\\n from poetry.console import main\n from poetry.puzzle.provider import Provider\n\n Provider.UNSAFE_PACKAGES = set()\n main()\n \"\"\"\n ).encode(),\n)\n\n\n# ----------------------------------------------------------------------------------------\n# Parsing\n# ----------------------------------------------------------------------------------------\n\n_HEADER = {\n \"name\": \"pants-lockfile-generation\",\n \"version\": \"0.1.0\",\n \"description\": \"\",\n \"authors\": [\"pantsbuild\"],\n}\n\n\ndef create_pyproject_toml(\n requirements: Iterable[str], interpreter_constraints: InterpreterConstraints\n) -> str:\n return toml.dumps(create_pyproject_toml_as_dict(requirements, interpreter_constraints))\n\n\ndef create_pyproject_toml_as_dict(\n raw_requirements: Iterable[str], interpreter_constraints: InterpreterConstraints\n) -> dict:\n python_constraint = {\"python\": interpreter_constraints.to_poetry_constraint()}\n project_name_to_poetry_deps = defaultdict(list)\n for raw_req in raw_requirements:\n # WONTFIX(#12314): add error handling.\n req = Requirement.parse(raw_req)\n poetry_dep = PoetryDependency.from_requirement(req)\n project_name_to_poetry_deps[req.project_name].append(poetry_dep)\n\n deps = {\n project_name: PoetryDependency.to_pyproject_toml_metadata(poetry_deps)\n for project_name, poetry_deps in project_name_to_poetry_deps.items()\n }\n return {\"tool\": {\"poetry\": {**_HEADER, \"dependencies\": {**python_constraint, **deps}}}}\n\n\n@dataclass(frozen=True)\nclass PoetryDependency:\n name: str\n version: str | None\n extras: tuple[str, ...] = ()\n markers: str | None = None\n\n @classmethod\n def from_requirement(cls, requirement: Requirement) -> PoetryDependency:\n return PoetryDependency(\n requirement.project_name,\n version=str(requirement.specifier) or None, # type: ignore[attr-defined]\n extras=tuple(sorted(requirement.extras)),\n markers=str(requirement.marker) if requirement.marker else None,\n )\n\n @classmethod\n def to_pyproject_toml_metadata(\n cls, deps: Sequence[PoetryDependency]\n ) -> dict[str, Any] | list[dict[str, Any]]:\n def convert_dep(dep: PoetryDependency) -> dict[str, Any]:\n metadata: dict[str, Any] = {\"version\": dep.version or \"*\"}\n if dep.extras:\n metadata[\"extras\"] = dep.extras\n if dep.markers:\n metadata[\"markers\"] = dep.markers\n return metadata\n\n if not deps:\n raise AssertionError(\"Must have at least one element!\")\n if len(deps) == 1:\n return convert_dep(deps[0])\n\n entries = []\n name = deps[0].name\n for dep in deps:\n if dep.name != name:\n raise AssertionError(f\"All elements must have the same project name. 
Given: {deps}\")\n entries.append(convert_dep(dep))\n return entries\n", "path": "src/python/pants/backend/python/subsystems/poetry.py"}], "after_files": [{"content": "# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nfrom collections import defaultdict\nfrom dataclasses import dataclass\nfrom textwrap import dedent\nfrom typing import Any, Iterable, Sequence\n\nimport toml\nfrom pkg_resources import Requirement\n\nfrom pants.backend.python.subsystems.python_tool_base import PythonToolRequirementsBase\nfrom pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints\nfrom pants.engine.fs import FileContent\n\n# ----------------------------------------------------------------------------------------\n# Subsystem\n# ----------------------------------------------------------------------------------------\n\n\nclass PoetrySubsystem(PythonToolRequirementsBase):\n options_scope = \"poetry\"\n help = \"Used to generate lockfiles for third-party Python dependencies.\"\n\n default_version = \"poetry==1.1.14\"\n\n register_interpreter_constraints = True\n default_interpreter_constraints = [\"CPython>=3.7,<4\"]\n\n\n# We must monkeypatch Poetry to include `setuptools` and `wheel` in the lockfile. This was fixed\n# in Poetry 1.2. See https://github.com/python-poetry/poetry/issues/1584.\n# WONTFIX(#12314): only use this custom launcher if using Poetry 1.1..\nPOETRY_LAUNCHER = FileContent(\n \"__pants_poetry_launcher.py\",\n dedent(\n \"\"\"\\\n from poetry.console import main\n from poetry.puzzle.provider import Provider\n\n Provider.UNSAFE_PACKAGES = set()\n main()\n \"\"\"\n ).encode(),\n)\n\n\n# ----------------------------------------------------------------------------------------\n# Parsing\n# ----------------------------------------------------------------------------------------\n\n_HEADER = {\n \"name\": \"pants-lockfile-generation\",\n \"version\": \"0.1.0\",\n \"description\": \"\",\n \"authors\": [\"pantsbuild\"],\n}\n\n\ndef create_pyproject_toml(\n requirements: Iterable[str], interpreter_constraints: InterpreterConstraints\n) -> str:\n return toml.dumps(create_pyproject_toml_as_dict(requirements, interpreter_constraints))\n\n\ndef create_pyproject_toml_as_dict(\n raw_requirements: Iterable[str], interpreter_constraints: InterpreterConstraints\n) -> dict:\n python_constraint = {\"python\": interpreter_constraints.to_poetry_constraint()}\n project_name_to_poetry_deps = defaultdict(list)\n for raw_req in raw_requirements:\n # WONTFIX(#12314): add error handling.\n req = Requirement.parse(raw_req)\n poetry_dep = PoetryDependency.from_requirement(req)\n project_name_to_poetry_deps[req.project_name].append(poetry_dep)\n\n deps = {\n project_name: PoetryDependency.to_pyproject_toml_metadata(poetry_deps)\n for project_name, poetry_deps in project_name_to_poetry_deps.items()\n }\n return {\"tool\": {\"poetry\": {**_HEADER, \"dependencies\": {**python_constraint, **deps}}}}\n\n\n@dataclass(frozen=True)\nclass PoetryDependency:\n name: str\n version: str | None\n extras: tuple[str, ...] 
= ()\n markers: str | None = None\n\n @classmethod\n def from_requirement(cls, requirement: Requirement) -> PoetryDependency:\n return PoetryDependency(\n requirement.project_name,\n version=str(requirement.specifier) or None, # type: ignore[attr-defined]\n extras=tuple(sorted(requirement.extras)),\n markers=str(requirement.marker) if requirement.marker else None,\n )\n\n @classmethod\n def to_pyproject_toml_metadata(\n cls, deps: Sequence[PoetryDependency]\n ) -> dict[str, Any] | list[dict[str, Any]]:\n def convert_dep(dep: PoetryDependency) -> dict[str, Any]:\n metadata: dict[str, Any] = {\"version\": dep.version or \"*\"}\n if dep.extras:\n metadata[\"extras\"] = dep.extras\n if dep.markers:\n metadata[\"markers\"] = dep.markers\n return metadata\n\n if not deps:\n raise AssertionError(\"Must have at least one element!\")\n if len(deps) == 1:\n return convert_dep(deps[0])\n\n entries = []\n name = deps[0].name\n for dep in deps:\n if dep.name != name:\n raise AssertionError(f\"All elements must have the same project name. Given: {deps}\")\n entries.append(convert_dep(dep))\n return entries\n", "path": "src/python/pants/backend/python/subsystems/poetry.py"}]} |
gh_patches_debug_1245 | rasdani/github-patches | git_diff | saleor__saleor-4008 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Duplicate choices in shipping address
### What I'm trying to achieve
I'm trying to set a shipping address for a Russian user, and there are duplicate values in the "Oblast" selector.
### Steps to reproduce the problem
1. Create new shipping address
2. Country --> Russia
3. There are duplicated values in "Oblast" selector
### What I expected to happen
There are no duplicated values in "Oblast" selector
### Screenshots
What happens now
<img src="https://user-images.githubusercontent.com/13136992/53255369-8a239600-36d6-11e9-84a6-24a10b96a321.png" width="300">
What I expect to see
<img src="https://user-images.githubusercontent.com/13136992/53255400-99a2df00-36d6-11e9-8913-ecaec174487a.png" width="300">
**System information**
Operating system: Manjaro Linux
Browser: Google Chrome 72.0.3626.81
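
Not part of the report, but for orientation: one way to collapse the duplicated entries is to rebuild the field's choices keyed on the human-readable label. The helper below is a hedged sketch against a plain Django form; the `country_area` field name mirrors Saleor's address forms, and this is not claimed to be how the project itself resolves the issue.

```python
def dedupe_country_area_choices(address_form):
    """Collapse choices that render the same label, keeping the first occurrence."""
    field = address_form.fields.get("country_area")
    if field is None or not hasattr(field, "choices"):
        return
    seen = set()
    deduped = []
    for _value, label in field.choices:
        if label not in seen:
            seen.add(label)
            # Use the label as both the submitted value and the display text.
            deduped.append((label, label))
    field.choices = deduped
```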
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `saleor/account/forms.py`
Content:
```
1 from captcha.fields import ReCaptchaField
2 from django import forms
3 from django.conf import settings
4 from django.contrib.auth import forms as django_forms, update_session_auth_hash
5 from django.utils.translation import pgettext, pgettext_lazy
6 from phonenumbers.phonenumberutil import country_code_for_region
7
8 from ..account.models import User
9 from . import emails
10 from .i18n import AddressMetaForm, get_address_form_class
11
12
13 class FormWithReCaptcha(forms.BaseForm):
14 def __new__(cls, *args, **kwargs):
15 if settings.RECAPTCHA_PUBLIC_KEY and settings.RECAPTCHA_PRIVATE_KEY:
16 # insert a Google reCaptcha field inside the form
17 # note: label is empty, the reCaptcha is self-explanatory making
18 # the form simpler for the user.
19 cls.base_fields['_captcha'] = ReCaptchaField(label='')
20 return super(FormWithReCaptcha, cls).__new__(cls)
21
22
23 def get_address_form(
24 data, country_code, initial=None, instance=None, **kwargs):
25 country_form = AddressMetaForm(data, initial=initial)
26 preview = False
27 if country_form.is_valid():
28 country_code = country_form.cleaned_data['country']
29 preview = country_form.cleaned_data['preview']
30
31 if initial is None and country_code:
32 initial = {}
33 if country_code:
34 initial['phone'] = '+{}'.format(country_code_for_region(country_code))
35
36 address_form_class = get_address_form_class(country_code)
37
38 if not preview and instance is not None:
39 address_form_class = get_address_form_class(instance.country.code)
40 address_form = address_form_class(data, instance=instance, **kwargs)
41 else:
42 initial_address = (
43 initial if not preview
44 else data.dict() if data is not None else data)
45 address_form = address_form_class(
46 not preview and data or None,
47 initial=initial_address,
48 **kwargs)
49 return address_form, preview
50
51
52 class ChangePasswordForm(django_forms.PasswordChangeForm):
53 def __init__(self, *args, **kwargs):
54 super().__init__(*args, **kwargs)
55 self.fields['new_password1'].user = self.user
56 self.fields['old_password'].widget.attrs['placeholder'] = ''
57 self.fields['new_password1'].widget.attrs['placeholder'] = ''
58 del self.fields['new_password2']
59
60
61 def logout_on_password_change(request, user):
62 if (update_session_auth_hash is not None and
63 not settings.LOGOUT_ON_PASSWORD_CHANGE):
64 update_session_auth_hash(request, user)
65
66
67 class LoginForm(django_forms.AuthenticationForm, FormWithReCaptcha):
68 username = forms.EmailField(
69 label=pgettext('Form field', 'Email'), max_length=75)
70
71 def __init__(self, request=None, *args, **kwargs):
72 super().__init__(request=request, *args, **kwargs)
73 if request:
74 email = request.GET.get('email')
75 if email:
76 self.fields['username'].initial = email
77
78
79 class SignupForm(forms.ModelForm, FormWithReCaptcha):
80 password = forms.CharField(
81 widget=forms.PasswordInput,
82 label=pgettext('Password', 'Password'))
83 email = forms.EmailField(
84 label=pgettext('Email', 'Email'),
85 error_messages={
86 'unique': pgettext_lazy(
87 'Registration error',
88 'This email has already been registered.')})
89
90 class Meta:
91 model = User
92 fields = ('email',)
93
94 def __init__(self, *args, **kwargs):
95 super().__init__(*args, **kwargs)
96 if self._meta.model.USERNAME_FIELD in self.fields:
97 self.fields[self._meta.model.USERNAME_FIELD].widget.attrs.update(
98 {'autofocus': ''})
99
100 def save(self, request=None, commit=True):
101 user = super().save(commit=False)
102 password = self.cleaned_data['password']
103 user.set_password(password)
104 if commit:
105 user.save()
106 return user
107
108
109 class PasswordResetForm(django_forms.PasswordResetForm, FormWithReCaptcha):
110 """Allow resetting passwords.
111
112 This subclass overrides sending emails to use templated email.
113 """
114
115 def get_users(self, email):
116 active_users = User.objects.filter(email__iexact=email, is_active=True)
117 return active_users
118
119 def send_mail(
120 self, subject_template_name, email_template_name, context,
121 from_email, to_email, html_email_template_name=None):
122 # Passing the user object to the Celery task throws an
123 # error "'User' is not JSON serializable". Since it's not used in our
124 # template, we remove it from the context.
125 del context['user']
126 emails.send_password_reset_email.delay(context, to_email)
127
128
129 class NameForm(forms.ModelForm):
130 class Meta:
131 model = User
132 fields = ['first_name', 'last_name']
133 labels = {
134 'first_name': pgettext_lazy(
135 'Customer form: Given name field', 'Given name'),
136 'last_name': pgettext_lazy(
137 'Customer form: Family name field', 'Family name')}
138
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/saleor/account/forms.py b/saleor/account/forms.py
--- a/saleor/account/forms.py
+++ b/saleor/account/forms.py
@@ -46,6 +46,11 @@
not preview and data or None,
initial=initial_address,
**kwargs)
+
+ if hasattr(address_form.fields['country_area'], 'choices'):
+ choices = address_form.fields['country_area'].choices
+ choices = [(choice[1], choice[1]) for choice in choices]
+ address_form.fields['country_area'].choices = choices
return address_form, preview
| {"golden_diff": "diff --git a/saleor/account/forms.py b/saleor/account/forms.py\n--- a/saleor/account/forms.py\n+++ b/saleor/account/forms.py\n@@ -46,6 +46,11 @@\n not preview and data or None,\n initial=initial_address,\n **kwargs)\n+\n+ if hasattr(address_form.fields['country_area'], 'choices'):\n+ choices = address_form.fields['country_area'].choices\n+ choices = [(choice[1], choice[1]) for choice in choices]\n+ address_form.fields['country_area'].choices = choices\n return address_form, preview\n", "issue": "Duplicate choices in shipping address\n### What I'm trying to achieve\r\nI'm trying to set shipping user for Russian user and there are duplicate values in \"Oblast\" selector.\r\n\r\n### Steps to reproduce the problem\r\n1. Create new shipping address\r\n2. Country --> Russia\r\n3. There are duplicated values in \"Oblast\" selector\r\n\r\n### What I expected to happen\r\nThere are no duplicated values in \"Oblast\" selector\r\n\r\n### Screenshots\r\nWhat happens now\r\n<img src=\"https://user-images.githubusercontent.com/13136992/53255369-8a239600-36d6-11e9-84a6-24a10b96a321.png\" width=\"300\">\r\n\r\n\r\nWhat I expect to see\r\n<img src=\"https://user-images.githubusercontent.com/13136992/53255400-99a2df00-36d6-11e9-8913-ecaec174487a.png\" width=\"300\">\r\n\r\n\r\n**System information**\r\nOperating system: Manjaro Linux\r\nBrowser: Google Chrome 72.0.3626.81\r\n\n", "before_files": [{"content": "from captcha.fields import ReCaptchaField\nfrom django import forms\nfrom django.conf import settings\nfrom django.contrib.auth import forms as django_forms, update_session_auth_hash\nfrom django.utils.translation import pgettext, pgettext_lazy\nfrom phonenumbers.phonenumberutil import country_code_for_region\n\nfrom ..account.models import User\nfrom . 
import emails\nfrom .i18n import AddressMetaForm, get_address_form_class\n\n\nclass FormWithReCaptcha(forms.BaseForm):\n def __new__(cls, *args, **kwargs):\n if settings.RECAPTCHA_PUBLIC_KEY and settings.RECAPTCHA_PRIVATE_KEY:\n # insert a Google reCaptcha field inside the form\n # note: label is empty, the reCaptcha is self-explanatory making\n # the form simpler for the user.\n cls.base_fields['_captcha'] = ReCaptchaField(label='')\n return super(FormWithReCaptcha, cls).__new__(cls)\n\n\ndef get_address_form(\n data, country_code, initial=None, instance=None, **kwargs):\n country_form = AddressMetaForm(data, initial=initial)\n preview = False\n if country_form.is_valid():\n country_code = country_form.cleaned_data['country']\n preview = country_form.cleaned_data['preview']\n\n if initial is None and country_code:\n initial = {}\n if country_code:\n initial['phone'] = '+{}'.format(country_code_for_region(country_code))\n\n address_form_class = get_address_form_class(country_code)\n\n if not preview and instance is not None:\n address_form_class = get_address_form_class(instance.country.code)\n address_form = address_form_class(data, instance=instance, **kwargs)\n else:\n initial_address = (\n initial if not preview\n else data.dict() if data is not None else data)\n address_form = address_form_class(\n not preview and data or None,\n initial=initial_address,\n **kwargs)\n return address_form, preview\n\n\nclass ChangePasswordForm(django_forms.PasswordChangeForm):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['new_password1'].user = self.user\n self.fields['old_password'].widget.attrs['placeholder'] = ''\n self.fields['new_password1'].widget.attrs['placeholder'] = ''\n del self.fields['new_password2']\n\n\ndef logout_on_password_change(request, user):\n if (update_session_auth_hash is not None and\n not settings.LOGOUT_ON_PASSWORD_CHANGE):\n update_session_auth_hash(request, user)\n\n\nclass LoginForm(django_forms.AuthenticationForm, FormWithReCaptcha):\n username = forms.EmailField(\n label=pgettext('Form field', 'Email'), max_length=75)\n\n def __init__(self, request=None, *args, **kwargs):\n super().__init__(request=request, *args, **kwargs)\n if request:\n email = request.GET.get('email')\n if email:\n self.fields['username'].initial = email\n\n\nclass SignupForm(forms.ModelForm, FormWithReCaptcha):\n password = forms.CharField(\n widget=forms.PasswordInput,\n label=pgettext('Password', 'Password'))\n email = forms.EmailField(\n label=pgettext('Email', 'Email'),\n error_messages={\n 'unique': pgettext_lazy(\n 'Registration error',\n 'This email has already been registered.')})\n\n class Meta:\n model = User\n fields = ('email',)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if self._meta.model.USERNAME_FIELD in self.fields:\n self.fields[self._meta.model.USERNAME_FIELD].widget.attrs.update(\n {'autofocus': ''})\n\n def save(self, request=None, commit=True):\n user = super().save(commit=False)\n password = self.cleaned_data['password']\n user.set_password(password)\n if commit:\n user.save()\n return user\n\n\nclass PasswordResetForm(django_forms.PasswordResetForm, FormWithReCaptcha):\n \"\"\"Allow resetting passwords.\n\n This subclass overrides sending emails to use templated email.\n \"\"\"\n\n def get_users(self, email):\n active_users = User.objects.filter(email__iexact=email, is_active=True)\n return active_users\n\n def send_mail(\n self, subject_template_name, email_template_name, context,\n from_email, 
to_email, html_email_template_name=None):\n # Passing the user object to the Celery task throws an\n # error \"'User' is not JSON serializable\". Since it's not used in our\n # template, we remove it from the context.\n del context['user']\n emails.send_password_reset_email.delay(context, to_email)\n\n\nclass NameForm(forms.ModelForm):\n class Meta:\n model = User\n fields = ['first_name', 'last_name']\n labels = {\n 'first_name': pgettext_lazy(\n 'Customer form: Given name field', 'Given name'),\n 'last_name': pgettext_lazy(\n 'Customer form: Family name field', 'Family name')}\n", "path": "saleor/account/forms.py"}], "after_files": [{"content": "from captcha.fields import ReCaptchaField\nfrom django import forms\nfrom django.conf import settings\nfrom django.contrib.auth import forms as django_forms, update_session_auth_hash\nfrom django.utils.translation import pgettext, pgettext_lazy\nfrom phonenumbers.phonenumberutil import country_code_for_region\n\nfrom ..account.models import User\nfrom . import emails\nfrom .i18n import AddressMetaForm, get_address_form_class\n\n\nclass FormWithReCaptcha(forms.BaseForm):\n def __new__(cls, *args, **kwargs):\n if settings.RECAPTCHA_PUBLIC_KEY and settings.RECAPTCHA_PRIVATE_KEY:\n # insert a Google reCaptcha field inside the form\n # note: label is empty, the reCaptcha is self-explanatory making\n # the form simpler for the user.\n cls.base_fields['_captcha'] = ReCaptchaField(label='')\n return super(FormWithReCaptcha, cls).__new__(cls)\n\n\ndef get_address_form(\n data, country_code, initial=None, instance=None, **kwargs):\n country_form = AddressMetaForm(data, initial=initial)\n preview = False\n if country_form.is_valid():\n country_code = country_form.cleaned_data['country']\n preview = country_form.cleaned_data['preview']\n\n if initial is None and country_code:\n initial = {}\n if country_code:\n initial['phone'] = '+{}'.format(country_code_for_region(country_code))\n\n address_form_class = get_address_form_class(country_code)\n\n if not preview and instance is not None:\n address_form_class = get_address_form_class(instance.country.code)\n address_form = address_form_class(data, instance=instance, **kwargs)\n else:\n initial_address = (\n initial if not preview\n else data.dict() if data is not None else data)\n address_form = address_form_class(\n not preview and data or None,\n initial=initial_address,\n **kwargs)\n\n if hasattr(address_form.fields['country_area'], 'choices'):\n choices = address_form.fields['country_area'].choices\n choices = [(choice[1], choice[1]) for choice in choices]\n address_form.fields['country_area'].choices = choices\n return address_form, preview\n\n\nclass ChangePasswordForm(django_forms.PasswordChangeForm):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['new_password1'].user = self.user\n self.fields['old_password'].widget.attrs['placeholder'] = ''\n self.fields['new_password1'].widget.attrs['placeholder'] = ''\n del self.fields['new_password2']\n\n\ndef logout_on_password_change(request, user):\n if (update_session_auth_hash is not None and\n not settings.LOGOUT_ON_PASSWORD_CHANGE):\n update_session_auth_hash(request, user)\n\n\nclass LoginForm(django_forms.AuthenticationForm, FormWithReCaptcha):\n username = forms.EmailField(\n label=pgettext('Form field', 'Email'), max_length=75)\n\n def __init__(self, request=None, *args, **kwargs):\n super().__init__(request=request, *args, **kwargs)\n if request:\n email = request.GET.get('email')\n if email:\n 
self.fields['username'].initial = email\n\n\nclass SignupForm(forms.ModelForm, FormWithReCaptcha):\n password = forms.CharField(\n widget=forms.PasswordInput,\n label=pgettext('Password', 'Password'))\n email = forms.EmailField(\n label=pgettext('Email', 'Email'),\n error_messages={\n 'unique': pgettext_lazy(\n 'Registration error',\n 'This email has already been registered.')})\n\n class Meta:\n model = User\n fields = ('email',)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if self._meta.model.USERNAME_FIELD in self.fields:\n self.fields[self._meta.model.USERNAME_FIELD].widget.attrs.update(\n {'autofocus': ''})\n\n def save(self, request=None, commit=True):\n user = super().save(commit=False)\n password = self.cleaned_data['password']\n user.set_password(password)\n if commit:\n user.save()\n return user\n\n\nclass PasswordResetForm(django_forms.PasswordResetForm, FormWithReCaptcha):\n \"\"\"Allow resetting passwords.\n\n This subclass overrides sending emails to use templated email.\n \"\"\"\n\n def get_users(self, email):\n active_users = User.objects.filter(email__iexact=email, is_active=True)\n return active_users\n\n def send_mail(\n self, subject_template_name, email_template_name, context,\n from_email, to_email, html_email_template_name=None):\n # Passing the user object to the Celery task throws an\n # error \"'User' is not JSON serializable\". Since it's not used in our\n # template, we remove it from the context.\n del context['user']\n emails.send_password_reset_email.delay(context, to_email)\n\n\nclass NameForm(forms.ModelForm):\n class Meta:\n model = User\n fields = ['first_name', 'last_name']\n labels = {\n 'first_name': pgettext_lazy(\n 'Customer form: Given name field', 'Given name'),\n 'last_name': pgettext_lazy(\n 'Customer form: Family name field', 'Family name')}\n", "path": "saleor/account/forms.py"}]} |
gh_patches_debug_1246 | rasdani/github-patches | git_diff | strawberry-graphql__strawberry-2411 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
App breaking when using Enum as field for Generic
When using an Enum as a field on a Generic, the app breaks, throwing a `NotImplementedError`.
## Describe the Bug
The below code is an example of the error.
```python
from enum import Enum
from typing import Generic, Optional, TypeVar

import strawberry

T = TypeVar("T")


@strawberry.enum
class EstimatedValueEnum(Enum):
    test = "test"
    testtest = "testtest"


@strawberry.type
class EstimatedValue(Generic[T]):
    value: T
    type: EstimatedValueEnum


@strawberry.type
class Query:
    id_translations: Optional[EstimatedValue[int]]


schema = strawberry.Schema(query=Query)
```
Are we doing something wrong and this is intended, or is this a bug?
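
For what it's worth, the failure appears when the generic is specialized (`EstimatedValue[int]`): every field type is asked to substitute its type variables, and the enum field hits a code path that is not implemented. The toy model below is written from scratch here, not taken from Strawberry's internals, and only illustrates why a parameter-free type can simply pass through unchanged.

```python
from enum import Enum
from typing import TypeVar

T = TypeVar("T")


class EstimatedValueEnum(Enum):
    test = "test"
    testtest = "testtest"


def substitute(field_type, type_var_map):
    """Replace type variables; anything without parameters passes through unchanged."""
    if isinstance(field_type, TypeVar):
        return type_var_map[field_type]
    return field_type


print(substitute(T, {T: int}))                   # <class 'int'>
print(substitute(EstimatedValueEnum, {T: int}))  # the enum class, untouched
```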
## System Information
- Operating system: Docker
- Strawberry version (if applicable): Since [0.149.2](https://github.com/strawberry-graphql/strawberry/blob/main/CHANGELOG.md#01492---2022-12-09)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `strawberry/enum.py`
Content:
```
1 import dataclasses
2 from enum import EnumMeta
3 from typing import (
4 Any,
5 Callable,
6 Iterable,
7 List,
8 Mapping,
9 Optional,
10 TypeVar,
11 Union,
12 overload,
13 )
14
15 from strawberry.type import StrawberryType
16
17 from .exceptions import ObjectIsNotAnEnumError
18
19
20 @dataclasses.dataclass
21 class EnumValue:
22 name: str
23 value: Any
24 deprecation_reason: Optional[str] = None
25 directives: Iterable[object] = ()
26 description: Optional[str] = None
27
28
29 @dataclasses.dataclass
30 class EnumDefinition(StrawberryType):
31 wrapped_cls: EnumMeta
32 name: str
33 values: List[EnumValue]
34 description: Optional[str]
35 directives: Iterable[object] = ()
36
37 def __hash__(self) -> int:
38 # TODO: Is this enough for unique-ness?
39 return hash(self.name)
40
41 def copy_with(
42 self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]]
43 ) -> Union[StrawberryType, type]:
44 return super().copy_with(type_var_map) # type: ignore[safe-super]
45
46 @property
47 def is_generic(self) -> bool:
48 return False
49
50
51 # TODO: remove duplication of EnumValueDefinition and EnumValue
52 @dataclasses.dataclass
53 class EnumValueDefinition:
54 value: Any
55 deprecation_reason: Optional[str] = None
56 directives: Iterable[object] = ()
57 description: Optional[str] = None
58
59
60 def enum_value(
61 value: Any,
62 deprecation_reason: Optional[str] = None,
63 directives: Iterable[object] = (),
64 description: Optional[str] = None,
65 ) -> EnumValueDefinition:
66 return EnumValueDefinition(
67 value=value,
68 deprecation_reason=deprecation_reason,
69 directives=directives,
70 description=description,
71 )
72
73
74 EnumType = TypeVar("EnumType", bound=EnumMeta)
75
76
77 def _process_enum(
78 cls: EnumType,
79 name: Optional[str] = None,
80 description: Optional[str] = None,
81 directives: Iterable[object] = (),
82 ) -> EnumType:
83 if not isinstance(cls, EnumMeta):
84 raise ObjectIsNotAnEnumError(cls)
85
86 if not name:
87 name = cls.__name__
88
89 description = description
90
91 values = []
92 for item in cls: # type: ignore
93 item_value = item.value
94 item_name = item.name
95 deprecation_reason = None
96 item_directives: Iterable[object] = ()
97 enum_value_description = None
98
99 if isinstance(item_value, EnumValueDefinition):
100 item_directives = item_value.directives
101 enum_value_description = item_value.description
102 deprecation_reason = item_value.deprecation_reason
103 item_value = item_value.value
104
105 # update _value2member_map_ so that doing `MyEnum.MY_VALUE` and
106 # `MyEnum['MY_VALUE']` both work
107 cls._value2member_map_[item_value] = item
108 cls._member_map_[item_name]._value_ = item_value
109
110 value = EnumValue(
111 item_name,
112 item_value,
113 deprecation_reason=deprecation_reason,
114 directives=item_directives,
115 description=enum_value_description,
116 )
117 values.append(value)
118
119 cls._enum_definition = EnumDefinition( # type: ignore
120 wrapped_cls=cls,
121 name=name,
122 values=values,
123 description=description,
124 directives=directives,
125 )
126
127 return cls
128
129
130 @overload
131 def enum(
132 _cls: EnumType,
133 *,
134 name: Optional[str] = None,
135 description: Optional[str] = None,
136 directives: Iterable[object] = ()
137 ) -> EnumType:
138 ...
139
140
141 @overload
142 def enum(
143 _cls: None = None,
144 *,
145 name: Optional[str] = None,
146 description: Optional[str] = None,
147 directives: Iterable[object] = ()
148 ) -> Callable[[EnumType], EnumType]:
149 ...
150
151
152 def enum(
153 _cls: Optional[EnumType] = None,
154 *,
155 name: Optional[str] = None,
156 description: Optional[str] = None,
157 directives: Iterable[object] = ()
158 ) -> Union[EnumType, Callable[[EnumType], EnumType]]:
159 """Registers the enum in the GraphQL type system.
160
161 If name is passed, the name of the GraphQL type will be
162 the value passed of name instead of the Enum class name.
163 """
164
165 def wrap(cls: EnumType) -> EnumType:
166 return _process_enum(cls, name, description, directives=directives)
167
168 if not _cls:
169 return wrap
170
171 return wrap(_cls)
172
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/strawberry/enum.py b/strawberry/enum.py
--- a/strawberry/enum.py
+++ b/strawberry/enum.py
@@ -41,7 +41,8 @@
def copy_with(
self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]]
) -> Union[StrawberryType, type]:
- return super().copy_with(type_var_map) # type: ignore[safe-super]
+ # enum don't support type parameters, so we can safely return self
+ return self
@property
def is_generic(self) -> bool:
| {"golden_diff": "diff --git a/strawberry/enum.py b/strawberry/enum.py\n--- a/strawberry/enum.py\n+++ b/strawberry/enum.py\n@@ -41,7 +41,8 @@\n def copy_with(\n self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]]\n ) -> Union[StrawberryType, type]:\n- return super().copy_with(type_var_map) # type: ignore[safe-super]\n+ # enum don't support type parameters, so we can safely return self\n+ return self\n \n @property\n def is_generic(self) -> bool:\n", "issue": "App breaking when using Enum as field for Generic\n<!-- Provide a general summary of the bug in the title above. -->\r\nWhen using an Enum as a field on a Generic, the app breaks, throwing a `NotImplementedError`.\r\n\r\n<!--- This template is entirely optional and can be removed, but is here to help both you and us. -->\r\n<!--- Anything on lines wrapped in comments like these will not show up in the final text. -->\r\n\r\n## Describe the Bug\r\nThe below code is an example of the error.\r\n```python\r\nfrom enum import Enum\r\nfrom typing import Generic, Optional, TypeVar\r\n\r\nimport strawberry\r\n\r\nT = TypeVar(\"T\")\r\n\r\n\r\[email protected]\r\nclass EstimatedValueEnum(Enum):\r\n test = \"test\"\r\n testtest = \"testtest\"\r\n\r\n\r\[email protected]\r\nclass EstimatedValue(Generic[T]):\r\n value: T\r\n type: EstimatedValueEnum\r\n\r\n\r\[email protected]\r\nclass Query:\r\n id_translations: Optional[EstimatedValue[int]]\r\n\r\n\r\nschema = strawberry.Schema(query=Query)\r\n```\r\nAre we doing something wrong and this is intended or is this a bug?\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\n## System Information\r\n\r\n - Operating system: Docker\r\n - Strawberry version (if applicable): Since [0.149.2](https://github.com/strawberry-graphql/strawberry/blob/main/CHANGELOG.md#01492---2022-12-09)\r\n\r\n<!-- Add any other relevant information about the problem here. 
-->\r\n\n", "before_files": [{"content": "import dataclasses\nfrom enum import EnumMeta\nfrom typing import (\n Any,\n Callable,\n Iterable,\n List,\n Mapping,\n Optional,\n TypeVar,\n Union,\n overload,\n)\n\nfrom strawberry.type import StrawberryType\n\nfrom .exceptions import ObjectIsNotAnEnumError\n\n\[email protected]\nclass EnumValue:\n name: str\n value: Any\n deprecation_reason: Optional[str] = None\n directives: Iterable[object] = ()\n description: Optional[str] = None\n\n\[email protected]\nclass EnumDefinition(StrawberryType):\n wrapped_cls: EnumMeta\n name: str\n values: List[EnumValue]\n description: Optional[str]\n directives: Iterable[object] = ()\n\n def __hash__(self) -> int:\n # TODO: Is this enough for unique-ness?\n return hash(self.name)\n\n def copy_with(\n self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]]\n ) -> Union[StrawberryType, type]:\n return super().copy_with(type_var_map) # type: ignore[safe-super]\n\n @property\n def is_generic(self) -> bool:\n return False\n\n\n# TODO: remove duplication of EnumValueDefinition and EnumValue\[email protected]\nclass EnumValueDefinition:\n value: Any\n deprecation_reason: Optional[str] = None\n directives: Iterable[object] = ()\n description: Optional[str] = None\n\n\ndef enum_value(\n value: Any,\n deprecation_reason: Optional[str] = None,\n directives: Iterable[object] = (),\n description: Optional[str] = None,\n) -> EnumValueDefinition:\n return EnumValueDefinition(\n value=value,\n deprecation_reason=deprecation_reason,\n directives=directives,\n description=description,\n )\n\n\nEnumType = TypeVar(\"EnumType\", bound=EnumMeta)\n\n\ndef _process_enum(\n cls: EnumType,\n name: Optional[str] = None,\n description: Optional[str] = None,\n directives: Iterable[object] = (),\n) -> EnumType:\n if not isinstance(cls, EnumMeta):\n raise ObjectIsNotAnEnumError(cls)\n\n if not name:\n name = cls.__name__\n\n description = description\n\n values = []\n for item in cls: # type: ignore\n item_value = item.value\n item_name = item.name\n deprecation_reason = None\n item_directives: Iterable[object] = ()\n enum_value_description = None\n\n if isinstance(item_value, EnumValueDefinition):\n item_directives = item_value.directives\n enum_value_description = item_value.description\n deprecation_reason = item_value.deprecation_reason\n item_value = item_value.value\n\n # update _value2member_map_ so that doing `MyEnum.MY_VALUE` and\n # `MyEnum['MY_VALUE']` both work\n cls._value2member_map_[item_value] = item\n cls._member_map_[item_name]._value_ = item_value\n\n value = EnumValue(\n item_name,\n item_value,\n deprecation_reason=deprecation_reason,\n directives=item_directives,\n description=enum_value_description,\n )\n values.append(value)\n\n cls._enum_definition = EnumDefinition( # type: ignore\n wrapped_cls=cls,\n name=name,\n values=values,\n description=description,\n directives=directives,\n )\n\n return cls\n\n\n@overload\ndef enum(\n _cls: EnumType,\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n directives: Iterable[object] = ()\n) -> EnumType:\n ...\n\n\n@overload\ndef enum(\n _cls: None = None,\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n directives: Iterable[object] = ()\n) -> Callable[[EnumType], EnumType]:\n ...\n\n\ndef enum(\n _cls: Optional[EnumType] = None,\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n directives: Iterable[object] = ()\n) -> Union[EnumType, Callable[[EnumType], EnumType]]:\n \"\"\"Registers the enum in the 
GraphQL type system.\n\n If name is passed, the name of the GraphQL type will be\n the value passed of name instead of the Enum class name.\n \"\"\"\n\n def wrap(cls: EnumType) -> EnumType:\n return _process_enum(cls, name, description, directives=directives)\n\n if not _cls:\n return wrap\n\n return wrap(_cls)\n", "path": "strawberry/enum.py"}], "after_files": [{"content": "import dataclasses\nfrom enum import EnumMeta\nfrom typing import (\n Any,\n Callable,\n Iterable,\n List,\n Mapping,\n Optional,\n TypeVar,\n Union,\n overload,\n)\n\nfrom strawberry.type import StrawberryType\n\nfrom .exceptions import ObjectIsNotAnEnumError\n\n\[email protected]\nclass EnumValue:\n name: str\n value: Any\n deprecation_reason: Optional[str] = None\n directives: Iterable[object] = ()\n description: Optional[str] = None\n\n\[email protected]\nclass EnumDefinition(StrawberryType):\n wrapped_cls: EnumMeta\n name: str\n values: List[EnumValue]\n description: Optional[str]\n directives: Iterable[object] = ()\n\n def __hash__(self) -> int:\n # TODO: Is this enough for unique-ness?\n return hash(self.name)\n\n def copy_with(\n self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]]\n ) -> Union[StrawberryType, type]:\n # enum don't support type parameters, so we can safely return self\n return self\n\n @property\n def is_generic(self) -> bool:\n return False\n\n\n# TODO: remove duplication of EnumValueDefinition and EnumValue\[email protected]\nclass EnumValueDefinition:\n value: Any\n deprecation_reason: Optional[str] = None\n directives: Iterable[object] = ()\n description: Optional[str] = None\n\n\ndef enum_value(\n value: Any,\n deprecation_reason: Optional[str] = None,\n directives: Iterable[object] = (),\n description: Optional[str] = None,\n) -> EnumValueDefinition:\n return EnumValueDefinition(\n value=value,\n deprecation_reason=deprecation_reason,\n directives=directives,\n description=description,\n )\n\n\nEnumType = TypeVar(\"EnumType\", bound=EnumMeta)\n\n\ndef _process_enum(\n cls: EnumType,\n name: Optional[str] = None,\n description: Optional[str] = None,\n directives: Iterable[object] = (),\n) -> EnumType:\n if not isinstance(cls, EnumMeta):\n raise ObjectIsNotAnEnumError(cls)\n\n if not name:\n name = cls.__name__\n\n description = description\n\n values = []\n for item in cls: # type: ignore\n item_value = item.value\n item_name = item.name\n deprecation_reason = None\n item_directives: Iterable[object] = ()\n enum_value_description = None\n\n if isinstance(item_value, EnumValueDefinition):\n item_directives = item_value.directives\n enum_value_description = item_value.description\n deprecation_reason = item_value.deprecation_reason\n item_value = item_value.value\n\n # update _value2member_map_ so that doing `MyEnum.MY_VALUE` and\n # `MyEnum['MY_VALUE']` both work\n cls._value2member_map_[item_value] = item\n cls._member_map_[item_name]._value_ = item_value\n\n value = EnumValue(\n item_name,\n item_value,\n deprecation_reason=deprecation_reason,\n directives=item_directives,\n description=enum_value_description,\n )\n values.append(value)\n\n cls._enum_definition = EnumDefinition( # type: ignore\n wrapped_cls=cls,\n name=name,\n values=values,\n description=description,\n directives=directives,\n )\n\n return cls\n\n\n@overload\ndef enum(\n _cls: EnumType,\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n directives: Iterable[object] = ()\n) -> EnumType:\n ...\n\n\n@overload\ndef enum(\n _cls: None = None,\n *,\n name: Optional[str] = None,\n 
description: Optional[str] = None,\n directives: Iterable[object] = ()\n) -> Callable[[EnumType], EnumType]:\n ...\n\n\ndef enum(\n _cls: Optional[EnumType] = None,\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n directives: Iterable[object] = ()\n) -> Union[EnumType, Callable[[EnumType], EnumType]]:\n \"\"\"Registers the enum in the GraphQL type system.\n\n If name is passed, the name of the GraphQL type will be\n the value passed of name instead of the Enum class name.\n \"\"\"\n\n def wrap(cls: EnumType) -> EnumType:\n return _process_enum(cls, name, description, directives=directives)\n\n if not _cls:\n return wrap\n\n return wrap(_cls)\n", "path": "strawberry/enum.py"}]} |
gh_patches_debug_1247 | rasdani/github-patches | git_diff | PennyLaneAI__pennylane-3182 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Inline transforms vs. Decorator transforms
### Expected behavior
This script:
```
import pennylane as qml


def circuit(circuit_param):
    qml.RY(circuit_param, wires=0)
    qml.Hadamard(wires=0)
    qml.T(wires=0)


def noise(noise_param, wires):
    qml.CRX(noise_param, wires=[0,1])
    qml.CNOT(wires=[1,0])


dev = qml.device("default.mixed", wires=2)
noise_param = 0.3


@qml.qnode(dev)
@qml.transforms.insert(noise, noise_param, position="all")
def noisy_circuit(circuit_param):
    circuit(circuit_param)
    return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliY(0)), qml.expval(qml.PauliZ(0))


noisy_circuit(0.4)
```
And this one:
```
import pennylane as qml


def circuit(circuit_param):
    qml.RY(circuit_param, wires=0)
    qml.Hadamard(wires=0)
    qml.T(wires=0)


def noise(noise_param, wires):
    qml.CRX(noise_param, wires=[0,1])
    qml.CNOT(wires=[1,0])


dev = qml.device("default.mixed", wires=2)
noise_param = 0.3


@qml.qnode(dev)
def noisy_circuit(circuit_param):
    qml.transforms.insert(noise, noise_param, position="all")(circuit)(circuit_param)
    return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliY(0)), qml.expval(qml.PauliZ(0))


noisy_circuit(0.4)
```
should both return `array([0.64497588, 0.61505021, 0.40503904])`
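
For completeness, a third spelling of the same circuit, reusing the definitions above and applying the transform to the quantum function once outside the QNode body, should be equivalent. This is a sketch under the assumption that `qml.transforms.insert` behaves as an ordinary qfunc transform here; the printed value is the expected result quoted above, not independently re-verified.

```python
noisy_qfunc = qml.transforms.insert(noise, noise_param, position="all")(circuit)


@qml.qnode(dev)
def noisy_circuit_alt(circuit_param):
    noisy_qfunc(circuit_param)
    return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliY(0)), qml.expval(qml.PauliZ(0))


print(noisy_circuit_alt(0.4))  # expected: array([0.64497588, 0.61505021, 0.40503904])
```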
### Actual behavior
The second script works, but the first one returns errors.
### Additional information
I know this is an issue with how tapes are handled in either case. Please reconsider how this interacts with users.
### Source code
```python
import pennylane as qml


def circuit(circuit_param):
    qml.RY(circuit_param, wires=0)
    qml.Hadamard(wires=0)
    qml.T(wires=0)


def noise(noise_param, wires):
    qml.CRX(noise_param, wires=[0,1])
    qml.CNOT(wires=[1,0])


dev = qml.device("default.mixed", wires=2)
noise_param = 0.3


@qml.qnode(dev)
@qml.transforms.insert(noise, noise_param, position="all")
def noisy_circuit(circuit_param):
    circuit(circuit_param)
    return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliY(0)), qml.expval(qml.PauliZ(0))


noisy_circuit(0.4)
```
### Tracebacks
```shell
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
~\anaconda3\lib\site-packages\pennylane\tape\tape.py in expand_tape(tape, depth, stop_at, expand_measurements)
156 try:
--> 157 rotations, diag_obs = qml.grouping.diagonalize_qwc_pauli_words(
158 tape._obs_sharing_wires
~\anaconda3\lib\site-packages\pennylane\grouping\transformations.py in diagonalize_qwc_pauli_words(qwc_grouping)
139 if not are_pauli_words_qwc(qwc_grouping):
--> 140 raise ValueError("The list of Pauli words are not qubit-wise commuting.")
141
ValueError: The list of Pauli words are not qubit-wise commuting.
The above exception was the direct cause of the following exception:
QuantumFunctionError Traceback (most recent call last)
~\anaconda3\lib\site-packages\pennylane\transforms\qfunc_transforms.py in __call__(self, tape, *args, **kwargs)
169 with tape_class() as new_tape:
--> 170 self.transform_fn(tape, *args, **kwargs)
171
~\anaconda3\lib\site-packages\pennylane\transforms\insert_ops.py in insert(circuit, op, op_args, position, before)
212 # TODO: change this to be cleaner and more robust
--> 213 circuit = circuit.expand(
214 stop_at=lambda op: not hasattr(qml.templates, op.name) and not isinstance(op, Adjoint)
~\anaconda3\lib\site-packages\pennylane\tape\tape.py in expand(self, depth, stop_at, expand_measurements)
686 """
--> 687 new_tape = expand_tape(
688 self, depth=depth, stop_at=stop_at, expand_measurements=expand_measurements
~\anaconda3\lib\site-packages\pennylane\tape\tape.py in expand_tape(tape, depth, stop_at, expand_measurements)
172
--> 173 raise qml.QuantumFunctionError(
174 "Only observables that are qubit-wise commuting "
QuantumFunctionError: Only observables that are qubit-wise commuting Pauli words can be returned on the same wire, some of the following measurements do not commute:
[expval(PauliX(wires=[0])), expval(PauliY(wires=[0])), expval(PauliZ(wires=[0]))]
During handling of the above exception, another exception occurred:
IndexError Traceback (most recent call last)
~\anaconda3\lib\site-packages\pennylane\qnode.py in construct(self, args, kwargs)
525 with self.tape:
--> 526 self._qfunc_output = self.func(*args, **kwargs)
527 self._tape._qfunc_output = self._qfunc_output
~\anaconda3\lib\site-packages\pennylane\transforms\qfunc_transforms.py in internal_wrapper(*args, **kwargs)
201 tape = make_tape(fn)(*args, **kwargs)
--> 202 tape = tape_transform(tape, *transform_args, **transform_kwargs)
203
~\anaconda3\lib\site-packages\pennylane\transforms\qfunc_transforms.py in __call__(self, tape, *args, **kwargs)
169 with tape_class() as new_tape:
--> 170 self.transform_fn(tape, *args, **kwargs)
171
~\anaconda3\lib\site-packages\pennylane\tape\tape.py in __exit__(self, exception_type, exception_value, traceback)
389 try:
--> 390 super().__exit__(exception_type, exception_value, traceback)
391 # After other optimizations in #2963, #2986 and follow-up work, we should check whether
~\anaconda3\lib\site-packages\pennylane\queuing.py in __exit__(self, exception_type, exception_value, traceback)
127 """Remove this instance from the global list of active contexts."""
--> 128 QueuingContext._active_contexts.pop()
129
IndexError: pop from an empty deque
During handling of the above exception, another exception occurred:
IndexError Traceback (most recent call last)
<ipython-input-33-37d3113aba14> in <module>
20 return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliY(0)), qml.expval(qml.PauliZ(0))
21
---> 22 noisy_circuit(0.4)
~\anaconda3\lib\site-packages\pennylane\qnode.py in __call__(self, *args, **kwargs)
609
610 # construct the tape
--> 611 self.construct(args, kwargs)
612
613 cache = self.execute_kwargs.get("cache", False)
~\anaconda3\lib\site-packages\pennylane\qnode.py in construct(self, args, kwargs)
524
525 with self.tape:
--> 526 self._qfunc_output = self.func(*args, **kwargs)
527 self._tape._qfunc_output = self._qfunc_output
528
~\anaconda3\lib\site-packages\pennylane\tape\tape.py in __exit__(self, exception_type, exception_value, traceback)
388 def __exit__(self, exception_type, exception_value, traceback):
389 try:
--> 390 super().__exit__(exception_type, exception_value, traceback)
391 # After other optimizations in #2963, #2986 and follow-up work, we should check whether
392 # calling `_process_queue` only if there is no `exception_type` saves time. This would
~\anaconda3\lib\site-packages\pennylane\queuing.py in __exit__(self, exception_type, exception_value, traceback)
126 def __exit__(self, exception_type, exception_value, traceback):
127 """Remove this instance from the global list of active contexts."""
--> 128 QueuingContext._active_contexts.pop()
129
130 @abc.abstractmethod
IndexError: pop from an empty deque
```
### System information
```shell
Name: PennyLane
Version: 0.26.0
Summary: PennyLane is a Python quantum machine learning library by Xanadu Inc.
Home-page: https://github.com/XanaduAI/pennylane
Author: None
Author-email: None
License: Apache License 2.0
Location: c:\users\alvaro\anaconda3\lib\site-packages
Requires: numpy, appdirs, cachetools, retworkx, toml, autograd, semantic-version, pennylane-lightning, autoray, scipy, networkx
Required-by: PennyLane-SF, pennylane-qulacs, PennyLane-Qchem, PennyLane-Lightning
Platform info: Windows-10-10.0.19041-SP0
Python version: 3.8.8
Numpy version: 1.22.1
Scipy version: 1.6.2
Installed devices:
- default.gaussian (PennyLane-0.26.0)
- default.mixed (PennyLane-0.26.0)
- default.qubit (PennyLane-0.26.0)
- default.qubit.autograd (PennyLane-0.26.0)
- default.qubit.jax (PennyLane-0.26.0)
- default.qubit.tf (PennyLane-0.26.0)
- default.qubit.torch (PennyLane-0.26.0)
- default.qutrit (PennyLane-0.26.0)
- strawberryfields.fock (PennyLane-SF-0.19.0)
- strawberryfields.gaussian (PennyLane-SF-0.19.0)
- strawberryfields.gbs (PennyLane-SF-0.19.0)
- strawberryfields.remote (PennyLane-SF-0.19.0)
- strawberryfields.tf (PennyLane-SF-0.19.0)
- qulacs.simulator (pennylane-qulacs-0.16.0)
- lightning.qubit (PennyLane-Lightning-0.26.0)
```
### Existing GitHub issues
- [X] I have searched existing GitHub issues to make sure the issue does not already exist.
--- END ISSUE ---
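The final `IndexError: pop from an empty deque` in the traceback above is a secondary failure: it is raised while the tape's `__exit__` unwinds the original `QuantumFunctionError`, which suggests the global context stack was left in an inconsistent state. A minimal, PennyLane-independent sketch of that failure pattern (all names below are illustrative, not PennyLane APIs):

```python
from collections import deque

_stack = deque()

class Recorder:
    """Toy stand-in for a recording context; not PennyLane code."""

    def __enter__(self):
        _stack.append(self)
        return self

    def __exit__(self, *exc_info):
        # Unconditional pop: if the stack was cleared while the body raised,
        # this masks the original error with an IndexError.
        _stack.pop()

try:
    with Recorder():
        _stack.clear()              # simulates state not being restored after a failure
        raise ValueError("original error")
except IndexError as err:
    print(err)                      # "pop from an empty deque"; the ValueError is chained
```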
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pennylane/queuing.py`
Content:
```
1 # Copyright 2018-2021 Xanadu Quantum Technologies Inc.
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """
15 This module contains the :class:`QueuingManager`.
16 """
17
18 import copy
19 from collections import OrderedDict
20 from contextlib import contextmanager
21 from warnings import warn
22
23
24 def __getattr__(name):
25 # for more information on overwriting `__getattr__`, see https://peps.python.org/pep-0562/
26 if name == "QueuingContext":
27 warn("QueuingContext has been renamed qml.queuing.QueuingManager.", UserWarning)
28 return QueuingManager
29 try:
30 return globals()[name]
31 except KeyError as e:
32 raise AttributeError from e
33
34
35 class QueuingError(Exception):
36 """Exception that is raised when there is a queuing error"""
37
38
39 class QueuingManager:
40 """Singleton global entry point for managing active recording contexts.
41
42 This class consists purely of class methods. It both maintains a list of
43 recording queues and allows communication with the currently active object.
44
45 Queueable objects, like :class:`~.operation.Operator` and :class:`~.measurements.MeasurementProcess`, should
46 use ``QueuingManager`` as an entry point for accessing the active queue.
47
48 See also: :class:`~.AnnotatedQueue`, :class:`~.tape.QuantumTape`, :meth:`~.operation.Operator.queue`.
49
50 Recording queues, such as :class:`~.AnnotatedQueue`, must define the following methods:
51
52 * ``append``: define an action to perform when an object append
53 request is made.
54
55 * ``remove``: define an action to perform when an object removal request is made.
56
57 * ``get_info``: retrieve the object's metadata
58
59 * ``update_info``: Update an object's metadata if it is already queued.
60
61 To start and end recording, the recording queue can use the :meth:`add_active_queue` and
62 :meth:`remove_active_queue` methods.
63
64 """
65
66 _active_contexts = []
67 """The stack of contexts that are currently active."""
68
69 @classmethod
70 def add_active_queue(cls, queue):
71 """Makes a queue the currently active recording context."""
72 cls._active_contexts.append(queue)
73
74 @classmethod
75 def remove_active_queue(cls):
76 """Ends recording on the currently active recording queue."""
77 return cls._active_contexts.pop()
78
79 @classmethod
80 def recording(cls):
81 """Whether a queuing context is active and recording operations"""
82 return bool(cls._active_contexts)
83
84 @classmethod
85 def active_context(cls):
86 """Returns the currently active queuing context."""
87 return cls._active_contexts[-1] if cls.recording() else None
88
89 @classmethod
90 @contextmanager
91 def stop_recording(cls):
92 """A context manager and decorator to ensure that contained logic is non-recordable
93 or non-queueable within a QNode or quantum tape context.
94
95 **Example:**
96
97 Consider the function:
98
99 >>> def list_of_ops(params, wires):
100 ... return [
101 ... qml.RX(params[0], wires=wires),
102 ... qml.RY(params[1], wires=wires),
103 ... qml.RZ(params[2], wires=wires)
104 ... ]
105
106 If executed in a recording context, the operations constructed in the function will be queued:
107
108 >>> dev = qml.device("default.qubit", wires=2)
109 >>> @qml.qnode(dev)
110 ... def circuit(params):
111 ... ops = list_of_ops(params, wires=0)
112 ... qml.apply(ops[-1]) # apply the last operation from the list again
113 ... return qml.expval(qml.PauliZ(0))
114 >>> print(qml.draw(circuit)([1, 2, 3]))
115 0: ──RX(1.00)──RY(2.00)──RZ(3.00)──RZ(3.00)─┤ <Z>
116
117 Using the ``stop_recording`` context manager, all logic contained inside is not queued or recorded.
118
119 >>> @qml.qnode(dev)
120 ... def circuit(params):
121 ... with qml.QueuingManager.stop_recording():
122 ... ops = list_of_ops(params, wires=0)
123 ... qml.apply(ops[-1])
124 ... return qml.expval(qml.PauliZ(0))
125 >>> print(qml.draw(circuit)([1, 2, 3]))
126 0: ──RZ(3.00)─┤ <Z>
127
128 The context manager can also be used as a decorator on a function:
129
130 >>> @qml.QueuingManager.stop_recording()
131 ... def list_of_ops(params, wires):
132 ... return [
133 ... qml.RX(params[0], wires=wires),
134 ... qml.RY(params[1], wires=wires),
135 ... qml.RZ(params[2], wires=wires)
136 ... ]
137 >>> @qml.qnode(dev)
138 ... def circuit(params):
139 ... ops = list_of_ops(params, wires=0)
140 ... qml.apply(ops[-1])
141 ... return qml.expval(qml.PauliZ(0))
142 >>> print(qml.draw(circuit)([1, 2, 3]))
143 0: ──RZ(3.00)─┤ <Z>
144
145 """
146 previously_active_contexts = cls._active_contexts
147 cls._active_contexts = []
148 yield
149 cls._active_contexts = previously_active_contexts
150
151 @classmethod
152 def append(cls, obj, **kwargs):
153 """Append an object to the queue(s).
154
155 Args:
156 obj: the object to be appended
157 """
158 if cls.recording():
159 cls.active_context().append(obj, **kwargs)
160
161 @classmethod
162 def remove(cls, obj):
163 """Remove an object from the queue(s) if it is in the queue(s).
164
165 Args:
166 obj: the object to be removed
167 """
168 if cls.recording():
169 cls.active_context().remove(obj)
170
171 @classmethod
172 def update_info(cls, obj, **kwargs):
173 """Updates information of an object in the active queue if it is already in the queue.
174
175 Args:
176 obj: the object with metadata to be updated
177 """
178 if cls.recording():
179 cls.active_context().update_info(obj, **kwargs)
180
181 @classmethod
182 def safe_update_info(cls, obj, **kwargs):
183 """Updates information of an object in the active queue if it is already in the queue.
184
185 Args:
186 obj: the object with metadata to be updated
187 """
188 warn(
189 "QueuingManager.safe_update_info is deprecated."
190 "It's behavior has been moved to `update_info`.",
191 UserWarning,
192 )
193 cls.update_info(obj, **kwargs)
194
195 @classmethod
196 def get_info(cls, obj):
197 """Retrieves information of an object in the active queue.
198
199 Args:
200 obj: the object with metadata to be retrieved
201
202 Returns:
203 object metadata
204 """
205 return cls.active_context().get_info(obj) if cls.recording() else None
206
207
208 class AnnotatedQueue:
209 """Lightweight class that maintains a basic queue of operations, in addition
210 to metadata annotations."""
211
212 def __init__(self):
213 self._queue = OrderedDict()
214
215 def __enter__(self):
216 """Adds this instance to the global list of active contexts.
217
218 Returns:
219 AnnotatedQueue: this instance
220 """
221 QueuingManager.add_active_queue(self)
222
223 return self
224
225 def __exit__(self, exception_type, exception_value, traceback):
226 """Remove this instance from the global list of active contexts."""
227 QueuingManager.remove_active_queue()
228
229 def append(self, obj, **kwargs):
230 """Append ``obj`` into the queue with ``kwargs`` metadata."""
231 self._queue[obj] = kwargs
232
233 def remove(self, obj):
234 """Remove ``obj`` from the queue. Raises ``KeyError`` if ``obj`` is not already in the queue."""
235 del self._queue[obj]
236
237 def update_info(self, obj, **kwargs):
238 """Update ``obj``'s metadata with ``kwargs`` if it exists in the queue."""
239 if obj in self._queue:
240 self._queue[obj].update(kwargs)
241
242 def safe_update_info(self, obj, **kwargs):
243 """Update ``obj``'s metadata with ``kwargs`` if it exists in the queue."""
244 warn(
245 "AnnotatedQueue.safe_update_info is deprecated."
246 "It's behavior has been moved to `update_info`.",
247 UserWarning,
248 )
249 self.update_info(obj, **kwargs)
250
251 def get_info(self, obj):
252 """Retrieve the metadata for ``obj``. Raises a ``QueuingError`` if obj is not in the queue."""
253 if obj not in self._queue:
254 raise QueuingError(f"Object {obj} not in the queue.")
255
256 return self._queue[obj]
257
258 @property
259 def queue(self):
260 """Returns a list of objects in the annotated queue"""
261 return list(self._queue.keys())
262
263
264 def apply(op, context=QueuingManager):
265 """Apply an instantiated operator or measurement to a queuing context.
266
267 Args:
268 op (.Operator or .MeasurementProcess): the operator or measurement to apply/queue
269 context (.QueuingManager): The queuing context to queue the operator to.
270 Note that if no context is specified, the operator is
271 applied to the currently active queuing context.
272 Returns:
273 .Operator or .MeasurementProcess: the input operator is returned for convenience
274
275 **Example**
276
277 In PennyLane, **operations and measurements are 'queued' or added to a circuit
278 when they are instantiated**.
279
280 The ``apply`` function can be used to add operations that might have
281 already been instantiated elsewhere to the QNode:
282
283 .. code-block:: python
284
285 op = qml.RX(0.4, wires=0)
286 dev = qml.device("default.qubit", wires=2)
287
288 @qml.qnode(dev)
289 def circuit(x):
290 qml.RY(x, wires=0) # applied during instantiation
291 qml.apply(op) # manually applied
292 return qml.expval(qml.PauliZ(0))
293
294 >>> print(qml.draw(circuit)(0.6))
295 0: ──RY(0.6)──RX(0.4)──┤ ⟨Z⟩
296
297 It can also be used to apply functions repeatedly:
298
299 .. code-block:: python
300
301 @qml.qnode(dev)
302 def circuit(x):
303 qml.apply(op)
304 qml.RY(x, wires=0)
305 qml.apply(op)
306 return qml.expval(qml.PauliZ(0))
307
308 >>> print(qml.draw(circuit)(0.6))
309 0: ──RX(0.4)──RY(0.6)──RX(0.4)──┤ ⟨Z⟩
310
311 .. details::
312 :title: Usage Details
313
314 Instantiated measurements can also be applied to queuing contexts
315 using ``apply``:
316
317 .. code-block:: python
318
319 meas = qml.expval(qml.PauliZ(0) @ qml.PauliY(1))
320 dev = qml.device("default.qubit", wires=2)
321
322 @qml.qnode(dev)
323 def circuit(x):
324 qml.RY(x, wires=0)
325 qml.CNOT(wires=[0, 1])
326 return qml.apply(meas)
327
328 >>> print(qml.draw(circuit)(0.6))
329 0: ──RY(0.6)──╭●──╭┤ ⟨Z ⊗ Y⟩
330 1: ───────────╰X──╰┤ ⟨Z ⊗ Y⟩
331
332 By default, ``apply`` will queue operators to the currently
333 active queuing context.
334
335 When working with low-level queuing contexts such as quantum tapes,
336 the desired context to queue the operation to can be explicitly
337 passed:
338
339 .. code-block:: python
340
341 with qml.tape.QuantumTape() as tape1:
342 qml.Hadamard(wires=1)
343
344 with qml.tape.QuantumTape() as tape2:
345 # Due to the nesting behaviour of queuing contexts,
346 # tape2 will be queued to tape1.
347
348 # The following PauliX operation will be queued
349 # to the active queuing context, tape2, during instantiation.
350 op1 = qml.PauliX(wires=0)
351
352 # We can use qml.apply to apply the same operation to tape1
353 # without leaving the tape2 context.
354 qml.apply(op1, context=tape1)
355
356 qml.RZ(0.2, wires=0)
357
358 qml.CNOT(wires=[0, 1])
359
360 >>> tape1.operations
361 [Hadamard(wires=[1]), <QuantumTape: wires=[0], params=1>, PauliX(wires=[0]), CNOT(wires=[0, 1])]
362 >>> tape2.operations
363 [PauliX(wires=[0]), RZ(0.2, wires=[0])]
364 """
365 if not QueuingManager.recording():
366 raise RuntimeError("No queuing context available to append operation to.")
367
368 if op in getattr(context, "queue", QueuingManager.active_context().queue):
369 # Queuing contexts can only contain unique objects.
370 # If the object to be queued already exists, copy it.
371 op = copy.copy(op)
372
373 if hasattr(op, "queue"):
374 # operator provides its own logic for queuing
375 op.queue(context=context)
376 else:
377 # append the operator directly to the relevant queuing context
378 context.append(op)
379
380 return op
381
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pennylane/queuing.py b/pennylane/queuing.py
--- a/pennylane/queuing.py
+++ b/pennylane/queuing.py
@@ -145,7 +145,11 @@
"""
previously_active_contexts = cls._active_contexts
cls._active_contexts = []
- yield
+ try:
+ yield
+ except Exception as e:
+ cls._active_contexts = previously_active_contexts
+ raise e
cls._active_contexts = previously_active_contexts
@classmethod
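The patch above restores `_active_contexts` on the exception path of `stop_recording` before re-raising, so a failure inside the block no longer leaves the stack empty when outer `__exit__` handlers run. A `try`/`finally` gives the same guarantee more compactly; a self-contained sketch of that alternative (illustrative only, not the applied patch):

```python
from contextlib import contextmanager

class Manager:
    """Illustrative stand-in for the queuing manager, not the patched class."""

    _active_contexts = []

    @classmethod
    @contextmanager
    def stop_recording(cls):
        previous = cls._active_contexts
        cls._active_contexts = []
        try:
            yield
        finally:
            # Runs on both the normal and the exception path, so the stack
            # is always restored before any exception keeps propagating.
            cls._active_contexts = previous
```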
| {"golden_diff": "diff --git a/pennylane/queuing.py b/pennylane/queuing.py\n--- a/pennylane/queuing.py\n+++ b/pennylane/queuing.py\n@@ -145,7 +145,11 @@\n \"\"\"\n previously_active_contexts = cls._active_contexts\n cls._active_contexts = []\n- yield\n+ try:\n+ yield\n+ except Exception as e:\n+ cls._active_contexts = previously_active_contexts\n+ raise e\n cls._active_contexts = previously_active_contexts\n \n @classmethod\n", "issue": "[BUG] Inline transforms vs. Decorator transforms\n### Expected behavior\n\nThis script:\r\n\r\n```\r\ndef circuit(circuit_param):\r\n qml.RY(circuit_param, wires=0)\r\n qml.Hadamard(wires=0)\r\n qml.T(wires=0)\r\n\r\ndef noise(noise_param, wires):\r\n qml.CRX(noise_param, wires=[0,1])\r\n qml.CNOT(wires=[1,0])\r\n \r\ndev = qml.device(\"default.mixed\", wires=2)\r\n\r\nnoise_param = 0.3\r\n \r\[email protected](dev)\r\[email protected](noise, noise_param, position=\"all\")\r\ndef noisy_circuit(circuit_param):\r\n \r\n circuit(circuit_param)\r\n \r\n return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliY(0)), qml.expval(qml.PauliZ(0))\r\n\r\nnoisy_circuit(0.4)\r\n```\r\nAnd this one\r\n```\r\ndef circuit(circuit_param):\r\n qml.RY(circuit_param, wires=0)\r\n qml.Hadamard(wires=0)\r\n qml.T(wires=0)\r\n\r\ndef noise(noise_param, wires):\r\n qml.CRX(noise_param, wires=[0,1])\r\n qml.CNOT(wires=[1,0])\r\n \r\ndev = qml.device(\"default.mixed\", wires=2)\r\n\r\nnoise_param = 0.3\r\n \r\[email protected](dev)\r\ndef noisy_circuit(circuit_param):\r\n \r\n qml.transforms.insert(noise, noise_param, position=\"all\")(circuit)(circuit_param)\r\n \r\n return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliY(0)), qml.expval(qml.PauliZ(0))\r\n\r\nnoisy_circuit(0.4)\r\n```\r\nshould both return `array([0.64497588, 0.61505021, 0.40503904])`\n\n### Actual behavior\n\nThe second script works, but the first one returns errors.\n\n### Additional information\n\nI know this is an issue with how tapes are handled in either case. 
Please reconsider how this interacts with users.\n\n### Source code\n\n```shell\ndef circuit(circuit_param):\r\n qml.RY(circuit_param, wires=0)\r\n qml.Hadamard(wires=0)\r\n qml.T(wires=0)\r\n\r\ndef noise(noise_param, wires):\r\n qml.CRX(noise_param, wires=[0,1])\r\n qml.CNOT(wires=[1,0])\r\n \r\ndev = qml.device(\"default.mixed\", wires=2)\r\n\r\nnoise_param = 0.3\r\n \r\[email protected](dev)\r\[email protected](noise, noise_param, position=\"all\")\r\ndef noisy_circuit(circuit_param):\r\n \r\n circuit(circuit_param)\r\n \r\n return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliY(0)), qml.expval(qml.PauliZ(0))\r\n\r\nnoisy_circuit(0.4)\n```\n\n\n### Tracebacks\n\n```shell\n---------------------------------------------------------------------------\r\nValueError Traceback (most recent call last)\r\n~\\anaconda3\\lib\\site-packages\\pennylane\\tape\\tape.py in expand_tape(tape, depth, stop_at, expand_measurements)\r\n 156 try:\r\n--> 157 rotations, diag_obs = qml.grouping.diagonalize_qwc_pauli_words(\r\n 158 tape._obs_sharing_wires\r\n\r\n~\\anaconda3\\lib\\site-packages\\pennylane\\grouping\\transformations.py in diagonalize_qwc_pauli_words(qwc_grouping)\r\n 139 if not are_pauli_words_qwc(qwc_grouping):\r\n--> 140 raise ValueError(\"The list of Pauli words are not qubit-wise commuting.\")\r\n 141 \r\n\r\nValueError: The list of Pauli words are not qubit-wise commuting.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nQuantumFunctionError Traceback (most recent call last)\r\n~\\anaconda3\\lib\\site-packages\\pennylane\\transforms\\qfunc_transforms.py in __call__(self, tape, *args, **kwargs)\r\n 169 with tape_class() as new_tape:\r\n--> 170 self.transform_fn(tape, *args, **kwargs)\r\n 171 \r\n\r\n~\\anaconda3\\lib\\site-packages\\pennylane\\transforms\\insert_ops.py in insert(circuit, op, op_args, position, before)\r\n 212 # TODO: change this to be cleaner and more robust\r\n--> 213 circuit = circuit.expand(\r\n 214 stop_at=lambda op: not hasattr(qml.templates, op.name) and not isinstance(op, Adjoint)\r\n\r\n~\\anaconda3\\lib\\site-packages\\pennylane\\tape\\tape.py in expand(self, depth, stop_at, expand_measurements)\r\n 686 \"\"\"\r\n--> 687 new_tape = expand_tape(\r\n 688 self, depth=depth, stop_at=stop_at, expand_measurements=expand_measurements\r\n\r\n~\\anaconda3\\lib\\site-packages\\pennylane\\tape\\tape.py in expand_tape(tape, depth, stop_at, expand_measurements)\r\n 172 \r\n--> 173 raise qml.QuantumFunctionError(\r\n 174 \"Only observables that are qubit-wise commuting \"\r\n\r\nQuantumFunctionError: Only observables that are qubit-wise commuting Pauli words can be returned on the same wire, some of the following measurements do not commute:\r\n[expval(PauliX(wires=[0])), expval(PauliY(wires=[0])), expval(PauliZ(wires=[0]))]\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nIndexError Traceback (most recent call last)\r\n~\\anaconda3\\lib\\site-packages\\pennylane\\qnode.py in construct(self, args, kwargs)\r\n 525 with self.tape:\r\n--> 526 self._qfunc_output = self.func(*args, **kwargs)\r\n 527 self._tape._qfunc_output = self._qfunc_output\r\n\r\n~\\anaconda3\\lib\\site-packages\\pennylane\\transforms\\qfunc_transforms.py in internal_wrapper(*args, **kwargs)\r\n 201 tape = make_tape(fn)(*args, **kwargs)\r\n--> 202 tape = tape_transform(tape, *transform_args, **transform_kwargs)\r\n 203 \r\n\r\n~\\anaconda3\\lib\\site-packages\\pennylane\\transforms\\qfunc_transforms.py in __call__(self, tape, *args, **kwargs)\r\n 
169 with tape_class() as new_tape:\r\n--> 170 self.transform_fn(tape, *args, **kwargs)\r\n 171 \r\n\r\n~\\anaconda3\\lib\\site-packages\\pennylane\\tape\\tape.py in __exit__(self, exception_type, exception_value, traceback)\r\n 389 try:\r\n--> 390 super().__exit__(exception_type, exception_value, traceback)\r\n 391 # After other optimizations in #2963, #2986 and follow-up work, we should check whether\r\n\r\n~\\anaconda3\\lib\\site-packages\\pennylane\\queuing.py in __exit__(self, exception_type, exception_value, traceback)\r\n 127 \"\"\"Remove this instance from the global list of active contexts.\"\"\"\r\n--> 128 QueuingContext._active_contexts.pop()\r\n 129 \r\n\r\nIndexError: pop from an empty deque\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nIndexError Traceback (most recent call last)\r\n<ipython-input-33-37d3113aba14> in <module>\r\n 20 return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliY(0)), qml.expval(qml.PauliZ(0))\r\n 21 \r\n---> 22 noisy_circuit(0.4)\r\n\r\n~\\anaconda3\\lib\\site-packages\\pennylane\\qnode.py in __call__(self, *args, **kwargs)\r\n 609 \r\n 610 # construct the tape\r\n--> 611 self.construct(args, kwargs)\r\n 612 \r\n 613 cache = self.execute_kwargs.get(\"cache\", False)\r\n\r\n~\\anaconda3\\lib\\site-packages\\pennylane\\qnode.py in construct(self, args, kwargs)\r\n 524 \r\n 525 with self.tape:\r\n--> 526 self._qfunc_output = self.func(*args, **kwargs)\r\n 527 self._tape._qfunc_output = self._qfunc_output\r\n 528 \r\n\r\n~\\anaconda3\\lib\\site-packages\\pennylane\\tape\\tape.py in __exit__(self, exception_type, exception_value, traceback)\r\n 388 def __exit__(self, exception_type, exception_value, traceback):\r\n 389 try:\r\n--> 390 super().__exit__(exception_type, exception_value, traceback)\r\n 391 # After other optimizations in #2963, #2986 and follow-up work, we should check whether\r\n 392 # calling `_process_queue` only if there is no `exception_type` saves time. 
This would\r\n\r\n~\\anaconda3\\lib\\site-packages\\pennylane\\queuing.py in __exit__(self, exception_type, exception_value, traceback)\r\n 126 def __exit__(self, exception_type, exception_value, traceback):\r\n 127 \"\"\"Remove this instance from the global list of active contexts.\"\"\"\r\n--> 128 QueuingContext._active_contexts.pop()\r\n 129 \r\n 130 @abc.abstractmethod\r\n\r\nIndexError: pop from an empty deque\n```\n\n\n### System information\n\n```shell\nName: PennyLane\r\nVersion: 0.26.0\r\nSummary: PennyLane is a Python quantum machine learning library by Xanadu Inc.\r\nHome-page: https://github.com/XanaduAI/pennylane\r\nAuthor: None\r\nAuthor-email: None\r\nLicense: Apache License 2.0\r\nLocation: c:\\users\\alvaro\\anaconda3\\lib\\site-packages\r\nRequires: numpy, appdirs, cachetools, retworkx, toml, autograd, semantic-version, pennylane-lightning, autoray, scipy, networkx\r\nRequired-by: PennyLane-SF, pennylane-qulacs, PennyLane-Qchem, PennyLane-Lightning\r\n\r\nPlatform info: Windows-10-10.0.19041-SP0\r\nPython version: 3.8.8\r\nNumpy version: 1.22.1\r\nScipy version: 1.6.2\r\nInstalled devices:\r\n- default.gaussian (PennyLane-0.26.0)\r\n- default.mixed (PennyLane-0.26.0)\r\n- default.qubit (PennyLane-0.26.0)\r\n- default.qubit.autograd (PennyLane-0.26.0)\r\n- default.qubit.jax (PennyLane-0.26.0)\r\n- default.qubit.tf (PennyLane-0.26.0)\r\n- default.qubit.torch (PennyLane-0.26.0)\r\n- default.qutrit (PennyLane-0.26.0)\r\n- strawberryfields.fock (PennyLane-SF-0.19.0)\r\n- strawberryfields.gaussian (PennyLane-SF-0.19.0)\r\n- strawberryfields.gbs (PennyLane-SF-0.19.0)\r\n- strawberryfields.remote (PennyLane-SF-0.19.0)\r\n- strawberryfields.tf (PennyLane-SF-0.19.0)\r\n- qulacs.simulator (pennylane-qulacs-0.16.0)\r\n- lightning.qubit (PennyLane-Lightning-0.26.0)\n```\n\n\n### Existing GitHub issues\n\n- [X] I have searched existing GitHub issues to make sure the issue does not already exist.\n", "before_files": [{"content": "# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis module contains the :class:`QueuingManager`.\n\"\"\"\n\nimport copy\nfrom collections import OrderedDict\nfrom contextlib import contextmanager\nfrom warnings import warn\n\n\ndef __getattr__(name):\n # for more information on overwriting `__getattr__`, see https://peps.python.org/pep-0562/\n if name == \"QueuingContext\":\n warn(\"QueuingContext has been renamed qml.queuing.QueuingManager.\", UserWarning)\n return QueuingManager\n try:\n return globals()[name]\n except KeyError as e:\n raise AttributeError from e\n\n\nclass QueuingError(Exception):\n \"\"\"Exception that is raised when there is a queuing error\"\"\"\n\n\nclass QueuingManager:\n \"\"\"Singleton global entry point for managing active recording contexts.\n\n This class consists purely of class methods. 
It both maintains a list of\n recording queues and allows communication with the currently active object.\n\n Queueable objects, like :class:`~.operation.Operator` and :class:`~.measurements.MeasurementProcess`, should\n use ``QueuingManager`` as an entry point for accessing the active queue.\n\n See also: :class:`~.AnnotatedQueue`, :class:`~.tape.QuantumTape`, :meth:`~.operation.Operator.queue`.\n\n Recording queues, such as :class:`~.AnnotatedQueue`, must define the following methods:\n\n * ``append``: define an action to perform when an object append\n request is made.\n\n * ``remove``: define an action to perform when an object removal request is made.\n\n * ``get_info``: retrieve the object's metadata\n\n * ``update_info``: Update an object's metadata if it is already queued.\n\n To start and end recording, the recording queue can use the :meth:`add_active_queue` and\n :meth:`remove_active_queue` methods.\n\n \"\"\"\n\n _active_contexts = []\n \"\"\"The stack of contexts that are currently active.\"\"\"\n\n @classmethod\n def add_active_queue(cls, queue):\n \"\"\"Makes a queue the currently active recording context.\"\"\"\n cls._active_contexts.append(queue)\n\n @classmethod\n def remove_active_queue(cls):\n \"\"\"Ends recording on the currently active recording queue.\"\"\"\n return cls._active_contexts.pop()\n\n @classmethod\n def recording(cls):\n \"\"\"Whether a queuing context is active and recording operations\"\"\"\n return bool(cls._active_contexts)\n\n @classmethod\n def active_context(cls):\n \"\"\"Returns the currently active queuing context.\"\"\"\n return cls._active_contexts[-1] if cls.recording() else None\n\n @classmethod\n @contextmanager\n def stop_recording(cls):\n \"\"\"A context manager and decorator to ensure that contained logic is non-recordable\n or non-queueable within a QNode or quantum tape context.\n\n **Example:**\n\n Consider the function:\n\n >>> def list_of_ops(params, wires):\n ... return [\n ... qml.RX(params[0], wires=wires),\n ... qml.RY(params[1], wires=wires),\n ... qml.RZ(params[2], wires=wires)\n ... ]\n\n If executed in a recording context, the operations constructed in the function will be queued:\n\n >>> dev = qml.device(\"default.qubit\", wires=2)\n >>> @qml.qnode(dev)\n ... def circuit(params):\n ... ops = list_of_ops(params, wires=0)\n ... qml.apply(ops[-1]) # apply the last operation from the list again\n ... return qml.expval(qml.PauliZ(0))\n >>> print(qml.draw(circuit)([1, 2, 3]))\n 0: \u2500\u2500RX(1.00)\u2500\u2500RY(2.00)\u2500\u2500RZ(3.00)\u2500\u2500RZ(3.00)\u2500\u2524 <Z>\n\n Using the ``stop_recording`` context manager, all logic contained inside is not queued or recorded.\n\n >>> @qml.qnode(dev)\n ... def circuit(params):\n ... with qml.QueuingManager.stop_recording():\n ... ops = list_of_ops(params, wires=0)\n ... qml.apply(ops[-1])\n ... return qml.expval(qml.PauliZ(0))\n >>> print(qml.draw(circuit)([1, 2, 3]))\n 0: \u2500\u2500RZ(3.00)\u2500\u2524 <Z>\n\n The context manager can also be used as a decorator on a function:\n\n >>> @qml.QueuingManager.stop_recording()\n ... def list_of_ops(params, wires):\n ... return [\n ... qml.RX(params[0], wires=wires),\n ... qml.RY(params[1], wires=wires),\n ... qml.RZ(params[2], wires=wires)\n ... ]\n >>> @qml.qnode(dev)\n ... def circuit(params):\n ... ops = list_of_ops(params, wires=0)\n ... qml.apply(ops[-1])\n ... 
return qml.expval(qml.PauliZ(0))\n >>> print(qml.draw(circuit)([1, 2, 3]))\n 0: \u2500\u2500RZ(3.00)\u2500\u2524 <Z>\n\n \"\"\"\n previously_active_contexts = cls._active_contexts\n cls._active_contexts = []\n yield\n cls._active_contexts = previously_active_contexts\n\n @classmethod\n def append(cls, obj, **kwargs):\n \"\"\"Append an object to the queue(s).\n\n Args:\n obj: the object to be appended\n \"\"\"\n if cls.recording():\n cls.active_context().append(obj, **kwargs)\n\n @classmethod\n def remove(cls, obj):\n \"\"\"Remove an object from the queue(s) if it is in the queue(s).\n\n Args:\n obj: the object to be removed\n \"\"\"\n if cls.recording():\n cls.active_context().remove(obj)\n\n @classmethod\n def update_info(cls, obj, **kwargs):\n \"\"\"Updates information of an object in the active queue if it is already in the queue.\n\n Args:\n obj: the object with metadata to be updated\n \"\"\"\n if cls.recording():\n cls.active_context().update_info(obj, **kwargs)\n\n @classmethod\n def safe_update_info(cls, obj, **kwargs):\n \"\"\"Updates information of an object in the active queue if it is already in the queue.\n\n Args:\n obj: the object with metadata to be updated\n \"\"\"\n warn(\n \"QueuingManager.safe_update_info is deprecated.\"\n \"It's behavior has been moved to `update_info`.\",\n UserWarning,\n )\n cls.update_info(obj, **kwargs)\n\n @classmethod\n def get_info(cls, obj):\n \"\"\"Retrieves information of an object in the active queue.\n\n Args:\n obj: the object with metadata to be retrieved\n\n Returns:\n object metadata\n \"\"\"\n return cls.active_context().get_info(obj) if cls.recording() else None\n\n\nclass AnnotatedQueue:\n \"\"\"Lightweight class that maintains a basic queue of operations, in addition\n to metadata annotations.\"\"\"\n\n def __init__(self):\n self._queue = OrderedDict()\n\n def __enter__(self):\n \"\"\"Adds this instance to the global list of active contexts.\n\n Returns:\n AnnotatedQueue: this instance\n \"\"\"\n QueuingManager.add_active_queue(self)\n\n return self\n\n def __exit__(self, exception_type, exception_value, traceback):\n \"\"\"Remove this instance from the global list of active contexts.\"\"\"\n QueuingManager.remove_active_queue()\n\n def append(self, obj, **kwargs):\n \"\"\"Append ``obj`` into the queue with ``kwargs`` metadata.\"\"\"\n self._queue[obj] = kwargs\n\n def remove(self, obj):\n \"\"\"Remove ``obj`` from the queue. Raises ``KeyError`` if ``obj`` is not already in the queue.\"\"\"\n del self._queue[obj]\n\n def update_info(self, obj, **kwargs):\n \"\"\"Update ``obj``'s metadata with ``kwargs`` if it exists in the queue.\"\"\"\n if obj in self._queue:\n self._queue[obj].update(kwargs)\n\n def safe_update_info(self, obj, **kwargs):\n \"\"\"Update ``obj``'s metadata with ``kwargs`` if it exists in the queue.\"\"\"\n warn(\n \"AnnotatedQueue.safe_update_info is deprecated.\"\n \"It's behavior has been moved to `update_info`.\",\n UserWarning,\n )\n self.update_info(obj, **kwargs)\n\n def get_info(self, obj):\n \"\"\"Retrieve the metadata for ``obj``. 
Raises a ``QueuingError`` if obj is not in the queue.\"\"\"\n if obj not in self._queue:\n raise QueuingError(f\"Object {obj} not in the queue.\")\n\n return self._queue[obj]\n\n @property\n def queue(self):\n \"\"\"Returns a list of objects in the annotated queue\"\"\"\n return list(self._queue.keys())\n\n\ndef apply(op, context=QueuingManager):\n \"\"\"Apply an instantiated operator or measurement to a queuing context.\n\n Args:\n op (.Operator or .MeasurementProcess): the operator or measurement to apply/queue\n context (.QueuingManager): The queuing context to queue the operator to.\n Note that if no context is specified, the operator is\n applied to the currently active queuing context.\n Returns:\n .Operator or .MeasurementProcess: the input operator is returned for convenience\n\n **Example**\n\n In PennyLane, **operations and measurements are 'queued' or added to a circuit\n when they are instantiated**.\n\n The ``apply`` function can be used to add operations that might have\n already been instantiated elsewhere to the QNode:\n\n .. code-block:: python\n\n op = qml.RX(0.4, wires=0)\n dev = qml.device(\"default.qubit\", wires=2)\n\n @qml.qnode(dev)\n def circuit(x):\n qml.RY(x, wires=0) # applied during instantiation\n qml.apply(op) # manually applied\n return qml.expval(qml.PauliZ(0))\n\n >>> print(qml.draw(circuit)(0.6))\n 0: \u2500\u2500RY(0.6)\u2500\u2500RX(0.4)\u2500\u2500\u2524 \u27e8Z\u27e9\n\n It can also be used to apply functions repeatedly:\n\n .. code-block:: python\n\n @qml.qnode(dev)\n def circuit(x):\n qml.apply(op)\n qml.RY(x, wires=0)\n qml.apply(op)\n return qml.expval(qml.PauliZ(0))\n\n >>> print(qml.draw(circuit)(0.6))\n 0: \u2500\u2500RX(0.4)\u2500\u2500RY(0.6)\u2500\u2500RX(0.4)\u2500\u2500\u2524 \u27e8Z\u27e9\n\n .. details::\n :title: Usage Details\n\n Instantiated measurements can also be applied to queuing contexts\n using ``apply``:\n\n .. code-block:: python\n\n meas = qml.expval(qml.PauliZ(0) @ qml.PauliY(1))\n dev = qml.device(\"default.qubit\", wires=2)\n\n @qml.qnode(dev)\n def circuit(x):\n qml.RY(x, wires=0)\n qml.CNOT(wires=[0, 1])\n return qml.apply(meas)\n\n >>> print(qml.draw(circuit)(0.6))\n 0: \u2500\u2500RY(0.6)\u2500\u2500\u256d\u25cf\u2500\u2500\u256d\u2524 \u27e8Z \u2297 Y\u27e9\n 1: \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2570X\u2500\u2500\u2570\u2524 \u27e8Z \u2297 Y\u27e9\n\n By default, ``apply`` will queue operators to the currently\n active queuing context.\n\n When working with low-level queuing contexts such as quantum tapes,\n the desired context to queue the operation to can be explicitly\n passed:\n\n .. 
code-block:: python\n\n with qml.tape.QuantumTape() as tape1:\n qml.Hadamard(wires=1)\n\n with qml.tape.QuantumTape() as tape2:\n # Due to the nesting behaviour of queuing contexts,\n # tape2 will be queued to tape1.\n\n # The following PauliX operation will be queued\n # to the active queuing context, tape2, during instantiation.\n op1 = qml.PauliX(wires=0)\n\n # We can use qml.apply to apply the same operation to tape1\n # without leaving the tape2 context.\n qml.apply(op1, context=tape1)\n\n qml.RZ(0.2, wires=0)\n\n qml.CNOT(wires=[0, 1])\n\n >>> tape1.operations\n [Hadamard(wires=[1]), <QuantumTape: wires=[0], params=1>, PauliX(wires=[0]), CNOT(wires=[0, 1])]\n >>> tape2.operations\n [PauliX(wires=[0]), RZ(0.2, wires=[0])]\n \"\"\"\n if not QueuingManager.recording():\n raise RuntimeError(\"No queuing context available to append operation to.\")\n\n if op in getattr(context, \"queue\", QueuingManager.active_context().queue):\n # Queuing contexts can only contain unique objects.\n # If the object to be queued already exists, copy it.\n op = copy.copy(op)\n\n if hasattr(op, \"queue\"):\n # operator provides its own logic for queuing\n op.queue(context=context)\n else:\n # append the operator directly to the relevant queuing context\n context.append(op)\n\n return op\n", "path": "pennylane/queuing.py"}], "after_files": [{"content": "# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis module contains the :class:`QueuingManager`.\n\"\"\"\n\nimport copy\nfrom collections import OrderedDict\nfrom contextlib import contextmanager\nfrom warnings import warn\n\n\ndef __getattr__(name):\n # for more information on overwriting `__getattr__`, see https://peps.python.org/pep-0562/\n if name == \"QueuingContext\":\n warn(\"QueuingContext has been renamed qml.queuing.QueuingManager.\", UserWarning)\n return QueuingManager\n try:\n return globals()[name]\n except KeyError as e:\n raise AttributeError from e\n\n\nclass QueuingError(Exception):\n \"\"\"Exception that is raised when there is a queuing error\"\"\"\n\n\nclass QueuingManager:\n \"\"\"Singleton global entry point for managing active recording contexts.\n\n This class consists purely of class methods. 
It both maintains a list of\n recording queues and allows communication with the currently active object.\n\n Queueable objects, like :class:`~.operation.Operator` and :class:`~.measurements.MeasurementProcess`, should\n use ``QueuingManager`` as an entry point for accessing the active queue.\n\n See also: :class:`~.AnnotatedQueue`, :class:`~.tape.QuantumTape`, :meth:`~.operation.Operator.queue`.\n\n Recording queues, such as :class:`~.AnnotatedQueue`, must define the following methods:\n\n * ``append``: define an action to perform when an object append\n request is made.\n\n * ``remove``: define an action to perform when an object removal request is made.\n\n * ``get_info``: retrieve the object's metadata\n\n * ``update_info``: Update an object's metadata if it is already queued.\n\n To start and end recording, the recording queue can use the :meth:`add_active_queue` and\n :meth:`remove_active_queue` methods.\n\n \"\"\"\n\n _active_contexts = []\n \"\"\"The stack of contexts that are currently active.\"\"\"\n\n @classmethod\n def add_active_queue(cls, queue):\n \"\"\"Makes a queue the currently active recording context.\"\"\"\n cls._active_contexts.append(queue)\n\n @classmethod\n def remove_active_queue(cls):\n \"\"\"Ends recording on the currently active recording queue.\"\"\"\n return cls._active_contexts.pop()\n\n @classmethod\n def recording(cls):\n \"\"\"Whether a queuing context is active and recording operations\"\"\"\n return bool(cls._active_contexts)\n\n @classmethod\n def active_context(cls):\n \"\"\"Returns the currently active queuing context.\"\"\"\n return cls._active_contexts[-1] if cls.recording() else None\n\n @classmethod\n @contextmanager\n def stop_recording(cls):\n \"\"\"A context manager and decorator to ensure that contained logic is non-recordable\n or non-queueable within a QNode or quantum tape context.\n\n **Example:**\n\n Consider the function:\n\n >>> def list_of_ops(params, wires):\n ... return [\n ... qml.RX(params[0], wires=wires),\n ... qml.RY(params[1], wires=wires),\n ... qml.RZ(params[2], wires=wires)\n ... ]\n\n If executed in a recording context, the operations constructed in the function will be queued:\n\n >>> dev = qml.device(\"default.qubit\", wires=2)\n >>> @qml.qnode(dev)\n ... def circuit(params):\n ... ops = list_of_ops(params, wires=0)\n ... qml.apply(ops[-1]) # apply the last operation from the list again\n ... return qml.expval(qml.PauliZ(0))\n >>> print(qml.draw(circuit)([1, 2, 3]))\n 0: \u2500\u2500RX(1.00)\u2500\u2500RY(2.00)\u2500\u2500RZ(3.00)\u2500\u2500RZ(3.00)\u2500\u2524 <Z>\n\n Using the ``stop_recording`` context manager, all logic contained inside is not queued or recorded.\n\n >>> @qml.qnode(dev)\n ... def circuit(params):\n ... with qml.QueuingManager.stop_recording():\n ... ops = list_of_ops(params, wires=0)\n ... qml.apply(ops[-1])\n ... return qml.expval(qml.PauliZ(0))\n >>> print(qml.draw(circuit)([1, 2, 3]))\n 0: \u2500\u2500RZ(3.00)\u2500\u2524 <Z>\n\n The context manager can also be used as a decorator on a function:\n\n >>> @qml.QueuingManager.stop_recording()\n ... def list_of_ops(params, wires):\n ... return [\n ... qml.RX(params[0], wires=wires),\n ... qml.RY(params[1], wires=wires),\n ... qml.RZ(params[2], wires=wires)\n ... ]\n >>> @qml.qnode(dev)\n ... def circuit(params):\n ... ops = list_of_ops(params, wires=0)\n ... qml.apply(ops[-1])\n ... 
return qml.expval(qml.PauliZ(0))\n >>> print(qml.draw(circuit)([1, 2, 3]))\n 0: \u2500\u2500RZ(3.00)\u2500\u2524 <Z>\n\n \"\"\"\n previously_active_contexts = cls._active_contexts\n cls._active_contexts = []\n try:\n yield\n except Exception as e:\n cls._active_contexts = previously_active_contexts\n raise e\n cls._active_contexts = previously_active_contexts\n\n @classmethod\n def append(cls, obj, **kwargs):\n \"\"\"Append an object to the queue(s).\n\n Args:\n obj: the object to be appended\n \"\"\"\n if cls.recording():\n cls.active_context().append(obj, **kwargs)\n\n @classmethod\n def remove(cls, obj):\n \"\"\"Remove an object from the queue(s) if it is in the queue(s).\n\n Args:\n obj: the object to be removed\n \"\"\"\n if cls.recording():\n cls.active_context().remove(obj)\n\n @classmethod\n def update_info(cls, obj, **kwargs):\n \"\"\"Updates information of an object in the active queue if it is already in the queue.\n\n Args:\n obj: the object with metadata to be updated\n \"\"\"\n if cls.recording():\n cls.active_context().update_info(obj, **kwargs)\n\n @classmethod\n def safe_update_info(cls, obj, **kwargs):\n \"\"\"Updates information of an object in the active queue if it is already in the queue.\n\n Args:\n obj: the object with metadata to be updated\n \"\"\"\n warn(\n \"QueuingManager.safe_update_info is deprecated.\"\n \"It's behavior has been moved to `update_info`.\",\n UserWarning,\n )\n cls.update_info(obj, **kwargs)\n\n @classmethod\n def get_info(cls, obj):\n \"\"\"Retrieves information of an object in the active queue.\n\n Args:\n obj: the object with metadata to be retrieved\n\n Returns:\n object metadata\n \"\"\"\n return cls.active_context().get_info(obj) if cls.recording() else None\n\n\nclass AnnotatedQueue:\n \"\"\"Lightweight class that maintains a basic queue of operations, in addition\n to metadata annotations.\"\"\"\n\n def __init__(self):\n self._queue = OrderedDict()\n\n def __enter__(self):\n \"\"\"Adds this instance to the global list of active contexts.\n\n Returns:\n AnnotatedQueue: this instance\n \"\"\"\n QueuingManager.add_active_queue(self)\n\n return self\n\n def __exit__(self, exception_type, exception_value, traceback):\n \"\"\"Remove this instance from the global list of active contexts.\"\"\"\n QueuingManager.remove_active_queue()\n\n def append(self, obj, **kwargs):\n \"\"\"Append ``obj`` into the queue with ``kwargs`` metadata.\"\"\"\n self._queue[obj] = kwargs\n\n def remove(self, obj):\n \"\"\"Remove ``obj`` from the queue. Raises ``KeyError`` if ``obj`` is not already in the queue.\"\"\"\n del self._queue[obj]\n\n def update_info(self, obj, **kwargs):\n \"\"\"Update ``obj``'s metadata with ``kwargs`` if it exists in the queue.\"\"\"\n if obj in self._queue:\n self._queue[obj].update(kwargs)\n\n def safe_update_info(self, obj, **kwargs):\n \"\"\"Update ``obj``'s metadata with ``kwargs`` if it exists in the queue.\"\"\"\n warn(\n \"AnnotatedQueue.safe_update_info is deprecated.\"\n \"It's behavior has been moved to `update_info`.\",\n UserWarning,\n )\n self.update_info(obj, **kwargs)\n\n def get_info(self, obj):\n \"\"\"Retrieve the metadata for ``obj``. 
Raises a ``QueuingError`` if obj is not in the queue.\"\"\"\n if obj not in self._queue:\n raise QueuingError(f\"Object {obj} not in the queue.\")\n\n return self._queue[obj]\n\n @property\n def queue(self):\n \"\"\"Returns a list of objects in the annotated queue\"\"\"\n return list(self._queue.keys())\n\n\ndef apply(op, context=QueuingManager):\n \"\"\"Apply an instantiated operator or measurement to a queuing context.\n\n Args:\n op (.Operator or .MeasurementProcess): the operator or measurement to apply/queue\n context (.QueuingManager): The queuing context to queue the operator to.\n Note that if no context is specified, the operator is\n applied to the currently active queuing context.\n Returns:\n .Operator or .MeasurementProcess: the input operator is returned for convenience\n\n **Example**\n\n In PennyLane, **operations and measurements are 'queued' or added to a circuit\n when they are instantiated**.\n\n The ``apply`` function can be used to add operations that might have\n already been instantiated elsewhere to the QNode:\n\n .. code-block:: python\n\n op = qml.RX(0.4, wires=0)\n dev = qml.device(\"default.qubit\", wires=2)\n\n @qml.qnode(dev)\n def circuit(x):\n qml.RY(x, wires=0) # applied during instantiation\n qml.apply(op) # manually applied\n return qml.expval(qml.PauliZ(0))\n\n >>> print(qml.draw(circuit)(0.6))\n 0: \u2500\u2500RY(0.6)\u2500\u2500RX(0.4)\u2500\u2500\u2524 \u27e8Z\u27e9\n\n It can also be used to apply functions repeatedly:\n\n .. code-block:: python\n\n @qml.qnode(dev)\n def circuit(x):\n qml.apply(op)\n qml.RY(x, wires=0)\n qml.apply(op)\n return qml.expval(qml.PauliZ(0))\n\n >>> print(qml.draw(circuit)(0.6))\n 0: \u2500\u2500RX(0.4)\u2500\u2500RY(0.6)\u2500\u2500RX(0.4)\u2500\u2500\u2524 \u27e8Z\u27e9\n\n .. details::\n :title: Usage Details\n\n Instantiated measurements can also be applied to queuing contexts\n using ``apply``:\n\n .. code-block:: python\n\n meas = qml.expval(qml.PauliZ(0) @ qml.PauliY(1))\n dev = qml.device(\"default.qubit\", wires=2)\n\n @qml.qnode(dev)\n def circuit(x):\n qml.RY(x, wires=0)\n qml.CNOT(wires=[0, 1])\n return qml.apply(meas)\n\n >>> print(qml.draw(circuit)(0.6))\n 0: \u2500\u2500RY(0.6)\u2500\u2500\u256d\u25cf\u2500\u2500\u256d\u2524 \u27e8Z \u2297 Y\u27e9\n 1: \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2570X\u2500\u2500\u2570\u2524 \u27e8Z \u2297 Y\u27e9\n\n By default, ``apply`` will queue operators to the currently\n active queuing context.\n\n When working with low-level queuing contexts such as quantum tapes,\n the desired context to queue the operation to can be explicitly\n passed:\n\n .. 
code-block:: python\n\n with qml.tape.QuantumTape() as tape1:\n qml.Hadamard(wires=1)\n\n with qml.tape.QuantumTape() as tape2:\n # Due to the nesting behaviour of queuing contexts,\n # tape2 will be queued to tape1.\n\n # The following PauliX operation will be queued\n # to the active queuing context, tape2, during instantiation.\n op1 = qml.PauliX(wires=0)\n\n # We can use qml.apply to apply the same operation to tape1\n # without leaving the tape2 context.\n qml.apply(op1, context=tape1)\n\n qml.RZ(0.2, wires=0)\n\n qml.CNOT(wires=[0, 1])\n\n >>> tape1.operations\n [Hadamard(wires=[1]), <QuantumTape: wires=[0], params=1>, PauliX(wires=[0]), CNOT(wires=[0, 1])]\n >>> tape2.operations\n [PauliX(wires=[0]), RZ(0.2, wires=[0])]\n \"\"\"\n if not QueuingManager.recording():\n raise RuntimeError(\"No queuing context available to append operation to.\")\n\n if op in getattr(context, \"queue\", QueuingManager.active_context().queue):\n # Queuing contexts can only contain unique objects.\n # If the object to be queued already exists, copy it.\n op = copy.copy(op)\n\n if hasattr(op, \"queue\"):\n # operator provides its own logic for queuing\n op.queue(context=context)\n else:\n # append the operator directly to the relevant queuing context\n context.append(op)\n\n return op\n", "path": "pennylane/queuing.py"}]} |
gh_patches_debug_1248 | rasdani/github-patches | git_diff | pyqtgraph__pyqtgraph-696 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
update version for new development and PlotDataItem stepMode fix
### Version
The PyPI version is 0.10.0 and uses the release code from November 2016; however, the documentation (http://www.pyqtgraph.org/documentation/index.html) shows the same version but is based on the development branch. This is very confusing (take, for example, the _addMarker_ method of InfiniteLine).
### PlotDataItem fix
If _stepMode_ is True, we get an exception in the _addPoints_ method of _ScatterPlotItem_, since the X and Y arrays are not the same length. In this case the last X point needs to be excluded.
--- END ISSUE ---
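For the second point: with `stepMode=True` the x array holds bin edges, so `len(x) == len(y) + 1`, while `ScatterPlotItem` expects one x value per y value. A minimal sketch of the length mismatch and one way to reconcile it (illustrative only, not the repository's fix):

```python
import numpy as np

values = np.random.normal(size=1000)
edges = np.linspace(-4, 4, 41)                  # 41 bin edges
counts, _ = np.histogram(values, bins=edges)    # 40 counts -> len(edges) == len(counts) + 1

# A step curve can consume (edges, counts) directly, but a symbol/scatter overlay
# cannot: it needs equal-length arrays. Dropping the last edge (or using bin
# centres) restores parity before the points are handed to the scatter item.
x_for_symbols = edges[:-1]                      # or 0.5 * (edges[:-1] + edges[1:])
assert len(x_for_symbols) == len(counts)
```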
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyqtgraph/graphicsItems/PlotDataItem.py`
Content:
```
1 import numpy as np
2 from .. import metaarray as metaarray
3 from ..Qt import QtCore
4 from .GraphicsObject import GraphicsObject
5 from .PlotCurveItem import PlotCurveItem
6 from .ScatterPlotItem import ScatterPlotItem
7 from .. import functions as fn
8 from .. import debug as debug
9 from .. import getConfigOption
10
11
12 class PlotDataItem(GraphicsObject):
13 """
14 **Bases:** :class:`GraphicsObject <pyqtgraph.GraphicsObject>`
15
16 GraphicsItem for displaying plot curves, scatter plots, or both.
17 While it is possible to use :class:`PlotCurveItem <pyqtgraph.PlotCurveItem>` or
18 :class:`ScatterPlotItem <pyqtgraph.ScatterPlotItem>` individually, this class
19 provides a unified interface to both. Instances of :class:`PlotDataItem` are
20 usually created by plot() methods such as :func:`pyqtgraph.plot` and
21 :func:`PlotItem.plot() <pyqtgraph.PlotItem.plot>`.
22
23 ============================== ==============================================
24 **Signals:**
25 sigPlotChanged(self) Emitted when the data in this item is updated.
26 sigClicked(self) Emitted when the item is clicked.
27 sigPointsClicked(self, points) Emitted when a plot point is clicked
28 Sends the list of points under the mouse.
29 ============================== ==============================================
30 """
31
32 sigPlotChanged = QtCore.Signal(object)
33 sigClicked = QtCore.Signal(object)
34 sigPointsClicked = QtCore.Signal(object, object)
35
36 def __init__(self, *args, **kargs):
37 """
38 There are many different ways to create a PlotDataItem:
39
40 **Data initialization arguments:** (x,y data only)
41
42 =================================== ======================================
43 PlotDataItem(xValues, yValues) x and y values may be any sequence (including ndarray) of real numbers
44 PlotDataItem(yValues) y values only -- x will be automatically set to range(len(y))
45 PlotDataItem(x=xValues, y=yValues) x and y given by keyword arguments
46 PlotDataItem(ndarray(Nx2)) numpy array with shape (N, 2) where x=data[:,0] and y=data[:,1]
47 =================================== ======================================
48
49 **Data initialization arguments:** (x,y data AND may include spot style)
50
51 =========================== =========================================
52 PlotDataItem(recarray) numpy array with dtype=[('x', float), ('y', float), ...]
53 PlotDataItem(list-of-dicts) [{'x': x, 'y': y, ...}, ...]
54 PlotDataItem(dict-of-lists) {'x': [...], 'y': [...], ...}
55 PlotDataItem(MetaArray) 1D array of Y values with X sepecified as axis values
56 OR 2D array with a column 'y' and extra columns as needed.
57 =========================== =========================================
58
59 **Line style keyword arguments:**
60
61 ========== ==============================================================================
62 connect Specifies how / whether vertexes should be connected. See
63 :func:`arrayToQPath() <pyqtgraph.arrayToQPath>`
64 pen Pen to use for drawing line between points.
65 Default is solid grey, 1px width. Use None to disable line drawing.
66 May be any single argument accepted by :func:`mkPen() <pyqtgraph.mkPen>`
67 shadowPen Pen for secondary line to draw behind the primary line. disabled by default.
68 May be any single argument accepted by :func:`mkPen() <pyqtgraph.mkPen>`
69 fillLevel Fill the area between the curve and fillLevel
70 fillBrush Fill to use when fillLevel is specified.
71 May be any single argument accepted by :func:`mkBrush() <pyqtgraph.mkBrush>`
72 stepMode If True, two orthogonal lines are drawn for each sample
73 as steps. This is commonly used when drawing histograms.
74 Note that in this case, `len(x) == len(y) + 1`
75 (added in version 0.9.9)
76 ========== ==============================================================================
77
78 **Point style keyword arguments:** (see :func:`ScatterPlotItem.setData() <pyqtgraph.ScatterPlotItem.setData>` for more information)
79
80 ============ =====================================================
81 symbol Symbol to use for drawing points OR list of symbols,
82 one per point. Default is no symbol.
83 Options are o, s, t, d, +, or any QPainterPath
84 symbolPen Outline pen for drawing points OR list of pens, one
85 per point. May be any single argument accepted by
86 :func:`mkPen() <pyqtgraph.mkPen>`
87 symbolBrush Brush for filling points OR list of brushes, one per
88 point. May be any single argument accepted by
89 :func:`mkBrush() <pyqtgraph.mkBrush>`
90 symbolSize Diameter of symbols OR list of diameters.
91 pxMode (bool) If True, then symbolSize is specified in
92 pixels. If False, then symbolSize is
93 specified in data coordinates.
94 ============ =====================================================
95
96 **Optimization keyword arguments:**
97
98 ================ =====================================================================
99 antialias (bool) By default, antialiasing is disabled to improve performance.
100 Note that in some cases (in particluar, when pxMode=True), points
101 will be rendered antialiased even if this is set to False.
102 decimate deprecated.
103 downsample (int) Reduce the number of samples displayed by this value
104 downsampleMethod 'subsample': Downsample by taking the first of N samples.
105 This method is fastest and least accurate.
106 'mean': Downsample by taking the mean of N samples.
107 'peak': Downsample by drawing a saw wave that follows the min
108 and max of the original data. This method produces the best
109 visual representation of the data but is slower.
110 autoDownsample (bool) If True, resample the data before plotting to avoid plotting
111 multiple line segments per pixel. This can improve performance when
112 viewing very high-density data, but increases the initial overhead
113 and memory usage.
114 clipToView (bool) If True, only plot data that is visible within the X range of
115 the containing ViewBox. This can improve performance when plotting
116 very large data sets where only a fraction of the data is visible
117 at any time.
118 identical *deprecated*
119 ================ =====================================================================
120
121 **Meta-info keyword arguments:**
122
123 ========== ================================================
124 name name of dataset. This would appear in a legend
125 ========== ================================================
126 """
127 GraphicsObject.__init__(self)
128 self.setFlag(self.ItemHasNoContents)
129 self.xData = None
130 self.yData = None
131 self.xDisp = None
132 self.yDisp = None
133 #self.dataMask = None
134 #self.curves = []
135 #self.scatters = []
136 self.curve = PlotCurveItem()
137 self.scatter = ScatterPlotItem()
138 self.curve.setParentItem(self)
139 self.scatter.setParentItem(self)
140
141 self.curve.sigClicked.connect(self.curveClicked)
142 self.scatter.sigClicked.connect(self.scatterClicked)
143
144
145 #self.clear()
146 self.opts = {
147 'connect': 'all',
148
149 'fftMode': False,
150 'logMode': [False, False],
151 'alphaHint': 1.0,
152 'alphaMode': False,
153
154 'pen': (200,200,200),
155 'shadowPen': None,
156 'fillLevel': None,
157 'fillBrush': None,
158 'stepMode': None,
159
160 'symbol': None,
161 'symbolSize': 10,
162 'symbolPen': (200,200,200),
163 'symbolBrush': (50, 50, 150),
164 'pxMode': True,
165
166 'antialias': getConfigOption('antialias'),
167 'pointMode': None,
168
169 'downsample': 1,
170 'autoDownsample': False,
171 'downsampleMethod': 'peak',
172 'autoDownsampleFactor': 5., # draw ~5 samples per pixel
173 'clipToView': False,
174
175 'data': None,
176 }
177 self.setData(*args, **kargs)
178
179 def implements(self, interface=None):
180 ints = ['plotData']
181 if interface is None:
182 return ints
183 return interface in ints
184
185 def name(self):
186 return self.opts.get('name', None)
187
188 def boundingRect(self):
189 return QtCore.QRectF() ## let child items handle this
190
191 def setAlpha(self, alpha, auto):
192 if self.opts['alphaHint'] == alpha and self.opts['alphaMode'] == auto:
193 return
194 self.opts['alphaHint'] = alpha
195 self.opts['alphaMode'] = auto
196 self.setOpacity(alpha)
197 #self.update()
198
199 def setFftMode(self, mode):
200 if self.opts['fftMode'] == mode:
201 return
202 self.opts['fftMode'] = mode
203 self.xDisp = self.yDisp = None
204 self.xClean = self.yClean = None
205 self.updateItems()
206 self.informViewBoundsChanged()
207
208 def setLogMode(self, xMode, yMode):
209 if self.opts['logMode'] == [xMode, yMode]:
210 return
211 self.opts['logMode'] = [xMode, yMode]
212 self.xDisp = self.yDisp = None
213 self.xClean = self.yClean = None
214 self.updateItems()
215 self.informViewBoundsChanged()
216
217 def setPointMode(self, mode):
218 if self.opts['pointMode'] == mode:
219 return
220 self.opts['pointMode'] = mode
221 self.update()
222
223 def setPen(self, *args, **kargs):
224 """
225 | Sets the pen used to draw lines between points.
226 | *pen* can be a QPen or any argument accepted by :func:`pyqtgraph.mkPen() <pyqtgraph.mkPen>`
227 """
228 pen = fn.mkPen(*args, **kargs)
229 self.opts['pen'] = pen
230 #self.curve.setPen(pen)
231 #for c in self.curves:
232 #c.setPen(pen)
233 #self.update()
234 self.updateItems()
235
236 def setShadowPen(self, *args, **kargs):
237 """
238 | Sets the shadow pen used to draw lines between points (this is for enhancing contrast or
239 emphasizing data).
240 | This line is drawn behind the primary pen (see :func:`setPen() <pyqtgraph.PlotDataItem.setPen>`)
241 and should generally be assigned greater width than the primary pen.
242 | *pen* can be a QPen or any argument accepted by :func:`pyqtgraph.mkPen() <pyqtgraph.mkPen>`
243 """
244 pen = fn.mkPen(*args, **kargs)
245 self.opts['shadowPen'] = pen
246 #for c in self.curves:
247 #c.setPen(pen)
248 #self.update()
249 self.updateItems()
250
251 def setFillBrush(self, *args, **kargs):
252 brush = fn.mkBrush(*args, **kargs)
253 if self.opts['fillBrush'] == brush:
254 return
255 self.opts['fillBrush'] = brush
256 self.updateItems()
257
258 def setBrush(self, *args, **kargs):
259 return self.setFillBrush(*args, **kargs)
260
261 def setFillLevel(self, level):
262 if self.opts['fillLevel'] == level:
263 return
264 self.opts['fillLevel'] = level
265 self.updateItems()
266
267 def setSymbol(self, symbol):
268 if self.opts['symbol'] == symbol:
269 return
270 self.opts['symbol'] = symbol
271 #self.scatter.setSymbol(symbol)
272 self.updateItems()
273
274 def setSymbolPen(self, *args, **kargs):
275 pen = fn.mkPen(*args, **kargs)
276 if self.opts['symbolPen'] == pen:
277 return
278 self.opts['symbolPen'] = pen
279 #self.scatter.setSymbolPen(pen)
280 self.updateItems()
281
282
283
284 def setSymbolBrush(self, *args, **kargs):
285 brush = fn.mkBrush(*args, **kargs)
286 if self.opts['symbolBrush'] == brush:
287 return
288 self.opts['symbolBrush'] = brush
289 #self.scatter.setSymbolBrush(brush)
290 self.updateItems()
291
292
293 def setSymbolSize(self, size):
294 if self.opts['symbolSize'] == size:
295 return
296 self.opts['symbolSize'] = size
297 #self.scatter.setSymbolSize(symbolSize)
298 self.updateItems()
299
300 def setDownsampling(self, ds=None, auto=None, method=None):
301 """
302 Set the downsampling mode of this item. Downsampling reduces the number
303 of samples drawn to increase performance.
304
305 ============== =================================================================
306 **Arguments:**
307 ds (int) Reduce visible plot samples by this factor. To disable,
308 set ds=1.
309 auto (bool) If True, automatically pick *ds* based on visible range
310 mode 'subsample': Downsample by taking the first of N samples.
311 This method is fastest and least accurate.
312 'mean': Downsample by taking the mean of N samples.
313 'peak': Downsample by drawing a saw wave that follows the min
314 and max of the original data. This method produces the best
315 visual representation of the data but is slower.
316 ============== =================================================================
317 """
318 changed = False
319 if ds is not None:
320 if self.opts['downsample'] != ds:
321 changed = True
322 self.opts['downsample'] = ds
323
324 if auto is not None and self.opts['autoDownsample'] != auto:
325 self.opts['autoDownsample'] = auto
326 changed = True
327
328 if method is not None:
329 if self.opts['downsampleMethod'] != method:
330 changed = True
331 self.opts['downsampleMethod'] = method
332
333 if changed:
334 self.xDisp = self.yDisp = None
335 self.updateItems()
336
337 def setClipToView(self, clip):
338 if self.opts['clipToView'] == clip:
339 return
340 self.opts['clipToView'] = clip
341 self.xDisp = self.yDisp = None
342 self.updateItems()
343
344
345 def setData(self, *args, **kargs):
346 """
347 Clear any data displayed by this item and display new data.
348 See :func:`__init__() <pyqtgraph.PlotDataItem.__init__>` for details; it accepts the same arguments.
349 """
350 #self.clear()
351 profiler = debug.Profiler()
352 y = None
353 x = None
354 if len(args) == 1:
355 data = args[0]
356 dt = dataType(data)
357 if dt == 'empty':
358 pass
359 elif dt == 'listOfValues':
360 y = np.array(data)
361 elif dt == 'Nx2array':
362 x = data[:,0]
363 y = data[:,1]
364 elif dt == 'recarray' or dt == 'dictOfLists':
365 if 'x' in data:
366 x = np.array(data['x'])
367 if 'y' in data:
368 y = np.array(data['y'])
369 elif dt == 'listOfDicts':
370 if 'x' in data[0]:
371 x = np.array([d.get('x',None) for d in data])
372 if 'y' in data[0]:
373 y = np.array([d.get('y',None) for d in data])
374 for k in ['data', 'symbolSize', 'symbolPen', 'symbolBrush', 'symbolShape']:
375 if k in data:
376 kargs[k] = [d.get(k, None) for d in data]
377 elif dt == 'MetaArray':
378 y = data.view(np.ndarray)
379 x = data.xvals(0).view(np.ndarray)
380 else:
381 raise Exception('Invalid data type %s' % type(data))
382
383 elif len(args) == 2:
384 seq = ('listOfValues', 'MetaArray', 'empty')
385 dtyp = dataType(args[0]), dataType(args[1])
386 if dtyp[0] not in seq or dtyp[1] not in seq:
387 raise Exception('When passing two unnamed arguments, both must be a list or array of values. (got %s, %s)' % (str(type(args[0])), str(type(args[1]))))
388 if not isinstance(args[0], np.ndarray):
389 #x = np.array(args[0])
390 if dtyp[0] == 'MetaArray':
391 x = args[0].asarray()
392 else:
393 x = np.array(args[0])
394 else:
395 x = args[0].view(np.ndarray)
396 if not isinstance(args[1], np.ndarray):
397 #y = np.array(args[1])
398 if dtyp[1] == 'MetaArray':
399 y = args[1].asarray()
400 else:
401 y = np.array(args[1])
402 else:
403 y = args[1].view(np.ndarray)
404
405 if 'x' in kargs:
406 x = kargs['x']
407 if 'y' in kargs:
408 y = kargs['y']
409
410 profiler('interpret data')
411 ## pull in all style arguments.
412 ## Use self.opts to fill in anything not present in kargs.
413
414 if 'name' in kargs:
415 self.opts['name'] = kargs['name']
416 if 'connect' in kargs:
417 self.opts['connect'] = kargs['connect']
418
419 ## if symbol pen/brush are given with no symbol, then assume symbol is 'o'
420
421 if 'symbol' not in kargs and ('symbolPen' in kargs or 'symbolBrush' in kargs or 'symbolSize' in kargs):
422 kargs['symbol'] = 'o'
423
424 if 'brush' in kargs:
425 kargs['fillBrush'] = kargs['brush']
426
427 for k in list(self.opts.keys()):
428 if k in kargs:
429 self.opts[k] = kargs[k]
430
431 #curveArgs = {}
432 #for k in ['pen', 'shadowPen', 'fillLevel', 'brush']:
433 #if k in kargs:
434 #self.opts[k] = kargs[k]
435 #curveArgs[k] = self.opts[k]
436
437 #scatterArgs = {}
438 #for k,v in [('symbolPen','pen'), ('symbolBrush','brush'), ('symbol','symbol')]:
439 #if k in kargs:
440 #self.opts[k] = kargs[k]
441 #scatterArgs[v] = self.opts[k]
442
443
444 if y is None:
445 return
446 if y is not None and x is None:
447 x = np.arange(len(y))
448
449 if isinstance(x, list):
450 x = np.array(x)
451 if isinstance(y, list):
452 y = np.array(y)
453
454 self.xData = x.view(np.ndarray) ## one last check to make sure there are no MetaArrays getting by
455 self.yData = y.view(np.ndarray)
456 self.xClean = self.yClean = None
457 self.xDisp = None
458 self.yDisp = None
459 profiler('set data')
460
461 self.updateItems()
462 profiler('update items')
463
464 self.informViewBoundsChanged()
465 #view = self.getViewBox()
466 #if view is not None:
467 #view.itemBoundsChanged(self) ## inform view so it can update its range if it wants
468
469 self.sigPlotChanged.emit(self)
470 profiler('emit')
471
472 def updateItems(self):
473
474 curveArgs = {}
475 for k,v in [('pen','pen'), ('shadowPen','shadowPen'), ('fillLevel','fillLevel'), ('fillBrush', 'brush'), ('antialias', 'antialias'), ('connect', 'connect'), ('stepMode', 'stepMode')]:
476 curveArgs[v] = self.opts[k]
477
478 scatterArgs = {}
479 for k,v in [('symbolPen','pen'), ('symbolBrush','brush'), ('symbol','symbol'), ('symbolSize', 'size'), ('data', 'data'), ('pxMode', 'pxMode'), ('antialias', 'antialias')]:
480 if k in self.opts:
481 scatterArgs[v] = self.opts[k]
482
483 x,y = self.getData()
484 #scatterArgs['mask'] = self.dataMask
485
486 if curveArgs['pen'] is not None or (curveArgs['brush'] is not None and curveArgs['fillLevel'] is not None):
487 self.curve.setData(x=x, y=y, **curveArgs)
488 self.curve.show()
489 else:
490 self.curve.hide()
491
492 if scatterArgs['symbol'] is not None:
493 self.scatter.setData(x=x, y=y, **scatterArgs)
494 self.scatter.show()
495 else:
496 self.scatter.hide()
497
498
499 def getData(self):
500 if self.xData is None:
501 return (None, None)
502
503 if self.xDisp is None:
504 x = self.xData
505 y = self.yData
506
507 if self.opts['fftMode']:
508 x,y = self._fourierTransform(x, y)
509 # Ignore the first bin for fft data if we have a logx scale
510 if self.opts['logMode'][0]:
511 x=x[1:]
512 y=y[1:]
513 if self.opts['logMode'][0]:
514 x = np.log10(x)
515 if self.opts['logMode'][1]:
516 y = np.log10(y)
517
518 ds = self.opts['downsample']
519 if not isinstance(ds, int):
520 ds = 1
521
522 if self.opts['autoDownsample']:
523 # this option presumes that x-values have uniform spacing
524 range = self.viewRect()
525 if range is not None:
526 dx = float(x[-1]-x[0]) / (len(x)-1)
527 x0 = (range.left()-x[0]) / dx
528 x1 = (range.right()-x[0]) / dx
529 width = self.getViewBox().width()
530 if width != 0.0:
531 ds = int(max(1, int((x1-x0) / (width*self.opts['autoDownsampleFactor']))))
532 ## downsampling is expensive; delay until after clipping.
533
534 if self.opts['clipToView']:
535 view = self.getViewBox()
536 if view is None or not view.autoRangeEnabled()[0]:
537 # this option presumes that x-values have uniform spacing
538 range = self.viewRect()
539 if range is not None and len(x) > 1:
540 dx = float(x[-1]-x[0]) / (len(x)-1)
541 # clip to visible region extended by downsampling value
542 x0 = np.clip(int((range.left()-x[0])/dx)-1*ds , 0, len(x)-1)
543 x1 = np.clip(int((range.right()-x[0])/dx)+2*ds , 0, len(x)-1)
544 x = x[x0:x1]
545 y = y[x0:x1]
546
547 if ds > 1:
548 if self.opts['downsampleMethod'] == 'subsample':
549 x = x[::ds]
550 y = y[::ds]
551 elif self.opts['downsampleMethod'] == 'mean':
552 n = len(x) // ds
553 x = x[:n*ds:ds]
554 y = y[:n*ds].reshape(n,ds).mean(axis=1)
555 elif self.opts['downsampleMethod'] == 'peak':
556 n = len(x) // ds
557 x1 = np.empty((n,2))
558 x1[:] = x[:n*ds:ds,np.newaxis]
559 x = x1.reshape(n*2)
560 y1 = np.empty((n,2))
561 y2 = y[:n*ds].reshape((n, ds))
562 y1[:,0] = y2.max(axis=1)
563 y1[:,1] = y2.min(axis=1)
564 y = y1.reshape(n*2)
565
566
567 self.xDisp = x
568 self.yDisp = y
569 return self.xDisp, self.yDisp
570
571 def dataBounds(self, ax, frac=1.0, orthoRange=None):
572 """
573 Returns the range occupied by the data (along a specific axis) in this item.
574 This method is called by ViewBox when auto-scaling.
575
576 =============== =============================================================
577 **Arguments:**
578 ax (0 or 1) the axis for which to return this item's data range
579 frac (float 0.0-1.0) Specifies what fraction of the total data
580 range to return. By default, the entire range is returned.
581 This allows the ViewBox to ignore large spikes in the data
582 when auto-scaling.
583 orthoRange ([min,max] or None) Specifies that only the data within the
584 given range (orthogonal to *ax*) should be measured when
585 returning the data range. (For example, a ViewBox might ask
586 what is the y-range of all data with x-values between min
587 and max)
588 =============== =============================================================
589 """
590
591 range = [None, None]
592 if self.curve.isVisible():
593 range = self.curve.dataBounds(ax, frac, orthoRange)
594 elif self.scatter.isVisible():
595 r2 = self.scatter.dataBounds(ax, frac, orthoRange)
596 range = [
597 r2[0] if range[0] is None else (range[0] if r2[0] is None else min(r2[0], range[0])),
598 r2[1] if range[1] is None else (range[1] if r2[1] is None else min(r2[1], range[1]))
599 ]
600 return range
601
602 def pixelPadding(self):
603 """
604 Return the size in pixels that this item may draw beyond the values returned by dataBounds().
605 This method is called by ViewBox when auto-scaling.
606 """
607 pad = 0
608 if self.curve.isVisible():
609 pad = max(pad, self.curve.pixelPadding())
610 elif self.scatter.isVisible():
611 pad = max(pad, self.scatter.pixelPadding())
612 return pad
613
614
615 def clear(self):
616 #for i in self.curves+self.scatters:
617 #if i.scene() is not None:
618 #i.scene().removeItem(i)
619 #self.curves = []
620 #self.scatters = []
621 self.xData = None
622 self.yData = None
623 #self.xClean = None
624 #self.yClean = None
625 self.xDisp = None
626 self.yDisp = None
627 self.curve.setData([])
628 self.scatter.setData([])
629
630 def appendData(self, *args, **kargs):
631 pass
632
633 def curveClicked(self):
634 self.sigClicked.emit(self)
635
636 def scatterClicked(self, plt, points):
637 self.sigClicked.emit(self)
638 self.sigPointsClicked.emit(self, points)
639
640 def viewRangeChanged(self):
641 # view range has changed; re-plot if needed
642 if self.opts['clipToView'] or self.opts['autoDownsample']:
643 self.xDisp = self.yDisp = None
644 self.updateItems()
645
646 def _fourierTransform(self, x, y):
647 ## Perform fourier transform. If x values are not sampled uniformly,
648 ## then use np.interp to resample before taking fft.
649 dx = np.diff(x)
650 uniform = not np.any(np.abs(dx-dx[0]) > (abs(dx[0]) / 1000.))
651 if not uniform:
652 x2 = np.linspace(x[0], x[-1], len(x))
653 y = np.interp(x2, x, y)
654 x = x2
655 n = y.size
656 f = np.fft.rfft(y) / n
657 d = float(x[-1]-x[0]) / (len(x)-1)
658 x = np.fft.rfftfreq(n, d)
659 y = np.abs(f)
660 return x, y
661
662 def dataType(obj):
663 if hasattr(obj, '__len__') and len(obj) == 0:
664 return 'empty'
665 if isinstance(obj, dict):
666 return 'dictOfLists'
667 elif isSequence(obj):
668 first = obj[0]
669
670 if (hasattr(obj, 'implements') and obj.implements('MetaArray')):
671 return 'MetaArray'
672 elif isinstance(obj, np.ndarray):
673 if obj.ndim == 1:
674 if obj.dtype.names is None:
675 return 'listOfValues'
676 else:
677 return 'recarray'
678 elif obj.ndim == 2 and obj.dtype.names is None and obj.shape[1] == 2:
679 return 'Nx2array'
680 else:
681 raise Exception('array shape must be (N,) or (N,2); got %s instead' % str(obj.shape))
682 elif isinstance(first, dict):
683 return 'listOfDicts'
684 else:
685 return 'listOfValues'
686
687
688 def isSequence(obj):
689 return hasattr(obj, '__iter__') or isinstance(obj, np.ndarray) or (hasattr(obj, 'implements') and obj.implements('MetaArray'))
690
691
692
693 #class TableData:
694 #"""
695 #Class for presenting multiple forms of tabular data through a consistent interface.
696 #May contain:
697 #- numpy record array
698 #- list-of-dicts (all dicts are _not_ required to have the same keys)
699 #- dict-of-lists
700 #- dict (single record)
701 #Note: if all the values in this record are lists, it will be interpreted as multiple records
702
703 #Data can be accessed and modified by column, by row, or by value
704 #data[columnName]
705 #data[rowId]
706 #data[columnName, rowId] = value
707 #data[columnName] = [value, value, ...]
708 #data[rowId] = {columnName: value, ...}
709 #"""
710
711 #def __init__(self, data):
712 #self.data = data
713 #if isinstance(data, np.ndarray):
714 #self.mode = 'array'
715 #elif isinstance(data, list):
716 #self.mode = 'list'
717 #elif isinstance(data, dict):
718 #types = set(map(type, data.values()))
719 ### dict may be a dict-of-lists or a single record
720 #types -= set([list, np.ndarray]) ## if dict contains any non-sequence values, it is probably a single record.
721 #if len(types) != 0:
722 #self.data = [self.data]
723 #self.mode = 'list'
724 #else:
725 #self.mode = 'dict'
726 #elif isinstance(data, TableData):
727 #self.data = data.data
728 #self.mode = data.mode
729 #else:
730 #raise TypeError(type(data))
731
732 #for fn in ['__getitem__', '__setitem__']:
733 #setattr(self, fn, getattr(self, '_TableData'+fn+self.mode))
734
735 #def originalData(self):
736 #return self.data
737
738 #def toArray(self):
739 #if self.mode == 'array':
740 #return self.data
741 #if len(self) < 1:
742 ##return np.array([]) ## need to return empty array *with correct columns*, but this is very difficult, so just return None
743 #return None
744 #rec1 = self[0]
745 #dtype = functions.suggestRecordDType(rec1)
746 ##print rec1, dtype
747 #arr = np.empty(len(self), dtype=dtype)
748 #arr[0] = tuple(rec1.values())
749 #for i in xrange(1, len(self)):
750 #arr[i] = tuple(self[i].values())
751 #return arr
752
753 #def __getitem__array(self, arg):
754 #if isinstance(arg, tuple):
755 #return self.data[arg[0]][arg[1]]
756 #else:
757 #return self.data[arg]
758
759 #def __getitem__list(self, arg):
760 #if isinstance(arg, basestring):
761 #return [d.get(arg, None) for d in self.data]
762 #elif isinstance(arg, int):
763 #return self.data[arg]
764 #elif isinstance(arg, tuple):
765 #arg = self._orderArgs(arg)
766 #return self.data[arg[0]][arg[1]]
767 #else:
768 #raise TypeError(type(arg))
769
770 #def __getitem__dict(self, arg):
771 #if isinstance(arg, basestring):
772 #return self.data[arg]
773 #elif isinstance(arg, int):
774 #return dict([(k, v[arg]) for k, v in self.data.items()])
775 #elif isinstance(arg, tuple):
776 #arg = self._orderArgs(arg)
777 #return self.data[arg[1]][arg[0]]
778 #else:
779 #raise TypeError(type(arg))
780
781 #def __setitem__array(self, arg, val):
782 #if isinstance(arg, tuple):
783 #self.data[arg[0]][arg[1]] = val
784 #else:
785 #self.data[arg] = val
786
787 #def __setitem__list(self, arg, val):
788 #if isinstance(arg, basestring):
789 #if len(val) != len(self.data):
790 #raise Exception("Values (%d) and data set (%d) are not the same length." % (len(val), len(self.data)))
791 #for i, rec in enumerate(self.data):
792 #rec[arg] = val[i]
793 #elif isinstance(arg, int):
794 #self.data[arg] = val
795 #elif isinstance(arg, tuple):
796 #arg = self._orderArgs(arg)
797 #self.data[arg[0]][arg[1]] = val
798 #else:
799 #raise TypeError(type(arg))
800
801 #def __setitem__dict(self, arg, val):
802 #if isinstance(arg, basestring):
803 #if len(val) != len(self.data[arg]):
804 #raise Exception("Values (%d) and data set (%d) are not the same length." % (len(val), len(self.data[arg])))
805 #self.data[arg] = val
806 #elif isinstance(arg, int):
807 #for k in self.data:
808 #self.data[k][arg] = val[k]
809 #elif isinstance(arg, tuple):
810 #arg = self._orderArgs(arg)
811 #self.data[arg[1]][arg[0]] = val
812 #else:
813 #raise TypeError(type(arg))
814
815 #def _orderArgs(self, args):
816 ### return args in (int, str) order
817 #if isinstance(args[0], basestring):
818 #return (args[1], args[0])
819 #else:
820 #return args
821
822 #def __iter__(self):
823 #for i in xrange(len(self)):
824 #yield self[i]
825
826 #def __len__(self):
827 #if self.mode == 'array' or self.mode == 'list':
828 #return len(self.data)
829 #else:
830 #return max(map(len, self.data.values()))
831
832 #def columnNames(self):
833 #"""returns column names in no particular order"""
834 #if self.mode == 'array':
835 #return self.data.dtype.names
836 #elif self.mode == 'list':
837 #names = set()
838 #for row in self.data:
839 #names.update(row.keys())
840 #return list(names)
841 #elif self.mode == 'dict':
842 #return self.data.keys()
843
844 #def keys(self):
845 #return self.columnNames()
846
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
 if __name__ == "__main__":
-    asyncio.run(run_async_server("."), debug=True)
+    asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
 if __name__ == "__main__":
-    server = run_sync_server(".")
+    server = run_sync_server()
     server.shutdown()
```
| diff --git a/pyqtgraph/graphicsItems/PlotDataItem.py b/pyqtgraph/graphicsItems/PlotDataItem.py
--- a/pyqtgraph/graphicsItems/PlotDataItem.py
+++ b/pyqtgraph/graphicsItems/PlotDataItem.py
@@ -490,6 +490,9 @@
         self.curve.hide()
 
         if scatterArgs['symbol'] is not None:
+
+            if self.opts.get('stepMode', False) is True:
+                x = 0.5 * (x[:-1] + x[1:])
             self.scatter.setData(x=x, y=y, **scatterArgs)
             self.scatter.show()
         else:
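The added lines follow from the `stepMode` contract spelled out in the docstring above: `x` holds bin edges, so `len(x) == len(y) + 1`, and that extra element is exactly what `ScatterPlotItem` cannot accept. Averaging neighbouring edges yields one bin centre per value, which is where a symbol should sit on a histogram-style plot. A small NumPy sketch of the relationship (the array names are illustrative only):

```python
import numpy as np

# With stepMode=True the curve is built from bin edges: len(x) == len(y) + 1.
edges = np.array([0.0, 1.0, 2.0, 3.0, 4.0])   # 5 edges
counts = np.array([3, 7, 2, 5])               # 4 values, one per bin

# The patch collapses adjacent edges into bin centres before the x array is
# handed to the scatter item, giving exactly one position per value.
centres = 0.5 * (edges[:-1] + edges[1:])      # -> [0.5, 1.5, 2.5, 3.5]
assert len(centres) == len(counts)
```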
| {"golden_diff": "diff --git a/pyqtgraph/graphicsItems/PlotDataItem.py b/pyqtgraph/graphicsItems/PlotDataItem.py\n--- a/pyqtgraph/graphicsItems/PlotDataItem.py\n+++ b/pyqtgraph/graphicsItems/PlotDataItem.py\n@@ -490,6 +490,9 @@\n self.curve.hide()\n \n if scatterArgs['symbol'] is not None:\n+ \n+ if self.opts.get('stepMode', False) is True:\n+ x = 0.5 * (x[:-1] + x[1:]) \n self.scatter.setData(x=x, y=y, **scatterArgs)\n self.scatter.show()\n else:\n", "issue": "update version for new development and PlotDataItem stepMode fix\n### Version \r\nPyPI version is 0.10.0 and uses the release code from november 2016, however the documentation (http://www.pyqtgraph.org/documentation/index.html) has the same version but is based on the development branch. This is very confusing (take for example _addMarker_ method of InfiniteLine).\r\n\r\n### PlotDataItem fix\r\nIf _stepMode_ is True we get an exception in the _addPoints_ method of _ScatterPlotItem_ since X and Y axis length is not the same. The last X point is in this case excluded.\r\n\r\n\n", "before_files": [{"content": "import numpy as np\nfrom .. import metaarray as metaarray\nfrom ..Qt import QtCore\nfrom .GraphicsObject import GraphicsObject\nfrom .PlotCurveItem import PlotCurveItem\nfrom .ScatterPlotItem import ScatterPlotItem\nfrom .. import functions as fn\nfrom .. import debug as debug\nfrom .. import getConfigOption\n\n\nclass PlotDataItem(GraphicsObject):\n \"\"\"\n **Bases:** :class:`GraphicsObject <pyqtgraph.GraphicsObject>`\n \n GraphicsItem for displaying plot curves, scatter plots, or both. \n While it is possible to use :class:`PlotCurveItem <pyqtgraph.PlotCurveItem>` or\n :class:`ScatterPlotItem <pyqtgraph.ScatterPlotItem>` individually, this class\n provides a unified interface to both. Instances of :class:`PlotDataItem` are \n usually created by plot() methods such as :func:`pyqtgraph.plot` and\n :func:`PlotItem.plot() <pyqtgraph.PlotItem.plot>`.\n \n ============================== ==============================================\n **Signals:**\n sigPlotChanged(self) Emitted when the data in this item is updated. \n sigClicked(self) Emitted when the item is clicked.\n sigPointsClicked(self, points) Emitted when a plot point is clicked\n Sends the list of points under the mouse.\n ============================== ==============================================\n \"\"\"\n \n sigPlotChanged = QtCore.Signal(object)\n sigClicked = QtCore.Signal(object)\n sigPointsClicked = QtCore.Signal(object, object)\n \n def __init__(self, *args, **kargs):\n \"\"\"\n There are many different ways to create a PlotDataItem:\n \n **Data initialization arguments:** (x,y data only)\n \n =================================== ======================================\n PlotDataItem(xValues, yValues) x and y values may be any sequence (including ndarray) of real numbers\n PlotDataItem(yValues) y values only -- x will be automatically set to range(len(y))\n PlotDataItem(x=xValues, y=yValues) x and y given by keyword arguments\n PlotDataItem(ndarray(Nx2)) numpy array with shape (N, 2) where x=data[:,0] and y=data[:,1]\n =================================== ======================================\n \n **Data initialization arguments:** (x,y data AND may include spot style)\n \n =========================== =========================================\n PlotDataItem(recarray) numpy array with dtype=[('x', float), ('y', float), ...]\n PlotDataItem(list-of-dicts) [{'x': x, 'y': y, ...}, ...] 
\n PlotDataItem(dict-of-lists) {'x': [...], 'y': [...], ...} \n PlotDataItem(MetaArray) 1D array of Y values with X sepecified as axis values \n OR 2D array with a column 'y' and extra columns as needed.\n =========================== =========================================\n \n **Line style keyword arguments:**\n\n ========== ==============================================================================\n connect Specifies how / whether vertexes should be connected. See\n :func:`arrayToQPath() <pyqtgraph.arrayToQPath>`\n pen Pen to use for drawing line between points.\n Default is solid grey, 1px width. Use None to disable line drawing.\n May be any single argument accepted by :func:`mkPen() <pyqtgraph.mkPen>`\n shadowPen Pen for secondary line to draw behind the primary line. disabled by default.\n May be any single argument accepted by :func:`mkPen() <pyqtgraph.mkPen>`\n fillLevel Fill the area between the curve and fillLevel\n fillBrush Fill to use when fillLevel is specified. \n May be any single argument accepted by :func:`mkBrush() <pyqtgraph.mkBrush>`\n stepMode If True, two orthogonal lines are drawn for each sample\n as steps. This is commonly used when drawing histograms.\n Note that in this case, `len(x) == len(y) + 1`\n (added in version 0.9.9)\n ========== ==============================================================================\n \n **Point style keyword arguments:** (see :func:`ScatterPlotItem.setData() <pyqtgraph.ScatterPlotItem.setData>` for more information)\n \n ============ =====================================================\n symbol Symbol to use for drawing points OR list of symbols, \n one per point. Default is no symbol.\n Options are o, s, t, d, +, or any QPainterPath\n symbolPen Outline pen for drawing points OR list of pens, one \n per point. May be any single argument accepted by \n :func:`mkPen() <pyqtgraph.mkPen>`\n symbolBrush Brush for filling points OR list of brushes, one per \n point. May be any single argument accepted by \n :func:`mkBrush() <pyqtgraph.mkBrush>`\n symbolSize Diameter of symbols OR list of diameters.\n pxMode (bool) If True, then symbolSize is specified in \n pixels. If False, then symbolSize is \n specified in data coordinates.\n ============ =====================================================\n \n **Optimization keyword arguments:**\n \n ================ =====================================================================\n antialias (bool) By default, antialiasing is disabled to improve performance.\n Note that in some cases (in particluar, when pxMode=True), points \n will be rendered antialiased even if this is set to False.\n decimate deprecated.\n downsample (int) Reduce the number of samples displayed by this value\n downsampleMethod 'subsample': Downsample by taking the first of N samples. \n This method is fastest and least accurate.\n 'mean': Downsample by taking the mean of N samples.\n 'peak': Downsample by drawing a saw wave that follows the min \n and max of the original data. This method produces the best \n visual representation of the data but is slower.\n autoDownsample (bool) If True, resample the data before plotting to avoid plotting\n multiple line segments per pixel. This can improve performance when\n viewing very high-density data, but increases the initial overhead \n and memory usage.\n clipToView (bool) If True, only plot data that is visible within the X range of\n the containing ViewBox. 
This can improve performance when plotting\n very large data sets where only a fraction of the data is visible\n at any time.\n identical *deprecated*\n ================ =====================================================================\n \n **Meta-info keyword arguments:**\n \n ========== ================================================\n name name of dataset. This would appear in a legend\n ========== ================================================\n \"\"\"\n GraphicsObject.__init__(self)\n self.setFlag(self.ItemHasNoContents)\n self.xData = None\n self.yData = None\n self.xDisp = None\n self.yDisp = None\n #self.dataMask = None\n #self.curves = []\n #self.scatters = []\n self.curve = PlotCurveItem()\n self.scatter = ScatterPlotItem()\n self.curve.setParentItem(self)\n self.scatter.setParentItem(self)\n \n self.curve.sigClicked.connect(self.curveClicked)\n self.scatter.sigClicked.connect(self.scatterClicked)\n \n \n #self.clear()\n self.opts = {\n 'connect': 'all',\n \n 'fftMode': False,\n 'logMode': [False, False],\n 'alphaHint': 1.0,\n 'alphaMode': False,\n \n 'pen': (200,200,200),\n 'shadowPen': None,\n 'fillLevel': None,\n 'fillBrush': None,\n 'stepMode': None, \n \n 'symbol': None,\n 'symbolSize': 10,\n 'symbolPen': (200,200,200),\n 'symbolBrush': (50, 50, 150),\n 'pxMode': True,\n \n 'antialias': getConfigOption('antialias'),\n 'pointMode': None,\n \n 'downsample': 1,\n 'autoDownsample': False,\n 'downsampleMethod': 'peak',\n 'autoDownsampleFactor': 5., # draw ~5 samples per pixel\n 'clipToView': False,\n \n 'data': None,\n }\n self.setData(*args, **kargs)\n \n def implements(self, interface=None):\n ints = ['plotData']\n if interface is None:\n return ints\n return interface in ints\n \n def name(self):\n return self.opts.get('name', None)\n \n def boundingRect(self):\n return QtCore.QRectF() ## let child items handle this\n\n def setAlpha(self, alpha, auto):\n if self.opts['alphaHint'] == alpha and self.opts['alphaMode'] == auto:\n return\n self.opts['alphaHint'] = alpha\n self.opts['alphaMode'] = auto\n self.setOpacity(alpha)\n #self.update()\n \n def setFftMode(self, mode):\n if self.opts['fftMode'] == mode:\n return\n self.opts['fftMode'] = mode\n self.xDisp = self.yDisp = None\n self.xClean = self.yClean = None\n self.updateItems()\n self.informViewBoundsChanged()\n \n def setLogMode(self, xMode, yMode):\n if self.opts['logMode'] == [xMode, yMode]:\n return\n self.opts['logMode'] = [xMode, yMode]\n self.xDisp = self.yDisp = None\n self.xClean = self.yClean = None\n self.updateItems()\n self.informViewBoundsChanged()\n \n def setPointMode(self, mode):\n if self.opts['pointMode'] == mode:\n return\n self.opts['pointMode'] = mode\n self.update()\n \n def setPen(self, *args, **kargs):\n \"\"\"\n | Sets the pen used to draw lines between points.\n | *pen* can be a QPen or any argument accepted by :func:`pyqtgraph.mkPen() <pyqtgraph.mkPen>`\n \"\"\"\n pen = fn.mkPen(*args, **kargs)\n self.opts['pen'] = pen\n #self.curve.setPen(pen)\n #for c in self.curves:\n #c.setPen(pen)\n #self.update()\n self.updateItems()\n \n def setShadowPen(self, *args, **kargs):\n \"\"\"\n | Sets the shadow pen used to draw lines between points (this is for enhancing contrast or \n emphacizing data). 
\n | This line is drawn behind the primary pen (see :func:`setPen() <pyqtgraph.PlotDataItem.setPen>`)\n and should generally be assigned greater width than the primary pen.\n | *pen* can be a QPen or any argument accepted by :func:`pyqtgraph.mkPen() <pyqtgraph.mkPen>`\n \"\"\"\n pen = fn.mkPen(*args, **kargs)\n self.opts['shadowPen'] = pen\n #for c in self.curves:\n #c.setPen(pen)\n #self.update()\n self.updateItems()\n \n def setFillBrush(self, *args, **kargs):\n brush = fn.mkBrush(*args, **kargs)\n if self.opts['fillBrush'] == brush:\n return\n self.opts['fillBrush'] = brush\n self.updateItems()\n \n def setBrush(self, *args, **kargs):\n return self.setFillBrush(*args, **kargs)\n \n def setFillLevel(self, level):\n if self.opts['fillLevel'] == level:\n return\n self.opts['fillLevel'] = level\n self.updateItems()\n\n def setSymbol(self, symbol):\n if self.opts['symbol'] == symbol:\n return\n self.opts['symbol'] = symbol\n #self.scatter.setSymbol(symbol)\n self.updateItems()\n \n def setSymbolPen(self, *args, **kargs):\n pen = fn.mkPen(*args, **kargs)\n if self.opts['symbolPen'] == pen:\n return\n self.opts['symbolPen'] = pen\n #self.scatter.setSymbolPen(pen)\n self.updateItems()\n \n \n \n def setSymbolBrush(self, *args, **kargs):\n brush = fn.mkBrush(*args, **kargs)\n if self.opts['symbolBrush'] == brush:\n return\n self.opts['symbolBrush'] = brush\n #self.scatter.setSymbolBrush(brush)\n self.updateItems()\n \n \n def setSymbolSize(self, size):\n if self.opts['symbolSize'] == size:\n return\n self.opts['symbolSize'] = size\n #self.scatter.setSymbolSize(symbolSize)\n self.updateItems()\n\n def setDownsampling(self, ds=None, auto=None, method=None):\n \"\"\"\n Set the downsampling mode of this item. Downsampling reduces the number\n of samples drawn to increase performance. \n \n ============== =================================================================\n **Arguments:**\n ds (int) Reduce visible plot samples by this factor. To disable,\n set ds=1.\n auto (bool) If True, automatically pick *ds* based on visible range\n mode 'subsample': Downsample by taking the first of N samples.\n This method is fastest and least accurate.\n 'mean': Downsample by taking the mean of N samples.\n 'peak': Downsample by drawing a saw wave that follows the min\n and max of the original data. 
This method produces the best\n visual representation of the data but is slower.\n ============== =================================================================\n \"\"\"\n changed = False\n if ds is not None:\n if self.opts['downsample'] != ds:\n changed = True\n self.opts['downsample'] = ds\n \n if auto is not None and self.opts['autoDownsample'] != auto:\n self.opts['autoDownsample'] = auto\n changed = True\n \n if method is not None:\n if self.opts['downsampleMethod'] != method:\n changed = True\n self.opts['downsampleMethod'] = method\n \n if changed:\n self.xDisp = self.yDisp = None\n self.updateItems()\n \n def setClipToView(self, clip):\n if self.opts['clipToView'] == clip:\n return\n self.opts['clipToView'] = clip\n self.xDisp = self.yDisp = None\n self.updateItems()\n \n \n def setData(self, *args, **kargs):\n \"\"\"\n Clear any data displayed by this item and display new data.\n See :func:`__init__() <pyqtgraph.PlotDataItem.__init__>` for details; it accepts the same arguments.\n \"\"\"\n #self.clear()\n profiler = debug.Profiler()\n y = None\n x = None\n if len(args) == 1:\n data = args[0]\n dt = dataType(data)\n if dt == 'empty':\n pass\n elif dt == 'listOfValues':\n y = np.array(data)\n elif dt == 'Nx2array':\n x = data[:,0]\n y = data[:,1]\n elif dt == 'recarray' or dt == 'dictOfLists':\n if 'x' in data:\n x = np.array(data['x'])\n if 'y' in data:\n y = np.array(data['y'])\n elif dt == 'listOfDicts':\n if 'x' in data[0]:\n x = np.array([d.get('x',None) for d in data])\n if 'y' in data[0]:\n y = np.array([d.get('y',None) for d in data])\n for k in ['data', 'symbolSize', 'symbolPen', 'symbolBrush', 'symbolShape']:\n if k in data:\n kargs[k] = [d.get(k, None) for d in data]\n elif dt == 'MetaArray':\n y = data.view(np.ndarray)\n x = data.xvals(0).view(np.ndarray)\n else:\n raise Exception('Invalid data type %s' % type(data))\n \n elif len(args) == 2:\n seq = ('listOfValues', 'MetaArray', 'empty')\n dtyp = dataType(args[0]), dataType(args[1])\n if dtyp[0] not in seq or dtyp[1] not in seq:\n raise Exception('When passing two unnamed arguments, both must be a list or array of values. (got %s, %s)' % (str(type(args[0])), str(type(args[1]))))\n if not isinstance(args[0], np.ndarray):\n #x = np.array(args[0])\n if dtyp[0] == 'MetaArray':\n x = args[0].asarray()\n else:\n x = np.array(args[0])\n else:\n x = args[0].view(np.ndarray)\n if not isinstance(args[1], np.ndarray):\n #y = np.array(args[1])\n if dtyp[1] == 'MetaArray':\n y = args[1].asarray()\n else:\n y = np.array(args[1])\n else:\n y = args[1].view(np.ndarray)\n \n if 'x' in kargs:\n x = kargs['x']\n if 'y' in kargs:\n y = kargs['y']\n\n profiler('interpret data')\n ## pull in all style arguments. 
\n ## Use self.opts to fill in anything not present in kargs.\n \n if 'name' in kargs:\n self.opts['name'] = kargs['name']\n if 'connect' in kargs:\n self.opts['connect'] = kargs['connect']\n\n ## if symbol pen/brush are given with no symbol, then assume symbol is 'o'\n \n if 'symbol' not in kargs and ('symbolPen' in kargs or 'symbolBrush' in kargs or 'symbolSize' in kargs):\n kargs['symbol'] = 'o'\n \n if 'brush' in kargs:\n kargs['fillBrush'] = kargs['brush']\n \n for k in list(self.opts.keys()):\n if k in kargs:\n self.opts[k] = kargs[k]\n \n #curveArgs = {}\n #for k in ['pen', 'shadowPen', 'fillLevel', 'brush']:\n #if k in kargs:\n #self.opts[k] = kargs[k]\n #curveArgs[k] = self.opts[k]\n \n #scatterArgs = {}\n #for k,v in [('symbolPen','pen'), ('symbolBrush','brush'), ('symbol','symbol')]:\n #if k in kargs:\n #self.opts[k] = kargs[k]\n #scatterArgs[v] = self.opts[k]\n \n\n if y is None:\n return\n if y is not None and x is None:\n x = np.arange(len(y))\n \n if isinstance(x, list):\n x = np.array(x)\n if isinstance(y, list):\n y = np.array(y)\n \n self.xData = x.view(np.ndarray) ## one last check to make sure there are no MetaArrays getting by\n self.yData = y.view(np.ndarray)\n self.xClean = self.yClean = None\n self.xDisp = None\n self.yDisp = None\n profiler('set data')\n \n self.updateItems()\n profiler('update items')\n \n self.informViewBoundsChanged()\n #view = self.getViewBox()\n #if view is not None:\n #view.itemBoundsChanged(self) ## inform view so it can update its range if it wants\n \n self.sigPlotChanged.emit(self)\n profiler('emit')\n\n def updateItems(self):\n \n curveArgs = {}\n for k,v in [('pen','pen'), ('shadowPen','shadowPen'), ('fillLevel','fillLevel'), ('fillBrush', 'brush'), ('antialias', 'antialias'), ('connect', 'connect'), ('stepMode', 'stepMode')]:\n curveArgs[v] = self.opts[k]\n \n scatterArgs = {}\n for k,v in [('symbolPen','pen'), ('symbolBrush','brush'), ('symbol','symbol'), ('symbolSize', 'size'), ('data', 'data'), ('pxMode', 'pxMode'), ('antialias', 'antialias')]:\n if k in self.opts:\n scatterArgs[v] = self.opts[k]\n \n x,y = self.getData()\n #scatterArgs['mask'] = self.dataMask\n \n if curveArgs['pen'] is not None or (curveArgs['brush'] is not None and curveArgs['fillLevel'] is not None):\n self.curve.setData(x=x, y=y, **curveArgs)\n self.curve.show()\n else:\n self.curve.hide()\n \n if scatterArgs['symbol'] is not None:\n self.scatter.setData(x=x, y=y, **scatterArgs)\n self.scatter.show()\n else:\n self.scatter.hide()\n\n\n def getData(self):\n if self.xData is None:\n return (None, None)\n \n if self.xDisp is None:\n x = self.xData\n y = self.yData\n \n if self.opts['fftMode']:\n x,y = self._fourierTransform(x, y)\n # Ignore the first bin for fft data if we have a logx scale\n if self.opts['logMode'][0]:\n x=x[1:]\n y=y[1:] \n if self.opts['logMode'][0]:\n x = np.log10(x)\n if self.opts['logMode'][1]:\n y = np.log10(y)\n \n ds = self.opts['downsample']\n if not isinstance(ds, int):\n ds = 1\n \n if self.opts['autoDownsample']:\n # this option presumes that x-values have uniform spacing\n range = self.viewRect()\n if range is not None:\n dx = float(x[-1]-x[0]) / (len(x)-1)\n x0 = (range.left()-x[0]) / dx\n x1 = (range.right()-x[0]) / dx\n width = self.getViewBox().width()\n if width != 0.0:\n ds = int(max(1, int((x1-x0) / (width*self.opts['autoDownsampleFactor']))))\n ## downsampling is expensive; delay until after clipping.\n \n if self.opts['clipToView']:\n view = self.getViewBox()\n if view is None or not view.autoRangeEnabled()[0]:\n # this 
option presumes that x-values have uniform spacing\n range = self.viewRect()\n if range is not None and len(x) > 1:\n dx = float(x[-1]-x[0]) / (len(x)-1)\n # clip to visible region extended by downsampling value\n x0 = np.clip(int((range.left()-x[0])/dx)-1*ds , 0, len(x)-1)\n x1 = np.clip(int((range.right()-x[0])/dx)+2*ds , 0, len(x)-1)\n x = x[x0:x1]\n y = y[x0:x1]\n \n if ds > 1:\n if self.opts['downsampleMethod'] == 'subsample':\n x = x[::ds]\n y = y[::ds]\n elif self.opts['downsampleMethod'] == 'mean':\n n = len(x) // ds\n x = x[:n*ds:ds]\n y = y[:n*ds].reshape(n,ds).mean(axis=1)\n elif self.opts['downsampleMethod'] == 'peak':\n n = len(x) // ds\n x1 = np.empty((n,2))\n x1[:] = x[:n*ds:ds,np.newaxis]\n x = x1.reshape(n*2)\n y1 = np.empty((n,2))\n y2 = y[:n*ds].reshape((n, ds))\n y1[:,0] = y2.max(axis=1)\n y1[:,1] = y2.min(axis=1)\n y = y1.reshape(n*2)\n \n \n self.xDisp = x\n self.yDisp = y\n return self.xDisp, self.yDisp\n\n def dataBounds(self, ax, frac=1.0, orthoRange=None):\n \"\"\"\n Returns the range occupied by the data (along a specific axis) in this item.\n This method is called by ViewBox when auto-scaling.\n\n =============== =============================================================\n **Arguments:**\n ax (0 or 1) the axis for which to return this item's data range\n frac (float 0.0-1.0) Specifies what fraction of the total data \n range to return. By default, the entire range is returned.\n This allows the ViewBox to ignore large spikes in the data\n when auto-scaling.\n orthoRange ([min,max] or None) Specifies that only the data within the\n given range (orthogonal to *ax*) should me measured when \n returning the data range. (For example, a ViewBox might ask\n what is the y-range of all data with x-values between min\n and max)\n =============== =============================================================\n \"\"\"\n \n range = [None, None]\n if self.curve.isVisible():\n range = self.curve.dataBounds(ax, frac, orthoRange)\n elif self.scatter.isVisible():\n r2 = self.scatter.dataBounds(ax, frac, orthoRange)\n range = [\n r2[0] if range[0] is None else (range[0] if r2[0] is None else min(r2[0], range[0])),\n r2[1] if range[1] is None else (range[1] if r2[1] is None else min(r2[1], range[1]))\n ]\n return range\n \n def pixelPadding(self):\n \"\"\"\n Return the size in pixels that this item may draw beyond the values returned by dataBounds().\n This method is called by ViewBox when auto-scaling.\n \"\"\"\n pad = 0\n if self.curve.isVisible():\n pad = max(pad, self.curve.pixelPadding())\n elif self.scatter.isVisible():\n pad = max(pad, self.scatter.pixelPadding())\n return pad\n \n\n def clear(self):\n #for i in self.curves+self.scatters:\n #if i.scene() is not None:\n #i.scene().removeItem(i)\n #self.curves = []\n #self.scatters = []\n self.xData = None\n self.yData = None\n #self.xClean = None\n #self.yClean = None\n self.xDisp = None\n self.yDisp = None\n self.curve.setData([])\n self.scatter.setData([])\n \n def appendData(self, *args, **kargs):\n pass\n \n def curveClicked(self):\n self.sigClicked.emit(self)\n \n def scatterClicked(self, plt, points):\n self.sigClicked.emit(self)\n self.sigPointsClicked.emit(self, points)\n \n def viewRangeChanged(self):\n # view range has changed; re-plot if needed\n if self.opts['clipToView'] or self.opts['autoDownsample']:\n self.xDisp = self.yDisp = None\n self.updateItems()\n \n def _fourierTransform(self, x, y):\n ## Perform fourier transform. 
If x values are not sampled uniformly,\n ## then use np.interp to resample before taking fft.\n dx = np.diff(x)\n uniform = not np.any(np.abs(dx-dx[0]) > (abs(dx[0]) / 1000.))\n if not uniform:\n x2 = np.linspace(x[0], x[-1], len(x))\n y = np.interp(x2, x, y)\n x = x2\n n = y.size\n f = np.fft.rfft(y) / n\n d = float(x[-1]-x[0]) / (len(x)-1)\n x = np.fft.rfftfreq(n, d)\n y = np.abs(f)\n return x, y\n \ndef dataType(obj):\n if hasattr(obj, '__len__') and len(obj) == 0:\n return 'empty'\n if isinstance(obj, dict):\n return 'dictOfLists'\n elif isSequence(obj):\n first = obj[0]\n \n if (hasattr(obj, 'implements') and obj.implements('MetaArray')):\n return 'MetaArray'\n elif isinstance(obj, np.ndarray):\n if obj.ndim == 1:\n if obj.dtype.names is None:\n return 'listOfValues'\n else:\n return 'recarray'\n elif obj.ndim == 2 and obj.dtype.names is None and obj.shape[1] == 2:\n return 'Nx2array'\n else:\n raise Exception('array shape must be (N,) or (N,2); got %s instead' % str(obj.shape))\n elif isinstance(first, dict):\n return 'listOfDicts'\n else:\n return 'listOfValues'\n \n \ndef isSequence(obj):\n return hasattr(obj, '__iter__') or isinstance(obj, np.ndarray) or (hasattr(obj, 'implements') and obj.implements('MetaArray'))\n \n \n \n#class TableData:\n #\"\"\"\n #Class for presenting multiple forms of tabular data through a consistent interface.\n #May contain:\n #- numpy record array\n #- list-of-dicts (all dicts are _not_ required to have the same keys)\n #- dict-of-lists\n #- dict (single record)\n #Note: if all the values in this record are lists, it will be interpreted as multiple records\n \n #Data can be accessed and modified by column, by row, or by value\n #data[columnName]\n #data[rowId]\n #data[columnName, rowId] = value\n #data[columnName] = [value, value, ...]\n #data[rowId] = {columnName: value, ...}\n #\"\"\"\n \n #def __init__(self, data):\n #self.data = data\n #if isinstance(data, np.ndarray):\n #self.mode = 'array'\n #elif isinstance(data, list):\n #self.mode = 'list'\n #elif isinstance(data, dict):\n #types = set(map(type, data.values()))\n ### dict may be a dict-of-lists or a single record\n #types -= set([list, np.ndarray]) ## if dict contains any non-sequence values, it is probably a single record.\n #if len(types) != 0:\n #self.data = [self.data]\n #self.mode = 'list'\n #else:\n #self.mode = 'dict'\n #elif isinstance(data, TableData):\n #self.data = data.data\n #self.mode = data.mode\n #else:\n #raise TypeError(type(data))\n \n #for fn in ['__getitem__', '__setitem__']:\n #setattr(self, fn, getattr(self, '_TableData'+fn+self.mode))\n \n #def originalData(self):\n #return self.data\n \n #def toArray(self):\n #if self.mode == 'array':\n #return self.data\n #if len(self) < 1:\n ##return np.array([]) ## need to return empty array *with correct columns*, but this is very difficult, so just return None\n #return None\n #rec1 = self[0]\n #dtype = functions.suggestRecordDType(rec1)\n ##print rec1, dtype\n #arr = np.empty(len(self), dtype=dtype)\n #arr[0] = tuple(rec1.values())\n #for i in xrange(1, len(self)):\n #arr[i] = tuple(self[i].values())\n #return arr\n \n #def __getitem__array(self, arg):\n #if isinstance(arg, tuple):\n #return self.data[arg[0]][arg[1]]\n #else:\n #return self.data[arg]\n \n #def __getitem__list(self, arg):\n #if isinstance(arg, basestring):\n #return [d.get(arg, None) for d in self.data]\n #elif isinstance(arg, int):\n #return self.data[arg]\n #elif isinstance(arg, tuple):\n #arg = self._orderArgs(arg)\n #return self.data[arg[0]][arg[1]]\n #else:\n 
#raise TypeError(type(arg))\n \n #def __getitem__dict(self, arg):\n #if isinstance(arg, basestring):\n #return self.data[arg]\n #elif isinstance(arg, int):\n #return dict([(k, v[arg]) for k, v in self.data.items()])\n #elif isinstance(arg, tuple):\n #arg = self._orderArgs(arg)\n #return self.data[arg[1]][arg[0]]\n #else:\n #raise TypeError(type(arg))\n\n #def __setitem__array(self, arg, val):\n #if isinstance(arg, tuple):\n #self.data[arg[0]][arg[1]] = val\n #else:\n #self.data[arg] = val\n\n #def __setitem__list(self, arg, val):\n #if isinstance(arg, basestring):\n #if len(val) != len(self.data):\n #raise Exception(\"Values (%d) and data set (%d) are not the same length.\" % (len(val), len(self.data)))\n #for i, rec in enumerate(self.data):\n #rec[arg] = val[i]\n #elif isinstance(arg, int):\n #self.data[arg] = val\n #elif isinstance(arg, tuple):\n #arg = self._orderArgs(arg)\n #self.data[arg[0]][arg[1]] = val\n #else:\n #raise TypeError(type(arg))\n \n #def __setitem__dict(self, arg, val):\n #if isinstance(arg, basestring):\n #if len(val) != len(self.data[arg]):\n #raise Exception(\"Values (%d) and data set (%d) are not the same length.\" % (len(val), len(self.data[arg])))\n #self.data[arg] = val\n #elif isinstance(arg, int):\n #for k in self.data:\n #self.data[k][arg] = val[k]\n #elif isinstance(arg, tuple):\n #arg = self._orderArgs(arg)\n #self.data[arg[1]][arg[0]] = val\n #else:\n #raise TypeError(type(arg))\n\n #def _orderArgs(self, args):\n ### return args in (int, str) order\n #if isinstance(args[0], basestring):\n #return (args[1], args[0])\n #else:\n #return args\n \n #def __iter__(self):\n #for i in xrange(len(self)):\n #yield self[i]\n\n #def __len__(self):\n #if self.mode == 'array' or self.mode == 'list':\n #return len(self.data)\n #else:\n #return max(map(len, self.data.values()))\n\n #def columnNames(self):\n #\"\"\"returns column names in no particular order\"\"\"\n #if self.mode == 'array':\n #return self.data.dtype.names\n #elif self.mode == 'list':\n #names = set()\n #for row in self.data:\n #names.update(row.keys())\n #return list(names)\n #elif self.mode == 'dict':\n #return self.data.keys()\n \n #def keys(self):\n #return self.columnNames()\n", "path": "pyqtgraph/graphicsItems/PlotDataItem.py"}], "after_files": [{"content": "import numpy as np\nfrom .. import metaarray as metaarray\nfrom ..Qt import QtCore\nfrom .GraphicsObject import GraphicsObject\nfrom .PlotCurveItem import PlotCurveItem\nfrom .ScatterPlotItem import ScatterPlotItem\nfrom .. import functions as fn\nfrom .. import debug as debug\nfrom .. import getConfigOption\n\n\nclass PlotDataItem(GraphicsObject):\n \"\"\"\n **Bases:** :class:`GraphicsObject <pyqtgraph.GraphicsObject>`\n \n GraphicsItem for displaying plot curves, scatter plots, or both. \n While it is possible to use :class:`PlotCurveItem <pyqtgraph.PlotCurveItem>` or\n :class:`ScatterPlotItem <pyqtgraph.ScatterPlotItem>` individually, this class\n provides a unified interface to both. Instances of :class:`PlotDataItem` are \n usually created by plot() methods such as :func:`pyqtgraph.plot` and\n :func:`PlotItem.plot() <pyqtgraph.PlotItem.plot>`.\n \n ============================== ==============================================\n **Signals:**\n sigPlotChanged(self) Emitted when the data in this item is updated. 
\n sigClicked(self) Emitted when the item is clicked.\n sigPointsClicked(self, points) Emitted when a plot point is clicked\n Sends the list of points under the mouse.\n ============================== ==============================================\n \"\"\"\n \n sigPlotChanged = QtCore.Signal(object)\n sigClicked = QtCore.Signal(object)\n sigPointsClicked = QtCore.Signal(object, object)\n \n def __init__(self, *args, **kargs):\n \"\"\"\n There are many different ways to create a PlotDataItem:\n \n **Data initialization arguments:** (x,y data only)\n \n =================================== ======================================\n PlotDataItem(xValues, yValues) x and y values may be any sequence (including ndarray) of real numbers\n PlotDataItem(yValues) y values only -- x will be automatically set to range(len(y))\n PlotDataItem(x=xValues, y=yValues) x and y given by keyword arguments\n PlotDataItem(ndarray(Nx2)) numpy array with shape (N, 2) where x=data[:,0] and y=data[:,1]\n =================================== ======================================\n \n **Data initialization arguments:** (x,y data AND may include spot style)\n \n =========================== =========================================\n PlotDataItem(recarray) numpy array with dtype=[('x', float), ('y', float), ...]\n PlotDataItem(list-of-dicts) [{'x': x, 'y': y, ...}, ...] \n PlotDataItem(dict-of-lists) {'x': [...], 'y': [...], ...} \n PlotDataItem(MetaArray) 1D array of Y values with X sepecified as axis values \n OR 2D array with a column 'y' and extra columns as needed.\n =========================== =========================================\n \n **Line style keyword arguments:**\n\n ========== ==============================================================================\n connect Specifies how / whether vertexes should be connected. See\n :func:`arrayToQPath() <pyqtgraph.arrayToQPath>`\n pen Pen to use for drawing line between points.\n Default is solid grey, 1px width. Use None to disable line drawing.\n May be any single argument accepted by :func:`mkPen() <pyqtgraph.mkPen>`\n shadowPen Pen for secondary line to draw behind the primary line. disabled by default.\n May be any single argument accepted by :func:`mkPen() <pyqtgraph.mkPen>`\n fillLevel Fill the area between the curve and fillLevel\n fillBrush Fill to use when fillLevel is specified. \n May be any single argument accepted by :func:`mkBrush() <pyqtgraph.mkBrush>`\n stepMode If True, two orthogonal lines are drawn for each sample\n as steps. This is commonly used when drawing histograms.\n Note that in this case, `len(x) == len(y) + 1`\n (added in version 0.9.9)\n ========== ==============================================================================\n \n **Point style keyword arguments:** (see :func:`ScatterPlotItem.setData() <pyqtgraph.ScatterPlotItem.setData>` for more information)\n \n ============ =====================================================\n symbol Symbol to use for drawing points OR list of symbols, \n one per point. Default is no symbol.\n Options are o, s, t, d, +, or any QPainterPath\n symbolPen Outline pen for drawing points OR list of pens, one \n per point. May be any single argument accepted by \n :func:`mkPen() <pyqtgraph.mkPen>`\n symbolBrush Brush for filling points OR list of brushes, one per \n point. May be any single argument accepted by \n :func:`mkBrush() <pyqtgraph.mkBrush>`\n symbolSize Diameter of symbols OR list of diameters.\n pxMode (bool) If True, then symbolSize is specified in \n pixels. 
If False, then symbolSize is \n specified in data coordinates.\n ============ =====================================================\n \n **Optimization keyword arguments:**\n \n ================ =====================================================================\n antialias (bool) By default, antialiasing is disabled to improve performance.\n Note that in some cases (in particluar, when pxMode=True), points \n will be rendered antialiased even if this is set to False.\n decimate deprecated.\n downsample (int) Reduce the number of samples displayed by this value\n downsampleMethod 'subsample': Downsample by taking the first of N samples. \n This method is fastest and least accurate.\n 'mean': Downsample by taking the mean of N samples.\n 'peak': Downsample by drawing a saw wave that follows the min \n and max of the original data. This method produces the best \n visual representation of the data but is slower.\n autoDownsample (bool) If True, resample the data before plotting to avoid plotting\n multiple line segments per pixel. This can improve performance when\n viewing very high-density data, but increases the initial overhead \n and memory usage.\n clipToView (bool) If True, only plot data that is visible within the X range of\n the containing ViewBox. This can improve performance when plotting\n very large data sets where only a fraction of the data is visible\n at any time.\n identical *deprecated*\n ================ =====================================================================\n \n **Meta-info keyword arguments:**\n \n ========== ================================================\n name name of dataset. This would appear in a legend\n ========== ================================================\n \"\"\"\n GraphicsObject.__init__(self)\n self.setFlag(self.ItemHasNoContents)\n self.xData = None\n self.yData = None\n self.xDisp = None\n self.yDisp = None\n #self.dataMask = None\n #self.curves = []\n #self.scatters = []\n self.curve = PlotCurveItem()\n self.scatter = ScatterPlotItem()\n self.curve.setParentItem(self)\n self.scatter.setParentItem(self)\n \n self.curve.sigClicked.connect(self.curveClicked)\n self.scatter.sigClicked.connect(self.scatterClicked)\n \n \n #self.clear()\n self.opts = {\n 'connect': 'all',\n \n 'fftMode': False,\n 'logMode': [False, False],\n 'alphaHint': 1.0,\n 'alphaMode': False,\n \n 'pen': (200,200,200),\n 'shadowPen': None,\n 'fillLevel': None,\n 'fillBrush': None,\n 'stepMode': None, \n \n 'symbol': None,\n 'symbolSize': 10,\n 'symbolPen': (200,200,200),\n 'symbolBrush': (50, 50, 150),\n 'pxMode': True,\n \n 'antialias': getConfigOption('antialias'),\n 'pointMode': None,\n \n 'downsample': 1,\n 'autoDownsample': False,\n 'downsampleMethod': 'peak',\n 'autoDownsampleFactor': 5., # draw ~5 samples per pixel\n 'clipToView': False,\n \n 'data': None,\n }\n self.setData(*args, **kargs)\n \n def implements(self, interface=None):\n ints = ['plotData']\n if interface is None:\n return ints\n return interface in ints\n \n def name(self):\n return self.opts.get('name', None)\n \n def boundingRect(self):\n return QtCore.QRectF() ## let child items handle this\n\n def setAlpha(self, alpha, auto):\n if self.opts['alphaHint'] == alpha and self.opts['alphaMode'] == auto:\n return\n self.opts['alphaHint'] = alpha\n self.opts['alphaMode'] = auto\n self.setOpacity(alpha)\n #self.update()\n \n def setFftMode(self, mode):\n if self.opts['fftMode'] == mode:\n return\n self.opts['fftMode'] = mode\n self.xDisp = self.yDisp = None\n self.xClean = self.yClean = None\n 
self.updateItems()\n self.informViewBoundsChanged()\n \n def setLogMode(self, xMode, yMode):\n if self.opts['logMode'] == [xMode, yMode]:\n return\n self.opts['logMode'] = [xMode, yMode]\n self.xDisp = self.yDisp = None\n self.xClean = self.yClean = None\n self.updateItems()\n self.informViewBoundsChanged()\n \n def setPointMode(self, mode):\n if self.opts['pointMode'] == mode:\n return\n self.opts['pointMode'] = mode\n self.update()\n \n def setPen(self, *args, **kargs):\n \"\"\"\n | Sets the pen used to draw lines between points.\n | *pen* can be a QPen or any argument accepted by :func:`pyqtgraph.mkPen() <pyqtgraph.mkPen>`\n \"\"\"\n pen = fn.mkPen(*args, **kargs)\n self.opts['pen'] = pen\n #self.curve.setPen(pen)\n #for c in self.curves:\n #c.setPen(pen)\n #self.update()\n self.updateItems()\n \n def setShadowPen(self, *args, **kargs):\n \"\"\"\n | Sets the shadow pen used to draw lines between points (this is for enhancing contrast or \n emphacizing data). \n | This line is drawn behind the primary pen (see :func:`setPen() <pyqtgraph.PlotDataItem.setPen>`)\n and should generally be assigned greater width than the primary pen.\n | *pen* can be a QPen or any argument accepted by :func:`pyqtgraph.mkPen() <pyqtgraph.mkPen>`\n \"\"\"\n pen = fn.mkPen(*args, **kargs)\n self.opts['shadowPen'] = pen\n #for c in self.curves:\n #c.setPen(pen)\n #self.update()\n self.updateItems()\n \n def setFillBrush(self, *args, **kargs):\n brush = fn.mkBrush(*args, **kargs)\n if self.opts['fillBrush'] == brush:\n return\n self.opts['fillBrush'] = brush\n self.updateItems()\n \n def setBrush(self, *args, **kargs):\n return self.setFillBrush(*args, **kargs)\n \n def setFillLevel(self, level):\n if self.opts['fillLevel'] == level:\n return\n self.opts['fillLevel'] = level\n self.updateItems()\n\n def setSymbol(self, symbol):\n if self.opts['symbol'] == symbol:\n return\n self.opts['symbol'] = symbol\n #self.scatter.setSymbol(symbol)\n self.updateItems()\n \n def setSymbolPen(self, *args, **kargs):\n pen = fn.mkPen(*args, **kargs)\n if self.opts['symbolPen'] == pen:\n return\n self.opts['symbolPen'] = pen\n #self.scatter.setSymbolPen(pen)\n self.updateItems()\n \n \n \n def setSymbolBrush(self, *args, **kargs):\n brush = fn.mkBrush(*args, **kargs)\n if self.opts['symbolBrush'] == brush:\n return\n self.opts['symbolBrush'] = brush\n #self.scatter.setSymbolBrush(brush)\n self.updateItems()\n \n \n def setSymbolSize(self, size):\n if self.opts['symbolSize'] == size:\n return\n self.opts['symbolSize'] = size\n #self.scatter.setSymbolSize(symbolSize)\n self.updateItems()\n\n def setDownsampling(self, ds=None, auto=None, method=None):\n \"\"\"\n Set the downsampling mode of this item. Downsampling reduces the number\n of samples drawn to increase performance. \n \n ============== =================================================================\n **Arguments:**\n ds (int) Reduce visible plot samples by this factor. To disable,\n set ds=1.\n auto (bool) If True, automatically pick *ds* based on visible range\n mode 'subsample': Downsample by taking the first of N samples.\n This method is fastest and least accurate.\n 'mean': Downsample by taking the mean of N samples.\n 'peak': Downsample by drawing a saw wave that follows the min\n and max of the original data. 
This method produces the best\n visual representation of the data but is slower.\n ============== =================================================================\n \"\"\"\n changed = False\n if ds is not None:\n if self.opts['downsample'] != ds:\n changed = True\n self.opts['downsample'] = ds\n \n if auto is not None and self.opts['autoDownsample'] != auto:\n self.opts['autoDownsample'] = auto\n changed = True\n \n if method is not None:\n if self.opts['downsampleMethod'] != method:\n changed = True\n self.opts['downsampleMethod'] = method\n \n if changed:\n self.xDisp = self.yDisp = None\n self.updateItems()\n \n def setClipToView(self, clip):\n if self.opts['clipToView'] == clip:\n return\n self.opts['clipToView'] = clip\n self.xDisp = self.yDisp = None\n self.updateItems()\n \n \n def setData(self, *args, **kargs):\n \"\"\"\n Clear any data displayed by this item and display new data.\n See :func:`__init__() <pyqtgraph.PlotDataItem.__init__>` for details; it accepts the same arguments.\n \"\"\"\n #self.clear()\n profiler = debug.Profiler()\n y = None\n x = None\n if len(args) == 1:\n data = args[0]\n dt = dataType(data)\n if dt == 'empty':\n pass\n elif dt == 'listOfValues':\n y = np.array(data)\n elif dt == 'Nx2array':\n x = data[:,0]\n y = data[:,1]\n elif dt == 'recarray' or dt == 'dictOfLists':\n if 'x' in data:\n x = np.array(data['x'])\n if 'y' in data:\n y = np.array(data['y'])\n elif dt == 'listOfDicts':\n if 'x' in data[0]:\n x = np.array([d.get('x',None) for d in data])\n if 'y' in data[0]:\n y = np.array([d.get('y',None) for d in data])\n for k in ['data', 'symbolSize', 'symbolPen', 'symbolBrush', 'symbolShape']:\n if k in data:\n kargs[k] = [d.get(k, None) for d in data]\n elif dt == 'MetaArray':\n y = data.view(np.ndarray)\n x = data.xvals(0).view(np.ndarray)\n else:\n raise Exception('Invalid data type %s' % type(data))\n \n elif len(args) == 2:\n seq = ('listOfValues', 'MetaArray', 'empty')\n dtyp = dataType(args[0]), dataType(args[1])\n if dtyp[0] not in seq or dtyp[1] not in seq:\n raise Exception('When passing two unnamed arguments, both must be a list or array of values. (got %s, %s)' % (str(type(args[0])), str(type(args[1]))))\n if not isinstance(args[0], np.ndarray):\n #x = np.array(args[0])\n if dtyp[0] == 'MetaArray':\n x = args[0].asarray()\n else:\n x = np.array(args[0])\n else:\n x = args[0].view(np.ndarray)\n if not isinstance(args[1], np.ndarray):\n #y = np.array(args[1])\n if dtyp[1] == 'MetaArray':\n y = args[1].asarray()\n else:\n y = np.array(args[1])\n else:\n y = args[1].view(np.ndarray)\n \n if 'x' in kargs:\n x = kargs['x']\n if 'y' in kargs:\n y = kargs['y']\n\n profiler('interpret data')\n ## pull in all style arguments. 
\n ## Use self.opts to fill in anything not present in kargs.\n \n if 'name' in kargs:\n self.opts['name'] = kargs['name']\n if 'connect' in kargs:\n self.opts['connect'] = kargs['connect']\n\n ## if symbol pen/brush are given with no symbol, then assume symbol is 'o'\n \n if 'symbol' not in kargs and ('symbolPen' in kargs or 'symbolBrush' in kargs or 'symbolSize' in kargs):\n kargs['symbol'] = 'o'\n \n if 'brush' in kargs:\n kargs['fillBrush'] = kargs['brush']\n \n for k in list(self.opts.keys()):\n if k in kargs:\n self.opts[k] = kargs[k]\n \n #curveArgs = {}\n #for k in ['pen', 'shadowPen', 'fillLevel', 'brush']:\n #if k in kargs:\n #self.opts[k] = kargs[k]\n #curveArgs[k] = self.opts[k]\n \n #scatterArgs = {}\n #for k,v in [('symbolPen','pen'), ('symbolBrush','brush'), ('symbol','symbol')]:\n #if k in kargs:\n #self.opts[k] = kargs[k]\n #scatterArgs[v] = self.opts[k]\n \n\n if y is None:\n return\n if y is not None and x is None:\n x = np.arange(len(y))\n \n if isinstance(x, list):\n x = np.array(x)\n if isinstance(y, list):\n y = np.array(y)\n \n self.xData = x.view(np.ndarray) ## one last check to make sure there are no MetaArrays getting by\n self.yData = y.view(np.ndarray)\n self.xClean = self.yClean = None\n self.xDisp = None\n self.yDisp = None\n profiler('set data')\n \n self.updateItems()\n profiler('update items')\n \n self.informViewBoundsChanged()\n #view = self.getViewBox()\n #if view is not None:\n #view.itemBoundsChanged(self) ## inform view so it can update its range if it wants\n \n self.sigPlotChanged.emit(self)\n profiler('emit')\n\n def updateItems(self):\n \n curveArgs = {}\n for k,v in [('pen','pen'), ('shadowPen','shadowPen'), ('fillLevel','fillLevel'), ('fillBrush', 'brush'), ('antialias', 'antialias'), ('connect', 'connect'), ('stepMode', 'stepMode')]:\n curveArgs[v] = self.opts[k]\n \n scatterArgs = {}\n for k,v in [('symbolPen','pen'), ('symbolBrush','brush'), ('symbol','symbol'), ('symbolSize', 'size'), ('data', 'data'), ('pxMode', 'pxMode'), ('antialias', 'antialias')]:\n if k in self.opts:\n scatterArgs[v] = self.opts[k]\n \n x,y = self.getData()\n #scatterArgs['mask'] = self.dataMask\n \n if curveArgs['pen'] is not None or (curveArgs['brush'] is not None and curveArgs['fillLevel'] is not None):\n self.curve.setData(x=x, y=y, **curveArgs)\n self.curve.show()\n else:\n self.curve.hide()\n \n if scatterArgs['symbol'] is not None:\n \n if self.opts.get('stepMode', False) is True:\n x = 0.5 * (x[:-1] + x[1:]) \n self.scatter.setData(x=x, y=y, **scatterArgs)\n self.scatter.show()\n else:\n self.scatter.hide()\n\n\n def getData(self):\n if self.xData is None:\n return (None, None)\n \n if self.xDisp is None:\n x = self.xData\n y = self.yData\n \n if self.opts['fftMode']:\n x,y = self._fourierTransform(x, y)\n # Ignore the first bin for fft data if we have a logx scale\n if self.opts['logMode'][0]:\n x=x[1:]\n y=y[1:] \n if self.opts['logMode'][0]:\n x = np.log10(x)\n if self.opts['logMode'][1]:\n y = np.log10(y)\n \n ds = self.opts['downsample']\n if not isinstance(ds, int):\n ds = 1\n \n if self.opts['autoDownsample']:\n # this option presumes that x-values have uniform spacing\n range = self.viewRect()\n if range is not None:\n dx = float(x[-1]-x[0]) / (len(x)-1)\n x0 = (range.left()-x[0]) / dx\n x1 = (range.right()-x[0]) / dx\n width = self.getViewBox().width()\n if width != 0.0:\n ds = int(max(1, int((x1-x0) / (width*self.opts['autoDownsampleFactor']))))\n ## downsampling is expensive; delay until after clipping.\n \n if self.opts['clipToView']:\n view = 
self.getViewBox()\n if view is None or not view.autoRangeEnabled()[0]:\n # this option presumes that x-values have uniform spacing\n range = self.viewRect()\n if range is not None and len(x) > 1:\n dx = float(x[-1]-x[0]) / (len(x)-1)\n # clip to visible region extended by downsampling value\n x0 = np.clip(int((range.left()-x[0])/dx)-1*ds , 0, len(x)-1)\n x1 = np.clip(int((range.right()-x[0])/dx)+2*ds , 0, len(x)-1)\n x = x[x0:x1]\n y = y[x0:x1]\n \n if ds > 1:\n if self.opts['downsampleMethod'] == 'subsample':\n x = x[::ds]\n y = y[::ds]\n elif self.opts['downsampleMethod'] == 'mean':\n n = len(x) // ds\n x = x[:n*ds:ds]\n y = y[:n*ds].reshape(n,ds).mean(axis=1)\n elif self.opts['downsampleMethod'] == 'peak':\n n = len(x) // ds\n x1 = np.empty((n,2))\n x1[:] = x[:n*ds:ds,np.newaxis]\n x = x1.reshape(n*2)\n y1 = np.empty((n,2))\n y2 = y[:n*ds].reshape((n, ds))\n y1[:,0] = y2.max(axis=1)\n y1[:,1] = y2.min(axis=1)\n y = y1.reshape(n*2)\n \n \n self.xDisp = x\n self.yDisp = y\n return self.xDisp, self.yDisp\n\n def dataBounds(self, ax, frac=1.0, orthoRange=None):\n \"\"\"\n Returns the range occupied by the data (along a specific axis) in this item.\n This method is called by ViewBox when auto-scaling.\n\n =============== =============================================================\n **Arguments:**\n ax (0 or 1) the axis for which to return this item's data range\n frac (float 0.0-1.0) Specifies what fraction of the total data \n range to return. By default, the entire range is returned.\n This allows the ViewBox to ignore large spikes in the data\n when auto-scaling.\n orthoRange ([min,max] or None) Specifies that only the data within the\n given range (orthogonal to *ax*) should me measured when \n returning the data range. (For example, a ViewBox might ask\n what is the y-range of all data with x-values between min\n and max)\n =============== =============================================================\n \"\"\"\n \n range = [None, None]\n if self.curve.isVisible():\n range = self.curve.dataBounds(ax, frac, orthoRange)\n elif self.scatter.isVisible():\n r2 = self.scatter.dataBounds(ax, frac, orthoRange)\n range = [\n r2[0] if range[0] is None else (range[0] if r2[0] is None else min(r2[0], range[0])),\n r2[1] if range[1] is None else (range[1] if r2[1] is None else min(r2[1], range[1]))\n ]\n return range\n \n def pixelPadding(self):\n \"\"\"\n Return the size in pixels that this item may draw beyond the values returned by dataBounds().\n This method is called by ViewBox when auto-scaling.\n \"\"\"\n pad = 0\n if self.curve.isVisible():\n pad = max(pad, self.curve.pixelPadding())\n elif self.scatter.isVisible():\n pad = max(pad, self.scatter.pixelPadding())\n return pad\n \n\n def clear(self):\n #for i in self.curves+self.scatters:\n #if i.scene() is not None:\n #i.scene().removeItem(i)\n #self.curves = []\n #self.scatters = []\n self.xData = None\n self.yData = None\n #self.xClean = None\n #self.yClean = None\n self.xDisp = None\n self.yDisp = None\n self.curve.setData([])\n self.scatter.setData([])\n \n def appendData(self, *args, **kargs):\n pass\n \n def curveClicked(self):\n self.sigClicked.emit(self)\n \n def scatterClicked(self, plt, points):\n self.sigClicked.emit(self)\n self.sigPointsClicked.emit(self, points)\n \n def viewRangeChanged(self):\n # view range has changed; re-plot if needed\n if self.opts['clipToView'] or self.opts['autoDownsample']:\n self.xDisp = self.yDisp = None\n self.updateItems()\n \n def _fourierTransform(self, x, y):\n ## Perform fourier transform. 
If x values are not sampled uniformly,\n ## then use np.interp to resample before taking fft.\n dx = np.diff(x)\n uniform = not np.any(np.abs(dx-dx[0]) > (abs(dx[0]) / 1000.))\n if not uniform:\n x2 = np.linspace(x[0], x[-1], len(x))\n y = np.interp(x2, x, y)\n x = x2\n n = y.size\n f = np.fft.rfft(y) / n\n d = float(x[-1]-x[0]) / (len(x)-1)\n x = np.fft.rfftfreq(n, d)\n y = np.abs(f)\n return x, y\n \ndef dataType(obj):\n if hasattr(obj, '__len__') and len(obj) == 0:\n return 'empty'\n if isinstance(obj, dict):\n return 'dictOfLists'\n elif isSequence(obj):\n first = obj[0]\n \n if (hasattr(obj, 'implements') and obj.implements('MetaArray')):\n return 'MetaArray'\n elif isinstance(obj, np.ndarray):\n if obj.ndim == 1:\n if obj.dtype.names is None:\n return 'listOfValues'\n else:\n return 'recarray'\n elif obj.ndim == 2 and obj.dtype.names is None and obj.shape[1] == 2:\n return 'Nx2array'\n else:\n raise Exception('array shape must be (N,) or (N,2); got %s instead' % str(obj.shape))\n elif isinstance(first, dict):\n return 'listOfDicts'\n else:\n return 'listOfValues'\n \n \ndef isSequence(obj):\n return hasattr(obj, '__iter__') or isinstance(obj, np.ndarray) or (hasattr(obj, 'implements') and obj.implements('MetaArray'))\n \n \n \n#class TableData:\n #\"\"\"\n #Class for presenting multiple forms of tabular data through a consistent interface.\n #May contain:\n #- numpy record array\n #- list-of-dicts (all dicts are _not_ required to have the same keys)\n #- dict-of-lists\n #- dict (single record)\n #Note: if all the values in this record are lists, it will be interpreted as multiple records\n \n #Data can be accessed and modified by column, by row, or by value\n #data[columnName]\n #data[rowId]\n #data[columnName, rowId] = value\n #data[columnName] = [value, value, ...]\n #data[rowId] = {columnName: value, ...}\n #\"\"\"\n \n #def __init__(self, data):\n #self.data = data\n #if isinstance(data, np.ndarray):\n #self.mode = 'array'\n #elif isinstance(data, list):\n #self.mode = 'list'\n #elif isinstance(data, dict):\n #types = set(map(type, data.values()))\n ### dict may be a dict-of-lists or a single record\n #types -= set([list, np.ndarray]) ## if dict contains any non-sequence values, it is probably a single record.\n #if len(types) != 0:\n #self.data = [self.data]\n #self.mode = 'list'\n #else:\n #self.mode = 'dict'\n #elif isinstance(data, TableData):\n #self.data = data.data\n #self.mode = data.mode\n #else:\n #raise TypeError(type(data))\n \n #for fn in ['__getitem__', '__setitem__']:\n #setattr(self, fn, getattr(self, '_TableData'+fn+self.mode))\n \n #def originalData(self):\n #return self.data\n \n #def toArray(self):\n #if self.mode == 'array':\n #return self.data\n #if len(self) < 1:\n ##return np.array([]) ## need to return empty array *with correct columns*, but this is very difficult, so just return None\n #return None\n #rec1 = self[0]\n #dtype = functions.suggestRecordDType(rec1)\n ##print rec1, dtype\n #arr = np.empty(len(self), dtype=dtype)\n #arr[0] = tuple(rec1.values())\n #for i in xrange(1, len(self)):\n #arr[i] = tuple(self[i].values())\n #return arr\n \n #def __getitem__array(self, arg):\n #if isinstance(arg, tuple):\n #return self.data[arg[0]][arg[1]]\n #else:\n #return self.data[arg]\n \n #def __getitem__list(self, arg):\n #if isinstance(arg, basestring):\n #return [d.get(arg, None) for d in self.data]\n #elif isinstance(arg, int):\n #return self.data[arg]\n #elif isinstance(arg, tuple):\n #arg = self._orderArgs(arg)\n #return self.data[arg[0]][arg[1]]\n #else:\n 
#raise TypeError(type(arg))\n \n #def __getitem__dict(self, arg):\n #if isinstance(arg, basestring):\n #return self.data[arg]\n #elif isinstance(arg, int):\n #return dict([(k, v[arg]) for k, v in self.data.items()])\n #elif isinstance(arg, tuple):\n #arg = self._orderArgs(arg)\n #return self.data[arg[1]][arg[0]]\n #else:\n #raise TypeError(type(arg))\n\n #def __setitem__array(self, arg, val):\n #if isinstance(arg, tuple):\n #self.data[arg[0]][arg[1]] = val\n #else:\n #self.data[arg] = val\n\n #def __setitem__list(self, arg, val):\n #if isinstance(arg, basestring):\n #if len(val) != len(self.data):\n #raise Exception(\"Values (%d) and data set (%d) are not the same length.\" % (len(val), len(self.data)))\n #for i, rec in enumerate(self.data):\n #rec[arg] = val[i]\n #elif isinstance(arg, int):\n #self.data[arg] = val\n #elif isinstance(arg, tuple):\n #arg = self._orderArgs(arg)\n #self.data[arg[0]][arg[1]] = val\n #else:\n #raise TypeError(type(arg))\n \n #def __setitem__dict(self, arg, val):\n #if isinstance(arg, basestring):\n #if len(val) != len(self.data[arg]):\n #raise Exception(\"Values (%d) and data set (%d) are not the same length.\" % (len(val), len(self.data[arg])))\n #self.data[arg] = val\n #elif isinstance(arg, int):\n #for k in self.data:\n #self.data[k][arg] = val[k]\n #elif isinstance(arg, tuple):\n #arg = self._orderArgs(arg)\n #self.data[arg[1]][arg[0]] = val\n #else:\n #raise TypeError(type(arg))\n\n #def _orderArgs(self, args):\n ### return args in (int, str) order\n #if isinstance(args[0], basestring):\n #return (args[1], args[0])\n #else:\n #return args\n \n #def __iter__(self):\n #for i in xrange(len(self)):\n #yield self[i]\n\n #def __len__(self):\n #if self.mode == 'array' or self.mode == 'list':\n #return len(self.data)\n #else:\n #return max(map(len, self.data.values()))\n\n #def columnNames(self):\n #\"\"\"returns column names in no particular order\"\"\"\n #if self.mode == 'array':\n #return self.data.dtype.names\n #elif self.mode == 'list':\n #names = set()\n #for row in self.data:\n #names.update(row.keys())\n #return list(names)\n #elif self.mode == 'dict':\n #return self.data.keys()\n \n #def keys(self):\n #return self.columnNames()\n", "path": "pyqtgraph/graphicsItems/PlotDataItem.py"}]} |
gh_patches_debug_1249 | rasdani/github-patches | git_diff | edgedb__edgedb-6797 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Attempting to switch index type fails with `InternalServerError`
```
edgedb error: InternalServerError: AssertionError:
Hint: This is most likely a bug in EdgeDB. Please consider opening an issue ticket at https://github.com/edgedb/edgedb/issues/new?template=bug_report.md
Server traceback:
Traceback (most recent call last):
File "/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/server/compiler_pool/worker.py", line 203, in compile_in_tx
units, cstate = COMPILER.compile_in_tx(cstate, *args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/server/compiler/compiler.py", line 939, in compile_in_tx
return compile(ctx=ctx, source=source), ctx.state
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/server/compiler/compiler.py", line 2313, in compile
return _try_compile(ctx=ctx, source=original)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/server/compiler/compiler.py", line 2377, in _try_compile
comp, capabilities = _compile_dispatch_ql(
^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/server/compiler/compiler.py", line 2222, in _compile_dispatch_ql
query = ddl.compile_dispatch_ql_migration(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/server/compiler/ddl.py", line 384, in compile_dispatch_ql_migration
return _describe_current_migration(ctx, ql)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/server/compiler/ddl.py", line 638, in _describe_current_migration
top_op2 = s_ddl.cmd_from_ddl(
^^^^^^^^^^^^^^^^^^^
File "/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/schema/ddl.py", line 443, in cmd_from_ddl
res = sd.compile_ddl(schema, ddl, context=context)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/schema/delta.py", line 4365, in compile_ddl
cmd = cmdcls._cmd_tree_from_ast(schema, astnode, context)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/schema/inheriting.py", line 1040, in _cmd_tree_from_ast
cmd = super()._cmd_tree_from_ast(schema, astnode, context)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/schema/delta.py", line 3526, in _cmd_tree_from_ast
cmd = super()._cmd_tree_from_ast(schema, astnode, context)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/schema/delta.py", line 1087, in _cmd_tree_from_ast
subcmd = compile_ddl(schema, subastnode, context=context)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/schema/delta.py", line 4365, in compile_ddl
cmd = cmdcls._cmd_tree_from_ast(schema, astnode, context)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/schema/referencing.py", line 1230, in _cmd_tree_from_ast
cmd = super()._cmd_tree_from_ast(schema, astnode, context)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/schema/inheriting.py", line 1040, in _cmd_tree_from_ast
cmd = super()._cmd_tree_from_ast(schema, astnode, context)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/schema/delta.py", line 3526, in _cmd_tree_from_ast
cmd = super()._cmd_tree_from_ast(schema, astnode, context)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/schema/delta.py", line 1087, in _cmd_tree_from_ast
subcmd = compile_ddl(schema, subastnode, context=context)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/schema/delta.py", line 4367, in compile_ddl
cmd = cmdcls._cmd_tree_from_ast(schema, astnode, context)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/schema/delta.py", line 1078, in _cmd_tree_from_ast
cmd = cls._cmd_from_ast(schema, astnode, context)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/schema/delta.py", line 3476, in _cmd_from_ast
return rename_class._rename_cmd_from_ast(schema, astnode, context)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/schema/delta.py", line 3494, in _rename_cmd_from_ast
new_name = cls._classname_from_ast(schema, astnode, context)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/schema/indexes.py", line 413, in _classname_from_ast
quals = cls._classname_quals_from_ast(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/schema/indexes.py", line 438, in _classname_quals_from_ast
assert isinstance(astnode, qlast.ConcreteIndexCommand)
AssertionError
```
- EdgeDB Version: 4.4+596d0e4 on Ubuntu 22.04.3 LTS
- EdgeDB CLI Version: EdgeDB CLI 4.0.2+500be79 on OSX 14.3, 16gb M1
Steps to reproduce the immediate error:
1. Restore from dump provided via DM to Devon in discord
2. Attempt to `edgedb migration create`
I got into this state by creating and applying a migration for
`index ext::pg_trgm::gin on (.legal_name);`
After that, the above error happens even with zero schema changes.
However, I've added the index like this several times before (while experimenting with how to even hit the index) without issue. I've also been able to restore a backup from before this point, add the index again, and not hit the error. Reproducing this state organically is probably tough.
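For anyone trying to reproduce without the dump: a minimal schema along these lines should land in the same state. Only the `.legal_name` property and the `ext::pg_trgm::gin` index are taken from my real schema; the module and type names here are placeholders.

```sdl
using extension pg_trgm;

module default {
  type Company {
    required legal_name: str;

    # the index whose addition preceded the error
    index ext::pg_trgm::gin on (.legal_name);
  }
}
```

Create and apply a migration for that schema (`edgedb migration create`, then `edgedb migrate`); running `edgedb migration create` again afterwards, even with no further schema changes, is what raises the AssertionError above for me.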
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `edb/schema/indexes.py`
Content:
```
1 #
2 # This source file is part of the EdgeDB open source project.
3 #
4 # Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License");
7 # you may not use this file except in compliance with the License.
8 # You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS,
14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 # See the License for the specific language governing permissions and
16 # limitations under the License.
17 #
18
19
20 from __future__ import annotations
21 from typing import *
22 from typing import overload
23
24 from edb import edgeql
25 from edb import errors
26 from edb.common import parsing
27 from edb.common import verutils
28 from edb.edgeql import ast as qlast
29 from edb.edgeql import compiler as qlcompiler
30 from edb.edgeql import qltypes
31
32 from . import annos as s_anno
33 from . import delta as sd
34 from . import expr as s_expr
35 from . import functions as s_func
36 from . import inheriting
37 from . import name as sn
38 from . import pointers as s_pointers
39 from . import objects as so
40 from . import referencing
41 from . import scalars as s_scalars
42 from . import types as s_types
43 from . import utils
44
45
46 if TYPE_CHECKING:
47 from . import schema as s_schema
48
49
50 # The name used for default concrete indexes
51 DEFAULT_INDEX = sn.QualName(module='__', name='idx')
52
53
54 def is_index_valid_for_type(
55 index: Index,
56 expr_type: s_types.Type,
57 schema: s_schema.Schema
58 ) -> bool:
59 # HACK: currently this helper just hardcodes the permitted index & type
60 # combinations, but this should be inferred based on index definition.
61 index_name = str(index.get_name(schema))
62 match index_name:
63 case 'pg::hash':
64 return True
65 case 'pg::btree':
66 return True
67 case 'pg::gin':
68 return (
69 expr_type.is_array()
70 or
71 expr_type.issubclass(
72 schema,
73 schema.get('std::json', type=s_scalars.ScalarType),
74 )
75 )
76 case 'fts::index':
77 return is_subclass_or_tuple(expr_type, 'fts::document', schema)
78 case 'pg::gist':
79 return expr_type.is_range() or expr_type.is_multirange()
80 case 'pg::spgist':
81 return (
82 expr_type.is_range()
83 or
84 expr_type.issubclass(
85 schema,
86 schema.get('std::str', type=s_scalars.ScalarType),
87 )
88 )
89 case 'pg::brin':
90 return (
91 expr_type.is_range()
92 or
93 expr_type.issubclass(
94 schema,
95 (
96 schema.get('std::anyreal',
97 type=s_scalars.ScalarType),
98 schema.get('std::bytes',
99 type=s_scalars.ScalarType),
100 schema.get('std::str',
101 type=s_scalars.ScalarType),
102 schema.get('std::uuid',
103 type=s_scalars.ScalarType),
104 schema.get('std::datetime',
105 type=s_scalars.ScalarType),
106 schema.get('std::duration',
107 type=s_scalars.ScalarType),
108 schema.get('cal::local_datetime',
109 type=s_scalars.ScalarType),
110 schema.get('cal::local_date',
111 type=s_scalars.ScalarType),
112 schema.get('cal::local_time',
113 type=s_scalars.ScalarType),
114 schema.get('cal::relative_duration',
115 type=s_scalars.ScalarType),
116 schema.get('cal::date_duration',
117 type=s_scalars.ScalarType),
118 )
119 )
120 )
121 case (
122 'ext::pgvector::ivfflat_euclidean'
123 | 'ext::pgvector::ivfflat_ip'
124 | 'ext::pgvector::ivfflat_cosine'
125 | 'ext::pgvector::hnsw_euclidean'
126 | 'ext::pgvector::hnsw_ip'
127 | 'ext::pgvector::hnsw_cosine'
128 ):
129 return expr_type.issubclass(
130 schema,
131 schema.get('ext::pgvector::vector', type=s_scalars.ScalarType),
132 )
133 case (
134 'ext::pg_trgm::gin'
135 | 'ext::pg_trgm::gist'
136 ):
137 return expr_type.issubclass(
138 schema,
139 schema.get('std::str', type=s_scalars.ScalarType),
140 )
141
142 return False
143
144
145 def is_subclass_or_tuple(
146 ty: s_types.Type, parent_name: str | sn.Name, schema: s_schema.Schema
147 ) -> bool:
148 parent = schema.get(parent_name, type=s_types.Type)
149
150 if isinstance(ty, s_types.Tuple):
151 for (_, st) in ty.iter_subtypes(schema):
152 if not st.issubclass(schema, parent):
153 return False
154 return True
155 else:
156 return ty.issubclass(schema, parent)
157
158
159 class Index(
160 referencing.ReferencedInheritingObject,
161 so.InheritingObject, # Help reflection figure out the right db MRO
162 s_anno.AnnotationSubject,
163 qlkind=qltypes.SchemaObjectClass.INDEX,
164 data_safe=True,
165 ):
166
167 subject = so.SchemaField(
168 so.Object,
169 default=None,
170 compcoef=None,
171 inheritable=False,
172 )
173
174 # These can only appear in base abstract index definitions. These
175 # determine how indexes can be configured.
176 params = so.SchemaField(
177 s_func.FuncParameterList,
178 coerce=True,
179 compcoef=0.4,
180 default=so.DEFAULT_CONSTRUCTOR,
181 inheritable=False,
182 )
183
184 # Appears in base abstract index definitions and defines how the index
185 # is represented in postgres.
186 code = so.SchemaField(
187 str,
188 default=None,
189 compcoef=None,
190 inheritable=False,
191 )
192
193 # These can appear in abstract indexes extending an existing one in order
194 # to override exisitng parameters. Also they can appear in concrete
195 # indexes.
196 kwargs = so.SchemaField(
197 s_expr.ExpressionDict,
198 coerce=True,
199 compcoef=0,
200 default=so.DEFAULT_CONSTRUCTOR,
201 inheritable=False,
202 ddl_identity=True,
203 )
204
205 expr = so.SchemaField(
206 s_expr.Expression,
207 default=None,
208 coerce=True,
209 compcoef=0.909,
210 ddl_identity=True,
211 )
212
213 except_expr = so.SchemaField(
214 s_expr.Expression,
215 default=None,
216 coerce=True,
217 compcoef=0.909,
218 ddl_identity=True,
219 )
220
221 def __repr__(self) -> str:
222 cls = self.__class__
223 return '<{}.{} {!r} at 0x{:x}>'.format(
224 cls.__module__, cls.__name__, self.id, id(self))
225
226 __str__ = __repr__
227
228 def as_delete_delta(
229 self,
230 *,
231 schema: s_schema.Schema,
232 context: so.ComparisonContext,
233 ) -> sd.ObjectCommand[Index]:
234 delta = super().as_delete_delta(schema=schema, context=context)
235 old_params = self.get_params(schema).objects(schema)
236 for p in old_params:
237 delta.add(p.as_delete_delta(schema=schema, context=context))
238
239 return delta
240
241 def get_verbosename(
242 self,
243 schema: s_schema.Schema,
244 *,
245 with_parent: bool = False
246 ) -> str:
247 # baseline name for indexes
248 vn = self.get_displayname(schema)
249
250 if self.get_abstract(schema):
251 return f"abstract index '{vn}'"
252 else:
253 # concrete index must have a subject
254 assert self.get_subject(schema) is not None
255
256 # add kwargs (if any) to the concrete name
257 kwargs = self.get_kwargs(schema)
258 if kwargs:
259 kw = []
260 for key, val in kwargs.items():
261 kw.append(f'{key}:={val.text}')
262 vn = f'{vn}({", ".join(kw)})'
263
264 vn = f"index {vn!r}"
265
266 if with_parent:
267 return self.add_parent_name(vn, schema)
268 return vn
269
270 def add_parent_name(
271 self,
272 base_name: str,
273 schema: s_schema.Schema,
274 ) -> str:
275 # Remove the placeholder name of the generic index.
276 if base_name == f"index '{DEFAULT_INDEX}'":
277 base_name = 'index'
278
279 return super().add_parent_name(base_name, schema)
280
281 def is_non_concrete(self, schema: s_schema.Schema) -> bool:
282 return self.get_subject(schema) is None
283
284 @classmethod
285 def get_shortname_static(cls, name: sn.Name) -> sn.QualName:
286 name = sn.shortname_from_fullname(name)
287 assert isinstance(name, sn.QualName)
288 return name
289
290 def get_all_kwargs(
291 self,
292 schema: s_schema.Schema,
293 ) -> s_expr.ExpressionDict:
294 kwargs = s_expr.ExpressionDict()
295 all_kw = type(self).get_field('kwargs').merge_fn(
296 self,
297 self.get_ancestors(schema).objects(schema),
298 'kwargs',
299 schema=schema,
300 )
301 if all_kw:
302 kwargs.update(all_kw)
303
304 return kwargs
305
306 def get_root(
307 self,
308 schema: s_schema.Schema,
309 ) -> Index:
310 if not self.get_abstract(schema):
311 name = sn.shortname_from_fullname(self.get_name(schema))
312 index = schema.get(name, type=Index)
313 else:
314 index = self
315
316 if index.get_bases(schema):
317 return index.get_ancestors(schema).objects(schema)[-1]
318 else:
319 return index
320
321 def get_concrete_kwargs(
322 self,
323 schema: s_schema.Schema,
324 ) -> s_expr.ExpressionDict:
325 assert not self.get_abstract(schema)
326
327 root = self.get_root(schema)
328
329 kwargs = self.get_all_kwargs(schema)
330
331 for param in root.get_params(schema).objects(schema):
332 kwname = param.get_parameter_name(schema)
333 if (
334 kwname not in kwargs and
335 (val := param.get_default(schema)) is not None
336 ):
337 kwargs[kwname] = val
338
339 return kwargs
340
341 def is_defined_here(
342 self,
343 schema: s_schema.Schema,
344 ) -> bool:
345 """
346 Returns True iff the index has not been inherited from a parent subject,
347 and was originally defined on the subject.
348 """
349 return all(
350 base.get_abstract(schema)
351 for base in self.get_bases(schema).objects(schema)
352 )
353
354
355 IndexableSubject_T = TypeVar('IndexableSubject_T', bound='IndexableSubject')
356
357
358 class IndexableSubject(so.InheritingObject):
359 indexes_refs = so.RefDict(
360 attr='indexes',
361 ref_cls=Index)
362
363 indexes = so.SchemaField(
364 so.ObjectIndexByFullname[Index],
365 inheritable=False, ephemeral=True, coerce=True, compcoef=0.909,
366 default=so.DEFAULT_CONSTRUCTOR)
367
368 def add_index(
369 self,
370 schema: s_schema.Schema,
371 index: Index,
372 ) -> s_schema.Schema:
373 return self.add_classref(schema, 'indexes', index)
374
375
376 class IndexSourceCommandContext:
377 pass
378
379
380 class IndexSourceCommand(
381 inheriting.InheritingObjectCommand[IndexableSubject_T],
382 ):
383 pass
384
385
386 class IndexCommandContext(sd.ObjectCommandContext[Index],
387 s_anno.AnnotationSubjectCommandContext):
388 pass
389
390
391 class IndexCommand(
392 referencing.ReferencedInheritingObjectCommand[Index],
393 s_func.ParametrizedCommand[Index],
394 context_class=IndexCommandContext,
395 referrer_context_class=IndexSourceCommandContext,
396 ):
397
398 @classmethod
399 def _classname_from_ast(
400 cls,
401 schema: s_schema.Schema,
402 astnode: qlast.NamedDDL,
403 context: sd.CommandContext,
404 ) -> sn.QualName:
405 # We actually want to override how ReferencedObjectCommand determines
406 # the classname
407 shortname = super(
408 referencing.ReferencedObjectCommand, cls
409 )._classname_from_ast(schema, astnode, context)
410
411 referrer_ctx = cls.get_referrer_context(context)
412 if referrer_ctx is not None:
413
414 referrer_name = referrer_ctx.op.classname
415 assert isinstance(referrer_name, sn.QualName)
416 quals = cls._classname_quals_from_ast(
417 schema, astnode, shortname, referrer_name, context)
418
419 name = sn.QualName(
420 module=referrer_name.module,
421 name=sn.get_specialized_name(
422 shortname,
423 str(referrer_name),
424 *quals,
425 ),
426 )
427 else:
428 name = super()._classname_from_ast(schema, astnode, context)
429
430 return name
431
432 @classmethod
433 def _classname_quals_from_ast(
434 cls,
435 schema: s_schema.Schema,
436 astnode: qlast.NamedDDL,
437 base_name: sn.Name,
438 referrer_name: sn.QualName,
439 context: sd.CommandContext,
440 ) -> Tuple[str, ...]:
441 assert isinstance(astnode, qlast.ConcreteIndexCommand)
442 exprs = []
443
444 kwargs = cls._index_kwargs_from_ast(schema, astnode, context)
445 for key, val in kwargs.items():
446 exprs.append(f'{key}:={val.text}')
447
448 # use the normalized text directly from the expression
449 expr = s_expr.Expression.from_ast(
450 astnode.expr, schema, context.modaliases)
451 expr_text = expr.text
452 assert expr_text is not None
453 exprs.append(expr_text)
454
455 if astnode.except_expr:
456 expr = s_expr.Expression.from_ast(
457 astnode.except_expr, schema, context.modaliases)
458 exprs.append('!' + expr.text)
459
460 return (cls._name_qual_from_exprs(schema, exprs),)
461
462 @classmethod
463 def _classname_quals_from_name(
464 cls,
465 name: sn.QualName
466 ) -> Tuple[str, ...]:
467 quals = sn.quals_from_fullname(name)
468 return tuple(quals[-1:])
469
470 @classmethod
471 def _index_kwargs_from_ast(
472 cls,
473 schema: s_schema.Schema,
474 astnode: qlast.NamedDDL,
475 context: sd.CommandContext,
476 ) -> Dict[str, s_expr.Expression]:
477 kwargs = dict()
478 # Some abstract indexes and all concrete index commands have kwargs.
479 assert isinstance(astnode, (qlast.CreateIndex,
480 qlast.ConcreteIndexCommand))
481
482 for key, val in astnode.kwargs.items():
483 kwargs[key] = s_expr.Expression.from_ast(
484 val, schema, context.modaliases, as_fragment=True)
485
486 return kwargs
487
488 @overload
489 def get_object(
490 self,
491 schema: s_schema.Schema,
492 context: sd.CommandContext,
493 *,
494 name: Optional[sn.Name] = None,
495 default: Union[Index, so.NoDefaultT] = so.NoDefault,
496 sourcectx: Optional[parsing.ParserContext] = None,
497 ) -> Index:
498 ...
499
500 @overload
501 def get_object(
502 self,
503 schema: s_schema.Schema,
504 context: sd.CommandContext,
505 *,
506 name: Optional[sn.Name] = None,
507 default: None = None,
508 sourcectx: Optional[parsing.ParserContext] = None,
509 ) -> Optional[Index]:
510 ...
511
512 def get_object(
513 self,
514 schema: s_schema.Schema,
515 context: sd.CommandContext,
516 *,
517 name: Optional[sn.Name] = None,
518 default: Union[Index, so.NoDefaultT, None] = so.NoDefault,
519 sourcectx: Optional[parsing.ParserContext] = None,
520 ) -> Optional[Index]:
521 try:
522 return super().get_object(
523 schema, context, name=name,
524 default=default, sourcectx=sourcectx,
525 )
526 except errors.InvalidReferenceError:
527 referrer_ctx = self.get_referrer_context_or_die(context)
528 referrer = referrer_ctx.scls
529 expr = self.get_ddl_identity('expr')
530 raise errors.InvalidReferenceError(
531 f"index on ({expr.text}) does not exist on "
532 f"{referrer.get_verbosename(schema)}"
533 ) from None
534
535 @classmethod
536 def _cmd_from_ast(
537 cls,
538 schema: s_schema.Schema,
539 astnode: qlast.DDLOperation,
540 context: sd.CommandContext,
541 ) -> sd.ObjectCommand[Index]:
542 cmd = super()._cmd_from_ast(schema, astnode, context)
543 if isinstance(astnode, qlast.ConcreteIndexCommand):
544 cmd.set_ddl_identity(
545 'expr',
546 s_expr.Expression.from_ast(
547 astnode.expr,
548 schema,
549 context.modaliases,
550 ),
551 )
552 return cmd
553
554 def _get_ast(
555 self,
556 schema: s_schema.Schema,
557 context: sd.CommandContext,
558 *,
559 parent_node: Optional[qlast.DDLOperation] = None,
560 ) -> Optional[qlast.DDLOperation]:
561 astnode = super()._get_ast(schema, context, parent_node=parent_node)
562
563 kwargs = self.get_resolved_attribute_value(
564 'kwargs',
565 schema=schema,
566 context=context,
567 )
568 if kwargs:
569 assert isinstance(astnode, (qlast.CreateIndex,
570 qlast.ConcreteIndexCommand))
571 astnode.kwargs = {
572 name: expr.qlast for name, expr in kwargs.items()
573 }
574
575 return astnode
576
577 def get_ast_attr_for_field(
578 self,
579 field: str,
580 astnode: Type[qlast.DDLOperation],
581 ) -> Optional[str]:
582 if field in ('kwargs', 'expr', 'except_expr'):
583 return field
584 else:
585 return super().get_ast_attr_for_field(field, astnode)
586
587 def get_ddl_identity_fields(
588 self,
589 context: sd.CommandContext,
590 ) -> Tuple[so.Field[Any], ...]:
591 id_fields = super().get_ddl_identity_fields(context)
592 omit_fields = set()
593
594 if (
595 self.get_attribute_value('abstract')
596 and not self.get_attribute_value('bases')
597 ):
598 # Base abstract indexes don't have kwargs at all.
599 omit_fields.add('kwargs')
600
601 if omit_fields:
602 return tuple(f for f in id_fields if f.name not in omit_fields)
603 else:
604 return id_fields
605
606 def compile_expr_field(
607 self,
608 schema: s_schema.Schema,
609 context: sd.CommandContext,
610 field: so.Field[Any],
611 value: s_expr.Expression,
612 track_schema_ref_exprs: bool=False,
613 ) -> s_expr.CompiledExpression:
614 from edb.ir import utils as irutils
615 from edb.ir import ast as irast
616
617 if field.name in {'expr', 'except_expr'}:
618 # type ignore below, for the class is used as mixin
619 parent_ctx = context.get_ancestor(
620 IndexSourceCommandContext, # type: ignore
621 self
622 )
623 assert parent_ctx is not None
624 assert isinstance(parent_ctx.op, sd.ObjectCommand)
625 subject = parent_ctx.op.get_object(schema, context)
626
627 expr = value.compiled(
628 schema=schema,
629 options=qlcompiler.CompilerOptions(
630 modaliases=context.modaliases,
631 schema_object_context=self.get_schema_metaclass(),
632 anchors={qlast.Subject().name: subject},
633 path_prefix_anchor=qlast.Subject().name,
634 singletons=frozenset([subject]),
635 apply_query_rewrites=False,
636 track_schema_ref_exprs=track_schema_ref_exprs,
637 detached=True,
638 ),
639 )
640
641 # Check that the inferred cardinality is no more than 1
642 if expr.irast.cardinality.is_multi():
643 raise errors.SchemaDefinitionError(
644 f'possibly more than one element returned by '
645 f'the index expression where only singletons '
646 f'are allowed',
647 context=value.qlast.context,
648 )
649
650 if expr.irast.volatility != qltypes.Volatility.Immutable:
651 raise errors.SchemaDefinitionError(
652 f'index expressions must be immutable',
653 context=value.qlast.context,
654 )
655
656 refs = irutils.get_longest_paths(expr.irast)
657
658 has_multi = False
659 for ref in refs:
660 assert subject
661 while ref.rptr:
662 rptr = ref.rptr
663 if rptr.dir_cardinality.is_multi():
664 has_multi = True
665
666 # We don't need to look further than the subject,
667 # which is always valid. (And which is a singleton
668 # in an index expression if it is itself a
669 # singleton, regardless of other parts of the path.)
670 if (
671 isinstance(rptr.ptrref, irast.PointerRef)
672 and rptr.ptrref.id == subject.id
673 ):
674 break
675 ref = rptr.source
676
677 if has_multi and irutils.contains_set_of_op(expr.irast):
678 raise errors.SchemaDefinitionError(
679 "cannot use aggregate functions or operators "
680 "in an index expression",
681 context=self.source_context,
682 )
683
684 return expr
685 else:
686 return super().compile_expr_field(
687 schema, context, field, value, track_schema_ref_exprs)
688
689 def get_dummy_expr_field_value(
690 self,
691 schema: s_schema.Schema,
692 context: sd.CommandContext,
693 field: so.Field[Any],
694 value: Any,
695 ) -> Optional[s_expr.Expression]:
696 if field.name == 'expr':
697 return s_expr.Expression(text='0')
698 else:
699 raise NotImplementedError(f'unhandled field {field.name!r}')
700
701
702 class CreateIndex(
703 IndexCommand,
704 referencing.CreateReferencedInheritingObject[Index],
705 ):
706 astnode = [qlast.CreateConcreteIndex, qlast.CreateIndex]
707 referenced_astnode = qlast.CreateConcreteIndex
708
709 @classmethod
710 def _cmd_tree_from_ast(
711 cls,
712 schema: s_schema.Schema,
713 astnode: qlast.DDLOperation,
714 context: sd.CommandContext,
715 ) -> sd.Command:
716 cmd = super()._cmd_tree_from_ast(schema, astnode, context)
717
718 assert isinstance(cmd, IndexCommand)
719 assert isinstance(astnode, (qlast.CreateConcreteIndex,
720 qlast.CreateIndex))
721
722 if isinstance(astnode, qlast.CreateIndex):
723 cmd.set_attribute_value('abstract', True)
724
725 params = cls._get_param_desc_from_ast(
726 schema, context.modaliases, astnode)
727 for param in params:
728 # as_create_delta requires the specific type
729 cmd.add_prerequisite(param.as_create_delta(
730 schema, cmd.classname, context=context))
731
732 # There are several possibilities for abstract indexes:
733 # 1) base abstract index
734 # 2) an abstract index extending another one
735 # 3) an abstract index listing index fallback alternatives
736 if astnode.bases is None:
737 if astnode.index_types is None:
738 # This actually defines a new index (1).
739 pass
740 else:
741 # This is for index fallback alternatives (3).
742 raise NotImplementedError("Index fallback not implemented")
743 else:
744 # Extending existing indexes for composition (2).
745 kwargs = cls._index_kwargs_from_ast(schema, astnode, context)
746 if kwargs:
747 cmd.set_attribute_value('kwargs', kwargs)
748
749 elif isinstance(astnode, qlast.CreateConcreteIndex):
750 orig_text = cls.get_orig_expr_text(schema, astnode, 'expr')
751
752 if (
753 orig_text is not None
754 and context.compat_ver_is_before(
755 (1, 0, verutils.VersionStage.ALPHA, 6)
756 )
757 ):
758 # Versions prior to a6 used a different expression
759 # normalization strategy, so we must renormalize the
760 # expression.
761 expr_ql = qlcompiler.renormalize_compat(
762 astnode.expr,
763 orig_text,
764 schema=schema,
765 localnames=context.localnames,
766 )
767 else:
768 expr_ql = astnode.expr
769
770 kwargs = cls._index_kwargs_from_ast(schema, astnode, context)
771 if kwargs:
772 cmd.set_attribute_value('kwargs', kwargs)
773
774 cmd.set_attribute_value(
775 'expr',
776 s_expr.Expression.from_ast(
777 expr_ql,
778 schema,
779 context.modaliases,
780 ),
781 )
782
783 if astnode.except_expr:
784 cmd.set_attribute_value(
785 'except_expr',
786 s_expr.Expression.from_ast(
787 astnode.except_expr,
788 schema,
789 context.modaliases,
790 ),
791 )
792
793 return cmd
794
795 @classmethod
796 def as_inherited_ref_ast(
797 cls,
798 schema: s_schema.Schema,
799 context: sd.CommandContext,
800 name: sn.Name,
801 parent: referencing.ReferencedObject,
802 ) -> qlast.ObjectDDL:
803 assert isinstance(parent, Index)
804 astnode_cls = cls.referenced_astnode
805
806 expr = parent.get_expr(schema)
807 assert expr is not None
808 expr_ql = edgeql.parse_fragment(expr.text)
809
810 except_expr = parent.get_except_expr(schema)
811 if except_expr:
812 except_expr_ql = except_expr.qlast
813 else:
814 except_expr_ql = None
815
816 qlkwargs = {
817 key: val.qlast for key, val in parent.get_kwargs(schema).items()
818 }
819
820 return astnode_cls(
821 name=cls.get_inherited_ref_name(schema, context, parent, name),
822 kwargs=qlkwargs,
823 expr=expr_ql,
824 except_expr=except_expr_ql,
825 )
826
827 @classmethod
828 def get_inherited_ref_name(
829 cls,
830 schema: s_schema.Schema,
831 context: sd.CommandContext,
832 parent: so.Object,
833 name: sn.Name,
834 ) -> qlast.ObjectRef:
835 bn = sn.shortname_from_fullname(name)
836 return utils.name_to_ast_ref(bn)
837
838 def _validate_kwargs(
839 self,
840 schema: s_schema.Schema,
841 params: s_func.FuncParameterList,
842 kwargs: s_expr.ExpressionDict,
843 ancestor_name: str,
844 ) -> None:
845 if not kwargs:
846 return
847
848 if not params:
849 raise errors.SchemaDefinitionError(
850 f'the {ancestor_name} does not support any parameters',
851 context=self.source_context
852 )
853
854 # Make sure that the kwargs are valid.
855 for key in kwargs:
856 expr = kwargs[key]
857 param = params.get_by_name(schema, key)
858 if param is None:
859 raise errors.SchemaDefinitionError(
860 f'the {ancestor_name} does not have a parameter {key!r}',
861 context=self.source_context
862 )
863
864 param_type = param.get_type(schema)
865 comp_expr = s_expr.Expression.compiled(expr, schema=schema)
866 expr_type = comp_expr.irast.stype
867
868 if (
869 not param_type.is_polymorphic(schema) and
870 not expr_type.is_polymorphic(schema) and
871 not expr_type.implicitly_castable_to(
872 param_type, schema)
873 ):
874 raise errors.SchemaDefinitionError(
875 f'the {key!r} parameter of the '
876 f'{self.get_verbosename()} has type of '
877 f'{expr_type.get_displayname(schema)} that '
878 f'is not implicitly castable to the '
879 f'corresponding parameter of the '
880 f'{ancestor_name} with type '
881 f'{param_type.get_displayname(schema)}',
882 context=self.source_context,
883 )
884
885 def validate_object(
886 self,
887 schema: s_schema.Schema,
888 context: sd.CommandContext,
889 ) -> None:
890 super().validate_object(schema, context)
891
892 referrer_ctx = self.get_referrer_context(context)
893
894 # Get kwargs if any, so that we can process them later.
895 kwargs = self.get_resolved_attribute_value(
896 'kwargs',
897 schema=schema,
898 context=context,
899 )
900
901 if referrer_ctx is None:
902 # Make sure that all bases are ultimately inherited from the same
903 # root base class.
904 bases = self.get_resolved_attribute_value(
905 'bases',
906 schema=schema,
907 context=context,
908 )
909 if bases:
910 # Users can extend abstract indexes.
911 root = None
912 for base in bases.objects(schema):
913 lineage = [base] + list(
914 base.get_ancestors(schema).objects(schema))
915
916 if root is None:
917 root = lineage[-1]
918 elif root != lineage[-1]:
919 raise errors.SchemaDefinitionError(
920 f'cannot create {self.get_verbosename()} '
921                             f'because it extends incompatible abstract indexes',
922 context=self.source_context
923 )
924
925 # We should have found a root because we have bases.
926 assert root is not None
927 # Make sure that the kwargs are valid.
928 self._validate_kwargs(
929 schema,
930 root.get_params(schema),
931 kwargs,
932 root.get_verbosename(schema),
933 )
934
935 else:
936 # Creating new abstract indexes is only allowed in "EdgeDB
937 # developer" mode, i.e. when populating std library, etc.
938 if not context.stdmode and not context.testmode:
939 raise errors.SchemaDefinitionError(
940 f'cannot create {self.get_verbosename()} '
941 f'because user-defined abstract indexes are not '
942 f'supported',
943 context=self.source_context
944 )
945
946 return
947
948 # The checks below apply only to concrete indexes.
949 subject = referrer_ctx.scls
950 assert isinstance(subject, (s_types.Type, s_pointers.Pointer))
951
952 # FTS
953 if self.scls.has_base_with_name(schema, sn.QualName('fts', 'index')):
954
955 if isinstance(subject, s_pointers.Pointer):
956 raise errors.SchemaDefinitionError(
957 "fts::index cannot be declared on links",
958 context=self.source_context
959 )
960
961 # Ensure that the name of the index (if given) matches an existing
962 # abstract index.
963 name = sn.shortname_from_fullname(
964 self.get_resolved_attribute_value(
965 'name',
966 schema=schema,
967 context=context,
968 )
969 )
970
971 # HACK: the old concrete indexes all have names in form __::idx, but
972 # this should be the actual name provided. Also the index without name
973 # defaults to '__::idx'.
974 if name != DEFAULT_INDEX and (
975 abs_index := schema.get(name, type=Index)
976 ):
977 # only abstract indexes should have unmangled names
978 assert abs_index.get_abstract(schema)
979 root = abs_index.get_root(schema)
980
981 # Make sure that kwargs and parameters match in name and type.
982 # Also make sure that all parameters have values at this point
983 # (either default or provided in kwargs).
984 params = root.get_params(schema)
985 inh_kwargs = self.scls.get_all_kwargs(schema)
986
987 self._validate_kwargs(schema,
988 params,
989 kwargs,
990 abs_index.get_verbosename(schema))
991
992 unused_names = {p.get_parameter_name(schema)
993 for p in params.objects(schema)}
994 if kwargs:
995 unused_names -= set(kwargs)
996 if inh_kwargs:
997 unused_names -= set(inh_kwargs)
998 if unused_names:
999 # Check that all of these parameters have defaults.
1000 for pname in list(unused_names):
1001 param = params.get_by_name(schema, pname)
1002 if param and param.get_default(schema) is not None:
1003 unused_names.discard(pname)
1004
1005 if unused_names:
1006 names = ', '.join(repr(n) for n in sorted(unused_names))
1007 raise errors.SchemaDefinitionError(
1008 f'cannot create {self.get_verbosename()} '
1009 f'because the following parameters are still undefined: '
1010 f'{names}.',
1011 context=self.source_context
1012 )
1013
1014 # Make sure that the concrete index expression type matches the
1015 # abstract index type.
1016 expr = self.get_resolved_attribute_value(
1017 'expr',
1018 schema=schema,
1019 context=context,
1020 )
1021 options = qlcompiler.CompilerOptions(
1022 anchors={qlast.Subject().name: subject},
1023 path_prefix_anchor=qlast.Subject().name,
1024 singletons=frozenset([subject]),
1025 apply_query_rewrites=False,
1026 schema_object_context=self.get_schema_metaclass(),
1027 )
1028 comp_expr = s_expr.Expression.compiled(
1029 expr, schema=schema, options=options
1030 )
1031 expr_type = comp_expr.irast.stype
1032
1033 if not is_index_valid_for_type(root, expr_type, comp_expr.schema):
1034 hint = None
1035 if str(name) == 'fts::index':
1036 hint = (
1037 'fts::document can be constructed with '
1038 'fts::with_options(str, ...)'
1039 )
1040
1041 raise errors.SchemaDefinitionError(
1042 f'index expression ({expr.text}) '
1043 f'is not of a valid type for the '
1044 f'{self.scls.get_verbosename(comp_expr.schema)}',
1045 context=self.source_context,
1046 details=hint,
1047 )
1048
1049 def get_resolved_attributes(
1050 self,
1051 schema: s_schema.Schema,
1052 context: sd.CommandContext,
1053 ) -> Dict[str, Any]:
1054 params = self._get_params(schema, context)
1055 props = super().get_resolved_attributes(schema, context)
1056 props['params'] = params
1057 return props
1058
1059 @classmethod
1060 def _classbases_from_ast(
1061 cls,
1062 schema: s_schema.Schema,
1063 astnode: qlast.ObjectDDL,
1064 context: sd.CommandContext,
1065 ) -> List[so.ObjectShell[Index]]:
1066 if (
1067 isinstance(astnode, qlast.CreateConcreteIndex)
1068 and astnode.name
1069 and astnode.name.module != DEFAULT_INDEX.module
1070 and astnode.name.name != DEFAULT_INDEX.name
1071 ):
1072 base = utils.ast_objref_to_object_shell(
1073 astnode.name,
1074 metaclass=Index,
1075 schema=schema,
1076 modaliases=context.modaliases,
1077 )
1078 return [base]
1079 else:
1080 return super()._classbases_from_ast(schema, astnode, context)
1081
1082
1083 class RenameIndex(
1084 IndexCommand,
1085 referencing.RenameReferencedInheritingObject[Index],
1086 ):
1087
1088 @classmethod
1089 def _cmd_from_ast(
1090 cls,
1091 schema: s_schema.Schema,
1092 astnode: qlast.DDLOperation,
1093 context: sd.CommandContext,
1094 ) -> RenameIndex:
1095 return cast(
1096 RenameIndex,
1097 super()._cmd_from_ast(schema, astnode, context),
1098 )
1099
1100
1101 class AlterIndexOwned(
1102 IndexCommand,
1103 referencing.AlterOwned[Index],
1104 field='owned',
1105 ):
1106 pass
1107
1108
1109 class AlterIndex(
1110 IndexCommand,
1111 referencing.AlterReferencedInheritingObject[Index],
1112 ):
1113 astnode = [qlast.AlterConcreteIndex, qlast.AlterIndex]
1114 referenced_astnode = qlast.AlterConcreteIndex
1115
1116 def canonicalize_alter_from_external_ref(
1117 self,
1118 schema: s_schema.Schema,
1119 context: sd.CommandContext,
1120 ) -> None:
1121 if (
1122 not self.get_attribute_value('abstract')
1123 and (indexexpr := self.get_attribute_value('expr')) is not None
1124 ):
1125 # To compute the new name, we construct an AST of the
1126 # index, since that is the infrastructure we have for
1127 # computing the classname.
1128 name = sn.shortname_from_fullname(self.classname)
1129 assert isinstance(name, sn.QualName), "expected qualified name"
1130 ast = qlast.CreateConcreteIndex(
1131 name=qlast.ObjectRef(name=name.name, module=name.module),
1132 expr=indexexpr.qlast,
1133 )
1134 quals = sn.quals_from_fullname(self.classname)
1135 new_name = self._classname_from_ast_and_referrer(
1136 schema, sn.QualName.from_string(quals[0]), ast, context)
1137 if new_name == self.classname:
1138 return
1139
1140 rename = self.scls.init_delta_command(
1141 schema, sd.RenameObject, new_name=new_name)
1142 rename.set_attribute_value(
1143 'name', value=new_name, orig_value=self.classname)
1144 self.add(rename)
1145
1146
1147 class DeleteIndex(
1148 IndexCommand,
1149 referencing.DeleteReferencedInheritingObject[Index],
1150 ):
1151 astnode = [qlast.DropConcreteIndex, qlast.DropIndex]
1152 referenced_astnode = qlast.DropConcreteIndex
1153
1154 def _delete_begin(
1155 self,
1156 schema: s_schema.Schema,
1157 context: sd.CommandContext,
1158 ) -> s_schema.Schema:
1159 schema = super()._delete_begin(schema, context)
1160 if not context.canonical:
1161 for param in self.scls.get_params(schema).objects(schema):
1162 self.add(param.init_delta_command(schema, sd.DeleteObject))
1163 return schema
1164
1165 @classmethod
1166 def _cmd_tree_from_ast(
1167 cls,
1168 schema: s_schema.Schema,
1169 astnode: qlast.DDLOperation,
1170 context: sd.CommandContext,
1171 ) -> sd.Command:
1172 cmd = super()._cmd_tree_from_ast(schema, astnode, context)
1173
1174 if isinstance(astnode, qlast.ConcreteIndexCommand):
1175 cmd.set_attribute_value(
1176 'expr',
1177 s_expr.Expression.from_ast(
1178 astnode.expr, schema, context.modaliases),
1179 )
1180
1181 return cmd
1182
1183
1184 class RebaseIndex(
1185 IndexCommand,
1186 referencing.RebaseReferencedInheritingObject[Index],
1187 ):
1188 pass
1189
1190
1191 def get_effective_fts_index(
1192 subject: IndexableSubject, schema: s_schema.Schema
1193 ) -> Tuple[Optional[Index], bool]:
1194 """
1195 Returns the effective index of a subject and a boolean indicating
1196     if the effective index has overridden any other fts indexes on this subject.
1197 """
1198 indexes: so.ObjectIndexByFullname[Index] = subject.get_indexes(schema)
1199
1200 fts_name = sn.QualName('fts', 'index')
1201 fts_indexes = [
1202 ind
1203 for ind in indexes.objects(schema)
1204 if ind.has_base_with_name(schema, fts_name)
1205 ]
1206 if len(fts_indexes) == 0:
1207 return (None, False)
1208
1209 fts_indexes_defined_here = [
1210 ind for ind in fts_indexes if ind.is_defined_here(schema)
1211 ]
1212
1213 if len(fts_indexes_defined_here) > 0:
1214 # indexes defined here have priority
1215
1216 if len(fts_indexes_defined_here) > 1:
1217 subject_name = subject.get_displayname(schema)
1218 raise errors.SchemaDefinitionError(
1219 f'multiple {fts_name} indexes defined for {subject_name}'
1220 )
1221 effective = fts_indexes_defined_here[0]
1222 has_overridden = len(fts_indexes) >= 2
1223
1224 else:
1225 # there are no fts indexes defined on the subject
1226 # the inherited indexes take effect
1227
1228 if len(fts_indexes) > 1:
1229 subject_name = subject.get_displayname(schema)
1230 raise errors.SchemaDefinitionError(
1231 f'multiple {fts_name} indexes inherited for {subject_name}'
1232 )
1233
1234 effective = fts_indexes[0]
1235 has_overridden = False
1236
1237 return (effective, has_overridden)
1238
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch in the `git diff` format, fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/edb/schema/indexes.py b/edb/schema/indexes.py
--- a/edb/schema/indexes.py
+++ b/edb/schema/indexes.py
@@ -163,6 +163,15 @@
qlkind=qltypes.SchemaObjectClass.INDEX,
data_safe=True,
):
+ # redefine, so we can change compcoef
+ bases = so.SchemaField(
+ so.ObjectList['Index'], # type: ignore
+ type_is_generic_self=True,
+ default=so.DEFAULT_CONSTRUCTOR,
+ coerce=True,
+ inheritable=False,
+ compcoef=0.0, # can't rebase
+ )
subject = so.SchemaField(
so.Object,
| {"golden_diff": "diff --git a/edb/schema/indexes.py b/edb/schema/indexes.py\n--- a/edb/schema/indexes.py\n+++ b/edb/schema/indexes.py\n@@ -163,6 +163,15 @@\n qlkind=qltypes.SchemaObjectClass.INDEX,\n data_safe=True,\n ):\n+ # redefine, so we can change compcoef\n+ bases = so.SchemaField(\n+ so.ObjectList['Index'], # type: ignore\n+ type_is_generic_self=True,\n+ default=so.DEFAULT_CONSTRUCTOR,\n+ coerce=True,\n+ inheritable=False,\n+ compcoef=0.0, # can't rebase\n+ )\n \n subject = so.SchemaField(\n so.Object,\n", "issue": "Attempting to switch index type fails with `InternalServerError`\n```\r\nedgedb error: InternalServerError: AssertionError:\r\n Hint: This is most likely a bug in EdgeDB. Please consider opening an issue ticket at https://github.com/edgedb/edgedb/issues/new?template=bug_report.md\r\n Server traceback:\r\n Traceback (most recent call last):\r\n File \"/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/server/compiler_pool/worker.py\", line 203, in compile_in_tx\r\n units, cstate = COMPILER.compile_in_tx(cstate, *args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/server/compiler/compiler.py\", line 939, in compile_in_tx\r\n return compile(ctx=ctx, source=source), ctx.state\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/server/compiler/compiler.py\", line 2313, in compile\r\n return _try_compile(ctx=ctx, source=original)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/server/compiler/compiler.py\", line 2377, in _try_compile\r\n comp, capabilities = _compile_dispatch_ql(\r\n ^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/server/compiler/compiler.py\", line 2222, in _compile_dispatch_ql\r\n query = ddl.compile_dispatch_ql_migration(\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/server/compiler/ddl.py\", line 384, in compile_dispatch_ql_migration\r\n return _describe_current_migration(ctx, ql)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/server/compiler/ddl.py\", line 638, in _describe_current_migration\r\n top_op2 = s_ddl.cmd_from_ddl(\r\n ^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/schema/ddl.py\", line 443, in cmd_from_ddl\r\n res = sd.compile_ddl(schema, ddl, context=context)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/schema/delta.py\", line 4365, in compile_ddl\r\n cmd = cmdcls._cmd_tree_from_ast(schema, astnode, context)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/schema/inheriting.py\", line 1040, in _cmd_tree_from_ast\r\n cmd = super()._cmd_tree_from_ast(schema, astnode, context)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/schema/delta.py\", line 3526, in _cmd_tree_from_ast\r\n cmd = super()._cmd_tree_from_ast(schema, astnode, context)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File 
\"/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/schema/delta.py\", line 1087, in _cmd_tree_from_ast\r\n subcmd = compile_ddl(schema, subastnode, context=context)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/schema/delta.py\", line 4365, in compile_ddl\r\n cmd = cmdcls._cmd_tree_from_ast(schema, astnode, context)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/schema/referencing.py\", line 1230, in _cmd_tree_from_ast\r\n cmd = super()._cmd_tree_from_ast(schema, astnode, context)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/schema/inheriting.py\", line 1040, in _cmd_tree_from_ast\r\n cmd = super()._cmd_tree_from_ast(schema, astnode, context)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/schema/delta.py\", line 3526, in _cmd_tree_from_ast\r\n cmd = super()._cmd_tree_from_ast(schema, astnode, context)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/schema/delta.py\", line 1087, in _cmd_tree_from_ast\r\n subcmd = compile_ddl(schema, subastnode, context=context)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/schema/delta.py\", line 4367, in compile_ddl\r\n cmd = cmdcls._cmd_tree_from_ast(schema, astnode, context)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/schema/delta.py\", line 1078, in _cmd_tree_from_ast\r\n cmd = cls._cmd_from_ast(schema, astnode, context)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/schema/delta.py\", line 3476, in _cmd_from_ast\r\n return rename_class._rename_cmd_from_ast(schema, astnode, context)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/schema/delta.py\", line 3494, in _rename_cmd_from_ast\r\n new_name = cls._classname_from_ast(schema, astnode, context)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/schema/indexes.py\", line 413, in _classname_from_ast\r\n quals = cls._classname_quals_from_ast(\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/x86_64-linux-gnu/edgedb-server-4/lib/python3.11/site-packages/edb/schema/indexes.py\", line 438, in _classname_quals_from_ast\r\n assert isinstance(astnode, qlast.ConcreteIndexCommand)\r\n AssertionError\r\n```\r\n\r\n- EdgeDB Version: 4.4+596d0e4 on Ubuntu 22.04.3 LTS\r\n- EdgeDB CLI Version: EdgeDB CLI 4.0.2+500be79 on OSX 14.3, 16gb M1\r\n\r\nSteps to reproduce immediate error:\r\n\r\n1. Restore from dump provided via DM to Devon in discord\r\n2. 
Attempt to `edgedb migration create`\r\n\r\nI got into this state by creating and applying a migration for\r\n\r\n`index ext::pg_trgm::gin on (.legal_name);`\r\n\r\nAfter which the above error happens even with zero schema changes.\r\n\r\nHowever, I've done this several times (currently playing with how to even hit the index) without issue. I've also been able to restore to a backup before this, add it again, and not hit the issue again. Reproducibility to create this state organically is probably tough. \n", "before_files": [{"content": "#\n# This source file is part of the EdgeDB open source project.\n#\n# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nfrom __future__ import annotations\nfrom typing import *\nfrom typing import overload\n\nfrom edb import edgeql\nfrom edb import errors\nfrom edb.common import parsing\nfrom edb.common import verutils\nfrom edb.edgeql import ast as qlast\nfrom edb.edgeql import compiler as qlcompiler\nfrom edb.edgeql import qltypes\n\nfrom . import annos as s_anno\nfrom . import delta as sd\nfrom . import expr as s_expr\nfrom . import functions as s_func\nfrom . import inheriting\nfrom . import name as sn\nfrom . import pointers as s_pointers\nfrom . import objects as so\nfrom . import referencing\nfrom . import scalars as s_scalars\nfrom . import types as s_types\nfrom . import utils\n\n\nif TYPE_CHECKING:\n from . 
import schema as s_schema\n\n\n# The name used for default concrete indexes\nDEFAULT_INDEX = sn.QualName(module='__', name='idx')\n\n\ndef is_index_valid_for_type(\n index: Index,\n expr_type: s_types.Type,\n schema: s_schema.Schema\n) -> bool:\n # HACK: currently this helper just hardcodes the permitted index & type\n # combinations, but this should be inferred based on index definition.\n index_name = str(index.get_name(schema))\n match index_name:\n case 'pg::hash':\n return True\n case 'pg::btree':\n return True\n case 'pg::gin':\n return (\n expr_type.is_array()\n or\n expr_type.issubclass(\n schema,\n schema.get('std::json', type=s_scalars.ScalarType),\n )\n )\n case 'fts::index':\n return is_subclass_or_tuple(expr_type, 'fts::document', schema)\n case 'pg::gist':\n return expr_type.is_range() or expr_type.is_multirange()\n case 'pg::spgist':\n return (\n expr_type.is_range()\n or\n expr_type.issubclass(\n schema,\n schema.get('std::str', type=s_scalars.ScalarType),\n )\n )\n case 'pg::brin':\n return (\n expr_type.is_range()\n or\n expr_type.issubclass(\n schema,\n (\n schema.get('std::anyreal',\n type=s_scalars.ScalarType),\n schema.get('std::bytes',\n type=s_scalars.ScalarType),\n schema.get('std::str',\n type=s_scalars.ScalarType),\n schema.get('std::uuid',\n type=s_scalars.ScalarType),\n schema.get('std::datetime',\n type=s_scalars.ScalarType),\n schema.get('std::duration',\n type=s_scalars.ScalarType),\n schema.get('cal::local_datetime',\n type=s_scalars.ScalarType),\n schema.get('cal::local_date',\n type=s_scalars.ScalarType),\n schema.get('cal::local_time',\n type=s_scalars.ScalarType),\n schema.get('cal::relative_duration',\n type=s_scalars.ScalarType),\n schema.get('cal::date_duration',\n type=s_scalars.ScalarType),\n )\n )\n )\n case (\n 'ext::pgvector::ivfflat_euclidean'\n | 'ext::pgvector::ivfflat_ip'\n | 'ext::pgvector::ivfflat_cosine'\n | 'ext::pgvector::hnsw_euclidean'\n | 'ext::pgvector::hnsw_ip'\n | 'ext::pgvector::hnsw_cosine'\n ):\n return expr_type.issubclass(\n schema,\n schema.get('ext::pgvector::vector', type=s_scalars.ScalarType),\n )\n case (\n 'ext::pg_trgm::gin'\n | 'ext::pg_trgm::gist'\n ):\n return expr_type.issubclass(\n schema,\n schema.get('std::str', type=s_scalars.ScalarType),\n )\n\n return False\n\n\ndef is_subclass_or_tuple(\n ty: s_types.Type, parent_name: str | sn.Name, schema: s_schema.Schema\n) -> bool:\n parent = schema.get(parent_name, type=s_types.Type)\n\n if isinstance(ty, s_types.Tuple):\n for (_, st) in ty.iter_subtypes(schema):\n if not st.issubclass(schema, parent):\n return False\n return True\n else:\n return ty.issubclass(schema, parent)\n\n\nclass Index(\n referencing.ReferencedInheritingObject,\n so.InheritingObject, # Help reflection figure out the right db MRO\n s_anno.AnnotationSubject,\n qlkind=qltypes.SchemaObjectClass.INDEX,\n data_safe=True,\n):\n\n subject = so.SchemaField(\n so.Object,\n default=None,\n compcoef=None,\n inheritable=False,\n )\n\n # These can only appear in base abstract index definitions. These\n # determine how indexes can be configured.\n params = so.SchemaField(\n s_func.FuncParameterList,\n coerce=True,\n compcoef=0.4,\n default=so.DEFAULT_CONSTRUCTOR,\n inheritable=False,\n )\n\n # Appears in base abstract index definitions and defines how the index\n # is represented in postgres.\n code = so.SchemaField(\n str,\n default=None,\n compcoef=None,\n inheritable=False,\n )\n\n # These can appear in abstract indexes extending an existing one in order\n # to override exisitng parameters. 
Also they can appear in concrete\n # indexes.\n kwargs = so.SchemaField(\n s_expr.ExpressionDict,\n coerce=True,\n compcoef=0,\n default=so.DEFAULT_CONSTRUCTOR,\n inheritable=False,\n ddl_identity=True,\n )\n\n expr = so.SchemaField(\n s_expr.Expression,\n default=None,\n coerce=True,\n compcoef=0.909,\n ddl_identity=True,\n )\n\n except_expr = so.SchemaField(\n s_expr.Expression,\n default=None,\n coerce=True,\n compcoef=0.909,\n ddl_identity=True,\n )\n\n def __repr__(self) -> str:\n cls = self.__class__\n return '<{}.{} {!r} at 0x{:x}>'.format(\n cls.__module__, cls.__name__, self.id, id(self))\n\n __str__ = __repr__\n\n def as_delete_delta(\n self,\n *,\n schema: s_schema.Schema,\n context: so.ComparisonContext,\n ) -> sd.ObjectCommand[Index]:\n delta = super().as_delete_delta(schema=schema, context=context)\n old_params = self.get_params(schema).objects(schema)\n for p in old_params:\n delta.add(p.as_delete_delta(schema=schema, context=context))\n\n return delta\n\n def get_verbosename(\n self,\n schema: s_schema.Schema,\n *,\n with_parent: bool = False\n ) -> str:\n # baseline name for indexes\n vn = self.get_displayname(schema)\n\n if self.get_abstract(schema):\n return f\"abstract index '{vn}'\"\n else:\n # concrete index must have a subject\n assert self.get_subject(schema) is not None\n\n # add kwargs (if any) to the concrete name\n kwargs = self.get_kwargs(schema)\n if kwargs:\n kw = []\n for key, val in kwargs.items():\n kw.append(f'{key}:={val.text}')\n vn = f'{vn}({\", \".join(kw)})'\n\n vn = f\"index {vn!r}\"\n\n if with_parent:\n return self.add_parent_name(vn, schema)\n return vn\n\n def add_parent_name(\n self,\n base_name: str,\n schema: s_schema.Schema,\n ) -> str:\n # Remove the placeholder name of the generic index.\n if base_name == f\"index '{DEFAULT_INDEX}'\":\n base_name = 'index'\n\n return super().add_parent_name(base_name, schema)\n\n def is_non_concrete(self, schema: s_schema.Schema) -> bool:\n return self.get_subject(schema) is None\n\n @classmethod\n def get_shortname_static(cls, name: sn.Name) -> sn.QualName:\n name = sn.shortname_from_fullname(name)\n assert isinstance(name, sn.QualName)\n return name\n\n def get_all_kwargs(\n self,\n schema: s_schema.Schema,\n ) -> s_expr.ExpressionDict:\n kwargs = s_expr.ExpressionDict()\n all_kw = type(self).get_field('kwargs').merge_fn(\n self,\n self.get_ancestors(schema).objects(schema),\n 'kwargs',\n schema=schema,\n )\n if all_kw:\n kwargs.update(all_kw)\n\n return kwargs\n\n def get_root(\n self,\n schema: s_schema.Schema,\n ) -> Index:\n if not self.get_abstract(schema):\n name = sn.shortname_from_fullname(self.get_name(schema))\n index = schema.get(name, type=Index)\n else:\n index = self\n\n if index.get_bases(schema):\n return index.get_ancestors(schema).objects(schema)[-1]\n else:\n return index\n\n def get_concrete_kwargs(\n self,\n schema: s_schema.Schema,\n ) -> s_expr.ExpressionDict:\n assert not self.get_abstract(schema)\n\n root = self.get_root(schema)\n\n kwargs = self.get_all_kwargs(schema)\n\n for param in root.get_params(schema).objects(schema):\n kwname = param.get_parameter_name(schema)\n if (\n kwname not in kwargs and\n (val := param.get_default(schema)) is not None\n ):\n kwargs[kwname] = val\n\n return kwargs\n\n def is_defined_here(\n self,\n schema: s_schema.Schema,\n ) -> bool:\n \"\"\"\n Returns True iff the index has not been inherited from a parent subject,\n and was originally defined on the subject.\n \"\"\"\n return all(\n base.get_abstract(schema)\n for base in 
self.get_bases(schema).objects(schema)\n )\n\n\nIndexableSubject_T = TypeVar('IndexableSubject_T', bound='IndexableSubject')\n\n\nclass IndexableSubject(so.InheritingObject):\n indexes_refs = so.RefDict(\n attr='indexes',\n ref_cls=Index)\n\n indexes = so.SchemaField(\n so.ObjectIndexByFullname[Index],\n inheritable=False, ephemeral=True, coerce=True, compcoef=0.909,\n default=so.DEFAULT_CONSTRUCTOR)\n\n def add_index(\n self,\n schema: s_schema.Schema,\n index: Index,\n ) -> s_schema.Schema:\n return self.add_classref(schema, 'indexes', index)\n\n\nclass IndexSourceCommandContext:\n pass\n\n\nclass IndexSourceCommand(\n inheriting.InheritingObjectCommand[IndexableSubject_T],\n):\n pass\n\n\nclass IndexCommandContext(sd.ObjectCommandContext[Index],\n s_anno.AnnotationSubjectCommandContext):\n pass\n\n\nclass IndexCommand(\n referencing.ReferencedInheritingObjectCommand[Index],\n s_func.ParametrizedCommand[Index],\n context_class=IndexCommandContext,\n referrer_context_class=IndexSourceCommandContext,\n):\n\n @classmethod\n def _classname_from_ast(\n cls,\n schema: s_schema.Schema,\n astnode: qlast.NamedDDL,\n context: sd.CommandContext,\n ) -> sn.QualName:\n # We actually want to override how ReferencedObjectCommand determines\n # the classname\n shortname = super(\n referencing.ReferencedObjectCommand, cls\n )._classname_from_ast(schema, astnode, context)\n\n referrer_ctx = cls.get_referrer_context(context)\n if referrer_ctx is not None:\n\n referrer_name = referrer_ctx.op.classname\n assert isinstance(referrer_name, sn.QualName)\n quals = cls._classname_quals_from_ast(\n schema, astnode, shortname, referrer_name, context)\n\n name = sn.QualName(\n module=referrer_name.module,\n name=sn.get_specialized_name(\n shortname,\n str(referrer_name),\n *quals,\n ),\n )\n else:\n name = super()._classname_from_ast(schema, astnode, context)\n\n return name\n\n @classmethod\n def _classname_quals_from_ast(\n cls,\n schema: s_schema.Schema,\n astnode: qlast.NamedDDL,\n base_name: sn.Name,\n referrer_name: sn.QualName,\n context: sd.CommandContext,\n ) -> Tuple[str, ...]:\n assert isinstance(astnode, qlast.ConcreteIndexCommand)\n exprs = []\n\n kwargs = cls._index_kwargs_from_ast(schema, astnode, context)\n for key, val in kwargs.items():\n exprs.append(f'{key}:={val.text}')\n\n # use the normalized text directly from the expression\n expr = s_expr.Expression.from_ast(\n astnode.expr, schema, context.modaliases)\n expr_text = expr.text\n assert expr_text is not None\n exprs.append(expr_text)\n\n if astnode.except_expr:\n expr = s_expr.Expression.from_ast(\n astnode.except_expr, schema, context.modaliases)\n exprs.append('!' 
+ expr.text)\n\n return (cls._name_qual_from_exprs(schema, exprs),)\n\n @classmethod\n def _classname_quals_from_name(\n cls,\n name: sn.QualName\n ) -> Tuple[str, ...]:\n quals = sn.quals_from_fullname(name)\n return tuple(quals[-1:])\n\n @classmethod\n def _index_kwargs_from_ast(\n cls,\n schema: s_schema.Schema,\n astnode: qlast.NamedDDL,\n context: sd.CommandContext,\n ) -> Dict[str, s_expr.Expression]:\n kwargs = dict()\n # Some abstract indexes and all concrete index commands have kwargs.\n assert isinstance(astnode, (qlast.CreateIndex,\n qlast.ConcreteIndexCommand))\n\n for key, val in astnode.kwargs.items():\n kwargs[key] = s_expr.Expression.from_ast(\n val, schema, context.modaliases, as_fragment=True)\n\n return kwargs\n\n @overload\n def get_object(\n self,\n schema: s_schema.Schema,\n context: sd.CommandContext,\n *,\n name: Optional[sn.Name] = None,\n default: Union[Index, so.NoDefaultT] = so.NoDefault,\n sourcectx: Optional[parsing.ParserContext] = None,\n ) -> Index:\n ...\n\n @overload\n def get_object(\n self,\n schema: s_schema.Schema,\n context: sd.CommandContext,\n *,\n name: Optional[sn.Name] = None,\n default: None = None,\n sourcectx: Optional[parsing.ParserContext] = None,\n ) -> Optional[Index]:\n ...\n\n def get_object(\n self,\n schema: s_schema.Schema,\n context: sd.CommandContext,\n *,\n name: Optional[sn.Name] = None,\n default: Union[Index, so.NoDefaultT, None] = so.NoDefault,\n sourcectx: Optional[parsing.ParserContext] = None,\n ) -> Optional[Index]:\n try:\n return super().get_object(\n schema, context, name=name,\n default=default, sourcectx=sourcectx,\n )\n except errors.InvalidReferenceError:\n referrer_ctx = self.get_referrer_context_or_die(context)\n referrer = referrer_ctx.scls\n expr = self.get_ddl_identity('expr')\n raise errors.InvalidReferenceError(\n f\"index on ({expr.text}) does not exist on \"\n f\"{referrer.get_verbosename(schema)}\"\n ) from None\n\n @classmethod\n def _cmd_from_ast(\n cls,\n schema: s_schema.Schema,\n astnode: qlast.DDLOperation,\n context: sd.CommandContext,\n ) -> sd.ObjectCommand[Index]:\n cmd = super()._cmd_from_ast(schema, astnode, context)\n if isinstance(astnode, qlast.ConcreteIndexCommand):\n cmd.set_ddl_identity(\n 'expr',\n s_expr.Expression.from_ast(\n astnode.expr,\n schema,\n context.modaliases,\n ),\n )\n return cmd\n\n def _get_ast(\n self,\n schema: s_schema.Schema,\n context: sd.CommandContext,\n *,\n parent_node: Optional[qlast.DDLOperation] = None,\n ) -> Optional[qlast.DDLOperation]:\n astnode = super()._get_ast(schema, context, parent_node=parent_node)\n\n kwargs = self.get_resolved_attribute_value(\n 'kwargs',\n schema=schema,\n context=context,\n )\n if kwargs:\n assert isinstance(astnode, (qlast.CreateIndex,\n qlast.ConcreteIndexCommand))\n astnode.kwargs = {\n name: expr.qlast for name, expr in kwargs.items()\n }\n\n return astnode\n\n def get_ast_attr_for_field(\n self,\n field: str,\n astnode: Type[qlast.DDLOperation],\n ) -> Optional[str]:\n if field in ('kwargs', 'expr', 'except_expr'):\n return field\n else:\n return super().get_ast_attr_for_field(field, astnode)\n\n def get_ddl_identity_fields(\n self,\n context: sd.CommandContext,\n ) -> Tuple[so.Field[Any], ...]:\n id_fields = super().get_ddl_identity_fields(context)\n omit_fields = set()\n\n if (\n self.get_attribute_value('abstract')\n and not self.get_attribute_value('bases')\n ):\n # Base abstract indexes don't have kwargs at all.\n omit_fields.add('kwargs')\n\n if omit_fields:\n return tuple(f for f in id_fields if f.name not in 
omit_fields)\n else:\n return id_fields\n\n def compile_expr_field(\n self,\n schema: s_schema.Schema,\n context: sd.CommandContext,\n field: so.Field[Any],\n value: s_expr.Expression,\n track_schema_ref_exprs: bool=False,\n ) -> s_expr.CompiledExpression:\n from edb.ir import utils as irutils\n from edb.ir import ast as irast\n\n if field.name in {'expr', 'except_expr'}:\n # type ignore below, for the class is used as mixin\n parent_ctx = context.get_ancestor(\n IndexSourceCommandContext, # type: ignore\n self\n )\n assert parent_ctx is not None\n assert isinstance(parent_ctx.op, sd.ObjectCommand)\n subject = parent_ctx.op.get_object(schema, context)\n\n expr = value.compiled(\n schema=schema,\n options=qlcompiler.CompilerOptions(\n modaliases=context.modaliases,\n schema_object_context=self.get_schema_metaclass(),\n anchors={qlast.Subject().name: subject},\n path_prefix_anchor=qlast.Subject().name,\n singletons=frozenset([subject]),\n apply_query_rewrites=False,\n track_schema_ref_exprs=track_schema_ref_exprs,\n detached=True,\n ),\n )\n\n # Check that the inferred cardinality is no more than 1\n if expr.irast.cardinality.is_multi():\n raise errors.SchemaDefinitionError(\n f'possibly more than one element returned by '\n f'the index expression where only singletons '\n f'are allowed',\n context=value.qlast.context,\n )\n\n if expr.irast.volatility != qltypes.Volatility.Immutable:\n raise errors.SchemaDefinitionError(\n f'index expressions must be immutable',\n context=value.qlast.context,\n )\n\n refs = irutils.get_longest_paths(expr.irast)\n\n has_multi = False\n for ref in refs:\n assert subject\n while ref.rptr:\n rptr = ref.rptr\n if rptr.dir_cardinality.is_multi():\n has_multi = True\n\n # We don't need to look further than the subject,\n # which is always valid. 
(And which is a singleton\n # in an index expression if it is itself a\n # singleton, regardless of other parts of the path.)\n if (\n isinstance(rptr.ptrref, irast.PointerRef)\n and rptr.ptrref.id == subject.id\n ):\n break\n ref = rptr.source\n\n if has_multi and irutils.contains_set_of_op(expr.irast):\n raise errors.SchemaDefinitionError(\n \"cannot use aggregate functions or operators \"\n \"in an index expression\",\n context=self.source_context,\n )\n\n return expr\n else:\n return super().compile_expr_field(\n schema, context, field, value, track_schema_ref_exprs)\n\n def get_dummy_expr_field_value(\n self,\n schema: s_schema.Schema,\n context: sd.CommandContext,\n field: so.Field[Any],\n value: Any,\n ) -> Optional[s_expr.Expression]:\n if field.name == 'expr':\n return s_expr.Expression(text='0')\n else:\n raise NotImplementedError(f'unhandled field {field.name!r}')\n\n\nclass CreateIndex(\n IndexCommand,\n referencing.CreateReferencedInheritingObject[Index],\n):\n astnode = [qlast.CreateConcreteIndex, qlast.CreateIndex]\n referenced_astnode = qlast.CreateConcreteIndex\n\n @classmethod\n def _cmd_tree_from_ast(\n cls,\n schema: s_schema.Schema,\n astnode: qlast.DDLOperation,\n context: sd.CommandContext,\n ) -> sd.Command:\n cmd = super()._cmd_tree_from_ast(schema, astnode, context)\n\n assert isinstance(cmd, IndexCommand)\n assert isinstance(astnode, (qlast.CreateConcreteIndex,\n qlast.CreateIndex))\n\n if isinstance(astnode, qlast.CreateIndex):\n cmd.set_attribute_value('abstract', True)\n\n params = cls._get_param_desc_from_ast(\n schema, context.modaliases, astnode)\n for param in params:\n # as_create_delta requires the specific type\n cmd.add_prerequisite(param.as_create_delta(\n schema, cmd.classname, context=context))\n\n # There are several possibilities for abstract indexes:\n # 1) base abstract index\n # 2) an abstract index extending another one\n # 3) an abstract index listing index fallback alternatives\n if astnode.bases is None:\n if astnode.index_types is None:\n # This actually defines a new index (1).\n pass\n else:\n # This is for index fallback alternatives (3).\n raise NotImplementedError(\"Index fallback not implemented\")\n else:\n # Extending existing indexes for composition (2).\n kwargs = cls._index_kwargs_from_ast(schema, astnode, context)\n if kwargs:\n cmd.set_attribute_value('kwargs', kwargs)\n\n elif isinstance(astnode, qlast.CreateConcreteIndex):\n orig_text = cls.get_orig_expr_text(schema, astnode, 'expr')\n\n if (\n orig_text is not None\n and context.compat_ver_is_before(\n (1, 0, verutils.VersionStage.ALPHA, 6)\n )\n ):\n # Versions prior to a6 used a different expression\n # normalization strategy, so we must renormalize the\n # expression.\n expr_ql = qlcompiler.renormalize_compat(\n astnode.expr,\n orig_text,\n schema=schema,\n localnames=context.localnames,\n )\n else:\n expr_ql = astnode.expr\n\n kwargs = cls._index_kwargs_from_ast(schema, astnode, context)\n if kwargs:\n cmd.set_attribute_value('kwargs', kwargs)\n\n cmd.set_attribute_value(\n 'expr',\n s_expr.Expression.from_ast(\n expr_ql,\n schema,\n context.modaliases,\n ),\n )\n\n if astnode.except_expr:\n cmd.set_attribute_value(\n 'except_expr',\n s_expr.Expression.from_ast(\n astnode.except_expr,\n schema,\n context.modaliases,\n ),\n )\n\n return cmd\n\n @classmethod\n def as_inherited_ref_ast(\n cls,\n schema: s_schema.Schema,\n context: sd.CommandContext,\n name: sn.Name,\n parent: referencing.ReferencedObject,\n ) -> qlast.ObjectDDL:\n assert isinstance(parent, Index)\n 
astnode_cls = cls.referenced_astnode\n\n expr = parent.get_expr(schema)\n assert expr is not None\n expr_ql = edgeql.parse_fragment(expr.text)\n\n except_expr = parent.get_except_expr(schema)\n if except_expr:\n except_expr_ql = except_expr.qlast\n else:\n except_expr_ql = None\n\n qlkwargs = {\n key: val.qlast for key, val in parent.get_kwargs(schema).items()\n }\n\n return astnode_cls(\n name=cls.get_inherited_ref_name(schema, context, parent, name),\n kwargs=qlkwargs,\n expr=expr_ql,\n except_expr=except_expr_ql,\n )\n\n @classmethod\n def get_inherited_ref_name(\n cls,\n schema: s_schema.Schema,\n context: sd.CommandContext,\n parent: so.Object,\n name: sn.Name,\n ) -> qlast.ObjectRef:\n bn = sn.shortname_from_fullname(name)\n return utils.name_to_ast_ref(bn)\n\n def _validate_kwargs(\n self,\n schema: s_schema.Schema,\n params: s_func.FuncParameterList,\n kwargs: s_expr.ExpressionDict,\n ancestor_name: str,\n ) -> None:\n if not kwargs:\n return\n\n if not params:\n raise errors.SchemaDefinitionError(\n f'the {ancestor_name} does not support any parameters',\n context=self.source_context\n )\n\n # Make sure that the kwargs are valid.\n for key in kwargs:\n expr = kwargs[key]\n param = params.get_by_name(schema, key)\n if param is None:\n raise errors.SchemaDefinitionError(\n f'the {ancestor_name} does not have a parameter {key!r}',\n context=self.source_context\n )\n\n param_type = param.get_type(schema)\n comp_expr = s_expr.Expression.compiled(expr, schema=schema)\n expr_type = comp_expr.irast.stype\n\n if (\n not param_type.is_polymorphic(schema) and\n not expr_type.is_polymorphic(schema) and\n not expr_type.implicitly_castable_to(\n param_type, schema)\n ):\n raise errors.SchemaDefinitionError(\n f'the {key!r} parameter of the '\n f'{self.get_verbosename()} has type of '\n f'{expr_type.get_displayname(schema)} that '\n f'is not implicitly castable to the '\n f'corresponding parameter of the '\n f'{ancestor_name} with type '\n f'{param_type.get_displayname(schema)}',\n context=self.source_context,\n )\n\n def validate_object(\n self,\n schema: s_schema.Schema,\n context: sd.CommandContext,\n ) -> None:\n super().validate_object(schema, context)\n\n referrer_ctx = self.get_referrer_context(context)\n\n # Get kwargs if any, so that we can process them later.\n kwargs = self.get_resolved_attribute_value(\n 'kwargs',\n schema=schema,\n context=context,\n )\n\n if referrer_ctx is None:\n # Make sure that all bases are ultimately inherited from the same\n # root base class.\n bases = self.get_resolved_attribute_value(\n 'bases',\n schema=schema,\n context=context,\n )\n if bases:\n # Users can extend abstract indexes.\n root = None\n for base in bases.objects(schema):\n lineage = [base] + list(\n base.get_ancestors(schema).objects(schema))\n\n if root is None:\n root = lineage[-1]\n elif root != lineage[-1]:\n raise errors.SchemaDefinitionError(\n f'cannot create {self.get_verbosename()} '\n f'because it extends incompatible abstract indxes',\n context=self.source_context\n )\n\n # We should have found a root because we have bases.\n assert root is not None\n # Make sure that the kwargs are valid.\n self._validate_kwargs(\n schema,\n root.get_params(schema),\n kwargs,\n root.get_verbosename(schema),\n )\n\n else:\n # Creating new abstract indexes is only allowed in \"EdgeDB\n # developer\" mode, i.e. 
when populating std library, etc.\n if not context.stdmode and not context.testmode:\n raise errors.SchemaDefinitionError(\n f'cannot create {self.get_verbosename()} '\n f'because user-defined abstract indexes are not '\n f'supported',\n context=self.source_context\n )\n\n return\n\n # The checks below apply only to concrete indexes.\n subject = referrer_ctx.scls\n assert isinstance(subject, (s_types.Type, s_pointers.Pointer))\n\n # FTS\n if self.scls.has_base_with_name(schema, sn.QualName('fts', 'index')):\n\n if isinstance(subject, s_pointers.Pointer):\n raise errors.SchemaDefinitionError(\n \"fts::index cannot be declared on links\",\n context=self.source_context\n )\n\n # Ensure that the name of the index (if given) matches an existing\n # abstract index.\n name = sn.shortname_from_fullname(\n self.get_resolved_attribute_value(\n 'name',\n schema=schema,\n context=context,\n )\n )\n\n # HACK: the old concrete indexes all have names in form __::idx, but\n # this should be the actual name provided. Also the index without name\n # defaults to '__::idx'.\n if name != DEFAULT_INDEX and (\n abs_index := schema.get(name, type=Index)\n ):\n # only abstract indexes should have unmangled names\n assert abs_index.get_abstract(schema)\n root = abs_index.get_root(schema)\n\n # Make sure that kwargs and parameters match in name and type.\n # Also make sure that all parameters have values at this point\n # (either default or provided in kwargs).\n params = root.get_params(schema)\n inh_kwargs = self.scls.get_all_kwargs(schema)\n\n self._validate_kwargs(schema,\n params,\n kwargs,\n abs_index.get_verbosename(schema))\n\n unused_names = {p.get_parameter_name(schema)\n for p in params.objects(schema)}\n if kwargs:\n unused_names -= set(kwargs)\n if inh_kwargs:\n unused_names -= set(inh_kwargs)\n if unused_names:\n # Check that all of these parameters have defaults.\n for pname in list(unused_names):\n param = params.get_by_name(schema, pname)\n if param and param.get_default(schema) is not None:\n unused_names.discard(pname)\n\n if unused_names:\n names = ', '.join(repr(n) for n in sorted(unused_names))\n raise errors.SchemaDefinitionError(\n f'cannot create {self.get_verbosename()} '\n f'because the following parameters are still undefined: '\n f'{names}.',\n context=self.source_context\n )\n\n # Make sure that the concrete index expression type matches the\n # abstract index type.\n expr = self.get_resolved_attribute_value(\n 'expr',\n schema=schema,\n context=context,\n )\n options = qlcompiler.CompilerOptions(\n anchors={qlast.Subject().name: subject},\n path_prefix_anchor=qlast.Subject().name,\n singletons=frozenset([subject]),\n apply_query_rewrites=False,\n schema_object_context=self.get_schema_metaclass(),\n )\n comp_expr = s_expr.Expression.compiled(\n expr, schema=schema, options=options\n )\n expr_type = comp_expr.irast.stype\n\n if not is_index_valid_for_type(root, expr_type, comp_expr.schema):\n hint = None\n if str(name) == 'fts::index':\n hint = (\n 'fts::document can be constructed with '\n 'fts::with_options(str, ...)'\n )\n\n raise errors.SchemaDefinitionError(\n f'index expression ({expr.text}) '\n f'is not of a valid type for the '\n f'{self.scls.get_verbosename(comp_expr.schema)}',\n context=self.source_context,\n details=hint,\n )\n\n def get_resolved_attributes(\n self,\n schema: s_schema.Schema,\n context: sd.CommandContext,\n ) -> Dict[str, Any]:\n params = self._get_params(schema, context)\n props = super().get_resolved_attributes(schema, context)\n props['params'] = params\n 
return props\n\n @classmethod\n def _classbases_from_ast(\n cls,\n schema: s_schema.Schema,\n astnode: qlast.ObjectDDL,\n context: sd.CommandContext,\n ) -> List[so.ObjectShell[Index]]:\n if (\n isinstance(astnode, qlast.CreateConcreteIndex)\n and astnode.name\n and astnode.name.module != DEFAULT_INDEX.module\n and astnode.name.name != DEFAULT_INDEX.name\n ):\n base = utils.ast_objref_to_object_shell(\n astnode.name,\n metaclass=Index,\n schema=schema,\n modaliases=context.modaliases,\n )\n return [base]\n else:\n return super()._classbases_from_ast(schema, astnode, context)\n\n\nclass RenameIndex(\n IndexCommand,\n referencing.RenameReferencedInheritingObject[Index],\n):\n\n @classmethod\n def _cmd_from_ast(\n cls,\n schema: s_schema.Schema,\n astnode: qlast.DDLOperation,\n context: sd.CommandContext,\n ) -> RenameIndex:\n return cast(\n RenameIndex,\n super()._cmd_from_ast(schema, astnode, context),\n )\n\n\nclass AlterIndexOwned(\n IndexCommand,\n referencing.AlterOwned[Index],\n field='owned',\n):\n pass\n\n\nclass AlterIndex(\n IndexCommand,\n referencing.AlterReferencedInheritingObject[Index],\n):\n astnode = [qlast.AlterConcreteIndex, qlast.AlterIndex]\n referenced_astnode = qlast.AlterConcreteIndex\n\n def canonicalize_alter_from_external_ref(\n self,\n schema: s_schema.Schema,\n context: sd.CommandContext,\n ) -> None:\n if (\n not self.get_attribute_value('abstract')\n and (indexexpr := self.get_attribute_value('expr')) is not None\n ):\n # To compute the new name, we construct an AST of the\n # index, since that is the infrastructure we have for\n # computing the classname.\n name = sn.shortname_from_fullname(self.classname)\n assert isinstance(name, sn.QualName), \"expected qualified name\"\n ast = qlast.CreateConcreteIndex(\n name=qlast.ObjectRef(name=name.name, module=name.module),\n expr=indexexpr.qlast,\n )\n quals = sn.quals_from_fullname(self.classname)\n new_name = self._classname_from_ast_and_referrer(\n schema, sn.QualName.from_string(quals[0]), ast, context)\n if new_name == self.classname:\n return\n\n rename = self.scls.init_delta_command(\n schema, sd.RenameObject, new_name=new_name)\n rename.set_attribute_value(\n 'name', value=new_name, orig_value=self.classname)\n self.add(rename)\n\n\nclass DeleteIndex(\n IndexCommand,\n referencing.DeleteReferencedInheritingObject[Index],\n):\n astnode = [qlast.DropConcreteIndex, qlast.DropIndex]\n referenced_astnode = qlast.DropConcreteIndex\n\n def _delete_begin(\n self,\n schema: s_schema.Schema,\n context: sd.CommandContext,\n ) -> s_schema.Schema:\n schema = super()._delete_begin(schema, context)\n if not context.canonical:\n for param in self.scls.get_params(schema).objects(schema):\n self.add(param.init_delta_command(schema, sd.DeleteObject))\n return schema\n\n @classmethod\n def _cmd_tree_from_ast(\n cls,\n schema: s_schema.Schema,\n astnode: qlast.DDLOperation,\n context: sd.CommandContext,\n ) -> sd.Command:\n cmd = super()._cmd_tree_from_ast(schema, astnode, context)\n\n if isinstance(astnode, qlast.ConcreteIndexCommand):\n cmd.set_attribute_value(\n 'expr',\n s_expr.Expression.from_ast(\n astnode.expr, schema, context.modaliases),\n )\n\n return cmd\n\n\nclass RebaseIndex(\n IndexCommand,\n referencing.RebaseReferencedInheritingObject[Index],\n):\n pass\n\n\ndef get_effective_fts_index(\n subject: IndexableSubject, schema: s_schema.Schema\n) -> Tuple[Optional[Index], bool]:\n \"\"\"\n Returns the effective index of a subject and a boolean indicating\n if the effective index has overriden any other fts indexes on 
this subject.\n \"\"\"\n indexes: so.ObjectIndexByFullname[Index] = subject.get_indexes(schema)\n\n fts_name = sn.QualName('fts', 'index')\n fts_indexes = [\n ind\n for ind in indexes.objects(schema)\n if ind.has_base_with_name(schema, fts_name)\n ]\n if len(fts_indexes) == 0:\n return (None, False)\n\n fts_indexes_defined_here = [\n ind for ind in fts_indexes if ind.is_defined_here(schema)\n ]\n\n if len(fts_indexes_defined_here) > 0:\n # indexes defined here have priority\n\n if len(fts_indexes_defined_here) > 1:\n subject_name = subject.get_displayname(schema)\n raise errors.SchemaDefinitionError(\n f'multiple {fts_name} indexes defined for {subject_name}'\n )\n effective = fts_indexes_defined_here[0]\n has_overridden = len(fts_indexes) >= 2\n\n else:\n # there are no fts indexes defined on the subject\n # the inherited indexes take effect\n\n if len(fts_indexes) > 1:\n subject_name = subject.get_displayname(schema)\n raise errors.SchemaDefinitionError(\n f'multiple {fts_name} indexes inherited for {subject_name}'\n )\n\n effective = fts_indexes[0]\n has_overridden = False\n\n return (effective, has_overridden)\n", "path": "edb/schema/indexes.py"}], "after_files": [{"content": "#\n# This source file is part of the EdgeDB open source project.\n#\n# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nfrom __future__ import annotations\nfrom typing import *\nfrom typing import overload\n\nfrom edb import edgeql\nfrom edb import errors\nfrom edb.common import parsing\nfrom edb.common import verutils\nfrom edb.edgeql import ast as qlast\nfrom edb.edgeql import compiler as qlcompiler\nfrom edb.edgeql import qltypes\n\nfrom . import annos as s_anno\nfrom . import delta as sd\nfrom . import expr as s_expr\nfrom . import functions as s_func\nfrom . import inheriting\nfrom . import name as sn\nfrom . import pointers as s_pointers\nfrom . import objects as so\nfrom . import referencing\nfrom . import scalars as s_scalars\nfrom . import types as s_types\nfrom . import utils\n\n\nif TYPE_CHECKING:\n from . 
import schema as s_schema\n\n\n# The name used for default concrete indexes\nDEFAULT_INDEX = sn.QualName(module='__', name='idx')\n\n\ndef is_index_valid_for_type(\n index: Index,\n expr_type: s_types.Type,\n schema: s_schema.Schema\n) -> bool:\n # HACK: currently this helper just hardcodes the permitted index & type\n # combinations, but this should be inferred based on index definition.\n index_name = str(index.get_name(schema))\n match index_name:\n case 'pg::hash':\n return True\n case 'pg::btree':\n return True\n case 'pg::gin':\n return (\n expr_type.is_array()\n or\n expr_type.issubclass(\n schema,\n schema.get('std::json', type=s_scalars.ScalarType),\n )\n )\n case 'fts::index':\n return is_subclass_or_tuple(expr_type, 'fts::document', schema)\n case 'pg::gist':\n return expr_type.is_range() or expr_type.is_multirange()\n case 'pg::spgist':\n return (\n expr_type.is_range()\n or\n expr_type.issubclass(\n schema,\n schema.get('std::str', type=s_scalars.ScalarType),\n )\n )\n case 'pg::brin':\n return (\n expr_type.is_range()\n or\n expr_type.issubclass(\n schema,\n (\n schema.get('std::anyreal',\n type=s_scalars.ScalarType),\n schema.get('std::bytes',\n type=s_scalars.ScalarType),\n schema.get('std::str',\n type=s_scalars.ScalarType),\n schema.get('std::uuid',\n type=s_scalars.ScalarType),\n schema.get('std::datetime',\n type=s_scalars.ScalarType),\n schema.get('std::duration',\n type=s_scalars.ScalarType),\n schema.get('cal::local_datetime',\n type=s_scalars.ScalarType),\n schema.get('cal::local_date',\n type=s_scalars.ScalarType),\n schema.get('cal::local_time',\n type=s_scalars.ScalarType),\n schema.get('cal::relative_duration',\n type=s_scalars.ScalarType),\n schema.get('cal::date_duration',\n type=s_scalars.ScalarType),\n )\n )\n )\n case (\n 'ext::pgvector::ivfflat_euclidean'\n | 'ext::pgvector::ivfflat_ip'\n | 'ext::pgvector::ivfflat_cosine'\n | 'ext::pgvector::hnsw_euclidean'\n | 'ext::pgvector::hnsw_ip'\n | 'ext::pgvector::hnsw_cosine'\n ):\n return expr_type.issubclass(\n schema,\n schema.get('ext::pgvector::vector', type=s_scalars.ScalarType),\n )\n case (\n 'ext::pg_trgm::gin'\n | 'ext::pg_trgm::gist'\n ):\n return expr_type.issubclass(\n schema,\n schema.get('std::str', type=s_scalars.ScalarType),\n )\n\n return False\n\n\ndef is_subclass_or_tuple(\n ty: s_types.Type, parent_name: str | sn.Name, schema: s_schema.Schema\n) -> bool:\n parent = schema.get(parent_name, type=s_types.Type)\n\n if isinstance(ty, s_types.Tuple):\n for (_, st) in ty.iter_subtypes(schema):\n if not st.issubclass(schema, parent):\n return False\n return True\n else:\n return ty.issubclass(schema, parent)\n\n\nclass Index(\n referencing.ReferencedInheritingObject,\n so.InheritingObject, # Help reflection figure out the right db MRO\n s_anno.AnnotationSubject,\n qlkind=qltypes.SchemaObjectClass.INDEX,\n data_safe=True,\n):\n # redefine, so we can change compcoef\n bases = so.SchemaField(\n so.ObjectList['Index'], # type: ignore\n type_is_generic_self=True,\n default=so.DEFAULT_CONSTRUCTOR,\n coerce=True,\n inheritable=False,\n compcoef=0.0, # can't rebase\n )\n\n subject = so.SchemaField(\n so.Object,\n default=None,\n compcoef=None,\n inheritable=False,\n )\n\n # These can only appear in base abstract index definitions. 
These\n # determine how indexes can be configured.\n params = so.SchemaField(\n s_func.FuncParameterList,\n coerce=True,\n compcoef=0.4,\n default=so.DEFAULT_CONSTRUCTOR,\n inheritable=False,\n )\n\n # Appears in base abstract index definitions and defines how the index\n # is represented in postgres.\n code = so.SchemaField(\n str,\n default=None,\n compcoef=None,\n inheritable=False,\n )\n\n # These can appear in abstract indexes extending an existing one in order\n # to override exisitng parameters. Also they can appear in concrete\n # indexes.\n kwargs = so.SchemaField(\n s_expr.ExpressionDict,\n coerce=True,\n compcoef=0,\n default=so.DEFAULT_CONSTRUCTOR,\n inheritable=False,\n ddl_identity=True,\n )\n\n expr = so.SchemaField(\n s_expr.Expression,\n default=None,\n coerce=True,\n compcoef=0.909,\n ddl_identity=True,\n )\n\n except_expr = so.SchemaField(\n s_expr.Expression,\n default=None,\n coerce=True,\n compcoef=0.909,\n ddl_identity=True,\n )\n\n def __repr__(self) -> str:\n cls = self.__class__\n return '<{}.{} {!r} at 0x{:x}>'.format(\n cls.__module__, cls.__name__, self.id, id(self))\n\n __str__ = __repr__\n\n def as_delete_delta(\n self,\n *,\n schema: s_schema.Schema,\n context: so.ComparisonContext,\n ) -> sd.ObjectCommand[Index]:\n delta = super().as_delete_delta(schema=schema, context=context)\n old_params = self.get_params(schema).objects(schema)\n for p in old_params:\n delta.add(p.as_delete_delta(schema=schema, context=context))\n\n return delta\n\n def get_verbosename(\n self,\n schema: s_schema.Schema,\n *,\n with_parent: bool = False\n ) -> str:\n # baseline name for indexes\n vn = self.get_displayname(schema)\n\n if self.get_abstract(schema):\n return f\"abstract index '{vn}'\"\n else:\n # concrete index must have a subject\n assert self.get_subject(schema) is not None\n\n # add kwargs (if any) to the concrete name\n kwargs = self.get_kwargs(schema)\n if kwargs:\n kw = []\n for key, val in kwargs.items():\n kw.append(f'{key}:={val.text}')\n vn = f'{vn}({\", \".join(kw)})'\n\n vn = f\"index {vn!r}\"\n\n if with_parent:\n return self.add_parent_name(vn, schema)\n return vn\n\n def add_parent_name(\n self,\n base_name: str,\n schema: s_schema.Schema,\n ) -> str:\n # Remove the placeholder name of the generic index.\n if base_name == f\"index '{DEFAULT_INDEX}'\":\n base_name = 'index'\n\n return super().add_parent_name(base_name, schema)\n\n def is_non_concrete(self, schema: s_schema.Schema) -> bool:\n return self.get_subject(schema) is None\n\n @classmethod\n def get_shortname_static(cls, name: sn.Name) -> sn.QualName:\n name = sn.shortname_from_fullname(name)\n assert isinstance(name, sn.QualName)\n return name\n\n def get_all_kwargs(\n self,\n schema: s_schema.Schema,\n ) -> s_expr.ExpressionDict:\n kwargs = s_expr.ExpressionDict()\n all_kw = type(self).get_field('kwargs').merge_fn(\n self,\n self.get_ancestors(schema).objects(schema),\n 'kwargs',\n schema=schema,\n )\n if all_kw:\n kwargs.update(all_kw)\n\n return kwargs\n\n def get_root(\n self,\n schema: s_schema.Schema,\n ) -> Index:\n if not self.get_abstract(schema):\n name = sn.shortname_from_fullname(self.get_name(schema))\n index = schema.get(name, type=Index)\n else:\n index = self\n\n if index.get_bases(schema):\n return index.get_ancestors(schema).objects(schema)[-1]\n else:\n return index\n\n def get_concrete_kwargs(\n self,\n schema: s_schema.Schema,\n ) -> s_expr.ExpressionDict:\n assert not self.get_abstract(schema)\n\n root = self.get_root(schema)\n\n kwargs = self.get_all_kwargs(schema)\n\n for 
param in root.get_params(schema).objects(schema):\n kwname = param.get_parameter_name(schema)\n if (\n kwname not in kwargs and\n (val := param.get_default(schema)) is not None\n ):\n kwargs[kwname] = val\n\n return kwargs\n\n def is_defined_here(\n self,\n schema: s_schema.Schema,\n ) -> bool:\n \"\"\"\n Returns True iff the index has not been inherited from a parent subject,\n and was originally defined on the subject.\n \"\"\"\n return all(\n base.get_abstract(schema)\n for base in self.get_bases(schema).objects(schema)\n )\n\n\nIndexableSubject_T = TypeVar('IndexableSubject_T', bound='IndexableSubject')\n\n\nclass IndexableSubject(so.InheritingObject):\n indexes_refs = so.RefDict(\n attr='indexes',\n ref_cls=Index)\n\n indexes = so.SchemaField(\n so.ObjectIndexByFullname[Index],\n inheritable=False, ephemeral=True, coerce=True, compcoef=0.909,\n default=so.DEFAULT_CONSTRUCTOR)\n\n def add_index(\n self,\n schema: s_schema.Schema,\n index: Index,\n ) -> s_schema.Schema:\n return self.add_classref(schema, 'indexes', index)\n\n\nclass IndexSourceCommandContext:\n pass\n\n\nclass IndexSourceCommand(\n inheriting.InheritingObjectCommand[IndexableSubject_T],\n):\n pass\n\n\nclass IndexCommandContext(sd.ObjectCommandContext[Index],\n s_anno.AnnotationSubjectCommandContext):\n pass\n\n\nclass IndexCommand(\n referencing.ReferencedInheritingObjectCommand[Index],\n s_func.ParametrizedCommand[Index],\n context_class=IndexCommandContext,\n referrer_context_class=IndexSourceCommandContext,\n):\n\n @classmethod\n def _classname_from_ast(\n cls,\n schema: s_schema.Schema,\n astnode: qlast.NamedDDL,\n context: sd.CommandContext,\n ) -> sn.QualName:\n # We actually want to override how ReferencedObjectCommand determines\n # the classname\n shortname = super(\n referencing.ReferencedObjectCommand, cls\n )._classname_from_ast(schema, astnode, context)\n\n referrer_ctx = cls.get_referrer_context(context)\n if referrer_ctx is not None:\n\n referrer_name = referrer_ctx.op.classname\n assert isinstance(referrer_name, sn.QualName)\n quals = cls._classname_quals_from_ast(\n schema, astnode, shortname, referrer_name, context)\n\n name = sn.QualName(\n module=referrer_name.module,\n name=sn.get_specialized_name(\n shortname,\n str(referrer_name),\n *quals,\n ),\n )\n else:\n name = super()._classname_from_ast(schema, astnode, context)\n\n return name\n\n @classmethod\n def _classname_quals_from_ast(\n cls,\n schema: s_schema.Schema,\n astnode: qlast.NamedDDL,\n base_name: sn.Name,\n referrer_name: sn.QualName,\n context: sd.CommandContext,\n ) -> Tuple[str, ...]:\n assert isinstance(astnode, qlast.ConcreteIndexCommand)\n exprs = []\n\n kwargs = cls._index_kwargs_from_ast(schema, astnode, context)\n for key, val in kwargs.items():\n exprs.append(f'{key}:={val.text}')\n\n # use the normalized text directly from the expression\n expr = s_expr.Expression.from_ast(\n astnode.expr, schema, context.modaliases)\n expr_text = expr.text\n assert expr_text is not None\n exprs.append(expr_text)\n\n if astnode.except_expr:\n expr = s_expr.Expression.from_ast(\n astnode.except_expr, schema, context.modaliases)\n exprs.append('!' 
+ expr.text)\n\n return (cls._name_qual_from_exprs(schema, exprs),)\n\n @classmethod\n def _classname_quals_from_name(\n cls,\n name: sn.QualName\n ) -> Tuple[str, ...]:\n quals = sn.quals_from_fullname(name)\n return tuple(quals[-1:])\n\n @classmethod\n def _index_kwargs_from_ast(\n cls,\n schema: s_schema.Schema,\n astnode: qlast.NamedDDL,\n context: sd.CommandContext,\n ) -> Dict[str, s_expr.Expression]:\n kwargs = dict()\n # Some abstract indexes and all concrete index commands have kwargs.\n assert isinstance(astnode, (qlast.CreateIndex,\n qlast.ConcreteIndexCommand))\n\n for key, val in astnode.kwargs.items():\n kwargs[key] = s_expr.Expression.from_ast(\n val, schema, context.modaliases, as_fragment=True)\n\n return kwargs\n\n @overload\n def get_object(\n self,\n schema: s_schema.Schema,\n context: sd.CommandContext,\n *,\n name: Optional[sn.Name] = None,\n default: Union[Index, so.NoDefaultT] = so.NoDefault,\n sourcectx: Optional[parsing.ParserContext] = None,\n ) -> Index:\n ...\n\n @overload\n def get_object(\n self,\n schema: s_schema.Schema,\n context: sd.CommandContext,\n *,\n name: Optional[sn.Name] = None,\n default: None = None,\n sourcectx: Optional[parsing.ParserContext] = None,\n ) -> Optional[Index]:\n ...\n\n def get_object(\n self,\n schema: s_schema.Schema,\n context: sd.CommandContext,\n *,\n name: Optional[sn.Name] = None,\n default: Union[Index, so.NoDefaultT, None] = so.NoDefault,\n sourcectx: Optional[parsing.ParserContext] = None,\n ) -> Optional[Index]:\n try:\n return super().get_object(\n schema, context, name=name,\n default=default, sourcectx=sourcectx,\n )\n except errors.InvalidReferenceError:\n referrer_ctx = self.get_referrer_context_or_die(context)\n referrer = referrer_ctx.scls\n expr = self.get_ddl_identity('expr')\n raise errors.InvalidReferenceError(\n f\"index on ({expr.text}) does not exist on \"\n f\"{referrer.get_verbosename(schema)}\"\n ) from None\n\n @classmethod\n def _cmd_from_ast(\n cls,\n schema: s_schema.Schema,\n astnode: qlast.DDLOperation,\n context: sd.CommandContext,\n ) -> sd.ObjectCommand[Index]:\n cmd = super()._cmd_from_ast(schema, astnode, context)\n if isinstance(astnode, qlast.ConcreteIndexCommand):\n cmd.set_ddl_identity(\n 'expr',\n s_expr.Expression.from_ast(\n astnode.expr,\n schema,\n context.modaliases,\n ),\n )\n return cmd\n\n def _get_ast(\n self,\n schema: s_schema.Schema,\n context: sd.CommandContext,\n *,\n parent_node: Optional[qlast.DDLOperation] = None,\n ) -> Optional[qlast.DDLOperation]:\n astnode = super()._get_ast(schema, context, parent_node=parent_node)\n\n kwargs = self.get_resolved_attribute_value(\n 'kwargs',\n schema=schema,\n context=context,\n )\n if kwargs:\n assert isinstance(astnode, (qlast.CreateIndex,\n qlast.ConcreteIndexCommand))\n astnode.kwargs = {\n name: expr.qlast for name, expr in kwargs.items()\n }\n\n return astnode\n\n def get_ast_attr_for_field(\n self,\n field: str,\n astnode: Type[qlast.DDLOperation],\n ) -> Optional[str]:\n if field in ('kwargs', 'expr', 'except_expr'):\n return field\n else:\n return super().get_ast_attr_for_field(field, astnode)\n\n def get_ddl_identity_fields(\n self,\n context: sd.CommandContext,\n ) -> Tuple[so.Field[Any], ...]:\n id_fields = super().get_ddl_identity_fields(context)\n omit_fields = set()\n\n if (\n self.get_attribute_value('abstract')\n and not self.get_attribute_value('bases')\n ):\n # Base abstract indexes don't have kwargs at all.\n omit_fields.add('kwargs')\n\n if omit_fields:\n return tuple(f for f in id_fields if f.name not in 
omit_fields)\n else:\n return id_fields\n\n def compile_expr_field(\n self,\n schema: s_schema.Schema,\n context: sd.CommandContext,\n field: so.Field[Any],\n value: s_expr.Expression,\n track_schema_ref_exprs: bool=False,\n ) -> s_expr.CompiledExpression:\n from edb.ir import utils as irutils\n from edb.ir import ast as irast\n\n if field.name in {'expr', 'except_expr'}:\n # type ignore below, for the class is used as mixin\n parent_ctx = context.get_ancestor(\n IndexSourceCommandContext, # type: ignore\n self\n )\n assert parent_ctx is not None\n assert isinstance(parent_ctx.op, sd.ObjectCommand)\n subject = parent_ctx.op.get_object(schema, context)\n\n expr = value.compiled(\n schema=schema,\n options=qlcompiler.CompilerOptions(\n modaliases=context.modaliases,\n schema_object_context=self.get_schema_metaclass(),\n anchors={qlast.Subject().name: subject},\n path_prefix_anchor=qlast.Subject().name,\n singletons=frozenset([subject]),\n apply_query_rewrites=False,\n track_schema_ref_exprs=track_schema_ref_exprs,\n detached=True,\n ),\n )\n\n # Check that the inferred cardinality is no more than 1\n if expr.irast.cardinality.is_multi():\n raise errors.SchemaDefinitionError(\n f'possibly more than one element returned by '\n f'the index expression where only singletons '\n f'are allowed',\n context=value.qlast.context,\n )\n\n if expr.irast.volatility != qltypes.Volatility.Immutable:\n raise errors.SchemaDefinitionError(\n f'index expressions must be immutable',\n context=value.qlast.context,\n )\n\n refs = irutils.get_longest_paths(expr.irast)\n\n has_multi = False\n for ref in refs:\n assert subject\n while ref.rptr:\n rptr = ref.rptr\n if rptr.dir_cardinality.is_multi():\n has_multi = True\n\n # We don't need to look further than the subject,\n # which is always valid. 
(And which is a singleton\n # in an index expression if it is itself a\n # singleton, regardless of other parts of the path.)\n if (\n isinstance(rptr.ptrref, irast.PointerRef)\n and rptr.ptrref.id == subject.id\n ):\n break\n ref = rptr.source\n\n if has_multi and irutils.contains_set_of_op(expr.irast):\n raise errors.SchemaDefinitionError(\n \"cannot use aggregate functions or operators \"\n \"in an index expression\",\n context=self.source_context,\n )\n\n return expr\n else:\n return super().compile_expr_field(\n schema, context, field, value, track_schema_ref_exprs)\n\n def get_dummy_expr_field_value(\n self,\n schema: s_schema.Schema,\n context: sd.CommandContext,\n field: so.Field[Any],\n value: Any,\n ) -> Optional[s_expr.Expression]:\n if field.name == 'expr':\n return s_expr.Expression(text='0')\n else:\n raise NotImplementedError(f'unhandled field {field.name!r}')\n\n\nclass CreateIndex(\n IndexCommand,\n referencing.CreateReferencedInheritingObject[Index],\n):\n astnode = [qlast.CreateConcreteIndex, qlast.CreateIndex]\n referenced_astnode = qlast.CreateConcreteIndex\n\n @classmethod\n def _cmd_tree_from_ast(\n cls,\n schema: s_schema.Schema,\n astnode: qlast.DDLOperation,\n context: sd.CommandContext,\n ) -> sd.Command:\n cmd = super()._cmd_tree_from_ast(schema, astnode, context)\n\n assert isinstance(cmd, IndexCommand)\n assert isinstance(astnode, (qlast.CreateConcreteIndex,\n qlast.CreateIndex))\n\n if isinstance(astnode, qlast.CreateIndex):\n cmd.set_attribute_value('abstract', True)\n\n params = cls._get_param_desc_from_ast(\n schema, context.modaliases, astnode)\n for param in params:\n # as_create_delta requires the specific type\n cmd.add_prerequisite(param.as_create_delta(\n schema, cmd.classname, context=context))\n\n # There are several possibilities for abstract indexes:\n # 1) base abstract index\n # 2) an abstract index extending another one\n # 3) an abstract index listing index fallback alternatives\n if astnode.bases is None:\n if astnode.index_types is None:\n # This actually defines a new index (1).\n pass\n else:\n # This is for index fallback alternatives (3).\n raise NotImplementedError(\"Index fallback not implemented\")\n else:\n # Extending existing indexes for composition (2).\n kwargs = cls._index_kwargs_from_ast(schema, astnode, context)\n if kwargs:\n cmd.set_attribute_value('kwargs', kwargs)\n\n elif isinstance(astnode, qlast.CreateConcreteIndex):\n orig_text = cls.get_orig_expr_text(schema, astnode, 'expr')\n\n if (\n orig_text is not None\n and context.compat_ver_is_before(\n (1, 0, verutils.VersionStage.ALPHA, 6)\n )\n ):\n # Versions prior to a6 used a different expression\n # normalization strategy, so we must renormalize the\n # expression.\n expr_ql = qlcompiler.renormalize_compat(\n astnode.expr,\n orig_text,\n schema=schema,\n localnames=context.localnames,\n )\n else:\n expr_ql = astnode.expr\n\n kwargs = cls._index_kwargs_from_ast(schema, astnode, context)\n if kwargs:\n cmd.set_attribute_value('kwargs', kwargs)\n\n cmd.set_attribute_value(\n 'expr',\n s_expr.Expression.from_ast(\n expr_ql,\n schema,\n context.modaliases,\n ),\n )\n\n if astnode.except_expr:\n cmd.set_attribute_value(\n 'except_expr',\n s_expr.Expression.from_ast(\n astnode.except_expr,\n schema,\n context.modaliases,\n ),\n )\n\n return cmd\n\n @classmethod\n def as_inherited_ref_ast(\n cls,\n schema: s_schema.Schema,\n context: sd.CommandContext,\n name: sn.Name,\n parent: referencing.ReferencedObject,\n ) -> qlast.ObjectDDL:\n assert isinstance(parent, Index)\n 
astnode_cls = cls.referenced_astnode\n\n expr = parent.get_expr(schema)\n assert expr is not None\n expr_ql = edgeql.parse_fragment(expr.text)\n\n except_expr = parent.get_except_expr(schema)\n if except_expr:\n except_expr_ql = except_expr.qlast\n else:\n except_expr_ql = None\n\n qlkwargs = {\n key: val.qlast for key, val in parent.get_kwargs(schema).items()\n }\n\n return astnode_cls(\n name=cls.get_inherited_ref_name(schema, context, parent, name),\n kwargs=qlkwargs,\n expr=expr_ql,\n except_expr=except_expr_ql,\n )\n\n @classmethod\n def get_inherited_ref_name(\n cls,\n schema: s_schema.Schema,\n context: sd.CommandContext,\n parent: so.Object,\n name: sn.Name,\n ) -> qlast.ObjectRef:\n bn = sn.shortname_from_fullname(name)\n return utils.name_to_ast_ref(bn)\n\n def _validate_kwargs(\n self,\n schema: s_schema.Schema,\n params: s_func.FuncParameterList,\n kwargs: s_expr.ExpressionDict,\n ancestor_name: str,\n ) -> None:\n if not kwargs:\n return\n\n if not params:\n raise errors.SchemaDefinitionError(\n f'the {ancestor_name} does not support any parameters',\n context=self.source_context\n )\n\n # Make sure that the kwargs are valid.\n for key in kwargs:\n expr = kwargs[key]\n param = params.get_by_name(schema, key)\n if param is None:\n raise errors.SchemaDefinitionError(\n f'the {ancestor_name} does not have a parameter {key!r}',\n context=self.source_context\n )\n\n param_type = param.get_type(schema)\n comp_expr = s_expr.Expression.compiled(expr, schema=schema)\n expr_type = comp_expr.irast.stype\n\n if (\n not param_type.is_polymorphic(schema) and\n not expr_type.is_polymorphic(schema) and\n not expr_type.implicitly_castable_to(\n param_type, schema)\n ):\n raise errors.SchemaDefinitionError(\n f'the {key!r} parameter of the '\n f'{self.get_verbosename()} has type of '\n f'{expr_type.get_displayname(schema)} that '\n f'is not implicitly castable to the '\n f'corresponding parameter of the '\n f'{ancestor_name} with type '\n f'{param_type.get_displayname(schema)}',\n context=self.source_context,\n )\n\n def validate_object(\n self,\n schema: s_schema.Schema,\n context: sd.CommandContext,\n ) -> None:\n super().validate_object(schema, context)\n\n referrer_ctx = self.get_referrer_context(context)\n\n # Get kwargs if any, so that we can process them later.\n kwargs = self.get_resolved_attribute_value(\n 'kwargs',\n schema=schema,\n context=context,\n )\n\n if referrer_ctx is None:\n # Make sure that all bases are ultimately inherited from the same\n # root base class.\n bases = self.get_resolved_attribute_value(\n 'bases',\n schema=schema,\n context=context,\n )\n if bases:\n # Users can extend abstract indexes.\n root = None\n for base in bases.objects(schema):\n lineage = [base] + list(\n base.get_ancestors(schema).objects(schema))\n\n if root is None:\n root = lineage[-1]\n elif root != lineage[-1]:\n raise errors.SchemaDefinitionError(\n f'cannot create {self.get_verbosename()} '\n f'because it extends incompatible abstract indxes',\n context=self.source_context\n )\n\n # We should have found a root because we have bases.\n assert root is not None\n # Make sure that the kwargs are valid.\n self._validate_kwargs(\n schema,\n root.get_params(schema),\n kwargs,\n root.get_verbosename(schema),\n )\n\n else:\n # Creating new abstract indexes is only allowed in \"EdgeDB\n # developer\" mode, i.e. 
when populating std library, etc.\n if not context.stdmode and not context.testmode:\n raise errors.SchemaDefinitionError(\n f'cannot create {self.get_verbosename()} '\n f'because user-defined abstract indexes are not '\n f'supported',\n context=self.source_context\n )\n\n return\n\n # The checks below apply only to concrete indexes.\n subject = referrer_ctx.scls\n assert isinstance(subject, (s_types.Type, s_pointers.Pointer))\n\n # FTS\n if self.scls.has_base_with_name(schema, sn.QualName('fts', 'index')):\n\n if isinstance(subject, s_pointers.Pointer):\n raise errors.SchemaDefinitionError(\n \"fts::index cannot be declared on links\",\n context=self.source_context\n )\n\n # Ensure that the name of the index (if given) matches an existing\n # abstract index.\n name = sn.shortname_from_fullname(\n self.get_resolved_attribute_value(\n 'name',\n schema=schema,\n context=context,\n )\n )\n\n # HACK: the old concrete indexes all have names in form __::idx, but\n # this should be the actual name provided. Also the index without name\n # defaults to '__::idx'.\n if name != DEFAULT_INDEX and (\n abs_index := schema.get(name, type=Index)\n ):\n # only abstract indexes should have unmangled names\n assert abs_index.get_abstract(schema)\n root = abs_index.get_root(schema)\n\n # Make sure that kwargs and parameters match in name and type.\n # Also make sure that all parameters have values at this point\n # (either default or provided in kwargs).\n params = root.get_params(schema)\n inh_kwargs = self.scls.get_all_kwargs(schema)\n\n self._validate_kwargs(schema,\n params,\n kwargs,\n abs_index.get_verbosename(schema))\n\n unused_names = {p.get_parameter_name(schema)\n for p in params.objects(schema)}\n if kwargs:\n unused_names -= set(kwargs)\n if inh_kwargs:\n unused_names -= set(inh_kwargs)\n if unused_names:\n # Check that all of these parameters have defaults.\n for pname in list(unused_names):\n param = params.get_by_name(schema, pname)\n if param and param.get_default(schema) is not None:\n unused_names.discard(pname)\n\n if unused_names:\n names = ', '.join(repr(n) for n in sorted(unused_names))\n raise errors.SchemaDefinitionError(\n f'cannot create {self.get_verbosename()} '\n f'because the following parameters are still undefined: '\n f'{names}.',\n context=self.source_context\n )\n\n # Make sure that the concrete index expression type matches the\n # abstract index type.\n expr = self.get_resolved_attribute_value(\n 'expr',\n schema=schema,\n context=context,\n )\n options = qlcompiler.CompilerOptions(\n anchors={qlast.Subject().name: subject},\n path_prefix_anchor=qlast.Subject().name,\n singletons=frozenset([subject]),\n apply_query_rewrites=False,\n schema_object_context=self.get_schema_metaclass(),\n )\n comp_expr = s_expr.Expression.compiled(\n expr, schema=schema, options=options\n )\n expr_type = comp_expr.irast.stype\n\n if not is_index_valid_for_type(root, expr_type, comp_expr.schema):\n hint = None\n if str(name) == 'fts::index':\n hint = (\n 'fts::document can be constructed with '\n 'fts::with_options(str, ...)'\n )\n\n raise errors.SchemaDefinitionError(\n f'index expression ({expr.text}) '\n f'is not of a valid type for the '\n f'{self.scls.get_verbosename(comp_expr.schema)}',\n context=self.source_context,\n details=hint,\n )\n\n def get_resolved_attributes(\n self,\n schema: s_schema.Schema,\n context: sd.CommandContext,\n ) -> Dict[str, Any]:\n params = self._get_params(schema, context)\n props = super().get_resolved_attributes(schema, context)\n props['params'] = params\n 
return props\n\n @classmethod\n def _classbases_from_ast(\n cls,\n schema: s_schema.Schema,\n astnode: qlast.ObjectDDL,\n context: sd.CommandContext,\n ) -> List[so.ObjectShell[Index]]:\n if (\n isinstance(astnode, qlast.CreateConcreteIndex)\n and astnode.name\n and astnode.name.module != DEFAULT_INDEX.module\n and astnode.name.name != DEFAULT_INDEX.name\n ):\n base = utils.ast_objref_to_object_shell(\n astnode.name,\n metaclass=Index,\n schema=schema,\n modaliases=context.modaliases,\n )\n return [base]\n else:\n return super()._classbases_from_ast(schema, astnode, context)\n\n\nclass RenameIndex(\n IndexCommand,\n referencing.RenameReferencedInheritingObject[Index],\n):\n\n @classmethod\n def _cmd_from_ast(\n cls,\n schema: s_schema.Schema,\n astnode: qlast.DDLOperation,\n context: sd.CommandContext,\n ) -> RenameIndex:\n return cast(\n RenameIndex,\n super()._cmd_from_ast(schema, astnode, context),\n )\n\n\nclass AlterIndexOwned(\n IndexCommand,\n referencing.AlterOwned[Index],\n field='owned',\n):\n pass\n\n\nclass AlterIndex(\n IndexCommand,\n referencing.AlterReferencedInheritingObject[Index],\n):\n astnode = [qlast.AlterConcreteIndex, qlast.AlterIndex]\n referenced_astnode = qlast.AlterConcreteIndex\n\n def canonicalize_alter_from_external_ref(\n self,\n schema: s_schema.Schema,\n context: sd.CommandContext,\n ) -> None:\n if (\n not self.get_attribute_value('abstract')\n and (indexexpr := self.get_attribute_value('expr')) is not None\n ):\n # To compute the new name, we construct an AST of the\n # index, since that is the infrastructure we have for\n # computing the classname.\n name = sn.shortname_from_fullname(self.classname)\n assert isinstance(name, sn.QualName), \"expected qualified name\"\n ast = qlast.CreateConcreteIndex(\n name=qlast.ObjectRef(name=name.name, module=name.module),\n expr=indexexpr.qlast,\n )\n quals = sn.quals_from_fullname(self.classname)\n new_name = self._classname_from_ast_and_referrer(\n schema, sn.QualName.from_string(quals[0]), ast, context)\n if new_name == self.classname:\n return\n\n rename = self.scls.init_delta_command(\n schema, sd.RenameObject, new_name=new_name)\n rename.set_attribute_value(\n 'name', value=new_name, orig_value=self.classname)\n self.add(rename)\n\n\nclass DeleteIndex(\n IndexCommand,\n referencing.DeleteReferencedInheritingObject[Index],\n):\n astnode = [qlast.DropConcreteIndex, qlast.DropIndex]\n referenced_astnode = qlast.DropConcreteIndex\n\n def _delete_begin(\n self,\n schema: s_schema.Schema,\n context: sd.CommandContext,\n ) -> s_schema.Schema:\n schema = super()._delete_begin(schema, context)\n if not context.canonical:\n for param in self.scls.get_params(schema).objects(schema):\n self.add(param.init_delta_command(schema, sd.DeleteObject))\n return schema\n\n @classmethod\n def _cmd_tree_from_ast(\n cls,\n schema: s_schema.Schema,\n astnode: qlast.DDLOperation,\n context: sd.CommandContext,\n ) -> sd.Command:\n cmd = super()._cmd_tree_from_ast(schema, astnode, context)\n\n if isinstance(astnode, qlast.ConcreteIndexCommand):\n cmd.set_attribute_value(\n 'expr',\n s_expr.Expression.from_ast(\n astnode.expr, schema, context.modaliases),\n )\n\n return cmd\n\n\nclass RebaseIndex(\n IndexCommand,\n referencing.RebaseReferencedInheritingObject[Index],\n):\n pass\n\n\ndef get_effective_fts_index(\n subject: IndexableSubject, schema: s_schema.Schema\n) -> Tuple[Optional[Index], bool]:\n \"\"\"\n Returns the effective index of a subject and a boolean indicating\n if the effective index has overriden any other fts indexes on 
this subject.\n \"\"\"\n indexes: so.ObjectIndexByFullname[Index] = subject.get_indexes(schema)\n\n fts_name = sn.QualName('fts', 'index')\n fts_indexes = [\n ind\n for ind in indexes.objects(schema)\n if ind.has_base_with_name(schema, fts_name)\n ]\n if len(fts_indexes) == 0:\n return (None, False)\n\n fts_indexes_defined_here = [\n ind for ind in fts_indexes if ind.is_defined_here(schema)\n ]\n\n if len(fts_indexes_defined_here) > 0:\n # indexes defined here have priority\n\n if len(fts_indexes_defined_here) > 1:\n subject_name = subject.get_displayname(schema)\n raise errors.SchemaDefinitionError(\n f'multiple {fts_name} indexes defined for {subject_name}'\n )\n effective = fts_indexes_defined_here[0]\n has_overridden = len(fts_indexes) >= 2\n\n else:\n # there are no fts indexes defined on the subject\n # the inherited indexes take effect\n\n if len(fts_indexes) > 1:\n subject_name = subject.get_displayname(schema)\n raise errors.SchemaDefinitionError(\n f'multiple {fts_name} indexes inherited for {subject_name}'\n )\n\n effective = fts_indexes[0]\n has_overridden = False\n\n return (effective, has_overridden)\n", "path": "edb/schema/indexes.py"}]} |
gh_patches_debug_1250 | rasdani/github-patches | git_diff | zestedesavoir__zds-site-6181 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bouton "Comparer avec la version en ligne" quand les versions sont identiques
**Description du bug**
Dans un tutoriel publié j'ai le bouton "Comparer avec la version en ligne" dans la sidebar "Actions" (à gauche) alors que **les deux versions sont identiques**
**Comment reproduire ?**
La liste des étapes qui permet de reproduire le bug :
1. Allez sur un tutoriel publié dont la version en ligne et publié sont identique (le sien ~~ou celui d'un autre en étant staff~~) ;
2. Constater que le bouton est présent et que les deux versions sont identiques dans le lien du bouton `?from=cacc1f5d99201aa1977b3a95889611dc3ee7f9ff&to=cacc1f5d99201aa1977b3a95889611dc3ee7f9ff`
**Comportement attendu**
Vérifier que les versions soient différentes :
- Si elles sont différentes, on affiche le bouton ;
- Si elles sont identique, on masque le bouton. ***OU remplacer le bouton** par un texte : "Version identique".*
**Capture d'écran**

--- END ISSUE ---
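For reference, a minimal sketch of the check the expected behaviour describes — hiding the compare link when the draft and the online version point to the same commit. It assumes an object exposing `sha_draft` and `sha_public`, the attribute names used on `PublishableContent` in the file below; the helper name and its placement are illustrative assumptions, not the project's actual fix.

```python
# Illustrative sketch only (assumed helper, not the actual zds-site change).
# `content` is assumed to expose `sha_draft` and `sha_public`, as
# PublishableContent does in zds/tutorialv2.


def should_show_compare_button(content) -> bool:
    """Show the compare link only when the draft and online versions differ."""
    # No public version yet -> nothing to compare against.
    if not content.sha_public:
        return False
    # Identical SHAs mean the draft and the online version are the same,
    # so a compare link of the form ?from=X&to=X would be meaningless.
    return content.sha_draft != content.sha_public
```

A view mixin could expose such a flag in `get_context_data()` so the template only renders the button when it is true.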
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zds/tutorialv2/mixins.py`
Content:
```
1 from django.contrib import messages
2 from django.core.exceptions import PermissionDenied
3 from django.urls import reverse
4 from django.http import Http404, HttpResponse, HttpResponsePermanentRedirect, StreamingHttpResponse
5 from django.template.loader import render_to_string
6 from django.shortcuts import redirect
7 from django.utils.translation import gettext_lazy as _
8 from django.views.generic import DetailView, FormView
9 from django.views.generic import View
10
11 from zds.forum.models import Topic
12 from zds.tutorialv2.models.database import PublishableContent, PublishedContent, ContentRead
13 from zds.tutorialv2.utils import mark_read
14 from zds.utils.models import HelpWriting
15
16
17 class SingleContentViewMixin:
18 """
19 Base mixin to get only one content, and its corresponding versioned content
20
21 Deals with URL resolution in the following way:
22
23 1. In ``get_object()``:
24 - Fetch the ``PublishableContent`` according to ``self.kwargs['pk']``, ``self.request.GET['pk']`` or \
25 ``self.request.POST['pk']`` (one of these have to be defined). Raise `Http404` if any.
26 - Then, check permissions with respect to ``self.must_be_author`` and ``self.authorized_for_staff`` \
27 (and define ``self.is_staff`` and ``self.is_author``). Raise ``PermissionDenied`` if any.
28
29 2. In ``get_versioned_object()``:
30 - Deal with sha : assume ``self.object.sha_draft`` by default, but reset according to \
31 ``self.request.GET['version']``, if exists. \
32 Then, check ``self.only_draft_version`` and raise ``PermissionDenied`` if any
33 - Fetch the ``VersionedContent``. Due to the use of ``self.object.load_version_or_404(sha)``,\
34 raise ``Http404``.
35 - Check if its the beta or public version, and allow access if it's the case. Raise ``PermissionDenied``.
36 - Check slug if ``self.kwargs['slug']`` is defined. Raise ``Http404`` if any.
37
38 3. In ``get_public_object()``, fetch the last published version, if any
39
40 Any redefinition of any of these two functions should take care of those points.
41 """
42
43 object = None
44 versioned_object = None
45 public_content_object = None
46
47 prefetch_all = True
48 sha = None
49 must_be_author = True
50 authorized_for_staff = True
51 is_staff = False
52 is_author = False
53 only_draft_version = True
54 must_redirect = False
55 public_is_prioritary = True
56
57 def get_object(self, queryset=None):
58 """Get database representation of the content by its `pk`, then check permissions"""
59
60 # fetch object:
61 try:
62 if "pk" in self.kwargs:
63 pk = int(self.kwargs["pk"])
64 elif "pk" in self.request.GET:
65 pk = int(self.request.GET["pk"])
66 elif "pk" in self.request.POST:
67 pk = int(self.request.POST["pk"])
68 else:
69 raise Http404("Impossible de trouver le paramètre 'pk'.")
70 except ValueError as badvalue:
71 raise Http404(f"La valeur du paramètre pk '{badvalue}' n'est pas un entier valide.")
72
73 queryset = queryset or PublishableContent.objects
74
75 if self.prefetch_all:
76 queryset = queryset.select_related("licence").prefetch_related("authors").prefetch_related("subcategory")
77 obj = queryset.filter(pk=pk).first()
78
79 if not obj:
80 raise Http404("Aucun contenu ne possède cet identifiant.")
81
82 # check permissions:
83 self.is_staff = self.request.user.has_perm("tutorialv2.change_publishablecontent")
84 self.is_author = self.request.user in obj.authors.all()
85
86 if self.must_be_author and not self.is_author:
87 if not self.authorized_for_staff or (self.authorized_for_staff and not self.is_staff):
88 raise PermissionDenied
89
90 return obj
91
92 def get_versioned_object(self):
93 """Gets the asked version of current content."""
94
95 # fetch version:
96 sha = self.object.sha_draft
97
98 if not self.only_draft_version:
99 if self.sha:
100 sha = self.sha
101 else:
102 if "version" in self.request.GET:
103 sha = self.request.GET["version"]
104 elif "version" in self.request.POST:
105 sha = self.request.POST["version"]
106
107 self.sha = sha
108
109 # if beta or public version, user can also access to it
110 is_beta = self.object.is_beta(self.sha)
111 is_public = self.object.is_public(self.sha) and self.public_is_prioritary
112
113 if not is_beta and not is_public and not self.is_author:
114 if not self.is_staff or (not self.authorized_for_staff and self.must_be_author):
115 raise PermissionDenied
116
117 # load versioned file
118 versioned = self.object.load_version_or_404(self.sha)
119
120 # check slug, if any:
121 if "slug" in self.kwargs:
122 slug = self.kwargs["slug"]
123 if versioned.slug != slug:
124 if slug != self.object.slug: # retro-compatibility, but should raise permanent redirect instead
125 raise Http404("Ce slug n'existe pas pour ce contenu.")
126
127 return versioned
128
129 def get_public_object(self):
130 """Get the published version, if any"""
131
132 object = PublishedContent.objects.filter(content_pk=self.object.pk, must_redirect=False).last()
133 if object:
134 object.load_public_version()
135 return object
136
137
138 class SingleContentPostMixin(SingleContentViewMixin):
139 """
140 Base mixin used to get content from post query
141 """
142
143 # represent the fact that we have to check if the version given in self.request.POST['version'] exists
144 versioned = True
145
146 def get_object(self, queryset=None):
147 self.object = super().get_object()
148
149 if self.versioned and "version" in self.request.POST["version"]:
150 self.object.load_version_or_404(sha=self.request.POST["version"])
151 return self.object
152
153
154 class ModalFormView(FormView):
155 """If `self.modal_form` is set `True`, this class will ensure that the redirection is made to the previous page
156 if an error appear"""
157
158 modal_form = False # `form_invalid()` will behave differently if `True`, see implementation below
159
160 def form_invalid(self, form):
161 """If `self.modal_form` is set `True`, this function is rewritten to send back to the previous page
162 with an error message, instead of using the form template which is normally provided.
163
164 The redirection is made to `form.previous_page_url`, if exists, `content:view` otherwise."""
165
166 if not self.modal_form:
167 return super().form_invalid(form)
168 else:
169 errors = form.errors.as_data()
170 if len(errors) > 0:
171 # only the first error is provided
172 error_message = list(errors.values())[0][0].messages[0]
173 messages.error(self.request, error_message)
174 else:
175 messages.error(self.request, _("Une erreur inconnue est survenue durant le traitement des données."))
176
177 if hasattr(form, "previous_page_url"):
178 return redirect(form.previous_page_url)
179 else:
180 return redirect(reverse("content:view")) # assume a default url
181
182
183 class FormWithPreview(FormView):
184 def post(self, request, *args, **kwargs):
185 form = self.form_class(request.POST)
186
187 if "preview" in request.POST:
188 self.form_invalid(form)
189 if request.is_ajax():
190 content = render_to_string("misc/preview.part.html", {"text": request.POST.get("text")})
191 return StreamingHttpResponse(content)
192
193 return super().post(request, *args, **kwargs)
194
195
196 class SingleContentFormViewMixin(SingleContentViewMixin, ModalFormView):
197 """
198 This enhanced FormView ensure,
199
200 - by surcharging `dispatch()`, that:
201 * `self.object` contains the result of `get_object()` (as for DetailView)
202 * `self.versioned_object` contains the results of `get_versioned_object()`
203 - by surcharging `get_context_data()`, that
204 * context['content'] contains `self.versioned_object`
205 """
206
207 def dispatch(self, request, *args, **kwargs):
208 self.object = self.get_object()
209 self.versioned_object = self.get_versioned_object()
210 if self.object.sha_public:
211 self.public_content_object = self.get_public_object()
212
213 return super().dispatch(request, *args, **kwargs)
214
215 def get_context_data(self, **kwargs):
216 context = super().get_context_data(**kwargs)
217 context["content"] = self.versioned_object
218 context["is_staff"] = self.is_staff
219 return context
220
221
222 class SingleContentDetailViewMixin(SingleContentViewMixin, DetailView):
223 """
224 This enhanced DetailView ensure,
225
226 - by rewriting `get()`, that:
227 * `self.object` contains the result of `get_object()` (as it must be if `get()` is not rewritten)
228 * `self.sha` is set according to `self.request.GET['version']` (if any) and `self.object.sha_draft` otherwise
229 * `self.versioned_object` contains the results of `get_versioned_object()`
230 - by surcharging `get_context_data()`, that
231 * context['content'] contains `self.versioned_object`
232 * context['can_edit'] is set
233 * context['version'] is set (if different from `self.object.sha_draft`)
234 * context['beta_topic'] is set (if any)
235 """
236
237 def get(self, request, *args, **kwargs):
238 self.object = self.get_object()
239
240 if not self.sha:
241 try:
242 self.sha = request.GET["version"]
243 except KeyError:
244 self.sha = self.object.sha_draft
245
246 self.versioned_object = self.get_versioned_object()
247 if self.object.sha_public:
248 self.public_content_object = self.get_public_object()
249
250 context = self.get_context_data(object=self.object)
251 return self.render_to_response(context)
252
253 def get_context_data(self, **kwargs):
254 context = super().get_context_data(**kwargs)
255 context["helps"] = list(HelpWriting.objects.all())
256 context["content_helps"] = list(self.object.helps.all())
257 context["content"] = self.versioned_object
258 context["can_edit"] = self.is_author
259 context["is_staff"] = self.is_staff
260 if self.object.type == "OPINION":
261 context["can_publish"] = not self.object.is_permanently_unpublished()
262 if self.sha != self.object.sha_draft:
263 context["version"] = self.sha
264
265 is_allowed = self.is_author or self.is_staff
266 is_same_version = not self.sha or self.sha == self.object.sha_draft
267 context["can_add_something"] = is_allowed and is_same_version
268
269 if self.object.beta_topic:
270 beta_topic = Topic.objects.get(pk=self.object.beta_topic.pk)
271
272 if beta_topic:
273 context["beta_topic"] = beta_topic
274
275 return context
276
277
278 class ContentTypeMixin:
279 """This class deals with the type of contents and fill context according to that"""
280
281 current_content_type = None
282
283 def get_context_data(self, **kwargs):
284 context = super().get_context_data(**kwargs)
285
286 v_type_name = _("contenu")
287 v_type_name_plural = _("contenus")
288
289 if self.current_content_type == "ARTICLE":
290 v_type_name = _("article")
291 v_type_name_plural = _("articles")
292
293 if self.current_content_type == "TUTORIAL":
294 v_type_name = _("tutoriel")
295 v_type_name_plural = _("tutoriels")
296
297 if self.current_content_type == "OPINION":
298 v_type_name = _("billet")
299 v_type_name_plural = _("billets")
300
301 context["current_content_type"] = self.current_content_type
302 context["verbose_type_name"] = v_type_name
303 context["verbose_type_name_plural"] = v_type_name_plural
304
305 return context
306
307
308 class MustRedirect(Exception):
309 """Exception raised when this is not the last version of the content which is called"""
310
311 def __init__(self, url, *args, **kwargs):
312 """
313 initialize the exception
314
315 :param url: the targetted url
316 :param args: exception *args
317 :param kwargs: exception **kwargs
318 """
319 super().__init__(*args, **kwargs)
320 self.url = url
321
322
323 class SingleOnlineContentViewMixin(ContentTypeMixin):
324
325 """
326 Base mixin to get only one content online content
327
328 Deals with URL resolution in the following way:
329
330 1. In `get_object()`:
331 - Fetch the ``PublicContent`` according to ``self.kwargs['pk']``, ``self.request.GET['pk']`` or \
332 ``self.request.POST['pk']`` 0(one of these have to be defined). Raise ``Http404`` if any.
333 - Check if ``self.current_content_type`` if defined, and use it if it's the case
334 - Check if ``slug`` is defined, also check object it if it's the case
335 - Then, define ``self.is_staff`` and ``self.is_author``.
336 2. In ``get_versioned_object()``: Fetch the ``VersionedContent``. Due to the use of
337 ``self.public_content_object.load_public_version_or_404()``, raise ``Http404`` if any.
338
339 Any redefinition of any of these two functions should take care of those points.
340
341 """
342
343 object = None
344 public_content_object = None
345 versioned_object = None
346 redirection_is_needed = True
347
348 is_author = False
349 is_staff = False
350
351 def get_redirect_url(self, public_version):
352 """Return the most recent url, based on the current public version"""
353 return public_version.content.public_version.get_absolute_url_online()
354
355 def get_public_object(self):
356 try:
357 if "pk" in self.kwargs:
358 pk = int(self.kwargs["pk"])
359 elif "pk" in self.request.GET:
360 pk = int(self.request.GET["pk"])
361 elif "pk" in self.request.POST:
362 pk = int(self.request.POST["pk"])
363 else:
364 raise Http404("Impossible de trouver le paramètre 'pk'.")
365 except ValueError as badvalue:
366 raise Http404(f"La valeur du paramètre pk '{badvalue}' n'est pas un entier valide.")
367 queryset = (
368 PublishedContent.objects.filter(content_pk=pk)
369 .prefetch_related("content")
370 .prefetch_related("content__authors")
371 .prefetch_related("content__subcategory")
372 .prefetch_related("content__tags")
373 .prefetch_related("content__public_version")
374 .select_related("content__last_note")
375 )
376
377 if self.current_content_type:
378 queryset = queryset.filter(content_type=self.current_content_type)
379
380 if "slug" in self.kwargs:
381 queryset = queryset.filter(content_public_slug=self.kwargs["slug"])
382
383 obj = queryset.order_by("publication_date").last() # 'last' version must be the most recent to be published
384
385 if obj is None:
386 raise Http404("Aucun contenu ne possède ce slug.")
387
388 # Redirection ?
389 if obj.must_redirect:
390 if obj.content.public_version and self.redirection_is_needed:
391 raise MustRedirect(self.get_redirect_url(obj))
392 elif obj.content.public_version and not self.redirection_is_needed:
393 obj = obj.content.public_version
394 else: # should only happen if the content is unpublished
395 raise Http404("La redirection est activée mais le contenu n'est pas public.")
396
397 self.is_author = self.request.user in obj.authors.all()
398 self.is_staff = self.request.user.has_perm("tutorialv2.change_publishablecontent")
399
400 self.current_content_type = obj.content_type
401 if obj and obj.content.last_note:
402 mark_read(obj.content, self.request.user)
403 return obj
404
405 def get_object(self):
406
407 obj = self.public_content_object.content
408 if obj is None:
409 raise Http404("Le contenu de la publication n'est pas trouvé.")
410 return obj
411
412 def get_versioned_object(self):
413
414 return self.public_content_object.load_public_version_or_404()
415
416
417 class SingleOnlineContentDetailViewMixin(SingleOnlineContentViewMixin, DetailView):
418 """
419 This enhanced DetailView ensures,
420
421 - by rewriting `get()`, that:
422 * `self.object` contains the result of `get_object()` (as it must be if `get()` was not rewritten)
423 * Redirection is made if we catch `MustRedirect`
424 * `self.versioned_object` contains a PublicContent object
425 * `self.public_content_object` contains a PublishedContent object
426 - by surcharging `get_context_data()`, that
427 * context['content'] is set
428 * context['is_staff'] is set
429 * context['can_edit'] is set
430 * context['public_object'] is set
431 * context['is_antispam'] is set
432 * context['db_content'] is set with the PublishableContent instance
433 """
434
435 def get(self, request, *args, **kwargs):
436
437 try:
438 self.public_content_object = self.get_public_object()
439 except MustRedirect as redirection_url:
440 return HttpResponsePermanentRedirect(redirection_url.url)
441
442 self.object = self.get_object()
443 self.versioned_object = self.get_versioned_object()
444 context = self.get_context_data(object=self.object)
445 follow = ContentRead.objects.filter(user__pk=self.request.user.pk).filter(content__pk=self.object.pk).first()
446 if follow is not None:
447 follow.note = self.object.last_note
448 follow.save()
449
450 return self.render_to_response(context)
451
452 def get_context_data(self, **kwargs):
453
454 context = super().get_context_data(**kwargs)
455
456 context["content"] = self.versioned_object
457 context["is_obsolete"] = self.object.is_obsolete
458 context["public_object"] = self.public_content_object
459 context["can_edit"] = self.request.user in self.object.authors.all()
460 context["is_antispam"] = self.object.antispam(self.request.user)
461 context["is_staff"] = self.is_staff
462 context["is_author"] = self.is_author
463 context["db_content"] = self.object
464 return context
465
466
467 class SingleOnlineContentFormViewMixin(SingleOnlineContentViewMixin, ModalFormView):
468 """
469 This enhanced FormView ensure,
470
471 - by surcharging `dispatch()`, that:
472 * `self.public_content_object` contains a PublishedContent object
473 * `self.object` contains the result of `get_object()` (as for DetailView)
474 * `self.versioned_object` contains the results of `get_versioned_object()`
475 - by surcharging `get_context_data()`, that
476 * context['content'] is set
477 * context['public_object'] is set
478
479
480 Note: does not catch `MustRedirect`, so you should not use a `slug` with POST request
481 """
482
483 denied_if_lock = False # denied the use of the form if the content is locked
484
485 def dispatch(self, request, *args, **kwargs):
486 self.public_content_object = self.get_public_object()
487 self.object = self.get_object()
488 self.versioned_object = self.get_versioned_object()
489
490 if self.denied_if_lock and self.object.is_locked:
491 raise PermissionDenied
492
493 return super().dispatch(request, *args, **kwargs)
494
495 def get_context_data(self, **kwargs):
496 context = super().get_context_data(**kwargs)
497
498 context["content"] = self.versioned_object
499 context["public_object"] = self.public_content_object
500
501 return context
502
503
504 class DownloadViewMixin(View):
505 """Basic View to return a file to download
506
507 (inspired from https://djangosnippets.org/snippets/2549/ and
508 http://stackoverflow.com/questions/16286666/send-a-file-through-django-class-based-views)
509
510 You just need to override `get_contents()` to make it works
511 """
512
513 mimetype = None
514 filename = None
515
516 def get_mimetype(self):
517 return self.mimetype
518
519 def get_filename(self):
520 return self.filename
521
522 def get_contents(self):
523 pass
524
525 def get(self, context, **response_kwargs):
526 """
527 Access to a file with only get method then write the file content in response stream.
528 Properly sets Content-Type and Content-Disposition headers
529 """
530 response = HttpResponse(content_type=self.get_mimetype())
531 response["Content-Disposition"] = "filename=" + self.get_filename()
532 response.write(self.get_contents())
533
534 return response
535
536
537 class SingleContentDownloadViewMixin(SingleContentViewMixin, DownloadViewMixin):
538 """
539 Ensure, by rewritring ``get()``, that
540 - `self.object` contains the result of `get_object()` (as it must be if `get()` is not rewritten)
541 - `self.sha` is set according to `self.request.GET['version']` (if any) and `self.object.sha_draft` otherwise
542 - `self.versioned_object` contains the results of `get_versioned_object()`
543 """
544
545 def get(self, context, **response_kwargs):
546 self.object = self.get_object()
547
548 if not self.sha:
549 try:
550 self.sha = self.request.GET["version"]
551 except KeyError:
552 self.sha = self.object.sha_draft
553
554 self.versioned_object = self.get_versioned_object()
555
556 return super().get(context, **response_kwargs)
557
558
559 class RequiresValidationViewMixin(SingleContentDetailViewMixin):
560 """
561 Ensure the content require validation before publication.
562 """
563
564 def get(self, request, *args, **kwargs):
565 if not self.get_object().requires_validation():
566 raise PermissionDenied
567 return super().get(request, *args, **kwargs)
568
569
570 class DoesNotRequireValidationFormViewMixin(SingleContentFormViewMixin):
571 """
572 Ensure the content do not require validation before publication.
573 """
574
575 def get_form_kwargs(self):
576 if self.versioned_object.requires_validation():
577 raise PermissionDenied
578 return super().get_form_kwargs()
579
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zds/tutorialv2/mixins.py b/zds/tutorialv2/mixins.py
--- a/zds/tutorialv2/mixins.py
+++ b/zds/tutorialv2/mixins.py
@@ -221,7 +221,7 @@
class SingleContentDetailViewMixin(SingleContentViewMixin, DetailView):
"""
- This enhanced DetailView ensure,
+ This enhanced DetailView ensures,
- by rewriting `get()`, that:
* `self.object` contains the result of `get_object()` (as it must be if `get()` is not rewritten)
| {"golden_diff": "diff --git a/zds/tutorialv2/mixins.py b/zds/tutorialv2/mixins.py\n--- a/zds/tutorialv2/mixins.py\n+++ b/zds/tutorialv2/mixins.py\n@@ -221,7 +221,7 @@\n \n class SingleContentDetailViewMixin(SingleContentViewMixin, DetailView):\n \"\"\"\n- This enhanced DetailView ensure,\n+ This enhanced DetailView ensures,\n \n - by rewriting `get()`, that:\n * `self.object` contains the result of `get_object()` (as it must be if `get()` is not rewritten)\n", "issue": "Bouton \"Comparer avec la version en ligne\" quand les versions sont identiques\n**Description du bug**\r\n\r\nDans un tutoriel publi\u00e9 j'ai le bouton \"Comparer avec la version en ligne\" dans la sidebar \"Actions\" (\u00e0 gauche) alors que **les deux versions sont identiques**\r\n\r\n**Comment reproduire ?**\r\n\r\nLa liste des \u00e9tapes qui permet de reproduire le bug :\r\n\r\n1. Allez sur un tutoriel publi\u00e9 dont la version en ligne et publi\u00e9 sont identique (le sien ~~ou celui d'un autre en \u00e9tant staff~~) ;\r\n2. Constater que le bouton est pr\u00e9sent et que les deux versions sont identiques dans le lien du bouton `?from=cacc1f5d99201aa1977b3a95889611dc3ee7f9ff&to=cacc1f5d99201aa1977b3a95889611dc3ee7f9ff`\r\n\r\n**Comportement attendu**\r\n\r\nV\u00e9rifier que les versions soient diff\u00e9rentes : \r\n - Si elles sont diff\u00e9rentes, on affiche le bouton ;\r\n - Si elles sont identique, on masque le bouton. ***OU remplacer le bouton** par un texte : \"Version identique\".*\r\n\r\n**Capture d'\u00e9cran**\r\n\r\n\r\n\n", "before_files": [{"content": "from django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\nfrom django.urls import reverse\nfrom django.http import Http404, HttpResponse, HttpResponsePermanentRedirect, StreamingHttpResponse\nfrom django.template.loader import render_to_string\nfrom django.shortcuts import redirect\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.generic import DetailView, FormView\nfrom django.views.generic import View\n\nfrom zds.forum.models import Topic\nfrom zds.tutorialv2.models.database import PublishableContent, PublishedContent, ContentRead\nfrom zds.tutorialv2.utils import mark_read\nfrom zds.utils.models import HelpWriting\n\n\nclass SingleContentViewMixin:\n \"\"\"\n Base mixin to get only one content, and its corresponding versioned content\n\n Deals with URL resolution in the following way:\n\n 1. In ``get_object()``:\n - Fetch the ``PublishableContent`` according to ``self.kwargs['pk']``, ``self.request.GET['pk']`` or \\\n ``self.request.POST['pk']`` (one of these have to be defined). Raise `Http404` if any.\n - Then, check permissions with respect to ``self.must_be_author`` and ``self.authorized_for_staff`` \\\n (and define ``self.is_staff`` and ``self.is_author``). Raise ``PermissionDenied`` if any.\n\n 2. In ``get_versioned_object()``:\n - Deal with sha : assume ``self.object.sha_draft`` by default, but reset according to \\\n ``self.request.GET['version']``, if exists. \\\n Then, check ``self.only_draft_version`` and raise ``PermissionDenied`` if any\n - Fetch the ``VersionedContent``. Due to the use of ``self.object.load_version_or_404(sha)``,\\\n raise ``Http404``.\n - Check if its the beta or public version, and allow access if it's the case. Raise ``PermissionDenied``.\n - Check slug if ``self.kwargs['slug']`` is defined. Raise ``Http404`` if any.\n\n 3. 
In ``get_public_object()``, fetch the last published version, if any\n\n Any redefinition of any of these two functions should take care of those points.\n \"\"\"\n\n object = None\n versioned_object = None\n public_content_object = None\n\n prefetch_all = True\n sha = None\n must_be_author = True\n authorized_for_staff = True\n is_staff = False\n is_author = False\n only_draft_version = True\n must_redirect = False\n public_is_prioritary = True\n\n def get_object(self, queryset=None):\n \"\"\"Get database representation of the content by its `pk`, then check permissions\"\"\"\n\n # fetch object:\n try:\n if \"pk\" in self.kwargs:\n pk = int(self.kwargs[\"pk\"])\n elif \"pk\" in self.request.GET:\n pk = int(self.request.GET[\"pk\"])\n elif \"pk\" in self.request.POST:\n pk = int(self.request.POST[\"pk\"])\n else:\n raise Http404(\"Impossible de trouver le param\u00e8tre 'pk'.\")\n except ValueError as badvalue:\n raise Http404(f\"La valeur du param\u00e8tre pk '{badvalue}' n'est pas un entier valide.\")\n\n queryset = queryset or PublishableContent.objects\n\n if self.prefetch_all:\n queryset = queryset.select_related(\"licence\").prefetch_related(\"authors\").prefetch_related(\"subcategory\")\n obj = queryset.filter(pk=pk).first()\n\n if not obj:\n raise Http404(\"Aucun contenu ne poss\u00e8de cet identifiant.\")\n\n # check permissions:\n self.is_staff = self.request.user.has_perm(\"tutorialv2.change_publishablecontent\")\n self.is_author = self.request.user in obj.authors.all()\n\n if self.must_be_author and not self.is_author:\n if not self.authorized_for_staff or (self.authorized_for_staff and not self.is_staff):\n raise PermissionDenied\n\n return obj\n\n def get_versioned_object(self):\n \"\"\"Gets the asked version of current content.\"\"\"\n\n # fetch version:\n sha = self.object.sha_draft\n\n if not self.only_draft_version:\n if self.sha:\n sha = self.sha\n else:\n if \"version\" in self.request.GET:\n sha = self.request.GET[\"version\"]\n elif \"version\" in self.request.POST:\n sha = self.request.POST[\"version\"]\n\n self.sha = sha\n\n # if beta or public version, user can also access to it\n is_beta = self.object.is_beta(self.sha)\n is_public = self.object.is_public(self.sha) and self.public_is_prioritary\n\n if not is_beta and not is_public and not self.is_author:\n if not self.is_staff or (not self.authorized_for_staff and self.must_be_author):\n raise PermissionDenied\n\n # load versioned file\n versioned = self.object.load_version_or_404(self.sha)\n\n # check slug, if any:\n if \"slug\" in self.kwargs:\n slug = self.kwargs[\"slug\"]\n if versioned.slug != slug:\n if slug != self.object.slug: # retro-compatibility, but should raise permanent redirect instead\n raise Http404(\"Ce slug n'existe pas pour ce contenu.\")\n\n return versioned\n\n def get_public_object(self):\n \"\"\"Get the published version, if any\"\"\"\n\n object = PublishedContent.objects.filter(content_pk=self.object.pk, must_redirect=False).last()\n if object:\n object.load_public_version()\n return object\n\n\nclass SingleContentPostMixin(SingleContentViewMixin):\n \"\"\"\n Base mixin used to get content from post query\n \"\"\"\n\n # represent the fact that we have to check if the version given in self.request.POST['version'] exists\n versioned = True\n\n def get_object(self, queryset=None):\n self.object = super().get_object()\n\n if self.versioned and \"version\" in self.request.POST[\"version\"]:\n self.object.load_version_or_404(sha=self.request.POST[\"version\"])\n return self.object\n\n\nclass 
ModalFormView(FormView):\n \"\"\"If `self.modal_form` is set `True`, this class will ensure that the redirection is made to the previous page\n if an error appear\"\"\"\n\n modal_form = False # `form_invalid()` will behave differently if `True`, see implementation below\n\n def form_invalid(self, form):\n \"\"\"If `self.modal_form` is set `True`, this function is rewritten to send back to the previous page\n with an error message, instead of using the form template which is normally provided.\n\n The redirection is made to `form.previous_page_url`, if exists, `content:view` otherwise.\"\"\"\n\n if not self.modal_form:\n return super().form_invalid(form)\n else:\n errors = form.errors.as_data()\n if len(errors) > 0:\n # only the first error is provided\n error_message = list(errors.values())[0][0].messages[0]\n messages.error(self.request, error_message)\n else:\n messages.error(self.request, _(\"Une erreur inconnue est survenue durant le traitement des donn\u00e9es.\"))\n\n if hasattr(form, \"previous_page_url\"):\n return redirect(form.previous_page_url)\n else:\n return redirect(reverse(\"content:view\")) # assume a default url\n\n\nclass FormWithPreview(FormView):\n def post(self, request, *args, **kwargs):\n form = self.form_class(request.POST)\n\n if \"preview\" in request.POST:\n self.form_invalid(form)\n if request.is_ajax():\n content = render_to_string(\"misc/preview.part.html\", {\"text\": request.POST.get(\"text\")})\n return StreamingHttpResponse(content)\n\n return super().post(request, *args, **kwargs)\n\n\nclass SingleContentFormViewMixin(SingleContentViewMixin, ModalFormView):\n \"\"\"\n This enhanced FormView ensure,\n\n - by surcharging `dispatch()`, that:\n * `self.object` contains the result of `get_object()` (as for DetailView)\n * `self.versioned_object` contains the results of `get_versioned_object()`\n - by surcharging `get_context_data()`, that\n * context['content'] contains `self.versioned_object`\n \"\"\"\n\n def dispatch(self, request, *args, **kwargs):\n self.object = self.get_object()\n self.versioned_object = self.get_versioned_object()\n if self.object.sha_public:\n self.public_content_object = self.get_public_object()\n\n return super().dispatch(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"content\"] = self.versioned_object\n context[\"is_staff\"] = self.is_staff\n return context\n\n\nclass SingleContentDetailViewMixin(SingleContentViewMixin, DetailView):\n \"\"\"\n This enhanced DetailView ensure,\n\n - by rewriting `get()`, that:\n * `self.object` contains the result of `get_object()` (as it must be if `get()` is not rewritten)\n * `self.sha` is set according to `self.request.GET['version']` (if any) and `self.object.sha_draft` otherwise\n * `self.versioned_object` contains the results of `get_versioned_object()`\n - by surcharging `get_context_data()`, that\n * context['content'] contains `self.versioned_object`\n * context['can_edit'] is set\n * context['version'] is set (if different from `self.object.sha_draft`)\n * context['beta_topic'] is set (if any)\n \"\"\"\n\n def get(self, request, *args, **kwargs):\n self.object = self.get_object()\n\n if not self.sha:\n try:\n self.sha = request.GET[\"version\"]\n except KeyError:\n self.sha = self.object.sha_draft\n\n self.versioned_object = self.get_versioned_object()\n if self.object.sha_public:\n self.public_content_object = self.get_public_object()\n\n context = self.get_context_data(object=self.object)\n return 
self.render_to_response(context)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"helps\"] = list(HelpWriting.objects.all())\n context[\"content_helps\"] = list(self.object.helps.all())\n context[\"content\"] = self.versioned_object\n context[\"can_edit\"] = self.is_author\n context[\"is_staff\"] = self.is_staff\n if self.object.type == \"OPINION\":\n context[\"can_publish\"] = not self.object.is_permanently_unpublished()\n if self.sha != self.object.sha_draft:\n context[\"version\"] = self.sha\n\n is_allowed = self.is_author or self.is_staff\n is_same_version = not self.sha or self.sha == self.object.sha_draft\n context[\"can_add_something\"] = is_allowed and is_same_version\n\n if self.object.beta_topic:\n beta_topic = Topic.objects.get(pk=self.object.beta_topic.pk)\n\n if beta_topic:\n context[\"beta_topic\"] = beta_topic\n\n return context\n\n\nclass ContentTypeMixin:\n \"\"\"This class deals with the type of contents and fill context according to that\"\"\"\n\n current_content_type = None\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n v_type_name = _(\"contenu\")\n v_type_name_plural = _(\"contenus\")\n\n if self.current_content_type == \"ARTICLE\":\n v_type_name = _(\"article\")\n v_type_name_plural = _(\"articles\")\n\n if self.current_content_type == \"TUTORIAL\":\n v_type_name = _(\"tutoriel\")\n v_type_name_plural = _(\"tutoriels\")\n\n if self.current_content_type == \"OPINION\":\n v_type_name = _(\"billet\")\n v_type_name_plural = _(\"billets\")\n\n context[\"current_content_type\"] = self.current_content_type\n context[\"verbose_type_name\"] = v_type_name\n context[\"verbose_type_name_plural\"] = v_type_name_plural\n\n return context\n\n\nclass MustRedirect(Exception):\n \"\"\"Exception raised when this is not the last version of the content which is called\"\"\"\n\n def __init__(self, url, *args, **kwargs):\n \"\"\"\n initialize the exception\n\n :param url: the targetted url\n :param args: exception *args\n :param kwargs: exception **kwargs\n \"\"\"\n super().__init__(*args, **kwargs)\n self.url = url\n\n\nclass SingleOnlineContentViewMixin(ContentTypeMixin):\n\n \"\"\"\n Base mixin to get only one content online content\n\n Deals with URL resolution in the following way:\n\n 1. In `get_object()`:\n - Fetch the ``PublicContent`` according to ``self.kwargs['pk']``, ``self.request.GET['pk']`` or \\\n ``self.request.POST['pk']`` 0(one of these have to be defined). Raise ``Http404`` if any.\n - Check if ``self.current_content_type`` if defined, and use it if it's the case\n - Check if ``slug`` is defined, also check object it if it's the case\n - Then, define ``self.is_staff`` and ``self.is_author``.\n 2. In ``get_versioned_object()``: Fetch the ``VersionedContent``. 
Due to the use of\n ``self.public_content_object.load_public_version_or_404()``, raise ``Http404`` if any.\n\n Any redefinition of any of these two functions should take care of those points.\n\n \"\"\"\n\n object = None\n public_content_object = None\n versioned_object = None\n redirection_is_needed = True\n\n is_author = False\n is_staff = False\n\n def get_redirect_url(self, public_version):\n \"\"\"Return the most recent url, based on the current public version\"\"\"\n return public_version.content.public_version.get_absolute_url_online()\n\n def get_public_object(self):\n try:\n if \"pk\" in self.kwargs:\n pk = int(self.kwargs[\"pk\"])\n elif \"pk\" in self.request.GET:\n pk = int(self.request.GET[\"pk\"])\n elif \"pk\" in self.request.POST:\n pk = int(self.request.POST[\"pk\"])\n else:\n raise Http404(\"Impossible de trouver le param\u00e8tre 'pk'.\")\n except ValueError as badvalue:\n raise Http404(f\"La valeur du param\u00e8tre pk '{badvalue}' n'est pas un entier valide.\")\n queryset = (\n PublishedContent.objects.filter(content_pk=pk)\n .prefetch_related(\"content\")\n .prefetch_related(\"content__authors\")\n .prefetch_related(\"content__subcategory\")\n .prefetch_related(\"content__tags\")\n .prefetch_related(\"content__public_version\")\n .select_related(\"content__last_note\")\n )\n\n if self.current_content_type:\n queryset = queryset.filter(content_type=self.current_content_type)\n\n if \"slug\" in self.kwargs:\n queryset = queryset.filter(content_public_slug=self.kwargs[\"slug\"])\n\n obj = queryset.order_by(\"publication_date\").last() # 'last' version must be the most recent to be published\n\n if obj is None:\n raise Http404(\"Aucun contenu ne poss\u00e8de ce slug.\")\n\n # Redirection ?\n if obj.must_redirect:\n if obj.content.public_version and self.redirection_is_needed:\n raise MustRedirect(self.get_redirect_url(obj))\n elif obj.content.public_version and not self.redirection_is_needed:\n obj = obj.content.public_version\n else: # should only happen if the content is unpublished\n raise Http404(\"La redirection est activ\u00e9e mais le contenu n'est pas public.\")\n\n self.is_author = self.request.user in obj.authors.all()\n self.is_staff = self.request.user.has_perm(\"tutorialv2.change_publishablecontent\")\n\n self.current_content_type = obj.content_type\n if obj and obj.content.last_note:\n mark_read(obj.content, self.request.user)\n return obj\n\n def get_object(self):\n\n obj = self.public_content_object.content\n if obj is None:\n raise Http404(\"Le contenu de la publication n'est pas trouv\u00e9.\")\n return obj\n\n def get_versioned_object(self):\n\n return self.public_content_object.load_public_version_or_404()\n\n\nclass SingleOnlineContentDetailViewMixin(SingleOnlineContentViewMixin, DetailView):\n \"\"\"\n This enhanced DetailView ensures,\n\n - by rewriting `get()`, that:\n * `self.object` contains the result of `get_object()` (as it must be if `get()` was not rewritten)\n * Redirection is made if we catch `MustRedirect`\n * `self.versioned_object` contains a PublicContent object\n * `self.public_content_object` contains a PublishedContent object\n - by surcharging `get_context_data()`, that\n * context['content'] is set\n * context['is_staff'] is set\n * context['can_edit'] is set\n * context['public_object'] is set\n * context['is_antispam'] is set\n * context['db_content'] is set with the PublishableContent instance\n \"\"\"\n\n def get(self, request, *args, **kwargs):\n\n try:\n self.public_content_object = self.get_public_object()\n except 
MustRedirect as redirection_url:\n return HttpResponsePermanentRedirect(redirection_url.url)\n\n self.object = self.get_object()\n self.versioned_object = self.get_versioned_object()\n context = self.get_context_data(object=self.object)\n follow = ContentRead.objects.filter(user__pk=self.request.user.pk).filter(content__pk=self.object.pk).first()\n if follow is not None:\n follow.note = self.object.last_note\n follow.save()\n\n return self.render_to_response(context)\n\n def get_context_data(self, **kwargs):\n\n context = super().get_context_data(**kwargs)\n\n context[\"content\"] = self.versioned_object\n context[\"is_obsolete\"] = self.object.is_obsolete\n context[\"public_object\"] = self.public_content_object\n context[\"can_edit\"] = self.request.user in self.object.authors.all()\n context[\"is_antispam\"] = self.object.antispam(self.request.user)\n context[\"is_staff\"] = self.is_staff\n context[\"is_author\"] = self.is_author\n context[\"db_content\"] = self.object\n return context\n\n\nclass SingleOnlineContentFormViewMixin(SingleOnlineContentViewMixin, ModalFormView):\n \"\"\"\n This enhanced FormView ensure,\n\n - by surcharging `dispatch()`, that:\n * `self.public_content_object` contains a PublishedContent object\n * `self.object` contains the result of `get_object()` (as for DetailView)\n * `self.versioned_object` contains the results of `get_versioned_object()`\n - by surcharging `get_context_data()`, that\n * context['content'] is set\n * context['public_object'] is set\n\n\n Note: does not catch `MustRedirect`, so you should not use a `slug` with POST request\n \"\"\"\n\n denied_if_lock = False # denied the use of the form if the content is locked\n\n def dispatch(self, request, *args, **kwargs):\n self.public_content_object = self.get_public_object()\n self.object = self.get_object()\n self.versioned_object = self.get_versioned_object()\n\n if self.denied_if_lock and self.object.is_locked:\n raise PermissionDenied\n\n return super().dispatch(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n context[\"content\"] = self.versioned_object\n context[\"public_object\"] = self.public_content_object\n\n return context\n\n\nclass DownloadViewMixin(View):\n \"\"\"Basic View to return a file to download\n\n (inspired from https://djangosnippets.org/snippets/2549/ and\n http://stackoverflow.com/questions/16286666/send-a-file-through-django-class-based-views)\n\n You just need to override `get_contents()` to make it works\n \"\"\"\n\n mimetype = None\n filename = None\n\n def get_mimetype(self):\n return self.mimetype\n\n def get_filename(self):\n return self.filename\n\n def get_contents(self):\n pass\n\n def get(self, context, **response_kwargs):\n \"\"\"\n Access to a file with only get method then write the file content in response stream.\n Properly sets Content-Type and Content-Disposition headers\n \"\"\"\n response = HttpResponse(content_type=self.get_mimetype())\n response[\"Content-Disposition\"] = \"filename=\" + self.get_filename()\n response.write(self.get_contents())\n\n return response\n\n\nclass SingleContentDownloadViewMixin(SingleContentViewMixin, DownloadViewMixin):\n \"\"\"\n Ensure, by rewritring ``get()``, that\n - `self.object` contains the result of `get_object()` (as it must be if `get()` is not rewritten)\n - `self.sha` is set according to `self.request.GET['version']` (if any) and `self.object.sha_draft` otherwise\n - `self.versioned_object` contains the results of `get_versioned_object()`\n 
\"\"\"\n\n def get(self, context, **response_kwargs):\n self.object = self.get_object()\n\n if not self.sha:\n try:\n self.sha = self.request.GET[\"version\"]\n except KeyError:\n self.sha = self.object.sha_draft\n\n self.versioned_object = self.get_versioned_object()\n\n return super().get(context, **response_kwargs)\n\n\nclass RequiresValidationViewMixin(SingleContentDetailViewMixin):\n \"\"\"\n Ensure the content require validation before publication.\n \"\"\"\n\n def get(self, request, *args, **kwargs):\n if not self.get_object().requires_validation():\n raise PermissionDenied\n return super().get(request, *args, **kwargs)\n\n\nclass DoesNotRequireValidationFormViewMixin(SingleContentFormViewMixin):\n \"\"\"\n Ensure the content do not require validation before publication.\n \"\"\"\n\n def get_form_kwargs(self):\n if self.versioned_object.requires_validation():\n raise PermissionDenied\n return super().get_form_kwargs()\n", "path": "zds/tutorialv2/mixins.py"}], "after_files": [{"content": "from django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\nfrom django.urls import reverse\nfrom django.http import Http404, HttpResponse, HttpResponsePermanentRedirect, StreamingHttpResponse\nfrom django.template.loader import render_to_string\nfrom django.shortcuts import redirect\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.generic import DetailView, FormView\nfrom django.views.generic import View\n\nfrom zds.forum.models import Topic\nfrom zds.tutorialv2.models.database import PublishableContent, PublishedContent, ContentRead\nfrom zds.tutorialv2.utils import mark_read\nfrom zds.utils.models import HelpWriting\n\n\nclass SingleContentViewMixin:\n \"\"\"\n Base mixin to get only one content, and its corresponding versioned content\n\n Deals with URL resolution in the following way:\n\n 1. In ``get_object()``:\n - Fetch the ``PublishableContent`` according to ``self.kwargs['pk']``, ``self.request.GET['pk']`` or \\\n ``self.request.POST['pk']`` (one of these have to be defined). Raise `Http404` if any.\n - Then, check permissions with respect to ``self.must_be_author`` and ``self.authorized_for_staff`` \\\n (and define ``self.is_staff`` and ``self.is_author``). Raise ``PermissionDenied`` if any.\n\n 2. In ``get_versioned_object()``:\n - Deal with sha : assume ``self.object.sha_draft`` by default, but reset according to \\\n ``self.request.GET['version']``, if exists. \\\n Then, check ``self.only_draft_version`` and raise ``PermissionDenied`` if any\n - Fetch the ``VersionedContent``. Due to the use of ``self.object.load_version_or_404(sha)``,\\\n raise ``Http404``.\n - Check if its the beta or public version, and allow access if it's the case. Raise ``PermissionDenied``.\n - Check slug if ``self.kwargs['slug']`` is defined. Raise ``Http404`` if any.\n\n 3. 
In ``get_public_object()``, fetch the last published version, if any\n\n Any redefinition of any of these two functions should take care of those points.\n \"\"\"\n\n object = None\n versioned_object = None\n public_content_object = None\n\n prefetch_all = True\n sha = None\n must_be_author = True\n authorized_for_staff = True\n is_staff = False\n is_author = False\n only_draft_version = True\n must_redirect = False\n public_is_prioritary = True\n\n def get_object(self, queryset=None):\n \"\"\"Get database representation of the content by its `pk`, then check permissions\"\"\"\n\n # fetch object:\n try:\n if \"pk\" in self.kwargs:\n pk = int(self.kwargs[\"pk\"])\n elif \"pk\" in self.request.GET:\n pk = int(self.request.GET[\"pk\"])\n elif \"pk\" in self.request.POST:\n pk = int(self.request.POST[\"pk\"])\n else:\n raise Http404(\"Impossible de trouver le param\u00e8tre 'pk'.\")\n except ValueError as badvalue:\n raise Http404(f\"La valeur du param\u00e8tre pk '{badvalue}' n'est pas un entier valide.\")\n\n queryset = queryset or PublishableContent.objects\n\n if self.prefetch_all:\n queryset = queryset.select_related(\"licence\").prefetch_related(\"authors\").prefetch_related(\"subcategory\")\n obj = queryset.filter(pk=pk).first()\n\n if not obj:\n raise Http404(\"Aucun contenu ne poss\u00e8de cet identifiant.\")\n\n # check permissions:\n self.is_staff = self.request.user.has_perm(\"tutorialv2.change_publishablecontent\")\n self.is_author = self.request.user in obj.authors.all()\n\n if self.must_be_author and not self.is_author:\n if not self.authorized_for_staff or (self.authorized_for_staff and not self.is_staff):\n raise PermissionDenied\n\n return obj\n\n def get_versioned_object(self):\n \"\"\"Gets the asked version of current content.\"\"\"\n\n # fetch version:\n sha = self.object.sha_draft\n\n if not self.only_draft_version:\n if self.sha:\n sha = self.sha\n else:\n if \"version\" in self.request.GET:\n sha = self.request.GET[\"version\"]\n elif \"version\" in self.request.POST:\n sha = self.request.POST[\"version\"]\n\n self.sha = sha\n\n # if beta or public version, user can also access to it\n is_beta = self.object.is_beta(self.sha)\n is_public = self.object.is_public(self.sha) and self.public_is_prioritary\n\n if not is_beta and not is_public and not self.is_author:\n if not self.is_staff or (not self.authorized_for_staff and self.must_be_author):\n raise PermissionDenied\n\n # load versioned file\n versioned = self.object.load_version_or_404(self.sha)\n\n # check slug, if any:\n if \"slug\" in self.kwargs:\n slug = self.kwargs[\"slug\"]\n if versioned.slug != slug:\n if slug != self.object.slug: # retro-compatibility, but should raise permanent redirect instead\n raise Http404(\"Ce slug n'existe pas pour ce contenu.\")\n\n return versioned\n\n def get_public_object(self):\n \"\"\"Get the published version, if any\"\"\"\n\n object = PublishedContent.objects.filter(content_pk=self.object.pk, must_redirect=False).last()\n if object:\n object.load_public_version()\n return object\n\n\nclass SingleContentPostMixin(SingleContentViewMixin):\n \"\"\"\n Base mixin used to get content from post query\n \"\"\"\n\n # represent the fact that we have to check if the version given in self.request.POST['version'] exists\n versioned = True\n\n def get_object(self, queryset=None):\n self.object = super().get_object()\n\n if self.versioned and \"version\" in self.request.POST[\"version\"]:\n self.object.load_version_or_404(sha=self.request.POST[\"version\"])\n return self.object\n\n\nclass 
ModalFormView(FormView):\n \"\"\"If `self.modal_form` is set `True`, this class will ensure that the redirection is made to the previous page\n if an error appear\"\"\"\n\n modal_form = False # `form_invalid()` will behave differently if `True`, see implementation below\n\n def form_invalid(self, form):\n \"\"\"If `self.modal_form` is set `True`, this function is rewritten to send back to the previous page\n with an error message, instead of using the form template which is normally provided.\n\n The redirection is made to `form.previous_page_url`, if exists, `content:view` otherwise.\"\"\"\n\n if not self.modal_form:\n return super().form_invalid(form)\n else:\n errors = form.errors.as_data()\n if len(errors) > 0:\n # only the first error is provided\n error_message = list(errors.values())[0][0].messages[0]\n messages.error(self.request, error_message)\n else:\n messages.error(self.request, _(\"Une erreur inconnue est survenue durant le traitement des donn\u00e9es.\"))\n\n if hasattr(form, \"previous_page_url\"):\n return redirect(form.previous_page_url)\n else:\n return redirect(reverse(\"content:view\")) # assume a default url\n\n\nclass FormWithPreview(FormView):\n def post(self, request, *args, **kwargs):\n form = self.form_class(request.POST)\n\n if \"preview\" in request.POST:\n self.form_invalid(form)\n if request.is_ajax():\n content = render_to_string(\"misc/preview.part.html\", {\"text\": request.POST.get(\"text\")})\n return StreamingHttpResponse(content)\n\n return super().post(request, *args, **kwargs)\n\n\nclass SingleContentFormViewMixin(SingleContentViewMixin, ModalFormView):\n \"\"\"\n This enhanced FormView ensure,\n\n - by surcharging `dispatch()`, that:\n * `self.object` contains the result of `get_object()` (as for DetailView)\n * `self.versioned_object` contains the results of `get_versioned_object()`\n - by surcharging `get_context_data()`, that\n * context['content'] contains `self.versioned_object`\n \"\"\"\n\n def dispatch(self, request, *args, **kwargs):\n self.object = self.get_object()\n self.versioned_object = self.get_versioned_object()\n if self.object.sha_public:\n self.public_content_object = self.get_public_object()\n\n return super().dispatch(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"content\"] = self.versioned_object\n context[\"is_staff\"] = self.is_staff\n return context\n\n\nclass SingleContentDetailViewMixin(SingleContentViewMixin, DetailView):\n \"\"\"\n This enhanced DetailView ensures,\n\n - by rewriting `get()`, that:\n * `self.object` contains the result of `get_object()` (as it must be if `get()` is not rewritten)\n * `self.sha` is set according to `self.request.GET['version']` (if any) and `self.object.sha_draft` otherwise\n * `self.versioned_object` contains the results of `get_versioned_object()`\n - by surcharging `get_context_data()`, that\n * context['content'] contains `self.versioned_object`\n * context['can_edit'] is set\n * context['version'] is set (if different from `self.object.sha_draft`)\n * context['beta_topic'] is set (if any)\n \"\"\"\n\n def get(self, request, *args, **kwargs):\n self.object = self.get_object()\n\n if not self.sha:\n try:\n self.sha = request.GET[\"version\"]\n except KeyError:\n self.sha = self.object.sha_draft\n\n self.versioned_object = self.get_versioned_object()\n if self.object.sha_public:\n self.public_content_object = self.get_public_object()\n\n context = self.get_context_data(object=self.object)\n return 
self.render_to_response(context)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"helps\"] = list(HelpWriting.objects.all())\n context[\"content_helps\"] = list(self.object.helps.all())\n context[\"content\"] = self.versioned_object\n context[\"can_edit\"] = self.is_author\n context[\"is_staff\"] = self.is_staff\n if self.object.type == \"OPINION\":\n context[\"can_publish\"] = not self.object.is_permanently_unpublished()\n if self.sha != self.object.sha_draft:\n context[\"version\"] = self.sha\n\n is_allowed = self.is_author or self.is_staff\n is_same_version = not self.sha or self.sha == self.object.sha_draft\n context[\"can_add_something\"] = is_allowed and is_same_version\n\n if self.object.beta_topic:\n beta_topic = Topic.objects.get(pk=self.object.beta_topic.pk)\n\n if beta_topic:\n context[\"beta_topic\"] = beta_topic\n\n return context\n\n\nclass ContentTypeMixin:\n \"\"\"This class deals with the type of contents and fill context according to that\"\"\"\n\n current_content_type = None\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n v_type_name = _(\"contenu\")\n v_type_name_plural = _(\"contenus\")\n\n if self.current_content_type == \"ARTICLE\":\n v_type_name = _(\"article\")\n v_type_name_plural = _(\"articles\")\n\n if self.current_content_type == \"TUTORIAL\":\n v_type_name = _(\"tutoriel\")\n v_type_name_plural = _(\"tutoriels\")\n\n if self.current_content_type == \"OPINION\":\n v_type_name = _(\"billet\")\n v_type_name_plural = _(\"billets\")\n\n context[\"current_content_type\"] = self.current_content_type\n context[\"verbose_type_name\"] = v_type_name\n context[\"verbose_type_name_plural\"] = v_type_name_plural\n\n return context\n\n\nclass MustRedirect(Exception):\n \"\"\"Exception raised when this is not the last version of the content which is called\"\"\"\n\n def __init__(self, url, *args, **kwargs):\n \"\"\"\n initialize the exception\n\n :param url: the targetted url\n :param args: exception *args\n :param kwargs: exception **kwargs\n \"\"\"\n super().__init__(*args, **kwargs)\n self.url = url\n\n\nclass SingleOnlineContentViewMixin(ContentTypeMixin):\n\n \"\"\"\n Base mixin to get only one content online content\n\n Deals with URL resolution in the following way:\n\n 1. In `get_object()`:\n - Fetch the ``PublicContent`` according to ``self.kwargs['pk']``, ``self.request.GET['pk']`` or \\\n ``self.request.POST['pk']`` 0(one of these have to be defined). Raise ``Http404`` if any.\n - Check if ``self.current_content_type`` if defined, and use it if it's the case\n - Check if ``slug`` is defined, also check object it if it's the case\n - Then, define ``self.is_staff`` and ``self.is_author``.\n 2. In ``get_versioned_object()``: Fetch the ``VersionedContent``. 
Due to the use of\n ``self.public_content_object.load_public_version_or_404()``, raise ``Http404`` if any.\n\n Any redefinition of any of these two functions should take care of those points.\n\n \"\"\"\n\n object = None\n public_content_object = None\n versioned_object = None\n redirection_is_needed = True\n\n is_author = False\n is_staff = False\n\n def get_redirect_url(self, public_version):\n \"\"\"Return the most recent url, based on the current public version\"\"\"\n return public_version.content.public_version.get_absolute_url_online()\n\n def get_public_object(self):\n try:\n if \"pk\" in self.kwargs:\n pk = int(self.kwargs[\"pk\"])\n elif \"pk\" in self.request.GET:\n pk = int(self.request.GET[\"pk\"])\n elif \"pk\" in self.request.POST:\n pk = int(self.request.POST[\"pk\"])\n else:\n raise Http404(\"Impossible de trouver le param\u00e8tre 'pk'.\")\n except ValueError as badvalue:\n raise Http404(f\"La valeur du param\u00e8tre pk '{badvalue}' n'est pas un entier valide.\")\n queryset = (\n PublishedContent.objects.filter(content_pk=pk)\n .prefetch_related(\"content\")\n .prefetch_related(\"content__authors\")\n .prefetch_related(\"content__subcategory\")\n .prefetch_related(\"content__tags\")\n .prefetch_related(\"content__public_version\")\n .select_related(\"content__last_note\")\n )\n\n if self.current_content_type:\n queryset = queryset.filter(content_type=self.current_content_type)\n\n if \"slug\" in self.kwargs:\n queryset = queryset.filter(content_public_slug=self.kwargs[\"slug\"])\n\n obj = queryset.order_by(\"publication_date\").last() # 'last' version must be the most recent to be published\n\n if obj is None:\n raise Http404(\"Aucun contenu ne poss\u00e8de ce slug.\")\n\n # Redirection ?\n if obj.must_redirect:\n if obj.content.public_version and self.redirection_is_needed:\n raise MustRedirect(self.get_redirect_url(obj))\n elif obj.content.public_version and not self.redirection_is_needed:\n obj = obj.content.public_version\n else: # should only happen if the content is unpublished\n raise Http404(\"La redirection est activ\u00e9e mais le contenu n'est pas public.\")\n\n self.is_author = self.request.user in obj.authors.all()\n self.is_staff = self.request.user.has_perm(\"tutorialv2.change_publishablecontent\")\n\n self.current_content_type = obj.content_type\n if obj and obj.content.last_note:\n mark_read(obj.content, self.request.user)\n return obj\n\n def get_object(self):\n\n obj = self.public_content_object.content\n if obj is None:\n raise Http404(\"Le contenu de la publication n'est pas trouv\u00e9.\")\n return obj\n\n def get_versioned_object(self):\n\n return self.public_content_object.load_public_version_or_404()\n\n\nclass SingleOnlineContentDetailViewMixin(SingleOnlineContentViewMixin, DetailView):\n \"\"\"\n This enhanced DetailView ensures,\n\n - by rewriting `get()`, that:\n * `self.object` contains the result of `get_object()` (as it must be if `get()` was not rewritten)\n * Redirection is made if we catch `MustRedirect`\n * `self.versioned_object` contains a PublicContent object\n * `self.public_content_object` contains a PublishedContent object\n - by surcharging `get_context_data()`, that\n * context['content'] is set\n * context['is_staff'] is set\n * context['can_edit'] is set\n * context['public_object'] is set\n * context['is_antispam'] is set\n * context['db_content'] is set with the PublishableContent instance\n \"\"\"\n\n def get(self, request, *args, **kwargs):\n\n try:\n self.public_content_object = self.get_public_object()\n except 
MustRedirect as redirection_url:\n return HttpResponsePermanentRedirect(redirection_url.url)\n\n self.object = self.get_object()\n self.versioned_object = self.get_versioned_object()\n context = self.get_context_data(object=self.object)\n follow = ContentRead.objects.filter(user__pk=self.request.user.pk).filter(content__pk=self.object.pk).first()\n if follow is not None:\n follow.note = self.object.last_note\n follow.save()\n\n return self.render_to_response(context)\n\n def get_context_data(self, **kwargs):\n\n context = super().get_context_data(**kwargs)\n\n context[\"content\"] = self.versioned_object\n context[\"is_obsolete\"] = self.object.is_obsolete\n context[\"public_object\"] = self.public_content_object\n context[\"can_edit\"] = self.request.user in self.object.authors.all()\n context[\"is_antispam\"] = self.object.antispam(self.request.user)\n context[\"is_staff\"] = self.is_staff\n context[\"is_author\"] = self.is_author\n context[\"db_content\"] = self.object\n return context\n\n\nclass SingleOnlineContentFormViewMixin(SingleOnlineContentViewMixin, ModalFormView):\n \"\"\"\n This enhanced FormView ensure,\n\n - by surcharging `dispatch()`, that:\n * `self.public_content_object` contains a PublishedContent object\n * `self.object` contains the result of `get_object()` (as for DetailView)\n * `self.versioned_object` contains the results of `get_versioned_object()`\n - by surcharging `get_context_data()`, that\n * context['content'] is set\n * context['public_object'] is set\n\n\n Note: does not catch `MustRedirect`, so you should not use a `slug` with POST request\n \"\"\"\n\n denied_if_lock = False # denied the use of the form if the content is locked\n\n def dispatch(self, request, *args, **kwargs):\n self.public_content_object = self.get_public_object()\n self.object = self.get_object()\n self.versioned_object = self.get_versioned_object()\n\n if self.denied_if_lock and self.object.is_locked:\n raise PermissionDenied\n\n return super().dispatch(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n context[\"content\"] = self.versioned_object\n context[\"public_object\"] = self.public_content_object\n\n return context\n\n\nclass DownloadViewMixin(View):\n \"\"\"Basic View to return a file to download\n\n (inspired from https://djangosnippets.org/snippets/2549/ and\n http://stackoverflow.com/questions/16286666/send-a-file-through-django-class-based-views)\n\n You just need to override `get_contents()` to make it works\n \"\"\"\n\n mimetype = None\n filename = None\n\n def get_mimetype(self):\n return self.mimetype\n\n def get_filename(self):\n return self.filename\n\n def get_contents(self):\n pass\n\n def get(self, context, **response_kwargs):\n \"\"\"\n Access to a file with only get method then write the file content in response stream.\n Properly sets Content-Type and Content-Disposition headers\n \"\"\"\n response = HttpResponse(content_type=self.get_mimetype())\n response[\"Content-Disposition\"] = \"filename=\" + self.get_filename()\n response.write(self.get_contents())\n\n return response\n\n\nclass SingleContentDownloadViewMixin(SingleContentViewMixin, DownloadViewMixin):\n \"\"\"\n Ensure, by rewritring ``get()``, that\n - `self.object` contains the result of `get_object()` (as it must be if `get()` is not rewritten)\n - `self.sha` is set according to `self.request.GET['version']` (if any) and `self.object.sha_draft` otherwise\n - `self.versioned_object` contains the results of `get_versioned_object()`\n 
\"\"\"\n\n def get(self, context, **response_kwargs):\n self.object = self.get_object()\n\n if not self.sha:\n try:\n self.sha = self.request.GET[\"version\"]\n except KeyError:\n self.sha = self.object.sha_draft\n\n self.versioned_object = self.get_versioned_object()\n\n return super().get(context, **response_kwargs)\n\n\nclass RequiresValidationViewMixin(SingleContentDetailViewMixin):\n \"\"\"\n Ensure the content require validation before publication.\n \"\"\"\n\n def get(self, request, *args, **kwargs):\n if not self.get_object().requires_validation():\n raise PermissionDenied\n return super().get(request, *args, **kwargs)\n\n\nclass DoesNotRequireValidationFormViewMixin(SingleContentFormViewMixin):\n \"\"\"\n Ensure the content do not require validation before publication.\n \"\"\"\n\n def get_form_kwargs(self):\n if self.versioned_object.requires_validation():\n raise PermissionDenied\n return super().get_form_kwargs()\n", "path": "zds/tutorialv2/mixins.py"}]} |
gh_patches_debug_1251 | rasdani/github-patches | git_diff | tensorflow__tensor2tensor-360 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error with `.decode` on `str` object when generating `summarize_cnn_dailymail32k` data
## CMD
```
t2t-trainer \
--generate_data \
--data_dir="$data" \
--problems=summarize_cnn_dailymail32k \
--model=transformer \
--hparams_set=transformer_base_single_gpu \
--output_dir="$root"
```
## OUTPUT
```
INFO:tensorflow:Generating data for summarize_cnn_dailymail32k
INFO:tensorflow:Generating vocab file: t2t_data/vocab.cnndailymail.32768
INFO:tensorflow:Downloading https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfTHk4NFg2SndKcjQ&confirm=ak1U to /tmp/t2t_datagen/cnn_stories.tgz
INFO:tensorflow:Succesfully downloaded cnn_stories.tgz, 158577824 bytes.
INFO:tensorflow:Downloading https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfM1BxdkxVaTY2bWs&confirm=1N53 to /tmp/t2t_datagen/dailymail_stories.tgz
INFO:tensorflow:Succesfully downloaded dailymail_stories.tgz, 375893739 bytes.
Traceback (most recent call last):
File "/home/pltrdy/anaconda3/bin/t2t-trainer", line 96, in <module>
tf.app.run()
File "/home/pltrdy/anaconda3/lib/python3.6/site-packages/tensorflow/python/platform/app.py", line 48, in run
_sys.exit(main(_sys.argv[:1] + flags_passthrough))
File "/home/pltrdy/anaconda3/bin/t2t-trainer", line 83, in main
problem.generate_data(data_dir, tmp_dir)
File "/home/pltrdy/anaconda3/lib/python3.6/site-packages/tensor2tensor/data_generators/problem.py", line 625, in generate_data
self.generator(data_dir, tmp_dir, True), all_paths)
File "/home/pltrdy/anaconda3/lib/python3.6/site-packages/tensor2tensor/data_generators/generator_utils.py", line 143, in generate_files
for case in generator:
File "/home/pltrdy/anaconda3/lib/python3.6/site-packages/tensor2tensor/data_generators/cnn_dailymail.py", line 132, in generator
story_generator(tmp_dir))
File "/home/pltrdy/anaconda3/lib/python3.6/site-packages/tensor2tensor/data_generators/generator_utils.py", line 328, in get_or_generate_vocab_inner
for item in generator:
File "/home/pltrdy/anaconda3/lib/python3.6/site-packages/tensor2tensor/data_generators/cnn_dailymail.py", line 78, in story_generator
line = unicode(line, "utf-8") if six.PY2 else line.decode("utf-8")
AttributeError: 'str' object has no attribute 'decode'
```
## CONFIG
* Python version: `Python 3.6.1 |Anaconda custom (64-bit)| (default, May 11 2017, 13:09:58)`
* Tensor2Tensor version:
```
$ pip show tensor2tensor
Name: tensor2tensor
Version: 1.2.4
Summary: Tensor2Tensor
Home-page: http://github.com/tensorflow/tensor2tensor
Author: Google Inc.
Author-email: [email protected]
License: Apache 2.0
Location: /home/pltrdy/anaconda3/lib/python3.6/site-packages
Requires: sympy, numpy, requests, six, future, bz2file
```
## QUICKFIX
```
--- a/tensor2tensor/data_generators/cnn_dailymail.py
+++ b/tensor2tensor/data_generators/cnn_dailymail.py
@@ -74,7 +74,7 @@ def story_generator(tmp_dir):
for path in paths:
for story_file in tf.gfile.Glob(path + "*"):
story = u""
- for line in tf.gfile.Open(story_file):
+ for line in tf.gfile.Open(story_file, 'rb'):
```
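To see why binary mode helps: in Python 3 a file opened in text mode yields `str`, which has no `.decode()` method, while binary mode yields `bytes`, which can be decoded explicitly on both Python 2 and Python 3. A minimal sketch (the file name is just a placeholder, not code from this repo):
```
# Placeholder file, written only so the example is self-contained.
with open("story.txt", "w") as f:
    f.write("some story text\n")

with open("story.txt", "r") as f:    # text mode: readline() returns str on Python 3
    line = f.readline()
    # line.decode("utf-8")           # AttributeError: 'str' object has no attribute 'decode'

with open("story.txt", "rb") as f:   # binary mode: readline() returns bytes
    line = f.readline().decode("utf-8")  # works the same on Python 2 and Python 3
```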
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tensor2tensor/data_generators/cnn_dailymail.py`
Content:
```
1 # coding=utf-8
2 # Copyright 2017 The Tensor2Tensor Authors.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 """Data generators for the CNN and Daily Mail datasets."""
17
18 from __future__ import absolute_import
19 from __future__ import division
20 from __future__ import print_function
21
22 import os
23 import tarfile
24
25 # Dependency imports
26
27 import six
28 from tensor2tensor.data_generators import generator_utils
29 from tensor2tensor.data_generators import problem
30 from tensor2tensor.data_generators import text_encoder
31 from tensor2tensor.utils import registry
32
33 import tensorflow as tf
34
35
36 # Links to data from http://cs.nyu.edu/~kcho/DMQA/
37 _CNN_STORIES_DRIVE_URL = "https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfTHk4NFg2SndKcjQ"
38
39 _DAILYMAIL_STORIES_DRIVE_URL = "https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfM1BxdkxVaTY2bWs"
40
41
42 # End-of-sentence marker.
43 EOS = text_encoder.EOS_ID
44
45
46 def _maybe_download_corpora(tmp_dir):
47 """Download corpora if necessary and unzip them.
48
49 Args:
50 tmp_dir: directory containing dataset.
51
52 Returns:
53 filepath of the downloaded corpus file.
54 """
55 cnn_filename = "cnn_stories.tgz"
56 cnn_finalpath = os.path.join(tmp_dir, "cnn/stories/")
57 dailymail_filename = "dailymail_stories.tgz"
58 dailymail_finalpath = os.path.join(tmp_dir, "dailymail/stories/")
59 if not tf.gfile.Exists(cnn_finalpath):
60 cnn_file = generator_utils.maybe_download_from_drive(
61 tmp_dir, cnn_filename, _CNN_STORIES_DRIVE_URL)
62 with tarfile.open(cnn_file, "r:gz") as cnn_tar:
63 cnn_tar.extractall(tmp_dir)
64 if not tf.gfile.Exists(dailymail_finalpath):
65 dailymail_file = generator_utils.maybe_download_from_drive(
66 tmp_dir, dailymail_filename, _DAILYMAIL_STORIES_DRIVE_URL)
67 with tarfile.open(dailymail_file, "r:gz") as dailymail_tar:
68 dailymail_tar.extractall(tmp_dir)
69 return [cnn_finalpath, dailymail_finalpath]
70
71
72 def story_generator(tmp_dir):
73 paths = _maybe_download_corpora(tmp_dir)
74 for path in paths:
75 for story_file in tf.gfile.Glob(path + "*"):
76 story = u""
77 for line in tf.gfile.Open(story_file):
78 line = unicode(line, "utf-8") if six.PY2 else line.decode("utf-8")
79 story += line
80 yield story
81
82
83 def _story_summary_split(story):
84 end_pos = story.find("\n\n") # Upto first empty line.
85 assert end_pos != -1
86 return story[:end_pos], story[end_pos:].strip()
87
88
89 @registry.register_problem
90 class SummarizeCnnDailymail32k(problem.Text2TextProblem):
91 """Summarize CNN and Daily Mail articles to their first paragraph."""
92
93 @property
94 def is_character_level(self):
95 return False
96
97 @property
98 def has_inputs(self):
99 return True
100
101 @property
102 def input_space_id(self):
103 return problem.SpaceID.EN_TOK
104
105 @property
106 def target_space_id(self):
107 return problem.SpaceID.EN_TOK
108
109 @property
110 def num_shards(self):
111 return 100
112
113 @property
114 def vocab_name(self):
115 return "vocab.cnndailymail"
116
117 @property
118 def use_subword_tokenizer(self):
119 return True
120
121 @property
122 def targeted_vocab_size(self):
123 return 2**15 # 32768
124
125 @property
126 def use_train_shards_for_dev(self):
127 return True
128
129 def generator(self, data_dir, tmp_dir, _):
130 encoder = generator_utils.get_or_generate_vocab_inner(
131 data_dir, self.vocab_file, self.targeted_vocab_size,
132 story_generator(tmp_dir))
133 for story in story_generator(tmp_dir):
134 summary, rest = _story_summary_split(story)
135 encoded_summary = encoder.encode(summary) + [EOS]
136 encoded_story = encoder.encode(rest) + [EOS]
137 yield {"inputs": encoded_story, "targets": encoded_summary}
138
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tensor2tensor/data_generators/cnn_dailymail.py b/tensor2tensor/data_generators/cnn_dailymail.py
--- a/tensor2tensor/data_generators/cnn_dailymail.py
+++ b/tensor2tensor/data_generators/cnn_dailymail.py
@@ -74,7 +74,7 @@
for path in paths:
for story_file in tf.gfile.Glob(path + "*"):
story = u""
- for line in tf.gfile.Open(story_file):
+ for line in tf.gfile.Open(story_file, 'rb'):
line = unicode(line, "utf-8") if six.PY2 else line.decode("utf-8")
story += line
yield story
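As a usage illustration (hypothetical snippet, not part of the patch), the fixed generator can be exercised on its own; the temp directory is an assumption and the corpora are fetched on first use:
```
from tensor2tensor.data_generators import cnn_dailymail

# Downloads/extracts the CNN and Daily Mail corpora on first run (several hundred MB),
# then yields one story string per file.
for story in cnn_dailymail.story_generator("/tmp/t2t_datagen"):
    print(story[:80])  # first characters of the first story
    break
```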
| {"golden_diff": "diff --git a/tensor2tensor/data_generators/cnn_dailymail.py b/tensor2tensor/data_generators/cnn_dailymail.py\n--- a/tensor2tensor/data_generators/cnn_dailymail.py\n+++ b/tensor2tensor/data_generators/cnn_dailymail.py\n@@ -74,7 +74,7 @@\n for path in paths:\n for story_file in tf.gfile.Glob(path + \"*\"):\n story = u\"\"\n- for line in tf.gfile.Open(story_file):\n+ for line in tf.gfile.Open(story_file, 'rb'):\n line = unicode(line, \"utf-8\") if six.PY2 else line.decode(\"utf-8\")\n story += line\n yield story\n", "issue": "Error with `.decode` on `str` object when generating `summarize_cnn_dailymail32k` data\n## CMD\r\n```\r\nt2t-trainer \\\r\n --generate_data \\\r\n --data_dir=\"$data\" \\\r\n --problems=summarize_cnn_dailymail32k \\\r\n --model=transformer \\\r\n --hparams_set=transformer_base_single_gpu \\\r\n --output_dir=\"$root\"\r\n```\r\n\r\n## OUTPUT\r\n```\r\nINFO:tensorflow:Generating data for summarize_cnn_dailymail32k\r\nINFO:tensorflow:Generating vocab file: t2t_data/vocab.cnndailymail.32768\r\nINFO:tensorflow:Downloading https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfTHk4NFg2SndKcjQ&confirm=ak1U to /tmp/t2t_datagen/cnn_stories.tgz\r\n\r\nINFO:tensorflow:Succesfully downloaded cnn_stories.tgz, 158577824 bytes.\r\nINFO:tensorflow:Downloading https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfM1BxdkxVaTY2bWs&confirm=1N53 to /tmp/t2t_datagen/dailymail_stories.tgz\r\n\r\nINFO:tensorflow:Succesfully downloaded dailymail_stories.tgz, 375893739 bytes.\r\nTraceback (most recent call last):\r\n File \"/home/pltrdy/anaconda3/bin/t2t-trainer\", line 96, in <module>\r\n tf.app.run()\r\n File \"/home/pltrdy/anaconda3/lib/python3.6/site-packages/tensorflow/python/platform/app.py\", line 48, in run\r\n _sys.exit(main(_sys.argv[:1] + flags_passthrough))\r\n File \"/home/pltrdy/anaconda3/bin/t2t-trainer\", line 83, in main\r\n problem.generate_data(data_dir, tmp_dir)\r\n File \"/home/pltrdy/anaconda3/lib/python3.6/site-packages/tensor2tensor/data_generators/problem.py\", line 625, in generate_data\r\n self.generator(data_dir, tmp_dir, True), all_paths)\r\n File \"/home/pltrdy/anaconda3/lib/python3.6/site-packages/tensor2tensor/data_generators/generator_utils.py\", line 143, in generate_files\r\n for case in generator:\r\n File \"/home/pltrdy/anaconda3/lib/python3.6/site-packages/tensor2tensor/data_generators/cnn_dailymail.py\", line 132, in generator\r\n story_generator(tmp_dir))\r\n File \"/home/pltrdy/anaconda3/lib/python3.6/site-packages/tensor2tensor/data_generators/generator_utils.py\", line 328, in get_or_generate_vocab_inner\r\n for item in generator:\r\n File \"/home/pltrdy/anaconda3/lib/python3.6/site-packages/tensor2tensor/data_generators/cnn_dailymail.py\", line 78, in story_generator\r\n line = unicode(line, \"utf-8\") if six.PY2 else line.decode(\"utf-8\")\r\nAttributeError: 'str' object has no attribute 'decode'\r\n```\r\n\r\n## CONFIG\r\n* Python version: `Python 3.6.1 |Anaconda custom (64-bit)| (default, May 11 2017, 13:09:58)`\r\n* Tensor2Tensor version:\r\n```\r\n$ pip show tensor2tensor\r\nName: tensor2tensor\r\nVersion: 1.2.4\r\nSummary: Tensor2Tensor\r\nHome-page: http://github.com/tensorflow/tensor2tensor\r\nAuthor: Google Inc.\r\nAuthor-email: [email protected]\r\nLicense: Apache 2.0\r\nLocation: /home/pltrdy/anaconda3/lib/python3.6/site-packages\r\nRequires: sympy, numpy, requests, six, future, bz2file\r\n```\r\n\r\n## QUICKFIX\r\n```\r\n--- a/tensor2tensor/data_generators/cnn_dailymail.py\r\n+++ 
b/tensor2tensor/data_generators/cnn_dailymail.py\r\n@@ -74,7 +74,7 @@ def story_generator(tmp_dir):\r\n for path in paths:\r\n for story_file in tf.gfile.Glob(path + \"*\"):\r\n story = u\"\"\r\n- for line in tf.gfile.Open(story_file):\r\n+ for line in tf.gfile.Open(story_file, 'rb'):\r\n\r\n```\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2017 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Data generators for the CNN and Daily Mail datasets.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tarfile\n\n# Dependency imports\n\nimport six\nfrom tensor2tensor.data_generators import generator_utils\nfrom tensor2tensor.data_generators import problem\nfrom tensor2tensor.data_generators import text_encoder\nfrom tensor2tensor.utils import registry\n\nimport tensorflow as tf\n\n\n# Links to data from http://cs.nyu.edu/~kcho/DMQA/\n_CNN_STORIES_DRIVE_URL = \"https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfTHk4NFg2SndKcjQ\"\n\n_DAILYMAIL_STORIES_DRIVE_URL = \"https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfM1BxdkxVaTY2bWs\"\n\n\n# End-of-sentence marker.\nEOS = text_encoder.EOS_ID\n\n\ndef _maybe_download_corpora(tmp_dir):\n \"\"\"Download corpora if necessary and unzip them.\n\n Args:\n tmp_dir: directory containing dataset.\n\n Returns:\n filepath of the downloaded corpus file.\n \"\"\"\n cnn_filename = \"cnn_stories.tgz\"\n cnn_finalpath = os.path.join(tmp_dir, \"cnn/stories/\")\n dailymail_filename = \"dailymail_stories.tgz\"\n dailymail_finalpath = os.path.join(tmp_dir, \"dailymail/stories/\")\n if not tf.gfile.Exists(cnn_finalpath):\n cnn_file = generator_utils.maybe_download_from_drive(\n tmp_dir, cnn_filename, _CNN_STORIES_DRIVE_URL)\n with tarfile.open(cnn_file, \"r:gz\") as cnn_tar:\n cnn_tar.extractall(tmp_dir)\n if not tf.gfile.Exists(dailymail_finalpath):\n dailymail_file = generator_utils.maybe_download_from_drive(\n tmp_dir, dailymail_filename, _DAILYMAIL_STORIES_DRIVE_URL)\n with tarfile.open(dailymail_file, \"r:gz\") as dailymail_tar:\n dailymail_tar.extractall(tmp_dir)\n return [cnn_finalpath, dailymail_finalpath]\n\n\ndef story_generator(tmp_dir):\n paths = _maybe_download_corpora(tmp_dir)\n for path in paths:\n for story_file in tf.gfile.Glob(path + \"*\"):\n story = u\"\"\n for line in tf.gfile.Open(story_file):\n line = unicode(line, \"utf-8\") if six.PY2 else line.decode(\"utf-8\")\n story += line\n yield story\n\n\ndef _story_summary_split(story):\n end_pos = story.find(\"\\n\\n\") # Upto first empty line.\n assert end_pos != -1\n return story[:end_pos], story[end_pos:].strip()\n\n\[email protected]_problem\nclass SummarizeCnnDailymail32k(problem.Text2TextProblem):\n \"\"\"Summarize CNN and Daily Mail articles to their first paragraph.\"\"\"\n\n @property\n def is_character_level(self):\n return False\n\n @property\n def has_inputs(self):\n return True\n\n @property\n def input_space_id(self):\n return 
problem.SpaceID.EN_TOK\n\n @property\n def target_space_id(self):\n return problem.SpaceID.EN_TOK\n\n @property\n def num_shards(self):\n return 100\n\n @property\n def vocab_name(self):\n return \"vocab.cnndailymail\"\n\n @property\n def use_subword_tokenizer(self):\n return True\n\n @property\n def targeted_vocab_size(self):\n return 2**15 # 32768\n\n @property\n def use_train_shards_for_dev(self):\n return True\n\n def generator(self, data_dir, tmp_dir, _):\n encoder = generator_utils.get_or_generate_vocab_inner(\n data_dir, self.vocab_file, self.targeted_vocab_size,\n story_generator(tmp_dir))\n for story in story_generator(tmp_dir):\n summary, rest = _story_summary_split(story)\n encoded_summary = encoder.encode(summary) + [EOS]\n encoded_story = encoder.encode(rest) + [EOS]\n yield {\"inputs\": encoded_story, \"targets\": encoded_summary}\n", "path": "tensor2tensor/data_generators/cnn_dailymail.py"}], "after_files": [{"content": "# coding=utf-8\n# Copyright 2017 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Data generators for the CNN and Daily Mail datasets.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tarfile\n\n# Dependency imports\n\nimport six\nfrom tensor2tensor.data_generators import generator_utils\nfrom tensor2tensor.data_generators import problem\nfrom tensor2tensor.data_generators import text_encoder\nfrom tensor2tensor.utils import registry\n\nimport tensorflow as tf\n\n\n# Links to data from http://cs.nyu.edu/~kcho/DMQA/\n_CNN_STORIES_DRIVE_URL = \"https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfTHk4NFg2SndKcjQ\"\n\n_DAILYMAIL_STORIES_DRIVE_URL = \"https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfM1BxdkxVaTY2bWs\"\n\n\n# End-of-sentence marker.\nEOS = text_encoder.EOS_ID\n\n\ndef _maybe_download_corpora(tmp_dir):\n \"\"\"Download corpora if necessary and unzip them.\n\n Args:\n tmp_dir: directory containing dataset.\n\n Returns:\n filepath of the downloaded corpus file.\n \"\"\"\n cnn_filename = \"cnn_stories.tgz\"\n cnn_finalpath = os.path.join(tmp_dir, \"cnn/stories/\")\n dailymail_filename = \"dailymail_stories.tgz\"\n dailymail_finalpath = os.path.join(tmp_dir, \"dailymail/stories/\")\n if not tf.gfile.Exists(cnn_finalpath):\n cnn_file = generator_utils.maybe_download_from_drive(\n tmp_dir, cnn_filename, _CNN_STORIES_DRIVE_URL)\n with tarfile.open(cnn_file, \"r:gz\") as cnn_tar:\n cnn_tar.extractall(tmp_dir)\n if not tf.gfile.Exists(dailymail_finalpath):\n dailymail_file = generator_utils.maybe_download_from_drive(\n tmp_dir, dailymail_filename, _DAILYMAIL_STORIES_DRIVE_URL)\n with tarfile.open(dailymail_file, \"r:gz\") as dailymail_tar:\n dailymail_tar.extractall(tmp_dir)\n return [cnn_finalpath, dailymail_finalpath]\n\n\ndef story_generator(tmp_dir):\n paths = _maybe_download_corpora(tmp_dir)\n for path in paths:\n for story_file in tf.gfile.Glob(path + \"*\"):\n story = u\"\"\n for line in 
tf.gfile.Open(story_file, 'rb'):\n line = unicode(line, \"utf-8\") if six.PY2 else line.decode(\"utf-8\")\n story += line\n yield story\n\n\ndef _story_summary_split(story):\n end_pos = story.find(\"\\n\\n\") # Upto first empty line.\n assert end_pos != -1\n return story[:end_pos], story[end_pos:].strip()\n\n\[email protected]_problem\nclass SummarizeCnnDailymail32k(problem.Text2TextProblem):\n \"\"\"Summarize CNN and Daily Mail articles to their first paragraph.\"\"\"\n\n @property\n def is_character_level(self):\n return False\n\n @property\n def has_inputs(self):\n return True\n\n @property\n def input_space_id(self):\n return problem.SpaceID.EN_TOK\n\n @property\n def target_space_id(self):\n return problem.SpaceID.EN_TOK\n\n @property\n def num_shards(self):\n return 100\n\n @property\n def vocab_name(self):\n return \"vocab.cnndailymail\"\n\n @property\n def use_subword_tokenizer(self):\n return True\n\n @property\n def targeted_vocab_size(self):\n return 2**15 # 32768\n\n @property\n def use_train_shards_for_dev(self):\n return True\n\n def generator(self, data_dir, tmp_dir, _):\n encoder = generator_utils.get_or_generate_vocab_inner(\n data_dir, self.vocab_file, self.targeted_vocab_size,\n story_generator(tmp_dir))\n for story in story_generator(tmp_dir):\n summary, rest = _story_summary_split(story)\n encoded_summary = encoder.encode(summary) + [EOS]\n encoded_story = encoder.encode(rest) + [EOS]\n yield {\"inputs\": encoded_story, \"targets\": encoded_summary}\n", "path": "tensor2tensor/data_generators/cnn_dailymail.py"}]} |
gh_patches_debug_1252 | rasdani/github-patches | git_diff | akvo__akvo-rsr-1858 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Akvo Pages with hostnames in uppercase do not work properly
## Test plan
1. Go to 'old' RSR admin: `/en/admin/rsr/partnersite/add/`
2. Add an Akvo Page with capitals in the hostname
3. Save
4. The hostname should be all lowercase
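The expectation in step 4 can also be checked programmatically once the fix is in place; a hypothetical sketch (the import path and the pre-existing organisation are assumptions):
```
from akvo.rsr.models import Organisation, PartnerSite

org = Organisation.objects.first()  # any existing organisation
site = PartnerSite(organisation=org, hostname="MyOrganisation")
site.save()
assert site.hostname == "myorganisation"  # stored lower-cased once normalisation is added
```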
## Issue description
An Akvo Page whose hostname contains uppercase characters does not work properly; if the hostname is changed to lower case, it does. Since hostnames are case insensitive, we should make sure uppercase hostnames are handled properly.
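One way to enforce this, shown only as a minimal sketch (the real `PartnerSite` model has many more fields than the one below), is to normalise the hostname whenever the model is saved:
```
from django.db import models

class PartnerSite(models.Model):
    # Sketch: only the relevant field is shown.
    hostname = models.CharField(max_length=50, unique=True)

    def save(self, *args, **kwargs):
        # Hostnames are case insensitive, so store the canonical lower-case form.
        if self.hostname:
            self.hostname = self.hostname.lower()
        super(PartnerSite, self).save(*args, **kwargs)
```
With this in place, a page saved with hostname `MyOrganisation` is stored as `myorganisation`.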
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rsr/models/partner_site.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 """Akvo RSR is covered by the GNU Affero General Public License.
4
5 See more details in the license.txt file located at the root folder of the Akvo RSR module.
6 For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
7 """
8
9 from django.conf import settings
10 from django.db import models
11 from django.utils.translation import ugettext_lazy as _
12
13 from akvo.utils import rsr_show_keywords
14
15 from ..fields import NullCharField, ValidXMLCharField, ValidXMLTextField
16 from ..mixins import TimestampsMixin
17
18
19 def about_image_path(instance, file_name):
20 """Return absolute image path."""
21 return 'db/partner_sites/%s/image/%s' % (instance.hostname, file_name)
22
23
24 def custom_css_path(instance, filename):
25 """Return custom css path."""
26 return 'db/partner_sites/%s/custom.css' % instance.hostname
27
28
29 def custom_favicon_path(instance, filename):
30 """Return custom favicon path."""
31 return 'db/partner_sites/%s/favicon.ico' % instance.hostname
32
33
34 def custom_logo_path(instance, filename):
35 """Return custom logo path."""
36 return 'db/partner_sites/%s/logo/%s' % (instance.hostname, filename)
37
38
39 class PartnerSite(TimestampsMixin, models.Model):
40
41 """Model makes it possible to cater different data sets based on request's hostname."""
42
43 def show_keywords(self):
44 """Return keywords for PartnerSite."""
45 return rsr_show_keywords(self)
46 show_keywords.short_description = 'Keywords'
47 show_keywords.allow_tags = True
48 show_keywords.admin_order_field = 'keywords'
49
50 organisation = models.ForeignKey(
51 'Organisation', verbose_name=_(u'organisation'),
52 help_text=_('Select your organisation from the drop-down list.'))
53 notes = ValidXMLTextField(verbose_name=_(u'Akvo page notes'), blank=True, default='')
54 hostname = ValidXMLCharField(
55 _(u'hostname'), max_length=50, unique=True, help_text=_(
56 u'<p>Your hostname is used in the default web address of your Akvo page. '
57 u'The web address created from the hostname <em>myorganisation</em> would be '
58 u'<em>http://myorganisation.akvoapp.org/</em>.</p>'
59 )
60 )
61 cname = NullCharField(
62 _(u'CNAME'), max_length=100, unique=True, blank=True, null=True, help_text=_(
63 u'<p>Enter a custom domain name for accessing the Akvo page, for example '
64 u'<i>projects.mydomain.org</i>. Optional. Requires additional DNS setup.</p>'
65 )
66 )
67 custom_return_url = models.URLField(
68 _(u'Return URL'), blank=True, help_text=_(
69 u'<p>Enter the full URL (including http://) for the page to which users '
70 u'should be returned when leaving the Akvo page.</p>'
71 )
72 )
73 custom_return_url_text = ValidXMLCharField(
74 _(u'Return URL text'), blank=True, max_length=50, default='', help_text=_(
75 u'<p>Enter a text for the back button and return URL. '
76 u'Leave empty to display "Back to <em>myorganisation</em>".</p>'
77 )
78 )
79 piwik_id = models.PositiveIntegerField(_(u'Piwik analytics ID'), blank=True, null=True)
80 custom_css = models.FileField(_(u'stylesheet'), blank=True, upload_to=custom_css_path)
81 custom_logo = models.FileField(
82 _(u'organisation banner logo'), blank=True, upload_to=custom_logo_path, help_text=_(
83 u'<p>Upload a logo file for the logo at the top of the Akvo page. By default '
84 u'logo of the organisation belonging to the Akvo Page will be displayed.</p>'
85 )
86 )
87 custom_favicon = models.FileField(
88 _(u'favicon'), blank=True, upload_to=custom_favicon_path, help_text=_(
89 u'<p>A favicon (.ico file) is the 16x16 pixel image shown inside the browser\'s '
90 u'location bar, on tabs and in the bookmark menu.</p>'
91 )
92 )
93 show_keyword_logos = models.BooleanField(_(u'Show keyword logos on project pages'),
94 default=False)
95 about_box = ValidXMLTextField(
96 _(u'about box text'), max_length=500, blank=True, help_text=_(
97 u'Enter HTML that will make up the top left box of the home page. (500 characters)'
98 u'<p>'
99 u' Any text added should be wrapped in 2 <div> tags, an outer one specifying '
100 u' position and width of the text, and an inner for formatting of the text .'
101 u'</p>'
102 u'<p>'
103 u' The Outer <div> tag can use the classes <code>quarter, half, '
104 u' three_quarters and full</code> to specify the'
105 u' width of the text. It can use the classes <code>bottom</code> and '
106 u' <code>right</code> to specify a position other than top left.'
107 u'</p>'
108 u'<p>'
109 u' The Inner <div> tag can use the class <code>text_bg</code> to create a '
110 u' semi-transparent text background if a background image will be uploaded. '
111 u' Any other inline styles can also be used within the inner <div>. The '
112 u' tags <h1>, <h3>, <h5> and <a> are blue, while '
113 u' <p> tags are black by default. Use the classes <code>first</code> and '
114 u' <code>last</code> with <p> tags to reduce the margins above or below '
115 u' respectively.'
116 u'</p>'
117 u'<p>'
118 u' Add additional styling inline, or upload a .css stylesheet in the Stylesheet '
119 u' setting above. <em>Tip:</em> When using a .css file, use the #about_box ID '
120 u' selector to apply a style only to the About box.'
121 u'</p>'
122 )
123 )
124 about_image = models.ImageField(
125 _(u'about box image'), blank=True, upload_to=about_image_path, help_text=_(
126 u'<p>The optional background image for the About box '
127 u'<em>must</em> be 470 pixels wide and 250 pixels tall.</p>'
128 )
129 )
130
131 enabled = models.BooleanField(_(u'enabled'), default=True)
132 default_language = ValidXMLCharField(
133 _(u'Site UI default language'), max_length=5, choices=settings.LANGUAGES,
134 default=settings.LANGUAGE_CODE)
135 ui_translation = models.BooleanField(_(u'Translate user interface'), default=False)
136 google_translation = models.BooleanField(_(u'Google translation widget'), default=False)
137 facebook_button = models.BooleanField(_(u'Facebook share button'), default=False)
138 twitter_button = models.BooleanField(_(u'Twitter share button'), default=False)
139 facebook_app_id = ValidXMLCharField(
140 _(u'Facebook App Id'), max_length=40, blank=True, null=True, help_text=_(
141 u'<p>Your FaceBook app id is used when sharing pages from your partner site. '
142 u'It can be obtained by creating a Facebook app, which will let you monitor when your '
143 u'pages are referenced. Follow the instructions '
144 u'<a href="http://help.yahoo.com/l/us/yahoo/smallbusiness/store/edit/social/'
145 u'social-06.html">here</a>'
146 )
147 )
148 partner_projects = models.BooleanField(
149 _(u'Show only projects of partner'), default=True,
150 help_text=_(u'Uncheck to list all projects on this Akvo page.')
151 )
152 keywords = models.ManyToManyField(
153 'Keyword', verbose_name=_(u'keywords'), related_name='partnersites', blank=True)
154 exclude_keywords = models.BooleanField(
155 _(u'Exclude projects with selected keyword(s)'), default=False)
156 all_maps = models.BooleanField(
157 _(u'Show all projects, updates and organisations on the maps.'), default=False
158 )
159
160 def __unicode__(self):
161 """Unicode representation."""
162 return _(u'Akvo page for {}').format(self.organisation.name)
163
164 @property
165 def logo(self):
166 """Return logo."""
167 return self.custom_logo or None
168
169 @property
170 def return_url(self):
171 """Return custom url or /."""
172 return self.custom_return_url or "/"
173
174 @property
175 def stylesheet(self):
176 """Return stylesheet."""
177 return self.custom_css or None
178
179 @property
180 def favicon(self):
181 """Return favicon."""
182 return self.custom_favicon or None
183
184 @property
185 def full_domain(self):
186 """Return full domain."""
187 return '%s.%s' % (self.hostname, getattr(settings, 'AKVOAPP_DOMAIN', 'akvoapp.org'))
188
189 def get_absolute_url(self):
190 """Return absolute url."""
191 url = ''
192 # TODO: consider the ramifications of get_absolute_url using CNAME if available
193 if self.cname:
194 return self.cname
195
196 protocol = 'http'
197 if getattr(settings, 'HTTPS_SUPPORT', True):
198 protocol = '%ss' % protocol
199
200 url = '%s://%s/' % (protocol, self.full_domain)
201 return url
202
203 @classmethod
204 def yank_hostname(cls, netloc):
205 """Get <partner1> from <partner1.akvoapp.org>.
206
207 From a netloc return what is stored as "hostname" on the PartnerSite model.
208 """
209 return netloc.replace('.{}'.format(settings.AKVOAPP_DOMAIN), '')
210
211 class Meta:
212 app_label = 'rsr'
213 verbose_name = _(u'Akvo page')
214 verbose_name_plural = _(u'Akvo pages')
215 ordering = ('organisation__name',)
216
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/akvo/rsr/models/partner_site.py b/akvo/rsr/models/partner_site.py
--- a/akvo/rsr/models/partner_site.py
+++ b/akvo/rsr/models/partner_site.py
@@ -161,6 +161,12 @@
"""Unicode representation."""
return _(u'Akvo page for {}').format(self.organisation.name)
+ def save(self, *args, **kwargs):
+ if self.hostname:
+ self.hostname = self.hostname.lower()
+
+ super(PartnerSite, self).save(*args, **kwargs)
+
@property
def logo(self):
"""Return logo."""
| {"golden_diff": "diff --git a/akvo/rsr/models/partner_site.py b/akvo/rsr/models/partner_site.py\n--- a/akvo/rsr/models/partner_site.py\n+++ b/akvo/rsr/models/partner_site.py\n@@ -161,6 +161,12 @@\n \"\"\"Unicode representation.\"\"\"\n return _(u'Akvo page for {}').format(self.organisation.name)\n \n+ def save(self, *args, **kwargs):\n+ if self.hostname:\n+ self.hostname = self.hostname.lower()\n+\n+ super(PartnerSite, self).save(*args, **kwargs)\n+\n @property\n def logo(self):\n \"\"\"Return logo.\"\"\"\n", "issue": "Akvo Pages with hostnames in uppercase does not work properly\n## Test plan\n1. Go to 'old' RSR admin: `/en/admin/rsr/partnersite/add/`\n2. Add an Akvo Page with capitals in the hostname\n3. Save\n4. The hostname should be all lowercase\n## Issue description\n\nIf changed to lower case it does. Since hostnames are case insensitive we should make sure that works properly.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\n\nSee more details in the license.txt file located at the root folder of the Akvo RSR module.\nFor additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom django.conf import settings\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom akvo.utils import rsr_show_keywords\n\nfrom ..fields import NullCharField, ValidXMLCharField, ValidXMLTextField\nfrom ..mixins import TimestampsMixin\n\n\ndef about_image_path(instance, file_name):\n \"\"\"Return absolute image path.\"\"\"\n return 'db/partner_sites/%s/image/%s' % (instance.hostname, file_name)\n\n\ndef custom_css_path(instance, filename):\n \"\"\"Return custom css path.\"\"\"\n return 'db/partner_sites/%s/custom.css' % instance.hostname\n\n\ndef custom_favicon_path(instance, filename):\n \"\"\"Return custom favicon path.\"\"\"\n return 'db/partner_sites/%s/favicon.ico' % instance.hostname\n\n\ndef custom_logo_path(instance, filename):\n \"\"\"Return custom logo path.\"\"\"\n return 'db/partner_sites/%s/logo/%s' % (instance.hostname, filename)\n\n\nclass PartnerSite(TimestampsMixin, models.Model):\n\n \"\"\"Model makes it possible to cater different data sets based on request's hostname.\"\"\"\n\n def show_keywords(self):\n \"\"\"Return keywords for PartnerSite.\"\"\"\n return rsr_show_keywords(self)\n show_keywords.short_description = 'Keywords'\n show_keywords.allow_tags = True\n show_keywords.admin_order_field = 'keywords'\n\n organisation = models.ForeignKey(\n 'Organisation', verbose_name=_(u'organisation'),\n help_text=_('Select your organisation from the drop-down list.'))\n notes = ValidXMLTextField(verbose_name=_(u'Akvo page notes'), blank=True, default='')\n hostname = ValidXMLCharField(\n _(u'hostname'), max_length=50, unique=True, help_text=_(\n u'<p>Your hostname is used in the default web address of your Akvo page. '\n u'The web address created from the hostname <em>myorganisation</em> would be '\n u'<em>http://myorganisation.akvoapp.org/</em>.</p>'\n )\n )\n cname = NullCharField(\n _(u'CNAME'), max_length=100, unique=True, blank=True, null=True, help_text=_(\n u'<p>Enter a custom domain name for accessing the Akvo page, for example '\n u'<i>projects.mydomain.org</i>. Optional. 
Requires additional DNS setup.</p>'\n )\n )\n custom_return_url = models.URLField(\n _(u'Return URL'), blank=True, help_text=_(\n u'<p>Enter the full URL (including http://) for the page to which users '\n u'should be returned when leaving the Akvo page.</p>'\n )\n )\n custom_return_url_text = ValidXMLCharField(\n _(u'Return URL text'), blank=True, max_length=50, default='', help_text=_(\n u'<p>Enter a text for the back button and return URL. '\n u'Leave empty to display \"Back to <em>myorganisation</em>\".</p>'\n )\n )\n piwik_id = models.PositiveIntegerField(_(u'Piwik analytics ID'), blank=True, null=True)\n custom_css = models.FileField(_(u'stylesheet'), blank=True, upload_to=custom_css_path)\n custom_logo = models.FileField(\n _(u'organisation banner logo'), blank=True, upload_to=custom_logo_path, help_text=_(\n u'<p>Upload a logo file for the logo at the top of the Akvo page. By default '\n u'logo of the organisation belonging to the Akvo Page will be displayed.</p>'\n )\n )\n custom_favicon = models.FileField(\n _(u'favicon'), blank=True, upload_to=custom_favicon_path, help_text=_(\n u'<p>A favicon (.ico file) is the 16x16 pixel image shown inside the browser\\'s '\n u'location bar, on tabs and in the bookmark menu.</p>'\n )\n )\n show_keyword_logos = models.BooleanField(_(u'Show keyword logos on project pages'),\n default=False)\n about_box = ValidXMLTextField(\n _(u'about box text'), max_length=500, blank=True, help_text=_(\n u'Enter HTML that will make up the top left box of the home page. (500 characters)'\n u'<p>'\n u' Any text added should be wrapped in 2 <div> tags, an outer one specifying '\n u' position and width of the text, and an inner for formatting of the text .'\n u'</p>'\n u'<p>'\n u' The Outer <div> tag can use the classes <code>quarter, half, '\n u' three_quarters and full</code> to specify the'\n u' width of the text. It can use the classes <code>bottom</code> and '\n u' <code>right</code> to specify a position other than top left.'\n u'</p>'\n u'<p>'\n u' The Inner <div> tag can use the class <code>text_bg</code> to create a '\n u' semi-transparent text background if a background image will be uploaded. '\n u' Any other inline styles can also be used within the inner <div>. The '\n u' tags <h1>, <h3>, <h5> and <a> are blue, while '\n u' <p> tags are black by default. Use the classes <code>first</code> and '\n u' <code>last</code> with <p> tags to reduce the margins above or below '\n u' respectively.'\n u'</p>'\n u'<p>'\n u' Add additional styling inline, or upload a .css stylesheet in the Stylesheet '\n u' setting above. 
<em>Tip:</em> When using a .css file, use the #about_box ID '\n u' selector to apply a style only to the About box.'\n u'</p>'\n )\n )\n about_image = models.ImageField(\n _(u'about box image'), blank=True, upload_to=about_image_path, help_text=_(\n u'<p>The optional background image for the About box '\n u'<em>must</em> be 470 pixels wide and 250 pixels tall.</p>'\n )\n )\n\n enabled = models.BooleanField(_(u'enabled'), default=True)\n default_language = ValidXMLCharField(\n _(u'Site UI default language'), max_length=5, choices=settings.LANGUAGES,\n default=settings.LANGUAGE_CODE)\n ui_translation = models.BooleanField(_(u'Translate user interface'), default=False)\n google_translation = models.BooleanField(_(u'Google translation widget'), default=False)\n facebook_button = models.BooleanField(_(u'Facebook share button'), default=False)\n twitter_button = models.BooleanField(_(u'Twitter share button'), default=False)\n facebook_app_id = ValidXMLCharField(\n _(u'Facebook App Id'), max_length=40, blank=True, null=True, help_text=_(\n u'<p>Your FaceBook app id is used when sharing pages from your partner site. '\n u'It can be obtained by creating a Facebook app, which will let you monitor when your '\n u'pages are referenced. Follow the instructions '\n u'<a href=\"http://help.yahoo.com/l/us/yahoo/smallbusiness/store/edit/social/'\n u'social-06.html\">here</a>'\n )\n )\n partner_projects = models.BooleanField(\n _(u'Show only projects of partner'), default=True,\n help_text=_(u'Uncheck to list all projects on this Akvo page.')\n )\n keywords = models.ManyToManyField(\n 'Keyword', verbose_name=_(u'keywords'), related_name='partnersites', blank=True)\n exclude_keywords = models.BooleanField(\n _(u'Exclude projects with selected keyword(s)'), default=False)\n all_maps = models.BooleanField(\n _(u'Show all projects, updates and organisations on the maps.'), default=False\n )\n\n def __unicode__(self):\n \"\"\"Unicode representation.\"\"\"\n return _(u'Akvo page for {}').format(self.organisation.name)\n\n @property\n def logo(self):\n \"\"\"Return logo.\"\"\"\n return self.custom_logo or None\n\n @property\n def return_url(self):\n \"\"\"Return custom url or /.\"\"\"\n return self.custom_return_url or \"/\"\n\n @property\n def stylesheet(self):\n \"\"\"Return stylesheet.\"\"\"\n return self.custom_css or None\n\n @property\n def favicon(self):\n \"\"\"Return favicon.\"\"\"\n return self.custom_favicon or None\n\n @property\n def full_domain(self):\n \"\"\"Return full domain.\"\"\"\n return '%s.%s' % (self.hostname, getattr(settings, 'AKVOAPP_DOMAIN', 'akvoapp.org'))\n\n def get_absolute_url(self):\n \"\"\"Return absolute url.\"\"\"\n url = ''\n # TODO: consider the ramifications of get_absolute_url using CNAME if available\n if self.cname:\n return self.cname\n\n protocol = 'http'\n if getattr(settings, 'HTTPS_SUPPORT', True):\n protocol = '%ss' % protocol\n\n url = '%s://%s/' % (protocol, self.full_domain)\n return url\n\n @classmethod\n def yank_hostname(cls, netloc):\n \"\"\"Get <partner1> from <partner1.akvoapp.org>.\n\n From a netloc return what is stored as \"hostname\" on the PartnerSite model.\n \"\"\"\n return netloc.replace('.{}'.format(settings.AKVOAPP_DOMAIN), '')\n\n class Meta:\n app_label = 'rsr'\n verbose_name = _(u'Akvo page')\n verbose_name_plural = _(u'Akvo pages')\n ordering = ('organisation__name',)\n", "path": "akvo/rsr/models/partner_site.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\n\nSee 
more details in the license.txt file located at the root folder of the Akvo RSR module.\nFor additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom django.conf import settings\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom akvo.utils import rsr_show_keywords\n\nfrom ..fields import NullCharField, ValidXMLCharField, ValidXMLTextField\nfrom ..mixins import TimestampsMixin\n\n\ndef about_image_path(instance, file_name):\n \"\"\"Return absolute image path.\"\"\"\n return 'db/partner_sites/%s/image/%s' % (instance.hostname, file_name)\n\n\ndef custom_css_path(instance, filename):\n \"\"\"Return custom css path.\"\"\"\n return 'db/partner_sites/%s/custom.css' % instance.hostname\n\n\ndef custom_favicon_path(instance, filename):\n \"\"\"Return custom favicon path.\"\"\"\n return 'db/partner_sites/%s/favicon.ico' % instance.hostname\n\n\ndef custom_logo_path(instance, filename):\n \"\"\"Return custom logo path.\"\"\"\n return 'db/partner_sites/%s/logo/%s' % (instance.hostname, filename)\n\n\nclass PartnerSite(TimestampsMixin, models.Model):\n\n \"\"\"Model makes it possible to cater different data sets based on request's hostname.\"\"\"\n\n def show_keywords(self):\n \"\"\"Return keywords for PartnerSite.\"\"\"\n return rsr_show_keywords(self)\n show_keywords.short_description = 'Keywords'\n show_keywords.allow_tags = True\n show_keywords.admin_order_field = 'keywords'\n\n organisation = models.ForeignKey(\n 'Organisation', verbose_name=_(u'organisation'),\n help_text=_('Select your organisation from the drop-down list.'))\n notes = ValidXMLTextField(verbose_name=_(u'Akvo page notes'), blank=True, default='')\n hostname = ValidXMLCharField(\n _(u'hostname'), max_length=50, unique=True, help_text=_(\n u'<p>Your hostname is used in the default web address of your Akvo page. '\n u'The web address created from the hostname <em>myorganisation</em> would be '\n u'<em>http://myorganisation.akvoapp.org/</em>.</p>'\n )\n )\n cname = NullCharField(\n _(u'CNAME'), max_length=100, unique=True, blank=True, null=True, help_text=_(\n u'<p>Enter a custom domain name for accessing the Akvo page, for example '\n u'<i>projects.mydomain.org</i>. Optional. Requires additional DNS setup.</p>'\n )\n )\n custom_return_url = models.URLField(\n _(u'Return URL'), blank=True, help_text=_(\n u'<p>Enter the full URL (including http://) for the page to which users '\n u'should be returned when leaving the Akvo page.</p>'\n )\n )\n custom_return_url_text = ValidXMLCharField(\n _(u'Return URL text'), blank=True, max_length=50, default='', help_text=_(\n u'<p>Enter a text for the back button and return URL. '\n u'Leave empty to display \"Back to <em>myorganisation</em>\".</p>'\n )\n )\n piwik_id = models.PositiveIntegerField(_(u'Piwik analytics ID'), blank=True, null=True)\n custom_css = models.FileField(_(u'stylesheet'), blank=True, upload_to=custom_css_path)\n custom_logo = models.FileField(\n _(u'organisation banner logo'), blank=True, upload_to=custom_logo_path, help_text=_(\n u'<p>Upload a logo file for the logo at the top of the Akvo page. 
By default '\n u'logo of the organisation belonging to the Akvo Page will be displayed.</p>'\n )\n )\n custom_favicon = models.FileField(\n _(u'favicon'), blank=True, upload_to=custom_favicon_path, help_text=_(\n u'<p>A favicon (.ico file) is the 16x16 pixel image shown inside the browser\\'s '\n u'location bar, on tabs and in the bookmark menu.</p>'\n )\n )\n show_keyword_logos = models.BooleanField(_(u'Show keyword logos on project pages'),\n default=False)\n about_box = ValidXMLTextField(\n _(u'about box text'), max_length=500, blank=True, help_text=_(\n u'Enter HTML that will make up the top left box of the home page. (500 characters)'\n u'<p>'\n u' Any text added should be wrapped in 2 <div> tags, an outer one specifying '\n u' position and width of the text, and an inner for formatting of the text .'\n u'</p>'\n u'<p>'\n u' The Outer <div> tag can use the classes <code>quarter, half, '\n u' three_quarters and full</code> to specify the'\n u' width of the text. It can use the classes <code>bottom</code> and '\n u' <code>right</code> to specify a position other than top left.'\n u'</p>'\n u'<p>'\n u' The Inner <div> tag can use the class <code>text_bg</code> to create a '\n u' semi-transparent text background if a background image will be uploaded. '\n u' Any other inline styles can also be used within the inner <div>. The '\n u' tags <h1>, <h3>, <h5> and <a> are blue, while '\n u' <p> tags are black by default. Use the classes <code>first</code> and '\n u' <code>last</code> with <p> tags to reduce the margins above or below '\n u' respectively.'\n u'</p>'\n u'<p>'\n u' Add additional styling inline, or upload a .css stylesheet in the Stylesheet '\n u' setting above. <em>Tip:</em> When using a .css file, use the #about_box ID '\n u' selector to apply a style only to the About box.'\n u'</p>'\n )\n )\n about_image = models.ImageField(\n _(u'about box image'), blank=True, upload_to=about_image_path, help_text=_(\n u'<p>The optional background image for the About box '\n u'<em>must</em> be 470 pixels wide and 250 pixels tall.</p>'\n )\n )\n\n enabled = models.BooleanField(_(u'enabled'), default=True)\n default_language = ValidXMLCharField(\n _(u'Site UI default language'), max_length=5, choices=settings.LANGUAGES,\n default=settings.LANGUAGE_CODE)\n ui_translation = models.BooleanField(_(u'Translate user interface'), default=False)\n google_translation = models.BooleanField(_(u'Google translation widget'), default=False)\n facebook_button = models.BooleanField(_(u'Facebook share button'), default=False)\n twitter_button = models.BooleanField(_(u'Twitter share button'), default=False)\n facebook_app_id = ValidXMLCharField(\n _(u'Facebook App Id'), max_length=40, blank=True, null=True, help_text=_(\n u'<p>Your FaceBook app id is used when sharing pages from your partner site. '\n u'It can be obtained by creating a Facebook app, which will let you monitor when your '\n u'pages are referenced. 
Follow the instructions '\n u'<a href=\"http://help.yahoo.com/l/us/yahoo/smallbusiness/store/edit/social/'\n u'social-06.html\">here</a>'\n )\n )\n partner_projects = models.BooleanField(\n _(u'Show only projects of partner'), default=True,\n help_text=_(u'Uncheck to list all projects on this Akvo page.')\n )\n keywords = models.ManyToManyField(\n 'Keyword', verbose_name=_(u'keywords'), related_name='partnersites', blank=True)\n exclude_keywords = models.BooleanField(\n _(u'Exclude projects with selected keyword(s)'), default=False)\n all_maps = models.BooleanField(\n _(u'Show all projects, updates and organisations on the maps.'), default=False\n )\n\n def __unicode__(self):\n \"\"\"Unicode representation.\"\"\"\n return _(u'Akvo page for {}').format(self.organisation.name)\n\n def save(self, *args, **kwargs):\n if self.hostname:\n self.hostname = self.hostname.lower()\n\n super(PartnerSite, self).save(*args, **kwargs)\n\n @property\n def logo(self):\n \"\"\"Return logo.\"\"\"\n return self.custom_logo or None\n\n @property\n def return_url(self):\n \"\"\"Return custom url or /.\"\"\"\n return self.custom_return_url or \"/\"\n\n @property\n def stylesheet(self):\n \"\"\"Return stylesheet.\"\"\"\n return self.custom_css or None\n\n @property\n def favicon(self):\n \"\"\"Return favicon.\"\"\"\n return self.custom_favicon or None\n\n @property\n def full_domain(self):\n \"\"\"Return full domain.\"\"\"\n return '%s.%s' % (self.hostname, getattr(settings, 'AKVOAPP_DOMAIN', 'akvoapp.org'))\n\n def get_absolute_url(self):\n \"\"\"Return absolute url.\"\"\"\n url = ''\n # TODO: consider the ramifications of get_absolute_url using CNAME if available\n if self.cname:\n return self.cname\n\n protocol = 'http'\n if getattr(settings, 'HTTPS_SUPPORT', True):\n protocol = '%ss' % protocol\n\n url = '%s://%s/' % (protocol, self.full_domain)\n return url\n\n @classmethod\n def yank_hostname(cls, netloc):\n \"\"\"Get <partner1> from <partner1.akvoapp.org>.\n\n From a netloc return what is stored as \"hostname\" on the PartnerSite model.\n \"\"\"\n return netloc.replace('.{}'.format(settings.AKVOAPP_DOMAIN), '')\n\n class Meta:\n app_label = 'rsr'\n verbose_name = _(u'Akvo page')\n verbose_name_plural = _(u'Akvo pages')\n ordering = ('organisation__name',)\n", "path": "akvo/rsr/models/partner_site.py"}]} |
gh_patches_debug_1253 | rasdani/github-patches | git_diff | uccser__cs-unplugged-717 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Workaround Crowdin bug where integer yaml keys are not preserved
When downloading in-context localisation files, integer keys in YAML files are not preserved. This is only an issue in the file `topics/content/en/programming-challenges-structure-difficulties.yaml`, which uses the difficulty number as the key.
As a work around, we can use the string value of the integer as the key, i.e. `"0"` instead of `0`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py`
Content:
```
1 """Custom loader for loading structure of programming challenges."""
2
3 import os
4 from django.db import transaction
5 from utils.errors.MissingRequiredFieldError import MissingRequiredFieldError
6 from utils.TranslatableModelLoader import TranslatableModelLoader
7 from topics.models import ProgrammingChallengeLanguage, ProgrammingChallengeDifficulty
8
9
10 class ProgrammingChallengesStructureLoader(TranslatableModelLoader):
11 """Custom loader for loading structure of programming challenges."""
12
13 @transaction.atomic
14 def load(self):
15 """Load the content for structure of programming challenges.
16
17 Raises:
18 MissingRequiredFieldError: when no object can be found with the matching
19 attribute.
20 """
21 structure = self.load_yaml_file(self.structure_file_path)
22
23 prog_languages = structure.get("languages", None)
24 difficulty_levels = structure.get("difficulties", None)
25 if None in [prog_languages, difficulty_levels]:
26 raise MissingRequiredFieldError(
27 self.structure_file_path,
28 ["lanugages", "difficulties"],
29 "Programming Challenge Structure"
30 )
31
32 # Add "-languages" to the structure filename
33 prog_languages_translation_filename = "{}-languages.yaml".format(
34 os.path.splitext(self.structure_filename)[0]
35 )
36 prog_languages_translations = self.get_yaml_translations(
37 prog_languages_translation_filename,
38 required_slugs=prog_languages.keys(),
39 required_fields=["name"]
40 )
41
42 for (prog_language, prog_language_data) in prog_languages.items():
43
44 if prog_language_data is None:
45 raise MissingRequiredFieldError(
46 self.structure_file_path,
47 ["number"],
48 "Programming Challenge Language"
49 )
50
51 # Check for required fields
52 prog_language_number = prog_language_data.get("number", None)
53 if prog_language_number is None:
54 raise MissingRequiredFieldError(
55 self.structure_file_path,
56 ["number"],
57 "Programming Challenge Language"
58 )
59
60 # Check if icon is given
61 if "icon" in prog_language_data:
62 prog_language_icon = prog_language_data["icon"]
63 else:
64 prog_language_icon = None
65
66 new_prog_language = ProgrammingChallengeLanguage(
67 slug=prog_language,
68 number=prog_language_number,
69 icon=prog_language_icon
70 )
71
72 translations = prog_languages_translations.get(prog_language, dict())
73 self.populate_translations(new_prog_language, translations)
74 self.mark_translation_availability(new_prog_language, required_fields=["name"])
75 new_prog_language.save()
76
77 self.log("Added programming language: {}".format(new_prog_language.__str__()))
78
79 # Add "-languages" to the structure filename
80 difficulties_translation_filename = "{}-difficulties.yaml".format(
81 os.path.splitext(self.structure_filename)[0]
82 )
83 difficulties_translations = self.get_yaml_translations(
84 difficulties_translation_filename,
85 required_slugs=difficulty_levels,
86 required_fields=["name"],
87 )
88
89 for difficulty in difficulty_levels:
90
91 new_difficulty = ProgrammingChallengeDifficulty(
92 level=difficulty,
93 )
94
95 translations = difficulties_translations.get(difficulty, dict())
96 self.populate_translations(new_difficulty, translations)
97 self.mark_translation_availability(new_difficulty, required_fields=["name"])
98 new_difficulty.save()
99
100 self.log("Added programming difficulty level: {}".format(new_difficulty.__str__()))
101
102 self.log("")
103
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py b/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py
--- a/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py
+++ b/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py
@@ -89,7 +89,7 @@
for difficulty in difficulty_levels:
new_difficulty = ProgrammingChallengeDifficulty(
- level=difficulty,
+ level=int(difficulty),
)
translations = difficulties_translations.get(difficulty, dict())
| {"golden_diff": "diff --git a/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py b/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py\n--- a/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py\n+++ b/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py\n@@ -89,7 +89,7 @@\n for difficulty in difficulty_levels:\n \n new_difficulty = ProgrammingChallengeDifficulty(\n- level=difficulty,\n+ level=int(difficulty),\n )\n \n translations = difficulties_translations.get(difficulty, dict())\n", "issue": "Workaround Crowdin bug where integer yaml keys are not preserved\nWhen downloading in-context localisation files, integer keys in YAML files are not preserved. This is only an issue in the file `topics/content/en/programming-challenges-structure-difficulties.yaml`, which uses the difficulty number as the key.\r\n\r\nAs a work around, we can use the string value of the integer as the key, i.e. `\"0\"` instead of `0`\n", "before_files": [{"content": "\"\"\"Custom loader for loading structure of programming challenges.\"\"\"\n\nimport os\nfrom django.db import transaction\nfrom utils.errors.MissingRequiredFieldError import MissingRequiredFieldError\nfrom utils.TranslatableModelLoader import TranslatableModelLoader\nfrom topics.models import ProgrammingChallengeLanguage, ProgrammingChallengeDifficulty\n\n\nclass ProgrammingChallengesStructureLoader(TranslatableModelLoader):\n \"\"\"Custom loader for loading structure of programming challenges.\"\"\"\n\n @transaction.atomic\n def load(self):\n \"\"\"Load the content for structure of programming challenges.\n\n Raises:\n MissingRequiredFieldError: when no object can be found with the matching\n attribute.\n \"\"\"\n structure = self.load_yaml_file(self.structure_file_path)\n\n prog_languages = structure.get(\"languages\", None)\n difficulty_levels = structure.get(\"difficulties\", None)\n if None in [prog_languages, difficulty_levels]:\n raise MissingRequiredFieldError(\n self.structure_file_path,\n [\"lanugages\", \"difficulties\"],\n \"Programming Challenge Structure\"\n )\n\n # Add \"-languages\" to the structure filename\n prog_languages_translation_filename = \"{}-languages.yaml\".format(\n os.path.splitext(self.structure_filename)[0]\n )\n prog_languages_translations = self.get_yaml_translations(\n prog_languages_translation_filename,\n required_slugs=prog_languages.keys(),\n required_fields=[\"name\"]\n )\n\n for (prog_language, prog_language_data) in prog_languages.items():\n\n if prog_language_data is None:\n raise MissingRequiredFieldError(\n self.structure_file_path,\n [\"number\"],\n \"Programming Challenge Language\"\n )\n\n # Check for required fields\n prog_language_number = prog_language_data.get(\"number\", None)\n if prog_language_number is None:\n raise MissingRequiredFieldError(\n self.structure_file_path,\n [\"number\"],\n \"Programming Challenge Language\"\n )\n\n # Check if icon is given\n if \"icon\" in prog_language_data:\n prog_language_icon = prog_language_data[\"icon\"]\n else:\n prog_language_icon = None\n\n new_prog_language = ProgrammingChallengeLanguage(\n slug=prog_language,\n number=prog_language_number,\n icon=prog_language_icon\n )\n\n translations = prog_languages_translations.get(prog_language, dict())\n self.populate_translations(new_prog_language, translations)\n self.mark_translation_availability(new_prog_language, required_fields=[\"name\"])\n new_prog_language.save()\n\n self.log(\"Added programming 
language: {}\".format(new_prog_language.__str__()))\n\n # Add \"-languages\" to the structure filename\n difficulties_translation_filename = \"{}-difficulties.yaml\".format(\n os.path.splitext(self.structure_filename)[0]\n )\n difficulties_translations = self.get_yaml_translations(\n difficulties_translation_filename,\n required_slugs=difficulty_levels,\n required_fields=[\"name\"],\n )\n\n for difficulty in difficulty_levels:\n\n new_difficulty = ProgrammingChallengeDifficulty(\n level=difficulty,\n )\n\n translations = difficulties_translations.get(difficulty, dict())\n self.populate_translations(new_difficulty, translations)\n self.mark_translation_availability(new_difficulty, required_fields=[\"name\"])\n new_difficulty.save()\n\n self.log(\"Added programming difficulty level: {}\".format(new_difficulty.__str__()))\n\n self.log(\"\")\n", "path": "csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py"}], "after_files": [{"content": "\"\"\"Custom loader for loading structure of programming challenges.\"\"\"\n\nimport os\nfrom django.db import transaction\nfrom utils.errors.MissingRequiredFieldError import MissingRequiredFieldError\nfrom utils.TranslatableModelLoader import TranslatableModelLoader\nfrom topics.models import ProgrammingChallengeLanguage, ProgrammingChallengeDifficulty\n\n\nclass ProgrammingChallengesStructureLoader(TranslatableModelLoader):\n \"\"\"Custom loader for loading structure of programming challenges.\"\"\"\n\n @transaction.atomic\n def load(self):\n \"\"\"Load the content for structure of programming challenges.\n\n Raises:\n MissingRequiredFieldError: when no object can be found with the matching\n attribute.\n \"\"\"\n structure = self.load_yaml_file(self.structure_file_path)\n\n prog_languages = structure.get(\"languages\", None)\n difficulty_levels = structure.get(\"difficulties\", None)\n if None in [prog_languages, difficulty_levels]:\n raise MissingRequiredFieldError(\n self.structure_file_path,\n [\"lanugages\", \"difficulties\"],\n \"Programming Challenge Structure\"\n )\n\n # Add \"-languages\" to the structure filename\n prog_languages_translation_filename = \"{}-languages.yaml\".format(\n os.path.splitext(self.structure_filename)[0]\n )\n prog_languages_translations = self.get_yaml_translations(\n prog_languages_translation_filename,\n required_slugs=prog_languages.keys(),\n required_fields=[\"name\"]\n )\n\n for (prog_language, prog_language_data) in prog_languages.items():\n\n if prog_language_data is None:\n raise MissingRequiredFieldError(\n self.structure_file_path,\n [\"number\"],\n \"Programming Challenge Language\"\n )\n\n # Check for required fields\n prog_language_number = prog_language_data.get(\"number\", None)\n if prog_language_number is None:\n raise MissingRequiredFieldError(\n self.structure_file_path,\n [\"number\"],\n \"Programming Challenge Language\"\n )\n\n # Check if icon is given\n if \"icon\" in prog_language_data:\n prog_language_icon = prog_language_data[\"icon\"]\n else:\n prog_language_icon = None\n\n new_prog_language = ProgrammingChallengeLanguage(\n slug=prog_language,\n number=prog_language_number,\n icon=prog_language_icon\n )\n\n translations = prog_languages_translations.get(prog_language, dict())\n self.populate_translations(new_prog_language, translations)\n self.mark_translation_availability(new_prog_language, required_fields=[\"name\"])\n new_prog_language.save()\n\n self.log(\"Added programming language: {}\".format(new_prog_language.__str__()))\n\n # Add \"-languages\" to the 
structure filename\n difficulties_translation_filename = \"{}-difficulties.yaml\".format(\n os.path.splitext(self.structure_filename)[0]\n )\n difficulties_translations = self.get_yaml_translations(\n difficulties_translation_filename,\n required_slugs=difficulty_levels,\n required_fields=[\"name\"],\n )\n\n for difficulty in difficulty_levels:\n\n new_difficulty = ProgrammingChallengeDifficulty(\n level=int(difficulty),\n )\n\n translations = difficulties_translations.get(difficulty, dict())\n self.populate_translations(new_difficulty, translations)\n self.mark_translation_availability(new_difficulty, required_fields=[\"name\"])\n new_difficulty.save()\n\n self.log(\"Added programming difficulty level: {}\".format(new_difficulty.__str__()))\n\n self.log(\"\")\n", "path": "csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py"}]} |
gh_patches_debug_1254 | rasdani/github-patches | git_diff | pre-commit__pre-commit-1467 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Expose an env variable that indicates if pre-commit is running
In some circumstances, it could be helpful to know inside an executable / function / script if it was invoked from pre-commit or from some other process. I wrote a hook that just printed all env variables set but I could not find an env variable like `PRECOMMIT_RUNNING` or similar. Would you consider setting such an env variable during running pre-commit? I searched old issues and documentation but I could not find anything.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/commands/run.py`
Content:
```
1 import argparse
2 import contextlib
3 import functools
4 import logging
5 import os
6 import re
7 import subprocess
8 import time
9 import unicodedata
10 from typing import Any
11 from typing import Collection
12 from typing import Dict
13 from typing import List
14 from typing import Sequence
15 from typing import Set
16 from typing import Tuple
17
18 from identify.identify import tags_from_path
19
20 from pre_commit import color
21 from pre_commit import git
22 from pre_commit import output
23 from pre_commit.clientlib import load_config
24 from pre_commit.hook import Hook
25 from pre_commit.languages.all import languages
26 from pre_commit.repository import all_hooks
27 from pre_commit.repository import install_hook_envs
28 from pre_commit.staged_files_only import staged_files_only
29 from pre_commit.store import Store
30 from pre_commit.util import cmd_output_b
31 from pre_commit.util import EnvironT
32
33
34 logger = logging.getLogger('pre_commit')
35
36
37 def _len_cjk(msg: str) -> int:
38 widths = {'A': 1, 'F': 2, 'H': 1, 'N': 1, 'Na': 1, 'W': 2}
39 return sum(widths[unicodedata.east_asian_width(c)] for c in msg)
40
41
42 def _start_msg(*, start: str, cols: int, end_len: int) -> str:
43 dots = '.' * (cols - _len_cjk(start) - end_len - 1)
44 return f'{start}{dots}'
45
46
47 def _full_msg(
48 *,
49 start: str,
50 cols: int,
51 end_msg: str,
52 end_color: str,
53 use_color: bool,
54 postfix: str = '',
55 ) -> str:
56 dots = '.' * (cols - _len_cjk(start) - len(postfix) - len(end_msg) - 1)
57 end = color.format_color(end_msg, end_color, use_color)
58 return f'{start}{dots}{postfix}{end}\n'
59
60
61 def filter_by_include_exclude(
62 names: Collection[str],
63 include: str,
64 exclude: str,
65 ) -> List[str]:
66 include_re, exclude_re = re.compile(include), re.compile(exclude)
67 return [
68 filename for filename in names
69 if include_re.search(filename)
70 if not exclude_re.search(filename)
71 ]
72
73
74 class Classifier:
75 def __init__(self, filenames: Sequence[str]) -> None:
76 # on windows we normalize all filenames to use forward slashes
77 # this makes it easier to filter using the `files:` regex
78 # this also makes improperly quoted shell-based hooks work better
79 # see #1173
80 if os.altsep == '/' and os.sep == '\\':
81 filenames = [f.replace(os.sep, os.altsep) for f in filenames]
82 self.filenames = [f for f in filenames if os.path.lexists(f)]
83
84 @functools.lru_cache(maxsize=None)
85 def _types_for_file(self, filename: str) -> Set[str]:
86 return tags_from_path(filename)
87
88 def by_types(
89 self,
90 names: Sequence[str],
91 types: Collection[str],
92 exclude_types: Collection[str],
93 ) -> List[str]:
94 types, exclude_types = frozenset(types), frozenset(exclude_types)
95 ret = []
96 for filename in names:
97 tags = self._types_for_file(filename)
98 if tags >= types and not tags & exclude_types:
99 ret.append(filename)
100 return ret
101
102 def filenames_for_hook(self, hook: Hook) -> Tuple[str, ...]:
103 names = self.filenames
104 names = filter_by_include_exclude(names, hook.files, hook.exclude)
105 names = self.by_types(names, hook.types, hook.exclude_types)
106 return tuple(names)
107
108
109 def _get_skips(environ: EnvironT) -> Set[str]:
110 skips = environ.get('SKIP', '')
111 return {skip.strip() for skip in skips.split(',') if skip.strip()}
112
113
114 SKIPPED = 'Skipped'
115 NO_FILES = '(no files to check)'
116
117
118 def _subtle_line(s: str, use_color: bool) -> None:
119 output.write_line(color.format_color(s, color.SUBTLE, use_color))
120
121
122 def _run_single_hook(
123 classifier: Classifier,
124 hook: Hook,
125 skips: Set[str],
126 cols: int,
127 verbose: bool,
128 use_color: bool,
129 ) -> bool:
130 filenames = classifier.filenames_for_hook(hook)
131
132 if hook.id in skips or hook.alias in skips:
133 output.write(
134 _full_msg(
135 start=hook.name,
136 end_msg=SKIPPED,
137 end_color=color.YELLOW,
138 use_color=use_color,
139 cols=cols,
140 ),
141 )
142 duration = None
143 retcode = 0
144 files_modified = False
145 out = b''
146 elif not filenames and not hook.always_run:
147 output.write(
148 _full_msg(
149 start=hook.name,
150 postfix=NO_FILES,
151 end_msg=SKIPPED,
152 end_color=color.TURQUOISE,
153 use_color=use_color,
154 cols=cols,
155 ),
156 )
157 duration = None
158 retcode = 0
159 files_modified = False
160 out = b''
161 else:
162 # print hook and dots first in case the hook takes a while to run
163 output.write(_start_msg(start=hook.name, end_len=6, cols=cols))
164
165 diff_cmd = ('git', 'diff', '--no-ext-diff')
166 diff_before = cmd_output_b(*diff_cmd, retcode=None)
167 if not hook.pass_filenames:
168 filenames = ()
169 time_before = time.time()
170 language = languages[hook.language]
171 retcode, out = language.run_hook(hook, filenames, use_color)
172 duration = round(time.time() - time_before, 2) or 0
173 diff_after = cmd_output_b(*diff_cmd, retcode=None)
174
175 # if the hook makes changes, fail the commit
176 files_modified = diff_before != diff_after
177
178 if retcode or files_modified:
179 print_color = color.RED
180 status = 'Failed'
181 else:
182 print_color = color.GREEN
183 status = 'Passed'
184
185 output.write_line(color.format_color(status, print_color, use_color))
186
187 if verbose or hook.verbose or retcode or files_modified:
188 _subtle_line(f'- hook id: {hook.id}', use_color)
189
190 if (verbose or hook.verbose) and duration is not None:
191 _subtle_line(f'- duration: {duration}s', use_color)
192
193 if retcode:
194 _subtle_line(f'- exit code: {retcode}', use_color)
195
196 # Print a message if failing due to file modifications
197 if files_modified:
198 _subtle_line('- files were modified by this hook', use_color)
199
200 if out.strip():
201 output.write_line()
202 output.write_line_b(out.strip(), logfile_name=hook.log_file)
203 output.write_line()
204
205 return files_modified or bool(retcode)
206
207
208 def _compute_cols(hooks: Sequence[Hook]) -> int:
209 """Compute the number of columns to display hook messages. The widest
210 that will be displayed is in the no files skipped case:
211
212 Hook name...(no files to check) Skipped
213 """
214 if hooks:
215 name_len = max(_len_cjk(hook.name) for hook in hooks)
216 else:
217 name_len = 0
218
219 cols = name_len + 3 + len(NO_FILES) + 1 + len(SKIPPED)
220 return max(cols, 80)
221
222
223 def _all_filenames(args: argparse.Namespace) -> Collection[str]:
224 # these hooks do not operate on files
225 if args.hook_stage in {'post-checkout', 'post-commit'}:
226 return ()
227 elif args.hook_stage in {'prepare-commit-msg', 'commit-msg'}:
228 return (args.commit_msg_filename,)
229 elif args.from_ref and args.to_ref:
230 return git.get_changed_files(args.from_ref, args.to_ref)
231 elif args.files:
232 return args.files
233 elif args.all_files:
234 return git.get_all_files()
235 elif git.is_in_merge_conflict():
236 return git.get_conflicted_files()
237 else:
238 return git.get_staged_files()
239
240
241 def _run_hooks(
242 config: Dict[str, Any],
243 hooks: Sequence[Hook],
244 args: argparse.Namespace,
245 environ: EnvironT,
246 ) -> int:
247 """Actually run the hooks."""
248 skips = _get_skips(environ)
249 cols = _compute_cols(hooks)
250 filenames = filter_by_include_exclude(
251 _all_filenames(args), config['files'], config['exclude'],
252 )
253 classifier = Classifier(filenames)
254 retval = 0
255 for hook in hooks:
256 retval |= _run_single_hook(
257 classifier, hook, skips, cols,
258 verbose=args.verbose, use_color=args.color,
259 )
260 if retval and config['fail_fast']:
261 break
262 if retval and args.show_diff_on_failure and git.has_diff():
263 if args.all_files:
264 output.write_line(
265 'pre-commit hook(s) made changes.\n'
266 'If you are seeing this message in CI, '
267 'reproduce locally with: `pre-commit run --all-files`.\n'
268 'To run `pre-commit` as part of git workflow, use '
269 '`pre-commit install`.',
270 )
271 output.write_line('All changes made by hooks:')
272 # args.color is a boolean.
273 # See user_color function in color.py
274 git_color_opt = 'always' if args.color else 'never'
275 subprocess.call((
276 'git', '--no-pager', 'diff', '--no-ext-diff',
277 f'--color={git_color_opt}',
278 ))
279
280 return retval
281
282
283 def _has_unmerged_paths() -> bool:
284 _, stdout, _ = cmd_output_b('git', 'ls-files', '--unmerged')
285 return bool(stdout.strip())
286
287
288 def _has_unstaged_config(config_file: str) -> bool:
289 retcode, _, _ = cmd_output_b(
290 'git', 'diff', '--no-ext-diff', '--exit-code', config_file,
291 retcode=None,
292 )
293 # be explicit, other git errors don't mean it has an unstaged config.
294 return retcode == 1
295
296
297 def run(
298 config_file: str,
299 store: Store,
300 args: argparse.Namespace,
301 environ: EnvironT = os.environ,
302 ) -> int:
303 stash = not args.all_files and not args.files
304
305 # Check if we have unresolved merge conflict files and fail fast.
306 if _has_unmerged_paths():
307 logger.error('Unmerged files. Resolve before committing.')
308 return 1
309 if bool(args.from_ref) != bool(args.to_ref):
310 logger.error('Specify both --from-ref and --to-ref.')
311 return 1
312 if stash and _has_unstaged_config(config_file):
313 logger.error(
314 f'Your pre-commit configuration is unstaged.\n'
315 f'`git add {config_file}` to fix this.',
316 )
317 return 1
318 if (
319 args.hook_stage in {'prepare-commit-msg', 'commit-msg'} and
320 not args.commit_msg_filename
321 ):
322 logger.error(
323 f'`--commit-msg-filename` is required for '
324 f'`--hook-stage {args.hook_stage}`',
325 )
326 return 1
327 # prevent recursive post-checkout hooks (#1418)
328 if (
329 args.hook_stage == 'post-checkout' and
330 environ.get('_PRE_COMMIT_SKIP_POST_CHECKOUT')
331 ):
332 return 0
333
334 # Expose from-ref / to-ref as environment variables for hooks to consume
335 if args.from_ref and args.to_ref:
336 # legacy names
337 environ['PRE_COMMIT_ORIGIN'] = args.from_ref
338 environ['PRE_COMMIT_SOURCE'] = args.to_ref
339 # new names
340 environ['PRE_COMMIT_FROM_REF'] = args.from_ref
341 environ['PRE_COMMIT_TO_REF'] = args.to_ref
342
343 if args.remote_name and args.remote_url:
344 environ['PRE_COMMIT_REMOTE_NAME'] = args.remote_name
345 environ['PRE_COMMIT_REMOTE_URL'] = args.remote_url
346
347 if args.checkout_type:
348 environ['PRE_COMMIT_CHECKOUT_TYPE'] = args.checkout_type
349
350 with contextlib.ExitStack() as exit_stack:
351 if stash:
352 exit_stack.enter_context(staged_files_only(store.directory))
353
354 config = load_config(config_file)
355 hooks = [
356 hook
357 for hook in all_hooks(config, store)
358 if not args.hook or hook.id == args.hook or hook.alias == args.hook
359 if args.hook_stage in hook.stages
360 ]
361
362 if args.hook and not hooks:
363 output.write_line(
364 f'No hook with id `{args.hook}` in stage `{args.hook_stage}`',
365 )
366 return 1
367
368 install_hook_envs(hooks, store)
369
370 return _run_hooks(config, hooks, args, environ)
371
372 # https://github.com/python/mypy/issues/7726
373 raise AssertionError('unreachable')
374
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pre_commit/commands/run.py b/pre_commit/commands/run.py
--- a/pre_commit/commands/run.py
+++ b/pre_commit/commands/run.py
@@ -347,6 +347,9 @@
if args.checkout_type:
environ['PRE_COMMIT_CHECKOUT_TYPE'] = args.checkout_type
+ # Set pre_commit flag
+ environ['PRE_COMMIT'] = '1'
+
with contextlib.ExitStack() as exit_stack:
if stash:
exit_stack.enter_context(staged_files_only(store.directory))
| {"golden_diff": "diff --git a/pre_commit/commands/run.py b/pre_commit/commands/run.py\n--- a/pre_commit/commands/run.py\n+++ b/pre_commit/commands/run.py\n@@ -347,6 +347,9 @@\n if args.checkout_type:\n environ['PRE_COMMIT_CHECKOUT_TYPE'] = args.checkout_type\n \n+ # Set pre_commit flag\n+ environ['PRE_COMMIT'] = '1'\n+\n with contextlib.ExitStack() as exit_stack:\n if stash:\n exit_stack.enter_context(staged_files_only(store.directory))\n", "issue": "Expose an env variable that indicates if pre-commit is running\nIn some circumstances, it could be helpful to know inside an executable / function / script if it was invoked from pre-commit or from some other process. I wrote a hook that just printed all env variables set but I could not find an env variable like `PRECOMMIT_RUNNING` or similar. Would you consider setting such an env variable during running pre-commit? I searched old issues and documentation but I could not find anything.\n", "before_files": [{"content": "import argparse\nimport contextlib\nimport functools\nimport logging\nimport os\nimport re\nimport subprocess\nimport time\nimport unicodedata\nfrom typing import Any\nfrom typing import Collection\nfrom typing import Dict\nfrom typing import List\nfrom typing import Sequence\nfrom typing import Set\nfrom typing import Tuple\n\nfrom identify.identify import tags_from_path\n\nfrom pre_commit import color\nfrom pre_commit import git\nfrom pre_commit import output\nfrom pre_commit.clientlib import load_config\nfrom pre_commit.hook import Hook\nfrom pre_commit.languages.all import languages\nfrom pre_commit.repository import all_hooks\nfrom pre_commit.repository import install_hook_envs\nfrom pre_commit.staged_files_only import staged_files_only\nfrom pre_commit.store import Store\nfrom pre_commit.util import cmd_output_b\nfrom pre_commit.util import EnvironT\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef _len_cjk(msg: str) -> int:\n widths = {'A': 1, 'F': 2, 'H': 1, 'N': 1, 'Na': 1, 'W': 2}\n return sum(widths[unicodedata.east_asian_width(c)] for c in msg)\n\n\ndef _start_msg(*, start: str, cols: int, end_len: int) -> str:\n dots = '.' * (cols - _len_cjk(start) - end_len - 1)\n return f'{start}{dots}'\n\n\ndef _full_msg(\n *,\n start: str,\n cols: int,\n end_msg: str,\n end_color: str,\n use_color: bool,\n postfix: str = '',\n) -> str:\n dots = '.' 
* (cols - _len_cjk(start) - len(postfix) - len(end_msg) - 1)\n end = color.format_color(end_msg, end_color, use_color)\n return f'{start}{dots}{postfix}{end}\\n'\n\n\ndef filter_by_include_exclude(\n names: Collection[str],\n include: str,\n exclude: str,\n) -> List[str]:\n include_re, exclude_re = re.compile(include), re.compile(exclude)\n return [\n filename for filename in names\n if include_re.search(filename)\n if not exclude_re.search(filename)\n ]\n\n\nclass Classifier:\n def __init__(self, filenames: Sequence[str]) -> None:\n # on windows we normalize all filenames to use forward slashes\n # this makes it easier to filter using the `files:` regex\n # this also makes improperly quoted shell-based hooks work better\n # see #1173\n if os.altsep == '/' and os.sep == '\\\\':\n filenames = [f.replace(os.sep, os.altsep) for f in filenames]\n self.filenames = [f for f in filenames if os.path.lexists(f)]\n\n @functools.lru_cache(maxsize=None)\n def _types_for_file(self, filename: str) -> Set[str]:\n return tags_from_path(filename)\n\n def by_types(\n self,\n names: Sequence[str],\n types: Collection[str],\n exclude_types: Collection[str],\n ) -> List[str]:\n types, exclude_types = frozenset(types), frozenset(exclude_types)\n ret = []\n for filename in names:\n tags = self._types_for_file(filename)\n if tags >= types and not tags & exclude_types:\n ret.append(filename)\n return ret\n\n def filenames_for_hook(self, hook: Hook) -> Tuple[str, ...]:\n names = self.filenames\n names = filter_by_include_exclude(names, hook.files, hook.exclude)\n names = self.by_types(names, hook.types, hook.exclude_types)\n return tuple(names)\n\n\ndef _get_skips(environ: EnvironT) -> Set[str]:\n skips = environ.get('SKIP', '')\n return {skip.strip() for skip in skips.split(',') if skip.strip()}\n\n\nSKIPPED = 'Skipped'\nNO_FILES = '(no files to check)'\n\n\ndef _subtle_line(s: str, use_color: bool) -> None:\n output.write_line(color.format_color(s, color.SUBTLE, use_color))\n\n\ndef _run_single_hook(\n classifier: Classifier,\n hook: Hook,\n skips: Set[str],\n cols: int,\n verbose: bool,\n use_color: bool,\n) -> bool:\n filenames = classifier.filenames_for_hook(hook)\n\n if hook.id in skips or hook.alias in skips:\n output.write(\n _full_msg(\n start=hook.name,\n end_msg=SKIPPED,\n end_color=color.YELLOW,\n use_color=use_color,\n cols=cols,\n ),\n )\n duration = None\n retcode = 0\n files_modified = False\n out = b''\n elif not filenames and not hook.always_run:\n output.write(\n _full_msg(\n start=hook.name,\n postfix=NO_FILES,\n end_msg=SKIPPED,\n end_color=color.TURQUOISE,\n use_color=use_color,\n cols=cols,\n ),\n )\n duration = None\n retcode = 0\n files_modified = False\n out = b''\n else:\n # print hook and dots first in case the hook takes a while to run\n output.write(_start_msg(start=hook.name, end_len=6, cols=cols))\n\n diff_cmd = ('git', 'diff', '--no-ext-diff')\n diff_before = cmd_output_b(*diff_cmd, retcode=None)\n if not hook.pass_filenames:\n filenames = ()\n time_before = time.time()\n language = languages[hook.language]\n retcode, out = language.run_hook(hook, filenames, use_color)\n duration = round(time.time() - time_before, 2) or 0\n diff_after = cmd_output_b(*diff_cmd, retcode=None)\n\n # if the hook makes changes, fail the commit\n files_modified = diff_before != diff_after\n\n if retcode or files_modified:\n print_color = color.RED\n status = 'Failed'\n else:\n print_color = color.GREEN\n status = 'Passed'\n\n output.write_line(color.format_color(status, print_color, use_color))\n\n if 
verbose or hook.verbose or retcode or files_modified:\n _subtle_line(f'- hook id: {hook.id}', use_color)\n\n if (verbose or hook.verbose) and duration is not None:\n _subtle_line(f'- duration: {duration}s', use_color)\n\n if retcode:\n _subtle_line(f'- exit code: {retcode}', use_color)\n\n # Print a message if failing due to file modifications\n if files_modified:\n _subtle_line('- files were modified by this hook', use_color)\n\n if out.strip():\n output.write_line()\n output.write_line_b(out.strip(), logfile_name=hook.log_file)\n output.write_line()\n\n return files_modified or bool(retcode)\n\n\ndef _compute_cols(hooks: Sequence[Hook]) -> int:\n \"\"\"Compute the number of columns to display hook messages. The widest\n that will be displayed is in the no files skipped case:\n\n Hook name...(no files to check) Skipped\n \"\"\"\n if hooks:\n name_len = max(_len_cjk(hook.name) for hook in hooks)\n else:\n name_len = 0\n\n cols = name_len + 3 + len(NO_FILES) + 1 + len(SKIPPED)\n return max(cols, 80)\n\n\ndef _all_filenames(args: argparse.Namespace) -> Collection[str]:\n # these hooks do not operate on files\n if args.hook_stage in {'post-checkout', 'post-commit'}:\n return ()\n elif args.hook_stage in {'prepare-commit-msg', 'commit-msg'}:\n return (args.commit_msg_filename,)\n elif args.from_ref and args.to_ref:\n return git.get_changed_files(args.from_ref, args.to_ref)\n elif args.files:\n return args.files\n elif args.all_files:\n return git.get_all_files()\n elif git.is_in_merge_conflict():\n return git.get_conflicted_files()\n else:\n return git.get_staged_files()\n\n\ndef _run_hooks(\n config: Dict[str, Any],\n hooks: Sequence[Hook],\n args: argparse.Namespace,\n environ: EnvironT,\n) -> int:\n \"\"\"Actually run the hooks.\"\"\"\n skips = _get_skips(environ)\n cols = _compute_cols(hooks)\n filenames = filter_by_include_exclude(\n _all_filenames(args), config['files'], config['exclude'],\n )\n classifier = Classifier(filenames)\n retval = 0\n for hook in hooks:\n retval |= _run_single_hook(\n classifier, hook, skips, cols,\n verbose=args.verbose, use_color=args.color,\n )\n if retval and config['fail_fast']:\n break\n if retval and args.show_diff_on_failure and git.has_diff():\n if args.all_files:\n output.write_line(\n 'pre-commit hook(s) made changes.\\n'\n 'If you are seeing this message in CI, '\n 'reproduce locally with: `pre-commit run --all-files`.\\n'\n 'To run `pre-commit` as part of git workflow, use '\n '`pre-commit install`.',\n )\n output.write_line('All changes made by hooks:')\n # args.color is a boolean.\n # See user_color function in color.py\n git_color_opt = 'always' if args.color else 'never'\n subprocess.call((\n 'git', '--no-pager', 'diff', '--no-ext-diff',\n f'--color={git_color_opt}',\n ))\n\n return retval\n\n\ndef _has_unmerged_paths() -> bool:\n _, stdout, _ = cmd_output_b('git', 'ls-files', '--unmerged')\n return bool(stdout.strip())\n\n\ndef _has_unstaged_config(config_file: str) -> bool:\n retcode, _, _ = cmd_output_b(\n 'git', 'diff', '--no-ext-diff', '--exit-code', config_file,\n retcode=None,\n )\n # be explicit, other git errors don't mean it has an unstaged config.\n return retcode == 1\n\n\ndef run(\n config_file: str,\n store: Store,\n args: argparse.Namespace,\n environ: EnvironT = os.environ,\n) -> int:\n stash = not args.all_files and not args.files\n\n # Check if we have unresolved merge conflict files and fail fast.\n if _has_unmerged_paths():\n logger.error('Unmerged files. 
Resolve before committing.')\n return 1\n if bool(args.from_ref) != bool(args.to_ref):\n logger.error('Specify both --from-ref and --to-ref.')\n return 1\n if stash and _has_unstaged_config(config_file):\n logger.error(\n f'Your pre-commit configuration is unstaged.\\n'\n f'`git add {config_file}` to fix this.',\n )\n return 1\n if (\n args.hook_stage in {'prepare-commit-msg', 'commit-msg'} and\n not args.commit_msg_filename\n ):\n logger.error(\n f'`--commit-msg-filename` is required for '\n f'`--hook-stage {args.hook_stage}`',\n )\n return 1\n # prevent recursive post-checkout hooks (#1418)\n if (\n args.hook_stage == 'post-checkout' and\n environ.get('_PRE_COMMIT_SKIP_POST_CHECKOUT')\n ):\n return 0\n\n # Expose from-ref / to-ref as environment variables for hooks to consume\n if args.from_ref and args.to_ref:\n # legacy names\n environ['PRE_COMMIT_ORIGIN'] = args.from_ref\n environ['PRE_COMMIT_SOURCE'] = args.to_ref\n # new names\n environ['PRE_COMMIT_FROM_REF'] = args.from_ref\n environ['PRE_COMMIT_TO_REF'] = args.to_ref\n\n if args.remote_name and args.remote_url:\n environ['PRE_COMMIT_REMOTE_NAME'] = args.remote_name\n environ['PRE_COMMIT_REMOTE_URL'] = args.remote_url\n\n if args.checkout_type:\n environ['PRE_COMMIT_CHECKOUT_TYPE'] = args.checkout_type\n\n with contextlib.ExitStack() as exit_stack:\n if stash:\n exit_stack.enter_context(staged_files_only(store.directory))\n\n config = load_config(config_file)\n hooks = [\n hook\n for hook in all_hooks(config, store)\n if not args.hook or hook.id == args.hook or hook.alias == args.hook\n if args.hook_stage in hook.stages\n ]\n\n if args.hook and not hooks:\n output.write_line(\n f'No hook with id `{args.hook}` in stage `{args.hook_stage}`',\n )\n return 1\n\n install_hook_envs(hooks, store)\n\n return _run_hooks(config, hooks, args, environ)\n\n # https://github.com/python/mypy/issues/7726\n raise AssertionError('unreachable')\n", "path": "pre_commit/commands/run.py"}], "after_files": [{"content": "import argparse\nimport contextlib\nimport functools\nimport logging\nimport os\nimport re\nimport subprocess\nimport time\nimport unicodedata\nfrom typing import Any\nfrom typing import Collection\nfrom typing import Dict\nfrom typing import List\nfrom typing import Sequence\nfrom typing import Set\nfrom typing import Tuple\n\nfrom identify.identify import tags_from_path\n\nfrom pre_commit import color\nfrom pre_commit import git\nfrom pre_commit import output\nfrom pre_commit.clientlib import load_config\nfrom pre_commit.hook import Hook\nfrom pre_commit.languages.all import languages\nfrom pre_commit.repository import all_hooks\nfrom pre_commit.repository import install_hook_envs\nfrom pre_commit.staged_files_only import staged_files_only\nfrom pre_commit.store import Store\nfrom pre_commit.util import cmd_output_b\nfrom pre_commit.util import EnvironT\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef _len_cjk(msg: str) -> int:\n widths = {'A': 1, 'F': 2, 'H': 1, 'N': 1, 'Na': 1, 'W': 2}\n return sum(widths[unicodedata.east_asian_width(c)] for c in msg)\n\n\ndef _start_msg(*, start: str, cols: int, end_len: int) -> str:\n dots = '.' * (cols - _len_cjk(start) - end_len - 1)\n return f'{start}{dots}'\n\n\ndef _full_msg(\n *,\n start: str,\n cols: int,\n end_msg: str,\n end_color: str,\n use_color: bool,\n postfix: str = '',\n) -> str:\n dots = '.' 
* (cols - _len_cjk(start) - len(postfix) - len(end_msg) - 1)\n end = color.format_color(end_msg, end_color, use_color)\n return f'{start}{dots}{postfix}{end}\\n'\n\n\ndef filter_by_include_exclude(\n names: Collection[str],\n include: str,\n exclude: str,\n) -> List[str]:\n include_re, exclude_re = re.compile(include), re.compile(exclude)\n return [\n filename for filename in names\n if include_re.search(filename)\n if not exclude_re.search(filename)\n ]\n\n\nclass Classifier:\n def __init__(self, filenames: Sequence[str]) -> None:\n # on windows we normalize all filenames to use forward slashes\n # this makes it easier to filter using the `files:` regex\n # this also makes improperly quoted shell-based hooks work better\n # see #1173\n if os.altsep == '/' and os.sep == '\\\\':\n filenames = [f.replace(os.sep, os.altsep) for f in filenames]\n self.filenames = [f for f in filenames if os.path.lexists(f)]\n\n @functools.lru_cache(maxsize=None)\n def _types_for_file(self, filename: str) -> Set[str]:\n return tags_from_path(filename)\n\n def by_types(\n self,\n names: Sequence[str],\n types: Collection[str],\n exclude_types: Collection[str],\n ) -> List[str]:\n types, exclude_types = frozenset(types), frozenset(exclude_types)\n ret = []\n for filename in names:\n tags = self._types_for_file(filename)\n if tags >= types and not tags & exclude_types:\n ret.append(filename)\n return ret\n\n def filenames_for_hook(self, hook: Hook) -> Tuple[str, ...]:\n names = self.filenames\n names = filter_by_include_exclude(names, hook.files, hook.exclude)\n names = self.by_types(names, hook.types, hook.exclude_types)\n return tuple(names)\n\n\ndef _get_skips(environ: EnvironT) -> Set[str]:\n skips = environ.get('SKIP', '')\n return {skip.strip() for skip in skips.split(',') if skip.strip()}\n\n\nSKIPPED = 'Skipped'\nNO_FILES = '(no files to check)'\n\n\ndef _subtle_line(s: str, use_color: bool) -> None:\n output.write_line(color.format_color(s, color.SUBTLE, use_color))\n\n\ndef _run_single_hook(\n classifier: Classifier,\n hook: Hook,\n skips: Set[str],\n cols: int,\n verbose: bool,\n use_color: bool,\n) -> bool:\n filenames = classifier.filenames_for_hook(hook)\n\n if hook.id in skips or hook.alias in skips:\n output.write(\n _full_msg(\n start=hook.name,\n end_msg=SKIPPED,\n end_color=color.YELLOW,\n use_color=use_color,\n cols=cols,\n ),\n )\n duration = None\n retcode = 0\n files_modified = False\n out = b''\n elif not filenames and not hook.always_run:\n output.write(\n _full_msg(\n start=hook.name,\n postfix=NO_FILES,\n end_msg=SKIPPED,\n end_color=color.TURQUOISE,\n use_color=use_color,\n cols=cols,\n ),\n )\n duration = None\n retcode = 0\n files_modified = False\n out = b''\n else:\n # print hook and dots first in case the hook takes a while to run\n output.write(_start_msg(start=hook.name, end_len=6, cols=cols))\n\n diff_cmd = ('git', 'diff', '--no-ext-diff')\n diff_before = cmd_output_b(*diff_cmd, retcode=None)\n if not hook.pass_filenames:\n filenames = ()\n time_before = time.time()\n language = languages[hook.language]\n retcode, out = language.run_hook(hook, filenames, use_color)\n duration = round(time.time() - time_before, 2) or 0\n diff_after = cmd_output_b(*diff_cmd, retcode=None)\n\n # if the hook makes changes, fail the commit\n files_modified = diff_before != diff_after\n\n if retcode or files_modified:\n print_color = color.RED\n status = 'Failed'\n else:\n print_color = color.GREEN\n status = 'Passed'\n\n output.write_line(color.format_color(status, print_color, use_color))\n\n if 
verbose or hook.verbose or retcode or files_modified:\n _subtle_line(f'- hook id: {hook.id}', use_color)\n\n if (verbose or hook.verbose) and duration is not None:\n _subtle_line(f'- duration: {duration}s', use_color)\n\n if retcode:\n _subtle_line(f'- exit code: {retcode}', use_color)\n\n # Print a message if failing due to file modifications\n if files_modified:\n _subtle_line('- files were modified by this hook', use_color)\n\n if out.strip():\n output.write_line()\n output.write_line_b(out.strip(), logfile_name=hook.log_file)\n output.write_line()\n\n return files_modified or bool(retcode)\n\n\ndef _compute_cols(hooks: Sequence[Hook]) -> int:\n \"\"\"Compute the number of columns to display hook messages. The widest\n that will be displayed is in the no files skipped case:\n\n Hook name...(no files to check) Skipped\n \"\"\"\n if hooks:\n name_len = max(_len_cjk(hook.name) for hook in hooks)\n else:\n name_len = 0\n\n cols = name_len + 3 + len(NO_FILES) + 1 + len(SKIPPED)\n return max(cols, 80)\n\n\ndef _all_filenames(args: argparse.Namespace) -> Collection[str]:\n # these hooks do not operate on files\n if args.hook_stage in {'post-checkout', 'post-commit'}:\n return ()\n elif args.hook_stage in {'prepare-commit-msg', 'commit-msg'}:\n return (args.commit_msg_filename,)\n elif args.from_ref and args.to_ref:\n return git.get_changed_files(args.from_ref, args.to_ref)\n elif args.files:\n return args.files\n elif args.all_files:\n return git.get_all_files()\n elif git.is_in_merge_conflict():\n return git.get_conflicted_files()\n else:\n return git.get_staged_files()\n\n\ndef _run_hooks(\n config: Dict[str, Any],\n hooks: Sequence[Hook],\n args: argparse.Namespace,\n environ: EnvironT,\n) -> int:\n \"\"\"Actually run the hooks.\"\"\"\n skips = _get_skips(environ)\n cols = _compute_cols(hooks)\n filenames = filter_by_include_exclude(\n _all_filenames(args), config['files'], config['exclude'],\n )\n classifier = Classifier(filenames)\n retval = 0\n for hook in hooks:\n retval |= _run_single_hook(\n classifier, hook, skips, cols,\n verbose=args.verbose, use_color=args.color,\n )\n if retval and config['fail_fast']:\n break\n if retval and args.show_diff_on_failure and git.has_diff():\n if args.all_files:\n output.write_line(\n 'pre-commit hook(s) made changes.\\n'\n 'If you are seeing this message in CI, '\n 'reproduce locally with: `pre-commit run --all-files`.\\n'\n 'To run `pre-commit` as part of git workflow, use '\n '`pre-commit install`.',\n )\n output.write_line('All changes made by hooks:')\n # args.color is a boolean.\n # See user_color function in color.py\n git_color_opt = 'always' if args.color else 'never'\n subprocess.call((\n 'git', '--no-pager', 'diff', '--no-ext-diff',\n f'--color={git_color_opt}',\n ))\n\n return retval\n\n\ndef _has_unmerged_paths() -> bool:\n _, stdout, _ = cmd_output_b('git', 'ls-files', '--unmerged')\n return bool(stdout.strip())\n\n\ndef _has_unstaged_config(config_file: str) -> bool:\n retcode, _, _ = cmd_output_b(\n 'git', 'diff', '--no-ext-diff', '--exit-code', config_file,\n retcode=None,\n )\n # be explicit, other git errors don't mean it has an unstaged config.\n return retcode == 1\n\n\ndef run(\n config_file: str,\n store: Store,\n args: argparse.Namespace,\n environ: EnvironT = os.environ,\n) -> int:\n stash = not args.all_files and not args.files\n\n # Check if we have unresolved merge conflict files and fail fast.\n if _has_unmerged_paths():\n logger.error('Unmerged files. 
Resolve before committing.')\n return 1\n if bool(args.from_ref) != bool(args.to_ref):\n logger.error('Specify both --from-ref and --to-ref.')\n return 1\n if stash and _has_unstaged_config(config_file):\n logger.error(\n f'Your pre-commit configuration is unstaged.\\n'\n f'`git add {config_file}` to fix this.',\n )\n return 1\n if (\n args.hook_stage in {'prepare-commit-msg', 'commit-msg'} and\n not args.commit_msg_filename\n ):\n logger.error(\n f'`--commit-msg-filename` is required for '\n f'`--hook-stage {args.hook_stage}`',\n )\n return 1\n # prevent recursive post-checkout hooks (#1418)\n if (\n args.hook_stage == 'post-checkout' and\n environ.get('_PRE_COMMIT_SKIP_POST_CHECKOUT')\n ):\n return 0\n\n # Expose from-ref / to-ref as environment variables for hooks to consume\n if args.from_ref and args.to_ref:\n # legacy names\n environ['PRE_COMMIT_ORIGIN'] = args.from_ref\n environ['PRE_COMMIT_SOURCE'] = args.to_ref\n # new names\n environ['PRE_COMMIT_FROM_REF'] = args.from_ref\n environ['PRE_COMMIT_TO_REF'] = args.to_ref\n\n if args.remote_name and args.remote_url:\n environ['PRE_COMMIT_REMOTE_NAME'] = args.remote_name\n environ['PRE_COMMIT_REMOTE_URL'] = args.remote_url\n\n if args.checkout_type:\n environ['PRE_COMMIT_CHECKOUT_TYPE'] = args.checkout_type\n\n # Set pre_commit flag\n environ['PRE_COMMIT'] = '1'\n\n with contextlib.ExitStack() as exit_stack:\n if stash:\n exit_stack.enter_context(staged_files_only(store.directory))\n\n config = load_config(config_file)\n hooks = [\n hook\n for hook in all_hooks(config, store)\n if not args.hook or hook.id == args.hook or hook.alias == args.hook\n if args.hook_stage in hook.stages\n ]\n\n if args.hook and not hooks:\n output.write_line(\n f'No hook with id `{args.hook}` in stage `{args.hook_stage}`',\n )\n return 1\n\n install_hook_envs(hooks, store)\n\n return _run_hooks(config, hooks, args, environ)\n\n # https://github.com/python/mypy/issues/7726\n raise AssertionError('unreachable')\n", "path": "pre_commit/commands/run.py"}]} |
gh_patches_debug_1255 | rasdani/github-patches | git_diff | conda__conda-build-2451 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Invalid cross-device-link
Example output:
```bash
INFO:conda_build.build:Packaging <...>
number of files: 63
Fixing permissions
Traceback (most recent call last):
File "./build.py", line 599, in build_single
built = cb.api.build(RECIPE, **bargs)
File "$CONDA_ROOT/lib/python3.6/site-packages/conda_build/api.py", line 185, in build
need_source_download=need_source_download, config=config, variants=variants)
File "$CONDA_ROOT/lib/python3.6/site-packages/conda_build/build.py", line 1782, in build_tree
notest=notest,
File "$CONDA_ROOT/lib/python3.6/site-packages/conda_build/build.py", line 1241, in build
built_package = bundlers[output_d.get('type', 'conda')](output_d, m, env)
File "$CONDA_ROOT/lib/python3.6/site-packages/conda_build/build.py", line 719, in bundle_conda
files = post_process_files(metadata, initial_files)
File "$CONDA_ROOT/lib/python3.6/site-packages/conda_build/build.py", line 656, in post_process_files
croot=m.config.croot)
File "$CONDA_ROOT/lib/python3.6/site-packages/conda_build/post.py", line 444, in post_build
make_hardlink_copy(f, prefix)
File "$CONDA_ROOT/lib/python3.6/site-packages/conda_build/post.py", line 517, in make_hardlink_copy
os.rename(os.path.join(dest, fn), path)
OSError: [Errno 18] Invalid cross-device link: '/tmp/tmpg6gbg6di/libgcc_s.so' -> '$PREFIX/lib/libgcc_s.so'
```
I bisected the issue to 9914f397053312172feb4bc9f312adbe98b0ae3c
The issue seems to appear when packaging runtime libraries with always_include_files, e.g. libgcc, libllvm, libmpich, ...
**meta.yaml**
```yaml
<...>
build:
always_include_files:
- lib/libgcc_s.so
- <...>
```
No issue with 3.0.25, but issues since 3.0.26.
Maybe related: https://github.com/conda/conda-build/issues/1659
Thanks.
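
For context on the failure mode: `rename(2)` only works within a single filesystem, so moving a file out of a `TemporaryDirectory` (typically on `/tmp`, often a separate tmpfs) into the build prefix on another device fails with `errno.EXDEV` ("Invalid cross-device link", errno 18). The snippet below is a minimal, hypothetical sketch of that failure and the usual copy-based workaround — it is not conda-build code, just an illustration:

```python
import errno
import os
import shutil


def move_across_devices(src, dst):
    """Move ``src`` to ``dst`` even if the two paths live on different filesystems."""
    try:
        # rename(2) is atomic but cannot cross filesystem boundaries;
        # across devices it raises OSError(errno.EXDEV,
        # "Invalid cross-device link") -- the error shown in the traceback above.
        os.rename(src, dst)
    except OSError as exc:
        if exc.errno != errno.EXDEV:
            raise
        # Fall back to copy-then-delete, which works regardless of which
        # device each path lives on.
        shutil.copy2(src, dst)
        os.unlink(src)
```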
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conda_build/post.py`
Content:
```
1 from __future__ import absolute_import, division, print_function
2
3 from collections import defaultdict
4 import fnmatch
5 from functools import partial
6 from glob import glob
7 import io
8 import locale
9 import re
10 import os
11 import stat
12 from subprocess import call, check_output
13 import sys
14 try:
15 from os import readlink
16 except ImportError:
17 readlink = False
18
19 from conda_build.os_utils import external
20 from .conda_interface import lchmod
21 from .conda_interface import walk_prefix
22 from .conda_interface import md5_file
23 from .conda_interface import PY3
24 from .conda_interface import TemporaryDirectory
25
26 from conda_build import utils
27 from conda_build.os_utils.pyldd import is_codefile
28
29 if sys.platform == 'darwin':
30 from conda_build.os_utils import macho
31
32
33 def is_obj(path):
34 return is_codefile(path)
35
36
37 def fix_shebang(f, prefix, build_python, osx_is_app=False):
38 path = os.path.join(prefix, f)
39 if is_obj(path):
40 return
41 elif os.path.islink(path):
42 return
43 elif not os.path.isfile(path):
44 return
45
46 if os.stat(path).st_size == 0:
47 return
48
49 bytes_ = False
50
51 with io.open(path, encoding=locale.getpreferredencoding(), mode='r+') as fi:
52 try:
53 data = fi.read(100)
54 fi.seek(0)
55 except UnicodeDecodeError: # file is binary
56 return
57
58 SHEBANG_PAT = re.compile(r'^#!.+$', re.M)
59
60 # regexp on the memory mapped file so we only read it into
61 # memory if the regexp matches.
62 try:
63 mm = utils.mmap_mmap(fi.fileno(), 0, tagname=None, flags=utils.mmap_MAP_PRIVATE)
64 except OSError:
65 mm = fi.read()
66 try:
67 m = SHEBANG_PAT.match(mm)
68 except TypeError:
69 SHEBANG_PAT = re.compile(br'^#!.+$', re.M)
70 bytes_ = True
71 m = SHEBANG_PAT.match(mm)
72
73 python_str = b'python' if bytes_ else 'python'
74
75 if not (m and python_str in m.group()):
76 return
77
78 data = mm[:]
79
80 py_exec = '#!' + ('/bin/bash ' + prefix + '/bin/pythonw'
81 if sys.platform == 'darwin' and osx_is_app else
82 prefix + '/bin/' + os.path.basename(build_python))
83 if bytes_ and hasattr(py_exec, 'encode'):
84 py_exec = py_exec.encode()
85 new_data = SHEBANG_PAT.sub(py_exec, data, count=1)
86 if new_data == data:
87 return
88 print("updating shebang:", f)
89 with io.open(path, 'w', encoding=locale.getpreferredencoding()) as fo:
90 try:
91 fo.write(new_data)
92 except TypeError:
93 fo.write(new_data.decode())
94 os.chmod(path, 0o775)
95
96
97 def write_pth(egg_path, config):
98 fn = os.path.basename(egg_path)
99 py_ver = '.'.join(config.variant['python'].split('.')[:2])
100 with open(os.path.join(utils.get_site_packages(config.host_prefix, py_ver),
101 '%s.pth' % (fn.split('-')[0])), 'w') as fo:
102 fo.write('./%s\n' % fn)
103
104
105 def remove_easy_install_pth(files, prefix, config, preserve_egg_dir=False):
106 """
107 remove the need for easy-install.pth and finally remove easy-install.pth
108 itself
109 """
110 absfiles = [os.path.join(prefix, f) for f in files]
111 py_ver = '.'.join(config.variant['python'].split('.')[:2])
112 sp_dir = utils.get_site_packages(prefix, py_ver)
113 for egg_path in glob(os.path.join(sp_dir, '*-py*.egg')):
114 if os.path.isdir(egg_path):
115 if preserve_egg_dir or not any(os.path.join(egg_path, i) in absfiles for i
116 in walk_prefix(egg_path, False, windows_forward_slashes=False)):
117 write_pth(egg_path, config=config)
118 continue
119
120 print('found egg dir:', egg_path)
121 try:
122 os.rename(os.path.join(egg_path, 'EGG-INFO'),
123 egg_path + '-info')
124 except OSError:
125 pass
126 utils.rm_rf(os.path.join(egg_path, 'EGG-INFO'))
127 for fn in os.listdir(egg_path):
128 if fn == '__pycache__':
129 utils.rm_rf(os.path.join(egg_path, fn))
130 else:
131 # this might be a name-space package
132 # so the package directory already exists
133 # from another installed dependency
134 if os.path.exists(os.path.join(sp_dir, fn)):
135 try:
136 utils.copy_into(os.path.join(egg_path, fn),
137 os.path.join(sp_dir, fn), config.timeout,
138 locking=config.locking)
139 utils.rm_rf(os.path.join(egg_path, fn))
140 except IOError as e:
141 fn = os.path.basename(str(e).split()[-1])
142 raise IOError("Tried to merge folder {egg_path} into {sp_dir}, but {fn}"
143 " exists in both locations. Please either add "
144 "build/preserve_egg_dir: True to meta.yaml, or manually "
145 "remove the file during your install process to avoid "
146 "this conflict."
147 .format(egg_path=egg_path, sp_dir=sp_dir, fn=fn))
148 else:
149 os.rename(os.path.join(egg_path, fn), os.path.join(sp_dir, fn))
150
151 elif os.path.isfile(egg_path):
152 if egg_path not in absfiles:
153 continue
154 print('found egg:', egg_path)
155 write_pth(egg_path, config=config)
156
157 utils.rm_rf(os.path.join(sp_dir, 'easy-install.pth'))
158
159
160 def rm_py_along_so(prefix):
161 """remove .py (.pyc) files alongside .so or .pyd files"""
162 for root, _, files in os.walk(prefix):
163 for fn in files:
164 if fn.endswith(('.so', '.pyd')):
165 name, _ = os.path.splitext(fn)
166 for ext in '.py', '.pyc', '.pyo':
167 if name + ext in files:
168 os.unlink(os.path.join(root, name + ext))
169
170
171 def rm_pyo(files, prefix):
172 """pyo considered harmful: https://www.python.org/dev/peps/pep-0488/
173
174 The build may have proceeded with:
175 [install]
176 optimize = 1
177 .. in setup.cfg in which case we can end up with some stdlib __pycache__
178 files ending in .opt-N.pyc on Python 3, as well as .pyo files for the
179 package's own python. """
180 re_pyo = re.compile(r'.*(?:\.pyo$|\.opt-[0-9]\.pyc)')
181 for fn in files:
182 if re_pyo.match(fn):
183 os.unlink(os.path.join(prefix, fn))
184
185
186 def rm_pyc(files, prefix):
187 re_pyc = re.compile(r'.*(?:\.pyc$)')
188 for fn in files:
189 if re_pyc.match(fn):
190 os.unlink(os.path.join(prefix, fn))
191
192
193 def compile_missing_pyc(files, cwd, python_exe, skip_compile_pyc=()):
194 if not os.path.isfile(python_exe):
195 return
196 compile_files = []
197 skip_compile_pyc_n = [os.path.normpath(skip) for skip in skip_compile_pyc]
198 skipped_files = set()
199 for skip in skip_compile_pyc_n:
200 skipped_files.update(set(fnmatch.filter(files, skip)))
201 unskipped_files = set(files) - skipped_files
202 for fn in unskipped_files:
203 # omit files in Library/bin, Scripts, and the root prefix - they are not generally imported
204 if sys.platform == 'win32':
205 if any([fn.lower().startswith(start) for start in ['library/bin', 'library\\bin',
206 'scripts']]):
207 continue
208 else:
209 if fn.startswith('bin'):
210 continue
211 cache_prefix = ("__pycache__" + os.sep) if PY3 else ""
212 if (fn.endswith(".py") and
213 os.path.dirname(fn) + cache_prefix + os.path.basename(fn) + 'c' not in files):
214 compile_files.append(fn)
215
216 if compile_files:
217 if not os.path.isfile(python_exe):
218 print('compiling .pyc files... failed as no python interpreter was found')
219 else:
220 print('compiling .pyc files...')
221 for f in compile_files:
222 call([python_exe, '-Wi', '-m', 'py_compile', f], cwd=cwd)
223
224
225 def post_process(files, prefix, config, preserve_egg_dir=False, noarch=False, skip_compile_pyc=()):
226 rm_pyo(files, prefix)
227 if noarch:
228 rm_pyc(files, prefix)
229 else:
230 python_exe = (config.build_python if os.path.isfile(config.build_python) else
231 config.host_python)
232 compile_missing_pyc(files, cwd=prefix, python_exe=python_exe,
233 skip_compile_pyc=skip_compile_pyc)
234 remove_easy_install_pth(files, prefix, config, preserve_egg_dir=preserve_egg_dir)
235 rm_py_along_so(prefix)
236
237
238 def find_lib(link, prefix, path=None):
239 files = utils.prefix_files(prefix)
240 if link.startswith(prefix):
241 link = os.path.normpath(link[len(prefix) + 1:])
242 if link not in files:
243 sys.exit("Error: Could not find %s" % link)
244 return link
245 if link.startswith('/'): # but doesn't start with the build prefix
246 return
247 if link.startswith('@rpath/'):
248 # Assume the rpath already points to lib, so there is no need to
249 # change it.
250 return
251 if '/' not in link or link.startswith('@executable_path/'):
252 link = os.path.basename(link)
253 file_names = defaultdict(list)
254 for f in files:
255 file_names[os.path.basename(f)].append(f)
256 if link not in file_names:
257 sys.exit("Error: Could not find %s" % link)
258 if len(file_names[link]) > 1:
259 if path and os.path.basename(path) == link:
260 # The link is for the file itself, just use it
261 return path
262 # Allow for the possibility of the same library appearing in
263 # multiple places.
264 md5s = set()
265 for f in file_names[link]:
266 md5s.add(md5_file(os.path.join(prefix, f)))
267 if len(md5s) > 1:
268 sys.exit("Error: Found multiple instances of %s: %s" % (link, file_names[link]))
269 else:
270 file_names[link].sort()
271 print("Found multiple instances of %s (%s). "
272 "Choosing the first one." % (link, file_names[link]))
273 return file_names[link][0]
274 print("Don't know how to find %s, skipping" % link)
275
276
277 def osx_ch_link(path, link_dict, prefix):
278 link = link_dict['name']
279 print("Fixing linking of %s in %s" % (link, path))
280 link_loc = find_lib(link, prefix, path)
281 if not link_loc:
282 return
283
284 lib_to_link = os.path.relpath(os.path.dirname(link_loc), 'lib')
285 # path_to_lib = utils.relative(path[len(prefix) + 1:])
286
287 # e.g., if
288 # path = '/build_prefix/lib/some/stuff/libstuff.dylib'
289 # link_loc = 'lib/things/libthings.dylib'
290
291 # then
292
293 # lib_to_link = 'things'
294 # path_to_lib = '../..'
295
296 # @rpath always means 'lib', link will be at
297 # @rpath/lib_to_link/basename(link), like @rpath/things/libthings.dylib.
298
299 # For when we can't use @rpath, @loader_path means the path to the library
300 # ('path'), so from path to link is
301 # @loader_path/path_to_lib/lib_to_link/basename(link), like
302 # @loader_path/../../things/libthings.dylib.
303
304 ret = '@rpath/%s/%s' % (lib_to_link, os.path.basename(link))
305
306 # XXX: IF the above fails for whatever reason, the below can be used
307 # TODO: This might contain redundant ..'s if link and path are both in
308 # some subdirectory of lib.
309 # ret = '@loader_path/%s/%s/%s' % (path_to_lib, lib_to_link, basename(link))
310
311 ret = ret.replace('/./', '/')
312
313 return ret
314
315
316 def mk_relative_osx(path, prefix, build_prefix=None):
317 '''
318 if build_prefix is None, the_n this is a standard conda build. The path
319 and all dependencies are in the build_prefix.
320
321 if package is built in develop mode, build_prefix is specified. Object
322 specified by 'path' needs to relink runtime dependences to libs found in
323 build_prefix/lib/. Also, in develop mode, 'path' is not in 'build_prefix'
324 '''
325 if build_prefix is None:
326 assert path.startswith(prefix + '/')
327 else:
328 prefix = build_prefix
329
330 assert sys.platform == 'darwin' and is_obj(path)
331 s = macho.install_name_change(path, partial(osx_ch_link, prefix=prefix))
332
333 names = macho.otool(path)
334 if names:
335 # Add an rpath to every executable to increase the chances of it
336 # being found.
337 rpath = os.path.join('@loader_path',
338 os.path.relpath(os.path.join(prefix, 'lib'),
339 os.path.dirname(path)), '').replace('/./', '/')
340 macho.add_rpath(path, rpath, verbose=True)
341
342 # 10.7 install_name_tool -delete_rpath causes broken dylibs, I will revisit this ASAP.
343 # .. and remove config.build_prefix/lib which was added in-place of
344 # DYLD_FALLBACK_LIBRARY_PATH since El Capitan's SIP.
345 # macho.delete_rpath(path, config.build_prefix + '/lib', verbose = True)
346
347 if s:
348 # Skip for stub files, which have to use binary_has_prefix_files to be
349 # made relocatable.
350 assert_relative_osx(path, prefix)
351
352
353 def mk_relative_linux(f, prefix, rpaths=('lib',)):
354 'Respects the original values and converts abs to $ORIGIN-relative'
355
356 elf = os.path.join(prefix, f)
357 origin = os.path.dirname(elf)
358
359 patchelf = external.find_executable('patchelf', prefix)
360 try:
361 existing = check_output([patchelf, '--print-rpath', elf]).decode('utf-8').splitlines()[0]
362 except:
363 print('patchelf: --print-rpath failed for %s\n' % (elf))
364 return
365 existing = existing.split(os.pathsep)
366 new = []
367 for old in existing:
368 if old.startswith('$ORIGIN'):
369 new.append(old)
370 elif old.startswith('/'):
371 # Test if this absolute path is outside of prefix. That is fatal.
372 relpath = os.path.relpath(old, prefix)
373 if relpath.startswith('..' + os.sep):
374 print('Warning: rpath {0} is outside prefix {1} (removing it)'.format(old, prefix))
375 else:
376 relpath = '$ORIGIN/' + os.path.relpath(old, origin)
377 if relpath not in new:
378 new.append(relpath)
379 # Ensure that the asked-for paths are also in new.
380 for rpath in rpaths:
381 if not rpath.startswith('/'):
382 # IMHO utils.relative shouldn't exist, but I am too paranoid to remove
383 # it, so instead, make sure that what I think it should be replaced by
384 # gives the same result and assert if not. Yeah, I am a chicken.
385 rel_ours = os.path.normpath(utils.relative(f, rpath))
386 rel_stdlib = os.path.normpath(os.path.relpath(rpath, os.path.dirname(f)))
387 assert rel_ours == rel_stdlib, \
388 'utils.relative {0} and relpath {1} disagree for {2}, {3}'.format(
389 rel_ours, rel_stdlib, f, rpath)
390 rpath = '$ORIGIN/' + rel_stdlib
391 if rpath not in new:
392 new.append(rpath)
393 rpath = ':'.join(new)
394 print('patchelf: file: %s\n setting rpath to: %s' % (elf, rpath))
395 call([patchelf, '--force-rpath', '--set-rpath', rpath, elf])
396
397
398 def assert_relative_osx(path, prefix):
399 for name in macho.get_dylibs(path):
400 assert not name.startswith(prefix), path
401
402
403 def mk_relative(m, f, prefix):
404 assert sys.platform != 'win32'
405 path = os.path.join(prefix, f)
406 if not is_obj(path):
407 return
408
409 if sys.platform.startswith('linux'):
410 mk_relative_linux(f, prefix=prefix, rpaths=m.get_value('build/rpaths', ['lib']))
411 elif sys.platform == 'darwin':
412 mk_relative_osx(path, prefix=prefix)
413
414
415 def fix_permissions(files, prefix):
416 print("Fixing permissions")
417 for root, dirs, _ in os.walk(prefix):
418 for dn in dirs:
419 lchmod(os.path.join(root, dn), 0o775)
420
421 for f in files:
422 path = os.path.join(prefix, f)
423 st = os.lstat(path)
424 old_mode = stat.S_IMODE(st.st_mode)
425 new_mode = old_mode
426 # broadcast execute
427 if old_mode & stat.S_IXUSR:
428 new_mode = new_mode | stat.S_IXGRP | stat.S_IXOTH
429 # ensure user and group can write and all can read
430 new_mode = new_mode | stat.S_IWUSR | stat.S_IWGRP | stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH # noqa
431 if old_mode != new_mode:
432 try:
433 lchmod(path, new_mode)
434 except (OSError, utils.PermissionError) as e:
435 log = utils.get_logger(__name__)
436 log.warn(str(e))
437
438
439 def post_build(m, files, prefix, build_python, croot):
440 print('number of files:', len(files))
441 fix_permissions(files, prefix)
442
443 for f in files:
444 make_hardlink_copy(f, prefix)
445
446 if sys.platform == 'win32':
447 return
448
449 binary_relocation = m.binary_relocation()
450 if not binary_relocation:
451 print("Skipping binary relocation logic")
452 osx_is_app = bool(m.get_value('build/osx_is_app', False)) and sys.platform == 'darwin'
453
454 check_symlinks(files, prefix, croot)
455
456 for f in files:
457 if f.startswith('bin/'):
458 fix_shebang(f, prefix=prefix, build_python=build_python, osx_is_app=osx_is_app)
459 if binary_relocation is True or (isinstance(binary_relocation, list) and
460 f in binary_relocation):
461 mk_relative(m, f, prefix)
462
463
464 def check_symlinks(files, prefix, croot):
465 if readlink is False:
466 return # Not on Unix system
467 msgs = []
468 real_build_prefix = os.path.realpath(prefix)
469 for f in files:
470 path = os.path.join(real_build_prefix, f)
471 if os.path.islink(path):
472 link_path = readlink(path)
473 real_link_path = os.path.realpath(path)
474 # symlinks to binaries outside of the same dir don't work. RPATH stuff gets confused
475 # because ld.so follows symlinks in RPATHS
476 # If condition exists, then copy the file rather than symlink it.
477 if (not os.path.dirname(link_path) == os.path.dirname(real_link_path) and
478 is_obj(f)):
479 os.remove(path)
480 utils.copy_into(real_link_path, path)
481 elif real_link_path.startswith(real_build_prefix):
482 # If the path is in the build prefix, this is fine, but
483 # the link needs to be relative
484 if not link_path.startswith('.'):
485 # Don't change the link structure if it is already a
486 # relative link. It's possible that ..'s later in the path
487 # can result in a broken link still, but we'll assume that
488 # such crazy things don't happen.
489 print("Making absolute symlink %s -> %s relative" % (f, link_path))
490 os.unlink(path)
491 os.symlink(os.path.relpath(real_link_path, os.path.dirname(path)), path)
492 else:
493 # Symlinks to absolute paths on the system (like /usr) are fine.
494 if real_link_path.startswith(croot):
495 msgs.append("%s is a symlink to a path that may not "
496 "exist after the build is completed (%s)" % (f, link_path))
497
498 if msgs:
499 for msg in msgs:
500 print("Error: %s" % msg, file=sys.stderr)
501 sys.exit(1)
502
503
504 def make_hardlink_copy(path, prefix):
505 """Hardlinks create invalid packages. Copy files to break the link.
506 Symlinks are OK, and unaffected here."""
507 if not os.path.isabs(path):
508 path = os.path.normpath(os.path.join(prefix, path))
509 fn = os.path.basename(path)
510 if os.lstat(path).st_nlink > 1:
511 with TemporaryDirectory() as dest:
512 # copy file to new name
513 utils.copy_into(path, dest)
514 # remove old file
515 utils.rm_rf(path)
516 # rename copy to original filename
517 os.rename(os.path.join(dest, fn), path)
518
519
520 def get_build_metadata(m):
521 src_dir = m.config.work_dir
522 if os.path.exists(os.path.join(src_dir, '__conda_version__.txt')):
523 raise ValueError("support for __conda_version__ has been removed as of Conda-build 3.0."
524 "Try Jinja templates instead: "
525 "http://conda.pydata.org/docs/building/meta-yaml.html#templating-with-jinja")
526 if os.path.exists(os.path.join(src_dir, '__conda_buildnum__.txt')):
527 raise ValueError("support for __conda_buildnum__ has been removed as of Conda-build 3.0."
528 "Try Jinja templates instead: "
529 "http://conda.pydata.org/docs/building/meta-yaml.html#templating-with-jinja")
530 if os.path.exists(os.path.join(src_dir, '__conda_buildstr__.txt')):
531 raise ValueError("support for __conda_buildstr__ has been removed as of Conda-build 3.0."
532 "Try Jinja templates instead: "
533 "http://conda.pydata.org/docs/building/meta-yaml.html#templating-with-jinja")
534
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conda_build/post.py b/conda_build/post.py
--- a/conda_build/post.py
+++ b/conda_build/post.py
@@ -514,7 +514,9 @@
# remove old file
utils.rm_rf(path)
# rename copy to original filename
- os.rename(os.path.join(dest, fn), path)
+ # It is essential here to use copying (as opposed to os.rename), so that
+ # crossing volume boundaries works
+ utils.copy_into(os.path.join(dest, fn), path)
def get_build_metadata(m):
| {"golden_diff": "diff --git a/conda_build/post.py b/conda_build/post.py\n--- a/conda_build/post.py\n+++ b/conda_build/post.py\n@@ -514,7 +514,9 @@\n # remove old file\n utils.rm_rf(path)\n # rename copy to original filename\n- os.rename(os.path.join(dest, fn), path)\n+ # It is essential here to use copying (as opposed to os.rename), so that\n+ # crossing volume boundaries works\n+ utils.copy_into(os.path.join(dest, fn), path)\n \n \n def get_build_metadata(m):\n", "issue": "Invalid cross-device-link\nExample output:\r\n```bash\r\nINFO:conda_build.build:Packaging <...>\r\nnumber of files: 63\r\nFixing permissions\r\nTraceback (most recent call last):\r\n File \"./build.py\", line 599, in build_single\r\n built = cb.api.build(RECIPE, **bargs)\r\n File \"$CONDA_ROOT/lib/python3.6/site-packages/conda_build/api.py\", line 185, in build\r\n need_source_download=need_source_download, config=config, variants=variants)\r\n File \"$CONDA_ROOT/lib/python3.6/site-packages/conda_build/build.py\", line 1782, in build_tree\r\n notest=notest,\r\n File \"$CONDA_ROOT/lib/python3.6/site-packages/conda_build/build.py\", line 1241, in build\r\n built_package = bundlers[output_d.get('type', 'conda')](output_d, m, env)\r\n File \"$CONDA_ROOT/lib/python3.6/site-packages/conda_build/build.py\", line 719, in bundle_conda\r\n files = post_process_files(metadata, initial_files)\r\n File \"$CONDA_ROOT/lib/python3.6/site-packages/conda_build/build.py\", line 656, in post_process_files\r\n croot=m.config.croot)\r\n File \"$CONDA_ROOT/lib/python3.6/site-packages/conda_build/post.py\", line 444, in post_build\r\n make_hardlink_copy(f, prefix)\r\n File \"$CONDA_ROOT/lib/python3.6/site-packages/conda_build/post.py\", line 517, in make_hardlink_copy\r\n os.rename(os.path.join(dest, fn), path)\r\nOSError: [Errno 18] Invalid cross-device link: '/tmp/tmpg6gbg6di/libgcc_s.so' -> '$PREFIX/lib/libgcc_s.so'\r\n```\r\nI bisected the issue to 9914f397053312172feb4bc9f312adbe98b0ae3c\r\n\r\nThe issue seems to appear when packing runtime libraries with always_include_files e.g. 
libgcc, libllvm, libmpich, ...\r\n\r\n**meta.yaml**\r\n```yaml\r\n<...>\r\nbuild:\r\n always_include_files:\r\n - lib/libgcc_s.so\r\n - <...>\r\n```\r\n\r\nNo issue with 3.0.25, but issues since 3.0.26.\r\nMaybe related: https://github.com/conda/conda-build/issues/1659\r\nThanks.\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nfrom collections import defaultdict\nimport fnmatch\nfrom functools import partial\nfrom glob import glob\nimport io\nimport locale\nimport re\nimport os\nimport stat\nfrom subprocess import call, check_output\nimport sys\ntry:\n from os import readlink\nexcept ImportError:\n readlink = False\n\nfrom conda_build.os_utils import external\nfrom .conda_interface import lchmod\nfrom .conda_interface import walk_prefix\nfrom .conda_interface import md5_file\nfrom .conda_interface import PY3\nfrom .conda_interface import TemporaryDirectory\n\nfrom conda_build import utils\nfrom conda_build.os_utils.pyldd import is_codefile\n\nif sys.platform == 'darwin':\n from conda_build.os_utils import macho\n\n\ndef is_obj(path):\n return is_codefile(path)\n\n\ndef fix_shebang(f, prefix, build_python, osx_is_app=False):\n path = os.path.join(prefix, f)\n if is_obj(path):\n return\n elif os.path.islink(path):\n return\n elif not os.path.isfile(path):\n return\n\n if os.stat(path).st_size == 0:\n return\n\n bytes_ = False\n\n with io.open(path, encoding=locale.getpreferredencoding(), mode='r+') as fi:\n try:\n data = fi.read(100)\n fi.seek(0)\n except UnicodeDecodeError: # file is binary\n return\n\n SHEBANG_PAT = re.compile(r'^#!.+$', re.M)\n\n # regexp on the memory mapped file so we only read it into\n # memory if the regexp matches.\n try:\n mm = utils.mmap_mmap(fi.fileno(), 0, tagname=None, flags=utils.mmap_MAP_PRIVATE)\n except OSError:\n mm = fi.read()\n try:\n m = SHEBANG_PAT.match(mm)\n except TypeError:\n SHEBANG_PAT = re.compile(br'^#!.+$', re.M)\n bytes_ = True\n m = SHEBANG_PAT.match(mm)\n\n python_str = b'python' if bytes_ else 'python'\n\n if not (m and python_str in m.group()):\n return\n\n data = mm[:]\n\n py_exec = '#!' 
+ ('/bin/bash ' + prefix + '/bin/pythonw'\n if sys.platform == 'darwin' and osx_is_app else\n prefix + '/bin/' + os.path.basename(build_python))\n if bytes_ and hasattr(py_exec, 'encode'):\n py_exec = py_exec.encode()\n new_data = SHEBANG_PAT.sub(py_exec, data, count=1)\n if new_data == data:\n return\n print(\"updating shebang:\", f)\n with io.open(path, 'w', encoding=locale.getpreferredencoding()) as fo:\n try:\n fo.write(new_data)\n except TypeError:\n fo.write(new_data.decode())\n os.chmod(path, 0o775)\n\n\ndef write_pth(egg_path, config):\n fn = os.path.basename(egg_path)\n py_ver = '.'.join(config.variant['python'].split('.')[:2])\n with open(os.path.join(utils.get_site_packages(config.host_prefix, py_ver),\n '%s.pth' % (fn.split('-')[0])), 'w') as fo:\n fo.write('./%s\\n' % fn)\n\n\ndef remove_easy_install_pth(files, prefix, config, preserve_egg_dir=False):\n \"\"\"\n remove the need for easy-install.pth and finally remove easy-install.pth\n itself\n \"\"\"\n absfiles = [os.path.join(prefix, f) for f in files]\n py_ver = '.'.join(config.variant['python'].split('.')[:2])\n sp_dir = utils.get_site_packages(prefix, py_ver)\n for egg_path in glob(os.path.join(sp_dir, '*-py*.egg')):\n if os.path.isdir(egg_path):\n if preserve_egg_dir or not any(os.path.join(egg_path, i) in absfiles for i\n in walk_prefix(egg_path, False, windows_forward_slashes=False)):\n write_pth(egg_path, config=config)\n continue\n\n print('found egg dir:', egg_path)\n try:\n os.rename(os.path.join(egg_path, 'EGG-INFO'),\n egg_path + '-info')\n except OSError:\n pass\n utils.rm_rf(os.path.join(egg_path, 'EGG-INFO'))\n for fn in os.listdir(egg_path):\n if fn == '__pycache__':\n utils.rm_rf(os.path.join(egg_path, fn))\n else:\n # this might be a name-space package\n # so the package directory already exists\n # from another installed dependency\n if os.path.exists(os.path.join(sp_dir, fn)):\n try:\n utils.copy_into(os.path.join(egg_path, fn),\n os.path.join(sp_dir, fn), config.timeout,\n locking=config.locking)\n utils.rm_rf(os.path.join(egg_path, fn))\n except IOError as e:\n fn = os.path.basename(str(e).split()[-1])\n raise IOError(\"Tried to merge folder {egg_path} into {sp_dir}, but {fn}\"\n \" exists in both locations. Please either add \"\n \"build/preserve_egg_dir: True to meta.yaml, or manually \"\n \"remove the file during your install process to avoid \"\n \"this conflict.\"\n .format(egg_path=egg_path, sp_dir=sp_dir, fn=fn))\n else:\n os.rename(os.path.join(egg_path, fn), os.path.join(sp_dir, fn))\n\n elif os.path.isfile(egg_path):\n if egg_path not in absfiles:\n continue\n print('found egg:', egg_path)\n write_pth(egg_path, config=config)\n\n utils.rm_rf(os.path.join(sp_dir, 'easy-install.pth'))\n\n\ndef rm_py_along_so(prefix):\n \"\"\"remove .py (.pyc) files alongside .so or .pyd files\"\"\"\n for root, _, files in os.walk(prefix):\n for fn in files:\n if fn.endswith(('.so', '.pyd')):\n name, _ = os.path.splitext(fn)\n for ext in '.py', '.pyc', '.pyo':\n if name + ext in files:\n os.unlink(os.path.join(root, name + ext))\n\n\ndef rm_pyo(files, prefix):\n \"\"\"pyo considered harmful: https://www.python.org/dev/peps/pep-0488/\n\n The build may have proceeded with:\n [install]\n optimize = 1\n .. in setup.cfg in which case we can end up with some stdlib __pycache__\n files ending in .opt-N.pyc on Python 3, as well as .pyo files for the\n package's own python. 
\"\"\"\n re_pyo = re.compile(r'.*(?:\\.pyo$|\\.opt-[0-9]\\.pyc)')\n for fn in files:\n if re_pyo.match(fn):\n os.unlink(os.path.join(prefix, fn))\n\n\ndef rm_pyc(files, prefix):\n re_pyc = re.compile(r'.*(?:\\.pyc$)')\n for fn in files:\n if re_pyc.match(fn):\n os.unlink(os.path.join(prefix, fn))\n\n\ndef compile_missing_pyc(files, cwd, python_exe, skip_compile_pyc=()):\n if not os.path.isfile(python_exe):\n return\n compile_files = []\n skip_compile_pyc_n = [os.path.normpath(skip) for skip in skip_compile_pyc]\n skipped_files = set()\n for skip in skip_compile_pyc_n:\n skipped_files.update(set(fnmatch.filter(files, skip)))\n unskipped_files = set(files) - skipped_files\n for fn in unskipped_files:\n # omit files in Library/bin, Scripts, and the root prefix - they are not generally imported\n if sys.platform == 'win32':\n if any([fn.lower().startswith(start) for start in ['library/bin', 'library\\\\bin',\n 'scripts']]):\n continue\n else:\n if fn.startswith('bin'):\n continue\n cache_prefix = (\"__pycache__\" + os.sep) if PY3 else \"\"\n if (fn.endswith(\".py\") and\n os.path.dirname(fn) + cache_prefix + os.path.basename(fn) + 'c' not in files):\n compile_files.append(fn)\n\n if compile_files:\n if not os.path.isfile(python_exe):\n print('compiling .pyc files... failed as no python interpreter was found')\n else:\n print('compiling .pyc files...')\n for f in compile_files:\n call([python_exe, '-Wi', '-m', 'py_compile', f], cwd=cwd)\n\n\ndef post_process(files, prefix, config, preserve_egg_dir=False, noarch=False, skip_compile_pyc=()):\n rm_pyo(files, prefix)\n if noarch:\n rm_pyc(files, prefix)\n else:\n python_exe = (config.build_python if os.path.isfile(config.build_python) else\n config.host_python)\n compile_missing_pyc(files, cwd=prefix, python_exe=python_exe,\n skip_compile_pyc=skip_compile_pyc)\n remove_easy_install_pth(files, prefix, config, preserve_egg_dir=preserve_egg_dir)\n rm_py_along_so(prefix)\n\n\ndef find_lib(link, prefix, path=None):\n files = utils.prefix_files(prefix)\n if link.startswith(prefix):\n link = os.path.normpath(link[len(prefix) + 1:])\n if link not in files:\n sys.exit(\"Error: Could not find %s\" % link)\n return link\n if link.startswith('/'): # but doesn't start with the build prefix\n return\n if link.startswith('@rpath/'):\n # Assume the rpath already points to lib, so there is no need to\n # change it.\n return\n if '/' not in link or link.startswith('@executable_path/'):\n link = os.path.basename(link)\n file_names = defaultdict(list)\n for f in files:\n file_names[os.path.basename(f)].append(f)\n if link not in file_names:\n sys.exit(\"Error: Could not find %s\" % link)\n if len(file_names[link]) > 1:\n if path and os.path.basename(path) == link:\n # The link is for the file itself, just use it\n return path\n # Allow for the possibility of the same library appearing in\n # multiple places.\n md5s = set()\n for f in file_names[link]:\n md5s.add(md5_file(os.path.join(prefix, f)))\n if len(md5s) > 1:\n sys.exit(\"Error: Found multiple instances of %s: %s\" % (link, file_names[link]))\n else:\n file_names[link].sort()\n print(\"Found multiple instances of %s (%s). 
\"\n \"Choosing the first one.\" % (link, file_names[link]))\n return file_names[link][0]\n print(\"Don't know how to find %s, skipping\" % link)\n\n\ndef osx_ch_link(path, link_dict, prefix):\n link = link_dict['name']\n print(\"Fixing linking of %s in %s\" % (link, path))\n link_loc = find_lib(link, prefix, path)\n if not link_loc:\n return\n\n lib_to_link = os.path.relpath(os.path.dirname(link_loc), 'lib')\n # path_to_lib = utils.relative(path[len(prefix) + 1:])\n\n # e.g., if\n # path = '/build_prefix/lib/some/stuff/libstuff.dylib'\n # link_loc = 'lib/things/libthings.dylib'\n\n # then\n\n # lib_to_link = 'things'\n # path_to_lib = '../..'\n\n # @rpath always means 'lib', link will be at\n # @rpath/lib_to_link/basename(link), like @rpath/things/libthings.dylib.\n\n # For when we can't use @rpath, @loader_path means the path to the library\n # ('path'), so from path to link is\n # @loader_path/path_to_lib/lib_to_link/basename(link), like\n # @loader_path/../../things/libthings.dylib.\n\n ret = '@rpath/%s/%s' % (lib_to_link, os.path.basename(link))\n\n # XXX: IF the above fails for whatever reason, the below can be used\n # TODO: This might contain redundant ..'s if link and path are both in\n # some subdirectory of lib.\n # ret = '@loader_path/%s/%s/%s' % (path_to_lib, lib_to_link, basename(link))\n\n ret = ret.replace('/./', '/')\n\n return ret\n\n\ndef mk_relative_osx(path, prefix, build_prefix=None):\n '''\n if build_prefix is None, the_n this is a standard conda build. The path\n and all dependencies are in the build_prefix.\n\n if package is built in develop mode, build_prefix is specified. Object\n specified by 'path' needs to relink runtime dependences to libs found in\n build_prefix/lib/. Also, in develop mode, 'path' is not in 'build_prefix'\n '''\n if build_prefix is None:\n assert path.startswith(prefix + '/')\n else:\n prefix = build_prefix\n\n assert sys.platform == 'darwin' and is_obj(path)\n s = macho.install_name_change(path, partial(osx_ch_link, prefix=prefix))\n\n names = macho.otool(path)\n if names:\n # Add an rpath to every executable to increase the chances of it\n # being found.\n rpath = os.path.join('@loader_path',\n os.path.relpath(os.path.join(prefix, 'lib'),\n os.path.dirname(path)), '').replace('/./', '/')\n macho.add_rpath(path, rpath, verbose=True)\n\n # 10.7 install_name_tool -delete_rpath causes broken dylibs, I will revisit this ASAP.\n # .. and remove config.build_prefix/lib which was added in-place of\n # DYLD_FALLBACK_LIBRARY_PATH since El Capitan's SIP.\n # macho.delete_rpath(path, config.build_prefix + '/lib', verbose = True)\n\n if s:\n # Skip for stub files, which have to use binary_has_prefix_files to be\n # made relocatable.\n assert_relative_osx(path, prefix)\n\n\ndef mk_relative_linux(f, prefix, rpaths=('lib',)):\n 'Respects the original values and converts abs to $ORIGIN-relative'\n\n elf = os.path.join(prefix, f)\n origin = os.path.dirname(elf)\n\n patchelf = external.find_executable('patchelf', prefix)\n try:\n existing = check_output([patchelf, '--print-rpath', elf]).decode('utf-8').splitlines()[0]\n except:\n print('patchelf: --print-rpath failed for %s\\n' % (elf))\n return\n existing = existing.split(os.pathsep)\n new = []\n for old in existing:\n if old.startswith('$ORIGIN'):\n new.append(old)\n elif old.startswith('/'):\n # Test if this absolute path is outside of prefix. That is fatal.\n relpath = os.path.relpath(old, prefix)\n if relpath.startswith('..' 
+ os.sep):\n print('Warning: rpath {0} is outside prefix {1} (removing it)'.format(old, prefix))\n else:\n relpath = '$ORIGIN/' + os.path.relpath(old, origin)\n if relpath not in new:\n new.append(relpath)\n # Ensure that the asked-for paths are also in new.\n for rpath in rpaths:\n if not rpath.startswith('/'):\n # IMHO utils.relative shouldn't exist, but I am too paranoid to remove\n # it, so instead, make sure that what I think it should be replaced by\n # gives the same result and assert if not. Yeah, I am a chicken.\n rel_ours = os.path.normpath(utils.relative(f, rpath))\n rel_stdlib = os.path.normpath(os.path.relpath(rpath, os.path.dirname(f)))\n assert rel_ours == rel_stdlib, \\\n 'utils.relative {0} and relpath {1} disagree for {2}, {3}'.format(\n rel_ours, rel_stdlib, f, rpath)\n rpath = '$ORIGIN/' + rel_stdlib\n if rpath not in new:\n new.append(rpath)\n rpath = ':'.join(new)\n print('patchelf: file: %s\\n setting rpath to: %s' % (elf, rpath))\n call([patchelf, '--force-rpath', '--set-rpath', rpath, elf])\n\n\ndef assert_relative_osx(path, prefix):\n for name in macho.get_dylibs(path):\n assert not name.startswith(prefix), path\n\n\ndef mk_relative(m, f, prefix):\n assert sys.platform != 'win32'\n path = os.path.join(prefix, f)\n if not is_obj(path):\n return\n\n if sys.platform.startswith('linux'):\n mk_relative_linux(f, prefix=prefix, rpaths=m.get_value('build/rpaths', ['lib']))\n elif sys.platform == 'darwin':\n mk_relative_osx(path, prefix=prefix)\n\n\ndef fix_permissions(files, prefix):\n print(\"Fixing permissions\")\n for root, dirs, _ in os.walk(prefix):\n for dn in dirs:\n lchmod(os.path.join(root, dn), 0o775)\n\n for f in files:\n path = os.path.join(prefix, f)\n st = os.lstat(path)\n old_mode = stat.S_IMODE(st.st_mode)\n new_mode = old_mode\n # broadcast execute\n if old_mode & stat.S_IXUSR:\n new_mode = new_mode | stat.S_IXGRP | stat.S_IXOTH\n # ensure user and group can write and all can read\n new_mode = new_mode | stat.S_IWUSR | stat.S_IWGRP | stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH # noqa\n if old_mode != new_mode:\n try:\n lchmod(path, new_mode)\n except (OSError, utils.PermissionError) as e:\n log = utils.get_logger(__name__)\n log.warn(str(e))\n\n\ndef post_build(m, files, prefix, build_python, croot):\n print('number of files:', len(files))\n fix_permissions(files, prefix)\n\n for f in files:\n make_hardlink_copy(f, prefix)\n\n if sys.platform == 'win32':\n return\n\n binary_relocation = m.binary_relocation()\n if not binary_relocation:\n print(\"Skipping binary relocation logic\")\n osx_is_app = bool(m.get_value('build/osx_is_app', False)) and sys.platform == 'darwin'\n\n check_symlinks(files, prefix, croot)\n\n for f in files:\n if f.startswith('bin/'):\n fix_shebang(f, prefix=prefix, build_python=build_python, osx_is_app=osx_is_app)\n if binary_relocation is True or (isinstance(binary_relocation, list) and\n f in binary_relocation):\n mk_relative(m, f, prefix)\n\n\ndef check_symlinks(files, prefix, croot):\n if readlink is False:\n return # Not on Unix system\n msgs = []\n real_build_prefix = os.path.realpath(prefix)\n for f in files:\n path = os.path.join(real_build_prefix, f)\n if os.path.islink(path):\n link_path = readlink(path)\n real_link_path = os.path.realpath(path)\n # symlinks to binaries outside of the same dir don't work. 
RPATH stuff gets confused\n # because ld.so follows symlinks in RPATHS\n # If condition exists, then copy the file rather than symlink it.\n if (not os.path.dirname(link_path) == os.path.dirname(real_link_path) and\n is_obj(f)):\n os.remove(path)\n utils.copy_into(real_link_path, path)\n elif real_link_path.startswith(real_build_prefix):\n # If the path is in the build prefix, this is fine, but\n # the link needs to be relative\n if not link_path.startswith('.'):\n # Don't change the link structure if it is already a\n # relative link. It's possible that ..'s later in the path\n # can result in a broken link still, but we'll assume that\n # such crazy things don't happen.\n print(\"Making absolute symlink %s -> %s relative\" % (f, link_path))\n os.unlink(path)\n os.symlink(os.path.relpath(real_link_path, os.path.dirname(path)), path)\n else:\n # Symlinks to absolute paths on the system (like /usr) are fine.\n if real_link_path.startswith(croot):\n msgs.append(\"%s is a symlink to a path that may not \"\n \"exist after the build is completed (%s)\" % (f, link_path))\n\n if msgs:\n for msg in msgs:\n print(\"Error: %s\" % msg, file=sys.stderr)\n sys.exit(1)\n\n\ndef make_hardlink_copy(path, prefix):\n \"\"\"Hardlinks create invalid packages. Copy files to break the link.\n Symlinks are OK, and unaffected here.\"\"\"\n if not os.path.isabs(path):\n path = os.path.normpath(os.path.join(prefix, path))\n fn = os.path.basename(path)\n if os.lstat(path).st_nlink > 1:\n with TemporaryDirectory() as dest:\n # copy file to new name\n utils.copy_into(path, dest)\n # remove old file\n utils.rm_rf(path)\n # rename copy to original filename\n os.rename(os.path.join(dest, fn), path)\n\n\ndef get_build_metadata(m):\n src_dir = m.config.work_dir\n if os.path.exists(os.path.join(src_dir, '__conda_version__.txt')):\n raise ValueError(\"support for __conda_version__ has been removed as of Conda-build 3.0.\"\n \"Try Jinja templates instead: \"\n \"http://conda.pydata.org/docs/building/meta-yaml.html#templating-with-jinja\")\n if os.path.exists(os.path.join(src_dir, '__conda_buildnum__.txt')):\n raise ValueError(\"support for __conda_buildnum__ has been removed as of Conda-build 3.0.\"\n \"Try Jinja templates instead: \"\n \"http://conda.pydata.org/docs/building/meta-yaml.html#templating-with-jinja\")\n if os.path.exists(os.path.join(src_dir, '__conda_buildstr__.txt')):\n raise ValueError(\"support for __conda_buildstr__ has been removed as of Conda-build 3.0.\"\n \"Try Jinja templates instead: \"\n \"http://conda.pydata.org/docs/building/meta-yaml.html#templating-with-jinja\")\n", "path": "conda_build/post.py"}], "after_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nfrom collections import defaultdict\nimport fnmatch\nfrom functools import partial\nfrom glob import glob\nimport io\nimport locale\nimport re\nimport os\nimport stat\nfrom subprocess import call, check_output\nimport sys\ntry:\n from os import readlink\nexcept ImportError:\n readlink = False\n\nfrom conda_build.os_utils import external\nfrom .conda_interface import lchmod\nfrom .conda_interface import walk_prefix\nfrom .conda_interface import md5_file\nfrom .conda_interface import PY3\nfrom .conda_interface import TemporaryDirectory\n\nfrom conda_build import utils\nfrom conda_build.os_utils.pyldd import is_codefile\n\nif sys.platform == 'darwin':\n from conda_build.os_utils import macho\n\n\ndef is_obj(path):\n return is_codefile(path)\n\n\ndef fix_shebang(f, prefix, build_python, osx_is_app=False):\n 
path = os.path.join(prefix, f)\n if is_obj(path):\n return\n elif os.path.islink(path):\n return\n elif not os.path.isfile(path):\n return\n\n if os.stat(path).st_size == 0:\n return\n\n bytes_ = False\n\n with io.open(path, encoding=locale.getpreferredencoding(), mode='r+') as fi:\n try:\n data = fi.read(100)\n fi.seek(0)\n except UnicodeDecodeError: # file is binary\n return\n\n SHEBANG_PAT = re.compile(r'^#!.+$', re.M)\n\n # regexp on the memory mapped file so we only read it into\n # memory if the regexp matches.\n try:\n mm = utils.mmap_mmap(fi.fileno(), 0, tagname=None, flags=utils.mmap_MAP_PRIVATE)\n except OSError:\n mm = fi.read()\n try:\n m = SHEBANG_PAT.match(mm)\n except TypeError:\n SHEBANG_PAT = re.compile(br'^#!.+$', re.M)\n bytes_ = True\n m = SHEBANG_PAT.match(mm)\n\n python_str = b'python' if bytes_ else 'python'\n\n if not (m and python_str in m.group()):\n return\n\n data = mm[:]\n\n py_exec = '#!' + ('/bin/bash ' + prefix + '/bin/pythonw'\n if sys.platform == 'darwin' and osx_is_app else\n prefix + '/bin/' + os.path.basename(build_python))\n if bytes_ and hasattr(py_exec, 'encode'):\n py_exec = py_exec.encode()\n new_data = SHEBANG_PAT.sub(py_exec, data, count=1)\n if new_data == data:\n return\n print(\"updating shebang:\", f)\n with io.open(path, 'w', encoding=locale.getpreferredencoding()) as fo:\n try:\n fo.write(new_data)\n except TypeError:\n fo.write(new_data.decode())\n os.chmod(path, 0o775)\n\n\ndef write_pth(egg_path, config):\n fn = os.path.basename(egg_path)\n py_ver = '.'.join(config.variant['python'].split('.')[:2])\n with open(os.path.join(utils.get_site_packages(config.host_prefix, py_ver),\n '%s.pth' % (fn.split('-')[0])), 'w') as fo:\n fo.write('./%s\\n' % fn)\n\n\ndef remove_easy_install_pth(files, prefix, config, preserve_egg_dir=False):\n \"\"\"\n remove the need for easy-install.pth and finally remove easy-install.pth\n itself\n \"\"\"\n absfiles = [os.path.join(prefix, f) for f in files]\n py_ver = '.'.join(config.variant['python'].split('.')[:2])\n sp_dir = utils.get_site_packages(prefix, py_ver)\n for egg_path in glob(os.path.join(sp_dir, '*-py*.egg')):\n if os.path.isdir(egg_path):\n if preserve_egg_dir or not any(os.path.join(egg_path, i) in absfiles for i\n in walk_prefix(egg_path, False, windows_forward_slashes=False)):\n write_pth(egg_path, config=config)\n continue\n\n print('found egg dir:', egg_path)\n try:\n os.rename(os.path.join(egg_path, 'EGG-INFO'),\n egg_path + '-info')\n except OSError:\n pass\n utils.rm_rf(os.path.join(egg_path, 'EGG-INFO'))\n for fn in os.listdir(egg_path):\n if fn == '__pycache__':\n utils.rm_rf(os.path.join(egg_path, fn))\n else:\n # this might be a name-space package\n # so the package directory already exists\n # from another installed dependency\n if os.path.exists(os.path.join(sp_dir, fn)):\n try:\n utils.copy_into(os.path.join(egg_path, fn),\n os.path.join(sp_dir, fn), config.timeout,\n locking=config.locking)\n utils.rm_rf(os.path.join(egg_path, fn))\n except IOError as e:\n fn = os.path.basename(str(e).split()[-1])\n raise IOError(\"Tried to merge folder {egg_path} into {sp_dir}, but {fn}\"\n \" exists in both locations. 
Please either add \"\n \"build/preserve_egg_dir: True to meta.yaml, or manually \"\n \"remove the file during your install process to avoid \"\n \"this conflict.\"\n .format(egg_path=egg_path, sp_dir=sp_dir, fn=fn))\n else:\n os.rename(os.path.join(egg_path, fn), os.path.join(sp_dir, fn))\n\n elif os.path.isfile(egg_path):\n if egg_path not in absfiles:\n continue\n print('found egg:', egg_path)\n write_pth(egg_path, config=config)\n\n utils.rm_rf(os.path.join(sp_dir, 'easy-install.pth'))\n\n\ndef rm_py_along_so(prefix):\n \"\"\"remove .py (.pyc) files alongside .so or .pyd files\"\"\"\n for root, _, files in os.walk(prefix):\n for fn in files:\n if fn.endswith(('.so', '.pyd')):\n name, _ = os.path.splitext(fn)\n for ext in '.py', '.pyc', '.pyo':\n if name + ext in files:\n os.unlink(os.path.join(root, name + ext))\n\n\ndef rm_pyo(files, prefix):\n \"\"\"pyo considered harmful: https://www.python.org/dev/peps/pep-0488/\n\n The build may have proceeded with:\n [install]\n optimize = 1\n .. in setup.cfg in which case we can end up with some stdlib __pycache__\n files ending in .opt-N.pyc on Python 3, as well as .pyo files for the\n package's own python. \"\"\"\n re_pyo = re.compile(r'.*(?:\\.pyo$|\\.opt-[0-9]\\.pyc)')\n for fn in files:\n if re_pyo.match(fn):\n os.unlink(os.path.join(prefix, fn))\n\n\ndef rm_pyc(files, prefix):\n re_pyc = re.compile(r'.*(?:\\.pyc$)')\n for fn in files:\n if re_pyc.match(fn):\n os.unlink(os.path.join(prefix, fn))\n\n\ndef compile_missing_pyc(files, cwd, python_exe, skip_compile_pyc=()):\n if not os.path.isfile(python_exe):\n return\n compile_files = []\n skip_compile_pyc_n = [os.path.normpath(skip) for skip in skip_compile_pyc]\n skipped_files = set()\n for skip in skip_compile_pyc_n:\n skipped_files.update(set(fnmatch.filter(files, skip)))\n unskipped_files = set(files) - skipped_files\n for fn in unskipped_files:\n # omit files in Library/bin, Scripts, and the root prefix - they are not generally imported\n if sys.platform == 'win32':\n if any([fn.lower().startswith(start) for start in ['library/bin', 'library\\\\bin',\n 'scripts']]):\n continue\n else:\n if fn.startswith('bin'):\n continue\n cache_prefix = (\"__pycache__\" + os.sep) if PY3 else \"\"\n if (fn.endswith(\".py\") and\n os.path.dirname(fn) + cache_prefix + os.path.basename(fn) + 'c' not in files):\n compile_files.append(fn)\n\n if compile_files:\n if not os.path.isfile(python_exe):\n print('compiling .pyc files... 
failed as no python interpreter was found')\n else:\n print('compiling .pyc files...')\n for f in compile_files:\n call([python_exe, '-Wi', '-m', 'py_compile', f], cwd=cwd)\n\n\ndef post_process(files, prefix, config, preserve_egg_dir=False, noarch=False, skip_compile_pyc=()):\n rm_pyo(files, prefix)\n if noarch:\n rm_pyc(files, prefix)\n else:\n python_exe = (config.build_python if os.path.isfile(config.build_python) else\n config.host_python)\n compile_missing_pyc(files, cwd=prefix, python_exe=python_exe,\n skip_compile_pyc=skip_compile_pyc)\n remove_easy_install_pth(files, prefix, config, preserve_egg_dir=preserve_egg_dir)\n rm_py_along_so(prefix)\n\n\ndef find_lib(link, prefix, path=None):\n files = utils.prefix_files(prefix)\n if link.startswith(prefix):\n link = os.path.normpath(link[len(prefix) + 1:])\n if link not in files:\n sys.exit(\"Error: Could not find %s\" % link)\n return link\n if link.startswith('/'): # but doesn't start with the build prefix\n return\n if link.startswith('@rpath/'):\n # Assume the rpath already points to lib, so there is no need to\n # change it.\n return\n if '/' not in link or link.startswith('@executable_path/'):\n link = os.path.basename(link)\n file_names = defaultdict(list)\n for f in files:\n file_names[os.path.basename(f)].append(f)\n if link not in file_names:\n sys.exit(\"Error: Could not find %s\" % link)\n if len(file_names[link]) > 1:\n if path and os.path.basename(path) == link:\n # The link is for the file itself, just use it\n return path\n # Allow for the possibility of the same library appearing in\n # multiple places.\n md5s = set()\n for f in file_names[link]:\n md5s.add(md5_file(os.path.join(prefix, f)))\n if len(md5s) > 1:\n sys.exit(\"Error: Found multiple instances of %s: %s\" % (link, file_names[link]))\n else:\n file_names[link].sort()\n print(\"Found multiple instances of %s (%s). \"\n \"Choosing the first one.\" % (link, file_names[link]))\n return file_names[link][0]\n print(\"Don't know how to find %s, skipping\" % link)\n\n\ndef osx_ch_link(path, link_dict, prefix):\n link = link_dict['name']\n print(\"Fixing linking of %s in %s\" % (link, path))\n link_loc = find_lib(link, prefix, path)\n if not link_loc:\n return\n\n lib_to_link = os.path.relpath(os.path.dirname(link_loc), 'lib')\n # path_to_lib = utils.relative(path[len(prefix) + 1:])\n\n # e.g., if\n # path = '/build_prefix/lib/some/stuff/libstuff.dylib'\n # link_loc = 'lib/things/libthings.dylib'\n\n # then\n\n # lib_to_link = 'things'\n # path_to_lib = '../..'\n\n # @rpath always means 'lib', link will be at\n # @rpath/lib_to_link/basename(link), like @rpath/things/libthings.dylib.\n\n # For when we can't use @rpath, @loader_path means the path to the library\n # ('path'), so from path to link is\n # @loader_path/path_to_lib/lib_to_link/basename(link), like\n # @loader_path/../../things/libthings.dylib.\n\n ret = '@rpath/%s/%s' % (lib_to_link, os.path.basename(link))\n\n # XXX: IF the above fails for whatever reason, the below can be used\n # TODO: This might contain redundant ..'s if link and path are both in\n # some subdirectory of lib.\n # ret = '@loader_path/%s/%s/%s' % (path_to_lib, lib_to_link, basename(link))\n\n ret = ret.replace('/./', '/')\n\n return ret\n\n\ndef mk_relative_osx(path, prefix, build_prefix=None):\n '''\n if build_prefix is None, the_n this is a standard conda build. The path\n and all dependencies are in the build_prefix.\n\n if package is built in develop mode, build_prefix is specified. 
Object\n specified by 'path' needs to relink runtime dependences to libs found in\n build_prefix/lib/. Also, in develop mode, 'path' is not in 'build_prefix'\n '''\n if build_prefix is None:\n assert path.startswith(prefix + '/')\n else:\n prefix = build_prefix\n\n assert sys.platform == 'darwin' and is_obj(path)\n s = macho.install_name_change(path, partial(osx_ch_link, prefix=prefix))\n\n names = macho.otool(path)\n if names:\n # Add an rpath to every executable to increase the chances of it\n # being found.\n rpath = os.path.join('@loader_path',\n os.path.relpath(os.path.join(prefix, 'lib'),\n os.path.dirname(path)), '').replace('/./', '/')\n macho.add_rpath(path, rpath, verbose=True)\n\n # 10.7 install_name_tool -delete_rpath causes broken dylibs, I will revisit this ASAP.\n # .. and remove config.build_prefix/lib which was added in-place of\n # DYLD_FALLBACK_LIBRARY_PATH since El Capitan's SIP.\n # macho.delete_rpath(path, config.build_prefix + '/lib', verbose = True)\n\n if s:\n # Skip for stub files, which have to use binary_has_prefix_files to be\n # made relocatable.\n assert_relative_osx(path, prefix)\n\n\ndef mk_relative_linux(f, prefix, rpaths=('lib',)):\n 'Respects the original values and converts abs to $ORIGIN-relative'\n\n elf = os.path.join(prefix, f)\n origin = os.path.dirname(elf)\n\n patchelf = external.find_executable('patchelf', prefix)\n try:\n existing = check_output([patchelf, '--print-rpath', elf]).decode('utf-8').splitlines()[0]\n except:\n print('patchelf: --print-rpath failed for %s\\n' % (elf))\n return\n existing = existing.split(os.pathsep)\n new = []\n for old in existing:\n if old.startswith('$ORIGIN'):\n new.append(old)\n elif old.startswith('/'):\n # Test if this absolute path is outside of prefix. That is fatal.\n relpath = os.path.relpath(old, prefix)\n if relpath.startswith('..' + os.sep):\n print('Warning: rpath {0} is outside prefix {1} (removing it)'.format(old, prefix))\n else:\n relpath = '$ORIGIN/' + os.path.relpath(old, origin)\n if relpath not in new:\n new.append(relpath)\n # Ensure that the asked-for paths are also in new.\n for rpath in rpaths:\n if not rpath.startswith('/'):\n # IMHO utils.relative shouldn't exist, but I am too paranoid to remove\n # it, so instead, make sure that what I think it should be replaced by\n # gives the same result and assert if not. 
Yeah, I am a chicken.\n rel_ours = os.path.normpath(utils.relative(f, rpath))\n rel_stdlib = os.path.normpath(os.path.relpath(rpath, os.path.dirname(f)))\n assert rel_ours == rel_stdlib, \\\n 'utils.relative {0} and relpath {1} disagree for {2}, {3}'.format(\n rel_ours, rel_stdlib, f, rpath)\n rpath = '$ORIGIN/' + rel_stdlib\n if rpath not in new:\n new.append(rpath)\n rpath = ':'.join(new)\n print('patchelf: file: %s\\n setting rpath to: %s' % (elf, rpath))\n call([patchelf, '--force-rpath', '--set-rpath', rpath, elf])\n\n\ndef assert_relative_osx(path, prefix):\n for name in macho.get_dylibs(path):\n assert not name.startswith(prefix), path\n\n\ndef mk_relative(m, f, prefix):\n assert sys.platform != 'win32'\n path = os.path.join(prefix, f)\n if not is_obj(path):\n return\n\n if sys.platform.startswith('linux'):\n mk_relative_linux(f, prefix=prefix, rpaths=m.get_value('build/rpaths', ['lib']))\n elif sys.platform == 'darwin':\n mk_relative_osx(path, prefix=prefix)\n\n\ndef fix_permissions(files, prefix):\n print(\"Fixing permissions\")\n for root, dirs, _ in os.walk(prefix):\n for dn in dirs:\n lchmod(os.path.join(root, dn), 0o775)\n\n for f in files:\n path = os.path.join(prefix, f)\n st = os.lstat(path)\n old_mode = stat.S_IMODE(st.st_mode)\n new_mode = old_mode\n # broadcast execute\n if old_mode & stat.S_IXUSR:\n new_mode = new_mode | stat.S_IXGRP | stat.S_IXOTH\n # ensure user and group can write and all can read\n new_mode = new_mode | stat.S_IWUSR | stat.S_IWGRP | stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH # noqa\n if old_mode != new_mode:\n try:\n lchmod(path, new_mode)\n except (OSError, utils.PermissionError) as e:\n log = utils.get_logger(__name__)\n log.warn(str(e))\n\n\ndef post_build(m, files, prefix, build_python, croot):\n print('number of files:', len(files))\n fix_permissions(files, prefix)\n\n for f in files:\n make_hardlink_copy(f, prefix)\n\n if sys.platform == 'win32':\n return\n\n binary_relocation = m.binary_relocation()\n if not binary_relocation:\n print(\"Skipping binary relocation logic\")\n osx_is_app = bool(m.get_value('build/osx_is_app', False)) and sys.platform == 'darwin'\n\n check_symlinks(files, prefix, croot)\n\n for f in files:\n if f.startswith('bin/'):\n fix_shebang(f, prefix=prefix, build_python=build_python, osx_is_app=osx_is_app)\n if binary_relocation is True or (isinstance(binary_relocation, list) and\n f in binary_relocation):\n mk_relative(m, f, prefix)\n\n\ndef check_symlinks(files, prefix, croot):\n if readlink is False:\n return # Not on Unix system\n msgs = []\n real_build_prefix = os.path.realpath(prefix)\n for f in files:\n path = os.path.join(real_build_prefix, f)\n if os.path.islink(path):\n link_path = readlink(path)\n real_link_path = os.path.realpath(path)\n # symlinks to binaries outside of the same dir don't work. RPATH stuff gets confused\n # because ld.so follows symlinks in RPATHS\n # If condition exists, then copy the file rather than symlink it.\n if (not os.path.dirname(link_path) == os.path.dirname(real_link_path) and\n is_obj(f)):\n os.remove(path)\n utils.copy_into(real_link_path, path)\n elif real_link_path.startswith(real_build_prefix):\n # If the path is in the build prefix, this is fine, but\n # the link needs to be relative\n if not link_path.startswith('.'):\n # Don't change the link structure if it is already a\n # relative link. 
It's possible that ..'s later in the path\n # can result in a broken link still, but we'll assume that\n # such crazy things don't happen.\n print(\"Making absolute symlink %s -> %s relative\" % (f, link_path))\n os.unlink(path)\n os.symlink(os.path.relpath(real_link_path, os.path.dirname(path)), path)\n else:\n # Symlinks to absolute paths on the system (like /usr) are fine.\n if real_link_path.startswith(croot):\n msgs.append(\"%s is a symlink to a path that may not \"\n \"exist after the build is completed (%s)\" % (f, link_path))\n\n if msgs:\n for msg in msgs:\n print(\"Error: %s\" % msg, file=sys.stderr)\n sys.exit(1)\n\n\ndef make_hardlink_copy(path, prefix):\n \"\"\"Hardlinks create invalid packages. Copy files to break the link.\n Symlinks are OK, and unaffected here.\"\"\"\n if not os.path.isabs(path):\n path = os.path.normpath(os.path.join(prefix, path))\n fn = os.path.basename(path)\n if os.lstat(path).st_nlink > 1:\n with TemporaryDirectory() as dest:\n # copy file to new name\n utils.copy_into(path, dest)\n # remove old file\n utils.rm_rf(path)\n # rename copy to original filename\n # It is essential here to use copying (as opposed to os.rename), so that\n # crossing volume boundaries works\n utils.copy_into(os.path.join(dest, fn), path)\n\n\ndef get_build_metadata(m):\n src_dir = m.config.work_dir\n if os.path.exists(os.path.join(src_dir, '__conda_version__.txt')):\n raise ValueError(\"support for __conda_version__ has been removed as of Conda-build 3.0.\"\n \"Try Jinja templates instead: \"\n \"http://conda.pydata.org/docs/building/meta-yaml.html#templating-with-jinja\")\n if os.path.exists(os.path.join(src_dir, '__conda_buildnum__.txt')):\n raise ValueError(\"support for __conda_buildnum__ has been removed as of Conda-build 3.0.\"\n \"Try Jinja templates instead: \"\n \"http://conda.pydata.org/docs/building/meta-yaml.html#templating-with-jinja\")\n if os.path.exists(os.path.join(src_dir, '__conda_buildstr__.txt')):\n raise ValueError(\"support for __conda_buildstr__ has been removed as of Conda-build 3.0.\"\n \"Try Jinja templates instead: \"\n \"http://conda.pydata.org/docs/building/meta-yaml.html#templating-with-jinja\")\n", "path": "conda_build/post.py"}]} |
gh_patches_debug_1256 | rasdani/github-patches | git_diff | kserve__kserve-3034 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
option to load credentials directly from a secret for s3
Currently, to download from a private S3 bucket, [you create both a secret and a service account that you link it to](https://github.com/kubeflow/kfserving/blob/master/docs/samples/s3/s3_secret.yaml). You then set the [serviceAccountName on the KFService](https://github.com/kubeflow/kfserving/blob/master/docs/samples/s3/tensorflow_s3.yaml#L7), and it is [used to add env vars based on the secret to the initContainer that downloads the model](https://github.com/kubeflow/kfserving/blob/master/pkg/controller/kfservice/resources/credentials/service_account_credentials.go#L94).
It might be easier for S3 users to just create a secret containing entries intended as environment variables and link that directly by having an 'envSecretRefName' entry in the CRD. The implementation could then use 'envFrom' to apply the secret's values.
It seems the [original idea for credentials was to use a Secret](https://github.com/kubeflow/kfserving/issues/36), and this morphed into a ServiceAccount as a 'first pass'. Presumably there's no in-principle objection to also supporting a direct secret for S3?
--- END ISSUE ---
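As an illustration of the 'envFrom' idea from the issue above, here is a minimal Go sketch of what the controller-side change might look like. The helper name, its call site, and the `envSecretRefName` field are hypothetical (they are the proposal, not an existing KFServing API); only the `k8s.io/api/core/v1` types are the standard Kubernetes API.

```go
package credentials

import (
	corev1 "k8s.io/api/core/v1"
)

// applyEnvSecretRef is a hypothetical helper: when the proposed
// envSecretRefName field is set on the spec, attach the whole secret to the
// model-download (storage initializer) container via envFrom, instead of
// copying individual keys out of a ServiceAccount-linked secret.
func applyEnvSecretRef(container *corev1.Container, envSecretRefName string) {
	if envSecretRefName == "" {
		return
	}
	container.EnvFrom = append(container.EnvFrom, corev1.EnvFromSource{
		SecretRef: &corev1.SecretEnvSource{
			LocalObjectReference: corev1.LocalObjectReference{Name: envSecretRefName},
		},
	})
}
```

With `envFrom`, every key in the referenced secret (e.g. `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`) is injected verbatim as an environment variable in the container, which is exactly the behaviour the issue asks for.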
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/kserve/kserve/api_client.py`
Content:
```
1 # Copyright 2023 The KServe Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 # coding: utf-8
16 """
17 KServe
18
19 Python SDK for KServe # noqa: E501
20
21 The version of the OpenAPI document: v0.1
22 Generated by: https://openapi-generator.tech
23 """
24
25 from __future__ import absolute_import
26
27 import atexit
28 import datetime
29 from dateutil.parser import parse
30 import json
31 import mimetypes
32 from multiprocessing.pool import ThreadPool
33 import os
34 import re
35 import tempfile
36
37 # python 2 and python 3 compatibility library
38 import six
39 from six.moves.urllib.parse import quote
40
41 from kserve.configuration import Configuration
42 import kserve.models
43 from kserve import rest
44 from kserve.exceptions import ApiValueError, ApiException
45
46
47 class ApiClient(object):
48 """Generic API client for OpenAPI client library builds.
49
50 OpenAPI generic API client. This client handles the client-
51 server communication, and is invariant across implementations. Specifics of
52 the methods and models for each application are generated from the OpenAPI
53 templates.
54
55 NOTE: This class is auto generated by OpenAPI Generator.
56 Ref: https://openapi-generator.tech
57 Do not edit the class manually.
58
59 :param configuration: .Configuration object for this client
60 :param header_name: a header to pass when making calls to the API.
61 :param header_value: a header value to pass when making calls to
62 the API.
63 :param cookie: a cookie to include in the header when making calls
64 to the API
65 :param pool_threads: The number of threads to use for async requests
66 to the API. More threads means more concurrent API requests.
67 """
68
69 PRIMITIVE_TYPES = (float, bool, bytes, six.text_type) + six.integer_types
70 NATIVE_TYPES_MAPPING = {
71 'int': int,
72 'long': int if six.PY3 else long, # noqa: F821
73 'float': float,
74 'str': str,
75 'bool': bool,
76 'date': datetime.date,
77 'datetime': datetime.datetime,
78 'object': object,
79 }
80 _pool = None
81
82 def __init__(self, configuration=None, header_name=None, header_value=None,
83 cookie=None, pool_threads=1):
84 if configuration is None:
85 configuration = Configuration.get_default_copy()
86 self.configuration = configuration
87 self.pool_threads = pool_threads
88
89 self.rest_client = rest.RESTClientObject(configuration)
90 self.default_headers = {}
91 if header_name is not None:
92 self.default_headers[header_name] = header_value
93 self.cookie = cookie
94 # Set default User-Agent.
95 self.user_agent = 'OpenAPI-Generator/0.1/python'
96 self.client_side_validation = configuration.client_side_validation
97
98 def __enter__(self):
99 return self
100
101 def __exit__(self, exc_type, exc_value, traceback):
102 self.close()
103
104 def close(self):
105 if self._pool:
106 self._pool.close()
107 self._pool.join()
108 self._pool = None
109 if hasattr(atexit, 'unregister'):
110 atexit.unregister(self.close)
111
112 @property
113 def pool(self):
114 """Create thread pool on first request
115 avoids instantiating unused threadpool for blocking clients.
116 """
117 if self._pool is None:
118 atexit.register(self.close)
119 self._pool = ThreadPool(self.pool_threads)
120 return self._pool
121
122 @property
123 def user_agent(self):
124 """User agent for this API client"""
125 return self.default_headers['User-Agent']
126
127 @user_agent.setter
128 def user_agent(self, value):
129 self.default_headers['User-Agent'] = value
130
131 def set_default_header(self, header_name, header_value):
132 self.default_headers[header_name] = header_value
133
134 def __call_api(
135 self, resource_path, method, path_params=None,
136 query_params=None, header_params=None, body=None, post_params=None,
137 files=None, response_type=None, auth_settings=None,
138 _return_http_data_only=None, collection_formats=None,
139 _preload_content=True, _request_timeout=None, _host=None):
140
141 config = self.configuration
142
143 # header parameters
144 header_params = header_params or {}
145 header_params.update(self.default_headers)
146 if self.cookie:
147 header_params['Cookie'] = self.cookie
148 if header_params:
149 header_params = self.sanitize_for_serialization(header_params)
150 header_params = dict(self.parameters_to_tuples(header_params,
151 collection_formats))
152
153 # path parameters
154 if path_params:
155 path_params = self.sanitize_for_serialization(path_params)
156 path_params = self.parameters_to_tuples(path_params,
157 collection_formats)
158 for k, v in path_params:
159 # specified safe chars, encode everything
160 resource_path = resource_path.replace(
161 '{%s}' % k,
162 quote(str(v), safe=config.safe_chars_for_path_param)
163 )
164
165 # query parameters
166 if query_params:
167 query_params = self.sanitize_for_serialization(query_params)
168 query_params = self.parameters_to_tuples(query_params,
169 collection_formats)
170
171 # post parameters
172 if post_params or files:
173 post_params = post_params if post_params else []
174 post_params = self.sanitize_for_serialization(post_params)
175 post_params = self.parameters_to_tuples(post_params,
176 collection_formats)
177 post_params.extend(self.files_parameters(files))
178
179 # auth setting
180 self.update_params_for_auth(header_params, query_params, auth_settings)
181
182 # body
183 if body:
184 body = self.sanitize_for_serialization(body)
185
186 # request url
187 if _host is None:
188 url = self.configuration.host + resource_path
189 else:
190 # use server/host defined in path or operation instead
191 url = _host + resource_path
192
193 try:
194 # perform request and return response
195 response_data = self.request(
196 method, url, query_params=query_params, headers=header_params,
197 post_params=post_params, body=body,
198 _preload_content=_preload_content,
199 _request_timeout=_request_timeout)
200 except ApiException as e:
201 e.body = e.body.decode('utf-8') if six.PY3 else e.body
202 raise e
203
204 content_type = response_data.getheader('content-type')
205
206 self.last_response = response_data
207
208 return_data = response_data
209
210 if not _preload_content:
211 return return_data
212
213 if six.PY3 and response_type not in ["file", "bytes"]:
214 match = None
215 if content_type is not None:
216 match = re.search(r"charset=([a-zA-Z\-\d]+)[\s\;]?", content_type)
217 encoding = match.group(1) if match else "utf-8"
218 response_data.data = response_data.data.decode(encoding)
219
220 # deserialize response data
221 if response_type:
222 return_data = self.deserialize(response_data, response_type)
223 else:
224 return_data = None
225
226 if _return_http_data_only:
227 return (return_data)
228 else:
229 return (return_data, response_data.status,
230 response_data.getheaders())
231
232 def sanitize_for_serialization(self, obj):
233 """Builds a JSON POST object.
234
235 If obj is None, return None.
236 If obj is str, int, long, float, bool, return directly.
237 If obj is datetime.datetime, datetime.date
238 convert to string in iso8601 format.
239 If obj is list, sanitize each element in the list.
240 If obj is dict, return the dict.
241 If obj is OpenAPI model, return the properties dict.
242
243 :param obj: The data to serialize.
244 :return: The serialized form of data.
245 """
246 if obj is None:
247 return None
248 elif isinstance(obj, self.PRIMITIVE_TYPES):
249 return obj
250 elif isinstance(obj, list):
251 return [self.sanitize_for_serialization(sub_obj)
252 for sub_obj in obj]
253 elif isinstance(obj, tuple):
254 return tuple(self.sanitize_for_serialization(sub_obj)
255 for sub_obj in obj)
256 elif isinstance(obj, (datetime.datetime, datetime.date)):
257 return obj.isoformat()
258
259 if isinstance(obj, dict):
260 obj_dict = obj
261 else:
262 # Convert model obj to dict except
263 # attributes `openapi_types`, `attribute_map`
264 # and attributes which value is not None.
265 # Convert attribute name to json key in
266 # model definition for request.
267 obj_dict = {obj.attribute_map[attr]: getattr(obj, attr)
268 for attr, _ in six.iteritems(obj.openapi_types)
269 if getattr(obj, attr) is not None}
270
271 return {key: self.sanitize_for_serialization(val)
272 for key, val in six.iteritems(obj_dict)}
273
274 def deserialize(self, response, response_type):
275 """Deserializes response into an object.
276
277 :param response: RESTResponse object to be deserialized.
278 :param response_type: class literal for
279 deserialized object, or string of class name.
280
281 :return: deserialized object.
282 """
283 # handle file downloading
284 # save response body into a tmp file and return the instance
285 if response_type == "file":
286 return self.__deserialize_file(response)
287
288 # fetch data from response object
289 try:
290 data = json.loads(response.data)
291 except ValueError:
292 data = response.data
293
294 return self.__deserialize(data, response_type)
295
296 def __deserialize(self, data, klass):
297 """Deserializes dict, list, str into an object.
298
299 :param data: dict, list or str.
300 :param klass: class literal, or string of class name.
301
302 :return: object.
303 """
304 if data is None:
305 return None
306
307 if type(klass) == str:
308 if klass.startswith('list['):
309 sub_kls = re.match(r'list\[(.*)\]', klass).group(1)
310 return [self.__deserialize(sub_data, sub_kls)
311 for sub_data in data]
312
313 if klass.startswith('dict('):
314 sub_kls = re.match(r'dict\(([^,]*), (.*)\)', klass).group(2)
315 return {k: self.__deserialize(v, sub_kls)
316 for k, v in six.iteritems(data)}
317
318 # convert str to class
319 if klass in self.NATIVE_TYPES_MAPPING:
320 klass = self.NATIVE_TYPES_MAPPING[klass]
321 else:
322 klass = getattr(kserve.models, klass)
323
324 if klass in self.PRIMITIVE_TYPES:
325 return self.__deserialize_primitive(data, klass)
326 elif klass == object:
327 return self.__deserialize_object(data)
328 elif klass == datetime.date:
329 return self.__deserialize_date(data)
330 elif klass == datetime.datetime:
331 return self.__deserialize_datetime(data)
332 else:
333 return self.__deserialize_model(data, klass)
334
335 def call_api(self, resource_path, method,
336 path_params=None, query_params=None, header_params=None,
337 body=None, post_params=None, files=None,
338 response_type=None, auth_settings=None, async_req=None,
339 _return_http_data_only=None, collection_formats=None,
340 _preload_content=True, _request_timeout=None, _host=None):
341 """Makes the HTTP request (synchronous) and returns deserialized data.
342
343 To make an async_req request, set the async_req parameter.
344
345 :param resource_path: Path to method endpoint.
346 :param method: Method to call.
347 :param path_params: Path parameters in the url.
348 :param query_params: Query parameters in the url.
349 :param header_params: Header parameters to be
350 placed in the request header.
351 :param body: Request body.
352 :param post_params dict: Request post form parameters,
353 for `application/x-www-form-urlencoded`, `multipart/form-data`.
354 :param auth_settings list: Auth Settings names for the request.
355 :param response: Response data type.
356 :param files dict: key -> filename, value -> filepath,
357 for `multipart/form-data`.
358 :param async_req bool: execute request asynchronously
359 :param _return_http_data_only: response data without head status code
360 and headers
361 :param collection_formats: dict of collection formats for path, query,
362 header, and post parameters.
363 :param _preload_content: if False, the urllib3.HTTPResponse object will
364 be returned without reading/decoding response
365 data. Default is True.
366 :param _request_timeout: timeout setting for this request. If one
367 number provided, it will be total request
368 timeout. It can also be a pair (tuple) of
369 (connection, read) timeouts.
370 :return:
371 If async_req parameter is True,
372 the request will be called asynchronously.
373 The method will return the request thread.
374 If parameter async_req is False or missing,
375 then the method will return the response directly.
376 """
377 if not async_req:
378 return self.__call_api(resource_path, method,
379 path_params, query_params, header_params,
380 body, post_params, files,
381 response_type, auth_settings,
382 _return_http_data_only, collection_formats,
383 _preload_content, _request_timeout, _host)
384
385 return self.pool.apply_async(self.__call_api, (resource_path,
386 method, path_params,
387 query_params,
388 header_params, body,
389 post_params, files,
390 response_type,
391 auth_settings,
392 _return_http_data_only,
393 collection_formats,
394 _preload_content,
395 _request_timeout,
396 _host))
397
398 def request(self, method, url, query_params=None, headers=None,
399 post_params=None, body=None, _preload_content=True,
400 _request_timeout=None):
401 """Makes the HTTP request using RESTClient."""
402 if method == "GET":
403 return self.rest_client.GET(url,
404 query_params=query_params,
405 _preload_content=_preload_content,
406 _request_timeout=_request_timeout,
407 headers=headers)
408 elif method == "HEAD":
409 return self.rest_client.HEAD(url,
410 query_params=query_params,
411 _preload_content=_preload_content,
412 _request_timeout=_request_timeout,
413 headers=headers)
414 elif method == "OPTIONS":
415 return self.rest_client.OPTIONS(url,
416 query_params=query_params,
417 headers=headers,
418 _preload_content=_preload_content,
419 _request_timeout=_request_timeout)
420 elif method == "POST":
421 return self.rest_client.POST(url,
422 query_params=query_params,
423 headers=headers,
424 post_params=post_params,
425 _preload_content=_preload_content,
426 _request_timeout=_request_timeout,
427 body=body)
428 elif method == "PUT":
429 return self.rest_client.PUT(url,
430 query_params=query_params,
431 headers=headers,
432 post_params=post_params,
433 _preload_content=_preload_content,
434 _request_timeout=_request_timeout,
435 body=body)
436 elif method == "PATCH":
437 return self.rest_client.PATCH(url,
438 query_params=query_params,
439 headers=headers,
440 post_params=post_params,
441 _preload_content=_preload_content,
442 _request_timeout=_request_timeout,
443 body=body)
444 elif method == "DELETE":
445 return self.rest_client.DELETE(url,
446 query_params=query_params,
447 headers=headers,
448 _preload_content=_preload_content,
449 _request_timeout=_request_timeout,
450 body=body)
451 else:
452 raise ApiValueError(
453 "http method must be `GET`, `HEAD`, `OPTIONS`,"
454 " `POST`, `PATCH`, `PUT` or `DELETE`."
455 )
456
457 def parameters_to_tuples(self, params, collection_formats):
458 """Get parameters as list of tuples, formatting collections.
459
460 :param params: Parameters as dict or list of two-tuples
461 :param dict collection_formats: Parameter collection formats
462 :return: Parameters as list of tuples, collections formatted
463 """
464 new_params = []
465 if collection_formats is None:
466 collection_formats = {}
467 for k, v in six.iteritems(params) if isinstance(params, dict) else params: # noqa: E501
468 if k in collection_formats:
469 collection_format = collection_formats[k]
470 if collection_format == 'multi':
471 new_params.extend((k, value) for value in v)
472 else:
473 if collection_format == 'ssv':
474 delimiter = ' '
475 elif collection_format == 'tsv':
476 delimiter = '\t'
477 elif collection_format == 'pipes':
478 delimiter = '|'
479 else: # csv is the default
480 delimiter = ','
481 new_params.append(
482 (k, delimiter.join(str(value) for value in v)))
483 else:
484 new_params.append((k, v))
485 return new_params
486
487 def files_parameters(self, files=None):
488 """Builds form parameters.
489
490 :param files: File parameters.
491 :return: Form parameters with files.
492 """
493 params = []
494
495 if files:
496 for k, v in six.iteritems(files):
497 if not v:
498 continue
499 file_names = v if type(v) is list else [v]
500 for n in file_names:
501 with open(n, 'rb') as f:
502 filename = os.path.basename(f.name)
503 filedata = f.read()
504 mimetype = (mimetypes.guess_type(filename)[0] or
505 'application/octet-stream')
506 params.append(
507 tuple([k, tuple([filename, filedata, mimetype])]))
508
509 return params
510
511 def select_header_accept(self, accepts):
512 """Returns `Accept` based on an array of accepts provided.
513
514 :param accepts: List of headers.
515 :return: Accept (e.g. application/json).
516 """
517 if not accepts:
518 return
519
520 accepts = [x.lower() for x in accepts]
521
522 if 'application/json' in accepts:
523 return 'application/json'
524 else:
525 return ', '.join(accepts)
526
527 def select_header_content_type(self, content_types):
528 """Returns `Content-Type` based on an array of content_types provided.
529
530 :param content_types: List of content-types.
531 :return: Content-Type (e.g. application/json).
532 """
533 if not content_types:
534 return 'application/json'
535
536 content_types = [x.lower() for x in content_types]
537
538 if 'application/json' in content_types or '*/*' in content_types:
539 return 'application/json'
540 else:
541 return content_types[0]
542
543 def update_params_for_auth(self, headers, querys, auth_settings):
544 """Updates header and query params based on authentication setting.
545
546 :param headers: Header parameters dict to be updated.
547 :param querys: Query parameters tuple list to be updated.
548 :param auth_settings: Authentication setting identifiers list.
549 """
550 if not auth_settings:
551 return
552
553 for auth in auth_settings:
554 auth_setting = self.configuration.auth_settings().get(auth)
555 if auth_setting:
556 if auth_setting['in'] == 'cookie':
557 headers['Cookie'] = auth_setting['value']
558 elif auth_setting['in'] == 'header':
559 headers[auth_setting['key']] = auth_setting['value']
560 elif auth_setting['in'] == 'query':
561 querys.append((auth_setting['key'], auth_setting['value']))
562 else:
563 raise ApiValueError(
564 'Authentication token must be in `query` or `header`'
565 )
566
567 def __deserialize_file(self, response):
568 """Deserializes body to file
569
570 Saves response body into a file in a temporary folder,
571 using the filename from the `Content-Disposition` header if provided.
572
573 :param response: RESTResponse.
574 :return: file path.
575 """
576 fd, path = tempfile.mkstemp(dir=self.configuration.temp_folder_path)
577 os.close(fd)
578 os.remove(path)
579
580 content_disposition = response.getheader("Content-Disposition")
581 if content_disposition:
582 filename = re.search(r'filename=[\'"]?([^\'"\s]+)[\'"]?',
583 content_disposition).group(1)
584 path = os.path.join(os.path.dirname(path), filename)
585
586 with open(path, "wb") as f:
587 f.write(response.data)
588
589 return path
590
591 def __deserialize_primitive(self, data, klass):
592 """Deserializes string to primitive type.
593
594 :param data: str.
595 :param klass: class literal.
596
597 :return: int, long, float, str, bool.
598 """
599 try:
600 return klass(data)
601 except UnicodeEncodeError:
602 return six.text_type(data)
603 except TypeError:
604 return data
605
606 def __deserialize_object(self, value):
607 """Return an original value.
608
609 :return: object.
610 """
611 return value
612
613 def __deserialize_date(self, string):
614 """Deserializes string to date.
615
616 :param string: str.
617 :return: date.
618 """
619 try:
620 return parse(string).date()
621 except ImportError:
622 return string
623 except ValueError:
624 raise rest.ApiException(
625 status=0,
626 reason="Failed to parse `{0}` as date object".format(string)
627 )
628
629 def __deserialize_datetime(self, string):
630 """Deserializes string to datetime.
631
632 The string should be in iso8601 datetime format.
633
634 :param string: str.
635 :return: datetime.
636 """
637 try:
638 return parse(string)
639 except ImportError:
640 return string
641 except ValueError:
642 raise rest.ApiException(
643 status=0,
644 reason=(
645 "Failed to parse `{0}` as datetime object"
646 .format(string)
647 )
648 )
649
650 def __deserialize_model(self, data, klass):
651 """Deserializes list or dict to model.
652
653 :param data: dict, list.
654 :param klass: class literal.
655 :return: model object.
656 """
657 has_discriminator = False
658 if (hasattr(klass, 'get_real_child_model')
659 and klass.discriminator_value_class_map):
660 has_discriminator = True
661
662 if not klass.openapi_types and has_discriminator is False:
663 return data
664
665 kwargs = {}
666 if (data is not None and
667 klass.openapi_types is not None and
668 isinstance(data, (list, dict))):
669 for attr, attr_type in six.iteritems(klass.openapi_types):
670 if klass.attribute_map[attr] in data:
671 value = data[klass.attribute_map[attr]]
672 kwargs[attr] = self.__deserialize(value, attr_type)
673
674 instance = klass(**kwargs)
675
676 if has_discriminator:
677 klass_name = instance.get_real_child_model(data)
678 if klass_name:
679 instance = self.__deserialize(data, klass_name)
680 return instance
681
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/python/kserve/kserve/api_client.py b/python/kserve/kserve/api_client.py
--- a/python/kserve/kserve/api_client.py
+++ b/python/kserve/kserve/api_client.py
@@ -304,7 +304,7 @@
if data is None:
return None
- if type(klass) == str:
+ if type(klass) is str:
if klass.startswith('list['):
sub_kls = re.match(r'list\[(.*)\]', klass).group(1)
return [self.__deserialize(sub_data, sub_kls)
| {"golden_diff": "diff --git a/python/kserve/kserve/api_client.py b/python/kserve/kserve/api_client.py\n--- a/python/kserve/kserve/api_client.py\n+++ b/python/kserve/kserve/api_client.py\n@@ -304,7 +304,7 @@\n if data is None:\n return None\n \n- if type(klass) == str:\n+ if type(klass) is str:\n if klass.startswith('list['):\n sub_kls = re.match(r'list\\[(.*)\\]', klass).group(1)\n return [self.__deserialize(sub_data, sub_kls)\n", "issue": "option to load credentials directly from a secret for s3\nCurrently to download from a private S3 bucket [you create both a secret and a service account that you link it to](https://github.com/kubeflow/kfserving/blob/master/docs/samples/s3/s3_secret.yaml). You then set the [serviceAccountName on the KFService ](https://github.com/kubeflow/kfserving/blob/master/docs/samples/s3/tensorflow_s3.yaml#L7) and it is [used to add env vars based on the secret to the initContainer that downloads the model](https://github.com/kubeflow/kfserving/blob/master/pkg/controller/kfservice/resources/credentials/service_account_credentials.go#L94).\r\n\r\nIt might be easier for s3 users to just create a secret containing entries intended as environment variables and link that directly by having a 'envSecretRefName' entry in the CRD. This could be used in the implementation to use 'envFrom' to apply the secret's values.\r\n\r\nIt seems the[ original idea for credentials was to use a Secret ](https://github.com/kubeflow/kfserving/issues/36)and this morphed into a ServiceAccount as a 'first pass'. Presumably there's no in principle objection to also supporting a direct secret for s3?\n", "before_files": [{"content": "# Copyright 2023 The KServe Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# coding: utf-8\n\"\"\"\n KServe\n\n Python SDK for KServe # noqa: E501\n\n The version of the OpenAPI document: v0.1\n Generated by: https://openapi-generator.tech\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport atexit\nimport datetime\nfrom dateutil.parser import parse\nimport json\nimport mimetypes\nfrom multiprocessing.pool import ThreadPool\nimport os\nimport re\nimport tempfile\n\n# python 2 and python 3 compatibility library\nimport six\nfrom six.moves.urllib.parse import quote\n\nfrom kserve.configuration import Configuration\nimport kserve.models\nfrom kserve import rest\nfrom kserve.exceptions import ApiValueError, ApiException\n\n\nclass ApiClient(object):\n \"\"\"Generic API client for OpenAPI client library builds.\n\n OpenAPI generic API client. This client handles the client-\n server communication, and is invariant across implementations. 
Specifics of\n the methods and models for each application are generated from the OpenAPI\n templates.\n\n NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n Do not edit the class manually.\n\n :param configuration: .Configuration object for this client\n :param header_name: a header to pass when making calls to the API.\n :param header_value: a header value to pass when making calls to\n the API.\n :param cookie: a cookie to include in the header when making calls\n to the API\n :param pool_threads: The number of threads to use for async requests\n to the API. More threads means more concurrent API requests.\n \"\"\"\n\n PRIMITIVE_TYPES = (float, bool, bytes, six.text_type) + six.integer_types\n NATIVE_TYPES_MAPPING = {\n 'int': int,\n 'long': int if six.PY3 else long, # noqa: F821\n 'float': float,\n 'str': str,\n 'bool': bool,\n 'date': datetime.date,\n 'datetime': datetime.datetime,\n 'object': object,\n }\n _pool = None\n\n def __init__(self, configuration=None, header_name=None, header_value=None,\n cookie=None, pool_threads=1):\n if configuration is None:\n configuration = Configuration.get_default_copy()\n self.configuration = configuration\n self.pool_threads = pool_threads\n\n self.rest_client = rest.RESTClientObject(configuration)\n self.default_headers = {}\n if header_name is not None:\n self.default_headers[header_name] = header_value\n self.cookie = cookie\n # Set default User-Agent.\n self.user_agent = 'OpenAPI-Generator/0.1/python'\n self.client_side_validation = configuration.client_side_validation\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.close()\n\n def close(self):\n if self._pool:\n self._pool.close()\n self._pool.join()\n self._pool = None\n if hasattr(atexit, 'unregister'):\n atexit.unregister(self.close)\n\n @property\n def pool(self):\n \"\"\"Create thread pool on first request\n avoids instantiating unused threadpool for blocking clients.\n \"\"\"\n if self._pool is None:\n atexit.register(self.close)\n self._pool = ThreadPool(self.pool_threads)\n return self._pool\n\n @property\n def user_agent(self):\n \"\"\"User agent for this API client\"\"\"\n return self.default_headers['User-Agent']\n\n @user_agent.setter\n def user_agent(self, value):\n self.default_headers['User-Agent'] = value\n\n def set_default_header(self, header_name, header_value):\n self.default_headers[header_name] = header_value\n\n def __call_api(\n self, resource_path, method, path_params=None,\n query_params=None, header_params=None, body=None, post_params=None,\n files=None, response_type=None, auth_settings=None,\n _return_http_data_only=None, collection_formats=None,\n _preload_content=True, _request_timeout=None, _host=None):\n\n config = self.configuration\n\n # header parameters\n header_params = header_params or {}\n header_params.update(self.default_headers)\n if self.cookie:\n header_params['Cookie'] = self.cookie\n if header_params:\n header_params = self.sanitize_for_serialization(header_params)\n header_params = dict(self.parameters_to_tuples(header_params,\n collection_formats))\n\n # path parameters\n if path_params:\n path_params = self.sanitize_for_serialization(path_params)\n path_params = self.parameters_to_tuples(path_params,\n collection_formats)\n for k, v in path_params:\n # specified safe chars, encode everything\n resource_path = resource_path.replace(\n '{%s}' % k,\n quote(str(v), safe=config.safe_chars_for_path_param)\n )\n\n # query parameters\n if 
query_params:\n query_params = self.sanitize_for_serialization(query_params)\n query_params = self.parameters_to_tuples(query_params,\n collection_formats)\n\n # post parameters\n if post_params or files:\n post_params = post_params if post_params else []\n post_params = self.sanitize_for_serialization(post_params)\n post_params = self.parameters_to_tuples(post_params,\n collection_formats)\n post_params.extend(self.files_parameters(files))\n\n # auth setting\n self.update_params_for_auth(header_params, query_params, auth_settings)\n\n # body\n if body:\n body = self.sanitize_for_serialization(body)\n\n # request url\n if _host is None:\n url = self.configuration.host + resource_path\n else:\n # use server/host defined in path or operation instead\n url = _host + resource_path\n\n try:\n # perform request and return response\n response_data = self.request(\n method, url, query_params=query_params, headers=header_params,\n post_params=post_params, body=body,\n _preload_content=_preload_content,\n _request_timeout=_request_timeout)\n except ApiException as e:\n e.body = e.body.decode('utf-8') if six.PY3 else e.body\n raise e\n\n content_type = response_data.getheader('content-type')\n\n self.last_response = response_data\n\n return_data = response_data\n\n if not _preload_content:\n return return_data\n\n if six.PY3 and response_type not in [\"file\", \"bytes\"]:\n match = None\n if content_type is not None:\n match = re.search(r\"charset=([a-zA-Z\\-\\d]+)[\\s\\;]?\", content_type)\n encoding = match.group(1) if match else \"utf-8\"\n response_data.data = response_data.data.decode(encoding)\n\n # deserialize response data\n if response_type:\n return_data = self.deserialize(response_data, response_type)\n else:\n return_data = None\n\n if _return_http_data_only:\n return (return_data)\n else:\n return (return_data, response_data.status,\n response_data.getheaders())\n\n def sanitize_for_serialization(self, obj):\n \"\"\"Builds a JSON POST object.\n\n If obj is None, return None.\n If obj is str, int, long, float, bool, return directly.\n If obj is datetime.datetime, datetime.date\n convert to string in iso8601 format.\n If obj is list, sanitize each element in the list.\n If obj is dict, return the dict.\n If obj is OpenAPI model, return the properties dict.\n\n :param obj: The data to serialize.\n :return: The serialized form of data.\n \"\"\"\n if obj is None:\n return None\n elif isinstance(obj, self.PRIMITIVE_TYPES):\n return obj\n elif isinstance(obj, list):\n return [self.sanitize_for_serialization(sub_obj)\n for sub_obj in obj]\n elif isinstance(obj, tuple):\n return tuple(self.sanitize_for_serialization(sub_obj)\n for sub_obj in obj)\n elif isinstance(obj, (datetime.datetime, datetime.date)):\n return obj.isoformat()\n\n if isinstance(obj, dict):\n obj_dict = obj\n else:\n # Convert model obj to dict except\n # attributes `openapi_types`, `attribute_map`\n # and attributes which value is not None.\n # Convert attribute name to json key in\n # model definition for request.\n obj_dict = {obj.attribute_map[attr]: getattr(obj, attr)\n for attr, _ in six.iteritems(obj.openapi_types)\n if getattr(obj, attr) is not None}\n\n return {key: self.sanitize_for_serialization(val)\n for key, val in six.iteritems(obj_dict)}\n\n def deserialize(self, response, response_type):\n \"\"\"Deserializes response into an object.\n\n :param response: RESTResponse object to be deserialized.\n :param response_type: class literal for\n deserialized object, or string of class name.\n\n :return: deserialized 
object.\n \"\"\"\n # handle file downloading\n # save response body into a tmp file and return the instance\n if response_type == \"file\":\n return self.__deserialize_file(response)\n\n # fetch data from response object\n try:\n data = json.loads(response.data)\n except ValueError:\n data = response.data\n\n return self.__deserialize(data, response_type)\n\n def __deserialize(self, data, klass):\n \"\"\"Deserializes dict, list, str into an object.\n\n :param data: dict, list or str.\n :param klass: class literal, or string of class name.\n\n :return: object.\n \"\"\"\n if data is None:\n return None\n\n if type(klass) == str:\n if klass.startswith('list['):\n sub_kls = re.match(r'list\\[(.*)\\]', klass).group(1)\n return [self.__deserialize(sub_data, sub_kls)\n for sub_data in data]\n\n if klass.startswith('dict('):\n sub_kls = re.match(r'dict\\(([^,]*), (.*)\\)', klass).group(2)\n return {k: self.__deserialize(v, sub_kls)\n for k, v in six.iteritems(data)}\n\n # convert str to class\n if klass in self.NATIVE_TYPES_MAPPING:\n klass = self.NATIVE_TYPES_MAPPING[klass]\n else:\n klass = getattr(kserve.models, klass)\n\n if klass in self.PRIMITIVE_TYPES:\n return self.__deserialize_primitive(data, klass)\n elif klass == object:\n return self.__deserialize_object(data)\n elif klass == datetime.date:\n return self.__deserialize_date(data)\n elif klass == datetime.datetime:\n return self.__deserialize_datetime(data)\n else:\n return self.__deserialize_model(data, klass)\n\n def call_api(self, resource_path, method,\n path_params=None, query_params=None, header_params=None,\n body=None, post_params=None, files=None,\n response_type=None, auth_settings=None, async_req=None,\n _return_http_data_only=None, collection_formats=None,\n _preload_content=True, _request_timeout=None, _host=None):\n \"\"\"Makes the HTTP request (synchronous) and returns deserialized data.\n\n To make an async_req request, set the async_req parameter.\n\n :param resource_path: Path to method endpoint.\n :param method: Method to call.\n :param path_params: Path parameters in the url.\n :param query_params: Query parameters in the url.\n :param header_params: Header parameters to be\n placed in the request header.\n :param body: Request body.\n :param post_params dict: Request post form parameters,\n for `application/x-www-form-urlencoded`, `multipart/form-data`.\n :param auth_settings list: Auth Settings names for the request.\n :param response: Response data type.\n :param files dict: key -> filename, value -> filepath,\n for `multipart/form-data`.\n :param async_req bool: execute request asynchronously\n :param _return_http_data_only: response data without head status code\n and headers\n :param collection_formats: dict of collection formats for path, query,\n header, and post parameters.\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. 
It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return:\n If async_req parameter is True,\n the request will be called asynchronously.\n The method will return the request thread.\n If parameter async_req is False or missing,\n then the method will return the response directly.\n \"\"\"\n if not async_req:\n return self.__call_api(resource_path, method,\n path_params, query_params, header_params,\n body, post_params, files,\n response_type, auth_settings,\n _return_http_data_only, collection_formats,\n _preload_content, _request_timeout, _host)\n\n return self.pool.apply_async(self.__call_api, (resource_path,\n method, path_params,\n query_params,\n header_params, body,\n post_params, files,\n response_type,\n auth_settings,\n _return_http_data_only,\n collection_formats,\n _preload_content,\n _request_timeout,\n _host))\n\n def request(self, method, url, query_params=None, headers=None,\n post_params=None, body=None, _preload_content=True,\n _request_timeout=None):\n \"\"\"Makes the HTTP request using RESTClient.\"\"\"\n if method == \"GET\":\n return self.rest_client.GET(url,\n query_params=query_params,\n _preload_content=_preload_content,\n _request_timeout=_request_timeout,\n headers=headers)\n elif method == \"HEAD\":\n return self.rest_client.HEAD(url,\n query_params=query_params,\n _preload_content=_preload_content,\n _request_timeout=_request_timeout,\n headers=headers)\n elif method == \"OPTIONS\":\n return self.rest_client.OPTIONS(url,\n query_params=query_params,\n headers=headers,\n _preload_content=_preload_content,\n _request_timeout=_request_timeout)\n elif method == \"POST\":\n return self.rest_client.POST(url,\n query_params=query_params,\n headers=headers,\n post_params=post_params,\n _preload_content=_preload_content,\n _request_timeout=_request_timeout,\n body=body)\n elif method == \"PUT\":\n return self.rest_client.PUT(url,\n query_params=query_params,\n headers=headers,\n post_params=post_params,\n _preload_content=_preload_content,\n _request_timeout=_request_timeout,\n body=body)\n elif method == \"PATCH\":\n return self.rest_client.PATCH(url,\n query_params=query_params,\n headers=headers,\n post_params=post_params,\n _preload_content=_preload_content,\n _request_timeout=_request_timeout,\n body=body)\n elif method == \"DELETE\":\n return self.rest_client.DELETE(url,\n query_params=query_params,\n headers=headers,\n _preload_content=_preload_content,\n _request_timeout=_request_timeout,\n body=body)\n else:\n raise ApiValueError(\n \"http method must be `GET`, `HEAD`, `OPTIONS`,\"\n \" `POST`, `PATCH`, `PUT` or `DELETE`.\"\n )\n\n def parameters_to_tuples(self, params, collection_formats):\n \"\"\"Get parameters as list of tuples, formatting collections.\n\n :param params: Parameters as dict or list of two-tuples\n :param dict collection_formats: Parameter collection formats\n :return: Parameters as list of tuples, collections formatted\n \"\"\"\n new_params = []\n if collection_formats is None:\n collection_formats = {}\n for k, v in six.iteritems(params) if isinstance(params, dict) else params: # noqa: E501\n if k in collection_formats:\n collection_format = collection_formats[k]\n if collection_format == 'multi':\n new_params.extend((k, value) for value in v)\n else:\n if collection_format == 'ssv':\n delimiter = ' '\n elif collection_format == 'tsv':\n delimiter = '\\t'\n elif collection_format == 'pipes':\n delimiter = '|'\n else: # csv is the default\n delimiter = ','\n new_params.append(\n (k, delimiter.join(str(value) for value in 
v)))\n else:\n new_params.append((k, v))\n return new_params\n\n def files_parameters(self, files=None):\n \"\"\"Builds form parameters.\n\n :param files: File parameters.\n :return: Form parameters with files.\n \"\"\"\n params = []\n\n if files:\n for k, v in six.iteritems(files):\n if not v:\n continue\n file_names = v if type(v) is list else [v]\n for n in file_names:\n with open(n, 'rb') as f:\n filename = os.path.basename(f.name)\n filedata = f.read()\n mimetype = (mimetypes.guess_type(filename)[0] or\n 'application/octet-stream')\n params.append(\n tuple([k, tuple([filename, filedata, mimetype])]))\n\n return params\n\n def select_header_accept(self, accepts):\n \"\"\"Returns `Accept` based on an array of accepts provided.\n\n :param accepts: List of headers.\n :return: Accept (e.g. application/json).\n \"\"\"\n if not accepts:\n return\n\n accepts = [x.lower() for x in accepts]\n\n if 'application/json' in accepts:\n return 'application/json'\n else:\n return ', '.join(accepts)\n\n def select_header_content_type(self, content_types):\n \"\"\"Returns `Content-Type` based on an array of content_types provided.\n\n :param content_types: List of content-types.\n :return: Content-Type (e.g. application/json).\n \"\"\"\n if not content_types:\n return 'application/json'\n\n content_types = [x.lower() for x in content_types]\n\n if 'application/json' in content_types or '*/*' in content_types:\n return 'application/json'\n else:\n return content_types[0]\n\n def update_params_for_auth(self, headers, querys, auth_settings):\n \"\"\"Updates header and query params based on authentication setting.\n\n :param headers: Header parameters dict to be updated.\n :param querys: Query parameters tuple list to be updated.\n :param auth_settings: Authentication setting identifiers list.\n \"\"\"\n if not auth_settings:\n return\n\n for auth in auth_settings:\n auth_setting = self.configuration.auth_settings().get(auth)\n if auth_setting:\n if auth_setting['in'] == 'cookie':\n headers['Cookie'] = auth_setting['value']\n elif auth_setting['in'] == 'header':\n headers[auth_setting['key']] = auth_setting['value']\n elif auth_setting['in'] == 'query':\n querys.append((auth_setting['key'], auth_setting['value']))\n else:\n raise ApiValueError(\n 'Authentication token must be in `query` or `header`'\n )\n\n def __deserialize_file(self, response):\n \"\"\"Deserializes body to file\n\n Saves response body into a file in a temporary folder,\n using the filename from the `Content-Disposition` header if provided.\n\n :param response: RESTResponse.\n :return: file path.\n \"\"\"\n fd, path = tempfile.mkstemp(dir=self.configuration.temp_folder_path)\n os.close(fd)\n os.remove(path)\n\n content_disposition = response.getheader(\"Content-Disposition\")\n if content_disposition:\n filename = re.search(r'filename=[\\'\"]?([^\\'\"\\s]+)[\\'\"]?',\n content_disposition).group(1)\n path = os.path.join(os.path.dirname(path), filename)\n\n with open(path, \"wb\") as f:\n f.write(response.data)\n\n return path\n\n def __deserialize_primitive(self, data, klass):\n \"\"\"Deserializes string to primitive type.\n\n :param data: str.\n :param klass: class literal.\n\n :return: int, long, float, str, bool.\n \"\"\"\n try:\n return klass(data)\n except UnicodeEncodeError:\n return six.text_type(data)\n except TypeError:\n return data\n\n def __deserialize_object(self, value):\n \"\"\"Return an original value.\n\n :return: object.\n \"\"\"\n return value\n\n def __deserialize_date(self, string):\n \"\"\"Deserializes string to 
date.\n\n :param string: str.\n :return: date.\n \"\"\"\n try:\n return parse(string).date()\n except ImportError:\n return string\n except ValueError:\n raise rest.ApiException(\n status=0,\n reason=\"Failed to parse `{0}` as date object\".format(string)\n )\n\n def __deserialize_datetime(self, string):\n \"\"\"Deserializes string to datetime.\n\n The string should be in iso8601 datetime format.\n\n :param string: str.\n :return: datetime.\n \"\"\"\n try:\n return parse(string)\n except ImportError:\n return string\n except ValueError:\n raise rest.ApiException(\n status=0,\n reason=(\n \"Failed to parse `{0}` as datetime object\"\n .format(string)\n )\n )\n\n def __deserialize_model(self, data, klass):\n \"\"\"Deserializes list or dict to model.\n\n :param data: dict, list.\n :param klass: class literal.\n :return: model object.\n \"\"\"\n has_discriminator = False\n if (hasattr(klass, 'get_real_child_model')\n and klass.discriminator_value_class_map):\n has_discriminator = True\n\n if not klass.openapi_types and has_discriminator is False:\n return data\n\n kwargs = {}\n if (data is not None and\n klass.openapi_types is not None and\n isinstance(data, (list, dict))):\n for attr, attr_type in six.iteritems(klass.openapi_types):\n if klass.attribute_map[attr] in data:\n value = data[klass.attribute_map[attr]]\n kwargs[attr] = self.__deserialize(value, attr_type)\n\n instance = klass(**kwargs)\n\n if has_discriminator:\n klass_name = instance.get_real_child_model(data)\n if klass_name:\n instance = self.__deserialize(data, klass_name)\n return instance\n", "path": "python/kserve/kserve/api_client.py"}], "after_files": [{"content": "# Copyright 2023 The KServe Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# coding: utf-8\n\"\"\"\n KServe\n\n Python SDK for KServe # noqa: E501\n\n The version of the OpenAPI document: v0.1\n Generated by: https://openapi-generator.tech\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport atexit\nimport datetime\nfrom dateutil.parser import parse\nimport json\nimport mimetypes\nfrom multiprocessing.pool import ThreadPool\nimport os\nimport re\nimport tempfile\n\n# python 2 and python 3 compatibility library\nimport six\nfrom six.moves.urllib.parse import quote\n\nfrom kserve.configuration import Configuration\nimport kserve.models\nfrom kserve import rest\nfrom kserve.exceptions import ApiValueError, ApiException\n\n\nclass ApiClient(object):\n \"\"\"Generic API client for OpenAPI client library builds.\n\n OpenAPI generic API client. This client handles the client-\n server communication, and is invariant across implementations. 
Specifics of\n the methods and models for each application are generated from the OpenAPI\n templates.\n\n NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n Do not edit the class manually.\n\n :param configuration: .Configuration object for this client\n :param header_name: a header to pass when making calls to the API.\n :param header_value: a header value to pass when making calls to\n the API.\n :param cookie: a cookie to include in the header when making calls\n to the API\n :param pool_threads: The number of threads to use for async requests\n to the API. More threads means more concurrent API requests.\n \"\"\"\n\n PRIMITIVE_TYPES = (float, bool, bytes, six.text_type) + six.integer_types\n NATIVE_TYPES_MAPPING = {\n 'int': int,\n 'long': int if six.PY3 else long, # noqa: F821\n 'float': float,\n 'str': str,\n 'bool': bool,\n 'date': datetime.date,\n 'datetime': datetime.datetime,\n 'object': object,\n }\n _pool = None\n\n def __init__(self, configuration=None, header_name=None, header_value=None,\n cookie=None, pool_threads=1):\n if configuration is None:\n configuration = Configuration.get_default_copy()\n self.configuration = configuration\n self.pool_threads = pool_threads\n\n self.rest_client = rest.RESTClientObject(configuration)\n self.default_headers = {}\n if header_name is not None:\n self.default_headers[header_name] = header_value\n self.cookie = cookie\n # Set default User-Agent.\n self.user_agent = 'OpenAPI-Generator/0.1/python'\n self.client_side_validation = configuration.client_side_validation\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.close()\n\n def close(self):\n if self._pool:\n self._pool.close()\n self._pool.join()\n self._pool = None\n if hasattr(atexit, 'unregister'):\n atexit.unregister(self.close)\n\n @property\n def pool(self):\n \"\"\"Create thread pool on first request\n avoids instantiating unused threadpool for blocking clients.\n \"\"\"\n if self._pool is None:\n atexit.register(self.close)\n self._pool = ThreadPool(self.pool_threads)\n return self._pool\n\n @property\n def user_agent(self):\n \"\"\"User agent for this API client\"\"\"\n return self.default_headers['User-Agent']\n\n @user_agent.setter\n def user_agent(self, value):\n self.default_headers['User-Agent'] = value\n\n def set_default_header(self, header_name, header_value):\n self.default_headers[header_name] = header_value\n\n def __call_api(\n self, resource_path, method, path_params=None,\n query_params=None, header_params=None, body=None, post_params=None,\n files=None, response_type=None, auth_settings=None,\n _return_http_data_only=None, collection_formats=None,\n _preload_content=True, _request_timeout=None, _host=None):\n\n config = self.configuration\n\n # header parameters\n header_params = header_params or {}\n header_params.update(self.default_headers)\n if self.cookie:\n header_params['Cookie'] = self.cookie\n if header_params:\n header_params = self.sanitize_for_serialization(header_params)\n header_params = dict(self.parameters_to_tuples(header_params,\n collection_formats))\n\n # path parameters\n if path_params:\n path_params = self.sanitize_for_serialization(path_params)\n path_params = self.parameters_to_tuples(path_params,\n collection_formats)\n for k, v in path_params:\n # specified safe chars, encode everything\n resource_path = resource_path.replace(\n '{%s}' % k,\n quote(str(v), safe=config.safe_chars_for_path_param)\n )\n\n # query parameters\n if 
query_params:\n query_params = self.sanitize_for_serialization(query_params)\n query_params = self.parameters_to_tuples(query_params,\n collection_formats)\n\n # post parameters\n if post_params or files:\n post_params = post_params if post_params else []\n post_params = self.sanitize_for_serialization(post_params)\n post_params = self.parameters_to_tuples(post_params,\n collection_formats)\n post_params.extend(self.files_parameters(files))\n\n # auth setting\n self.update_params_for_auth(header_params, query_params, auth_settings)\n\n # body\n if body:\n body = self.sanitize_for_serialization(body)\n\n # request url\n if _host is None:\n url = self.configuration.host + resource_path\n else:\n # use server/host defined in path or operation instead\n url = _host + resource_path\n\n try:\n # perform request and return response\n response_data = self.request(\n method, url, query_params=query_params, headers=header_params,\n post_params=post_params, body=body,\n _preload_content=_preload_content,\n _request_timeout=_request_timeout)\n except ApiException as e:\n e.body = e.body.decode('utf-8') if six.PY3 else e.body\n raise e\n\n content_type = response_data.getheader('content-type')\n\n self.last_response = response_data\n\n return_data = response_data\n\n if not _preload_content:\n return return_data\n\n if six.PY3 and response_type not in [\"file\", \"bytes\"]:\n match = None\n if content_type is not None:\n match = re.search(r\"charset=([a-zA-Z\\-\\d]+)[\\s\\;]?\", content_type)\n encoding = match.group(1) if match else \"utf-8\"\n response_data.data = response_data.data.decode(encoding)\n\n # deserialize response data\n if response_type:\n return_data = self.deserialize(response_data, response_type)\n else:\n return_data = None\n\n if _return_http_data_only:\n return (return_data)\n else:\n return (return_data, response_data.status,\n response_data.getheaders())\n\n def sanitize_for_serialization(self, obj):\n \"\"\"Builds a JSON POST object.\n\n If obj is None, return None.\n If obj is str, int, long, float, bool, return directly.\n If obj is datetime.datetime, datetime.date\n convert to string in iso8601 format.\n If obj is list, sanitize each element in the list.\n If obj is dict, return the dict.\n If obj is OpenAPI model, return the properties dict.\n\n :param obj: The data to serialize.\n :return: The serialized form of data.\n \"\"\"\n if obj is None:\n return None\n elif isinstance(obj, self.PRIMITIVE_TYPES):\n return obj\n elif isinstance(obj, list):\n return [self.sanitize_for_serialization(sub_obj)\n for sub_obj in obj]\n elif isinstance(obj, tuple):\n return tuple(self.sanitize_for_serialization(sub_obj)\n for sub_obj in obj)\n elif isinstance(obj, (datetime.datetime, datetime.date)):\n return obj.isoformat()\n\n if isinstance(obj, dict):\n obj_dict = obj\n else:\n # Convert model obj to dict except\n # attributes `openapi_types`, `attribute_map`\n # and attributes which value is not None.\n # Convert attribute name to json key in\n # model definition for request.\n obj_dict = {obj.attribute_map[attr]: getattr(obj, attr)\n for attr, _ in six.iteritems(obj.openapi_types)\n if getattr(obj, attr) is not None}\n\n return {key: self.sanitize_for_serialization(val)\n for key, val in six.iteritems(obj_dict)}\n\n def deserialize(self, response, response_type):\n \"\"\"Deserializes response into an object.\n\n :param response: RESTResponse object to be deserialized.\n :param response_type: class literal for\n deserialized object, or string of class name.\n\n :return: deserialized 
object.\n \"\"\"\n # handle file downloading\n # save response body into a tmp file and return the instance\n if response_type == \"file\":\n return self.__deserialize_file(response)\n\n # fetch data from response object\n try:\n data = json.loads(response.data)\n except ValueError:\n data = response.data\n\n return self.__deserialize(data, response_type)\n\n def __deserialize(self, data, klass):\n \"\"\"Deserializes dict, list, str into an object.\n\n :param data: dict, list or str.\n :param klass: class literal, or string of class name.\n\n :return: object.\n \"\"\"\n if data is None:\n return None\n\n if type(klass) is str:\n if klass.startswith('list['):\n sub_kls = re.match(r'list\\[(.*)\\]', klass).group(1)\n return [self.__deserialize(sub_data, sub_kls)\n for sub_data in data]\n\n if klass.startswith('dict('):\n sub_kls = re.match(r'dict\\(([^,]*), (.*)\\)', klass).group(2)\n return {k: self.__deserialize(v, sub_kls)\n for k, v in six.iteritems(data)}\n\n # convert str to class\n if klass in self.NATIVE_TYPES_MAPPING:\n klass = self.NATIVE_TYPES_MAPPING[klass]\n else:\n klass = getattr(kserve.models, klass)\n\n if klass in self.PRIMITIVE_TYPES:\n return self.__deserialize_primitive(data, klass)\n elif klass == object:\n return self.__deserialize_object(data)\n elif klass == datetime.date:\n return self.__deserialize_date(data)\n elif klass == datetime.datetime:\n return self.__deserialize_datetime(data)\n else:\n return self.__deserialize_model(data, klass)\n\n def call_api(self, resource_path, method,\n path_params=None, query_params=None, header_params=None,\n body=None, post_params=None, files=None,\n response_type=None, auth_settings=None, async_req=None,\n _return_http_data_only=None, collection_formats=None,\n _preload_content=True, _request_timeout=None, _host=None):\n \"\"\"Makes the HTTP request (synchronous) and returns deserialized data.\n\n To make an async_req request, set the async_req parameter.\n\n :param resource_path: Path to method endpoint.\n :param method: Method to call.\n :param path_params: Path parameters in the url.\n :param query_params: Query parameters in the url.\n :param header_params: Header parameters to be\n placed in the request header.\n :param body: Request body.\n :param post_params dict: Request post form parameters,\n for `application/x-www-form-urlencoded`, `multipart/form-data`.\n :param auth_settings list: Auth Settings names for the request.\n :param response: Response data type.\n :param files dict: key -> filename, value -> filepath,\n for `multipart/form-data`.\n :param async_req bool: execute request asynchronously\n :param _return_http_data_only: response data without head status code\n and headers\n :param collection_formats: dict of collection formats for path, query,\n header, and post parameters.\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. 
It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return:\n If async_req parameter is True,\n the request will be called asynchronously.\n The method will return the request thread.\n If parameter async_req is False or missing,\n then the method will return the response directly.\n \"\"\"\n if not async_req:\n return self.__call_api(resource_path, method,\n path_params, query_params, header_params,\n body, post_params, files,\n response_type, auth_settings,\n _return_http_data_only, collection_formats,\n _preload_content, _request_timeout, _host)\n\n return self.pool.apply_async(self.__call_api, (resource_path,\n method, path_params,\n query_params,\n header_params, body,\n post_params, files,\n response_type,\n auth_settings,\n _return_http_data_only,\n collection_formats,\n _preload_content,\n _request_timeout,\n _host))\n\n def request(self, method, url, query_params=None, headers=None,\n post_params=None, body=None, _preload_content=True,\n _request_timeout=None):\n \"\"\"Makes the HTTP request using RESTClient.\"\"\"\n if method == \"GET\":\n return self.rest_client.GET(url,\n query_params=query_params,\n _preload_content=_preload_content,\n _request_timeout=_request_timeout,\n headers=headers)\n elif method == \"HEAD\":\n return self.rest_client.HEAD(url,\n query_params=query_params,\n _preload_content=_preload_content,\n _request_timeout=_request_timeout,\n headers=headers)\n elif method == \"OPTIONS\":\n return self.rest_client.OPTIONS(url,\n query_params=query_params,\n headers=headers,\n _preload_content=_preload_content,\n _request_timeout=_request_timeout)\n elif method == \"POST\":\n return self.rest_client.POST(url,\n query_params=query_params,\n headers=headers,\n post_params=post_params,\n _preload_content=_preload_content,\n _request_timeout=_request_timeout,\n body=body)\n elif method == \"PUT\":\n return self.rest_client.PUT(url,\n query_params=query_params,\n headers=headers,\n post_params=post_params,\n _preload_content=_preload_content,\n _request_timeout=_request_timeout,\n body=body)\n elif method == \"PATCH\":\n return self.rest_client.PATCH(url,\n query_params=query_params,\n headers=headers,\n post_params=post_params,\n _preload_content=_preload_content,\n _request_timeout=_request_timeout,\n body=body)\n elif method == \"DELETE\":\n return self.rest_client.DELETE(url,\n query_params=query_params,\n headers=headers,\n _preload_content=_preload_content,\n _request_timeout=_request_timeout,\n body=body)\n else:\n raise ApiValueError(\n \"http method must be `GET`, `HEAD`, `OPTIONS`,\"\n \" `POST`, `PATCH`, `PUT` or `DELETE`.\"\n )\n\n def parameters_to_tuples(self, params, collection_formats):\n \"\"\"Get parameters as list of tuples, formatting collections.\n\n :param params: Parameters as dict or list of two-tuples\n :param dict collection_formats: Parameter collection formats\n :return: Parameters as list of tuples, collections formatted\n \"\"\"\n new_params = []\n if collection_formats is None:\n collection_formats = {}\n for k, v in six.iteritems(params) if isinstance(params, dict) else params: # noqa: E501\n if k in collection_formats:\n collection_format = collection_formats[k]\n if collection_format == 'multi':\n new_params.extend((k, value) for value in v)\n else:\n if collection_format == 'ssv':\n delimiter = ' '\n elif collection_format == 'tsv':\n delimiter = '\\t'\n elif collection_format == 'pipes':\n delimiter = '|'\n else: # csv is the default\n delimiter = ','\n new_params.append(\n (k, delimiter.join(str(value) for value in 
v)))\n else:\n new_params.append((k, v))\n return new_params\n\n def files_parameters(self, files=None):\n \"\"\"Builds form parameters.\n\n :param files: File parameters.\n :return: Form parameters with files.\n \"\"\"\n params = []\n\n if files:\n for k, v in six.iteritems(files):\n if not v:\n continue\n file_names = v if type(v) is list else [v]\n for n in file_names:\n with open(n, 'rb') as f:\n filename = os.path.basename(f.name)\n filedata = f.read()\n mimetype = (mimetypes.guess_type(filename)[0] or\n 'application/octet-stream')\n params.append(\n tuple([k, tuple([filename, filedata, mimetype])]))\n\n return params\n\n def select_header_accept(self, accepts):\n \"\"\"Returns `Accept` based on an array of accepts provided.\n\n :param accepts: List of headers.\n :return: Accept (e.g. application/json).\n \"\"\"\n if not accepts:\n return\n\n accepts = [x.lower() for x in accepts]\n\n if 'application/json' in accepts:\n return 'application/json'\n else:\n return ', '.join(accepts)\n\n def select_header_content_type(self, content_types):\n \"\"\"Returns `Content-Type` based on an array of content_types provided.\n\n :param content_types: List of content-types.\n :return: Content-Type (e.g. application/json).\n \"\"\"\n if not content_types:\n return 'application/json'\n\n content_types = [x.lower() for x in content_types]\n\n if 'application/json' in content_types or '*/*' in content_types:\n return 'application/json'\n else:\n return content_types[0]\n\n def update_params_for_auth(self, headers, querys, auth_settings):\n \"\"\"Updates header and query params based on authentication setting.\n\n :param headers: Header parameters dict to be updated.\n :param querys: Query parameters tuple list to be updated.\n :param auth_settings: Authentication setting identifiers list.\n \"\"\"\n if not auth_settings:\n return\n\n for auth in auth_settings:\n auth_setting = self.configuration.auth_settings().get(auth)\n if auth_setting:\n if auth_setting['in'] == 'cookie':\n headers['Cookie'] = auth_setting['value']\n elif auth_setting['in'] == 'header':\n headers[auth_setting['key']] = auth_setting['value']\n elif auth_setting['in'] == 'query':\n querys.append((auth_setting['key'], auth_setting['value']))\n else:\n raise ApiValueError(\n 'Authentication token must be in `query` or `header`'\n )\n\n def __deserialize_file(self, response):\n \"\"\"Deserializes body to file\n\n Saves response body into a file in a temporary folder,\n using the filename from the `Content-Disposition` header if provided.\n\n :param response: RESTResponse.\n :return: file path.\n \"\"\"\n fd, path = tempfile.mkstemp(dir=self.configuration.temp_folder_path)\n os.close(fd)\n os.remove(path)\n\n content_disposition = response.getheader(\"Content-Disposition\")\n if content_disposition:\n filename = re.search(r'filename=[\\'\"]?([^\\'\"\\s]+)[\\'\"]?',\n content_disposition).group(1)\n path = os.path.join(os.path.dirname(path), filename)\n\n with open(path, \"wb\") as f:\n f.write(response.data)\n\n return path\n\n def __deserialize_primitive(self, data, klass):\n \"\"\"Deserializes string to primitive type.\n\n :param data: str.\n :param klass: class literal.\n\n :return: int, long, float, str, bool.\n \"\"\"\n try:\n return klass(data)\n except UnicodeEncodeError:\n return six.text_type(data)\n except TypeError:\n return data\n\n def __deserialize_object(self, value):\n \"\"\"Return an original value.\n\n :return: object.\n \"\"\"\n return value\n\n def __deserialize_date(self, string):\n \"\"\"Deserializes string to 
date.\n\n :param string: str.\n :return: date.\n \"\"\"\n try:\n return parse(string).date()\n except ImportError:\n return string\n except ValueError:\n raise rest.ApiException(\n status=0,\n reason=\"Failed to parse `{0}` as date object\".format(string)\n )\n\n def __deserialize_datetime(self, string):\n \"\"\"Deserializes string to datetime.\n\n The string should be in iso8601 datetime format.\n\n :param string: str.\n :return: datetime.\n \"\"\"\n try:\n return parse(string)\n except ImportError:\n return string\n except ValueError:\n raise rest.ApiException(\n status=0,\n reason=(\n \"Failed to parse `{0}` as datetime object\"\n .format(string)\n )\n )\n\n def __deserialize_model(self, data, klass):\n \"\"\"Deserializes list or dict to model.\n\n :param data: dict, list.\n :param klass: class literal.\n :return: model object.\n \"\"\"\n has_discriminator = False\n if (hasattr(klass, 'get_real_child_model')\n and klass.discriminator_value_class_map):\n has_discriminator = True\n\n if not klass.openapi_types and has_discriminator is False:\n return data\n\n kwargs = {}\n if (data is not None and\n klass.openapi_types is not None and\n isinstance(data, (list, dict))):\n for attr, attr_type in six.iteritems(klass.openapi_types):\n if klass.attribute_map[attr] in data:\n value = data[klass.attribute_map[attr]]\n kwargs[attr] = self.__deserialize(value, attr_type)\n\n instance = klass(**kwargs)\n\n if has_discriminator:\n klass_name = instance.get_real_child_model(data)\n if klass_name:\n instance = self.__deserialize(data, klass_name)\n return instance\n", "path": "python/kserve/kserve/api_client.py"}]} |
gh_patches_debug_1257 | rasdani/github-patches | git_diff | canonical__microk8s-3535 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TypeError: sequence item 10: expected str instance, NoneType found (microk8s dashboard-proxy)
#### Summary
I installed MicroK8s on Windows 10 [Version 10.0.19043.2130]. I can run `microk8s dashboard-proxy` inside `microk8s-vm` VM, but I can't run `microk8s dashboard-proxy` on the Host.
#### What Should Happen Instead?
It should run without error on host.
#### Reproduction Steps
On Windows PowerShell
1. `multipass launch --name 'microk8s-vm' --bridged --disk 50G --cpus 2 --mem 4G`
2. `multipass shell microk8s-vm`
On `microk8s-vm`
1. `sudo snap install microk8s --classic --channel 1.25/stable`
```txt
microk8s (1.25/stable) v1.25.2 from Canonical✓ installed
```
3. `sudo microk8s status --wait-ready`
```txt
microk8s is running
high-availability: no
datastore master nodes: 127.0.0.1:19001
datastore standby nodes: none
addons:
enabled:
ha-cluster # (core) Configure high availability on the current node
helm # (core) Helm - the package manager for Kubernetes
helm3 # (core) Helm 3 - the package manager for Kubernetes
disabled:
cert-manager # (core) Cloud native certificate management
community # (core) The community addons repository
dashboard # (core) The Kubernetes dashboard
dns # (core) CoreDNS
gpu # (core) Automatic enablement of Nvidia CUDA
host-access # (core) Allow Pods connecting to Host services smoothly
hostpath-storage # (core) Storage class; allocates storage from host directory
ingress # (core) Ingress controller for external access
kube-ovn # (core) An advanced network fabric for Kubernetes
mayastor # (core) OpenEBS MayaStor
metallb # (core) Loadbalancer for your Kubernetes cluster
metrics-server # (core) K8s Metrics Server for API access to service metrics
observability # (core) A lightweight observability stack for logs, traces and metrics
prometheus # (core) Prometheus operator for monitoring and logging
rbac # (core) Role-Based Access Control for authorisation
registry # (core) Private image registry exposed on localhost:32000
storage # (core) Alias to hostpath-storage add-on, deprecated
```
4. `sudo microk8s enable dashboard`
```txt
Infer repository core for addon dashboard
Enabling Kubernetes Dashboard
Infer repository core for addon metrics-server
Enabling Metrics-Server
serviceaccount/metrics-server created
clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created
clusterrole.rbac.authorization.k8s.io/system:metrics-server created
rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader created
clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator created
clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server created
service/metrics-server created
deployment.apps/metrics-server created
apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created
clusterrolebinding.rbac.authorization.k8s.io/microk8s-admin created
Metrics-Server is enabled
Applying manifest
serviceaccount/kubernetes-dashboard created
service/kubernetes-dashboard created
secret/kubernetes-dashboard-certs created
secret/kubernetes-dashboard-csrf created
secret/kubernetes-dashboard-key-holder created
configmap/kubernetes-dashboard-settings created
role.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard created
service/dashboard-metrics-scraper created
deployment.apps/dashboard-metrics-scraper created
secret/microk8s-dashboard-token created
If RBAC is not enabled access the dashboard using the token retrieved with:
microk8s kubectl describe secret -n kube-system microk8s-dashboard-token
Use this token in the https login UI of the kubernetes-dashboard service.
In an RBAC enabled setup (microk8s enable RBAC) you need to create a user with restricted
permissions as shown in:
https://github.com/kubernetes/dashboard/blob/master/docs/user/access-control/creating-sample-user.md
```
5. `sudo microk8s dashboard-proxy`
```txt
Checking if Dashboard is running.
Infer repository core for addon dashboard
Infer repository core for addon metrics-server
Waiting for Dashboard to come up.
Trying to get token from microk8s-dashboard-token
Waiting for secret token (attempt 0)
Dashboard will be available at https://127.0.0.1:10443
Use the following token to login:
eyJhbGciOiJSUzI1NiI...(redacted)
```
6. `exit`
Exit to the Windows PowerShell
On Windows PowerShell
1. `microk8s config > $env:LOCALAPPDATA\MicroK8s\config`
2. `microk8s dashboard-proxy`
```txt
Checking if Dashboard is running.
Infer repository core for addon dashboard
Waiting for Dashboard to come up.
Cannot find the dashboard secret.
An unexpected error occurred.
sequence item 10: expected str instance, NoneType found
Traceback (most recent call last):
File "cli\microk8s.py", line 57, in cli
File "cli\microk8s.py", line 253, in dashboard_proxy
File "vm_providers\_multipass\_multipass.py", line 54, in run
File "vm_providers\_multipass\_multipass_command.py", line 232, in execute
File "vm_providers\_multipass\_multipass_command.py", line 38, in _run_output
TypeError: sequence item 10: expected str instance, NoneType found
None
```
Run under Host. It will produce error.

Run under Ubuntu VM. It's OK.

#### Introspection Report
[inspection-report-20221019_111239.tar.gz](https://github.com/canonical/microk8s/files/9816525/inspection-report-20221019_111239.tar.gz)
#### Can you suggest a fix?
No.
#### Are you interested in contributing with a fix?
Yes, if I can.
### More details
I checked the source code here:
https://github.com/canonical/microk8s/blob/e1d115f46a38ada6a2c5b236c9e26687b3529c45/installer/cli/microk8s.py#L308-L314
Here is my output:
```txt
$ microk8s.kubectl -n kube-system get secret
NAME TYPE DATA AGE
kubernetes-dashboard-certs Opaque 0 159m
microk8s-dashboard-token kubernetes.io/service-account-token 3 159m
kubernetes-dashboard-csrf Opaque 1 159m
kubernetes-dashboard-key-holder Opaque 2 159m
```
It seems I don't have a `default-token` secret in the output. I don't know why I can still run `microk8s dashboard-proxy` inside Ubuntu Linux.
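
The likely explanation: the host-side failure comes from the Python installer CLI (`installer/cli/microk8s.py`, the lines quoted above), which only accepts a secret whose name starts with `default-token`. Newer Kubernetes releases no longer auto-generate those service-account token secrets, and the dashboard addon creates `microk8s-dashboard-token` instead (its creation is visible in the `enable dashboard` output above). Inside the VM the snap's own `dashboard-proxy` script runs, and it already reports "Trying to get token from microk8s-dashboard-token", which is why it still works there. On the host, no line matches, `secret_name` stays `None`, the code prints "Cannot find the dashboard secret." but keeps going, and the `None` ends up inside the command list handed to the multipass provider, where joining the sequence presumably raises the reported `TypeError`. A minimal sketch of that failure mode (the multipass prefix shown is an assumption, purely for illustration):

```python
# Hypothetical sketch: how secret_name = None surfaces as the reported TypeError.
secret_name = None  # no line of `kubectl get secret` starts with b"default-token"

command = ["microk8s.kubectl", "-n", "kube-system", "describe", "secret", secret_name]
# The multipass provider prepends its own arguments before executing; the exact
# prefix below is assumed only for illustration.
full_cmd = ["multipass", "exec", "microk8s-vm", "--", "sudo"] + command

try:
    print(" ".join(full_cmd))
except TypeError as err:
    print(err)  # -> sequence item 10: expected str instance, NoneType found
```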
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `installer/cli/microk8s.py`
Content:
```
1 import argparse
2 import logging
3 import traceback
4 from typing import List
5 from sys import exit, platform
6 from os import getcwd
7
8 import click
9
10 from cli.echo import Echo
11 from common import definitions
12 from common.auxiliary import Windows, MacOS, Linux
13 from common.errors import BaseError
14 from common.file_utils import get_kubeconfig_path, clear_kubeconfig
15 from vm_providers.factory import get_provider_for
16 from vm_providers.errors import ProviderNotFound, ProviderInstanceNotFoundError
17
18 logger = logging.getLogger(__name__)
19
20
21 @click.command(
22 name="microk8s",
23 context_settings=dict(
24 ignore_unknown_options=True,
25 allow_extra_args=True,
26 ),
27 )
28 @click.option("-h", "--help", is_flag=True)
29 @click.pass_context
30 def cli(ctx, help):
31 try:
32 if help and len(ctx.args) == 0:
33 show_help()
34 exit(0)
35 elif help:
36 ctx.args.append("--help")
37
38 if len(ctx.args) == 0:
39 show_error()
40 exit(1)
41 if ctx.args[0] == "install":
42 install(ctx.args[1:])
43 exit(0)
44 elif ctx.args[0] == "uninstall":
45 uninstall()
46 exit(0)
47 elif ctx.args[0] == "start":
48 start()
49 run(ctx.args)
50 exit(0)
51 elif ctx.args[0] == "stop":
52 run(ctx.args)
53 stop()
54 exit(0)
55 elif ctx.args[0] == "kubectl":
56 exit(kubectl(ctx.args[1:]))
57 elif ctx.args[0] == "dashboard-proxy":
58 dashboard_proxy()
59 exit(0)
60 elif ctx.args[0] == "inspect":
61 inspect()
62 exit(0)
63 else:
64 run(ctx.args)
65 exit(0)
66
67 except BaseError as e:
68 Echo.error(str(e))
69 exit(e.get_exit_code())
70 except Exception as e:
71 Echo.error("An unexpected error occurred.")
72 Echo.info(str(e))
73 Echo.info(traceback.print_exc())
74 exit(254)
75
76
77 def show_error():
78 msg = """Usage: microk8s [OPTIONS] COMMAND [ARGS]...
79
80 Options:
81 --help Shows the available COMMANDS."""
82 click.echo(msg)
83
84
85 def show_help():
86 msg = """Usage: microk8s [OPTIONS] COMMAND [ARGS]...
87
88 Options:
89 --help Show this message and exit.
90
91 Commands:
92 install Installs MicroK8s. Use --cpu, --mem, --disk and --channel to configure your setup.
93 uninstall Removes MicroK8s"""
94 click.echo(msg)
95 commands = _get_microk8s_commands()
96 for command in commands:
97 if command in definitions.command_descriptions:
98 click.echo(" {:<15} {}".format(command, definitions.command_descriptions[command]))
99 else:
100 click.echo(" {:<15}".format(command))
101 if len(commands) == 2:
102 click.echo("")
103 click.echo("Install and start MicroK8s to see the full list of commands.")
104
105
106 def _show_install_help():
107 msg = f"""Usage: microk8s install OPTIONS
108
109 Options:
110 --help Show this message and exit.
111 --cpu Cores used by MicroK8s (default={definitions.DEFAULT_CORES}, min={definitions.MIN_CORES})
112 --mem RAM in GB used by MicroK8s (default={definitions.DEFAULT_MEMORY_GB}, min={definitions.MIN_MEMORY_GB})
113 --disk Max volume in GB of the dynamically expandable hard disk to be used (default={definitions.DEFAULT_DISK_GB}, min={definitions.MIN_DISK_GB})
114 --channel Kubernetes version to install (default={definitions.DEFAULT_CHANNEL})
115 -y, --assume-yes Automatic yes to prompts""" # noqa
116 Echo.info(msg)
117
118
119 def memory(mem_gb: str) -> int:
120 """
121 Validates the value in --mem parameter of the install command.
122 """
123 mem_gb = int(mem_gb)
124 if mem_gb < definitions.MIN_MEMORY_GB:
125 raise ValueError("Out of valid memory range")
126 return mem_gb
127
128
129 def cpu(cpus: str) -> int:
130 """
131 Validates the value in --cpu parameter of the install command.
132 """
133 cpus = int(cpus)
134 if cpus < definitions.MIN_CORES:
135 raise ValueError("Invalid number of cpus")
136 return cpus
137
138
139 def disk(disk_gb: str) -> int:
140 """
141 Validates the value in --disk parameter of the install command.
142 """
143 disk_gb = int(disk_gb)
144 if disk_gb < definitions.MIN_DISK_GB:
145 raise ValueError("Out of valid disk range")
146 return disk_gb
147
148
149 def install(args) -> None:
150 if "--help" in args or "-h" in args:
151 _show_install_help()
152 return
153
154 parser = argparse.ArgumentParser("microk8s install")
155 parser.add_argument("--cpu", default=definitions.DEFAULT_CORES, type=cpu)
156 parser.add_argument("--mem", default=definitions.DEFAULT_MEMORY_GB, type=memory)
157 parser.add_argument("--disk", default=definitions.DEFAULT_DISK_GB, type=disk)
158 parser.add_argument("--channel", default=definitions.DEFAULT_CHANNEL, type=str)
159 parser.add_argument(
160 "-y", "--assume-yes", action="store_true", default=definitions.DEFAULT_ASSUME
161 )
162 args = parser.parse_args(args)
163
164 echo = Echo()
165
166 if platform == "win32":
167 host = Windows(args)
168 elif platform == "darwin":
169 host = MacOS(args)
170 else:
171 host = Linux(args)
172
173 if not host.has_enough_cpus():
174 echo.error("VM cpus requested exceed number of available cores on host.")
175 exit(1)
176 if not host.has_enough_memory():
177 echo.warning("VM memory requested exceeds the total memory on host.")
178 exit(1)
179 if not host.has_enough_disk_space():
180 echo.warning("VM disk size requested exceeds free space on host.")
181
182 vm_provider_name: str = "multipass"
183 vm_provider_class = get_provider_for(vm_provider_name)
184 try:
185 vm_provider_class.ensure_provider()
186 except ProviderNotFound as provider_error:
187 if provider_error.prompt_installable:
188 if args.assume_yes or (
189 echo.is_tty_connected()
190 and echo.confirm(
191 "Support for {!r} needs to be set up. "
192 "Would you like to do that now?".format(provider_error.provider)
193 )
194 ):
195 vm_provider_class.setup_provider(echoer=echo)
196 else:
197 raise provider_error
198 else:
199 raise provider_error
200
201 instance = vm_provider_class(echoer=echo)
202 spec = vars(args)
203 spec.update({"kubeconfig": get_kubeconfig_path()})
204 instance.launch_instance(spec)
205 echo.info("MicroK8s is up and running. See the available commands with `microk8s --help`.")
206
207
208 def uninstall() -> None:
209 vm_provider_name = "multipass"
210 vm_provider_class = get_provider_for(vm_provider_name)
211 echo = Echo()
212 try:
213 vm_provider_class.ensure_provider()
214 except ProviderNotFound as provider_error:
215 if provider_error.prompt_installable:
216 if echo.is_tty_connected():
217 echo.warning(
218 (
219 "MicroK8s is not running. VM provider {!r} has been removed.".format(
220 provider_error.provider
221 )
222 )
223 )
224 return 1
225 else:
226 raise provider_error
227
228 instance = vm_provider_class(echoer=echo)
229 instance.destroy()
230 clear_kubeconfig()
231 echo.info("Thank you for using MicroK8s!")
232
233
234 def kubectl(args) -> int:
235 if platform == "win32":
236 return Windows(args).kubectl()
237 if platform == "darwin":
238 return MacOS(args).kubectl()
239 else:
240 return Linux(args).kubectl()
241
242
243 def inspect() -> None:
244 vm_provider_name = "multipass"
245 vm_provider_class = get_provider_for(vm_provider_name)
246 echo = Echo()
247 try:
248 vm_provider_class.ensure_provider()
249 instance = vm_provider_class(echoer=echo)
250 instance.get_instance_info()
251
252 command = ["microk8s.inspect"]
253 output = instance.run(command, hide_output=True)
254 tarball_location = None
255 host_destination = getcwd()
256 if b"Report tarball is at" not in output:
257 echo.error("Report tarball not generated")
258 else:
259 for line_out in output.split(b"\n"):
260 line_out = line_out.decode()
261 line = line_out.strip()
262 if line.startswith("Report tarball is at "):
263 tarball_location = line.split("Report tarball is at ")[1]
264 break
265 echo.wrapped(line_out)
266 if not tarball_location:
267 echo.error("Cannot find tarball file location")
268 else:
269 instance.pull_file(name=tarball_location, destination=host_destination)
270 echo.wrapped(
271 "The report tarball {} is stored on the current directory".format(
272 tarball_location.split("/")[-1]
273 )
274 )
275
276 except ProviderInstanceNotFoundError:
277 _not_installed(echo)
278 return 1
279
280
281 def dashboard_proxy() -> None:
282 vm_provider_name = "multipass"
283 vm_provider_class = get_provider_for(vm_provider_name)
284 echo = Echo()
285 try:
286 vm_provider_class.ensure_provider()
287 instance = vm_provider_class(echoer=echo)
288 instance.get_instance_info()
289
290 echo.info("Checking if Dashboard is running.")
291 command = ["microk8s.enable", "dashboard"]
292 output = instance.run(command, hide_output=True)
293 if b"Addon dashboard is already enabled." not in output:
294 echo.info("Waiting for Dashboard to come up.")
295 command = [
296 "microk8s.kubectl",
297 "-n",
298 "kube-system",
299 "wait",
300 "--timeout=240s",
301 "deployment",
302 "kubernetes-dashboard",
303 "--for",
304 "condition=available",
305 ]
306 instance.run(command, hide_output=True)
307
308 command = ["microk8s.kubectl", "-n", "kube-system", "get", "secret"]
309 output = instance.run(command, hide_output=True)
310 secret_name = None
311 for line in output.split(b"\n"):
312 if line.startswith(b"default-token"):
313 secret_name = line.split()[0].decode()
314 break
315
316 if not secret_name:
317 echo.error("Cannot find the dashboard secret.")
318
319 command = ["microk8s.kubectl", "-n", "kube-system", "describe", "secret", secret_name]
320 output = instance.run(command, hide_output=True)
321 token = None
322 for line in output.split(b"\n"):
323 if line.startswith(b"token:"):
324 token = line.split()[1].decode()
325
326 if not token:
327 echo.error("Cannot find token from secret.")
328
329 ip = instance.get_instance_info().ipv4[0]
330
331 echo.info("Dashboard will be available at https://{}:10443".format(ip))
332 echo.info("Use the following token to login:")
333 echo.info(token)
334
335 command = [
336 "microk8s.kubectl",
337 "port-forward",
338 "-n",
339 "kube-system",
340 "service/kubernetes-dashboard",
341 "10443:443",
342 "--address",
343 "0.0.0.0",
344 ]
345
346 try:
347 instance.run(command)
348 except KeyboardInterrupt:
349 return
350 except ProviderInstanceNotFoundError:
351 _not_installed(echo)
352 return 1
353
354
355 def start() -> None:
356 vm_provider_name = "multipass"
357 vm_provider_class = get_provider_for(vm_provider_name)
358 vm_provider_class.ensure_provider()
359 instance = vm_provider_class(echoer=Echo())
360 instance_info = instance.get_instance_info()
361 if not instance_info.is_running():
362 instance.start()
363 instance.run(["microk8s.start"])
364
365
366 def stop() -> None:
367 vm_provider_name = "multipass"
368 vm_provider_class = get_provider_for(vm_provider_name)
369 vm_provider_class.ensure_provider()
370 instance = vm_provider_class(echoer=Echo())
371 instance_info = instance.get_instance_info()
372 if instance_info.is_running():
373 instance.stop()
374
375
376 def run(cmd) -> None:
377 vm_provider_name = "multipass"
378 vm_provider_class = get_provider_for(vm_provider_name)
379 echo = Echo()
380 try:
381 vm_provider_class.ensure_provider()
382 instance = vm_provider_class(echoer=echo)
383 instance_info = instance.get_instance_info()
384 if not instance_info.is_running():
385 echo.warning("MicroK8s is not running. Please run `microk8s start`.")
386 return 1
387 command = cmd[0]
388 cmd[0] = "microk8s.{}".format(command)
389 instance.run(cmd)
390 except ProviderInstanceNotFoundError:
391 _not_installed(echo)
392 return 1
393
394
395 def _not_installed(echo) -> None:
396 if echo.is_tty_connected():
397 echo.warning("MicroK8s is not installed. Please run `microk8s install`.")
398
399
400 def _get_microk8s_commands() -> List:
401 vm_provider_name = "multipass"
402 vm_provider_class = get_provider_for(vm_provider_name)
403 echo = Echo()
404 try:
405 vm_provider_class.ensure_provider()
406 instance = vm_provider_class(echoer=echo)
407 instance_info = instance.get_instance_info()
408 if instance_info.is_running():
409 commands = instance.run("ls -1 /snap/bin/".split(), hide_output=True)
410 mk8s = [
411 c.decode().replace("microk8s.", "")
412 for c in commands.split()
413 if c.decode().startswith("microk8s.")
414 ]
415 complete = mk8s
416 if "dashboard-proxy" not in mk8s:
417 complete += ["dashboard-proxy"]
418 complete.sort()
419 return complete
420 else:
421 return ["start", "stop"]
422 except ProviderNotFound:
423 return ["start", "stop"]
424
425
426 if __name__ == "__main__":
427 cli()
428
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/installer/cli/microk8s.py b/installer/cli/microk8s.py
--- a/installer/cli/microk8s.py
+++ b/installer/cli/microk8s.py
@@ -309,7 +309,7 @@
output = instance.run(command, hide_output=True)
secret_name = None
for line in output.split(b"\n"):
- if line.startswith(b"default-token"):
+ if line.startswith(b"microk8s-dashboard-token"):
secret_name = line.split()[0].decode()
break
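
The recorded patch is a one-line change: match the secret the dashboard addon actually creates (`microk8s-dashboard-token`) rather than the legacy `default-token` prefix. Below is a small, self-contained sketch of the patched matching loop, run against the secret names shown in the issue's `kubectl get secret` output (illustrative only, not code taken from the repository):

```python
# Simulated `kubectl -n kube-system get secret` output, using the names from the issue.
output = b"""NAME                              TYPE
kubernetes-dashboard-certs        Opaque
microk8s-dashboard-token          kubernetes.io/service-account-token
kubernetes-dashboard-csrf         Opaque
kubernetes-dashboard-key-holder   Opaque
"""

secret_name = None
for line in output.split(b"\n"):
    if line.startswith(b"microk8s-dashboard-token"):
        secret_name = line.split()[0].decode()
        break

print(secret_name)  # -> microk8s-dashboard-token
```

Note that the patch does not change the control flow after `echo.error("Cannot find the dashboard secret.")`, which still falls through when no secret matches; guarding that path would be a separate change.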
| {"golden_diff": "diff --git a/installer/cli/microk8s.py b/installer/cli/microk8s.py\n--- a/installer/cli/microk8s.py\n+++ b/installer/cli/microk8s.py\n@@ -309,7 +309,7 @@\n output = instance.run(command, hide_output=True)\n secret_name = None\n for line in output.split(b\"\\n\"):\n- if line.startswith(b\"default-token\"):\n+ if line.startswith(b\"microk8s-dashboard-token\"):\n secret_name = line.split()[0].decode()\n break\n", "issue": "TypeError: sequence item 10: expected str instance, NoneType found (microk8s dashboard-proxy)\n#### Summary\r\n\r\nI installed MicroK8s on Windows 10 [Version 10.0.19043.2130]. I can run `microk8s dashboard-proxy` inside `microk8s-vm` VM, but I can't run `microk8s dashboard-proxy` on the Host.\r\n\r\n#### What Should Happen Instead?\r\n\r\nIt should run without error on host.\r\n\r\n#### Reproduction Steps\r\n\r\nOn Windows PowerShell\r\n\r\n1. `multipass launch --name 'microk8s-vm' --bridged --disk 50G --cpus 2 --mem 4G`\r\n2. `multipass shell microk8s-vm`\r\n\r\nOn `microk8s-vm`\r\n\r\n1. `sudo snap install microk8s --classic --channel 1.25/stable`\r\n\r\n ```txt\r\n microk8s (1.25/stable) v1.25.2 from Canonical\u2713 installed\r\n ```\r\n\r\n3. `sudo microk8s status --wait-ready`\r\n\r\n ```txt\r\n microk8s is running\r\n high-availability: no\r\n datastore master nodes: 127.0.0.1:19001\r\n datastore standby nodes: none\r\n addons:\r\n enabled:\r\n ha-cluster # (core) Configure high availability on the current node\r\n helm # (core) Helm - the package manager for Kubernetes\r\n helm3 # (core) Helm 3 - the package manager for Kubernetes\r\n disabled:\r\n cert-manager # (core) Cloud native certificate management\r\n community # (core) The community addons repository\r\n dashboard # (core) The Kubernetes dashboard\r\n dns # (core) CoreDNS\r\n gpu # (core) Automatic enablement of Nvidia CUDA\r\n host-access # (core) Allow Pods connecting to Host services smoothly\r\n hostpath-storage # (core) Storage class; allocates storage from host directory\r\n ingress # (core) Ingress controller for external access\r\n kube-ovn # (core) An advanced network fabric for Kubernetes\r\n mayastor # (core) OpenEBS MayaStor\r\n metallb # (core) Loadbalancer for your Kubernetes cluster\r\n metrics-server # (core) K8s Metrics Server for API access to service metrics\r\n observability # (core) A lightweight observability stack for logs, traces and metrics\r\n prometheus # (core) Prometheus operator for monitoring and logging\r\n rbac # (core) Role-Based Access Control for authorisation\r\n registry # (core) Private image registry exposed on localhost:32000\r\n storage # (core) Alias to hostpath-storage add-on, deprecated\r\n ```\r\n\r\n4. 
`sudo microk8s enable dashboard`\r\n\r\n ```txt\r\n Infer repository core for addon dashboard\r\n Enabling Kubernetes Dashboard\r\n Infer repository core for addon metrics-server\r\n Enabling Metrics-Server\r\n serviceaccount/metrics-server created\r\n clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created\r\n clusterrole.rbac.authorization.k8s.io/system:metrics-server created\r\n rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader created\r\n clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator created\r\n clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server created\r\n service/metrics-server created\r\n deployment.apps/metrics-server created\r\n apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created\r\n clusterrolebinding.rbac.authorization.k8s.io/microk8s-admin created\r\n Metrics-Server is enabled\r\n Applying manifest\r\n serviceaccount/kubernetes-dashboard created\r\n service/kubernetes-dashboard created\r\n secret/kubernetes-dashboard-certs created\r\n secret/kubernetes-dashboard-csrf created\r\n secret/kubernetes-dashboard-key-holder created\r\n configmap/kubernetes-dashboard-settings created\r\n role.rbac.authorization.k8s.io/kubernetes-dashboard created\r\n clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created\r\n rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created\r\n clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created\r\n deployment.apps/kubernetes-dashboard created\r\n service/dashboard-metrics-scraper created\r\n deployment.apps/dashboard-metrics-scraper created\r\n secret/microk8s-dashboard-token created\r\n\r\n If RBAC is not enabled access the dashboard using the token retrieved with:\r\n\r\n microk8s kubectl describe secret -n kube-system microk8s-dashboard-token\r\n\r\n Use this token in the https login UI of the kubernetes-dashboard service.\r\n\r\n In an RBAC enabled setup (microk8s enable RBAC) you need to create a user with restricted\r\n permissions as shown in:\r\n https://github.com/kubernetes/dashboard/blob/master/docs/user/access-control/creating-sample-user.md\r\n ```\r\n\r\n5. `sudo microk8s dashboard-proxy`\r\n\r\n ```txt\r\n Checking if Dashboard is running.\r\n Infer repository core for addon dashboard\r\n Infer repository core for addon metrics-server\r\n Waiting for Dashboard to come up.\r\n Trying to get token from microk8s-dashboard-token\r\n Waiting for secret token (attempt 0)\r\n Dashboard will be available at https://127.0.0.1:10443\r\n Use the following token to login:\r\n eyJhbGciOiJSUzI1NiI...(redacted)\r\n ```\r\n\r\n6. `exit`\r\n\r\n Exit to the Windows PowerShell\r\n \r\nOn Windows PowerShell\r\n\r\n1. `microk8s config > $env:LOCALAPPDATA\\MicroK8s\\config`\r\n\r\n2. 
`microk8s dashboard-proxy`\r\n\r\n ```txt\r\n Checking if Dashboard is running.\r\n Infer repository core for addon dashboard\r\n Waiting for Dashboard to come up.\r\n Cannot find the dashboard secret.\r\n An unexpected error occurred.\r\n sequence item 10: expected str instance, NoneType found\r\n Traceback (most recent call last):\r\n File \"cli\\microk8s.py\", line 57, in cli\r\n File \"cli\\microk8s.py\", line 253, in dashboard_proxy\r\n File \"vm_providers\\_multipass\\_multipass.py\", line 54, in run\r\n File \"vm_providers\\_multipass\\_multipass_command.py\", line 232, in execute\r\n File \"vm_providers\\_multipass\\_multipass_command.py\", line 38, in _run_output\r\n TypeError: sequence item 10: expected str instance, NoneType found\r\n None\r\n ```\r\n\r\nRun under Host. It will produce error.\r\n\r\n\r\n\r\nRun under Ubuntu VM. It's OK.\r\n\r\n\r\n\r\n\r\n#### Introspection Report\r\n\r\n[inspection-report-20221019_111239.tar.gz](https://github.com/canonical/microk8s/files/9816525/inspection-report-20221019_111239.tar.gz)\r\n\r\n#### Can you suggest a fix?\r\n\r\nNo.\r\n\r\n#### Are you interested in contributing with a fix?\r\n\r\nyes if I can.\r\n\r\n### More details\r\n\r\nI checked the source code here:\r\nhttps://github.com/canonical/microk8s/blob/e1d115f46a38ada6a2c5b236c9e26687b3529c45/installer/cli/microk8s.py#L308-L314\r\n\r\nHere is my output:\r\n\r\n```txt\r\n$ microk8s.kubectl -n kube-system get secret\r\nNAME TYPE DATA AGE\r\nkubernetes-dashboard-certs Opaque 0 159m\r\nmicrok8s-dashboard-token kubernetes.io/service-account-token 3 159m\r\nkubernetes-dashboard-csrf Opaque 1 159m\r\nkubernetes-dashboard-key-holder Opaque 2 159m\r\n```\r\n\r\nIt seems I don't have a `default-token` in the output. I don't know why I still can run `microk8s dashboard-proxy` inside Ubuntu Linux?\r\n\n", "before_files": [{"content": "import argparse\nimport logging\nimport traceback\nfrom typing import List\nfrom sys import exit, platform\nfrom os import getcwd\n\nimport click\n\nfrom cli.echo import Echo\nfrom common import definitions\nfrom common.auxiliary import Windows, MacOS, Linux\nfrom common.errors import BaseError\nfrom common.file_utils import get_kubeconfig_path, clear_kubeconfig\nfrom vm_providers.factory import get_provider_for\nfrom vm_providers.errors import ProviderNotFound, ProviderInstanceNotFoundError\n\nlogger = logging.getLogger(__name__)\n\n\[email protected](\n name=\"microk8s\",\n context_settings=dict(\n ignore_unknown_options=True,\n allow_extra_args=True,\n ),\n)\[email protected](\"-h\", \"--help\", is_flag=True)\[email protected]_context\ndef cli(ctx, help):\n try:\n if help and len(ctx.args) == 0:\n show_help()\n exit(0)\n elif help:\n ctx.args.append(\"--help\")\n\n if len(ctx.args) == 0:\n show_error()\n exit(1)\n if ctx.args[0] == \"install\":\n install(ctx.args[1:])\n exit(0)\n elif ctx.args[0] == \"uninstall\":\n uninstall()\n exit(0)\n elif ctx.args[0] == \"start\":\n start()\n run(ctx.args)\n exit(0)\n elif ctx.args[0] == \"stop\":\n run(ctx.args)\n stop()\n exit(0)\n elif ctx.args[0] == \"kubectl\":\n exit(kubectl(ctx.args[1:]))\n elif ctx.args[0] == \"dashboard-proxy\":\n dashboard_proxy()\n exit(0)\n elif ctx.args[0] == \"inspect\":\n inspect()\n exit(0)\n else:\n run(ctx.args)\n exit(0)\n\n except BaseError as e:\n Echo.error(str(e))\n exit(e.get_exit_code())\n except Exception as e:\n Echo.error(\"An unexpected error occurred.\")\n Echo.info(str(e))\n Echo.info(traceback.print_exc())\n exit(254)\n\n\ndef show_error():\n msg = \"\"\"Usage: microk8s 
[OPTIONS] COMMAND [ARGS]...\n\nOptions:\n --help Shows the available COMMANDS.\"\"\"\n click.echo(msg)\n\n\ndef show_help():\n msg = \"\"\"Usage: microk8s [OPTIONS] COMMAND [ARGS]...\n\nOptions:\n --help Show this message and exit.\n\nCommands:\n install Installs MicroK8s. Use --cpu, --mem, --disk and --channel to configure your setup.\n uninstall Removes MicroK8s\"\"\"\n click.echo(msg)\n commands = _get_microk8s_commands()\n for command in commands:\n if command in definitions.command_descriptions:\n click.echo(\" {:<15} {}\".format(command, definitions.command_descriptions[command]))\n else:\n click.echo(\" {:<15}\".format(command))\n if len(commands) == 2:\n click.echo(\"\")\n click.echo(\"Install and start MicroK8s to see the full list of commands.\")\n\n\ndef _show_install_help():\n msg = f\"\"\"Usage: microk8s install OPTIONS\n\n Options:\n --help Show this message and exit.\n --cpu Cores used by MicroK8s (default={definitions.DEFAULT_CORES}, min={definitions.MIN_CORES})\n --mem RAM in GB used by MicroK8s (default={definitions.DEFAULT_MEMORY_GB}, min={definitions.MIN_MEMORY_GB})\n --disk Max volume in GB of the dynamically expandable hard disk to be used (default={definitions.DEFAULT_DISK_GB}, min={definitions.MIN_DISK_GB})\n --channel Kubernetes version to install (default={definitions.DEFAULT_CHANNEL})\n -y, --assume-yes Automatic yes to prompts\"\"\" # noqa\n Echo.info(msg)\n\n\ndef memory(mem_gb: str) -> int:\n \"\"\"\n Validates the value in --mem parameter of the install command.\n \"\"\"\n mem_gb = int(mem_gb)\n if mem_gb < definitions.MIN_MEMORY_GB:\n raise ValueError(\"Out of valid memory range\")\n return mem_gb\n\n\ndef cpu(cpus: str) -> int:\n \"\"\"\n Validates the value in --cpu parameter of the install command.\n \"\"\"\n cpus = int(cpus)\n if cpus < definitions.MIN_CORES:\n raise ValueError(\"Invalid number of cpus\")\n return cpus\n\n\ndef disk(disk_gb: str) -> int:\n \"\"\"\n Validates the value in --disk parameter of the install command.\n \"\"\"\n disk_gb = int(disk_gb)\n if disk_gb < definitions.MIN_DISK_GB:\n raise ValueError(\"Out of valid disk range\")\n return disk_gb\n\n\ndef install(args) -> None:\n if \"--help\" in args or \"-h\" in args:\n _show_install_help()\n return\n\n parser = argparse.ArgumentParser(\"microk8s install\")\n parser.add_argument(\"--cpu\", default=definitions.DEFAULT_CORES, type=cpu)\n parser.add_argument(\"--mem\", default=definitions.DEFAULT_MEMORY_GB, type=memory)\n parser.add_argument(\"--disk\", default=definitions.DEFAULT_DISK_GB, type=disk)\n parser.add_argument(\"--channel\", default=definitions.DEFAULT_CHANNEL, type=str)\n parser.add_argument(\n \"-y\", \"--assume-yes\", action=\"store_true\", default=definitions.DEFAULT_ASSUME\n )\n args = parser.parse_args(args)\n\n echo = Echo()\n\n if platform == \"win32\":\n host = Windows(args)\n elif platform == \"darwin\":\n host = MacOS(args)\n else:\n host = Linux(args)\n\n if not host.has_enough_cpus():\n echo.error(\"VM cpus requested exceed number of available cores on host.\")\n exit(1)\n if not host.has_enough_memory():\n echo.warning(\"VM memory requested exceeds the total memory on host.\")\n exit(1)\n if not host.has_enough_disk_space():\n echo.warning(\"VM disk size requested exceeds free space on host.\")\n\n vm_provider_name: str = \"multipass\"\n vm_provider_class = get_provider_for(vm_provider_name)\n try:\n vm_provider_class.ensure_provider()\n except ProviderNotFound as provider_error:\n if provider_error.prompt_installable:\n if args.assume_yes or (\n 
echo.is_tty_connected()\n and echo.confirm(\n \"Support for {!r} needs to be set up. \"\n \"Would you like to do that now?\".format(provider_error.provider)\n )\n ):\n vm_provider_class.setup_provider(echoer=echo)\n else:\n raise provider_error\n else:\n raise provider_error\n\n instance = vm_provider_class(echoer=echo)\n spec = vars(args)\n spec.update({\"kubeconfig\": get_kubeconfig_path()})\n instance.launch_instance(spec)\n echo.info(\"MicroK8s is up and running. See the available commands with `microk8s --help`.\")\n\n\ndef uninstall() -> None:\n vm_provider_name = \"multipass\"\n vm_provider_class = get_provider_for(vm_provider_name)\n echo = Echo()\n try:\n vm_provider_class.ensure_provider()\n except ProviderNotFound as provider_error:\n if provider_error.prompt_installable:\n if echo.is_tty_connected():\n echo.warning(\n (\n \"MicroK8s is not running. VM provider {!r} has been removed.\".format(\n provider_error.provider\n )\n )\n )\n return 1\n else:\n raise provider_error\n\n instance = vm_provider_class(echoer=echo)\n instance.destroy()\n clear_kubeconfig()\n echo.info(\"Thank you for using MicroK8s!\")\n\n\ndef kubectl(args) -> int:\n if platform == \"win32\":\n return Windows(args).kubectl()\n if platform == \"darwin\":\n return MacOS(args).kubectl()\n else:\n return Linux(args).kubectl()\n\n\ndef inspect() -> None:\n vm_provider_name = \"multipass\"\n vm_provider_class = get_provider_for(vm_provider_name)\n echo = Echo()\n try:\n vm_provider_class.ensure_provider()\n instance = vm_provider_class(echoer=echo)\n instance.get_instance_info()\n\n command = [\"microk8s.inspect\"]\n output = instance.run(command, hide_output=True)\n tarball_location = None\n host_destination = getcwd()\n if b\"Report tarball is at\" not in output:\n echo.error(\"Report tarball not generated\")\n else:\n for line_out in output.split(b\"\\n\"):\n line_out = line_out.decode()\n line = line_out.strip()\n if line.startswith(\"Report tarball is at \"):\n tarball_location = line.split(\"Report tarball is at \")[1]\n break\n echo.wrapped(line_out)\n if not tarball_location:\n echo.error(\"Cannot find tarball file location\")\n else:\n instance.pull_file(name=tarball_location, destination=host_destination)\n echo.wrapped(\n \"The report tarball {} is stored on the current directory\".format(\n tarball_location.split(\"/\")[-1]\n )\n )\n\n except ProviderInstanceNotFoundError:\n _not_installed(echo)\n return 1\n\n\ndef dashboard_proxy() -> None:\n vm_provider_name = \"multipass\"\n vm_provider_class = get_provider_for(vm_provider_name)\n echo = Echo()\n try:\n vm_provider_class.ensure_provider()\n instance = vm_provider_class(echoer=echo)\n instance.get_instance_info()\n\n echo.info(\"Checking if Dashboard is running.\")\n command = [\"microk8s.enable\", \"dashboard\"]\n output = instance.run(command, hide_output=True)\n if b\"Addon dashboard is already enabled.\" not in output:\n echo.info(\"Waiting for Dashboard to come up.\")\n command = [\n \"microk8s.kubectl\",\n \"-n\",\n \"kube-system\",\n \"wait\",\n \"--timeout=240s\",\n \"deployment\",\n \"kubernetes-dashboard\",\n \"--for\",\n \"condition=available\",\n ]\n instance.run(command, hide_output=True)\n\n command = [\"microk8s.kubectl\", \"-n\", \"kube-system\", \"get\", \"secret\"]\n output = instance.run(command, hide_output=True)\n secret_name = None\n for line in output.split(b\"\\n\"):\n if line.startswith(b\"default-token\"):\n secret_name = line.split()[0].decode()\n break\n\n if not secret_name:\n echo.error(\"Cannot find the dashboard 
secret.\")\n\n command = [\"microk8s.kubectl\", \"-n\", \"kube-system\", \"describe\", \"secret\", secret_name]\n output = instance.run(command, hide_output=True)\n token = None\n for line in output.split(b\"\\n\"):\n if line.startswith(b\"token:\"):\n token = line.split()[1].decode()\n\n if not token:\n echo.error(\"Cannot find token from secret.\")\n\n ip = instance.get_instance_info().ipv4[0]\n\n echo.info(\"Dashboard will be available at https://{}:10443\".format(ip))\n echo.info(\"Use the following token to login:\")\n echo.info(token)\n\n command = [\n \"microk8s.kubectl\",\n \"port-forward\",\n \"-n\",\n \"kube-system\",\n \"service/kubernetes-dashboard\",\n \"10443:443\",\n \"--address\",\n \"0.0.0.0\",\n ]\n\n try:\n instance.run(command)\n except KeyboardInterrupt:\n return\n except ProviderInstanceNotFoundError:\n _not_installed(echo)\n return 1\n\n\ndef start() -> None:\n vm_provider_name = \"multipass\"\n vm_provider_class = get_provider_for(vm_provider_name)\n vm_provider_class.ensure_provider()\n instance = vm_provider_class(echoer=Echo())\n instance_info = instance.get_instance_info()\n if not instance_info.is_running():\n instance.start()\n instance.run([\"microk8s.start\"])\n\n\ndef stop() -> None:\n vm_provider_name = \"multipass\"\n vm_provider_class = get_provider_for(vm_provider_name)\n vm_provider_class.ensure_provider()\n instance = vm_provider_class(echoer=Echo())\n instance_info = instance.get_instance_info()\n if instance_info.is_running():\n instance.stop()\n\n\ndef run(cmd) -> None:\n vm_provider_name = \"multipass\"\n vm_provider_class = get_provider_for(vm_provider_name)\n echo = Echo()\n try:\n vm_provider_class.ensure_provider()\n instance = vm_provider_class(echoer=echo)\n instance_info = instance.get_instance_info()\n if not instance_info.is_running():\n echo.warning(\"MicroK8s is not running. Please run `microk8s start`.\")\n return 1\n command = cmd[0]\n cmd[0] = \"microk8s.{}\".format(command)\n instance.run(cmd)\n except ProviderInstanceNotFoundError:\n _not_installed(echo)\n return 1\n\n\ndef _not_installed(echo) -> None:\n if echo.is_tty_connected():\n echo.warning(\"MicroK8s is not installed. 
Please run `microk8s install`.\")\n\n\ndef _get_microk8s_commands() -> List:\n vm_provider_name = \"multipass\"\n vm_provider_class = get_provider_for(vm_provider_name)\n echo = Echo()\n try:\n vm_provider_class.ensure_provider()\n instance = vm_provider_class(echoer=echo)\n instance_info = instance.get_instance_info()\n if instance_info.is_running():\n commands = instance.run(\"ls -1 /snap/bin/\".split(), hide_output=True)\n mk8s = [\n c.decode().replace(\"microk8s.\", \"\")\n for c in commands.split()\n if c.decode().startswith(\"microk8s.\")\n ]\n complete = mk8s\n if \"dashboard-proxy\" not in mk8s:\n complete += [\"dashboard-proxy\"]\n complete.sort()\n return complete\n else:\n return [\"start\", \"stop\"]\n except ProviderNotFound:\n return [\"start\", \"stop\"]\n\n\nif __name__ == \"__main__\":\n cli()\n", "path": "installer/cli/microk8s.py"}], "after_files": [{"content": "import argparse\nimport logging\nimport traceback\nfrom typing import List\nfrom sys import exit, platform\nfrom os import getcwd\n\nimport click\n\nfrom cli.echo import Echo\nfrom common import definitions\nfrom common.auxiliary import Windows, MacOS, Linux\nfrom common.errors import BaseError\nfrom common.file_utils import get_kubeconfig_path, clear_kubeconfig\nfrom vm_providers.factory import get_provider_for\nfrom vm_providers.errors import ProviderNotFound, ProviderInstanceNotFoundError\n\nlogger = logging.getLogger(__name__)\n\n\[email protected](\n name=\"microk8s\",\n context_settings=dict(\n ignore_unknown_options=True,\n allow_extra_args=True,\n ),\n)\[email protected](\"-h\", \"--help\", is_flag=True)\[email protected]_context\ndef cli(ctx, help):\n try:\n if help and len(ctx.args) == 0:\n show_help()\n exit(0)\n elif help:\n ctx.args.append(\"--help\")\n\n if len(ctx.args) == 0:\n show_error()\n exit(1)\n if ctx.args[0] == \"install\":\n install(ctx.args[1:])\n exit(0)\n elif ctx.args[0] == \"uninstall\":\n uninstall()\n exit(0)\n elif ctx.args[0] == \"start\":\n start()\n run(ctx.args)\n exit(0)\n elif ctx.args[0] == \"stop\":\n run(ctx.args)\n stop()\n exit(0)\n elif ctx.args[0] == \"kubectl\":\n exit(kubectl(ctx.args[1:]))\n elif ctx.args[0] == \"dashboard-proxy\":\n dashboard_proxy()\n exit(0)\n elif ctx.args[0] == \"inspect\":\n inspect()\n exit(0)\n else:\n run(ctx.args)\n exit(0)\n\n except BaseError as e:\n Echo.error(str(e))\n exit(e.get_exit_code())\n except Exception as e:\n Echo.error(\"An unexpected error occurred.\")\n Echo.info(str(e))\n Echo.info(traceback.print_exc())\n exit(254)\n\n\ndef show_error():\n msg = \"\"\"Usage: microk8s [OPTIONS] COMMAND [ARGS]...\n\nOptions:\n --help Shows the available COMMANDS.\"\"\"\n click.echo(msg)\n\n\ndef show_help():\n msg = \"\"\"Usage: microk8s [OPTIONS] COMMAND [ARGS]...\n\nOptions:\n --help Show this message and exit.\n\nCommands:\n install Installs MicroK8s. 
Use --cpu, --mem, --disk and --channel to configure your setup.\n uninstall Removes MicroK8s\"\"\"\n click.echo(msg)\n commands = _get_microk8s_commands()\n for command in commands:\n if command in definitions.command_descriptions:\n click.echo(\" {:<15} {}\".format(command, definitions.command_descriptions[command]))\n else:\n click.echo(\" {:<15}\".format(command))\n if len(commands) == 2:\n click.echo(\"\")\n click.echo(\"Install and start MicroK8s to see the full list of commands.\")\n\n\ndef _show_install_help():\n msg = f\"\"\"Usage: microk8s install OPTIONS\n\n Options:\n --help Show this message and exit.\n --cpu Cores used by MicroK8s (default={definitions.DEFAULT_CORES}, min={definitions.MIN_CORES})\n --mem RAM in GB used by MicroK8s (default={definitions.DEFAULT_MEMORY_GB}, min={definitions.MIN_MEMORY_GB})\n --disk Max volume in GB of the dynamically expandable hard disk to be used (default={definitions.DEFAULT_DISK_GB}, min={definitions.MIN_DISK_GB})\n --channel Kubernetes version to install (default={definitions.DEFAULT_CHANNEL})\n -y, --assume-yes Automatic yes to prompts\"\"\" # noqa\n Echo.info(msg)\n\n\ndef memory(mem_gb: str) -> int:\n \"\"\"\n Validates the value in --mem parameter of the install command.\n \"\"\"\n mem_gb = int(mem_gb)\n if mem_gb < definitions.MIN_MEMORY_GB:\n raise ValueError(\"Out of valid memory range\")\n return mem_gb\n\n\ndef cpu(cpus: str) -> int:\n \"\"\"\n Validates the value in --cpu parameter of the install command.\n \"\"\"\n cpus = int(cpus)\n if cpus < definitions.MIN_CORES:\n raise ValueError(\"Invalid number of cpus\")\n return cpus\n\n\ndef disk(disk_gb: str) -> int:\n \"\"\"\n Validates the value in --disk parameter of the install command.\n \"\"\"\n disk_gb = int(disk_gb)\n if disk_gb < definitions.MIN_DISK_GB:\n raise ValueError(\"Out of valid disk range\")\n return disk_gb\n\n\ndef install(args) -> None:\n if \"--help\" in args or \"-h\" in args:\n _show_install_help()\n return\n\n parser = argparse.ArgumentParser(\"microk8s install\")\n parser.add_argument(\"--cpu\", default=definitions.DEFAULT_CORES, type=cpu)\n parser.add_argument(\"--mem\", default=definitions.DEFAULT_MEMORY_GB, type=memory)\n parser.add_argument(\"--disk\", default=definitions.DEFAULT_DISK_GB, type=disk)\n parser.add_argument(\"--channel\", default=definitions.DEFAULT_CHANNEL, type=str)\n parser.add_argument(\n \"-y\", \"--assume-yes\", action=\"store_true\", default=definitions.DEFAULT_ASSUME\n )\n args = parser.parse_args(args)\n\n echo = Echo()\n\n if platform == \"win32\":\n host = Windows(args)\n elif platform == \"darwin\":\n host = MacOS(args)\n else:\n host = Linux(args)\n\n if not host.has_enough_cpus():\n echo.error(\"VM cpus requested exceed number of available cores on host.\")\n exit(1)\n if not host.has_enough_memory():\n echo.warning(\"VM memory requested exceeds the total memory on host.\")\n exit(1)\n if not host.has_enough_disk_space():\n echo.warning(\"VM disk size requested exceeds free space on host.\")\n\n vm_provider_name: str = \"multipass\"\n vm_provider_class = get_provider_for(vm_provider_name)\n try:\n vm_provider_class.ensure_provider()\n except ProviderNotFound as provider_error:\n if provider_error.prompt_installable:\n if args.assume_yes or (\n echo.is_tty_connected()\n and echo.confirm(\n \"Support for {!r} needs to be set up. 
\"\n \"Would you like to do that now?\".format(provider_error.provider)\n )\n ):\n vm_provider_class.setup_provider(echoer=echo)\n else:\n raise provider_error\n else:\n raise provider_error\n\n instance = vm_provider_class(echoer=echo)\n spec = vars(args)\n spec.update({\"kubeconfig\": get_kubeconfig_path()})\n instance.launch_instance(spec)\n echo.info(\"MicroK8s is up and running. See the available commands with `microk8s --help`.\")\n\n\ndef uninstall() -> None:\n vm_provider_name = \"multipass\"\n vm_provider_class = get_provider_for(vm_provider_name)\n echo = Echo()\n try:\n vm_provider_class.ensure_provider()\n except ProviderNotFound as provider_error:\n if provider_error.prompt_installable:\n if echo.is_tty_connected():\n echo.warning(\n (\n \"MicroK8s is not running. VM provider {!r} has been removed.\".format(\n provider_error.provider\n )\n )\n )\n return 1\n else:\n raise provider_error\n\n instance = vm_provider_class(echoer=echo)\n instance.destroy()\n clear_kubeconfig()\n echo.info(\"Thank you for using MicroK8s!\")\n\n\ndef kubectl(args) -> int:\n if platform == \"win32\":\n return Windows(args).kubectl()\n if platform == \"darwin\":\n return MacOS(args).kubectl()\n else:\n return Linux(args).kubectl()\n\n\ndef inspect() -> None:\n vm_provider_name = \"multipass\"\n vm_provider_class = get_provider_for(vm_provider_name)\n echo = Echo()\n try:\n vm_provider_class.ensure_provider()\n instance = vm_provider_class(echoer=echo)\n instance.get_instance_info()\n\n command = [\"microk8s.inspect\"]\n output = instance.run(command, hide_output=True)\n tarball_location = None\n host_destination = getcwd()\n if b\"Report tarball is at\" not in output:\n echo.error(\"Report tarball not generated\")\n else:\n for line_out in output.split(b\"\\n\"):\n line_out = line_out.decode()\n line = line_out.strip()\n if line.startswith(\"Report tarball is at \"):\n tarball_location = line.split(\"Report tarball is at \")[1]\n break\n echo.wrapped(line_out)\n if not tarball_location:\n echo.error(\"Cannot find tarball file location\")\n else:\n instance.pull_file(name=tarball_location, destination=host_destination)\n echo.wrapped(\n \"The report tarball {} is stored on the current directory\".format(\n tarball_location.split(\"/\")[-1]\n )\n )\n\n except ProviderInstanceNotFoundError:\n _not_installed(echo)\n return 1\n\n\ndef dashboard_proxy() -> None:\n vm_provider_name = \"multipass\"\n vm_provider_class = get_provider_for(vm_provider_name)\n echo = Echo()\n try:\n vm_provider_class.ensure_provider()\n instance = vm_provider_class(echoer=echo)\n instance.get_instance_info()\n\n echo.info(\"Checking if Dashboard is running.\")\n command = [\"microk8s.enable\", \"dashboard\"]\n output = instance.run(command, hide_output=True)\n if b\"Addon dashboard is already enabled.\" not in output:\n echo.info(\"Waiting for Dashboard to come up.\")\n command = [\n \"microk8s.kubectl\",\n \"-n\",\n \"kube-system\",\n \"wait\",\n \"--timeout=240s\",\n \"deployment\",\n \"kubernetes-dashboard\",\n \"--for\",\n \"condition=available\",\n ]\n instance.run(command, hide_output=True)\n\n command = [\"microk8s.kubectl\", \"-n\", \"kube-system\", \"get\", \"secret\"]\n output = instance.run(command, hide_output=True)\n secret_name = None\n for line in output.split(b\"\\n\"):\n if line.startswith(b\"microk8s-dashboard-token\"):\n secret_name = line.split()[0].decode()\n break\n\n if not secret_name:\n echo.error(\"Cannot find the dashboard secret.\")\n\n command = [\"microk8s.kubectl\", \"-n\", \"kube-system\", 
\"describe\", \"secret\", secret_name]\n output = instance.run(command, hide_output=True)\n token = None\n for line in output.split(b\"\\n\"):\n if line.startswith(b\"token:\"):\n token = line.split()[1].decode()\n\n if not token:\n echo.error(\"Cannot find token from secret.\")\n\n ip = instance.get_instance_info().ipv4[0]\n\n echo.info(\"Dashboard will be available at https://{}:10443\".format(ip))\n echo.info(\"Use the following token to login:\")\n echo.info(token)\n\n command = [\n \"microk8s.kubectl\",\n \"port-forward\",\n \"-n\",\n \"kube-system\",\n \"service/kubernetes-dashboard\",\n \"10443:443\",\n \"--address\",\n \"0.0.0.0\",\n ]\n\n try:\n instance.run(command)\n except KeyboardInterrupt:\n return\n except ProviderInstanceNotFoundError:\n _not_installed(echo)\n return 1\n\n\ndef start() -> None:\n vm_provider_name = \"multipass\"\n vm_provider_class = get_provider_for(vm_provider_name)\n vm_provider_class.ensure_provider()\n instance = vm_provider_class(echoer=Echo())\n instance_info = instance.get_instance_info()\n if not instance_info.is_running():\n instance.start()\n instance.run([\"microk8s.start\"])\n\n\ndef stop() -> None:\n vm_provider_name = \"multipass\"\n vm_provider_class = get_provider_for(vm_provider_name)\n vm_provider_class.ensure_provider()\n instance = vm_provider_class(echoer=Echo())\n instance_info = instance.get_instance_info()\n if instance_info.is_running():\n instance.stop()\n\n\ndef run(cmd) -> None:\n vm_provider_name = \"multipass\"\n vm_provider_class = get_provider_for(vm_provider_name)\n echo = Echo()\n try:\n vm_provider_class.ensure_provider()\n instance = vm_provider_class(echoer=echo)\n instance_info = instance.get_instance_info()\n if not instance_info.is_running():\n echo.warning(\"MicroK8s is not running. Please run `microk8s start`.\")\n return 1\n command = cmd[0]\n cmd[0] = \"microk8s.{}\".format(command)\n instance.run(cmd)\n except ProviderInstanceNotFoundError:\n _not_installed(echo)\n return 1\n\n\ndef _not_installed(echo) -> None:\n if echo.is_tty_connected():\n echo.warning(\"MicroK8s is not installed. Please run `microk8s install`.\")\n\n\ndef _get_microk8s_commands() -> List:\n vm_provider_name = \"multipass\"\n vm_provider_class = get_provider_for(vm_provider_name)\n echo = Echo()\n try:\n vm_provider_class.ensure_provider()\n instance = vm_provider_class(echoer=echo)\n instance_info = instance.get_instance_info()\n if instance_info.is_running():\n commands = instance.run(\"ls -1 /snap/bin/\".split(), hide_output=True)\n mk8s = [\n c.decode().replace(\"microk8s.\", \"\")\n for c in commands.split()\n if c.decode().startswith(\"microk8s.\")\n ]\n complete = mk8s\n if \"dashboard-proxy\" not in mk8s:\n complete += [\"dashboard-proxy\"]\n complete.sort()\n return complete\n else:\n return [\"start\", \"stop\"]\n except ProviderNotFound:\n return [\"start\", \"stop\"]\n\n\nif __name__ == \"__main__\":\n cli()\n", "path": "installer/cli/microk8s.py"}]} |
gh_patches_debug_1258 | rasdani/github-patches | git_diff | facebookresearch__ParlAI-1956 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Quickstart AttributeError: 'HogwildWorld' object has no attribute 'acts'
**Bug description**
When going through the ParlAI [quickstart](https://parl.ai/docs/tutorial_quick.html#install), I got the following error:
``` python
Traceback (most recent call last):
File "examples/interactive.py", line 18, in <module>
interactive(opt, print_parser=parser)
File "/root/ParlAI/parlai/scripts/interactive.py", line 68, in interactive
agent = create_agent(opt, requireModelExists=True)
File "/root/ParlAI/parlai/core/agents.py", line 683, in create_agent
model = load_agent_module(opt)
File "/root/ParlAI/parlai/core/agents.py", line 548, in load_agent_module
return model_class(new_opt)
File "/root/ParlAI/parlai/agents/memnn/memnn.py", line 86, in __init__
super().__init__(opt, shared)
File "/root/ParlAI/parlai/core/torch_ranker_agent.py", line 135, in __init__
super().__init__(opt, shared)
File "/root/ParlAI/parlai/core/torch_agent.py", line 737, in __init__
self.set_interactive_mode(opt['interactive_mode'], shared)
File "/root/ParlAI/parlai/core/torch_ranker_agent.py", line 206, in set_interactive_mode
path = self.get_task_candidates_path()
File "/root/ParlAI/parlai/core/torch_ranker_agent.py", line 230, in get_task_candidates_path
build_cands(opt)
File "/root/ParlAI/parlai/scripts/build_candidates.py", line 47, in build_cands
acts = world.get_acts()[0]
File "/root/ParlAI/parlai/core/worlds.py", line 162, in get_acts
return self.acts
AttributeError: 'HogwildWorld' object has no attribute 'acts'
```
**While running**
```bash
python examples/interactive.py -mf /tmp/babi_memnn -ecands vocab
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parlai/scripts/build_candidates.py`
Content:
```
1 #!/usr/bin/env python3
2
3 # Copyright (c) Facebook, Inc. and its affiliates.
4 # This source code is licensed under the MIT license found in the
5 # LICENSE file in the root directory of this source tree.
6 """Build the candidate responses for a retrieval model.
7
8 Examples
9 --------
10
11 .. code-block:: shell
12
13 python build_candidates.py -t convai2 --outfile /tmp/cands.txt
14 """
15
16 from parlai.core.params import ParlaiParser
17 from parlai.agents.repeat_label.repeat_label import RepeatLabelAgent
18 from parlai.core.worlds import create_task
19 from parlai.core.utils import TimeLogger
20 import random
21 import tempfile
22
23
24 def build_cands(opt):
25 # create repeat label agent and assign it to the specified task
26 agent = RepeatLabelAgent(opt)
27 world = create_task(opt, agent)
28 if opt['outfile'] is None:
29 outfile = tempfile.mkstemp(
30 prefix='{}_{}_'.format(opt['task'], opt['datatype']), suffix='.txt'
31 )[1]
32 else:
33 outfile = opt['outfile']
34
35 if opt.get('num_examples', -1) == -1:
36 num_examples = world.num_examples()
37 else:
38 num_examples = opt['num_examples']
39 log_timer = TimeLogger()
40
41 print('[ starting to build candidates from task.. (ex:' + str(num_examples) + ')]')
42 print('[ saving output to {} ]'.format(outfile))
43 cands = []
44 for _ in range(num_examples):
45 world.parley()
46 # We get the acts of the first agent, which is the teacher.
47 acts = world.get_acts()[0]
48 if isinstance(acts, dict):
49 # We turn into a batch of 1 example, in case batching is being used.
50 acts = [acts]
51 for a in acts:
52 candidate = a.get('labels', a.get('eval_labels', None))
53 if candidate is not None:
54 candidate = candidate[0]
55 cands.append(candidate)
56 if log_timer.time() > opt['log_every_n_secs']:
57 text, _log = log_timer.log(world.total_parleys, world.num_examples())
58 print(text)
59 if world.epoch_done():
60 print('EPOCH DONE')
61 break
62 fw = open(outfile, 'w')
63 fw.write('\n'.join(cands))
64 fw.close()
65
66
67 def main():
68 random.seed(42)
69 # Get command line arguments
70 parser = ParlaiParser()
71 parser.add_argument(
72 '-n',
73 '--num-examples',
74 default=-1,
75 type=int,
76 help='Total number of exs to convert, -1 to convert all examples',
77 )
78 parser.add_argument(
79 '-of',
80 '--outfile',
81 default=None,
82 type=str,
83 help='Output file where to save, by default will be created in /tmp',
84 )
85 parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)
86 parser.set_defaults(datatype='train:evalmode')
87 opt = parser.parse_args()
88 build_cands(opt)
89
90
91 if __name__ == '__main__':
92 main()
93
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parlai/scripts/build_candidates.py b/parlai/scripts/build_candidates.py
--- a/parlai/scripts/build_candidates.py
+++ b/parlai/scripts/build_candidates.py
@@ -23,6 +23,9 @@
def build_cands(opt):
# create repeat label agent and assign it to the specified task
+ if opt['numthreads'] > 1:
+ # Broken in hogwild mode. Just fall back to single processing mode
+ opt['numthreads'] = 1
agent = RepeatLabelAgent(opt)
world = create_task(opt, agent)
if opt['outfile'] is None:
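In plain terms, the patch above sidesteps Hogwild (multiprocessing) mode while candidates are being built: `world.get_acts()` is what raises in the traceback, and it only breaks when `numthreads > 1` turns the world into a `HogwildWorld`. A standalone sketch of the same guard — the helper name and the `.get(...)` default are illustrative, not part of ParlAI:

```python
def force_single_threaded(opt):
    # HogwildWorld (used when numthreads > 1) has no `acts` attribute, so
    # world.get_acts() raises; pinning numthreads to 1 makes create_task()
    # return a plain single-process world instead.
    if opt.get('numthreads', 1) > 1:
        opt['numthreads'] = 1
    return opt

# Roughly: opt = force_single_threaded(opt) right before create_task(opt, agent).
```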
| {"golden_diff": "diff --git a/parlai/scripts/build_candidates.py b/parlai/scripts/build_candidates.py\n--- a/parlai/scripts/build_candidates.py\n+++ b/parlai/scripts/build_candidates.py\n@@ -23,6 +23,9 @@\n \n def build_cands(opt):\n # create repeat label agent and assign it to the specified task\n+ if opt['numthreads'] > 1:\n+ # Broken in hogwild mode. Just fall back to single processing mode\n+ opt['numthreads'] = 1\n agent = RepeatLabelAgent(opt)\n world = create_task(opt, agent)\n if opt['outfile'] is None:\n", "issue": "Quickstart AttributeError: 'HogwildWorld' object has no attribute 'acts'\n**Bug description**\r\nWhen going through the ParlAI [quickstart](https://parl.ai/docs/tutorial_quick.html#install), I got the following error:\r\n\r\n``` python\r\nTraceback (most recent call last):\r\n File \"examples/interactive.py\", line 18, in <module>\r\n interactive(opt, print_parser=parser)\r\n File \"/root/ParlAI/parlai/scripts/interactive.py\", line 68, in interactive\r\n agent = create_agent(opt, requireModelExists=True)\r\n File \"/root/ParlAI/parlai/core/agents.py\", line 683, in create_agent\r\n model = load_agent_module(opt)\r\n File \"/root/ParlAI/parlai/core/agents.py\", line 548, in load_agent_module\r\n return model_class(new_opt)\r\n File \"/root/ParlAI/parlai/agents/memnn/memnn.py\", line 86, in __init__\r\n super().__init__(opt, shared)\r\n File \"/root/ParlAI/parlai/core/torch_ranker_agent.py\", line 135, in __init__\r\n super().__init__(opt, shared)\r\n File \"/root/ParlAI/parlai/core/torch_agent.py\", line 737, in __init__\r\n self.set_interactive_mode(opt['interactive_mode'], shared)\r\n File \"/root/ParlAI/parlai/core/torch_ranker_agent.py\", line 206, in set_interactive_mode\r\n path = self.get_task_candidates_path()\r\n File \"/root/ParlAI/parlai/core/torch_ranker_agent.py\", line 230, in get_task_candidates_path\r\n build_cands(opt)\r\n File \"/root/ParlAI/parlai/scripts/build_candidates.py\", line 47, in build_cands\r\n acts = world.get_acts()[0]\r\n File \"/root/ParlAI/parlai/core/worlds.py\", line 162, in get_acts\r\n return self.acts\r\nAttributeError: 'HogwildWorld' object has no attribute 'acts'\r\n```\r\n\r\n**While running**\r\n```python\r\npython examples/interactive.py -mf /tmp/babi_memnn -ecands vocab\r\n```\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"Build the candidate responses for a retrieval model.\n\nExamples\n--------\n\n.. code-block:: shell\n\n python build_candidates.py -t convai2 --outfile /tmp/cands.txt\n\"\"\"\n\nfrom parlai.core.params import ParlaiParser\nfrom parlai.agents.repeat_label.repeat_label import RepeatLabelAgent\nfrom parlai.core.worlds import create_task\nfrom parlai.core.utils import TimeLogger\nimport random\nimport tempfile\n\n\ndef build_cands(opt):\n # create repeat label agent and assign it to the specified task\n agent = RepeatLabelAgent(opt)\n world = create_task(opt, agent)\n if opt['outfile'] is None:\n outfile = tempfile.mkstemp(\n prefix='{}_{}_'.format(opt['task'], opt['datatype']), suffix='.txt'\n )[1]\n else:\n outfile = opt['outfile']\n\n if opt.get('num_examples', -1) == -1:\n num_examples = world.num_examples()\n else:\n num_examples = opt['num_examples']\n log_timer = TimeLogger()\n\n print('[ starting to build candidates from task.. 
(ex:' + str(num_examples) + ')]')\n print('[ saving output to {} ]'.format(outfile))\n cands = []\n for _ in range(num_examples):\n world.parley()\n # We get the acts of the first agent, which is the teacher.\n acts = world.get_acts()[0]\n if isinstance(acts, dict):\n # We turn into a batch of 1 example, in case batching is being used.\n acts = [acts]\n for a in acts:\n candidate = a.get('labels', a.get('eval_labels', None))\n if candidate is not None:\n candidate = candidate[0]\n cands.append(candidate)\n if log_timer.time() > opt['log_every_n_secs']:\n text, _log = log_timer.log(world.total_parleys, world.num_examples())\n print(text)\n if world.epoch_done():\n print('EPOCH DONE')\n break\n fw = open(outfile, 'w')\n fw.write('\\n'.join(cands))\n fw.close()\n\n\ndef main():\n random.seed(42)\n # Get command line arguments\n parser = ParlaiParser()\n parser.add_argument(\n '-n',\n '--num-examples',\n default=-1,\n type=int,\n help='Total number of exs to convert, -1 to convert all examples',\n )\n parser.add_argument(\n '-of',\n '--outfile',\n default=None,\n type=str,\n help='Output file where to save, by default will be created in /tmp',\n )\n parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)\n parser.set_defaults(datatype='train:evalmode')\n opt = parser.parse_args()\n build_cands(opt)\n\n\nif __name__ == '__main__':\n main()\n", "path": "parlai/scripts/build_candidates.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"Build the candidate responses for a retrieval model.\n\nExamples\n--------\n\n.. code-block:: shell\n\n python build_candidates.py -t convai2 --outfile /tmp/cands.txt\n\"\"\"\n\nfrom parlai.core.params import ParlaiParser\nfrom parlai.agents.repeat_label.repeat_label import RepeatLabelAgent\nfrom parlai.core.worlds import create_task\nfrom parlai.core.utils import TimeLogger\nimport random\nimport tempfile\n\n\ndef build_cands(opt):\n # create repeat label agent and assign it to the specified task\n if opt['numthreads'] > 1:\n # Broken in hogwild mode. Just fall back to single processing mode\n opt['numthreads'] = 1\n agent = RepeatLabelAgent(opt)\n world = create_task(opt, agent)\n if opt['outfile'] is None:\n outfile = tempfile.mkstemp(\n prefix='{}_{}_'.format(opt['task'], opt['datatype']), suffix='.txt'\n )[1]\n else:\n outfile = opt['outfile']\n\n if opt.get('num_examples', -1) == -1:\n num_examples = world.num_examples()\n else:\n num_examples = opt['num_examples']\n log_timer = TimeLogger()\n\n print('[ starting to build candidates from task.. 
(ex:' + str(num_examples) + ')]')\n print('[ saving output to {} ]'.format(outfile))\n cands = []\n for _ in range(num_examples):\n world.parley()\n # We get the acts of the first agent, which is the teacher.\n acts = world.get_acts()[0]\n if isinstance(acts, dict):\n # We turn into a batch of 1 example, in case batching is being used.\n acts = [acts]\n for a in acts:\n candidate = a.get('labels', a.get('eval_labels', None))\n if candidate is not None:\n candidate = candidate[0]\n cands.append(candidate)\n if log_timer.time() > opt['log_every_n_secs']:\n text, _log = log_timer.log(world.total_parleys, world.num_examples())\n print(text)\n if world.epoch_done():\n print('EPOCH DONE')\n break\n fw = open(outfile, 'w')\n fw.write('\\n'.join(cands))\n fw.close()\n\n\ndef main():\n random.seed(42)\n # Get command line arguments\n parser = ParlaiParser()\n parser.add_argument(\n '-n',\n '--num-examples',\n default=-1,\n type=int,\n help='Total number of exs to convert, -1 to convert all examples',\n )\n parser.add_argument(\n '-of',\n '--outfile',\n default=None,\n type=str,\n help='Output file where to save, by default will be created in /tmp',\n )\n parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)\n parser.set_defaults(datatype='train:evalmode')\n opt = parser.parse_args()\n build_cands(opt)\n\n\nif __name__ == '__main__':\n main()\n", "path": "parlai/scripts/build_candidates.py"}]} |
gh_patches_debug_1259 | rasdani/github-patches | git_diff | adamchainz__django-cors-headers-232 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
test_get_replaces_referer_when_secure() failing under Django 1.11
Running the django-cors-headers test suite under Django 1.11a1 results in one test failure:
```
________________ RefererReplacementCorsMiddlewareTests.test_get_replaces_referer_when_secure _________________
self = <tests.test_middleware.RefererReplacementCorsMiddlewareTests testMethod=test_get_replaces_referer_when_secure>
def test_get_replaces_referer_when_secure(self):
resp = self.client.get(
'/',
HTTP_FAKE_SECURE='true',
HTTP_HOST='example.com',
HTTP_ORIGIN='https://example.org',
HTTP_REFERER='https://example.org/foo'
)
> assert resp.wsgi_request.META['HTTP_REFERER'] == 'https://example.com/'
E AssertionError: assert 'https://example.org/foo' == 'https://example.com/'
E - https://example.org/foo
E ? ^^ ---
E + https://example.com/
E ? + ^
tests/test_middleware.py:341: AssertionError
==================================== 1 failed, 57 passed in 3.80 seconds =====================================
```
See:
https://travis-ci.org/ottoyiu/django-cors-headers/builds/193731215#L328
Changes on Django's side that might be related:
https://github.com/django/django/commit/ddf169cdaca91e92dd5bfe6796bb6f38369ecb68
https://github.com/django/django/commit/7fe2d8d940fdddd1a02c4754008a27060c4a03e9
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import codecs
2 import os
3 import re
4
5 from setuptools import setup
6
7
8 def get_version(package):
9 """
10 Return package version as listed in `__version__` in `__init__.py`.
11 """
12 with codecs.open(os.path.join(package, '__init__.py'), 'r', 'utf-8') as fp:
13 init_py = fp.read()
14 return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
15
16
17 version = get_version('corsheaders')
18
19 with codecs.open('README.rst', 'r', 'utf-8') as readme_file:
20 readme = readme_file.read()
21
22 with codecs.open('HISTORY.rst', 'r', 'utf-8') as history_file:
23 history = history_file.read()
24
25 setup(
26 name='django-cors-headers',
27 version=version,
28 description=(
29 'django-cors-headers is a Django application for handling the server '
30 'headers required for Cross-Origin Resource Sharing (CORS).'
31 ),
32 long_description=readme + '\n\n' + history,
33 author='Otto Yiu',
34 author_email='[email protected]',
35 url='https://github.com/ottoyiu/django-cors-headers',
36 packages=['corsheaders'],
37 license='MIT License',
38 keywords='django cors middleware rest api',
39 platforms=['any'],
40 classifiers=[
41 'Development Status :: 5 - Production/Stable',
42 'Environment :: Web Environment',
43 'Framework :: Django',
44 'Framework :: Django :: 1.8',
45 'Framework :: Django :: 1.9',
46 'Framework :: Django :: 1.10',
47 'Intended Audience :: Developers',
48 'License :: OSI Approved :: MIT License',
49 'Operating System :: OS Independent',
50 'Programming Language :: Python',
51 'Programming Language :: Python :: 2',
52 'Programming Language :: Python :: 2.7',
53 'Programming Language :: Python :: 3',
54 'Programming Language :: Python :: 3.5',
55 'Programming Language :: Python :: 3.6',
56 'Topic :: Software Development :: Libraries :: Application Frameworks',
57 'Topic :: Software Development :: Libraries :: Python Modules',
58 ],
59 install_requires=[],
60 )
61
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -44,6 +44,7 @@
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.9',
'Framework :: Django :: 1.10',
+ 'Framework :: Django :: 1.11',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
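Worth noting: the patch shown here only touches packaging metadata — it adds the `Framework :: Django :: 1.11` trove classifier to `setup.py` so the package advertises support for 1.11. A rough way to inspect which Django versions an installed build declares (requires Python 3.8+ for `importlib.metadata`; the filtering is illustrative):

```python
from importlib.metadata import metadata

meta = metadata("django-cors-headers")
django_versions = [
    c for c in meta.get_all("Classifier", [])
    if c.startswith("Framework :: Django ::")
]
print(django_versions)
# With the patch applied and released, this list should include
# 'Framework :: Django :: 1.11'.
```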
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -44,6 +44,7 @@\n 'Framework :: Django :: 1.8',\n 'Framework :: Django :: 1.9',\n 'Framework :: Django :: 1.10',\n+ 'Framework :: Django :: 1.11',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n", "issue": "test_get_replaces_referer_when_secure() failing under Django 1.11\nRunning the django-cors-headers test suite under Django 1.11a1 results in one test failure:\r\n```\r\n________________ RefererReplacementCorsMiddlewareTests.test_get_replaces_referer_when_secure _________________\r\n\r\n\r\nself = <tests.test_middleware.RefererReplacementCorsMiddlewareTests testMethod=test_get_replaces_referer_when_secure>\r\n\r\n def test_get_replaces_referer_when_secure(self):\r\n resp = self.client.get(\r\n '/',\r\n HTTP_FAKE_SECURE='true',\r\n HTTP_HOST='example.com',\r\n HTTP_ORIGIN='https://example.org',\r\n HTTP_REFERER='https://example.org/foo'\r\n )\r\n> assert resp.wsgi_request.META['HTTP_REFERER'] == 'https://example.com/'\r\nE AssertionError: assert 'https://example.org/foo' == 'https://example.com/'\r\nE - https://example.org/foo\r\nE ? ^^ ---\r\nE + https://example.com/\r\nE ? + ^\r\n\r\ntests/test_middleware.py:341: AssertionError\r\n==================================== 1 failed, 57 passed in 3.80 seconds =====================================\r\n```\r\n\r\nSee:\r\nhttps://travis-ci.org/ottoyiu/django-cors-headers/builds/193731215#L328\r\n\r\nChanges on Django's side that might be related:\r\nhttps://github.com/django/django/commit/ddf169cdaca91e92dd5bfe6796bb6f38369ecb68\r\nhttps://github.com/django/django/commit/7fe2d8d940fdddd1a02c4754008a27060c4a03e9\n", "before_files": [{"content": "import codecs\nimport os\nimport re\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `__init__.py`.\n \"\"\"\n with codecs.open(os.path.join(package, '__init__.py'), 'r', 'utf-8') as fp:\n init_py = fp.read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\nversion = get_version('corsheaders')\n\nwith codecs.open('README.rst', 'r', 'utf-8') as readme_file:\n readme = readme_file.read()\n\nwith codecs.open('HISTORY.rst', 'r', 'utf-8') as history_file:\n history = history_file.read()\n\nsetup(\n name='django-cors-headers',\n version=version,\n description=(\n 'django-cors-headers is a Django application for handling the server '\n 'headers required for Cross-Origin Resource Sharing (CORS).'\n ),\n long_description=readme + '\\n\\n' + history,\n author='Otto Yiu',\n author_email='[email protected]',\n url='https://github.com/ottoyiu/django-cors-headers',\n packages=['corsheaders'],\n license='MIT License',\n keywords='django cors middleware rest api',\n platforms=['any'],\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Framework :: Django',\n 'Framework :: Django :: 1.8',\n 'Framework :: Django :: 1.9',\n 'Framework :: Django :: 1.10',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: 
Software Development :: Libraries :: Python Modules',\n ],\n install_requires=[],\n)\n", "path": "setup.py"}], "after_files": [{"content": "import codecs\nimport os\nimport re\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `__init__.py`.\n \"\"\"\n with codecs.open(os.path.join(package, '__init__.py'), 'r', 'utf-8') as fp:\n init_py = fp.read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\nversion = get_version('corsheaders')\n\nwith codecs.open('README.rst', 'r', 'utf-8') as readme_file:\n readme = readme_file.read()\n\nwith codecs.open('HISTORY.rst', 'r', 'utf-8') as history_file:\n history = history_file.read()\n\nsetup(\n name='django-cors-headers',\n version=version,\n description=(\n 'django-cors-headers is a Django application for handling the server '\n 'headers required for Cross-Origin Resource Sharing (CORS).'\n ),\n long_description=readme + '\\n\\n' + history,\n author='Otto Yiu',\n author_email='[email protected]',\n url='https://github.com/ottoyiu/django-cors-headers',\n packages=['corsheaders'],\n license='MIT License',\n keywords='django cors middleware rest api',\n platforms=['any'],\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Framework :: Django',\n 'Framework :: Django :: 1.8',\n 'Framework :: Django :: 1.9',\n 'Framework :: Django :: 1.10',\n 'Framework :: Django :: 1.11',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n install_requires=[],\n)\n", "path": "setup.py"}]} |
gh_patches_debug_1260 | rasdani/github-patches | git_diff | tobymao__sqlglot-2739 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`explode_to_unnest` transformation generates query that cannot be executed with trino
sqlglot code:
```
In [8]: import sqlglot as sg
In [9]: print(
...: sg.parse_one(
...: "select unnest(t.x) from (values [1, 2, 3] as t (x))", read="duckdb"
...: ).sql("trino", pretty=True)
...: )
SELECT
IF(_u.pos = _u_2.pos_2, _u_2.col) AS col
FROM (VALUES
(ARRAY[1, 2, 3])) AS t(x), UNNEST(SEQUENCE(1, GREATEST(CARDINALITY(t.x)))) AS _u(pos)
CROSS JOIN UNNEST(t.x) WITH ORDINALITY AS _u_2(col, pos_2)
WHERE
_u.pos = _u_2.pos_2
OR (
_u.pos > CARDINALITY(t.x) AND _u_2.pos_2 = CARDINALITY(t.x)
)
```
trino-cli:
```
trino:default> SELECT
-> IF(_u.pos = _u_2.pos_2, _u_2.col) AS col
-> FROM (VALUES
-> (ARRAY[1, 2, 3])) AS t(x), UNNEST(SEQUENCE(1, GREATEST(CARDINALITY(t.x)))) AS _u(pos)
-> CROSS JOIN UNNEST(t.x) WITH ORDINALITY AS _u_2(col, pos_2)
-> WHERE
-> _u.pos = _u_2.pos_2
-> OR (
-> _u.pos > CARDINALITY(t.x) AND _u_2.pos_2 = CARDINALITY(t.x)
-> );
Query 20231230_105739_28099_gh8pj failed: line 4:70: Column 't.x' cannot be resolved
```
Changing the first `,` to be `CROSS JOIN` instead fixes the issue:
```
trino:default> SELECT
-> IF(_u.pos = _u_2.pos_2, _u_2.col) AS col
-> FROM (VALUES
-> (ARRAY[1, 2, 3])) AS t(x) CROSS JOIN UNNEST(SEQUENCE(1, GREATEST(CARDINALITY(t.x)))) AS _u(pos)
-> CROSS JOIN UNNEST(t.x) WITH ORDINALITY AS _u_2(col, pos_2)
-> WHERE
-> _u.pos = _u_2.pos_2
-> OR (
-> _u.pos > CARDINALITY(t.x) AND _u_2.pos_2 = CARDINALITY(t.x)
-> );
col
-----
1
2
3
(3 rows)
Query 20231230_105747_28107_gh8pj, FINISHED, 1 node
Splits: 17 total, 17 done (100.00%)
0.08 [0 rows, 0B] [0 rows/s, 0B/s]
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sqlglot/transforms.py`
Content:
```
1 from __future__ import annotations
2
3 import typing as t
4
5 from sqlglot import expressions as exp
6 from sqlglot.helper import find_new_name, name_sequence
7
8 if t.TYPE_CHECKING:
9 from sqlglot.generator import Generator
10
11
12 def unalias_group(expression: exp.Expression) -> exp.Expression:
13 """
14 Replace references to select aliases in GROUP BY clauses.
15
16 Example:
17 >>> import sqlglot
18 >>> sqlglot.parse_one("SELECT a AS b FROM x GROUP BY b").transform(unalias_group).sql()
19 'SELECT a AS b FROM x GROUP BY 1'
20
21 Args:
22 expression: the expression that will be transformed.
23
24 Returns:
25 The transformed expression.
26 """
27 if isinstance(expression, exp.Group) and isinstance(expression.parent, exp.Select):
28 aliased_selects = {
29 e.alias: i
30 for i, e in enumerate(expression.parent.expressions, start=1)
31 if isinstance(e, exp.Alias)
32 }
33
34 for group_by in expression.expressions:
35 if (
36 isinstance(group_by, exp.Column)
37 and not group_by.table
38 and group_by.name in aliased_selects
39 ):
40 group_by.replace(exp.Literal.number(aliased_selects.get(group_by.name)))
41
42 return expression
43
44
45 def eliminate_distinct_on(expression: exp.Expression) -> exp.Expression:
46 """
47 Convert SELECT DISTINCT ON statements to a subquery with a window function.
48
49 This is useful for dialects that don't support SELECT DISTINCT ON but support window functions.
50
51 Args:
52 expression: the expression that will be transformed.
53
54 Returns:
55 The transformed expression.
56 """
57 if (
58 isinstance(expression, exp.Select)
59 and expression.args.get("distinct")
60 and expression.args["distinct"].args.get("on")
61 and isinstance(expression.args["distinct"].args["on"], exp.Tuple)
62 ):
63 distinct_cols = expression.args["distinct"].pop().args["on"].expressions
64 outer_selects = expression.selects
65 row_number = find_new_name(expression.named_selects, "_row_number")
66 window = exp.Window(this=exp.RowNumber(), partition_by=distinct_cols)
67 order = expression.args.get("order")
68
69 if order:
70 window.set("order", order.pop())
71 else:
72 window.set("order", exp.Order(expressions=[c.copy() for c in distinct_cols]))
73
74 window = exp.alias_(window, row_number)
75 expression.select(window, copy=False)
76
77 return (
78 exp.select(*outer_selects, copy=False)
79 .from_(expression.subquery("_t", copy=False), copy=False)
80 .where(exp.column(row_number).eq(1), copy=False)
81 )
82
83 return expression
84
85
86 def eliminate_qualify(expression: exp.Expression) -> exp.Expression:
87 """
88 Convert SELECT statements that contain the QUALIFY clause into subqueries, filtered equivalently.
89
90 The idea behind this transformation can be seen in Snowflake's documentation for QUALIFY:
91 https://docs.snowflake.com/en/sql-reference/constructs/qualify
92
93 Some dialects don't support window functions in the WHERE clause, so we need to include them as
94 projections in the subquery, in order to refer to them in the outer filter using aliases. Also,
95 if a column is referenced in the QUALIFY clause but is not selected, we need to include it too,
96 otherwise we won't be able to refer to it in the outer query's WHERE clause.
97 """
98 if isinstance(expression, exp.Select) and expression.args.get("qualify"):
99 taken = set(expression.named_selects)
100 for select in expression.selects:
101 if not select.alias_or_name:
102 alias = find_new_name(taken, "_c")
103 select.replace(exp.alias_(select, alias))
104 taken.add(alias)
105
106 outer_selects = exp.select(*[select.alias_or_name for select in expression.selects])
107 qualify_filters = expression.args["qualify"].pop().this
108
109 select_candidates = exp.Window if expression.is_star else (exp.Window, exp.Column)
110 for expr in qualify_filters.find_all(select_candidates):
111 if isinstance(expr, exp.Window):
112 alias = find_new_name(expression.named_selects, "_w")
113 expression.select(exp.alias_(expr, alias), copy=False)
114 column = exp.column(alias)
115
116 if isinstance(expr.parent, exp.Qualify):
117 qualify_filters = column
118 else:
119 expr.replace(column)
120 elif expr.name not in expression.named_selects:
121 expression.select(expr.copy(), copy=False)
122
123 return outer_selects.from_(expression.subquery(alias="_t", copy=False), copy=False).where(
124 qualify_filters, copy=False
125 )
126
127 return expression
128
129
130 def remove_precision_parameterized_types(expression: exp.Expression) -> exp.Expression:
131 """
132 Some dialects only allow the precision for parameterized types to be defined in the DDL and not in
133 other expressions. This transforms removes the precision from parameterized types in expressions.
134 """
135 for node in expression.find_all(exp.DataType):
136 node.set(
137 "expressions", [e for e in node.expressions if not isinstance(e, exp.DataTypeParam)]
138 )
139
140 return expression
141
142
143 def unnest_to_explode(expression: exp.Expression) -> exp.Expression:
144 """Convert cross join unnest into lateral view explode."""
145 if isinstance(expression, exp.Select):
146 for join in expression.args.get("joins") or []:
147 unnest = join.this
148
149 if isinstance(unnest, exp.Unnest):
150 alias = unnest.args.get("alias")
151 udtf = exp.Posexplode if unnest.args.get("offset") else exp.Explode
152
153 expression.args["joins"].remove(join)
154
155 for e, column in zip(unnest.expressions, alias.columns if alias else []):
156 expression.append(
157 "laterals",
158 exp.Lateral(
159 this=udtf(this=e),
160 view=True,
161 alias=exp.TableAlias(this=alias.this, columns=[column]), # type: ignore
162 ),
163 )
164
165 return expression
166
167
168 def explode_to_unnest(index_offset: int = 0) -> t.Callable[[exp.Expression], exp.Expression]:
169 """Convert explode/posexplode into unnest."""
170
171 def _explode_to_unnest(expression: exp.Expression) -> exp.Expression:
172 if isinstance(expression, exp.Select):
173 from sqlglot.optimizer.scope import Scope
174
175 taken_select_names = set(expression.named_selects)
176 taken_source_names = {name for name, _ in Scope(expression).references}
177
178 def new_name(names: t.Set[str], name: str) -> str:
179 name = find_new_name(names, name)
180 names.add(name)
181 return name
182
183 arrays: t.List[exp.Condition] = []
184 series_alias = new_name(taken_select_names, "pos")
185 series = exp.alias_(
186 exp.Unnest(
187 expressions=[exp.GenerateSeries(start=exp.Literal.number(index_offset))]
188 ),
189 new_name(taken_source_names, "_u"),
190 table=[series_alias],
191 )
192
193 # we use list here because expression.selects is mutated inside the loop
194 for select in list(expression.selects):
195 explode = select.find(exp.Explode)
196
197 if explode:
198 pos_alias = ""
199 explode_alias = ""
200
201 if isinstance(select, exp.Alias):
202 explode_alias = select.args["alias"]
203 alias = select
204 elif isinstance(select, exp.Aliases):
205 pos_alias = select.aliases[0]
206 explode_alias = select.aliases[1]
207 alias = select.replace(exp.alias_(select.this, "", copy=False))
208 else:
209 alias = select.replace(exp.alias_(select, ""))
210 explode = alias.find(exp.Explode)
211 assert explode
212
213 is_posexplode = isinstance(explode, exp.Posexplode)
214 explode_arg = explode.this
215
216 # This ensures that we won't use [POS]EXPLODE's argument as a new selection
217 if isinstance(explode_arg, exp.Column):
218 taken_select_names.add(explode_arg.output_name)
219
220 unnest_source_alias = new_name(taken_source_names, "_u")
221
222 if not explode_alias:
223 explode_alias = new_name(taken_select_names, "col")
224
225 if is_posexplode:
226 pos_alias = new_name(taken_select_names, "pos")
227
228 if not pos_alias:
229 pos_alias = new_name(taken_select_names, "pos")
230
231 alias.set("alias", exp.to_identifier(explode_alias))
232
233 series_table_alias = series.args["alias"].this
234 column = exp.If(
235 this=exp.column(series_alias, table=series_table_alias).eq(
236 exp.column(pos_alias, table=unnest_source_alias)
237 ),
238 true=exp.column(explode_alias, table=unnest_source_alias),
239 )
240
241 explode.replace(column)
242
243 if is_posexplode:
244 expressions = expression.expressions
245 expressions.insert(
246 expressions.index(alias) + 1,
247 exp.If(
248 this=exp.column(series_alias, table=series_table_alias).eq(
249 exp.column(pos_alias, table=unnest_source_alias)
250 ),
251 true=exp.column(pos_alias, table=unnest_source_alias),
252 ).as_(pos_alias),
253 )
254 expression.set("expressions", expressions)
255
256 if not arrays:
257 if expression.args.get("from"):
258 expression.join(series, copy=False)
259 else:
260 expression.from_(series, copy=False)
261
262 size: exp.Condition = exp.ArraySize(this=explode_arg.copy())
263 arrays.append(size)
264
265 # trino doesn't support left join unnest with on conditions
266 # if it did, this would be much simpler
267 expression.join(
268 exp.alias_(
269 exp.Unnest(
270 expressions=[explode_arg.copy()],
271 offset=exp.to_identifier(pos_alias),
272 ),
273 unnest_source_alias,
274 table=[explode_alias],
275 ),
276 join_type="CROSS",
277 copy=False,
278 )
279
280 if index_offset != 1:
281 size = size - 1
282
283 expression.where(
284 exp.column(series_alias, table=series_table_alias)
285 .eq(exp.column(pos_alias, table=unnest_source_alias))
286 .or_(
287 (exp.column(series_alias, table=series_table_alias) > size).and_(
288 exp.column(pos_alias, table=unnest_source_alias).eq(size)
289 )
290 ),
291 copy=False,
292 )
293
294 if arrays:
295 end: exp.Condition = exp.Greatest(this=arrays[0], expressions=arrays[1:])
296
297 if index_offset != 1:
298 end = end - (1 - index_offset)
299 series.expressions[0].set("end", end)
300
301 return expression
302
303 return _explode_to_unnest
304
305
306 PERCENTILES = (exp.PercentileCont, exp.PercentileDisc)
307
308
309 def add_within_group_for_percentiles(expression: exp.Expression) -> exp.Expression:
310 """Transforms percentiles by adding a WITHIN GROUP clause to them."""
311 if (
312 isinstance(expression, PERCENTILES)
313 and not isinstance(expression.parent, exp.WithinGroup)
314 and expression.expression
315 ):
316 column = expression.this.pop()
317 expression.set("this", expression.expression.pop())
318 order = exp.Order(expressions=[exp.Ordered(this=column)])
319 expression = exp.WithinGroup(this=expression, expression=order)
320
321 return expression
322
323
324 def remove_within_group_for_percentiles(expression: exp.Expression) -> exp.Expression:
325 """Transforms percentiles by getting rid of their corresponding WITHIN GROUP clause."""
326 if (
327 isinstance(expression, exp.WithinGroup)
328 and isinstance(expression.this, PERCENTILES)
329 and isinstance(expression.expression, exp.Order)
330 ):
331 quantile = expression.this.this
332 input_value = t.cast(exp.Ordered, expression.find(exp.Ordered)).this
333 return expression.replace(exp.ApproxQuantile(this=input_value, quantile=quantile))
334
335 return expression
336
337
338 def add_recursive_cte_column_names(expression: exp.Expression) -> exp.Expression:
339 """Uses projection output names in recursive CTE definitions to define the CTEs' columns."""
340 if isinstance(expression, exp.With) and expression.recursive:
341 next_name = name_sequence("_c_")
342
343 for cte in expression.expressions:
344 if not cte.args["alias"].columns:
345 query = cte.this
346 if isinstance(query, exp.Union):
347 query = query.this
348
349 cte.args["alias"].set(
350 "columns",
351 [exp.to_identifier(s.alias_or_name or next_name()) for s in query.selects],
352 )
353
354 return expression
355
356
357 def epoch_cast_to_ts(expression: exp.Expression) -> exp.Expression:
358 """Replace 'epoch' in casts by the equivalent date literal."""
359 if (
360 isinstance(expression, (exp.Cast, exp.TryCast))
361 and expression.name.lower() == "epoch"
362 and expression.to.this in exp.DataType.TEMPORAL_TYPES
363 ):
364 expression.this.replace(exp.Literal.string("1970-01-01 00:00:00"))
365
366 return expression
367
368
369 def eliminate_semi_and_anti_joins(expression: exp.Expression) -> exp.Expression:
370 """Convert SEMI and ANTI joins into equivalent forms that use EXIST instead."""
371 if isinstance(expression, exp.Select):
372 for join in expression.args.get("joins") or []:
373 on = join.args.get("on")
374 if on and join.kind in ("SEMI", "ANTI"):
375 subquery = exp.select("1").from_(join.this).where(on)
376 exists = exp.Exists(this=subquery)
377 if join.kind == "ANTI":
378 exists = exists.not_(copy=False)
379
380 join.pop()
381 expression.where(exists, copy=False)
382
383 return expression
384
385
386 def eliminate_full_outer_join(expression: exp.Expression) -> exp.Expression:
387 """
388 Converts a query with a FULL OUTER join to a union of identical queries that
389 use LEFT/RIGHT OUTER joins instead. This transformation currently only works
390 for queries that have a single FULL OUTER join.
391 """
392 if isinstance(expression, exp.Select):
393 full_outer_joins = [
394 (index, join)
395 for index, join in enumerate(expression.args.get("joins") or [])
396 if join.side == "FULL"
397 ]
398
399 if len(full_outer_joins) == 1:
400 expression_copy = expression.copy()
401 expression.set("limit", None)
402 index, full_outer_join = full_outer_joins[0]
403 full_outer_join.set("side", "left")
404 expression_copy.args["joins"][index].set("side", "right")
405 expression_copy.args.pop("with", None) # remove CTEs from RIGHT side
406
407 return exp.union(expression, expression_copy, copy=False)
408
409 return expression
410
411
412 def move_ctes_to_top_level(expression: exp.Expression) -> exp.Expression:
413 """
414 Some dialects (e.g. Hive, T-SQL, Spark prior to version 3) only allow CTEs to be
415 defined at the top-level, so for example queries like:
416
417 SELECT * FROM (WITH t(c) AS (SELECT 1) SELECT * FROM t) AS subq
418
419 are invalid in those dialects. This transformation can be used to ensure all CTEs are
420 moved to the top level so that the final SQL code is valid from a syntax standpoint.
421
422 TODO: handle name clashes whilst moving CTEs (it can get quite tricky & costly).
423 """
424 top_level_with = expression.args.get("with")
425 for node in expression.find_all(exp.With):
426 if node.parent is expression:
427 continue
428
429 inner_with = node.pop()
430 if not top_level_with:
431 top_level_with = inner_with
432 expression.set("with", top_level_with)
433 else:
434 if inner_with.recursive:
435 top_level_with.set("recursive", True)
436
437 top_level_with.expressions.extend(inner_with.expressions)
438
439 return expression
440
441
442 def ensure_bools(expression: exp.Expression) -> exp.Expression:
443 """Converts numeric values used in conditions into explicit boolean expressions."""
444 from sqlglot.optimizer.canonicalize import ensure_bools
445
446 def _ensure_bool(node: exp.Expression) -> None:
447 if (
448 node.is_number
449 or node.is_type(exp.DataType.Type.UNKNOWN, *exp.DataType.NUMERIC_TYPES)
450 or (isinstance(node, exp.Column) and not node.type)
451 ):
452 node.replace(node.neq(0))
453
454 for node, *_ in expression.walk():
455 ensure_bools(node, _ensure_bool)
456
457 return expression
458
459
460 def unqualify_columns(expression: exp.Expression) -> exp.Expression:
461 for column in expression.find_all(exp.Column):
462 # We only wanna pop off the table, db, catalog args
463 for part in column.parts[:-1]:
464 part.pop()
465
466 return expression
467
468
469 def preprocess(
470 transforms: t.List[t.Callable[[exp.Expression], exp.Expression]],
471 ) -> t.Callable[[Generator, exp.Expression], str]:
472 """
473 Creates a new transform by chaining a sequence of transformations and converts the resulting
474 expression to SQL, using either the "_sql" method corresponding to the resulting expression,
475 or the appropriate `Generator.TRANSFORMS` function (when applicable -- see below).
476
477 Args:
478 transforms: sequence of transform functions. These will be called in order.
479
480 Returns:
481 Function that can be used as a generator transform.
482 """
483
484 def _to_sql(self, expression: exp.Expression) -> str:
485 expression_type = type(expression)
486
487 expression = transforms[0](expression)
488 for t in transforms[1:]:
489 expression = t(expression)
490
491 _sql_handler = getattr(self, expression.key + "_sql", None)
492 if _sql_handler:
493 return _sql_handler(expression)
494
495 transforms_handler = self.TRANSFORMS.get(type(expression))
496 if transforms_handler:
497 if expression_type is type(expression):
498 if isinstance(expression, exp.Func):
499 return self.function_fallback_sql(expression)
500
501 # Ensures we don't enter an infinite loop. This can happen when the original expression
502 # has the same type as the final expression and there's no _sql method available for it,
503 # because then it'd re-enter _to_sql.
504 raise ValueError(
505 f"Expression type {expression.__class__.__name__} requires a _sql method in order to be transformed."
506 )
507
508 return transforms_handler(self, expression)
509
510 raise ValueError(f"Unsupported expression type {expression.__class__.__name__}.")
511
512 return _to_sql
513
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sqlglot/transforms.py b/sqlglot/transforms.py
--- a/sqlglot/transforms.py
+++ b/sqlglot/transforms.py
@@ -255,7 +255,7 @@
if not arrays:
if expression.args.get("from"):
- expression.join(series, copy=False)
+ expression.join(series, copy=False, join_type="CROSS")
else:
expression.from_(series, copy=False)
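The one-line fix swaps an implicit comma join for an explicit `CROSS JOIN` when the generated `UNNEST(SEQUENCE(...))` source is attached, which is exactly the rewrite the reporter applied by hand in trino-cli. A small sketch of what `join_type="CROSS"` changes in sqlglot's builder output (the tables `t` and `u` are placeholders):

```python
import sqlglot
from sqlglot import exp

select = sqlglot.parse_one("SELECT * FROM t", read="trino")

# Without a join_type the join renders as a comma-separated FROM item,
# which is where Trino stops resolving lateral references like t.x.
print(select.join(exp.to_table("u")).sql("trino"))
# e.g. SELECT * FROM t, u

# With join_type="CROSS" the join is spelled out explicitly.
print(select.join(exp.to_table("u"), join_type="CROSS").sql("trino"))
# e.g. SELECT * FROM t CROSS JOIN u
```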
| {"golden_diff": "diff --git a/sqlglot/transforms.py b/sqlglot/transforms.py\n--- a/sqlglot/transforms.py\n+++ b/sqlglot/transforms.py\n@@ -255,7 +255,7 @@\n \n if not arrays:\n if expression.args.get(\"from\"):\n- expression.join(series, copy=False)\n+ expression.join(series, copy=False, join_type=\"CROSS\")\n else:\n expression.from_(series, copy=False)\n", "issue": "`explode_to_unnest` transformation generates query that cannot be executed with trino\nsqlglot code:\r\n\r\n```\r\nIn [8]: import sqlglot as sg\r\n\r\nIn [9]: print(\r\n ...: sg.parse_one(\r\n ...: \"select unnest(t.x) from (values [1, 2, 3] as t (x))\", read=\"duckdb\"\r\n ...: ).sql(\"trino\", pretty=True)\r\n ...: )\r\nSELECT\r\n IF(_u.pos = _u_2.pos_2, _u_2.col) AS col\r\nFROM (VALUES\r\n (ARRAY[1, 2, 3])) AS t(x), UNNEST(SEQUENCE(1, GREATEST(CARDINALITY(t.x)))) AS _u(pos)\r\nCROSS JOIN UNNEST(t.x) WITH ORDINALITY AS _u_2(col, pos_2)\r\nWHERE\r\n _u.pos = _u_2.pos_2\r\n OR (\r\n _u.pos > CARDINALITY(t.x) AND _u_2.pos_2 = CARDINALITY(t.x)\r\n )\r\n```\r\n\r\ntrino-cli:\r\n\r\n```\r\ntrino:default> SELECT\r\n -> IF(_u.pos = _u_2.pos_2, _u_2.col) AS col\r\n -> FROM (VALUES\r\n -> (ARRAY[1, 2, 3])) AS t(x), UNNEST(SEQUENCE(1, GREATEST(CARDINALITY(t.x)))) AS _u(pos)\r\n -> CROSS JOIN UNNEST(t.x) WITH ORDINALITY AS _u_2(col, pos_2)\r\n -> WHERE\r\n -> _u.pos = _u_2.pos_2\r\n -> OR (\r\n -> _u.pos > CARDINALITY(t.x) AND _u_2.pos_2 = CARDINALITY(t.x)\r\n -> );\r\nQuery 20231230_105739_28099_gh8pj failed: line 4:70: Column 't.x' cannot be resolved\r\n```\r\n\r\nChanging the first `,` to be `CROSS JOIN` instead fixes the issue:\r\n\r\n```\r\ntrino:default> SELECT\r\n -> IF(_u.pos = _u_2.pos_2, _u_2.col) AS col\r\n -> FROM (VALUES\r\n -> (ARRAY[1, 2, 3])) AS t(x) CROSS JOIN UNNEST(SEQUENCE(1, GREATEST(CARDINALITY(t.x)))) AS _u(pos)\r\n -> CROSS JOIN UNNEST(t.x) WITH ORDINALITY AS _u_2(col, pos_2)\r\n -> WHERE\r\n -> _u.pos = _u_2.pos_2\r\n -> OR (\r\n -> _u.pos > CARDINALITY(t.x) AND _u_2.pos_2 = CARDINALITY(t.x)\r\n -> );\r\n col\r\n-----\r\n 1\r\n 2\r\n 3\r\n(3 rows)\r\n\r\nQuery 20231230_105747_28107_gh8pj, FINISHED, 1 node\r\nSplits: 17 total, 17 done (100.00%)\r\n0.08 [0 rows, 0B] [0 rows/s, 0B/s]\r\n```\n", "before_files": [{"content": "from __future__ import annotations\n\nimport typing as t\n\nfrom sqlglot import expressions as exp\nfrom sqlglot.helper import find_new_name, name_sequence\n\nif t.TYPE_CHECKING:\n from sqlglot.generator import Generator\n\n\ndef unalias_group(expression: exp.Expression) -> exp.Expression:\n \"\"\"\n Replace references to select aliases in GROUP BY clauses.\n\n Example:\n >>> import sqlglot\n >>> sqlglot.parse_one(\"SELECT a AS b FROM x GROUP BY b\").transform(unalias_group).sql()\n 'SELECT a AS b FROM x GROUP BY 1'\n\n Args:\n expression: the expression that will be transformed.\n\n Returns:\n The transformed expression.\n \"\"\"\n if isinstance(expression, exp.Group) and isinstance(expression.parent, exp.Select):\n aliased_selects = {\n e.alias: i\n for i, e in enumerate(expression.parent.expressions, start=1)\n if isinstance(e, exp.Alias)\n }\n\n for group_by in expression.expressions:\n if (\n isinstance(group_by, exp.Column)\n and not group_by.table\n and group_by.name in aliased_selects\n ):\n group_by.replace(exp.Literal.number(aliased_selects.get(group_by.name)))\n\n return expression\n\n\ndef eliminate_distinct_on(expression: exp.Expression) -> exp.Expression:\n \"\"\"\n Convert SELECT DISTINCT ON statements to a subquery with a window function.\n\n This is useful for dialects that 
don't support SELECT DISTINCT ON but support window functions.\n\n Args:\n expression: the expression that will be transformed.\n\n Returns:\n The transformed expression.\n \"\"\"\n if (\n isinstance(expression, exp.Select)\n and expression.args.get(\"distinct\")\n and expression.args[\"distinct\"].args.get(\"on\")\n and isinstance(expression.args[\"distinct\"].args[\"on\"], exp.Tuple)\n ):\n distinct_cols = expression.args[\"distinct\"].pop().args[\"on\"].expressions\n outer_selects = expression.selects\n row_number = find_new_name(expression.named_selects, \"_row_number\")\n window = exp.Window(this=exp.RowNumber(), partition_by=distinct_cols)\n order = expression.args.get(\"order\")\n\n if order:\n window.set(\"order\", order.pop())\n else:\n window.set(\"order\", exp.Order(expressions=[c.copy() for c in distinct_cols]))\n\n window = exp.alias_(window, row_number)\n expression.select(window, copy=False)\n\n return (\n exp.select(*outer_selects, copy=False)\n .from_(expression.subquery(\"_t\", copy=False), copy=False)\n .where(exp.column(row_number).eq(1), copy=False)\n )\n\n return expression\n\n\ndef eliminate_qualify(expression: exp.Expression) -> exp.Expression:\n \"\"\"\n Convert SELECT statements that contain the QUALIFY clause into subqueries, filtered equivalently.\n\n The idea behind this transformation can be seen in Snowflake's documentation for QUALIFY:\n https://docs.snowflake.com/en/sql-reference/constructs/qualify\n\n Some dialects don't support window functions in the WHERE clause, so we need to include them as\n projections in the subquery, in order to refer to them in the outer filter using aliases. Also,\n if a column is referenced in the QUALIFY clause but is not selected, we need to include it too,\n otherwise we won't be able to refer to it in the outer query's WHERE clause.\n \"\"\"\n if isinstance(expression, exp.Select) and expression.args.get(\"qualify\"):\n taken = set(expression.named_selects)\n for select in expression.selects:\n if not select.alias_or_name:\n alias = find_new_name(taken, \"_c\")\n select.replace(exp.alias_(select, alias))\n taken.add(alias)\n\n outer_selects = exp.select(*[select.alias_or_name for select in expression.selects])\n qualify_filters = expression.args[\"qualify\"].pop().this\n\n select_candidates = exp.Window if expression.is_star else (exp.Window, exp.Column)\n for expr in qualify_filters.find_all(select_candidates):\n if isinstance(expr, exp.Window):\n alias = find_new_name(expression.named_selects, \"_w\")\n expression.select(exp.alias_(expr, alias), copy=False)\n column = exp.column(alias)\n\n if isinstance(expr.parent, exp.Qualify):\n qualify_filters = column\n else:\n expr.replace(column)\n elif expr.name not in expression.named_selects:\n expression.select(expr.copy(), copy=False)\n\n return outer_selects.from_(expression.subquery(alias=\"_t\", copy=False), copy=False).where(\n qualify_filters, copy=False\n )\n\n return expression\n\n\ndef remove_precision_parameterized_types(expression: exp.Expression) -> exp.Expression:\n \"\"\"\n Some dialects only allow the precision for parameterized types to be defined in the DDL and not in\n other expressions. 
This transforms removes the precision from parameterized types in expressions.\n \"\"\"\n for node in expression.find_all(exp.DataType):\n node.set(\n \"expressions\", [e for e in node.expressions if not isinstance(e, exp.DataTypeParam)]\n )\n\n return expression\n\n\ndef unnest_to_explode(expression: exp.Expression) -> exp.Expression:\n \"\"\"Convert cross join unnest into lateral view explode.\"\"\"\n if isinstance(expression, exp.Select):\n for join in expression.args.get(\"joins\") or []:\n unnest = join.this\n\n if isinstance(unnest, exp.Unnest):\n alias = unnest.args.get(\"alias\")\n udtf = exp.Posexplode if unnest.args.get(\"offset\") else exp.Explode\n\n expression.args[\"joins\"].remove(join)\n\n for e, column in zip(unnest.expressions, alias.columns if alias else []):\n expression.append(\n \"laterals\",\n exp.Lateral(\n this=udtf(this=e),\n view=True,\n alias=exp.TableAlias(this=alias.this, columns=[column]), # type: ignore\n ),\n )\n\n return expression\n\n\ndef explode_to_unnest(index_offset: int = 0) -> t.Callable[[exp.Expression], exp.Expression]:\n \"\"\"Convert explode/posexplode into unnest.\"\"\"\n\n def _explode_to_unnest(expression: exp.Expression) -> exp.Expression:\n if isinstance(expression, exp.Select):\n from sqlglot.optimizer.scope import Scope\n\n taken_select_names = set(expression.named_selects)\n taken_source_names = {name for name, _ in Scope(expression).references}\n\n def new_name(names: t.Set[str], name: str) -> str:\n name = find_new_name(names, name)\n names.add(name)\n return name\n\n arrays: t.List[exp.Condition] = []\n series_alias = new_name(taken_select_names, \"pos\")\n series = exp.alias_(\n exp.Unnest(\n expressions=[exp.GenerateSeries(start=exp.Literal.number(index_offset))]\n ),\n new_name(taken_source_names, \"_u\"),\n table=[series_alias],\n )\n\n # we use list here because expression.selects is mutated inside the loop\n for select in list(expression.selects):\n explode = select.find(exp.Explode)\n\n if explode:\n pos_alias = \"\"\n explode_alias = \"\"\n\n if isinstance(select, exp.Alias):\n explode_alias = select.args[\"alias\"]\n alias = select\n elif isinstance(select, exp.Aliases):\n pos_alias = select.aliases[0]\n explode_alias = select.aliases[1]\n alias = select.replace(exp.alias_(select.this, \"\", copy=False))\n else:\n alias = select.replace(exp.alias_(select, \"\"))\n explode = alias.find(exp.Explode)\n assert explode\n\n is_posexplode = isinstance(explode, exp.Posexplode)\n explode_arg = explode.this\n\n # This ensures that we won't use [POS]EXPLODE's argument as a new selection\n if isinstance(explode_arg, exp.Column):\n taken_select_names.add(explode_arg.output_name)\n\n unnest_source_alias = new_name(taken_source_names, \"_u\")\n\n if not explode_alias:\n explode_alias = new_name(taken_select_names, \"col\")\n\n if is_posexplode:\n pos_alias = new_name(taken_select_names, \"pos\")\n\n if not pos_alias:\n pos_alias = new_name(taken_select_names, \"pos\")\n\n alias.set(\"alias\", exp.to_identifier(explode_alias))\n\n series_table_alias = series.args[\"alias\"].this\n column = exp.If(\n this=exp.column(series_alias, table=series_table_alias).eq(\n exp.column(pos_alias, table=unnest_source_alias)\n ),\n true=exp.column(explode_alias, table=unnest_source_alias),\n )\n\n explode.replace(column)\n\n if is_posexplode:\n expressions = expression.expressions\n expressions.insert(\n expressions.index(alias) + 1,\n exp.If(\n this=exp.column(series_alias, table=series_table_alias).eq(\n exp.column(pos_alias, table=unnest_source_alias)\n 
),\n true=exp.column(pos_alias, table=unnest_source_alias),\n ).as_(pos_alias),\n )\n expression.set(\"expressions\", expressions)\n\n if not arrays:\n if expression.args.get(\"from\"):\n expression.join(series, copy=False)\n else:\n expression.from_(series, copy=False)\n\n size: exp.Condition = exp.ArraySize(this=explode_arg.copy())\n arrays.append(size)\n\n # trino doesn't support left join unnest with on conditions\n # if it did, this would be much simpler\n expression.join(\n exp.alias_(\n exp.Unnest(\n expressions=[explode_arg.copy()],\n offset=exp.to_identifier(pos_alias),\n ),\n unnest_source_alias,\n table=[explode_alias],\n ),\n join_type=\"CROSS\",\n copy=False,\n )\n\n if index_offset != 1:\n size = size - 1\n\n expression.where(\n exp.column(series_alias, table=series_table_alias)\n .eq(exp.column(pos_alias, table=unnest_source_alias))\n .or_(\n (exp.column(series_alias, table=series_table_alias) > size).and_(\n exp.column(pos_alias, table=unnest_source_alias).eq(size)\n )\n ),\n copy=False,\n )\n\n if arrays:\n end: exp.Condition = exp.Greatest(this=arrays[0], expressions=arrays[1:])\n\n if index_offset != 1:\n end = end - (1 - index_offset)\n series.expressions[0].set(\"end\", end)\n\n return expression\n\n return _explode_to_unnest\n\n\nPERCENTILES = (exp.PercentileCont, exp.PercentileDisc)\n\n\ndef add_within_group_for_percentiles(expression: exp.Expression) -> exp.Expression:\n \"\"\"Transforms percentiles by adding a WITHIN GROUP clause to them.\"\"\"\n if (\n isinstance(expression, PERCENTILES)\n and not isinstance(expression.parent, exp.WithinGroup)\n and expression.expression\n ):\n column = expression.this.pop()\n expression.set(\"this\", expression.expression.pop())\n order = exp.Order(expressions=[exp.Ordered(this=column)])\n expression = exp.WithinGroup(this=expression, expression=order)\n\n return expression\n\n\ndef remove_within_group_for_percentiles(expression: exp.Expression) -> exp.Expression:\n \"\"\"Transforms percentiles by getting rid of their corresponding WITHIN GROUP clause.\"\"\"\n if (\n isinstance(expression, exp.WithinGroup)\n and isinstance(expression.this, PERCENTILES)\n and isinstance(expression.expression, exp.Order)\n ):\n quantile = expression.this.this\n input_value = t.cast(exp.Ordered, expression.find(exp.Ordered)).this\n return expression.replace(exp.ApproxQuantile(this=input_value, quantile=quantile))\n\n return expression\n\n\ndef add_recursive_cte_column_names(expression: exp.Expression) -> exp.Expression:\n \"\"\"Uses projection output names in recursive CTE definitions to define the CTEs' columns.\"\"\"\n if isinstance(expression, exp.With) and expression.recursive:\n next_name = name_sequence(\"_c_\")\n\n for cte in expression.expressions:\n if not cte.args[\"alias\"].columns:\n query = cte.this\n if isinstance(query, exp.Union):\n query = query.this\n\n cte.args[\"alias\"].set(\n \"columns\",\n [exp.to_identifier(s.alias_or_name or next_name()) for s in query.selects],\n )\n\n return expression\n\n\ndef epoch_cast_to_ts(expression: exp.Expression) -> exp.Expression:\n \"\"\"Replace 'epoch' in casts by the equivalent date literal.\"\"\"\n if (\n isinstance(expression, (exp.Cast, exp.TryCast))\n and expression.name.lower() == \"epoch\"\n and expression.to.this in exp.DataType.TEMPORAL_TYPES\n ):\n expression.this.replace(exp.Literal.string(\"1970-01-01 00:00:00\"))\n\n return expression\n\n\ndef eliminate_semi_and_anti_joins(expression: exp.Expression) -> exp.Expression:\n \"\"\"Convert SEMI and ANTI joins into equivalent forms that 
use EXIST instead.\"\"\"\n if isinstance(expression, exp.Select):\n for join in expression.args.get(\"joins\") or []:\n on = join.args.get(\"on\")\n if on and join.kind in (\"SEMI\", \"ANTI\"):\n subquery = exp.select(\"1\").from_(join.this).where(on)\n exists = exp.Exists(this=subquery)\n if join.kind == \"ANTI\":\n exists = exists.not_(copy=False)\n\n join.pop()\n expression.where(exists, copy=False)\n\n return expression\n\n\ndef eliminate_full_outer_join(expression: exp.Expression) -> exp.Expression:\n \"\"\"\n Converts a query with a FULL OUTER join to a union of identical queries that\n use LEFT/RIGHT OUTER joins instead. This transformation currently only works\n for queries that have a single FULL OUTER join.\n \"\"\"\n if isinstance(expression, exp.Select):\n full_outer_joins = [\n (index, join)\n for index, join in enumerate(expression.args.get(\"joins\") or [])\n if join.side == \"FULL\"\n ]\n\n if len(full_outer_joins) == 1:\n expression_copy = expression.copy()\n expression.set(\"limit\", None)\n index, full_outer_join = full_outer_joins[0]\n full_outer_join.set(\"side\", \"left\")\n expression_copy.args[\"joins\"][index].set(\"side\", \"right\")\n expression_copy.args.pop(\"with\", None) # remove CTEs from RIGHT side\n\n return exp.union(expression, expression_copy, copy=False)\n\n return expression\n\n\ndef move_ctes_to_top_level(expression: exp.Expression) -> exp.Expression:\n \"\"\"\n Some dialects (e.g. Hive, T-SQL, Spark prior to version 3) only allow CTEs to be\n defined at the top-level, so for example queries like:\n\n SELECT * FROM (WITH t(c) AS (SELECT 1) SELECT * FROM t) AS subq\n\n are invalid in those dialects. This transformation can be used to ensure all CTEs are\n moved to the top level so that the final SQL code is valid from a syntax standpoint.\n\n TODO: handle name clashes whilst moving CTEs (it can get quite tricky & costly).\n \"\"\"\n top_level_with = expression.args.get(\"with\")\n for node in expression.find_all(exp.With):\n if node.parent is expression:\n continue\n\n inner_with = node.pop()\n if not top_level_with:\n top_level_with = inner_with\n expression.set(\"with\", top_level_with)\n else:\n if inner_with.recursive:\n top_level_with.set(\"recursive\", True)\n\n top_level_with.expressions.extend(inner_with.expressions)\n\n return expression\n\n\ndef ensure_bools(expression: exp.Expression) -> exp.Expression:\n \"\"\"Converts numeric values used in conditions into explicit boolean expressions.\"\"\"\n from sqlglot.optimizer.canonicalize import ensure_bools\n\n def _ensure_bool(node: exp.Expression) -> None:\n if (\n node.is_number\n or node.is_type(exp.DataType.Type.UNKNOWN, *exp.DataType.NUMERIC_TYPES)\n or (isinstance(node, exp.Column) and not node.type)\n ):\n node.replace(node.neq(0))\n\n for node, *_ in expression.walk():\n ensure_bools(node, _ensure_bool)\n\n return expression\n\n\ndef unqualify_columns(expression: exp.Expression) -> exp.Expression:\n for column in expression.find_all(exp.Column):\n # We only wanna pop off the table, db, catalog args\n for part in column.parts[:-1]:\n part.pop()\n\n return expression\n\n\ndef preprocess(\n transforms: t.List[t.Callable[[exp.Expression], exp.Expression]],\n) -> t.Callable[[Generator, exp.Expression], str]:\n \"\"\"\n Creates a new transform by chaining a sequence of transformations and converts the resulting\n expression to SQL, using either the \"_sql\" method corresponding to the resulting expression,\n or the appropriate `Generator.TRANSFORMS` function (when applicable -- see below).\n\n 
Args:\n transforms: sequence of transform functions. These will be called in order.\n\n Returns:\n Function that can be used as a generator transform.\n \"\"\"\n\n def _to_sql(self, expression: exp.Expression) -> str:\n expression_type = type(expression)\n\n expression = transforms[0](expression)\n for t in transforms[1:]:\n expression = t(expression)\n\n _sql_handler = getattr(self, expression.key + \"_sql\", None)\n if _sql_handler:\n return _sql_handler(expression)\n\n transforms_handler = self.TRANSFORMS.get(type(expression))\n if transforms_handler:\n if expression_type is type(expression):\n if isinstance(expression, exp.Func):\n return self.function_fallback_sql(expression)\n\n # Ensures we don't enter an infinite loop. This can happen when the original expression\n # has the same type as the final expression and there's no _sql method available for it,\n # because then it'd re-enter _to_sql.\n raise ValueError(\n f\"Expression type {expression.__class__.__name__} requires a _sql method in order to be transformed.\"\n )\n\n return transforms_handler(self, expression)\n\n raise ValueError(f\"Unsupported expression type {expression.__class__.__name__}.\")\n\n return _to_sql\n", "path": "sqlglot/transforms.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport typing as t\n\nfrom sqlglot import expressions as exp\nfrom sqlglot.helper import find_new_name, name_sequence\n\nif t.TYPE_CHECKING:\n from sqlglot.generator import Generator\n\n\ndef unalias_group(expression: exp.Expression) -> exp.Expression:\n \"\"\"\n Replace references to select aliases in GROUP BY clauses.\n\n Example:\n >>> import sqlglot\n >>> sqlglot.parse_one(\"SELECT a AS b FROM x GROUP BY b\").transform(unalias_group).sql()\n 'SELECT a AS b FROM x GROUP BY 1'\n\n Args:\n expression: the expression that will be transformed.\n\n Returns:\n The transformed expression.\n \"\"\"\n if isinstance(expression, exp.Group) and isinstance(expression.parent, exp.Select):\n aliased_selects = {\n e.alias: i\n for i, e in enumerate(expression.parent.expressions, start=1)\n if isinstance(e, exp.Alias)\n }\n\n for group_by in expression.expressions:\n if (\n isinstance(group_by, exp.Column)\n and not group_by.table\n and group_by.name in aliased_selects\n ):\n group_by.replace(exp.Literal.number(aliased_selects.get(group_by.name)))\n\n return expression\n\n\ndef eliminate_distinct_on(expression: exp.Expression) -> exp.Expression:\n \"\"\"\n Convert SELECT DISTINCT ON statements to a subquery with a window function.\n\n This is useful for dialects that don't support SELECT DISTINCT ON but support window functions.\n\n Args:\n expression: the expression that will be transformed.\n\n Returns:\n The transformed expression.\n \"\"\"\n if (\n isinstance(expression, exp.Select)\n and expression.args.get(\"distinct\")\n and expression.args[\"distinct\"].args.get(\"on\")\n and isinstance(expression.args[\"distinct\"].args[\"on\"], exp.Tuple)\n ):\n distinct_cols = expression.args[\"distinct\"].pop().args[\"on\"].expressions\n outer_selects = expression.selects\n row_number = find_new_name(expression.named_selects, \"_row_number\")\n window = exp.Window(this=exp.RowNumber(), partition_by=distinct_cols)\n order = expression.args.get(\"order\")\n\n if order:\n window.set(\"order\", order.pop())\n else:\n window.set(\"order\", exp.Order(expressions=[c.copy() for c in distinct_cols]))\n\n window = exp.alias_(window, row_number)\n expression.select(window, copy=False)\n\n return (\n exp.select(*outer_selects, copy=False)\n 
.from_(expression.subquery(\"_t\", copy=False), copy=False)\n .where(exp.column(row_number).eq(1), copy=False)\n )\n\n return expression\n\n\ndef eliminate_qualify(expression: exp.Expression) -> exp.Expression:\n \"\"\"\n Convert SELECT statements that contain the QUALIFY clause into subqueries, filtered equivalently.\n\n The idea behind this transformation can be seen in Snowflake's documentation for QUALIFY:\n https://docs.snowflake.com/en/sql-reference/constructs/qualify\n\n Some dialects don't support window functions in the WHERE clause, so we need to include them as\n projections in the subquery, in order to refer to them in the outer filter using aliases. Also,\n if a column is referenced in the QUALIFY clause but is not selected, we need to include it too,\n otherwise we won't be able to refer to it in the outer query's WHERE clause.\n \"\"\"\n if isinstance(expression, exp.Select) and expression.args.get(\"qualify\"):\n taken = set(expression.named_selects)\n for select in expression.selects:\n if not select.alias_or_name:\n alias = find_new_name(taken, \"_c\")\n select.replace(exp.alias_(select, alias))\n taken.add(alias)\n\n outer_selects = exp.select(*[select.alias_or_name for select in expression.selects])\n qualify_filters = expression.args[\"qualify\"].pop().this\n\n select_candidates = exp.Window if expression.is_star else (exp.Window, exp.Column)\n for expr in qualify_filters.find_all(select_candidates):\n if isinstance(expr, exp.Window):\n alias = find_new_name(expression.named_selects, \"_w\")\n expression.select(exp.alias_(expr, alias), copy=False)\n column = exp.column(alias)\n\n if isinstance(expr.parent, exp.Qualify):\n qualify_filters = column\n else:\n expr.replace(column)\n elif expr.name not in expression.named_selects:\n expression.select(expr.copy(), copy=False)\n\n return outer_selects.from_(expression.subquery(alias=\"_t\", copy=False), copy=False).where(\n qualify_filters, copy=False\n )\n\n return expression\n\n\ndef remove_precision_parameterized_types(expression: exp.Expression) -> exp.Expression:\n \"\"\"\n Some dialects only allow the precision for parameterized types to be defined in the DDL and not in\n other expressions. 
This transforms removes the precision from parameterized types in expressions.\n \"\"\"\n for node in expression.find_all(exp.DataType):\n node.set(\n \"expressions\", [e for e in node.expressions if not isinstance(e, exp.DataTypeParam)]\n )\n\n return expression\n\n\ndef unnest_to_explode(expression: exp.Expression) -> exp.Expression:\n \"\"\"Convert cross join unnest into lateral view explode.\"\"\"\n if isinstance(expression, exp.Select):\n for join in expression.args.get(\"joins\") or []:\n unnest = join.this\n\n if isinstance(unnest, exp.Unnest):\n alias = unnest.args.get(\"alias\")\n udtf = exp.Posexplode if unnest.args.get(\"offset\") else exp.Explode\n\n expression.args[\"joins\"].remove(join)\n\n for e, column in zip(unnest.expressions, alias.columns if alias else []):\n expression.append(\n \"laterals\",\n exp.Lateral(\n this=udtf(this=e),\n view=True,\n alias=exp.TableAlias(this=alias.this, columns=[column]), # type: ignore\n ),\n )\n\n return expression\n\n\ndef explode_to_unnest(index_offset: int = 0) -> t.Callable[[exp.Expression], exp.Expression]:\n \"\"\"Convert explode/posexplode into unnest.\"\"\"\n\n def _explode_to_unnest(expression: exp.Expression) -> exp.Expression:\n if isinstance(expression, exp.Select):\n from sqlglot.optimizer.scope import Scope\n\n taken_select_names = set(expression.named_selects)\n taken_source_names = {name for name, _ in Scope(expression).references}\n\n def new_name(names: t.Set[str], name: str) -> str:\n name = find_new_name(names, name)\n names.add(name)\n return name\n\n arrays: t.List[exp.Condition] = []\n series_alias = new_name(taken_select_names, \"pos\")\n series = exp.alias_(\n exp.Unnest(\n expressions=[exp.GenerateSeries(start=exp.Literal.number(index_offset))]\n ),\n new_name(taken_source_names, \"_u\"),\n table=[series_alias],\n )\n\n # we use list here because expression.selects is mutated inside the loop\n for select in list(expression.selects):\n explode = select.find(exp.Explode)\n\n if explode:\n pos_alias = \"\"\n explode_alias = \"\"\n\n if isinstance(select, exp.Alias):\n explode_alias = select.args[\"alias\"]\n alias = select\n elif isinstance(select, exp.Aliases):\n pos_alias = select.aliases[0]\n explode_alias = select.aliases[1]\n alias = select.replace(exp.alias_(select.this, \"\", copy=False))\n else:\n alias = select.replace(exp.alias_(select, \"\"))\n explode = alias.find(exp.Explode)\n assert explode\n\n is_posexplode = isinstance(explode, exp.Posexplode)\n explode_arg = explode.this\n\n # This ensures that we won't use [POS]EXPLODE's argument as a new selection\n if isinstance(explode_arg, exp.Column):\n taken_select_names.add(explode_arg.output_name)\n\n unnest_source_alias = new_name(taken_source_names, \"_u\")\n\n if not explode_alias:\n explode_alias = new_name(taken_select_names, \"col\")\n\n if is_posexplode:\n pos_alias = new_name(taken_select_names, \"pos\")\n\n if not pos_alias:\n pos_alias = new_name(taken_select_names, \"pos\")\n\n alias.set(\"alias\", exp.to_identifier(explode_alias))\n\n series_table_alias = series.args[\"alias\"].this\n column = exp.If(\n this=exp.column(series_alias, table=series_table_alias).eq(\n exp.column(pos_alias, table=unnest_source_alias)\n ),\n true=exp.column(explode_alias, table=unnest_source_alias),\n )\n\n explode.replace(column)\n\n if is_posexplode:\n expressions = expression.expressions\n expressions.insert(\n expressions.index(alias) + 1,\n exp.If(\n this=exp.column(series_alias, table=series_table_alias).eq(\n exp.column(pos_alias, table=unnest_source_alias)\n 
),\n true=exp.column(pos_alias, table=unnest_source_alias),\n ).as_(pos_alias),\n )\n expression.set(\"expressions\", expressions)\n\n if not arrays:\n if expression.args.get(\"from\"):\n expression.join(series, copy=False, join_type=\"CROSS\")\n else:\n expression.from_(series, copy=False)\n\n size: exp.Condition = exp.ArraySize(this=explode_arg.copy())\n arrays.append(size)\n\n # trino doesn't support left join unnest with on conditions\n # if it did, this would be much simpler\n expression.join(\n exp.alias_(\n exp.Unnest(\n expressions=[explode_arg.copy()],\n offset=exp.to_identifier(pos_alias),\n ),\n unnest_source_alias,\n table=[explode_alias],\n ),\n join_type=\"CROSS\",\n copy=False,\n )\n\n if index_offset != 1:\n size = size - 1\n\n expression.where(\n exp.column(series_alias, table=series_table_alias)\n .eq(exp.column(pos_alias, table=unnest_source_alias))\n .or_(\n (exp.column(series_alias, table=series_table_alias) > size).and_(\n exp.column(pos_alias, table=unnest_source_alias).eq(size)\n )\n ),\n copy=False,\n )\n\n if arrays:\n end: exp.Condition = exp.Greatest(this=arrays[0], expressions=arrays[1:])\n\n if index_offset != 1:\n end = end - (1 - index_offset)\n series.expressions[0].set(\"end\", end)\n\n return expression\n\n return _explode_to_unnest\n\n\nPERCENTILES = (exp.PercentileCont, exp.PercentileDisc)\n\n\ndef add_within_group_for_percentiles(expression: exp.Expression) -> exp.Expression:\n \"\"\"Transforms percentiles by adding a WITHIN GROUP clause to them.\"\"\"\n if (\n isinstance(expression, PERCENTILES)\n and not isinstance(expression.parent, exp.WithinGroup)\n and expression.expression\n ):\n column = expression.this.pop()\n expression.set(\"this\", expression.expression.pop())\n order = exp.Order(expressions=[exp.Ordered(this=column)])\n expression = exp.WithinGroup(this=expression, expression=order)\n\n return expression\n\n\ndef remove_within_group_for_percentiles(expression: exp.Expression) -> exp.Expression:\n \"\"\"Transforms percentiles by getting rid of their corresponding WITHIN GROUP clause.\"\"\"\n if (\n isinstance(expression, exp.WithinGroup)\n and isinstance(expression.this, PERCENTILES)\n and isinstance(expression.expression, exp.Order)\n ):\n quantile = expression.this.this\n input_value = t.cast(exp.Ordered, expression.find(exp.Ordered)).this\n return expression.replace(exp.ApproxQuantile(this=input_value, quantile=quantile))\n\n return expression\n\n\ndef add_recursive_cte_column_names(expression: exp.Expression) -> exp.Expression:\n \"\"\"Uses projection output names in recursive CTE definitions to define the CTEs' columns.\"\"\"\n if isinstance(expression, exp.With) and expression.recursive:\n next_name = name_sequence(\"_c_\")\n\n for cte in expression.expressions:\n if not cte.args[\"alias\"].columns:\n query = cte.this\n if isinstance(query, exp.Union):\n query = query.this\n\n cte.args[\"alias\"].set(\n \"columns\",\n [exp.to_identifier(s.alias_or_name or next_name()) for s in query.selects],\n )\n\n return expression\n\n\ndef epoch_cast_to_ts(expression: exp.Expression) -> exp.Expression:\n \"\"\"Replace 'epoch' in casts by the equivalent date literal.\"\"\"\n if (\n isinstance(expression, (exp.Cast, exp.TryCast))\n and expression.name.lower() == \"epoch\"\n and expression.to.this in exp.DataType.TEMPORAL_TYPES\n ):\n expression.this.replace(exp.Literal.string(\"1970-01-01 00:00:00\"))\n\n return expression\n\n\ndef eliminate_semi_and_anti_joins(expression: exp.Expression) -> exp.Expression:\n \"\"\"Convert SEMI and ANTI joins into 
equivalent forms that use EXIST instead.\"\"\"\n if isinstance(expression, exp.Select):\n for join in expression.args.get(\"joins\") or []:\n on = join.args.get(\"on\")\n if on and join.kind in (\"SEMI\", \"ANTI\"):\n subquery = exp.select(\"1\").from_(join.this).where(on)\n exists = exp.Exists(this=subquery)\n if join.kind == \"ANTI\":\n exists = exists.not_(copy=False)\n\n join.pop()\n expression.where(exists, copy=False)\n\n return expression\n\n\ndef eliminate_full_outer_join(expression: exp.Expression) -> exp.Expression:\n \"\"\"\n Converts a query with a FULL OUTER join to a union of identical queries that\n use LEFT/RIGHT OUTER joins instead. This transformation currently only works\n for queries that have a single FULL OUTER join.\n \"\"\"\n if isinstance(expression, exp.Select):\n full_outer_joins = [\n (index, join)\n for index, join in enumerate(expression.args.get(\"joins\") or [])\n if join.side == \"FULL\"\n ]\n\n if len(full_outer_joins) == 1:\n expression_copy = expression.copy()\n expression.set(\"limit\", None)\n index, full_outer_join = full_outer_joins[0]\n full_outer_join.set(\"side\", \"left\")\n expression_copy.args[\"joins\"][index].set(\"side\", \"right\")\n expression_copy.args.pop(\"with\", None) # remove CTEs from RIGHT side\n\n return exp.union(expression, expression_copy, copy=False)\n\n return expression\n\n\ndef move_ctes_to_top_level(expression: exp.Expression) -> exp.Expression:\n \"\"\"\n Some dialects (e.g. Hive, T-SQL, Spark prior to version 3) only allow CTEs to be\n defined at the top-level, so for example queries like:\n\n SELECT * FROM (WITH t(c) AS (SELECT 1) SELECT * FROM t) AS subq\n\n are invalid in those dialects. This transformation can be used to ensure all CTEs are\n moved to the top level so that the final SQL code is valid from a syntax standpoint.\n\n TODO: handle name clashes whilst moving CTEs (it can get quite tricky & costly).\n \"\"\"\n top_level_with = expression.args.get(\"with\")\n for node in expression.find_all(exp.With):\n if node.parent is expression:\n continue\n\n inner_with = node.pop()\n if not top_level_with:\n top_level_with = inner_with\n expression.set(\"with\", top_level_with)\n else:\n if inner_with.recursive:\n top_level_with.set(\"recursive\", True)\n\n top_level_with.expressions.extend(inner_with.expressions)\n\n return expression\n\n\ndef ensure_bools(expression: exp.Expression) -> exp.Expression:\n \"\"\"Converts numeric values used in conditions into explicit boolean expressions.\"\"\"\n from sqlglot.optimizer.canonicalize import ensure_bools\n\n def _ensure_bool(node: exp.Expression) -> None:\n if (\n node.is_number\n or node.is_type(exp.DataType.Type.UNKNOWN, *exp.DataType.NUMERIC_TYPES)\n or (isinstance(node, exp.Column) and not node.type)\n ):\n node.replace(node.neq(0))\n\n for node, *_ in expression.walk():\n ensure_bools(node, _ensure_bool)\n\n return expression\n\n\ndef unqualify_columns(expression: exp.Expression) -> exp.Expression:\n for column in expression.find_all(exp.Column):\n # We only wanna pop off the table, db, catalog args\n for part in column.parts[:-1]:\n part.pop()\n\n return expression\n\n\ndef preprocess(\n transforms: t.List[t.Callable[[exp.Expression], exp.Expression]],\n) -> t.Callable[[Generator, exp.Expression], str]:\n \"\"\"\n Creates a new transform by chaining a sequence of transformations and converts the resulting\n expression to SQL, using either the \"_sql\" method corresponding to the resulting expression,\n or the appropriate `Generator.TRANSFORMS` function (when 
applicable -- see below).\n\n Args:\n transforms: sequence of transform functions. These will be called in order.\n\n Returns:\n Function that can be used as a generator transform.\n \"\"\"\n\n def _to_sql(self, expression: exp.Expression) -> str:\n expression_type = type(expression)\n\n expression = transforms[0](expression)\n for t in transforms[1:]:\n expression = t(expression)\n\n _sql_handler = getattr(self, expression.key + \"_sql\", None)\n if _sql_handler:\n return _sql_handler(expression)\n\n transforms_handler = self.TRANSFORMS.get(type(expression))\n if transforms_handler:\n if expression_type is type(expression):\n if isinstance(expression, exp.Func):\n return self.function_fallback_sql(expression)\n\n # Ensures we don't enter an infinite loop. This can happen when the original expression\n # has the same type as the final expression and there's no _sql method available for it,\n # because then it'd re-enter _to_sql.\n raise ValueError(\n f\"Expression type {expression.__class__.__name__} requires a _sql method in order to be transformed.\"\n )\n\n return transforms_handler(self, expression)\n\n raise ValueError(f\"Unsupported expression type {expression.__class__.__name__}.\")\n\n return _to_sql\n", "path": "sqlglot/transforms.py"}]} |
gh_patches_debug_1261 | rasdani/github-patches | git_diff | zestedesavoir__zds-site-2285 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Inconsistency in a member's message count
The 2 pages in question: https://zestedesavoir.com/membres/voir/Iris13/ https://zestedesavoir.com/forums/messages/1927/.
We see 3 messages even though none of them are displayed.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zds/member/models.py`
Content:
```
1 # coding: utf-8
2
3 from datetime import datetime
4 from django.conf import settings
5 from django.db import models
6 from hashlib import md5
7 from django.http import HttpRequest
8 from django.contrib.sessions.models import Session
9 from django.contrib.auth import logout
10 import os
11
12 from django.contrib.auth.models import User
13 from django.core.urlresolvers import reverse
14 from django.dispatch import receiver
15
16 import pygeoip
17 from zds.article.models import Article
18 from zds.forum.models import Post, Topic
19 from zds.tutorial.models import Tutorial
20 from zds.utils.models import Alert
21 from zds.member.managers import ProfileManager
22 from django.utils.importlib import import_module
23
24
25 class Profile(models.Model):
26
27 """Represents an user profile."""
28 class Meta:
29 verbose_name = 'Profil'
30 verbose_name_plural = 'Profils'
31 permissions = (
32 ("moderation", u"Modérer un membre"),
33 ("show_ip", u"Afficher les IP d'un membre"),
34 )
35
36 user = models.OneToOneField(
37 User,
38 verbose_name='Utilisateur',
39 related_name="profile")
40
41 last_ip_address = models.CharField(
42 'Adresse IP',
43 max_length=39,
44 blank=True,
45 null=True)
46
47 site = models.CharField('Site internet', max_length=128, blank=True)
48 show_email = models.BooleanField('Afficher adresse mail publiquement',
49 default=False)
50
51 avatar_url = models.CharField(
52 'URL de l\'avatar', max_length=128, null=True, blank=True
53 )
54
55 biography = models.TextField('Biographie', blank=True)
56
57 karma = models.IntegerField('Karma', default=0)
58
59 sign = models.TextField('Signature', max_length=250, blank=True)
60
61 show_sign = models.BooleanField('Voir les signatures',
62 default=True)
63
64 hover_or_click = models.BooleanField('Survol ou click ?',
65 default=False)
66
67 email_for_answer = models.BooleanField('Envoyer pour les réponse MP',
68 default=False)
69
70 sdz_tutorial = models.TextField(
71 'Identifiant des tutos SdZ',
72 blank=True,
73 null=True)
74
75 can_read = models.BooleanField('Possibilité de lire', default=True)
76 end_ban_read = models.DateTimeField(
77 'Fin d\'interdiction de lecture',
78 null=True,
79 blank=True)
80
81 can_write = models.BooleanField('Possibilité d\'écrire', default=True)
82 end_ban_write = models.DateTimeField(
83 'Fin d\'interdiction d\'ecrire',
84 null=True,
85 blank=True)
86
87 last_visit = models.DateTimeField(
88 'Date de dernière visite',
89 null=True,
90 blank=True)
91
92 objects = ProfileManager()
93
94 def __unicode__(self):
95 """Textual forum of a profile."""
96 return self.user.username
97
98 def is_private(self):
99 """checks the user can display his stats"""
100 user_groups = self.user.groups.all()
101 user_group_names = [g.name for g in user_groups]
102 return settings.ZDS_APP['member']['bot_group'] in user_group_names
103
104 def get_absolute_url(self):
105 """Absolute URL to the profile page."""
106 return reverse('member-detail',
107 kwargs={'user_name': self.user.username})
108
109 def get_city(self):
110 """return physical adress by geolocalisation."""
111 if len(self.last_ip_address) <= 16:
112 gic = pygeoip.GeoIP(
113 os.path.join(
114 settings.GEOIP_PATH,
115 'GeoLiteCity.dat'))
116 else:
117 gic = pygeoip.GeoIP(
118 os.path.join(
119 settings.GEOIP_PATH,
120 'GeoLiteCityv6.dat'))
121 geo = gic.record_by_addr(self.last_ip_address)
122
123 return u'{0}, {1}'.format(
124 geo['city'], geo['country_name'])
125
126 def get_avatar_url(self):
127 """Avatar URL (using custom URL or Gravatar)"""
128 if self.avatar_url:
129 return self.avatar_url
130 else:
131 return 'https://secure.gravatar.com/avatar/{0}?d=identicon'.format(
132 md5(self.user.email.lower()).hexdigest())
133
134 def get_post_count(self):
135 """Number of messages posted."""
136 return Post.objects.filter(author__pk=self.user.pk).count()
137
138 def get_topic_count(self):
139 """Number of threads created."""
140 return Topic.objects.filter(author=self.user).count()
141
142 def get_tuto_count(self):
143 """Number of tutos created."""
144 if self.is_private():
145 return 0
146 return Tutorial.objects.filter(authors__in=[self.user]).count()
147
148 def get_tutos(self):
149 """Get all tutorials of the user."""
150 return Tutorial.objects.filter(authors__in=[self.user]).all()
151
152 def get_draft_tutos(self):
153 """Tutorial in draft."""
154 return Tutorial.objects.filter(
155 authors__in=[self.user],
156 sha_draft__isnull=False,
157 sha_beta__isnull=True,
158 sha_validation__isnull=True,
159 sha_public__isnull=True,
160 ).all()
161
162 def get_public_tutos(self):
163 """Tutorial in public."""
164 return Tutorial.objects.filter(
165 authors__in=[
166 self.user],
167 sha_public__isnull=False).all()
168
169 def get_validate_tutos(self):
170 """Tutorial in validation."""
171 return Tutorial.objects.filter(
172 authors__in=[
173 self.user],
174 sha_validation__isnull=False).all()
175
176 def get_beta_tutos(self):
177 """Tutorial in beta."""
178 return Tutorial.objects.filter(
179 authors__in=[
180 self.user],
181 sha_beta__isnull=False).all()
182
183 def get_articles(self):
184 """Get all articles of the user."""
185 return Article.objects.filter(authors__in=[self.user]).all()
186
187 def get_public_articles(self):
188 """Get all public articles of the user."""
189 return Article.objects.filter(
190 authors__in=[
191 self.user],
192 sha_public__isnull=False).all()
193
194 def get_validate_articles(self):
195 """Articles in validation."""
196 return Article.objects.filter(
197 authors__in=[
198 self.user],
199 sha_validation__isnull=False).all()
200
201 def get_draft_articles(self):
202 """Get all draft articles of the user."""
203 return Article.objects\
204 .filter(
205 authors__in=[self.user],
206 sha_draft__isnull=False,
207 sha_validation__isnull=True,
208 sha_public__isnull=True,
209 ).all()
210
211 def get_posts(self):
212 return Post.objects.filter(author=self.user).all()
213
214 def get_invisible_posts_count(self):
215 return Post.objects.filter(is_visible=False, author=self.user).count()
216
217 def get_alerts_posts_count(self):
218 return Alert.objects.filter(author=self.user).count()
219
220 def can_read_now(self):
221 if self.user.is_authenticated:
222 if self.user.is_active:
223 if self.end_ban_read:
224 return self.can_read or (
225 self.end_ban_read < datetime.now())
226 else:
227 return self.can_read
228 else:
229 return False
230
231 def can_write_now(self):
232 if self.user.is_active:
233 if self.end_ban_write:
234 return self.can_write or (self.end_ban_write < datetime.now())
235 else:
236 return self.can_write
237 else:
238 return False
239
240 def get_followed_topics(self):
241 """Followed topics."""
242 return Topic.objects.filter(topicfollowed__user=self.user)\
243 .order_by('-last_message__pubdate')
244
245
246 @receiver(models.signals.post_delete, sender=User)
247 def auto_delete_token_on_unregistering(sender, instance, **kwargs):
248 TokenForgotPassword.objects.filter(user=instance).delete()
249 TokenRegister.objects.filter(user=instance).delete()
250
251
252 class TokenForgotPassword(models.Model):
253
254 class Meta:
255 verbose_name = 'Token de mot de passe oublié'
256 verbose_name_plural = 'Tokens de mots de passe oubliés'
257
258 user = models.ForeignKey(User, verbose_name='Utilisateur', db_index=True)
259 token = models.CharField(max_length=100, db_index=True)
260 date_end = models.DateTimeField('Date de fin')
261
262 def get_absolute_url(self):
263 """Absolute URL to the new password page."""
264 return reverse('zds.member.views.new_password') + \
265 '?token={0}'.format(self.token)
266
267
268 class TokenRegister(models.Model):
269
270 class Meta:
271 verbose_name = 'Token d\'inscription'
272 verbose_name_plural = 'Tokens d\'inscription'
273
274 user = models.ForeignKey(User, verbose_name='Utilisateur', db_index=True)
275 token = models.CharField(max_length=100, db_index=True)
276 date_end = models.DateTimeField('Date de fin')
277
278 def get_absolute_url(self):
279 """Absolute URL to the active account page."""
280 return reverse('zds.member.views.active_account') + \
281 '?token={0}'.format(self.token)
282
283 def __unicode__(self):
284 """Textual forum of a profile."""
285 return u"{0} - {1}".format(self.user.username, self.date_end)
286
287
288 def save_profile(backend, user, response, *args, **kwargs):
289 profile = Profile.objects.filter(user=user).first()
290 if profile is None:
291 profile = Profile(user=user,
292 show_email=False,
293 show_sign=True,
294 hover_or_click=True,
295 email_for_answer=False)
296 profile.last_ip_address = "0.0.0.0"
297 profile.save()
298
299
300 class Ban(models.Model):
301
302 class Meta:
303 verbose_name = 'Sanction'
304 verbose_name_plural = 'Sanctions'
305
306 user = models.ForeignKey(User, verbose_name='Sanctionné', db_index=True)
307 moderator = models.ForeignKey(User, verbose_name='Moderateur',
308 related_name='bans', db_index=True)
309 type = models.CharField('Type', max_length=80, db_index=True)
310 text = models.TextField('Explication de la sanction')
311 pubdate = models.DateTimeField(
312 'Date de publication',
313 blank=True,
314 null=True, db_index=True)
315
316
317 class KarmaNote(models.Model):
318
319 class Meta:
320 verbose_name = 'Note de karma'
321 verbose_name_plural = 'Notes de karma'
322
323 user = models.ForeignKey(User, related_name='karmanote_user', db_index=True)
324 staff = models.ForeignKey(User, related_name='karmanote_staff', db_index=True)
325 comment = models.CharField('Commentaire', max_length=150)
326 value = models.IntegerField('Valeur')
327 create_at = models.DateTimeField('Date d\'ajout', auto_now_add=True)
328
329
330 def logout_user(username):
331 now = datetime.now()
332 request = HttpRequest()
333
334 sessions = Session.objects.filter(expire_date__gt=now)
335
336 for session in sessions:
337 user_id = session.get_decoded().get('_auth_user_id')
338 if username == user_id:
339 engine = import_module(settings.SESSION_ENGINE)
340 request.session = engine.SessionStore(session.session_key)
341 logout(request)
342 break
343
344
345 def listing():
346
347 fichier = []
348 if os.path.isdir(settings.SDZ_TUTO_DIR):
349 for root in os.listdir(settings.SDZ_TUTO_DIR):
350 if os.path.isdir(os.path.join(settings.SDZ_TUTO_DIR, root)):
351 num = root.split('_')[0]
352 if num is not None and num.isdigit():
353 fichier.append((num, root))
354 return fichier
355 else:
356 return ()
357
358
359 def get_info_old_tuto(id):
360 titre = ''
361 tuto = ''
362 images = ''
363 logo = ''
364 if os.path.isdir(settings.SDZ_TUTO_DIR):
365 for rep in os.listdir(settings.SDZ_TUTO_DIR):
366 if rep.startswith(str(id) + '_'):
367 if os.path.isdir(os.path.join(settings.SDZ_TUTO_DIR, rep)):
368 for root, dirs, files in os.walk(
369 os.path.join(
370 settings.SDZ_TUTO_DIR, rep
371 )):
372 for file in files:
373 if file.split('.')[-1] == 'tuto':
374 titre = os.path.splitext(file)[0]
375 tuto = os.path.join(root, file)
376 elif file.split('.')[-1] == 'zip':
377 images = os.path.join(root, file)
378 elif file.split('.')[-1] in ['png',
379 'jpg',
380 'ico',
381 'jpeg',
382 'gif']:
383 logo = os.path.join(root, file)
384
385 return (id, titre, tuto, images, logo)
386
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zds/member/models.py b/zds/member/models.py
--- a/zds/member/models.py
+++ b/zds/member/models.py
@@ -133,6 +133,10 @@
def get_post_count(self):
"""Number of messages posted."""
+ return Post.objects.filter(author__pk=self.user.pk, is_visible=True).count()
+
+ def get_post_count_as_staff(self):
+ """Number of messages posted (view as staff)."""
return Post.objects.filter(author__pk=self.user.pk).count()
def get_topic_count(self):
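As a usage sketch of the split introduced by this diff (assuming a hypothetical Django view helper; the helper name and the `forum.change_post` permission check are illustrative assumptions, not part of the patch):

```python
def displayed_post_count(profile, viewer):
    # Staff can still see posts hidden by moderation, so they get the raw
    # count; everyone else only counts posts still visible on the forum.
    # NOTE: 'forum.change_post' is an assumed permission codename, used only
    # to illustrate when each of the two methods would be called.
    if viewer.has_perm('forum.change_post'):
        return profile.get_post_count_as_staff()
    return profile.get_post_count()
```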
| {"golden_diff": "diff --git a/zds/member/models.py b/zds/member/models.py\n--- a/zds/member/models.py\n+++ b/zds/member/models.py\n@@ -133,6 +133,10 @@\n \n def get_post_count(self):\n \"\"\"Number of messages posted.\"\"\"\n+ return Post.objects.filter(author__pk=self.user.pk, is_visible=True).count()\n+\n+ def get_post_count_as_staff(self):\n+ \"\"\"Number of messages posted (view as staff).\"\"\"\n return Post.objects.filter(author__pk=self.user.pk).count()\n \n def get_topic_count(self):\n", "issue": "Incoh\u00e9rence sur le nombre de messages d'un membre\nLes 2 pages en questions : https://zestedesavoir.com/membres/voir/Iris13/ https://zestedesavoir.com/forums/messages/1927/.\n\nOn a 3 messages alors que aucun n'est affich\u00e9.\n\n", "before_files": [{"content": "# coding: utf-8\n\nfrom datetime import datetime\nfrom django.conf import settings\nfrom django.db import models\nfrom hashlib import md5\nfrom django.http import HttpRequest\nfrom django.contrib.sessions.models import Session\nfrom django.contrib.auth import logout\nimport os\n\nfrom django.contrib.auth.models import User\nfrom django.core.urlresolvers import reverse\nfrom django.dispatch import receiver\n\nimport pygeoip\nfrom zds.article.models import Article\nfrom zds.forum.models import Post, Topic\nfrom zds.tutorial.models import Tutorial\nfrom zds.utils.models import Alert\nfrom zds.member.managers import ProfileManager\nfrom django.utils.importlib import import_module\n\n\nclass Profile(models.Model):\n\n \"\"\"Represents an user profile.\"\"\"\n class Meta:\n verbose_name = 'Profil'\n verbose_name_plural = 'Profils'\n permissions = (\n (\"moderation\", u\"Mod\u00e9rer un membre\"),\n (\"show_ip\", u\"Afficher les IP d'un membre\"),\n )\n\n user = models.OneToOneField(\n User,\n verbose_name='Utilisateur',\n related_name=\"profile\")\n\n last_ip_address = models.CharField(\n 'Adresse IP',\n max_length=39,\n blank=True,\n null=True)\n\n site = models.CharField('Site internet', max_length=128, blank=True)\n show_email = models.BooleanField('Afficher adresse mail publiquement',\n default=False)\n\n avatar_url = models.CharField(\n 'URL de l\\'avatar', max_length=128, null=True, blank=True\n )\n\n biography = models.TextField('Biographie', blank=True)\n\n karma = models.IntegerField('Karma', default=0)\n\n sign = models.TextField('Signature', max_length=250, blank=True)\n\n show_sign = models.BooleanField('Voir les signatures',\n default=True)\n\n hover_or_click = models.BooleanField('Survol ou click ?',\n default=False)\n\n email_for_answer = models.BooleanField('Envoyer pour les r\u00e9ponse MP',\n default=False)\n\n sdz_tutorial = models.TextField(\n 'Identifiant des tutos SdZ',\n blank=True,\n null=True)\n\n can_read = models.BooleanField('Possibilit\u00e9 de lire', default=True)\n end_ban_read = models.DateTimeField(\n 'Fin d\\'interdiction de lecture',\n null=True,\n blank=True)\n\n can_write = models.BooleanField('Possibilit\u00e9 d\\'\u00e9crire', default=True)\n end_ban_write = models.DateTimeField(\n 'Fin d\\'interdiction d\\'ecrire',\n null=True,\n blank=True)\n\n last_visit = models.DateTimeField(\n 'Date de derni\u00e8re visite',\n null=True,\n blank=True)\n\n objects = ProfileManager()\n\n def __unicode__(self):\n \"\"\"Textual forum of a profile.\"\"\"\n return self.user.username\n\n def is_private(self):\n \"\"\"checks the user can display his stats\"\"\"\n user_groups = self.user.groups.all()\n user_group_names = [g.name for g in user_groups]\n return settings.ZDS_APP['member']['bot_group'] in 
user_group_names\n\n def get_absolute_url(self):\n \"\"\"Absolute URL to the profile page.\"\"\"\n return reverse('member-detail',\n kwargs={'user_name': self.user.username})\n\n def get_city(self):\n \"\"\"return physical adress by geolocalisation.\"\"\"\n if len(self.last_ip_address) <= 16:\n gic = pygeoip.GeoIP(\n os.path.join(\n settings.GEOIP_PATH,\n 'GeoLiteCity.dat'))\n else:\n gic = pygeoip.GeoIP(\n os.path.join(\n settings.GEOIP_PATH,\n 'GeoLiteCityv6.dat'))\n geo = gic.record_by_addr(self.last_ip_address)\n\n return u'{0}, {1}'.format(\n geo['city'], geo['country_name'])\n\n def get_avatar_url(self):\n \"\"\"Avatar URL (using custom URL or Gravatar)\"\"\"\n if self.avatar_url:\n return self.avatar_url\n else:\n return 'https://secure.gravatar.com/avatar/{0}?d=identicon'.format(\n md5(self.user.email.lower()).hexdigest())\n\n def get_post_count(self):\n \"\"\"Number of messages posted.\"\"\"\n return Post.objects.filter(author__pk=self.user.pk).count()\n\n def get_topic_count(self):\n \"\"\"Number of threads created.\"\"\"\n return Topic.objects.filter(author=self.user).count()\n\n def get_tuto_count(self):\n \"\"\"Number of tutos created.\"\"\"\n if self.is_private():\n return 0\n return Tutorial.objects.filter(authors__in=[self.user]).count()\n\n def get_tutos(self):\n \"\"\"Get all tutorials of the user.\"\"\"\n return Tutorial.objects.filter(authors__in=[self.user]).all()\n\n def get_draft_tutos(self):\n \"\"\"Tutorial in draft.\"\"\"\n return Tutorial.objects.filter(\n authors__in=[self.user],\n sha_draft__isnull=False,\n sha_beta__isnull=True,\n sha_validation__isnull=True,\n sha_public__isnull=True,\n ).all()\n\n def get_public_tutos(self):\n \"\"\"Tutorial in public.\"\"\"\n return Tutorial.objects.filter(\n authors__in=[\n self.user],\n sha_public__isnull=False).all()\n\n def get_validate_tutos(self):\n \"\"\"Tutorial in validation.\"\"\"\n return Tutorial.objects.filter(\n authors__in=[\n self.user],\n sha_validation__isnull=False).all()\n\n def get_beta_tutos(self):\n \"\"\"Tutorial in beta.\"\"\"\n return Tutorial.objects.filter(\n authors__in=[\n self.user],\n sha_beta__isnull=False).all()\n\n def get_articles(self):\n \"\"\"Get all articles of the user.\"\"\"\n return Article.objects.filter(authors__in=[self.user]).all()\n\n def get_public_articles(self):\n \"\"\"Get all public articles of the user.\"\"\"\n return Article.objects.filter(\n authors__in=[\n self.user],\n sha_public__isnull=False).all()\n\n def get_validate_articles(self):\n \"\"\"Articles in validation.\"\"\"\n return Article.objects.filter(\n authors__in=[\n self.user],\n sha_validation__isnull=False).all()\n\n def get_draft_articles(self):\n \"\"\"Get all draft articles of the user.\"\"\"\n return Article.objects\\\n .filter(\n authors__in=[self.user],\n sha_draft__isnull=False,\n sha_validation__isnull=True,\n sha_public__isnull=True,\n ).all()\n\n def get_posts(self):\n return Post.objects.filter(author=self.user).all()\n\n def get_invisible_posts_count(self):\n return Post.objects.filter(is_visible=False, author=self.user).count()\n\n def get_alerts_posts_count(self):\n return Alert.objects.filter(author=self.user).count()\n\n def can_read_now(self):\n if self.user.is_authenticated:\n if self.user.is_active:\n if self.end_ban_read:\n return self.can_read or (\n self.end_ban_read < datetime.now())\n else:\n return self.can_read\n else:\n return False\n\n def can_write_now(self):\n if self.user.is_active:\n if self.end_ban_write:\n return self.can_write or (self.end_ban_write < datetime.now())\n 
else:\n return self.can_write\n else:\n return False\n\n def get_followed_topics(self):\n \"\"\"Followed topics.\"\"\"\n return Topic.objects.filter(topicfollowed__user=self.user)\\\n .order_by('-last_message__pubdate')\n\n\n@receiver(models.signals.post_delete, sender=User)\ndef auto_delete_token_on_unregistering(sender, instance, **kwargs):\n TokenForgotPassword.objects.filter(user=instance).delete()\n TokenRegister.objects.filter(user=instance).delete()\n\n\nclass TokenForgotPassword(models.Model):\n\n class Meta:\n verbose_name = 'Token de mot de passe oubli\u00e9'\n verbose_name_plural = 'Tokens de mots de passe oubli\u00e9s'\n\n user = models.ForeignKey(User, verbose_name='Utilisateur', db_index=True)\n token = models.CharField(max_length=100, db_index=True)\n date_end = models.DateTimeField('Date de fin')\n\n def get_absolute_url(self):\n \"\"\"Absolute URL to the new password page.\"\"\"\n return reverse('zds.member.views.new_password') + \\\n '?token={0}'.format(self.token)\n\n\nclass TokenRegister(models.Model):\n\n class Meta:\n verbose_name = 'Token d\\'inscription'\n verbose_name_plural = 'Tokens d\\'inscription'\n\n user = models.ForeignKey(User, verbose_name='Utilisateur', db_index=True)\n token = models.CharField(max_length=100, db_index=True)\n date_end = models.DateTimeField('Date de fin')\n\n def get_absolute_url(self):\n \"\"\"Absolute URL to the active account page.\"\"\"\n return reverse('zds.member.views.active_account') + \\\n '?token={0}'.format(self.token)\n\n def __unicode__(self):\n \"\"\"Textual forum of a profile.\"\"\"\n return u\"{0} - {1}\".format(self.user.username, self.date_end)\n\n\ndef save_profile(backend, user, response, *args, **kwargs):\n profile = Profile.objects.filter(user=user).first()\n if profile is None:\n profile = Profile(user=user,\n show_email=False,\n show_sign=True,\n hover_or_click=True,\n email_for_answer=False)\n profile.last_ip_address = \"0.0.0.0\"\n profile.save()\n\n\nclass Ban(models.Model):\n\n class Meta:\n verbose_name = 'Sanction'\n verbose_name_plural = 'Sanctions'\n\n user = models.ForeignKey(User, verbose_name='Sanctionn\u00e9', db_index=True)\n moderator = models.ForeignKey(User, verbose_name='Moderateur',\n related_name='bans', db_index=True)\n type = models.CharField('Type', max_length=80, db_index=True)\n text = models.TextField('Explication de la sanction')\n pubdate = models.DateTimeField(\n 'Date de publication',\n blank=True,\n null=True, db_index=True)\n\n\nclass KarmaNote(models.Model):\n\n class Meta:\n verbose_name = 'Note de karma'\n verbose_name_plural = 'Notes de karma'\n\n user = models.ForeignKey(User, related_name='karmanote_user', db_index=True)\n staff = models.ForeignKey(User, related_name='karmanote_staff', db_index=True)\n comment = models.CharField('Commentaire', max_length=150)\n value = models.IntegerField('Valeur')\n create_at = models.DateTimeField('Date d\\'ajout', auto_now_add=True)\n\n\ndef logout_user(username):\n now = datetime.now()\n request = HttpRequest()\n\n sessions = Session.objects.filter(expire_date__gt=now)\n\n for session in sessions:\n user_id = session.get_decoded().get('_auth_user_id')\n if username == user_id:\n engine = import_module(settings.SESSION_ENGINE)\n request.session = engine.SessionStore(session.session_key)\n logout(request)\n break\n\n\ndef listing():\n\n fichier = []\n if os.path.isdir(settings.SDZ_TUTO_DIR):\n for root in os.listdir(settings.SDZ_TUTO_DIR):\n if os.path.isdir(os.path.join(settings.SDZ_TUTO_DIR, root)):\n num = root.split('_')[0]\n if num is 
not None and num.isdigit():\n fichier.append((num, root))\n return fichier\n else:\n return ()\n\n\ndef get_info_old_tuto(id):\n titre = ''\n tuto = ''\n images = ''\n logo = ''\n if os.path.isdir(settings.SDZ_TUTO_DIR):\n for rep in os.listdir(settings.SDZ_TUTO_DIR):\n if rep.startswith(str(id) + '_'):\n if os.path.isdir(os.path.join(settings.SDZ_TUTO_DIR, rep)):\n for root, dirs, files in os.walk(\n os.path.join(\n settings.SDZ_TUTO_DIR, rep\n )):\n for file in files:\n if file.split('.')[-1] == 'tuto':\n titre = os.path.splitext(file)[0]\n tuto = os.path.join(root, file)\n elif file.split('.')[-1] == 'zip':\n images = os.path.join(root, file)\n elif file.split('.')[-1] in ['png',\n 'jpg',\n 'ico',\n 'jpeg',\n 'gif']:\n logo = os.path.join(root, file)\n\n return (id, titre, tuto, images, logo)\n", "path": "zds/member/models.py"}], "after_files": [{"content": "# coding: utf-8\n\nfrom datetime import datetime\nfrom django.conf import settings\nfrom django.db import models\nfrom hashlib import md5\nfrom django.http import HttpRequest\nfrom django.contrib.sessions.models import Session\nfrom django.contrib.auth import logout\nimport os\n\nfrom django.contrib.auth.models import User\nfrom django.core.urlresolvers import reverse\nfrom django.dispatch import receiver\n\nimport pygeoip\nfrom zds.article.models import Article\nfrom zds.forum.models import Post, Topic\nfrom zds.tutorial.models import Tutorial\nfrom zds.utils.models import Alert\nfrom zds.member.managers import ProfileManager\nfrom django.utils.importlib import import_module\n\n\nclass Profile(models.Model):\n\n \"\"\"Represents an user profile.\"\"\"\n class Meta:\n verbose_name = 'Profil'\n verbose_name_plural = 'Profils'\n permissions = (\n (\"moderation\", u\"Mod\u00e9rer un membre\"),\n (\"show_ip\", u\"Afficher les IP d'un membre\"),\n )\n\n user = models.OneToOneField(\n User,\n verbose_name='Utilisateur',\n related_name=\"profile\")\n\n last_ip_address = models.CharField(\n 'Adresse IP',\n max_length=39,\n blank=True,\n null=True)\n\n site = models.CharField('Site internet', max_length=128, blank=True)\n show_email = models.BooleanField('Afficher adresse mail publiquement',\n default=False)\n\n avatar_url = models.CharField(\n 'URL de l\\'avatar', max_length=128, null=True, blank=True\n )\n\n biography = models.TextField('Biographie', blank=True)\n\n karma = models.IntegerField('Karma', default=0)\n\n sign = models.TextField('Signature', max_length=250, blank=True)\n\n show_sign = models.BooleanField('Voir les signatures',\n default=True)\n\n hover_or_click = models.BooleanField('Survol ou click ?',\n default=False)\n\n email_for_answer = models.BooleanField('Envoyer pour les r\u00e9ponse MP',\n default=False)\n\n sdz_tutorial = models.TextField(\n 'Identifiant des tutos SdZ',\n blank=True,\n null=True)\n\n can_read = models.BooleanField('Possibilit\u00e9 de lire', default=True)\n end_ban_read = models.DateTimeField(\n 'Fin d\\'interdiction de lecture',\n null=True,\n blank=True)\n\n can_write = models.BooleanField('Possibilit\u00e9 d\\'\u00e9crire', default=True)\n end_ban_write = models.DateTimeField(\n 'Fin d\\'interdiction d\\'ecrire',\n null=True,\n blank=True)\n\n last_visit = models.DateTimeField(\n 'Date de derni\u00e8re visite',\n null=True,\n blank=True)\n\n objects = ProfileManager()\n\n def __unicode__(self):\n \"\"\"Textual forum of a profile.\"\"\"\n return self.user.username\n\n def is_private(self):\n \"\"\"checks the user can display his stats\"\"\"\n user_groups = self.user.groups.all()\n user_group_names 
= [g.name for g in user_groups]\n return settings.ZDS_APP['member']['bot_group'] in user_group_names\n\n def get_absolute_url(self):\n \"\"\"Absolute URL to the profile page.\"\"\"\n return reverse('member-detail',\n kwargs={'user_name': self.user.username})\n\n def get_city(self):\n \"\"\"return physical adress by geolocalisation.\"\"\"\n if len(self.last_ip_address) <= 16:\n gic = pygeoip.GeoIP(\n os.path.join(\n settings.GEOIP_PATH,\n 'GeoLiteCity.dat'))\n else:\n gic = pygeoip.GeoIP(\n os.path.join(\n settings.GEOIP_PATH,\n 'GeoLiteCityv6.dat'))\n geo = gic.record_by_addr(self.last_ip_address)\n\n return u'{0}, {1}'.format(\n geo['city'], geo['country_name'])\n\n def get_avatar_url(self):\n \"\"\"Avatar URL (using custom URL or Gravatar)\"\"\"\n if self.avatar_url:\n return self.avatar_url\n else:\n return 'https://secure.gravatar.com/avatar/{0}?d=identicon'.format(\n md5(self.user.email.lower()).hexdigest())\n\n def get_post_count(self):\n \"\"\"Number of messages posted.\"\"\"\n return Post.objects.filter(author__pk=self.user.pk, is_visible=True).count()\n\n def get_post_count_as_staff(self):\n \"\"\"Number of messages posted (view as staff).\"\"\"\n return Post.objects.filter(author__pk=self.user.pk).count()\n\n def get_topic_count(self):\n \"\"\"Number of threads created.\"\"\"\n return Topic.objects.filter(author=self.user).count()\n\n def get_tuto_count(self):\n \"\"\"Number of tutos created.\"\"\"\n if self.is_private():\n return 0\n return Tutorial.objects.filter(authors__in=[self.user]).count()\n\n def get_tutos(self):\n \"\"\"Get all tutorials of the user.\"\"\"\n return Tutorial.objects.filter(authors__in=[self.user]).all()\n\n def get_draft_tutos(self):\n \"\"\"Tutorial in draft.\"\"\"\n return Tutorial.objects.filter(\n authors__in=[self.user],\n sha_draft__isnull=False,\n sha_beta__isnull=True,\n sha_validation__isnull=True,\n sha_public__isnull=True,\n ).all()\n\n def get_public_tutos(self):\n \"\"\"Tutorial in public.\"\"\"\n return Tutorial.objects.filter(\n authors__in=[\n self.user],\n sha_public__isnull=False).all()\n\n def get_validate_tutos(self):\n \"\"\"Tutorial in validation.\"\"\"\n return Tutorial.objects.filter(\n authors__in=[\n self.user],\n sha_validation__isnull=False).all()\n\n def get_beta_tutos(self):\n \"\"\"Tutorial in beta.\"\"\"\n return Tutorial.objects.filter(\n authors__in=[\n self.user],\n sha_beta__isnull=False).all()\n\n def get_articles(self):\n \"\"\"Get all articles of the user.\"\"\"\n return Article.objects.filter(authors__in=[self.user]).all()\n\n def get_public_articles(self):\n \"\"\"Get all public articles of the user.\"\"\"\n return Article.objects.filter(\n authors__in=[\n self.user],\n sha_public__isnull=False).all()\n\n def get_validate_articles(self):\n \"\"\"Articles in validation.\"\"\"\n return Article.objects.filter(\n authors__in=[\n self.user],\n sha_validation__isnull=False).all()\n\n def get_draft_articles(self):\n \"\"\"Get all draft articles of the user.\"\"\"\n return Article.objects\\\n .filter(\n authors__in=[self.user],\n sha_draft__isnull=False,\n sha_validation__isnull=True,\n sha_public__isnull=True,\n ).all()\n\n def get_posts(self):\n return Post.objects.filter(author=self.user).all()\n\n def get_invisible_posts_count(self):\n return Post.objects.filter(is_visible=False, author=self.user).count()\n\n def get_alerts_posts_count(self):\n return Alert.objects.filter(author=self.user).count()\n\n def can_read_now(self):\n if self.user.is_authenticated:\n if self.user.is_active:\n if self.end_ban_read:\n return 
self.can_read or (\n self.end_ban_read < datetime.now())\n else:\n return self.can_read\n else:\n return False\n\n def can_write_now(self):\n if self.user.is_active:\n if self.end_ban_write:\n return self.can_write or (self.end_ban_write < datetime.now())\n else:\n return self.can_write\n else:\n return False\n\n def get_followed_topics(self):\n \"\"\"Followed topics.\"\"\"\n return Topic.objects.filter(topicfollowed__user=self.user)\\\n .order_by('-last_message__pubdate')\n\n\n@receiver(models.signals.post_delete, sender=User)\ndef auto_delete_token_on_unregistering(sender, instance, **kwargs):\n TokenForgotPassword.objects.filter(user=instance).delete()\n TokenRegister.objects.filter(user=instance).delete()\n\n\nclass TokenForgotPassword(models.Model):\n\n class Meta:\n verbose_name = 'Token de mot de passe oubli\u00e9'\n verbose_name_plural = 'Tokens de mots de passe oubli\u00e9s'\n\n user = models.ForeignKey(User, verbose_name='Utilisateur', db_index=True)\n token = models.CharField(max_length=100, db_index=True)\n date_end = models.DateTimeField('Date de fin')\n\n def get_absolute_url(self):\n \"\"\"Absolute URL to the new password page.\"\"\"\n return reverse('zds.member.views.new_password') + \\\n '?token={0}'.format(self.token)\n\n\nclass TokenRegister(models.Model):\n\n class Meta:\n verbose_name = 'Token d\\'inscription'\n verbose_name_plural = 'Tokens d\\'inscription'\n\n user = models.ForeignKey(User, verbose_name='Utilisateur', db_index=True)\n token = models.CharField(max_length=100, db_index=True)\n date_end = models.DateTimeField('Date de fin')\n\n def get_absolute_url(self):\n \"\"\"Absolute URL to the active account page.\"\"\"\n return reverse('zds.member.views.active_account') + \\\n '?token={0}'.format(self.token)\n\n def __unicode__(self):\n \"\"\"Textual forum of a profile.\"\"\"\n return u\"{0} - {1}\".format(self.user.username, self.date_end)\n\n\ndef save_profile(backend, user, response, *args, **kwargs):\n profile = Profile.objects.filter(user=user).first()\n if profile is None:\n profile = Profile(user=user,\n show_email=False,\n show_sign=True,\n hover_or_click=True,\n email_for_answer=False)\n profile.last_ip_address = \"0.0.0.0\"\n profile.save()\n\n\nclass Ban(models.Model):\n\n class Meta:\n verbose_name = 'Sanction'\n verbose_name_plural = 'Sanctions'\n\n user = models.ForeignKey(User, verbose_name='Sanctionn\u00e9', db_index=True)\n moderator = models.ForeignKey(User, verbose_name='Moderateur',\n related_name='bans', db_index=True)\n type = models.CharField('Type', max_length=80, db_index=True)\n text = models.TextField('Explication de la sanction')\n pubdate = models.DateTimeField(\n 'Date de publication',\n blank=True,\n null=True, db_index=True)\n\n\nclass KarmaNote(models.Model):\n\n class Meta:\n verbose_name = 'Note de karma'\n verbose_name_plural = 'Notes de karma'\n\n user = models.ForeignKey(User, related_name='karmanote_user', db_index=True)\n staff = models.ForeignKey(User, related_name='karmanote_staff', db_index=True)\n comment = models.CharField('Commentaire', max_length=150)\n value = models.IntegerField('Valeur')\n create_at = models.DateTimeField('Date d\\'ajout', auto_now_add=True)\n\n\ndef logout_user(username):\n now = datetime.now()\n request = HttpRequest()\n\n sessions = Session.objects.filter(expire_date__gt=now)\n\n for session in sessions:\n user_id = session.get_decoded().get('_auth_user_id')\n if username == user_id:\n engine = import_module(settings.SESSION_ENGINE)\n request.session = engine.SessionStore(session.session_key)\n 
logout(request)\n break\n\n\ndef listing():\n\n fichier = []\n if os.path.isdir(settings.SDZ_TUTO_DIR):\n for root in os.listdir(settings.SDZ_TUTO_DIR):\n if os.path.isdir(os.path.join(settings.SDZ_TUTO_DIR, root)):\n num = root.split('_')[0]\n if num is not None and num.isdigit():\n fichier.append((num, root))\n return fichier\n else:\n return ()\n\n\ndef get_info_old_tuto(id):\n titre = ''\n tuto = ''\n images = ''\n logo = ''\n if os.path.isdir(settings.SDZ_TUTO_DIR):\n for rep in os.listdir(settings.SDZ_TUTO_DIR):\n if rep.startswith(str(id) + '_'):\n if os.path.isdir(os.path.join(settings.SDZ_TUTO_DIR, rep)):\n for root, dirs, files in os.walk(\n os.path.join(\n settings.SDZ_TUTO_DIR, rep\n )):\n for file in files:\n if file.split('.')[-1] == 'tuto':\n titre = os.path.splitext(file)[0]\n tuto = os.path.join(root, file)\n elif file.split('.')[-1] == 'zip':\n images = os.path.join(root, file)\n elif file.split('.')[-1] in ['png',\n 'jpg',\n 'ico',\n 'jpeg',\n 'gif']:\n logo = os.path.join(root, file)\n\n return (id, titre, tuto, images, logo)\n", "path": "zds/member/models.py"}]} |
gh_patches_debug_1262 | rasdani/github-patches | git_diff | Pyomo__pyomo-526 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ExternalFunction and DAE Transformation
I think there is a problem applying DAE transformations to Pyomo models with ExternalFunction objects. I can come up with a simpler demonstration, but hopefully the trace below is good enough.
jeslick@pds04:~/git/eslickj/models/idaes_models/process/solvent/MEA_simple> python main.py
Traceback (most recent call last):
File "main.py", line 96, in <module>
flowsheet = init(filename=args.load_json)
File "main.py", line 34, in init
flowsheet = MeaSheet(name='MEA_Model', solver=solver)
File "/imports/users/jeslick/git/eslickj/models/idaes_models/process/solvent/MEA_simple/flowsheet.py", line 51, in __init__
FlowsheetModel.__init__(self, *args, **kwargs)
File "/imports/users/jeslick/git/eslickj/models/idaes_models/core/flowsheet_model.py", line 29, in __init__
self.build()
File "/imports/users/jeslick/git/eslickj/models/idaes_models/process/solvent/MEA_simple/flowsheet.py", line 341, in build
doc="CO2 absorber"))
File "/imports/users/jeslick/git/eslickj/models/idaes_models/unit/solvent/MEA_simple/column.py", line 123, in __init__
self.build()
File "/imports/users/jeslick/git/eslickj/models/idaes_models/unit/solvent/MEA_simple/column.py", line 164, in build
self._fd_transform()
File "/imports/users/jeslick/git/eslickj/models/idaes_models/unit/solvent/MEA_simple/column.py", line 347, in _fd_transform
blk, nfe=self.nfe, wrt=blk.z, scheme='BACKWARD')
File "/imports/users/jeslick/anaconda2/lib/python2.7/site-packages/pyomo/core/base/plugin.py", line 330, in apply_to
self._apply_to(model, **kwds)
File "/imports/users/jeslick/anaconda2/lib/python2.7/site-packages/pyomo/dae/plugins/finitedifference.py", line 170, in _apply_to
self._transformBlock(block,currentds)
File "/imports/users/jeslick/anaconda2/lib/python2.7/site-packages/pyomo/dae/plugins/finitedifference.py", line 199, in _transformBlock
update_contset_indexed_component(c)
File "/imports/users/jeslick/anaconda2/lib/python2.7/site-packages/pyomo/dae/misc.py", line 115, in update_contset_indexed_component
if comp.dim() == 1:
AttributeError: 'AMPLExternalFunction' object has no attribute 'dim'
--- END ISSUE ---
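A much smaller model reproduces the failure described above; the sketch below is illustrative only (the shared-library path, function name, and model are invented rather than taken from the MEA flowsheet) and assumes a Pyomo build that still calls `dim()` on every component during the finite-difference transformation, i.e. one from before the fix recorded further down.

```python
# Hypothetical minimal reproduction; requires Pyomo from before the fix in
# this record. The library path and function name are placeholders.
from pyomo.environ import ConcreteModel, ExternalFunction, TransformationFactory, Var
from pyomo.dae import ContinuousSet, DerivativeVar

m = ConcreteModel()
m.t = ContinuousSet(bounds=(0, 1))
m.x = Var(m.t)
m.dxdt = DerivativeVar(m.x, wrt=m.t)

# Merely declaring an external function on the model is enough: the DAE
# transformation walks every component and calls comp.dim(), which the
# AMPLExternalFunction component does not define.
m.f = ExternalFunction(library="libexternal.so", function="myfunc")

TransformationFactory("dae.finite_difference").apply_to(
    m, nfe=10, wrt=m.t, scheme="BACKWARD"
)  # AttributeError: 'AMPLExternalFunction' object has no attribute 'dim'
```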
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyomo/dae/misc.py`
Content:
```
1 # ___________________________________________________________________________
2 #
3 # Pyomo: Python Optimization Modeling Objects
4 # Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
5 # Under the terms of Contract DE-NA0003525 with National Technology and
6 # Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
7 # rights in this software.
8 # This software is distributed under the 3-clause BSD License.
9 # ___________________________________________________________________________
10
11 import logging
12
13 from pyomo.core import Suffix, Var, Constraint, Piecewise, Block
14 from pyomo.core import Expression, Param
15 from pyomo.core.base.indexed_component import IndexedComponent
16 from pyomo.core.base.misc import apply_indexed_rule
17 from pyomo.core.base.block import _BlockData, IndexedBlock
18 from pyomo.dae import ContinuousSet, DerivativeVar, DAE_Error
19 from pyomo.core.kernel.component_map import ComponentMap
20 from pyomo.core.base.block import SortComponents
21 from pyomo.common.log import LoggingIntercept
22
23 from six import iterkeys, itervalues, iteritems, StringIO
24
25 logger = logging.getLogger('pyomo.dae')
26
27
28 def generate_finite_elements(ds, nfe):
29 """
30 This function first checks to see if the number of finite elements
31 in the differential set is equal to nfe. If the number of finite
32 elements is less than nfe, additional points will be generated. If
33 the number of finite elements is greater than or equal to nfe the
34 differential set will not be modified
35 """
36 if (len(ds) - 1) >= nfe:
37 # In this case the differentialset already contains the
38 # desired number or more than the desired number of finite
39 # elements so no additional points are needed.
40 return
41 elif len(ds) == 2:
42 # If only bounds have been specified on the differentialset we
43 # generate the desired number of finite elements by
44 # spreading them evenly over the interval
45 step = (max(ds) - min(ds)) / float(nfe)
46 tmp = min(ds) + step
47 while round(tmp, 6) <= round((max(ds) - step), 6):
48 ds.add(round(tmp, 6))
49 tmp += step
50 ds.set_changed(True)
51 ds._sort()
52 ds._fe = list(ds)
53 return
54 else:
55 # This is the case where some points have been specified
56 # inside of the bounds however the desired number of finite
57 # elements has not been met. We first look at the step sizes
58 # between the existing points. Then an additional point
59 # is placed at the midpoint of the largest step. This
60 # process is repeated until we have achieved the desired
61 # number of finite elements. If there are multiple "largest steps"
62 # the point will be placed at the first occurance of the
63 # largest step
64
65 addpts = nfe - (len(ds) - 1)
66 while addpts > 0:
67 _add_point(ds)
68 addpts -= 1
69 ds.set_changed(True)
70 ds._sort()
71 ds._fe = list(ds)
72 return
73
74
75 def _add_point(ds):
76 sortds = sorted(ds)
77 maxstep = sortds[1] - sortds[0]
78 maxloc = 0
79 for i in range(2, len(sortds)):
80 if (sortds[i] - sortds[i - 1]) > maxstep:
81 maxstep = sortds[i] - sortds[i - 1]
82 maxloc = i - 1
83
84 ds.add(round((sortds[maxloc] + maxstep / 2.0), 6))
85
86
87 def generate_colloc_points(ds, tau):
88 """
89 This function adds collocation points between the finite elements
90 in the differential set
91 """
92 fes = sorted(ds)
93 for i in range(1, len(fes)):
94 h = fes[i] - fes[i - 1]
95 for j in range(len(tau)):
96 if tau[j] == 1 or tau[j] == 0:
97 continue
98 pt = fes[i - 1] + h * tau[j]
99 pt = round(pt, 6)
100 if pt not in ds:
101 ds.add(pt)
102 ds.set_changed(True)
103 ds._sort()
104
105
106 def expand_components(block):
107 """
108 Loop over block components and try expanding them. If expansion fails
109 then save the component and try again later. This function has some
110 built-in robustness for block-hierarchical models with circular
111 references but will not work for all cases.
112 """
113
114 # expansion_map is used to map components to the functions used to
115 # expand them so that the update_contset_indexed_component function
116 # logic only has to be called once even in the case where we have to
117 # re-try expanding components due to circular references
118 expansion_map = ComponentMap()
119 redo_expansion = list()
120
121 # Record the missing BlockData before expanding components. This is for
122 # the case where a ContinuousSet indexed Block is used in a Constraint.
123 # If the Constraint is expanded before the Block then the missing
124 # BlockData will be added to the indexed Block but will not be
125 # constructed correctly.
126 for blk in block.component_objects(Block, descend_into=True):
127 missing_idx = set(blk._index) - set(iterkeys(blk._data))
128 if missing_idx:
129 blk._dae_missing_idx = missing_idx
130
131 # Wrap this whole process in a try block in order to ensure that errors
132 # swallowed by the LoggingIntercept context below are re-raised if the
133 # discretization encounters an error it isn't expecting.
134 try:
135
136 # Intercept logging to suppress Error messages arising from failed
137 # constraint rules. These error messages get logged even though the
138 # AttributeError causing the error is caught and handled by this
139 # function when expanding discretized models. We maintain a stream
140 # of the intercepted logging messages which will be printed if an
141 # unexpected exception is raised.
142 buf = StringIO()
143 with LoggingIntercept(buf, 'pyomo.core', logging.ERROR):
144
145 # Identify components that need to be expanded and try expanding
146 # them
147 for c in block.component_objects(descend_into=True,
148 sort=SortComponents.declOrder):
149 try:
150 update_contset_indexed_component(c, expansion_map)
151 except AttributeError:
152 redo_expansion.append(c)
153
154 # Re-try expansion on any components that failed the first time.
155 # This is indicative of circular component references and not
156 # expanding components in the correct order the first time
157 # through.
158 N = len(redo_expansion)
159 while N:
160 for i in range(N):
161 c = redo_expansion.pop()
162 try:
163 expansion_map[c](c)
164 except AttributeError:
165 redo_expansion.append(c)
166 if len(redo_expansion) == N:
167 raise DAE_Error("Unable to fully discretize %s. Possible "
168 "circular references detected between "
169 "components %s. Reformulate your model to"
170 " remove circular references or apply a "
171 "discretization transformation before "
172 "linking blocks together."
173 % (block, str(redo_expansion)))
174
175 N = len(redo_expansion)
176
177 except Exception as e:
178 logger.error(buf.getvalue())
179 raise
180
181 def update_contset_indexed_component(comp, expansion_map):
182 """
183 Update any model components which are indexed by a ContinuousSet that
184 has changed
185 """
186
187 # This implemenation will *NOT* check for or update
188 # components which use a ContinuousSet implicitly. ex) an
189 # objective function which iterates through a ContinuousSet and
190 # sums the squared error. If you use a ContinuousSet implicitly
191 # you must initialize it with every index you would like to have
192 # access to!
193
194 if comp.type() is Suffix:
195 return
196
197 # Params indexed by a ContinuousSet should include an initialize
198 # and/or default rule which will be called automatically when the
199 # parameter value at a new point in the ContinuousSet is
200 # requested. Therefore, no special processing is required for
201 # Params.
202 if comp.type() is Param:
203 return
204
205 # Components indexed by a ContinuousSet must have a dimension of at
206 # least 1
207 if comp.dim() == 0:
208 return
209
210 # Extract the indexing sets. Must treat components with a single
211 # index separately from components with multiple indexing sets.
212 if comp._implicit_subsets is None:
213 indexset = [comp._index]
214 else:
215 indexset = comp._implicit_subsets
216
217 for s in indexset:
218 if s.type() == ContinuousSet and s.get_changed():
219 if isinstance(comp, Var): # Don't use the type() method here
220 # because we want to catch DerivativeVar components as well
221 # as Var components
222 expansion_map[comp] = _update_var
223 _update_var(comp)
224 elif comp.type() == Constraint:
225 expansion_map[comp] = _update_constraint
226 _update_constraint(comp)
227 elif comp.type() == Expression:
228 expansion_map[comp] = _update_expression
229 _update_expression(comp)
230 elif isinstance(comp, Piecewise):
231 expansion_map[comp] =_update_piecewise
232 _update_piecewise(comp)
233 elif comp.type() == Block:
234 expansion_map[comp] = _update_block
235 _update_block(comp)
236 else:
237 raise TypeError(
238 "Found component %s of type %s indexed "
239 "by a ContinuousSet. Components of this type are "
240 "not currently supported by the automatic "
241 "discretization transformation in pyomo.dae. "
242 "Try adding the component to the model "
243 "after discretizing. Alert the pyomo developers "
244 "for more assistance." % (str(comp), comp.type()))
245
246
247 def _update_var(v):
248 """
249 This method will construct any additional indices in a variable
250 resulting from the discretization of a ContinuousSet.
251 """
252
253 # Note: This is not required it is handled by the _default method on
254 # Var (which is now a IndexedComponent). However, it
255 # would be much slower to rely on that method to generate new
256 # _VarData for a large number of new indices.
257 new_indices = set(v._index) - set(iterkeys(v._data))
258 for index in new_indices:
259 v.add(index)
260
261
262 def _update_constraint(con):
263 """
264 This method will construct any additional indices in a constraint
265 resulting from the discretization of a ContinuousSet.
266 """
267
268 _rule = con.rule
269 _parent = con._parent()
270 for i in con.index_set():
271 if i not in con:
272 # Code taken from the construct() method of Constraint
273 con.add(i, apply_indexed_rule(con, _rule, _parent, i))
274
275
276 def _update_expression(expre):
277 """
278 This method will construct any additional indices in an expression
279 resulting from the discretization of a ContinuousSet.
280 """
281 _rule = expre._init_rule
282 _parent = expre._parent()
283 for i in expre.index_set():
284 if i not in expre:
285 # Code taken from the construct() method of Expression
286 expre.add(i, apply_indexed_rule(expre, _rule, _parent, i))
287
288
289 def _update_block(blk):
290 """
291 This method will construct any additional indices in a block
292 resulting from the discretization of a ContinuousSet. For
293 Block-derived components we check if the Block construct method has
294 been overridden. If not then we update it like a regular block. If
295 construct has been overridden then we try to call the component's
296 update_after_discretization method. If the component hasn't
297 implemented this method then we throw a warning and try to update it
298 like a normal block. The issue, when construct is overridden, is that
299 anything could be happening and we can't automatically assume that
300 treating the block-derived component like a normal block will be
301 sufficient to update it correctly.
302
303 """
304
305 # Check if Block construct method is overridden
306 # getattr needed below for Python 2, 3 compatibility
307 if blk.construct.__func__ is not getattr(IndexedBlock.construct,
308 '__func__',
309 IndexedBlock.construct):
310 # check for custom update function
311 if hasattr(blk, 'update_after_discretization'):
312 blk.update_after_discretization()
313 return
314 else:
315 logger.warning(
316 'DAE(misc): Attempting to apply a discretization '
317 'transformation to the Block-derived component "%s". The '
318 'component overrides the Block construct method but no '
319 'update_after_discretization() function was found. Will '
320 'attempt to update as a standard Block but user should verify '
321 'that the component was expanded correctly. To suppress this '
322 'warning, please provide an update_after_discretization() '
323 'function on Block-derived components that override '
324 'construct()' % blk.name)
325
326 # Code taken from the construct() method of Block
327 missing_idx = getattr(blk, '_dae_missing_idx', set([]))
328 for idx in list(missing_idx):
329 _block = blk[idx]
330 obj = apply_indexed_rule(
331 blk, blk._rule, _block, idx, blk._options)
332
333 if isinstance(obj, _BlockData) and obj is not _block:
334 # If the user returns a block, use their block instead
335 # of the empty one we just created.
336 for c in list(obj.component_objects(descend_into=False)):
337 obj.del_component(c)
338 _block.add_component(c.local_name, c)
339 # transfer over any other attributes that are not components
340 for name, val in iteritems(obj.__dict__):
341 if not hasattr(_block, name) and not hasattr(blk, name):
342 super(_BlockData, _block).__setattr__(name, val)
343
344 # Remove book-keeping data after Block is discretized
345 if hasattr(blk, '_dae_missing_idx'):
346 del blk._dae_missing_idx
347
348
349 def _update_piecewise(pw):
350 """
351 This method will construct any additional indices in a Piecewise
352 object resulting from the discretization of a ContinuousSet.
353 """
354 pw._constructed = False
355 pw.construct()
356
357
358 def create_access_function(var):
359 """
360 This method returns a function that returns a component by calling
361 it rather than indexing it
362 """
363 def _fun(*args):
364 return var[args]
365 return _fun
366
367
368 def create_partial_expression(scheme, expr, ind, loc):
369 """
370 This method returns a function which applies a discretization scheme
371 to an expression along a particular indexind set. This is admittedly a
372 convoluted looking implementation. The idea is that we only apply a
373 discretization scheme to one indexing set at a time but we also want
374 the function to be expanded over any other indexing sets.
375 """
376 def _fun(*args):
377 return scheme(lambda i: expr(*(args[0:loc] + (i,) + args[loc + 1:])),
378 ind)
379 return lambda *args: _fun(*args)(args[loc])
380
381
382 def add_discretization_equations(block, d):
383 """
384 Adds the discretization equations for DerivativeVar d to the Block block.
385 Because certain indices will be valid for some discretization schemes and
386 not others, we skip any constraints which raise an IndexError.
387 """
388
389 def _disc_eq(m, *args):
390 try:
391 return d[args] == d._expr(*args)
392 except IndexError:
393 return Constraint.Skip
394
395 if d.dim() == 1:
396 block.add_component(d.local_name + '_disc_eq',
397 Constraint(d._index, rule=_disc_eq))
398 else:
399 block.add_component(d.local_name + '_disc_eq',
400 Constraint(*d._implicit_subsets, rule=_disc_eq))
401
402
403 def add_continuity_equations(block, d, i, loc):
404 """
405 Adds continuity equations in the case that the polynomial basis function
406 does not have a root at the finite element boundary
407 """
408 svar = d.get_state_var()
409 nme = svar.local_name + '_' + i.local_name + '_cont_eq'
410 if block.find_component(nme) is not None:
411 return
412
413 def _cont_exp(v, s):
414 ncp = s.get_discretization_info()['ncp']
415 afinal = s.get_discretization_info()['afinal']
416
417 def _fun(i):
418 tmp = sorted(s)
419 idx = tmp.index(i)
420 low = s.get_lower_element_boundary(i)
421 if i != low or idx == 0:
422 raise IndexError("list index out of range")
423 low = s.get_lower_element_boundary(tmp[idx - 1])
424 lowidx = tmp.index(low)
425 return sum(v(tmp[lowidx + j]) * afinal[j] for j in range(ncp + 1))
426 return _fun
427 expr = create_partial_expression(_cont_exp, create_access_function(svar),
428 i, loc)
429
430 def _cont_eq(m, *args):
431 try:
432 return svar[args] == expr(*args)
433 except IndexError:
434 return Constraint.Skip
435
436 if d.dim() == 1:
437 block.add_component(nme, Constraint(d._index, rule=_cont_eq))
438 else:
439 block.add_component(nme, Constraint(*d._implicit_subsets,
440 rule=_cont_eq))
441
442
443 def block_fully_discretized(b):
444 """
445 Checks to see if all ContinuousSets in a block have been discretized
446 """
447
448 for i in itervalues(b.component_map(ContinuousSet)):
449 if 'scheme' not in i.get_discretization_info():
450 return False
451 return True
452
453
454 def get_index_information(var, ds):
455 """
456 This method will find the index location of the set ds in the var,
457 return a list of the non_ds indices and return a function that can be
458 used to access specific indices in var indexed by a ContinuousSet by
459 specifying the finite element and collocation point. Users of this
460 method should have already confirmed that ds is an indexing set of var
461 and that it's a ContinuousSet
462 """
463
464 # Find index order of ContinuousSet in the variable
465 indargs = []
466 dsindex = 0
467 tmpds2 = None
468
469 if var.dim() != 1:
470 indCount = 0
471 for index in var._implicit_subsets:
472 if isinstance(index, ContinuousSet):
473 if index == ds:
474 dsindex = indCount
475 else:
476 # If var is indexed by multiple ContinuousSets treat
477 # other ContinuousSets like a normal indexing set
478 indargs.append(index)
479 indCount += 1 # A ContinuousSet must be one dimensional
480 else:
481 indargs.append(index)
482 indCount += index.dimen
483
484 if indargs == []:
485 non_ds = (None,)
486 elif len(indargs) > 1:
487 non_ds = tuple(a for a in indargs)
488 else:
489 non_ds = (indargs[0],)
490
491 if None in non_ds:
492 tmpidx = (None,)
493 elif len(non_ds) == 1:
494 tmpidx = non_ds[0]
495 else:
496 tmpidx = non_ds[0].cross(*non_ds[1:])
497
498 # Lambda function used to generate the desired index
499 # more concisely
500 idx = lambda n, i, k: _get_idx(dsindex, ds, n, i, k)
501
502 info = dict()
503 info['non_ds'] = tmpidx
504 info['index function'] = idx
505 return info
506
507
508 def _get_idx(l, ds, n, i, k):
509 """
510 This function returns the appropriate index for a variable
511 indexed by a differential set. It's needed because the collocation
512 constraints are indexed by finite element and collocation point
513 however a ContinuousSet contains a list of all the discretization
514 points and is not separated into finite elements and collocation
515 points.
516 """
517 t = sorted(ds)
518 tmp = t.index(ds._fe[i])
519 tik = t[tmp + k]
520 if n is None:
521 return tik
522 else:
523 tmpn = n
524 if not isinstance(n, tuple):
525 tmpn = (n,)
526 return tmpn[0:l] + (tik,) + tmpn[l:]
527
```
--- END FILES ---
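The most opaque piece of the listing is `create_partial_expression` (file lines 368-379), whose docstring itself calls the implementation convoluted: it freezes every index except the one at position `loc`, hands the scheme a one-argument accessor along that index, and finally evaluates the scheme's result at the original index value. The sketch below mirrors that composition with a plain dictionary and an invented toy scheme in place of Pyomo components, so the data and helper names are illustrative only.

```python
# Stand-alone illustration of the create_partial_expression pattern from the
# listing above; the data, accessor, and "scheme" are invented stand-ins.
def backward_difference(f, ind):
    # Toy scheme: given a one-argument accessor along the chosen index and the
    # index set, return a function that evaluates a backward difference.
    pts = sorted(ind)
    def _expr(i):
        k = pts.index(i)
        return f(pts[k]) - f(pts[k - 1])
    return _expr

def create_partial_expression(scheme, expr, ind, loc):
    def _fun(*args):
        return scheme(lambda i: expr(*(args[0:loc] + (i,) + args[loc + 1:])), ind)
    return lambda *args: _fun(*args)(args[loc])

data = {(0, "a"): 1.0, (1, "a"): 4.0, (2, "a"): 9.0}
dexpr = create_partial_expression(
    backward_difference, lambda *a: data[a], ind=[0, 1, 2], loc=0
)
print(dexpr(2, "a"))  # prints 5.0, i.e. data[(2, "a")] - data[(1, "a")]
```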
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pyomo/dae/misc.py b/pyomo/dae/misc.py
--- a/pyomo/dae/misc.py
+++ b/pyomo/dae/misc.py
@@ -202,6 +202,12 @@
if comp.type() is Param:
return
+ # Skip components that do not have a 'dim' attribute. This assumes that
+ # all components that could be indexed by a ContinuousSet have the 'dim'
+ # attribute
+ if not hasattr(comp, 'dim'):
+ return
+
# Components indexed by a ContinuousSet must have a dimension of at
# least 1
if comp.dim() == 0:
| {"golden_diff": "diff --git a/pyomo/dae/misc.py b/pyomo/dae/misc.py\n--- a/pyomo/dae/misc.py\n+++ b/pyomo/dae/misc.py\n@@ -202,6 +202,12 @@\n if comp.type() is Param:\n return\n \n+ # Skip components that do not have a 'dim' attribute. This assumes that\n+ # all components that could be indexed by a ContinuousSet have the 'dim'\n+ # attribute\n+ if not hasattr(comp, 'dim'):\n+ return\n+\n # Components indexed by a ContinuousSet must have a dimension of at\n # least 1\n if comp.dim() == 0:\n", "issue": "ExternalFunction and DAE Transformation\nI think there is a problem applying DAE transformations to Pyomo models with ExternalFunction objects. I can come up with a simpler demonstration, but hopefully the trace below is good enough.\r\n\r\njeslick@pds04:~/git/eslickj/models/idaes_models/process/solvent/MEA_simple> python main.py \r\nTraceback (most recent call last):\r\n File \"main.py\", line 96, in <module>\r\n flowsheet = init(filename=args.load_json)\r\n File \"main.py\", line 34, in init\r\n flowsheet = MeaSheet(name='MEA_Model', solver=solver)\r\n File \"/imports/users/jeslick/git/eslickj/models/idaes_models/process/solvent/MEA_simple/flowsheet.py\", line 51, in __init__\r\n FlowsheetModel.__init__(self, *args, **kwargs)\r\n File \"/imports/users/jeslick/git/eslickj/models/idaes_models/core/flowsheet_model.py\", line 29, in __init__\r\n self.build()\r\n File \"/imports/users/jeslick/git/eslickj/models/idaes_models/process/solvent/MEA_simple/flowsheet.py\", line 341, in build\r\n doc=\"CO2 absorber\"))\r\n File \"/imports/users/jeslick/git/eslickj/models/idaes_models/unit/solvent/MEA_simple/column.py\", line 123, in __init__\r\n self.build()\r\n File \"/imports/users/jeslick/git/eslickj/models/idaes_models/unit/solvent/MEA_simple/column.py\", line 164, in build\r\n self._fd_transform()\r\n File \"/imports/users/jeslick/git/eslickj/models/idaes_models/unit/solvent/MEA_simple/column.py\", line 347, in _fd_transform\r\n blk, nfe=self.nfe, wrt=blk.z, scheme='BACKWARD')\r\n File \"/imports/users/jeslick/anaconda2/lib/python2.7/site-packages/pyomo/core/base/plugin.py\", line 330, in apply_to\r\n self._apply_to(model, **kwds)\r\n File \"/imports/users/jeslick/anaconda2/lib/python2.7/site-packages/pyomo/dae/plugins/finitedifference.py\", line 170, in _apply_to\r\n self._transformBlock(block,currentds)\r\n File \"/imports/users/jeslick/anaconda2/lib/python2.7/site-packages/pyomo/dae/plugins/finitedifference.py\", line 199, in _transformBlock\r\n update_contset_indexed_component(c)\r\n File \"/imports/users/jeslick/anaconda2/lib/python2.7/site-packages/pyomo/dae/misc.py\", line 115, in update_contset_indexed_component\r\n if comp.dim() == 1:\r\nAttributeError: 'AMPLExternalFunction' object has no attribute 'dim'\r\n \n", "before_files": [{"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and \n# Engineering Solutions of Sandia, LLC, the U.S. 
Government retains certain \n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\nimport logging\n\nfrom pyomo.core import Suffix, Var, Constraint, Piecewise, Block\nfrom pyomo.core import Expression, Param\nfrom pyomo.core.base.indexed_component import IndexedComponent\nfrom pyomo.core.base.misc import apply_indexed_rule\nfrom pyomo.core.base.block import _BlockData, IndexedBlock\nfrom pyomo.dae import ContinuousSet, DerivativeVar, DAE_Error\nfrom pyomo.core.kernel.component_map import ComponentMap\nfrom pyomo.core.base.block import SortComponents\nfrom pyomo.common.log import LoggingIntercept\n\nfrom six import iterkeys, itervalues, iteritems, StringIO\n\nlogger = logging.getLogger('pyomo.dae')\n\n\ndef generate_finite_elements(ds, nfe):\n \"\"\"\n This function first checks to see if the number of finite elements\n in the differential set is equal to nfe. If the number of finite\n elements is less than nfe, additional points will be generated. If\n the number of finite elements is greater than or equal to nfe the\n differential set will not be modified\n \"\"\"\n if (len(ds) - 1) >= nfe:\n # In this case the differentialset already contains the\n # desired number or more than the desired number of finite\n # elements so no additional points are needed.\n return\n elif len(ds) == 2:\n # If only bounds have been specified on the differentialset we\n # generate the desired number of finite elements by\n # spreading them evenly over the interval\n step = (max(ds) - min(ds)) / float(nfe)\n tmp = min(ds) + step\n while round(tmp, 6) <= round((max(ds) - step), 6):\n ds.add(round(tmp, 6))\n tmp += step\n ds.set_changed(True)\n ds._sort()\n ds._fe = list(ds)\n return\n else:\n # This is the case where some points have been specified\n # inside of the bounds however the desired number of finite\n # elements has not been met. We first look at the step sizes\n # between the existing points. Then an additional point\n # is placed at the midpoint of the largest step. This\n # process is repeated until we have achieved the desired\n # number of finite elements. If there are multiple \"largest steps\"\n # the point will be placed at the first occurance of the\n # largest step\n\n addpts = nfe - (len(ds) - 1)\n while addpts > 0:\n _add_point(ds)\n addpts -= 1\n ds.set_changed(True)\n ds._sort()\n ds._fe = list(ds)\n return\n\n\ndef _add_point(ds):\n sortds = sorted(ds)\n maxstep = sortds[1] - sortds[0]\n maxloc = 0\n for i in range(2, len(sortds)):\n if (sortds[i] - sortds[i - 1]) > maxstep:\n maxstep = sortds[i] - sortds[i - 1]\n maxloc = i - 1\n\n ds.add(round((sortds[maxloc] + maxstep / 2.0), 6))\n\n\ndef generate_colloc_points(ds, tau):\n \"\"\"\n This function adds collocation points between the finite elements\n in the differential set\n \"\"\"\n fes = sorted(ds)\n for i in range(1, len(fes)):\n h = fes[i] - fes[i - 1]\n for j in range(len(tau)):\n if tau[j] == 1 or tau[j] == 0:\n continue\n pt = fes[i - 1] + h * tau[j]\n pt = round(pt, 6)\n if pt not in ds:\n ds.add(pt)\n ds.set_changed(True)\n ds._sort()\n\n\ndef expand_components(block):\n \"\"\"\n Loop over block components and try expanding them. If expansion fails\n then save the component and try again later. 
This function has some\n built-in robustness for block-hierarchical models with circular\n references but will not work for all cases.\n \"\"\"\n\n # expansion_map is used to map components to the functions used to\n # expand them so that the update_contset_indexed_component function\n # logic only has to be called once even in the case where we have to\n # re-try expanding components due to circular references\n expansion_map = ComponentMap()\n redo_expansion = list()\n\n # Record the missing BlockData before expanding components. This is for\n # the case where a ContinuousSet indexed Block is used in a Constraint.\n # If the Constraint is expanded before the Block then the missing\n # BlockData will be added to the indexed Block but will not be\n # constructed correctly.\n for blk in block.component_objects(Block, descend_into=True):\n missing_idx = set(blk._index) - set(iterkeys(blk._data))\n if missing_idx:\n blk._dae_missing_idx = missing_idx\n\n # Wrap this whole process in a try block in order to ensure that errors\n # swallowed by the LoggingIntercept context below are re-raised if the\n # discretization encounters an error it isn't expecting.\n try:\n\n # Intercept logging to suppress Error messages arising from failed\n # constraint rules. These error messages get logged even though the\n # AttributeError causing the error is caught and handled by this\n # function when expanding discretized models. We maintain a stream\n # of the intercepted logging messages which will be printed if an\n # unexpected exception is raised.\n buf = StringIO()\n with LoggingIntercept(buf, 'pyomo.core', logging.ERROR):\n\n # Identify components that need to be expanded and try expanding\n # them\n for c in block.component_objects(descend_into=True,\n sort=SortComponents.declOrder):\n try:\n update_contset_indexed_component(c, expansion_map)\n except AttributeError:\n redo_expansion.append(c)\n\n # Re-try expansion on any components that failed the first time.\n # This is indicative of circular component references and not\n # expanding components in the correct order the first time\n # through.\n N = len(redo_expansion)\n while N:\n for i in range(N):\n c = redo_expansion.pop()\n try:\n expansion_map[c](c)\n except AttributeError:\n redo_expansion.append(c)\n if len(redo_expansion) == N:\n raise DAE_Error(\"Unable to fully discretize %s. Possible \"\n \"circular references detected between \"\n \"components %s. Reformulate your model to\"\n \" remove circular references or apply a \"\n \"discretization transformation before \"\n \"linking blocks together.\"\n % (block, str(redo_expansion)))\n\n N = len(redo_expansion)\n\n except Exception as e:\n logger.error(buf.getvalue())\n raise\n\ndef update_contset_indexed_component(comp, expansion_map):\n \"\"\"\n Update any model components which are indexed by a ContinuousSet that\n has changed\n \"\"\"\n\n # This implemenation will *NOT* check for or update\n # components which use a ContinuousSet implicitly. ex) an\n # objective function which iterates through a ContinuousSet and\n # sums the squared error. If you use a ContinuousSet implicitly\n # you must initialize it with every index you would like to have\n # access to!\n\n if comp.type() is Suffix:\n return\n \n # Params indexed by a ContinuousSet should include an initialize\n # and/or default rule which will be called automatically when the\n # parameter value at a new point in the ContinuousSet is\n # requested. 
Therefore, no special processing is required for\n # Params.\n if comp.type() is Param:\n return\n\n # Components indexed by a ContinuousSet must have a dimension of at\n # least 1\n if comp.dim() == 0:\n return\n\n # Extract the indexing sets. Must treat components with a single\n # index separately from components with multiple indexing sets.\n if comp._implicit_subsets is None:\n indexset = [comp._index]\n else:\n indexset = comp._implicit_subsets\n\n for s in indexset:\n if s.type() == ContinuousSet and s.get_changed():\n if isinstance(comp, Var): # Don't use the type() method here\n # because we want to catch DerivativeVar components as well\n # as Var components\n expansion_map[comp] = _update_var\n _update_var(comp)\n elif comp.type() == Constraint:\n expansion_map[comp] = _update_constraint\n _update_constraint(comp)\n elif comp.type() == Expression:\n expansion_map[comp] = _update_expression\n _update_expression(comp)\n elif isinstance(comp, Piecewise):\n expansion_map[comp] =_update_piecewise\n _update_piecewise(comp)\n elif comp.type() == Block:\n expansion_map[comp] = _update_block\n _update_block(comp) \n else:\n raise TypeError(\n \"Found component %s of type %s indexed \"\n \"by a ContinuousSet. Components of this type are \"\n \"not currently supported by the automatic \"\n \"discretization transformation in pyomo.dae. \"\n \"Try adding the component to the model \"\n \"after discretizing. Alert the pyomo developers \"\n \"for more assistance.\" % (str(comp), comp.type()))\n\n\ndef _update_var(v):\n \"\"\"\n This method will construct any additional indices in a variable\n resulting from the discretization of a ContinuousSet.\n \"\"\"\n\n # Note: This is not required it is handled by the _default method on\n # Var (which is now a IndexedComponent). However, it\n # would be much slower to rely on that method to generate new\n # _VarData for a large number of new indices.\n new_indices = set(v._index) - set(iterkeys(v._data))\n for index in new_indices:\n v.add(index)\n\n\ndef _update_constraint(con):\n \"\"\"\n This method will construct any additional indices in a constraint\n resulting from the discretization of a ContinuousSet.\n \"\"\"\n\n _rule = con.rule\n _parent = con._parent()\n for i in con.index_set():\n if i not in con:\n # Code taken from the construct() method of Constraint\n con.add(i, apply_indexed_rule(con, _rule, _parent, i))\n\n\ndef _update_expression(expre):\n \"\"\"\n This method will construct any additional indices in an expression\n resulting from the discretization of a ContinuousSet.\n \"\"\"\n _rule = expre._init_rule\n _parent = expre._parent()\n for i in expre.index_set():\n if i not in expre:\n # Code taken from the construct() method of Expression\n expre.add(i, apply_indexed_rule(expre, _rule, _parent, i))\n\n\ndef _update_block(blk):\n \"\"\"\n This method will construct any additional indices in a block\n resulting from the discretization of a ContinuousSet. For\n Block-derived components we check if the Block construct method has\n been overridden. If not then we update it like a regular block. If\n construct has been overridden then we try to call the component's\n update_after_discretization method. If the component hasn't\n implemented this method then we throw a warning and try to update it\n like a normal block. 
The issue, when construct is overridden, is that\n anything could be happening and we can't automatically assume that\n treating the block-derived component like a normal block will be\n sufficient to update it correctly.\n\n \"\"\"\n \n # Check if Block construct method is overridden\n # getattr needed below for Python 2, 3 compatibility\n if blk.construct.__func__ is not getattr(IndexedBlock.construct,\n '__func__',\n IndexedBlock.construct):\n # check for custom update function\n if hasattr(blk, 'update_after_discretization'):\n blk.update_after_discretization()\n return\n else:\n logger.warning(\n 'DAE(misc): Attempting to apply a discretization '\n 'transformation to the Block-derived component \"%s\". The '\n 'component overrides the Block construct method but no '\n 'update_after_discretization() function was found. Will '\n 'attempt to update as a standard Block but user should verify '\n 'that the component was expanded correctly. To suppress this '\n 'warning, please provide an update_after_discretization() '\n 'function on Block-derived components that override '\n 'construct()' % blk.name)\n\n # Code taken from the construct() method of Block\n missing_idx = getattr(blk, '_dae_missing_idx', set([]))\n for idx in list(missing_idx):\n _block = blk[idx]\n obj = apply_indexed_rule(\n blk, blk._rule, _block, idx, blk._options)\n \n if isinstance(obj, _BlockData) and obj is not _block:\n # If the user returns a block, use their block instead\n # of the empty one we just created.\n for c in list(obj.component_objects(descend_into=False)):\n obj.del_component(c)\n _block.add_component(c.local_name, c)\n # transfer over any other attributes that are not components\n for name, val in iteritems(obj.__dict__):\n if not hasattr(_block, name) and not hasattr(blk, name):\n super(_BlockData, _block).__setattr__(name, val)\n\n # Remove book-keeping data after Block is discretized\n if hasattr(blk, '_dae_missing_idx'):\n del blk._dae_missing_idx\n\n\ndef _update_piecewise(pw):\n \"\"\"\n This method will construct any additional indices in a Piecewise\n object resulting from the discretization of a ContinuousSet.\n \"\"\"\n pw._constructed = False\n pw.construct()\n\n\ndef create_access_function(var):\n \"\"\"\n This method returns a function that returns a component by calling\n it rather than indexing it\n \"\"\"\n def _fun(*args):\n return var[args]\n return _fun\n\n\ndef create_partial_expression(scheme, expr, ind, loc):\n \"\"\"\n This method returns a function which applies a discretization scheme\n to an expression along a particular indexind set. This is admittedly a\n convoluted looking implementation. 
The idea is that we only apply a\n discretization scheme to one indexing set at a time but we also want\n the function to be expanded over any other indexing sets.\n \"\"\"\n def _fun(*args):\n return scheme(lambda i: expr(*(args[0:loc] + (i,) + args[loc + 1:])),\n ind)\n return lambda *args: _fun(*args)(args[loc])\n\n\ndef add_discretization_equations(block, d):\n \"\"\"\n Adds the discretization equations for DerivativeVar d to the Block block.\n Because certain indices will be valid for some discretization schemes and\n not others, we skip any constraints which raise an IndexError.\n \"\"\"\n\n def _disc_eq(m, *args):\n try:\n return d[args] == d._expr(*args)\n except IndexError:\n return Constraint.Skip\n\n if d.dim() == 1:\n block.add_component(d.local_name + '_disc_eq',\n Constraint(d._index, rule=_disc_eq))\n else:\n block.add_component(d.local_name + '_disc_eq',\n Constraint(*d._implicit_subsets, rule=_disc_eq))\n\n\ndef add_continuity_equations(block, d, i, loc):\n \"\"\"\n Adds continuity equations in the case that the polynomial basis function\n does not have a root at the finite element boundary\n \"\"\"\n svar = d.get_state_var()\n nme = svar.local_name + '_' + i.local_name + '_cont_eq'\n if block.find_component(nme) is not None:\n return\n\n def _cont_exp(v, s):\n ncp = s.get_discretization_info()['ncp']\n afinal = s.get_discretization_info()['afinal']\n\n def _fun(i):\n tmp = sorted(s)\n idx = tmp.index(i)\n low = s.get_lower_element_boundary(i)\n if i != low or idx == 0:\n raise IndexError(\"list index out of range\")\n low = s.get_lower_element_boundary(tmp[idx - 1])\n lowidx = tmp.index(low)\n return sum(v(tmp[lowidx + j]) * afinal[j] for j in range(ncp + 1))\n return _fun\n expr = create_partial_expression(_cont_exp, create_access_function(svar),\n i, loc)\n\n def _cont_eq(m, *args):\n try:\n return svar[args] == expr(*args)\n except IndexError:\n return Constraint.Skip\n\n if d.dim() == 1:\n block.add_component(nme, Constraint(d._index, rule=_cont_eq))\n else:\n block.add_component(nme, Constraint(*d._implicit_subsets,\n rule=_cont_eq))\n\n\ndef block_fully_discretized(b):\n \"\"\"\n Checks to see if all ContinuousSets in a block have been discretized\n \"\"\"\n\n for i in itervalues(b.component_map(ContinuousSet)):\n if 'scheme' not in i.get_discretization_info():\n return False\n return True\n\n\ndef get_index_information(var, ds):\n \"\"\"\n This method will find the index location of the set ds in the var,\n return a list of the non_ds indices and return a function that can be\n used to access specific indices in var indexed by a ContinuousSet by\n specifying the finite element and collocation point. 
Users of this\n method should have already confirmed that ds is an indexing set of var\n and that it's a ContinuousSet\n \"\"\"\n\n # Find index order of ContinuousSet in the variable\n indargs = []\n dsindex = 0\n tmpds2 = None\n\n if var.dim() != 1:\n indCount = 0\n for index in var._implicit_subsets:\n if isinstance(index, ContinuousSet):\n if index == ds:\n dsindex = indCount\n else:\n # If var is indexed by multiple ContinuousSets treat\n # other ContinuousSets like a normal indexing set\n indargs.append(index)\n indCount += 1 # A ContinuousSet must be one dimensional\n else:\n indargs.append(index)\n indCount += index.dimen\n\n if indargs == []:\n non_ds = (None,)\n elif len(indargs) > 1:\n non_ds = tuple(a for a in indargs)\n else:\n non_ds = (indargs[0],)\n\n if None in non_ds:\n tmpidx = (None,)\n elif len(non_ds) == 1:\n tmpidx = non_ds[0]\n else:\n tmpidx = non_ds[0].cross(*non_ds[1:])\n\n # Lambda function used to generate the desired index\n # more concisely\n idx = lambda n, i, k: _get_idx(dsindex, ds, n, i, k)\n\n info = dict()\n info['non_ds'] = tmpidx\n info['index function'] = idx\n return info\n\n\ndef _get_idx(l, ds, n, i, k):\n \"\"\"\n This function returns the appropriate index for a variable\n indexed by a differential set. It's needed because the collocation\n constraints are indexed by finite element and collocation point\n however a ContinuousSet contains a list of all the discretization\n points and is not separated into finite elements and collocation\n points.\n \"\"\"\n t = sorted(ds)\n tmp = t.index(ds._fe[i])\n tik = t[tmp + k]\n if n is None:\n return tik\n else:\n tmpn = n\n if not isinstance(n, tuple):\n tmpn = (n,)\n return tmpn[0:l] + (tik,) + tmpn[l:]\n", "path": "pyomo/dae/misc.py"}], "after_files": [{"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and \n# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain \n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\nimport logging\n\nfrom pyomo.core import Suffix, Var, Constraint, Piecewise, Block\nfrom pyomo.core import Expression, Param\nfrom pyomo.core.base.indexed_component import IndexedComponent\nfrom pyomo.core.base.misc import apply_indexed_rule\nfrom pyomo.core.base.block import _BlockData, IndexedBlock\nfrom pyomo.dae import ContinuousSet, DerivativeVar, DAE_Error\nfrom pyomo.core.kernel.component_map import ComponentMap\nfrom pyomo.core.base.block import SortComponents\nfrom pyomo.common.log import LoggingIntercept\n\nfrom six import iterkeys, itervalues, iteritems, StringIO\n\nlogger = logging.getLogger('pyomo.dae')\n\n\ndef generate_finite_elements(ds, nfe):\n \"\"\"\n This function first checks to see if the number of finite elements\n in the differential set is equal to nfe. If the number of finite\n elements is less than nfe, additional points will be generated. 
If\n the number of finite elements is greater than or equal to nfe the\n differential set will not be modified\n \"\"\"\n if (len(ds) - 1) >= nfe:\n # In this case the differentialset already contains the\n # desired number or more than the desired number of finite\n # elements so no additional points are needed.\n return\n elif len(ds) == 2:\n # If only bounds have been specified on the differentialset we\n # generate the desired number of finite elements by\n # spreading them evenly over the interval\n step = (max(ds) - min(ds)) / float(nfe)\n tmp = min(ds) + step\n while round(tmp, 6) <= round((max(ds) - step), 6):\n ds.add(round(tmp, 6))\n tmp += step\n ds.set_changed(True)\n ds._sort()\n ds._fe = list(ds)\n return\n else:\n # This is the case where some points have been specified\n # inside of the bounds however the desired number of finite\n # elements has not been met. We first look at the step sizes\n # between the existing points. Then an additional point\n # is placed at the midpoint of the largest step. This\n # process is repeated until we have achieved the desired\n # number of finite elements. If there are multiple \"largest steps\"\n # the point will be placed at the first occurance of the\n # largest step\n\n addpts = nfe - (len(ds) - 1)\n while addpts > 0:\n _add_point(ds)\n addpts -= 1\n ds.set_changed(True)\n ds._sort()\n ds._fe = list(ds)\n return\n\n\ndef _add_point(ds):\n sortds = sorted(ds)\n maxstep = sortds[1] - sortds[0]\n maxloc = 0\n for i in range(2, len(sortds)):\n if (sortds[i] - sortds[i - 1]) > maxstep:\n maxstep = sortds[i] - sortds[i - 1]\n maxloc = i - 1\n\n ds.add(round((sortds[maxloc] + maxstep / 2.0), 6))\n\n\ndef generate_colloc_points(ds, tau):\n \"\"\"\n This function adds collocation points between the finite elements\n in the differential set\n \"\"\"\n fes = sorted(ds)\n for i in range(1, len(fes)):\n h = fes[i] - fes[i - 1]\n for j in range(len(tau)):\n if tau[j] == 1 or tau[j] == 0:\n continue\n pt = fes[i - 1] + h * tau[j]\n pt = round(pt, 6)\n if pt not in ds:\n ds.add(pt)\n ds.set_changed(True)\n ds._sort()\n\n\ndef expand_components(block):\n \"\"\"\n Loop over block components and try expanding them. If expansion fails\n then save the component and try again later. This function has some\n built-in robustness for block-hierarchical models with circular\n references but will not work for all cases.\n \"\"\"\n\n # expansion_map is used to map components to the functions used to\n # expand them so that the update_contset_indexed_component function\n # logic only has to be called once even in the case where we have to\n # re-try expanding components due to circular references\n expansion_map = ComponentMap()\n redo_expansion = list()\n\n # Record the missing BlockData before expanding components. This is for\n # the case where a ContinuousSet indexed Block is used in a Constraint.\n # If the Constraint is expanded before the Block then the missing\n # BlockData will be added to the indexed Block but will not be\n # constructed correctly.\n for blk in block.component_objects(Block, descend_into=True):\n missing_idx = set(blk._index) - set(iterkeys(blk._data))\n if missing_idx:\n blk._dae_missing_idx = missing_idx\n\n # Wrap this whole process in a try block in order to ensure that errors\n # swallowed by the LoggingIntercept context below are re-raised if the\n # discretization encounters an error it isn't expecting.\n try:\n\n # Intercept logging to suppress Error messages arising from failed\n # constraint rules. 
These error messages get logged even though the\n # AttributeError causing the error is caught and handled by this\n # function when expanding discretized models. We maintain a stream\n # of the intercepted logging messages which will be printed if an\n # unexpected exception is raised.\n buf = StringIO()\n with LoggingIntercept(buf, 'pyomo.core', logging.ERROR):\n\n # Identify components that need to be expanded and try expanding\n # them\n for c in block.component_objects(descend_into=True,\n sort=SortComponents.declOrder):\n try:\n update_contset_indexed_component(c, expansion_map)\n except AttributeError:\n redo_expansion.append(c)\n\n # Re-try expansion on any components that failed the first time.\n # This is indicative of circular component references and not\n # expanding components in the correct order the first time\n # through.\n N = len(redo_expansion)\n while N:\n for i in range(N):\n c = redo_expansion.pop()\n try:\n expansion_map[c](c)\n except AttributeError:\n redo_expansion.append(c)\n if len(redo_expansion) == N:\n raise DAE_Error(\"Unable to fully discretize %s. Possible \"\n \"circular references detected between \"\n \"components %s. Reformulate your model to\"\n \" remove circular references or apply a \"\n \"discretization transformation before \"\n \"linking blocks together.\"\n % (block, str(redo_expansion)))\n\n N = len(redo_expansion)\n\n except Exception as e:\n logger.error(buf.getvalue())\n raise\n\ndef update_contset_indexed_component(comp, expansion_map):\n \"\"\"\n Update any model components which are indexed by a ContinuousSet that\n has changed\n \"\"\"\n\n # This implemenation will *NOT* check for or update\n # components which use a ContinuousSet implicitly. ex) an\n # objective function which iterates through a ContinuousSet and\n # sums the squared error. If you use a ContinuousSet implicitly\n # you must initialize it with every index you would like to have\n # access to!\n\n if comp.type() is Suffix:\n return\n \n # Params indexed by a ContinuousSet should include an initialize\n # and/or default rule which will be called automatically when the\n # parameter value at a new point in the ContinuousSet is\n # requested. Therefore, no special processing is required for\n # Params.\n if comp.type() is Param:\n return\n\n # Skip components that do not have a 'dim' attribute. This assumes that\n # all components that could be indexed by a ContinuousSet have the 'dim'\n # attribute\n if not hasattr(comp, 'dim'):\n return\n\n # Components indexed by a ContinuousSet must have a dimension of at\n # least 1\n if comp.dim() == 0:\n return\n\n # Extract the indexing sets. 
Must treat components with a single\n # index separately from components with multiple indexing sets.\n if comp._implicit_subsets is None:\n indexset = [comp._index]\n else:\n indexset = comp._implicit_subsets\n\n for s in indexset:\n if s.type() == ContinuousSet and s.get_changed():\n if isinstance(comp, Var): # Don't use the type() method here\n # because we want to catch DerivativeVar components as well\n # as Var components\n expansion_map[comp] = _update_var\n _update_var(comp)\n elif comp.type() == Constraint:\n expansion_map[comp] = _update_constraint\n _update_constraint(comp)\n elif comp.type() == Expression:\n expansion_map[comp] = _update_expression\n _update_expression(comp)\n elif isinstance(comp, Piecewise):\n expansion_map[comp] =_update_piecewise\n _update_piecewise(comp)\n elif comp.type() == Block:\n expansion_map[comp] = _update_block\n _update_block(comp) \n else:\n raise TypeError(\n \"Found component %s of type %s indexed \"\n \"by a ContinuousSet. Components of this type are \"\n \"not currently supported by the automatic \"\n \"discretization transformation in pyomo.dae. \"\n \"Try adding the component to the model \"\n \"after discretizing. Alert the pyomo developers \"\n \"for more assistance.\" % (str(comp), comp.type()))\n\n\ndef _update_var(v):\n \"\"\"\n This method will construct any additional indices in a variable\n resulting from the discretization of a ContinuousSet.\n \"\"\"\n\n # Note: This is not required it is handled by the _default method on\n # Var (which is now a IndexedComponent). However, it\n # would be much slower to rely on that method to generate new\n # _VarData for a large number of new indices.\n new_indices = set(v._index) - set(iterkeys(v._data))\n for index in new_indices:\n v.add(index)\n\n\ndef _update_constraint(con):\n \"\"\"\n This method will construct any additional indices in a constraint\n resulting from the discretization of a ContinuousSet.\n \"\"\"\n\n _rule = con.rule\n _parent = con._parent()\n for i in con.index_set():\n if i not in con:\n # Code taken from the construct() method of Constraint\n con.add(i, apply_indexed_rule(con, _rule, _parent, i))\n\n\ndef _update_expression(expre):\n \"\"\"\n This method will construct any additional indices in an expression\n resulting from the discretization of a ContinuousSet.\n \"\"\"\n _rule = expre._init_rule\n _parent = expre._parent()\n for i in expre.index_set():\n if i not in expre:\n # Code taken from the construct() method of Expression\n expre.add(i, apply_indexed_rule(expre, _rule, _parent, i))\n\n\ndef _update_block(blk):\n \"\"\"\n This method will construct any additional indices in a block\n resulting from the discretization of a ContinuousSet. For\n Block-derived components we check if the Block construct method has\n been overridden. If not then we update it like a regular block. If\n construct has been overridden then we try to call the component's\n update_after_discretization method. If the component hasn't\n implemented this method then we throw a warning and try to update it\n like a normal block. 
The issue, when construct is overridden, is that\n anything could be happening and we can't automatically assume that\n treating the block-derived component like a normal block will be\n sufficient to update it correctly.\n\n \"\"\"\n \n # Check if Block construct method is overridden\n # getattr needed below for Python 2, 3 compatibility\n if blk.construct.__func__ is not getattr(IndexedBlock.construct,\n '__func__',\n IndexedBlock.construct):\n # check for custom update function\n if hasattr(blk, 'update_after_discretization'):\n blk.update_after_discretization()\n return\n else:\n logger.warning(\n 'DAE(misc): Attempting to apply a discretization '\n 'transformation to the Block-derived component \"%s\". The '\n 'component overrides the Block construct method but no '\n 'update_after_discretization() function was found. Will '\n 'attempt to update as a standard Block but user should verify '\n 'that the component was expanded correctly. To suppress this '\n 'warning, please provide an update_after_discretization() '\n 'function on Block-derived components that override '\n 'construct()' % blk.name)\n\n # Code taken from the construct() method of Block\n missing_idx = getattr(blk, '_dae_missing_idx', set([]))\n for idx in list(missing_idx):\n _block = blk[idx]\n obj = apply_indexed_rule(\n blk, blk._rule, _block, idx, blk._options)\n \n if isinstance(obj, _BlockData) and obj is not _block:\n # If the user returns a block, use their block instead\n # of the empty one we just created.\n for c in list(obj.component_objects(descend_into=False)):\n obj.del_component(c)\n _block.add_component(c.local_name, c)\n # transfer over any other attributes that are not components\n for name, val in iteritems(obj.__dict__):\n if not hasattr(_block, name) and not hasattr(blk, name):\n super(_BlockData, _block).__setattr__(name, val)\n\n # Remove book-keeping data after Block is discretized\n if hasattr(blk, '_dae_missing_idx'):\n del blk._dae_missing_idx\n\n\ndef _update_piecewise(pw):\n \"\"\"\n This method will construct any additional indices in a Piecewise\n object resulting from the discretization of a ContinuousSet.\n \"\"\"\n pw._constructed = False\n pw.construct()\n\n\ndef create_access_function(var):\n \"\"\"\n This method returns a function that returns a component by calling\n it rather than indexing it\n \"\"\"\n def _fun(*args):\n return var[args]\n return _fun\n\n\ndef create_partial_expression(scheme, expr, ind, loc):\n \"\"\"\n This method returns a function which applies a discretization scheme\n to an expression along a particular indexind set. This is admittedly a\n convoluted looking implementation. 
The idea is that we only apply a\n discretization scheme to one indexing set at a time but we also want\n the function to be expanded over any other indexing sets.\n \"\"\"\n def _fun(*args):\n return scheme(lambda i: expr(*(args[0:loc] + (i,) + args[loc + 1:])),\n ind)\n return lambda *args: _fun(*args)(args[loc])\n\n\ndef add_discretization_equations(block, d):\n \"\"\"\n Adds the discretization equations for DerivativeVar d to the Block block.\n Because certain indices will be valid for some discretization schemes and\n not others, we skip any constraints which raise an IndexError.\n \"\"\"\n\n def _disc_eq(m, *args):\n try:\n return d[args] == d._expr(*args)\n except IndexError:\n return Constraint.Skip\n\n if d.dim() == 1:\n block.add_component(d.local_name + '_disc_eq',\n Constraint(d._index, rule=_disc_eq))\n else:\n block.add_component(d.local_name + '_disc_eq',\n Constraint(*d._implicit_subsets, rule=_disc_eq))\n\n\ndef add_continuity_equations(block, d, i, loc):\n \"\"\"\n Adds continuity equations in the case that the polynomial basis function\n does not have a root at the finite element boundary\n \"\"\"\n svar = d.get_state_var()\n nme = svar.local_name + '_' + i.local_name + '_cont_eq'\n if block.find_component(nme) is not None:\n return\n\n def _cont_exp(v, s):\n ncp = s.get_discretization_info()['ncp']\n afinal = s.get_discretization_info()['afinal']\n\n def _fun(i):\n tmp = sorted(s)\n idx = tmp.index(i)\n low = s.get_lower_element_boundary(i)\n if i != low or idx == 0:\n raise IndexError(\"list index out of range\")\n low = s.get_lower_element_boundary(tmp[idx - 1])\n lowidx = tmp.index(low)\n return sum(v(tmp[lowidx + j]) * afinal[j] for j in range(ncp + 1))\n return _fun\n expr = create_partial_expression(_cont_exp, create_access_function(svar),\n i, loc)\n\n def _cont_eq(m, *args):\n try:\n return svar[args] == expr(*args)\n except IndexError:\n return Constraint.Skip\n\n if d.dim() == 1:\n block.add_component(nme, Constraint(d._index, rule=_cont_eq))\n else:\n block.add_component(nme, Constraint(*d._implicit_subsets,\n rule=_cont_eq))\n\n\ndef block_fully_discretized(b):\n \"\"\"\n Checks to see if all ContinuousSets in a block have been discretized\n \"\"\"\n\n for i in itervalues(b.component_map(ContinuousSet)):\n if 'scheme' not in i.get_discretization_info():\n return False\n return True\n\n\ndef get_index_information(var, ds):\n \"\"\"\n This method will find the index location of the set ds in the var,\n return a list of the non_ds indices and return a function that can be\n used to access specific indices in var indexed by a ContinuousSet by\n specifying the finite element and collocation point. 
Users of this\n method should have already confirmed that ds is an indexing set of var\n and that it's a ContinuousSet\n \"\"\"\n\n # Find index order of ContinuousSet in the variable\n indargs = []\n dsindex = 0\n tmpds2 = None\n\n if var.dim() != 1:\n indCount = 0\n for index in var._implicit_subsets:\n if isinstance(index, ContinuousSet):\n if index == ds:\n dsindex = indCount\n else:\n # If var is indexed by multiple ContinuousSets treat\n # other ContinuousSets like a normal indexing set\n indargs.append(index)\n indCount += 1 # A ContinuousSet must be one dimensional\n else:\n indargs.append(index)\n indCount += index.dimen\n\n if indargs == []:\n non_ds = (None,)\n elif len(indargs) > 1:\n non_ds = tuple(a for a in indargs)\n else:\n non_ds = (indargs[0],)\n\n if None in non_ds:\n tmpidx = (None,)\n elif len(non_ds) == 1:\n tmpidx = non_ds[0]\n else:\n tmpidx = non_ds[0].cross(*non_ds[1:])\n\n # Lambda function used to generate the desired index\n # more concisely\n idx = lambda n, i, k: _get_idx(dsindex, ds, n, i, k)\n\n info = dict()\n info['non_ds'] = tmpidx\n info['index function'] = idx\n return info\n\n\ndef _get_idx(l, ds, n, i, k):\n \"\"\"\n This function returns the appropriate index for a variable\n indexed by a differential set. It's needed because the collocation\n constraints are indexed by finite element and collocation point\n however a ContinuousSet contains a list of all the discretization\n points and is not separated into finite elements and collocation\n points.\n \"\"\"\n t = sorted(ds)\n tmp = t.index(ds._fe[i])\n tik = t[tmp + k]\n if n is None:\n return tik\n else:\n tmpn = n\n if not isinstance(n, tuple):\n tmpn = (n,)\n return tmpn[0:l] + (tik,) + tmpn[l:]\n", "path": "pyomo/dae/misc.py"}]} |
gh_patches_debug_1263 | rasdani/github-patches | git_diff | urllib3__urllib3-2656 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Retry retries on fruitless ssl ImportError
### Subject
Describe the issue here.
### Environment
Describe your environment.
At least, paste here the output of:
```python
import platform
import urllib3
print("OS", platform.platform())
print("Python", platform.python_version())
print("urllib3", urllib3.__version__)
```
### Steps to Reproduce
```
Python 3.10.4 (main, Mar 24 2022, 16:12:56) [GCC 9.4.0] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import sys
>>> sys.modules["ssl"] = None
>>> import requests
>>> requests.get("https://google.com")
Traceback (most recent call last):
File "/home/graingert/.virtualenvs/testing310/lib/python3.10/site-packages/urllib3/connectionpool.py", line 692, in urlopen
conn = self._get_conn(timeout=pool_timeout)
File "/home/graingert/.virtualenvs/testing310/lib/python3.10/site-packages/urllib3/connectionpool.py", line 281, in _get_conn
return conn or self._new_conn()
File "/home/graingert/.virtualenvs/testing310/lib/python3.10/site-packages/urllib3/connectionpool.py", line 1009, in _new_conn
raise SSLError(
urllib3.exceptions.SSLError: Can't connect to HTTPS URL because the SSL module is not available.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/graingert/.virtualenvs/testing310/lib/python3.10/site-packages/requests/adapters.py", line 440, in send
resp = conn.urlopen(
File "/home/graingert/.virtualenvs/testing310/lib/python3.10/site-packages/urllib3/connectionpool.py", line 785, in urlopen
retries = retries.increment(
File "/home/graingert/.virtualenvs/testing310/lib/python3.10/site-packages/urllib3/util/retry.py", line 592, in increment
raise MaxRetryError(_pool, url, error or ResponseError(cause))
urllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='google.com', port=443): Max retries exceeded with url: / (Caused by SSLError("Can't connect to HTTPS URL because the SSL module is not available."))
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/graingert/.virtualenvs/testing310/lib/python3.10/site-packages/requests/api.py", line 75, in get
return request('get', url, params=params, **kwargs)
File "/home/graingert/.virtualenvs/testing310/lib/python3.10/site-packages/requests/api.py", line 61, in request
return session.request(method=method, url=url, **kwargs)
File "/home/graingert/.virtualenvs/testing310/lib/python3.10/site-packages/requests/sessions.py", line 529, in request
resp = self.send(prep, **send_kwargs)
File "/home/graingert/.virtualenvs/testing310/lib/python3.10/site-packages/requests/sessions.py", line 645, in send
r = adapter.send(request, **kwargs)
File "/home/graingert/.virtualenvs/testing310/lib/python3.10/site-packages/requests/adapters.py", line 517, in send
raise SSLError(e, request=request)
requests.exceptions.SSLError: HTTPSConnectionPool(host='google.com', port=443): Max retries exceeded with url: / (Caused by SSLError("Can't connect to HTTPS URL because the SSL module is not available."))
```
### Expected Behavior
only one attempt
### Actual Behavior
^
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/urllib3/connectionpool.py`
Content:
```
1 import errno
2 import logging
3 import queue
4 import sys
5 import warnings
6 from http.client import HTTPResponse as _HttplibHTTPResponse
7 from socket import timeout as SocketTimeout
8 from types import TracebackType
9 from typing import TYPE_CHECKING, Any, Mapping, Optional, Type, TypeVar, Union, overload
10
11 from ._request_methods import RequestMethods
12 from .connection import (
13 _TYPE_BODY,
14 BaseSSLError,
15 BrokenPipeError,
16 DummyConnection,
17 HTTPConnection,
18 HTTPException,
19 HTTPSConnection,
20 ProxyConfig,
21 VerifiedHTTPSConnection,
22 _wrap_proxy_error,
23 )
24 from .connection import port_by_scheme as port_by_scheme
25 from .exceptions import (
26 ClosedPoolError,
27 EmptyPoolError,
28 FullPoolError,
29 HeaderParsingError,
30 HostChangedError,
31 InsecureRequestWarning,
32 LocationValueError,
33 MaxRetryError,
34 NewConnectionError,
35 ProtocolError,
36 ProxyError,
37 ReadTimeoutError,
38 SSLError,
39 TimeoutError,
40 )
41 from .response import BaseHTTPResponse, HTTPResponse
42 from .util.connection import is_connection_dropped
43 from .util.proxy import connection_requires_http_tunnel
44 from .util.request import _TYPE_BODY_POSITION, set_file_position
45 from .util.response import assert_header_parsing
46 from .util.retry import Retry
47 from .util.ssl_match_hostname import CertificateError
48 from .util.timeout import _DEFAULT_TIMEOUT, _TYPE_DEFAULT, Timeout
49 from .util.url import Url, _encode_target
50 from .util.url import _normalize_host as normalize_host
51 from .util.url import parse_url
52 from .util.util import to_str
53
54 if TYPE_CHECKING:
55 import ssl
56
57 from typing_extensions import Literal
58
59 log = logging.getLogger(__name__)
60
61 _TYPE_TIMEOUT = Union[Timeout, float, _TYPE_DEFAULT]
62
63 _SelfT = TypeVar("_SelfT")
64
65
66 # Pool objects
67 class ConnectionPool:
68 """
69 Base class for all connection pools, such as
70 :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
71
72 .. note::
73 ConnectionPool.urlopen() does not normalize or percent-encode target URIs
74 which is useful if your target server doesn't support percent-encoded
75 target URIs.
76 """
77
78 scheme: Optional[str] = None
79 QueueCls = queue.LifoQueue
80
81 def __init__(self, host: str, port: Optional[int] = None) -> None:
82 if not host:
83 raise LocationValueError("No host specified.")
84
85 self.host = _normalize_host(host, scheme=self.scheme)
86 self._proxy_host = host.lower()
87 self.port = port
88
89 def __str__(self) -> str:
90 return f"{type(self).__name__}(host={self.host!r}, port={self.port!r})"
91
92 def __enter__(self: _SelfT) -> _SelfT:
93 return self
94
95 def __exit__(
96 self,
97 exc_type: Optional[Type[BaseException]],
98 exc_val: Optional[BaseException],
99 exc_tb: Optional[TracebackType],
100 ) -> "Literal[False]":
101 self.close()
102 # Return False to re-raise any potential exceptions
103 return False
104
105 def close(self) -> None:
106 """
107 Close all pooled connections and disable the pool.
108 """
109 pass
110
111
112 # This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
113 _blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK}
114
115
116 class HTTPConnectionPool(ConnectionPool, RequestMethods):
117 """
118 Thread-safe connection pool for one host.
119
120 :param host:
121 Host used for this HTTP Connection (e.g. "localhost"), passed into
122 :class:`http.client.HTTPConnection`.
123
124 :param port:
125 Port used for this HTTP Connection (None is equivalent to 80), passed
126 into :class:`http.client.HTTPConnection`.
127
128 :param timeout:
129 Socket timeout in seconds for each individual connection. This can
130 be a float or integer, which sets the timeout for the HTTP request,
131 or an instance of :class:`urllib3.util.Timeout` which gives you more
132 fine-grained control over request timeouts. After the constructor has
133 been parsed, this is always a `urllib3.util.Timeout` object.
134
135 :param maxsize:
136 Number of connections to save that can be reused. More than 1 is useful
137 in multithreaded situations. If ``block`` is set to False, more
138 connections will be created but they will not be saved once they've
139 been used.
140
141 :param block:
142 If set to True, no more than ``maxsize`` connections will be used at
143 a time. When no free connections are available, the call will block
144 until a connection has been released. This is a useful side effect for
145 particular multithreaded situations where one does not want to use more
146 than maxsize connections per host to prevent flooding.
147
148 :param headers:
149 Headers to include with all requests, unless other headers are given
150 explicitly.
151
152 :param retries:
153 Retry configuration to use by default with requests in this pool.
154
155 :param _proxy:
156 Parsed proxy URL, should not be used directly, instead, see
157 :class:`urllib3.ProxyManager`
158
159 :param _proxy_headers:
160 A dictionary with proxy headers, should not be used directly,
161 instead, see :class:`urllib3.ProxyManager`
162
163 :param \\**conn_kw:
164 Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
165 :class:`urllib3.connection.HTTPSConnection` instances.
166 """
167
168 scheme = "http"
169 ConnectionCls: Type[Union[HTTPConnection, HTTPSConnection]] = HTTPConnection
170 ResponseCls = HTTPResponse
171
172 def __init__(
173 self,
174 host: str,
175 port: Optional[int] = None,
176 timeout: Optional[_TYPE_TIMEOUT] = _DEFAULT_TIMEOUT,
177 maxsize: int = 1,
178 block: bool = False,
179 headers: Optional[Mapping[str, str]] = None,
180 retries: Optional[Union[Retry, bool, int]] = None,
181 _proxy: Optional[Url] = None,
182 _proxy_headers: Optional[Mapping[str, str]] = None,
183 _proxy_config: Optional[ProxyConfig] = None,
184 **conn_kw: Any,
185 ):
186 ConnectionPool.__init__(self, host, port)
187 RequestMethods.__init__(self, headers)
188
189 if not isinstance(timeout, Timeout):
190 timeout = Timeout.from_float(timeout)
191
192 if retries is None:
193 retries = Retry.DEFAULT
194
195 self.timeout = timeout
196 self.retries = retries
197
198 self.pool: Optional[queue.LifoQueue[Any]] = self.QueueCls(maxsize)
199 self.block = block
200
201 self.proxy = _proxy
202 self.proxy_headers = _proxy_headers or {}
203 self.proxy_config = _proxy_config
204
205 # Fill the queue up so that doing get() on it will block properly
206 for _ in range(maxsize):
207 self.pool.put(None)
208
209 # These are mostly for testing and debugging purposes.
210 self.num_connections = 0
211 self.num_requests = 0
212 self.conn_kw = conn_kw
213
214 if self.proxy:
215 # Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
216 # We cannot know if the user has added default socket options, so we cannot replace the
217 # list.
218 self.conn_kw.setdefault("socket_options", [])
219
220 self.conn_kw["proxy"] = self.proxy
221 self.conn_kw["proxy_config"] = self.proxy_config
222
223 def _new_conn(self) -> HTTPConnection:
224 """
225 Return a fresh :class:`HTTPConnection`.
226 """
227 self.num_connections += 1
228 log.debug(
229 "Starting new HTTP connection (%d): %s:%s",
230 self.num_connections,
231 self.host,
232 self.port or "80",
233 )
234
235 conn = self.ConnectionCls(
236 host=self.host,
237 port=self.port,
238 timeout=self.timeout.connect_timeout,
239 **self.conn_kw,
240 )
241 return conn
242
243 def _get_conn(self, timeout: Optional[float] = None) -> HTTPConnection:
244 """
245 Get a connection. Will return a pooled connection if one is available.
246
247 If no connections are available and :prop:`.block` is ``False``, then a
248 fresh connection is returned.
249
250 :param timeout:
251 Seconds to wait before giving up and raising
252 :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
253 :prop:`.block` is ``True``.
254 """
255 conn = None
256
257 if self.pool is None:
258 raise ClosedPoolError(self, "Pool is closed.")
259
260 try:
261 conn = self.pool.get(block=self.block, timeout=timeout)
262
263 except AttributeError: # self.pool is None
264 raise ClosedPoolError(self, "Pool is closed.") from None # Defensive:
265
266 except queue.Empty:
267 if self.block:
268 raise EmptyPoolError(
269 self,
270 "Pool is empty and a new connection can't be opened due to blocking mode.",
271 ) from None
272 pass # Oh well, we'll create a new connection then
273
274 # If this is a persistent connection, check if it got disconnected
275 if conn and is_connection_dropped(conn):
276 log.debug("Resetting dropped connection: %s", self.host)
277 conn.close()
278 if getattr(conn, "auto_open", 1) == 0:
279 # This is a proxied connection that has been mutated by
280 # http.client._tunnel() and cannot be reused (since it would
281 # attempt to bypass the proxy)
282 conn = None
283
284 return conn or self._new_conn()
285
286 def _put_conn(self, conn: Optional[HTTPConnection]) -> None:
287 """
288 Put a connection back into the pool.
289
290 :param conn:
291 Connection object for the current host and port as returned by
292 :meth:`._new_conn` or :meth:`._get_conn`.
293
294 If the pool is already full, the connection is closed and discarded
295 because we exceeded maxsize. If connections are discarded frequently,
296 then maxsize should be increased.
297
298 If the pool is closed, then the connection will be closed and discarded.
299 """
300 if self.pool is not None:
301 try:
302 self.pool.put(conn, block=False)
303 return # Everything is dandy, done.
304 except AttributeError:
305 # self.pool is None.
306 pass
307 except queue.Full:
308
309 # Connection never got put back into the pool, close it.
310 if conn:
311 conn.close()
312
313 if self.block:
314 # This should never happen if you got the conn from self._get_conn
315 raise FullPoolError(
316 self,
317 "Pool reached maximum size and no more connections are allowed.",
318 ) from None
319
320 log.warning(
321 "Connection pool is full, discarding connection: %s. Connection pool size: %s",
322 self.host,
323 self.pool.qsize(),
324 )
325
326 # Connection never got put back into the pool, close it.
327 if conn:
328 conn.close()
329
330 def _validate_conn(self, conn: HTTPConnection) -> None:
331 """
332 Called right before a request is made, after the socket is created.
333 """
334 pass
335
336 def _prepare_proxy(self, conn: HTTPConnection) -> None:
337 # Nothing to do for HTTP connections.
338 pass
339
340 def _get_timeout(self, timeout: _TYPE_TIMEOUT) -> Timeout:
341 """Helper that always returns a :class:`urllib3.util.Timeout`"""
342 if timeout is _DEFAULT_TIMEOUT:
343 return self.timeout.clone()
344
345 if isinstance(timeout, Timeout):
346 return timeout.clone()
347 else:
348 # User passed us an int/float. This is for backwards compatibility,
349 # can be removed later
350 return Timeout.from_float(timeout)
351
352 def _raise_timeout(
353 self,
354 err: Union[BaseSSLError, OSError, SocketTimeout],
355 url: str,
356 timeout_value: Optional[_TYPE_TIMEOUT],
357 ) -> None:
358 """Is the error actually a timeout? Will raise a ReadTimeout or pass"""
359
360 if isinstance(err, SocketTimeout):
361 raise ReadTimeoutError(
362 self, url, f"Read timed out. (read timeout={timeout_value})"
363 ) from err
364
365 # See the above comment about EAGAIN in Python 3.
366 if hasattr(err, "errno") and err.errno in _blocking_errnos:
367 raise ReadTimeoutError(
368 self, url, f"Read timed out. (read timeout={timeout_value})"
369 ) from err
370
371 def _make_request(
372 self,
373 conn: HTTPConnection,
374 method: str,
375 url: str,
376 timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,
377 chunked: bool = False,
378 **httplib_request_kw: Any,
379 ) -> _HttplibHTTPResponse:
380 """
381 Perform a request on a given urllib connection object taken from our
382 pool.
383
384 :param conn:
385 a connection from one of our connection pools
386
387 :param timeout:
388 Socket timeout in seconds for the request. This can be a
389 float or integer, which will set the same timeout value for
390 the socket connect and the socket read, or an instance of
391 :class:`urllib3.util.Timeout`, which gives you more fine-grained
392 control over your timeouts.
393 """
394 self.num_requests += 1
395
396 timeout_obj = self._get_timeout(timeout)
397 timeout_obj.start_connect()
398 conn.timeout = timeout_obj.connect_timeout # type: ignore[assignment]
399
400 try:
401 # Trigger any extra validation we need to do.
402 try:
403 self._validate_conn(conn)
404 except (SocketTimeout, BaseSSLError) as e:
405 self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
406 raise
407
408 # _validate_conn() starts the connection to an HTTPS proxy
409 # so we need to wrap errors with 'ProxyError' here too.
410 except (
411 OSError,
412 NewConnectionError,
413 TimeoutError,
414 BaseSSLError,
415 CertificateError,
416 SSLError,
417 ) as e:
418 new_e: Exception = e
419 if isinstance(e, (BaseSSLError, CertificateError)):
420 new_e = SSLError(e)
421 if isinstance(
422 new_e, (OSError, NewConnectionError, TimeoutError, SSLError)
423 ) and (conn and conn._connecting_to_proxy and conn.proxy):
424 new_e = _wrap_proxy_error(new_e, conn.proxy.scheme)
425 raise new_e
426
427 # conn.request() calls http.client.*.request, not the method in
428 # urllib3.request. It also calls makefile (recv) on the socket.
429 try:
430 if chunked:
431 conn.request_chunked(method, url, **httplib_request_kw)
432 else:
433 conn.request(method, url, **httplib_request_kw)
434
435 # We are swallowing BrokenPipeError (errno.EPIPE) since the server is
436 # legitimately able to close the connection after sending a valid response.
437 # With this behaviour, the received response is still readable.
438 except BrokenPipeError:
439 pass
440 except OSError as e:
441 # MacOS/Linux
442 # EPROTOTYPE is needed on macOS
443 # https://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/
444 if e.errno != errno.EPROTOTYPE:
445 raise
446
447 # Reset the timeout for the recv() on the socket
448 read_timeout = timeout_obj.read_timeout
449
450 if conn.sock:
451 # In Python 3 socket.py will catch EAGAIN and return None when you
452 # try and read into the file pointer created by http.client, which
453 # instead raises a BadStatusLine exception. Instead of catching
454 # the exception and assuming all BadStatusLine exceptions are read
455 # timeouts, check for a zero timeout before making the request.
456 if read_timeout == 0:
457 raise ReadTimeoutError(
458 self, url, f"Read timed out. (read timeout={read_timeout})"
459 )
460 conn.sock.settimeout(read_timeout)
461
462 # Receive the response from the server
463 try:
464 httplib_response = conn.getresponse()
465 except (BaseSSLError, OSError) as e:
466 self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
467 raise
468
469 log.debug(
470 '%s://%s:%s "%s %s %s" %s %s',
471 self.scheme,
472 self.host,
473 self.port,
474 method,
475 url,
476 # HTTP version
477 conn._http_vsn_str, # type: ignore[attr-defined]
478 httplib_response.status,
479 httplib_response.length,
480 )
481
482 try:
483 assert_header_parsing(httplib_response.msg)
484 except (HeaderParsingError, TypeError) as hpe:
485 log.warning(
486 "Failed to parse headers (url=%s): %s",
487 self._absolute_url(url),
488 hpe,
489 exc_info=True,
490 )
491
492 return httplib_response
493
494 def _absolute_url(self, path: str) -> str:
495 return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url
496
497 def close(self) -> None:
498 """
499 Close all pooled connections and disable the pool.
500 """
501 if self.pool is None:
502 return
503 # Disable access to the pool
504 old_pool, self.pool = self.pool, None
505
506 try:
507 while True:
508 conn = old_pool.get(block=False)
509 if conn:
510 conn.close()
511
512 except queue.Empty:
513 pass # Done.
514
515 def is_same_host(self, url: str) -> bool:
516 """
517 Check if the given ``url`` is a member of the same host as this
518 connection pool.
519 """
520 if url.startswith("/"):
521 return True
522
523 # TODO: Add optional support for socket.gethostbyname checking.
524 scheme, _, host, port, *_ = parse_url(url)
525 scheme = scheme or "http"
526 if host is not None:
527 host = _normalize_host(host, scheme=scheme)
528
529 # Use explicit default port for comparison when none is given
530 if self.port and not port:
531 port = port_by_scheme.get(scheme)
532 elif not self.port and port == port_by_scheme.get(scheme):
533 port = None
534
535 return (scheme, host, port) == (self.scheme, self.host, self.port)
536
537 def urlopen( # type: ignore[override]
538 self,
539 method: str,
540 url: str,
541 body: Optional[_TYPE_BODY] = None,
542 headers: Optional[Mapping[str, str]] = None,
543 retries: Optional[Union[Retry, bool, int]] = None,
544 redirect: bool = True,
545 assert_same_host: bool = True,
546 timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,
547 pool_timeout: Optional[int] = None,
548 release_conn: Optional[bool] = None,
549 chunked: bool = False,
550 body_pos: Optional[_TYPE_BODY_POSITION] = None,
551 **response_kw: Any,
552 ) -> BaseHTTPResponse:
553 """
554 Get a connection from the pool and perform an HTTP request. This is the
555 lowest level call for making a request, so you'll need to specify all
556 the raw details.
557
558 .. note::
559
560 More commonly, it's appropriate to use a convenience method
561 such as :meth:`request`.
562
563 .. note::
564
565 `release_conn` will only behave as expected if
566 `preload_content=False` because we want to make
567 `preload_content=False` the default behaviour someday soon without
568 breaking backwards compatibility.
569
570 :param method:
571 HTTP request method (such as GET, POST, PUT, etc.)
572
573 :param url:
574 The URL to perform the request on.
575
576 :param body:
577 Data to send in the request body, either :class:`str`, :class:`bytes`,
578 an iterable of :class:`str`/:class:`bytes`, or a file-like object.
579
580 :param headers:
581 Dictionary of custom headers to send, such as User-Agent,
582 If-None-Match, etc. If None, pool headers are used. If provided,
583 these headers completely replace any pool-specific headers.
584
585 :param retries:
586 Configure the number of retries to allow before raising a
587 :class:`~urllib3.exceptions.MaxRetryError` exception.
588
589 Pass ``None`` to retry until you receive a response. Pass a
590 :class:`~urllib3.util.retry.Retry` object for fine-grained control
591 over different types of retries.
592 Pass an integer number to retry connection errors that many times,
593 but no other types of errors. Pass zero to never retry.
594
595 If ``False``, then retries are disabled and any exception is raised
596 immediately. Also, instead of raising a MaxRetryError on redirects,
597 the redirect response will be returned.
598
599 :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
600
601 :param redirect:
602 If True, automatically handle redirects (status codes 301, 302,
603 303, 307, 308). Each redirect counts as a retry. Disabling retries
604 will disable redirect, too.
605
606 :param assert_same_host:
607 If ``True``, will make sure that the host of the pool requests is
608 consistent else will raise HostChangedError. When ``False``, you can
609 use the pool on an HTTP proxy and request foreign hosts.
610
611 :param timeout:
612 If specified, overrides the default timeout for this one
613 request. It may be a float (in seconds) or an instance of
614 :class:`urllib3.util.Timeout`.
615
616 :param pool_timeout:
617 If set and the pool is set to block=True, then this method will
618 block for ``pool_timeout`` seconds and raise EmptyPoolError if no
619 connection is available within the time period.
620
621 :param release_conn:
622 If False, then the urlopen call will not release the connection
623 back into the pool once a response is received (but will release if
624 you read the entire contents of the response such as when
625 `preload_content=True`). This is useful if you're not preloading
626 the response's content immediately. You will need to call
627 ``r.release_conn()`` on the response ``r`` to return the connection
628 back into the pool. If None, it takes the value of
629 ``response_kw.get('preload_content', True)``.
630
631 :param chunked:
632 If True, urllib3 will send the body using chunked transfer
633 encoding. Otherwise, urllib3 will send the body using the standard
634 content-length form. Defaults to False.
635
636 :param int body_pos:
637 Position to seek to in file-like body in the event of a retry or
638 redirect. Typically this won't need to be set because urllib3 will
639 auto-populate the value when needed.
640
641 :param \\**response_kw:
642 Additional parameters are passed to
643 :meth:`urllib3.response.HTTPResponse.from_httplib`
644 """
645
646 parsed_url = parse_url(url)
647 destination_scheme = parsed_url.scheme
648
649 if headers is None:
650 headers = self.headers
651
652 if not isinstance(retries, Retry):
653 retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
654
655 if release_conn is None:
656 release_conn = response_kw.get("preload_content", True)
657
658 # Check host
659 if assert_same_host and not self.is_same_host(url):
660 raise HostChangedError(self, url, retries)
661
662 # Ensure that the URL we're connecting to is properly encoded
663 if url.startswith("/"):
664 url = to_str(_encode_target(url))
665 else:
666 url = to_str(parsed_url.url)
667
668 conn = None
669
670 # Track whether `conn` needs to be released before
671 # returning/raising/recursing. Update this variable if necessary, and
672 # leave `release_conn` constant throughout the function. That way, if
673 # the function recurses, the original value of `release_conn` will be
674 # passed down into the recursive call, and its value will be respected.
675 #
676 # See issue #651 [1] for details.
677 #
678 # [1] <https://github.com/urllib3/urllib3/issues/651>
679 release_this_conn = release_conn
680
681 http_tunnel_required = connection_requires_http_tunnel(
682 self.proxy, self.proxy_config, destination_scheme
683 )
684
685 # Merge the proxy headers. Only done when not using HTTP CONNECT. We
686 # have to copy the headers dict so we can safely change it without those
687 # changes being reflected in anyone else's copy.
688 if not http_tunnel_required:
689 headers = headers.copy() # type: ignore[attr-defined]
690 headers.update(self.proxy_headers) # type: ignore[union-attr]
691
692 # Must keep the exception bound to a separate variable or else Python 3
693 # complains about UnboundLocalError.
694 err = None
695
696 # Keep track of whether we cleanly exited the except block. This
697 # ensures we do proper cleanup in finally.
698 clean_exit = False
699
700 # Rewind body position, if needed. Record current position
701 # for future rewinds in the event of a redirect/retry.
702 body_pos = set_file_position(body, body_pos)
703
704 try:
705 # Request a connection from the queue.
706 timeout_obj = self._get_timeout(timeout)
707 conn = self._get_conn(timeout=pool_timeout)
708
709 conn.timeout = timeout_obj.connect_timeout # type: ignore[assignment]
710
711 is_new_proxy_conn = self.proxy is not None and not getattr(
712 conn, "sock", None
713 )
714 if is_new_proxy_conn:
715 assert isinstance(self.proxy, Url)
716 conn._connecting_to_proxy = True
717 if http_tunnel_required:
718 try:
719 self._prepare_proxy(conn)
720 except (BaseSSLError, OSError, SocketTimeout) as e:
721 self._raise_timeout(
722 err=e, url=self.proxy.url, timeout_value=conn.timeout
723 )
724 raise
725
726 # Make the request on the httplib connection object.
727 httplib_response = self._make_request(
728 conn,
729 method,
730 url,
731 timeout=timeout_obj,
732 body=body,
733 headers=headers,
734 chunked=chunked,
735 )
736
737 # If we're going to release the connection in ``finally:``, then
738 # the response doesn't need to know about the connection. Otherwise
739 # it will also try to release it and we'll have a double-release
740 # mess.
741 response_conn = conn if not release_conn else None
742
743 # Pass method to Response for length checking
744 response_kw["request_method"] = method
745
746 # Import httplib's response into our own wrapper object
747 response = self.ResponseCls.from_httplib(
748 httplib_response,
749 pool=self,
750 connection=response_conn,
751 retries=retries,
752 **response_kw,
753 )
754
755 # Everything went great!
756 clean_exit = True
757
758 except EmptyPoolError:
759 # Didn't get a connection from the pool, no need to clean up
760 clean_exit = True
761 release_this_conn = False
762 raise
763
764 except (
765 TimeoutError,
766 HTTPException,
767 OSError,
768 ProtocolError,
769 BaseSSLError,
770 SSLError,
771 CertificateError,
772 ProxyError,
773 ) as e:
774 # Discard the connection for these exceptions. It will be
775 # replaced during the next _get_conn() call.
776 clean_exit = False
777 new_e: Exception = e
778 if isinstance(e, (BaseSSLError, CertificateError)):
779 new_e = SSLError(e)
780 if isinstance(
781 new_e,
782 (
783 OSError,
784 NewConnectionError,
785 TimeoutError,
786 SSLError,
787 HTTPException,
788 ),
789 ) and (conn and conn._connecting_to_proxy and conn.proxy):
790 new_e = _wrap_proxy_error(new_e, conn.proxy.scheme)
791 elif isinstance(new_e, (OSError, HTTPException)):
792 new_e = ProtocolError("Connection aborted.", new_e)
793
794 retries = retries.increment(
795 method, url, error=new_e, _pool=self, _stacktrace=sys.exc_info()[2]
796 )
797 retries.sleep()
798
799 # Keep track of the error for the retry warning.
800 err = e
801
802 finally:
803 if not clean_exit:
804 # We hit some kind of exception, handled or otherwise. We need
805 # to throw the connection away unless explicitly told not to.
806 # Close the connection, set the variable to None, and make sure
807 # we put the None back in the pool to avoid leaking it.
808 if conn:
809 conn.close()
810 conn = None
811 release_this_conn = True
812
813 if release_this_conn:
814 # Put the connection back to be reused. If the connection is
815 # expired then it will be None, which will get replaced with a
816 # fresh connection during _get_conn.
817 self._put_conn(conn)
818
819 if not conn:
820 # Try again
821 log.warning(
822 "Retrying (%r) after connection broken by '%r': %s", retries, err, url
823 )
824 return self.urlopen(
825 method,
826 url,
827 body,
828 headers,
829 retries,
830 redirect,
831 assert_same_host,
832 timeout=timeout,
833 pool_timeout=pool_timeout,
834 release_conn=release_conn,
835 chunked=chunked,
836 body_pos=body_pos,
837 **response_kw,
838 )
839
840 # Handle redirect?
841 redirect_location = redirect and response.get_redirect_location()
842 if redirect_location:
843 if response.status == 303:
844 method = "GET"
845
846 try:
847 retries = retries.increment(method, url, response=response, _pool=self)
848 except MaxRetryError:
849 if retries.raise_on_redirect:
850 response.drain_conn()
851 raise
852 return response
853
854 response.drain_conn()
855 retries.sleep_for_retry(response)
856 log.debug("Redirecting %s -> %s", url, redirect_location)
857 return self.urlopen(
858 method,
859 redirect_location,
860 body,
861 headers,
862 retries=retries,
863 redirect=redirect,
864 assert_same_host=assert_same_host,
865 timeout=timeout,
866 pool_timeout=pool_timeout,
867 release_conn=release_conn,
868 chunked=chunked,
869 body_pos=body_pos,
870 **response_kw,
871 )
872
873 # Check if we should retry the HTTP response.
874 has_retry_after = bool(response.getheader("Retry-After"))
875 if retries.is_retry(method, response.status, has_retry_after):
876 try:
877 retries = retries.increment(method, url, response=response, _pool=self)
878 except MaxRetryError:
879 if retries.raise_on_status:
880 response.drain_conn()
881 raise
882 return response
883
884 response.drain_conn()
885 retries.sleep(response)
886 log.debug("Retry: %s", url)
887 return self.urlopen(
888 method,
889 url,
890 body,
891 headers,
892 retries=retries,
893 redirect=redirect,
894 assert_same_host=assert_same_host,
895 timeout=timeout,
896 pool_timeout=pool_timeout,
897 release_conn=release_conn,
898 chunked=chunked,
899 body_pos=body_pos,
900 **response_kw,
901 )
902
903 return response
904
905
906 class HTTPSConnectionPool(HTTPConnectionPool):
907 """
908 Same as :class:`.HTTPConnectionPool`, but HTTPS.
909
910 :class:`.HTTPSConnection` uses one of ``assert_fingerprint``,
911 ``assert_hostname`` and ``host`` in this order to verify connections.
912 If ``assert_hostname`` is False, no verification is done.
913
914 The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,
915 ``ca_cert_dir``, ``ssl_version``, ``key_password`` are only used if :mod:`ssl`
916 is available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade
917 the connection socket into an SSL socket.
918 """
919
920 scheme = "https"
921 ConnectionCls = HTTPSConnection
922
923 def __init__(
924 self,
925 host: str,
926 port: Optional[int] = None,
927 timeout: Optional[_TYPE_TIMEOUT] = _DEFAULT_TIMEOUT,
928 maxsize: int = 1,
929 block: bool = False,
930 headers: Optional[Mapping[str, str]] = None,
931 retries: Optional[Union[Retry, bool, int]] = None,
932 _proxy: Optional[Url] = None,
933 _proxy_headers: Optional[Mapping[str, str]] = None,
934 key_file: Optional[str] = None,
935 cert_file: Optional[str] = None,
936 cert_reqs: Optional[Union[int, str]] = None,
937 key_password: Optional[str] = None,
938 ca_certs: Optional[str] = None,
939 ssl_version: Optional[Union[int, str]] = None,
940 ssl_minimum_version: Optional["ssl.TLSVersion"] = None,
941 ssl_maximum_version: Optional["ssl.TLSVersion"] = None,
942 assert_hostname: Optional[Union[str, "Literal[False]"]] = None,
943 assert_fingerprint: Optional[str] = None,
944 ca_cert_dir: Optional[str] = None,
945 **conn_kw: Any,
946 ) -> None:
947
948 super().__init__(
949 host,
950 port,
951 timeout,
952 maxsize,
953 block,
954 headers,
955 retries,
956 _proxy,
957 _proxy_headers,
958 **conn_kw,
959 )
960
961 self.key_file = key_file
962 self.cert_file = cert_file
963 self.cert_reqs = cert_reqs
964 self.key_password = key_password
965 self.ca_certs = ca_certs
966 self.ca_cert_dir = ca_cert_dir
967 self.ssl_version = ssl_version
968 self.ssl_minimum_version = ssl_minimum_version
969 self.ssl_maximum_version = ssl_maximum_version
970 self.assert_hostname = assert_hostname
971 self.assert_fingerprint = assert_fingerprint
972
973 def _prepare_conn(self, conn: HTTPSConnection) -> HTTPConnection:
974 """
975 Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
976 and establish the tunnel if proxy is used.
977 """
978
979 if isinstance(conn, VerifiedHTTPSConnection):
980 conn.set_cert(
981 key_file=self.key_file,
982 key_password=self.key_password,
983 cert_file=self.cert_file,
984 cert_reqs=self.cert_reqs,
985 ca_certs=self.ca_certs,
986 ca_cert_dir=self.ca_cert_dir,
987 assert_hostname=self.assert_hostname,
988 assert_fingerprint=self.assert_fingerprint,
989 )
990 conn.ssl_version = self.ssl_version
991 conn.ssl_minimum_version = self.ssl_minimum_version
992 conn.ssl_maximum_version = self.ssl_maximum_version
993
994 return conn
995
996 def _prepare_proxy(self, conn: HTTPSConnection) -> None: # type: ignore[override]
997 """
998 Establishes a tunnel connection through HTTP CONNECT.
999
1000 Tunnel connection is established early because otherwise httplib would
1001 improperly set Host: header to proxy's IP:port.
1002 """
1003 conn.set_tunnel(self._proxy_host, self.port, self.proxy_headers)
1004
1005 if self.proxy and self.proxy.scheme == "https":
1006 conn.tls_in_tls_required = True
1007
1008 conn.connect()
1009
1010 def _new_conn(self) -> HTTPConnection:
1011 """
1012 Return a fresh :class:`urllib3.connection.HTTPConnection`.
1013 """
1014 self.num_connections += 1
1015 log.debug(
1016 "Starting new HTTPS connection (%d): %s:%s",
1017 self.num_connections,
1018 self.host,
1019 self.port or "443",
1020 )
1021
1022 if not self.ConnectionCls or self.ConnectionCls is DummyConnection: # type: ignore[comparison-overlap]
1023 raise SSLError(
1024 "Can't connect to HTTPS URL because the SSL module is not available."
1025 )
1026
1027 actual_host: str = self.host
1028 actual_port = self.port
1029 if self.proxy is not None and self.proxy.host is not None:
1030 actual_host = self.proxy.host
1031 actual_port = self.proxy.port
1032
1033 conn = self.ConnectionCls(
1034 host=actual_host,
1035 port=actual_port,
1036 timeout=self.timeout.connect_timeout,
1037 cert_file=self.cert_file,
1038 key_file=self.key_file,
1039 key_password=self.key_password,
1040 **self.conn_kw,
1041 )
1042
1043 return self._prepare_conn(conn)
1044
1045 def _validate_conn(self, conn: HTTPConnection) -> None:
1046 """
1047 Called right before a request is made, after the socket is created.
1048 """
1049 super()._validate_conn(conn)
1050
1051 # Force connect early to allow us to validate the connection.
1052 if not conn.sock:
1053 conn.connect()
1054
1055 if not conn.is_verified:
1056 warnings.warn(
1057 (
1058 f"Unverified HTTPS request is being made to host '{conn.host}'. "
1059 "Adding certificate verification is strongly advised. See: "
1060 "https://urllib3.readthedocs.io/en/latest/advanced-usage.html"
1061 "#tls-warnings"
1062 ),
1063 InsecureRequestWarning,
1064 )
1065
1066
1067 def connection_from_url(url: str, **kw: Any) -> HTTPConnectionPool:
1068 """
1069 Given a url, return an :class:`.ConnectionPool` instance of its host.
1070
1071 This is a shortcut for not having to parse out the scheme, host, and port
1072 of the url before creating an :class:`.ConnectionPool` instance.
1073
1074 :param url:
1075 Absolute URL string that must include the scheme. Port is optional.
1076
1077 :param \\**kw:
1078 Passes additional parameters to the constructor of the appropriate
1079 :class:`.ConnectionPool`. Useful for specifying things like
1080 timeout, maxsize, headers, etc.
1081
1082 Example::
1083
1084 >>> conn = connection_from_url('http://google.com/')
1085 >>> r = conn.request('GET', '/')
1086 """
1087 scheme, _, host, port, *_ = parse_url(url)
1088 scheme = scheme or "http"
1089 port = port or port_by_scheme.get(scheme, 80)
1090 if scheme == "https":
1091 return HTTPSConnectionPool(host, port=port, **kw) # type: ignore[arg-type]
1092 else:
1093 return HTTPConnectionPool(host, port=port, **kw) # type: ignore[arg-type]
1094
1095
1096 @overload
1097 def _normalize_host(host: None, scheme: Optional[str]) -> None:
1098 ...
1099
1100
1101 @overload
1102 def _normalize_host(host: str, scheme: Optional[str]) -> str:
1103 ...
1104
1105
1106 def _normalize_host(host: Optional[str], scheme: Optional[str]) -> Optional[str]:
1107 """
1108 Normalize hosts for comparisons and use with sockets.
1109 """
1110
1111 host = normalize_host(host, scheme)
1112
1113 # httplib doesn't like it when we include brackets in IPv6 addresses
1114 # Specifically, if we include brackets but also pass the port then
1115 # httplib crazily doubles up the square brackets on the Host header.
1116 # Instead, we need to make sure we never pass ``None`` as the port.
1117 # However, for backward compatibility reasons we can't actually
1118 # *assert* that. See http://bugs.python.org/issue28539
1119 if host and host.startswith("[") and host.endswith("]"):
1120 host = host[1:-1]
1121 return host
1122
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/urllib3/connectionpool.py b/src/urllib3/connectionpool.py
--- a/src/urllib3/connectionpool.py
+++ b/src/urllib3/connectionpool.py
@@ -1020,7 +1020,7 @@
)
if not self.ConnectionCls or self.ConnectionCls is DummyConnection: # type: ignore[comparison-overlap]
- raise SSLError(
+ raise ImportError(
"Can't connect to HTTPS URL because the SSL module is not available."
)
| {"golden_diff": "diff --git a/src/urllib3/connectionpool.py b/src/urllib3/connectionpool.py\n--- a/src/urllib3/connectionpool.py\n+++ b/src/urllib3/connectionpool.py\n@@ -1020,7 +1020,7 @@\n )\n \n if not self.ConnectionCls or self.ConnectionCls is DummyConnection: # type: ignore[comparison-overlap]\n- raise SSLError(\n+ raise ImportError(\n \"Can't connect to HTTPS URL because the SSL module is not available.\"\n )\n", "issue": "Retry retries on fruitless ssl ImportError\n### Subject\r\n\r\nDescribe the issue here.\r\n\r\n### Environment\r\n\r\nDescribe your environment.\r\nAt least, paste here the output of:\r\n\r\n```python\r\nimport platform\r\nimport urllib3\r\n\r\nprint(\"OS\", platform.platform())\r\nprint(\"Python\", platform.python_version())\r\nprint(\"urllib3\", urllib3.__version__)\r\n```\r\n\r\n### Steps to Reproduce\r\n\r\n```\r\nPython 3.10.4 (main, Mar 24 2022, 16:12:56) [GCC 9.4.0] on linux\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n>>> import sys\r\n>>> sys.modules[\"ssl\"] = None\r\n>>> import requests\r\n>>> requests.get(\"https://google.com\")\r\nTraceback (most recent call last):\r\n File \"/home/graingert/.virtualenvs/testing310/lib/python3.10/site-packages/urllib3/connectionpool.py\", line 692, in urlopen\r\n conn = self._get_conn(timeout=pool_timeout)\r\n File \"/home/graingert/.virtualenvs/testing310/lib/python3.10/site-packages/urllib3/connectionpool.py\", line 281, in _get_conn\r\n return conn or self._new_conn()\r\n File \"/home/graingert/.virtualenvs/testing310/lib/python3.10/site-packages/urllib3/connectionpool.py\", line 1009, in _new_conn\r\n raise SSLError(\r\nurllib3.exceptions.SSLError: Can't connect to HTTPS URL because the SSL module is not available.\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/home/graingert/.virtualenvs/testing310/lib/python3.10/site-packages/requests/adapters.py\", line 440, in send\r\n resp = conn.urlopen(\r\n File \"/home/graingert/.virtualenvs/testing310/lib/python3.10/site-packages/urllib3/connectionpool.py\", line 785, in urlopen\r\n retries = retries.increment(\r\n File \"/home/graingert/.virtualenvs/testing310/lib/python3.10/site-packages/urllib3/util/retry.py\", line 592, in increment\r\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\r\nurllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='google.com', port=443): Max retries exceeded with url: / (Caused by SSLError(\"Can't connect to HTTPS URL because the SSL module is not available.\"))\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/graingert/.virtualenvs/testing310/lib/python3.10/site-packages/requests/api.py\", line 75, in get\r\n return request('get', url, params=params, **kwargs)\r\n File \"/home/graingert/.virtualenvs/testing310/lib/python3.10/site-packages/requests/api.py\", line 61, in request\r\n return session.request(method=method, url=url, **kwargs)\r\n File \"/home/graingert/.virtualenvs/testing310/lib/python3.10/site-packages/requests/sessions.py\", line 529, in request\r\n resp = self.send(prep, **send_kwargs)\r\n File \"/home/graingert/.virtualenvs/testing310/lib/python3.10/site-packages/requests/sessions.py\", line 645, in send\r\n r = adapter.send(request, **kwargs)\r\n File \"/home/graingert/.virtualenvs/testing310/lib/python3.10/site-packages/requests/adapters.py\", line 517, in 
send\r\n raise SSLError(e, request=request)\r\nrequests.exceptions.SSLError: HTTPSConnectionPool(host='google.com', port=443): Max retries exceeded with url: / (Caused by SSLError(\"Can't connect to HTTPS URL because the SSL module is not available.\"))\r\n```\r\n\r\n### Expected Behavior\r\n\r\nonly one attempt\r\n\r\n### Actual Behavior\r\n\r\n^\r\n\nRetry retries on fruitless ssl ImportError\n### Subject\r\n\r\nDescribe the issue here.\r\n\r\n### Environment\r\n\r\nDescribe your environment.\r\nAt least, paste here the output of:\r\n\r\n```python\r\nimport platform\r\nimport urllib3\r\n\r\nprint(\"OS\", platform.platform())\r\nprint(\"Python\", platform.python_version())\r\nprint(\"urllib3\", urllib3.__version__)\r\n```\r\n\r\n### Steps to Reproduce\r\n\r\n```\r\nPython 3.10.4 (main, Mar 24 2022, 16:12:56) [GCC 9.4.0] on linux\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n>>> import sys\r\n>>> sys.modules[\"ssl\"] = None\r\n>>> import requests\r\n>>> requests.get(\"https://google.com\")\r\nTraceback (most recent call last):\r\n File \"/home/graingert/.virtualenvs/testing310/lib/python3.10/site-packages/urllib3/connectionpool.py\", line 692, in urlopen\r\n conn = self._get_conn(timeout=pool_timeout)\r\n File \"/home/graingert/.virtualenvs/testing310/lib/python3.10/site-packages/urllib3/connectionpool.py\", line 281, in _get_conn\r\n return conn or self._new_conn()\r\n File \"/home/graingert/.virtualenvs/testing310/lib/python3.10/site-packages/urllib3/connectionpool.py\", line 1009, in _new_conn\r\n raise SSLError(\r\nurllib3.exceptions.SSLError: Can't connect to HTTPS URL because the SSL module is not available.\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/home/graingert/.virtualenvs/testing310/lib/python3.10/site-packages/requests/adapters.py\", line 440, in send\r\n resp = conn.urlopen(\r\n File \"/home/graingert/.virtualenvs/testing310/lib/python3.10/site-packages/urllib3/connectionpool.py\", line 785, in urlopen\r\n retries = retries.increment(\r\n File \"/home/graingert/.virtualenvs/testing310/lib/python3.10/site-packages/urllib3/util/retry.py\", line 592, in increment\r\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\r\nurllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='google.com', port=443): Max retries exceeded with url: / (Caused by SSLError(\"Can't connect to HTTPS URL because the SSL module is not available.\"))\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/graingert/.virtualenvs/testing310/lib/python3.10/site-packages/requests/api.py\", line 75, in get\r\n return request('get', url, params=params, **kwargs)\r\n File \"/home/graingert/.virtualenvs/testing310/lib/python3.10/site-packages/requests/api.py\", line 61, in request\r\n return session.request(method=method, url=url, **kwargs)\r\n File \"/home/graingert/.virtualenvs/testing310/lib/python3.10/site-packages/requests/sessions.py\", line 529, in request\r\n resp = self.send(prep, **send_kwargs)\r\n File \"/home/graingert/.virtualenvs/testing310/lib/python3.10/site-packages/requests/sessions.py\", line 645, in send\r\n r = adapter.send(request, **kwargs)\r\n File \"/home/graingert/.virtualenvs/testing310/lib/python3.10/site-packages/requests/adapters.py\", line 517, in send\r\n raise SSLError(e, request=request)\r\nrequests.exceptions.SSLError: 
HTTPSConnectionPool(host='google.com', port=443): Max retries exceeded with url: / (Caused by SSLError(\"Can't connect to HTTPS URL because the SSL module is not available.\"))\r\n```\r\n\r\n### Expected Behavior\r\n\r\nonly one attempt\r\n\r\n### Actual Behavior\r\n\r\n^\r\n\n", "before_files": [{"content": "import errno\nimport logging\nimport queue\nimport sys\nimport warnings\nfrom http.client import HTTPResponse as _HttplibHTTPResponse\nfrom socket import timeout as SocketTimeout\nfrom types import TracebackType\nfrom typing import TYPE_CHECKING, Any, Mapping, Optional, Type, TypeVar, Union, overload\n\nfrom ._request_methods import RequestMethods\nfrom .connection import (\n _TYPE_BODY,\n BaseSSLError,\n BrokenPipeError,\n DummyConnection,\n HTTPConnection,\n HTTPException,\n HTTPSConnection,\n ProxyConfig,\n VerifiedHTTPSConnection,\n _wrap_proxy_error,\n)\nfrom .connection import port_by_scheme as port_by_scheme\nfrom .exceptions import (\n ClosedPoolError,\n EmptyPoolError,\n FullPoolError,\n HeaderParsingError,\n HostChangedError,\n InsecureRequestWarning,\n LocationValueError,\n MaxRetryError,\n NewConnectionError,\n ProtocolError,\n ProxyError,\n ReadTimeoutError,\n SSLError,\n TimeoutError,\n)\nfrom .response import BaseHTTPResponse, HTTPResponse\nfrom .util.connection import is_connection_dropped\nfrom .util.proxy import connection_requires_http_tunnel\nfrom .util.request import _TYPE_BODY_POSITION, set_file_position\nfrom .util.response import assert_header_parsing\nfrom .util.retry import Retry\nfrom .util.ssl_match_hostname import CertificateError\nfrom .util.timeout import _DEFAULT_TIMEOUT, _TYPE_DEFAULT, Timeout\nfrom .util.url import Url, _encode_target\nfrom .util.url import _normalize_host as normalize_host\nfrom .util.url import parse_url\nfrom .util.util import to_str\n\nif TYPE_CHECKING:\n import ssl\n\n from typing_extensions import Literal\n\nlog = logging.getLogger(__name__)\n\n_TYPE_TIMEOUT = Union[Timeout, float, _TYPE_DEFAULT]\n\n_SelfT = TypeVar(\"_SelfT\")\n\n\n# Pool objects\nclass ConnectionPool:\n \"\"\"\n Base class for all connection pools, such as\n :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.\n\n .. note::\n ConnectionPool.urlopen() does not normalize or percent-encode target URIs\n which is useful if your target server doesn't support percent-encoded\n target URIs.\n \"\"\"\n\n scheme: Optional[str] = None\n QueueCls = queue.LifoQueue\n\n def __init__(self, host: str, port: Optional[int] = None) -> None:\n if not host:\n raise LocationValueError(\"No host specified.\")\n\n self.host = _normalize_host(host, scheme=self.scheme)\n self._proxy_host = host.lower()\n self.port = port\n\n def __str__(self) -> str:\n return f\"{type(self).__name__}(host={self.host!r}, port={self.port!r})\"\n\n def __enter__(self: _SelfT) -> _SelfT:\n return self\n\n def __exit__(\n self,\n exc_type: Optional[Type[BaseException]],\n exc_val: Optional[BaseException],\n exc_tb: Optional[TracebackType],\n ) -> \"Literal[False]\":\n self.close()\n # Return False to re-raise any potential exceptions\n return False\n\n def close(self) -> None:\n \"\"\"\n Close all pooled connections and disable the pool.\n \"\"\"\n pass\n\n\n# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252\n_blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK}\n\n\nclass HTTPConnectionPool(ConnectionPool, RequestMethods):\n \"\"\"\n Thread-safe connection pool for one host.\n\n :param host:\n Host used for this HTTP Connection (e.g. 
\"localhost\"), passed into\n :class:`http.client.HTTPConnection`.\n\n :param port:\n Port used for this HTTP Connection (None is equivalent to 80), passed\n into :class:`http.client.HTTPConnection`.\n\n :param timeout:\n Socket timeout in seconds for each individual connection. This can\n be a float or integer, which sets the timeout for the HTTP request,\n or an instance of :class:`urllib3.util.Timeout` which gives you more\n fine-grained control over request timeouts. After the constructor has\n been parsed, this is always a `urllib3.util.Timeout` object.\n\n :param maxsize:\n Number of connections to save that can be reused. More than 1 is useful\n in multithreaded situations. If ``block`` is set to False, more\n connections will be created but they will not be saved once they've\n been used.\n\n :param block:\n If set to True, no more than ``maxsize`` connections will be used at\n a time. When no free connections are available, the call will block\n until a connection has been released. This is a useful side effect for\n particular multithreaded situations where one does not want to use more\n than maxsize connections per host to prevent flooding.\n\n :param headers:\n Headers to include with all requests, unless other headers are given\n explicitly.\n\n :param retries:\n Retry configuration to use by default with requests in this pool.\n\n :param _proxy:\n Parsed proxy URL, should not be used directly, instead, see\n :class:`urllib3.ProxyManager`\n\n :param _proxy_headers:\n A dictionary with proxy headers, should not be used directly,\n instead, see :class:`urllib3.ProxyManager`\n\n :param \\\\**conn_kw:\n Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,\n :class:`urllib3.connection.HTTPSConnection` instances.\n \"\"\"\n\n scheme = \"http\"\n ConnectionCls: Type[Union[HTTPConnection, HTTPSConnection]] = HTTPConnection\n ResponseCls = HTTPResponse\n\n def __init__(\n self,\n host: str,\n port: Optional[int] = None,\n timeout: Optional[_TYPE_TIMEOUT] = _DEFAULT_TIMEOUT,\n maxsize: int = 1,\n block: bool = False,\n headers: Optional[Mapping[str, str]] = None,\n retries: Optional[Union[Retry, bool, int]] = None,\n _proxy: Optional[Url] = None,\n _proxy_headers: Optional[Mapping[str, str]] = None,\n _proxy_config: Optional[ProxyConfig] = None,\n **conn_kw: Any,\n ):\n ConnectionPool.__init__(self, host, port)\n RequestMethods.__init__(self, headers)\n\n if not isinstance(timeout, Timeout):\n timeout = Timeout.from_float(timeout)\n\n if retries is None:\n retries = Retry.DEFAULT\n\n self.timeout = timeout\n self.retries = retries\n\n self.pool: Optional[queue.LifoQueue[Any]] = self.QueueCls(maxsize)\n self.block = block\n\n self.proxy = _proxy\n self.proxy_headers = _proxy_headers or {}\n self.proxy_config = _proxy_config\n\n # Fill the queue up so that doing get() on it will block properly\n for _ in range(maxsize):\n self.pool.put(None)\n\n # These are mostly for testing and debugging purposes.\n self.num_connections = 0\n self.num_requests = 0\n self.conn_kw = conn_kw\n\n if self.proxy:\n # Enable Nagle's algorithm for proxies, to avoid packet fragmentation.\n # We cannot know if the user has added default socket options, so we cannot replace the\n # list.\n self.conn_kw.setdefault(\"socket_options\", [])\n\n self.conn_kw[\"proxy\"] = self.proxy\n self.conn_kw[\"proxy_config\"] = self.proxy_config\n\n def _new_conn(self) -> HTTPConnection:\n \"\"\"\n Return a fresh :class:`HTTPConnection`.\n \"\"\"\n self.num_connections += 1\n log.debug(\n 
\"Starting new HTTP connection (%d): %s:%s\",\n self.num_connections,\n self.host,\n self.port or \"80\",\n )\n\n conn = self.ConnectionCls(\n host=self.host,\n port=self.port,\n timeout=self.timeout.connect_timeout,\n **self.conn_kw,\n )\n return conn\n\n def _get_conn(self, timeout: Optional[float] = None) -> HTTPConnection:\n \"\"\"\n Get a connection. Will return a pooled connection if one is available.\n\n If no connections are available and :prop:`.block` is ``False``, then a\n fresh connection is returned.\n\n :param timeout:\n Seconds to wait before giving up and raising\n :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and\n :prop:`.block` is ``True``.\n \"\"\"\n conn = None\n\n if self.pool is None:\n raise ClosedPoolError(self, \"Pool is closed.\")\n\n try:\n conn = self.pool.get(block=self.block, timeout=timeout)\n\n except AttributeError: # self.pool is None\n raise ClosedPoolError(self, \"Pool is closed.\") from None # Defensive:\n\n except queue.Empty:\n if self.block:\n raise EmptyPoolError(\n self,\n \"Pool is empty and a new connection can't be opened due to blocking mode.\",\n ) from None\n pass # Oh well, we'll create a new connection then\n\n # If this is a persistent connection, check if it got disconnected\n if conn and is_connection_dropped(conn):\n log.debug(\"Resetting dropped connection: %s\", self.host)\n conn.close()\n if getattr(conn, \"auto_open\", 1) == 0:\n # This is a proxied connection that has been mutated by\n # http.client._tunnel() and cannot be reused (since it would\n # attempt to bypass the proxy)\n conn = None\n\n return conn or self._new_conn()\n\n def _put_conn(self, conn: Optional[HTTPConnection]) -> None:\n \"\"\"\n Put a connection back into the pool.\n\n :param conn:\n Connection object for the current host and port as returned by\n :meth:`._new_conn` or :meth:`._get_conn`.\n\n If the pool is already full, the connection is closed and discarded\n because we exceeded maxsize. If connections are discarded frequently,\n then maxsize should be increased.\n\n If the pool is closed, then the connection will be closed and discarded.\n \"\"\"\n if self.pool is not None:\n try:\n self.pool.put(conn, block=False)\n return # Everything is dandy, done.\n except AttributeError:\n # self.pool is None.\n pass\n except queue.Full:\n\n # Connection never got put back into the pool, close it.\n if conn:\n conn.close()\n\n if self.block:\n # This should never happen if you got the conn from self._get_conn\n raise FullPoolError(\n self,\n \"Pool reached maximum size and no more connections are allowed.\",\n ) from None\n\n log.warning(\n \"Connection pool is full, discarding connection: %s. Connection pool size: %s\",\n self.host,\n self.pool.qsize(),\n )\n\n # Connection never got put back into the pool, close it.\n if conn:\n conn.close()\n\n def _validate_conn(self, conn: HTTPConnection) -> None:\n \"\"\"\n Called right before a request is made, after the socket is created.\n \"\"\"\n pass\n\n def _prepare_proxy(self, conn: HTTPConnection) -> None:\n # Nothing to do for HTTP connections.\n pass\n\n def _get_timeout(self, timeout: _TYPE_TIMEOUT) -> Timeout:\n \"\"\"Helper that always returns a :class:`urllib3.util.Timeout`\"\"\"\n if timeout is _DEFAULT_TIMEOUT:\n return self.timeout.clone()\n\n if isinstance(timeout, Timeout):\n return timeout.clone()\n else:\n # User passed us an int/float. 
This is for backwards compatibility,\n # can be removed later\n return Timeout.from_float(timeout)\n\n def _raise_timeout(\n self,\n err: Union[BaseSSLError, OSError, SocketTimeout],\n url: str,\n timeout_value: Optional[_TYPE_TIMEOUT],\n ) -> None:\n \"\"\"Is the error actually a timeout? Will raise a ReadTimeout or pass\"\"\"\n\n if isinstance(err, SocketTimeout):\n raise ReadTimeoutError(\n self, url, f\"Read timed out. (read timeout={timeout_value})\"\n ) from err\n\n # See the above comment about EAGAIN in Python 3.\n if hasattr(err, \"errno\") and err.errno in _blocking_errnos:\n raise ReadTimeoutError(\n self, url, f\"Read timed out. (read timeout={timeout_value})\"\n ) from err\n\n def _make_request(\n self,\n conn: HTTPConnection,\n method: str,\n url: str,\n timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n chunked: bool = False,\n **httplib_request_kw: Any,\n ) -> _HttplibHTTPResponse:\n \"\"\"\n Perform a request on a given urllib connection object taken from our\n pool.\n\n :param conn:\n a connection from one of our connection pools\n\n :param timeout:\n Socket timeout in seconds for the request. This can be a\n float or integer, which will set the same timeout value for\n the socket connect and the socket read, or an instance of\n :class:`urllib3.util.Timeout`, which gives you more fine-grained\n control over your timeouts.\n \"\"\"\n self.num_requests += 1\n\n timeout_obj = self._get_timeout(timeout)\n timeout_obj.start_connect()\n conn.timeout = timeout_obj.connect_timeout # type: ignore[assignment]\n\n try:\n # Trigger any extra validation we need to do.\n try:\n self._validate_conn(conn)\n except (SocketTimeout, BaseSSLError) as e:\n self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)\n raise\n\n # _validate_conn() starts the connection to an HTTPS proxy\n # so we need to wrap errors with 'ProxyError' here too.\n except (\n OSError,\n NewConnectionError,\n TimeoutError,\n BaseSSLError,\n CertificateError,\n SSLError,\n ) as e:\n new_e: Exception = e\n if isinstance(e, (BaseSSLError, CertificateError)):\n new_e = SSLError(e)\n if isinstance(\n new_e, (OSError, NewConnectionError, TimeoutError, SSLError)\n ) and (conn and conn._connecting_to_proxy and conn.proxy):\n new_e = _wrap_proxy_error(new_e, conn.proxy.scheme)\n raise new_e\n\n # conn.request() calls http.client.*.request, not the method in\n # urllib3.request. It also calls makefile (recv) on the socket.\n try:\n if chunked:\n conn.request_chunked(method, url, **httplib_request_kw)\n else:\n conn.request(method, url, **httplib_request_kw)\n\n # We are swallowing BrokenPipeError (errno.EPIPE) since the server is\n # legitimately able to close the connection after sending a valid response.\n # With this behaviour, the received response is still readable.\n except BrokenPipeError:\n pass\n except OSError as e:\n # MacOS/Linux\n # EPROTOTYPE is needed on macOS\n # https://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/\n if e.errno != errno.EPROTOTYPE:\n raise\n\n # Reset the timeout for the recv() on the socket\n read_timeout = timeout_obj.read_timeout\n\n if conn.sock:\n # In Python 3 socket.py will catch EAGAIN and return None when you\n # try and read into the file pointer created by http.client, which\n # instead raises a BadStatusLine exception. 
Instead of catching\n # the exception and assuming all BadStatusLine exceptions are read\n # timeouts, check for a zero timeout before making the request.\n if read_timeout == 0:\n raise ReadTimeoutError(\n self, url, f\"Read timed out. (read timeout={read_timeout})\"\n )\n conn.sock.settimeout(read_timeout)\n\n # Receive the response from the server\n try:\n httplib_response = conn.getresponse()\n except (BaseSSLError, OSError) as e:\n self._raise_timeout(err=e, url=url, timeout_value=read_timeout)\n raise\n\n log.debug(\n '%s://%s:%s \"%s %s %s\" %s %s',\n self.scheme,\n self.host,\n self.port,\n method,\n url,\n # HTTP version\n conn._http_vsn_str, # type: ignore[attr-defined]\n httplib_response.status,\n httplib_response.length,\n )\n\n try:\n assert_header_parsing(httplib_response.msg)\n except (HeaderParsingError, TypeError) as hpe:\n log.warning(\n \"Failed to parse headers (url=%s): %s\",\n self._absolute_url(url),\n hpe,\n exc_info=True,\n )\n\n return httplib_response\n\n def _absolute_url(self, path: str) -> str:\n return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url\n\n def close(self) -> None:\n \"\"\"\n Close all pooled connections and disable the pool.\n \"\"\"\n if self.pool is None:\n return\n # Disable access to the pool\n old_pool, self.pool = self.pool, None\n\n try:\n while True:\n conn = old_pool.get(block=False)\n if conn:\n conn.close()\n\n except queue.Empty:\n pass # Done.\n\n def is_same_host(self, url: str) -> bool:\n \"\"\"\n Check if the given ``url`` is a member of the same host as this\n connection pool.\n \"\"\"\n if url.startswith(\"/\"):\n return True\n\n # TODO: Add optional support for socket.gethostbyname checking.\n scheme, _, host, port, *_ = parse_url(url)\n scheme = scheme or \"http\"\n if host is not None:\n host = _normalize_host(host, scheme=scheme)\n\n # Use explicit default port for comparison when none is given\n if self.port and not port:\n port = port_by_scheme.get(scheme)\n elif not self.port and port == port_by_scheme.get(scheme):\n port = None\n\n return (scheme, host, port) == (self.scheme, self.host, self.port)\n\n def urlopen( # type: ignore[override]\n self,\n method: str,\n url: str,\n body: Optional[_TYPE_BODY] = None,\n headers: Optional[Mapping[str, str]] = None,\n retries: Optional[Union[Retry, bool, int]] = None,\n redirect: bool = True,\n assert_same_host: bool = True,\n timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n pool_timeout: Optional[int] = None,\n release_conn: Optional[bool] = None,\n chunked: bool = False,\n body_pos: Optional[_TYPE_BODY_POSITION] = None,\n **response_kw: Any,\n ) -> BaseHTTPResponse:\n \"\"\"\n Get a connection from the pool and perform an HTTP request. This is the\n lowest level call for making a request, so you'll need to specify all\n the raw details.\n\n .. note::\n\n More commonly, it's appropriate to use a convenience method\n such as :meth:`request`.\n\n .. note::\n\n `release_conn` will only behave as expected if\n `preload_content=False` because we want to make\n `preload_content=False` the default behaviour someday soon without\n breaking backwards compatibility.\n\n :param method:\n HTTP request method (such as GET, POST, PUT, etc.)\n\n :param url:\n The URL to perform the request on.\n\n :param body:\n Data to send in the request body, either :class:`str`, :class:`bytes`,\n an iterable of :class:`str`/:class:`bytes`, or a file-like object.\n\n :param headers:\n Dictionary of custom headers to send, such as User-Agent,\n If-None-Match, etc. 
If None, pool headers are used. If provided,\n these headers completely replace any pool-specific headers.\n\n :param retries:\n Configure the number of retries to allow before raising a\n :class:`~urllib3.exceptions.MaxRetryError` exception.\n\n Pass ``None`` to retry until you receive a response. Pass a\n :class:`~urllib3.util.retry.Retry` object for fine-grained control\n over different types of retries.\n Pass an integer number to retry connection errors that many times,\n but no other types of errors. Pass zero to never retry.\n\n If ``False``, then retries are disabled and any exception is raised\n immediately. Also, instead of raising a MaxRetryError on redirects,\n the redirect response will be returned.\n\n :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.\n\n :param redirect:\n If True, automatically handle redirects (status codes 301, 302,\n 303, 307, 308). Each redirect counts as a retry. Disabling retries\n will disable redirect, too.\n\n :param assert_same_host:\n If ``True``, will make sure that the host of the pool requests is\n consistent else will raise HostChangedError. When ``False``, you can\n use the pool on an HTTP proxy and request foreign hosts.\n\n :param timeout:\n If specified, overrides the default timeout for this one\n request. It may be a float (in seconds) or an instance of\n :class:`urllib3.util.Timeout`.\n\n :param pool_timeout:\n If set and the pool is set to block=True, then this method will\n block for ``pool_timeout`` seconds and raise EmptyPoolError if no\n connection is available within the time period.\n\n :param release_conn:\n If False, then the urlopen call will not release the connection\n back into the pool once a response is received (but will release if\n you read the entire contents of the response such as when\n `preload_content=True`). This is useful if you're not preloading\n the response's content immediately. You will need to call\n ``r.release_conn()`` on the response ``r`` to return the connection\n back into the pool. If None, it takes the value of\n ``response_kw.get('preload_content', True)``.\n\n :param chunked:\n If True, urllib3 will send the body using chunked transfer\n encoding. Otherwise, urllib3 will send the body using the standard\n content-length form. Defaults to False.\n\n :param int body_pos:\n Position to seek to in file-like body in the event of a retry or\n redirect. Typically this won't need to be set because urllib3 will\n auto-populate the value when needed.\n\n :param \\\\**response_kw:\n Additional parameters are passed to\n :meth:`urllib3.response.HTTPResponse.from_httplib`\n \"\"\"\n\n parsed_url = parse_url(url)\n destination_scheme = parsed_url.scheme\n\n if headers is None:\n headers = self.headers\n\n if not isinstance(retries, Retry):\n retries = Retry.from_int(retries, redirect=redirect, default=self.retries)\n\n if release_conn is None:\n release_conn = response_kw.get(\"preload_content\", True)\n\n # Check host\n if assert_same_host and not self.is_same_host(url):\n raise HostChangedError(self, url, retries)\n\n # Ensure that the URL we're connecting to is properly encoded\n if url.startswith(\"/\"):\n url = to_str(_encode_target(url))\n else:\n url = to_str(parsed_url.url)\n\n conn = None\n\n # Track whether `conn` needs to be released before\n # returning/raising/recursing. Update this variable if necessary, and\n # leave `release_conn` constant throughout the function. 
That way, if\n # the function recurses, the original value of `release_conn` will be\n # passed down into the recursive call, and its value will be respected.\n #\n # See issue #651 [1] for details.\n #\n # [1] <https://github.com/urllib3/urllib3/issues/651>\n release_this_conn = release_conn\n\n http_tunnel_required = connection_requires_http_tunnel(\n self.proxy, self.proxy_config, destination_scheme\n )\n\n # Merge the proxy headers. Only done when not using HTTP CONNECT. We\n # have to copy the headers dict so we can safely change it without those\n # changes being reflected in anyone else's copy.\n if not http_tunnel_required:\n headers = headers.copy() # type: ignore[attr-defined]\n headers.update(self.proxy_headers) # type: ignore[union-attr]\n\n # Must keep the exception bound to a separate variable or else Python 3\n # complains about UnboundLocalError.\n err = None\n\n # Keep track of whether we cleanly exited the except block. This\n # ensures we do proper cleanup in finally.\n clean_exit = False\n\n # Rewind body position, if needed. Record current position\n # for future rewinds in the event of a redirect/retry.\n body_pos = set_file_position(body, body_pos)\n\n try:\n # Request a connection from the queue.\n timeout_obj = self._get_timeout(timeout)\n conn = self._get_conn(timeout=pool_timeout)\n\n conn.timeout = timeout_obj.connect_timeout # type: ignore[assignment]\n\n is_new_proxy_conn = self.proxy is not None and not getattr(\n conn, \"sock\", None\n )\n if is_new_proxy_conn:\n assert isinstance(self.proxy, Url)\n conn._connecting_to_proxy = True\n if http_tunnel_required:\n try:\n self._prepare_proxy(conn)\n except (BaseSSLError, OSError, SocketTimeout) as e:\n self._raise_timeout(\n err=e, url=self.proxy.url, timeout_value=conn.timeout\n )\n raise\n\n # Make the request on the httplib connection object.\n httplib_response = self._make_request(\n conn,\n method,\n url,\n timeout=timeout_obj,\n body=body,\n headers=headers,\n chunked=chunked,\n )\n\n # If we're going to release the connection in ``finally:``, then\n # the response doesn't need to know about the connection. Otherwise\n # it will also try to release it and we'll have a double-release\n # mess.\n response_conn = conn if not release_conn else None\n\n # Pass method to Response for length checking\n response_kw[\"request_method\"] = method\n\n # Import httplib's response into our own wrapper object\n response = self.ResponseCls.from_httplib(\n httplib_response,\n pool=self,\n connection=response_conn,\n retries=retries,\n **response_kw,\n )\n\n # Everything went great!\n clean_exit = True\n\n except EmptyPoolError:\n # Didn't get a connection from the pool, no need to clean up\n clean_exit = True\n release_this_conn = False\n raise\n\n except (\n TimeoutError,\n HTTPException,\n OSError,\n ProtocolError,\n BaseSSLError,\n SSLError,\n CertificateError,\n ProxyError,\n ) as e:\n # Discard the connection for these exceptions. 
It will be\n # replaced during the next _get_conn() call.\n clean_exit = False\n new_e: Exception = e\n if isinstance(e, (BaseSSLError, CertificateError)):\n new_e = SSLError(e)\n if isinstance(\n new_e,\n (\n OSError,\n NewConnectionError,\n TimeoutError,\n SSLError,\n HTTPException,\n ),\n ) and (conn and conn._connecting_to_proxy and conn.proxy):\n new_e = _wrap_proxy_error(new_e, conn.proxy.scheme)\n elif isinstance(new_e, (OSError, HTTPException)):\n new_e = ProtocolError(\"Connection aborted.\", new_e)\n\n retries = retries.increment(\n method, url, error=new_e, _pool=self, _stacktrace=sys.exc_info()[2]\n )\n retries.sleep()\n\n # Keep track of the error for the retry warning.\n err = e\n\n finally:\n if not clean_exit:\n # We hit some kind of exception, handled or otherwise. We need\n # to throw the connection away unless explicitly told not to.\n # Close the connection, set the variable to None, and make sure\n # we put the None back in the pool to avoid leaking it.\n if conn:\n conn.close()\n conn = None\n release_this_conn = True\n\n if release_this_conn:\n # Put the connection back to be reused. If the connection is\n # expired then it will be None, which will get replaced with a\n # fresh connection during _get_conn.\n self._put_conn(conn)\n\n if not conn:\n # Try again\n log.warning(\n \"Retrying (%r) after connection broken by '%r': %s\", retries, err, url\n )\n return self.urlopen(\n method,\n url,\n body,\n headers,\n retries,\n redirect,\n assert_same_host,\n timeout=timeout,\n pool_timeout=pool_timeout,\n release_conn=release_conn,\n chunked=chunked,\n body_pos=body_pos,\n **response_kw,\n )\n\n # Handle redirect?\n redirect_location = redirect and response.get_redirect_location()\n if redirect_location:\n if response.status == 303:\n method = \"GET\"\n\n try:\n retries = retries.increment(method, url, response=response, _pool=self)\n except MaxRetryError:\n if retries.raise_on_redirect:\n response.drain_conn()\n raise\n return response\n\n response.drain_conn()\n retries.sleep_for_retry(response)\n log.debug(\"Redirecting %s -> %s\", url, redirect_location)\n return self.urlopen(\n method,\n redirect_location,\n body,\n headers,\n retries=retries,\n redirect=redirect,\n assert_same_host=assert_same_host,\n timeout=timeout,\n pool_timeout=pool_timeout,\n release_conn=release_conn,\n chunked=chunked,\n body_pos=body_pos,\n **response_kw,\n )\n\n # Check if we should retry the HTTP response.\n has_retry_after = bool(response.getheader(\"Retry-After\"))\n if retries.is_retry(method, response.status, has_retry_after):\n try:\n retries = retries.increment(method, url, response=response, _pool=self)\n except MaxRetryError:\n if retries.raise_on_status:\n response.drain_conn()\n raise\n return response\n\n response.drain_conn()\n retries.sleep(response)\n log.debug(\"Retry: %s\", url)\n return self.urlopen(\n method,\n url,\n body,\n headers,\n retries=retries,\n redirect=redirect,\n assert_same_host=assert_same_host,\n timeout=timeout,\n pool_timeout=pool_timeout,\n release_conn=release_conn,\n chunked=chunked,\n body_pos=body_pos,\n **response_kw,\n )\n\n return response\n\n\nclass HTTPSConnectionPool(HTTPConnectionPool):\n \"\"\"\n Same as :class:`.HTTPConnectionPool`, but HTTPS.\n\n :class:`.HTTPSConnection` uses one of ``assert_fingerprint``,\n ``assert_hostname`` and ``host`` in this order to verify connections.\n If ``assert_hostname`` is False, no verification is done.\n\n The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,\n ``ca_cert_dir``, ``ssl_version``, 
``key_password`` are only used if :mod:`ssl`\n is available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade\n the connection socket into an SSL socket.\n \"\"\"\n\n scheme = \"https\"\n ConnectionCls = HTTPSConnection\n\n def __init__(\n self,\n host: str,\n port: Optional[int] = None,\n timeout: Optional[_TYPE_TIMEOUT] = _DEFAULT_TIMEOUT,\n maxsize: int = 1,\n block: bool = False,\n headers: Optional[Mapping[str, str]] = None,\n retries: Optional[Union[Retry, bool, int]] = None,\n _proxy: Optional[Url] = None,\n _proxy_headers: Optional[Mapping[str, str]] = None,\n key_file: Optional[str] = None,\n cert_file: Optional[str] = None,\n cert_reqs: Optional[Union[int, str]] = None,\n key_password: Optional[str] = None,\n ca_certs: Optional[str] = None,\n ssl_version: Optional[Union[int, str]] = None,\n ssl_minimum_version: Optional[\"ssl.TLSVersion\"] = None,\n ssl_maximum_version: Optional[\"ssl.TLSVersion\"] = None,\n assert_hostname: Optional[Union[str, \"Literal[False]\"]] = None,\n assert_fingerprint: Optional[str] = None,\n ca_cert_dir: Optional[str] = None,\n **conn_kw: Any,\n ) -> None:\n\n super().__init__(\n host,\n port,\n timeout,\n maxsize,\n block,\n headers,\n retries,\n _proxy,\n _proxy_headers,\n **conn_kw,\n )\n\n self.key_file = key_file\n self.cert_file = cert_file\n self.cert_reqs = cert_reqs\n self.key_password = key_password\n self.ca_certs = ca_certs\n self.ca_cert_dir = ca_cert_dir\n self.ssl_version = ssl_version\n self.ssl_minimum_version = ssl_minimum_version\n self.ssl_maximum_version = ssl_maximum_version\n self.assert_hostname = assert_hostname\n self.assert_fingerprint = assert_fingerprint\n\n def _prepare_conn(self, conn: HTTPSConnection) -> HTTPConnection:\n \"\"\"\n Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`\n and establish the tunnel if proxy is used.\n \"\"\"\n\n if isinstance(conn, VerifiedHTTPSConnection):\n conn.set_cert(\n key_file=self.key_file,\n key_password=self.key_password,\n cert_file=self.cert_file,\n cert_reqs=self.cert_reqs,\n ca_certs=self.ca_certs,\n ca_cert_dir=self.ca_cert_dir,\n assert_hostname=self.assert_hostname,\n assert_fingerprint=self.assert_fingerprint,\n )\n conn.ssl_version = self.ssl_version\n conn.ssl_minimum_version = self.ssl_minimum_version\n conn.ssl_maximum_version = self.ssl_maximum_version\n\n return conn\n\n def _prepare_proxy(self, conn: HTTPSConnection) -> None: # type: ignore[override]\n \"\"\"\n Establishes a tunnel connection through HTTP CONNECT.\n\n Tunnel connection is established early because otherwise httplib would\n improperly set Host: header to proxy's IP:port.\n \"\"\"\n conn.set_tunnel(self._proxy_host, self.port, self.proxy_headers)\n\n if self.proxy and self.proxy.scheme == \"https\":\n conn.tls_in_tls_required = True\n\n conn.connect()\n\n def _new_conn(self) -> HTTPConnection:\n \"\"\"\n Return a fresh :class:`urllib3.connection.HTTPConnection`.\n \"\"\"\n self.num_connections += 1\n log.debug(\n \"Starting new HTTPS connection (%d): %s:%s\",\n self.num_connections,\n self.host,\n self.port or \"443\",\n )\n\n if not self.ConnectionCls or self.ConnectionCls is DummyConnection: # type: ignore[comparison-overlap]\n raise SSLError(\n \"Can't connect to HTTPS URL because the SSL module is not available.\"\n )\n\n actual_host: str = self.host\n actual_port = self.port\n if self.proxy is not None and self.proxy.host is not None:\n actual_host = self.proxy.host\n actual_port = self.proxy.port\n\n conn = self.ConnectionCls(\n host=actual_host,\n port=actual_port,\n 
timeout=self.timeout.connect_timeout,\n cert_file=self.cert_file,\n key_file=self.key_file,\n key_password=self.key_password,\n **self.conn_kw,\n )\n\n return self._prepare_conn(conn)\n\n def _validate_conn(self, conn: HTTPConnection) -> None:\n \"\"\"\n Called right before a request is made, after the socket is created.\n \"\"\"\n super()._validate_conn(conn)\n\n # Force connect early to allow us to validate the connection.\n if not conn.sock:\n conn.connect()\n\n if not conn.is_verified:\n warnings.warn(\n (\n f\"Unverified HTTPS request is being made to host '{conn.host}'. \"\n \"Adding certificate verification is strongly advised. See: \"\n \"https://urllib3.readthedocs.io/en/latest/advanced-usage.html\"\n \"#tls-warnings\"\n ),\n InsecureRequestWarning,\n )\n\n\ndef connection_from_url(url: str, **kw: Any) -> HTTPConnectionPool:\n \"\"\"\n Given a url, return an :class:`.ConnectionPool` instance of its host.\n\n This is a shortcut for not having to parse out the scheme, host, and port\n of the url before creating an :class:`.ConnectionPool` instance.\n\n :param url:\n Absolute URL string that must include the scheme. Port is optional.\n\n :param \\\\**kw:\n Passes additional parameters to the constructor of the appropriate\n :class:`.ConnectionPool`. Useful for specifying things like\n timeout, maxsize, headers, etc.\n\n Example::\n\n >>> conn = connection_from_url('http://google.com/')\n >>> r = conn.request('GET', '/')\n \"\"\"\n scheme, _, host, port, *_ = parse_url(url)\n scheme = scheme or \"http\"\n port = port or port_by_scheme.get(scheme, 80)\n if scheme == \"https\":\n return HTTPSConnectionPool(host, port=port, **kw) # type: ignore[arg-type]\n else:\n return HTTPConnectionPool(host, port=port, **kw) # type: ignore[arg-type]\n\n\n@overload\ndef _normalize_host(host: None, scheme: Optional[str]) -> None:\n ...\n\n\n@overload\ndef _normalize_host(host: str, scheme: Optional[str]) -> str:\n ...\n\n\ndef _normalize_host(host: Optional[str], scheme: Optional[str]) -> Optional[str]:\n \"\"\"\n Normalize hosts for comparisons and use with sockets.\n \"\"\"\n\n host = normalize_host(host, scheme)\n\n # httplib doesn't like it when we include brackets in IPv6 addresses\n # Specifically, if we include brackets but also pass the port then\n # httplib crazily doubles up the square brackets on the Host header.\n # Instead, we need to make sure we never pass ``None`` as the port.\n # However, for backward compatibility reasons we can't actually\n # *assert* that. 
See http://bugs.python.org/issue28539\n if host and host.startswith(\"[\") and host.endswith(\"]\"):\n host = host[1:-1]\n return host\n", "path": "src/urllib3/connectionpool.py"}], "after_files": [{"content": "import errno\nimport logging\nimport queue\nimport sys\nimport warnings\nfrom http.client import HTTPResponse as _HttplibHTTPResponse\nfrom socket import timeout as SocketTimeout\nfrom types import TracebackType\nfrom typing import TYPE_CHECKING, Any, Mapping, Optional, Type, TypeVar, Union, overload\n\nfrom ._request_methods import RequestMethods\nfrom .connection import (\n _TYPE_BODY,\n BaseSSLError,\n BrokenPipeError,\n DummyConnection,\n HTTPConnection,\n HTTPException,\n HTTPSConnection,\n ProxyConfig,\n VerifiedHTTPSConnection,\n _wrap_proxy_error,\n)\nfrom .connection import port_by_scheme as port_by_scheme\nfrom .exceptions import (\n ClosedPoolError,\n EmptyPoolError,\n FullPoolError,\n HeaderParsingError,\n HostChangedError,\n InsecureRequestWarning,\n LocationValueError,\n MaxRetryError,\n NewConnectionError,\n ProtocolError,\n ProxyError,\n ReadTimeoutError,\n SSLError,\n TimeoutError,\n)\nfrom .response import BaseHTTPResponse, HTTPResponse\nfrom .util.connection import is_connection_dropped\nfrom .util.proxy import connection_requires_http_tunnel\nfrom .util.request import _TYPE_BODY_POSITION, set_file_position\nfrom .util.response import assert_header_parsing\nfrom .util.retry import Retry\nfrom .util.ssl_match_hostname import CertificateError\nfrom .util.timeout import _DEFAULT_TIMEOUT, _TYPE_DEFAULT, Timeout\nfrom .util.url import Url, _encode_target\nfrom .util.url import _normalize_host as normalize_host\nfrom .util.url import parse_url\nfrom .util.util import to_str\n\nif TYPE_CHECKING:\n import ssl\n\n from typing_extensions import Literal\n\nlog = logging.getLogger(__name__)\n\n_TYPE_TIMEOUT = Union[Timeout, float, _TYPE_DEFAULT]\n\n_SelfT = TypeVar(\"_SelfT\")\n\n\n# Pool objects\nclass ConnectionPool:\n \"\"\"\n Base class for all connection pools, such as\n :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.\n\n .. note::\n ConnectionPool.urlopen() does not normalize or percent-encode target URIs\n which is useful if your target server doesn't support percent-encoded\n target URIs.\n \"\"\"\n\n scheme: Optional[str] = None\n QueueCls = queue.LifoQueue\n\n def __init__(self, host: str, port: Optional[int] = None) -> None:\n if not host:\n raise LocationValueError(\"No host specified.\")\n\n self.host = _normalize_host(host, scheme=self.scheme)\n self._proxy_host = host.lower()\n self.port = port\n\n def __str__(self) -> str:\n return f\"{type(self).__name__}(host={self.host!r}, port={self.port!r})\"\n\n def __enter__(self: _SelfT) -> _SelfT:\n return self\n\n def __exit__(\n self,\n exc_type: Optional[Type[BaseException]],\n exc_val: Optional[BaseException],\n exc_tb: Optional[TracebackType],\n ) -> \"Literal[False]\":\n self.close()\n # Return False to re-raise any potential exceptions\n return False\n\n def close(self) -> None:\n \"\"\"\n Close all pooled connections and disable the pool.\n \"\"\"\n pass\n\n\n# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252\n_blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK}\n\n\nclass HTTPConnectionPool(ConnectionPool, RequestMethods):\n \"\"\"\n Thread-safe connection pool for one host.\n\n :param host:\n Host used for this HTTP Connection (e.g. 
\"localhost\"), passed into\n :class:`http.client.HTTPConnection`.\n\n :param port:\n Port used for this HTTP Connection (None is equivalent to 80), passed\n into :class:`http.client.HTTPConnection`.\n\n :param timeout:\n Socket timeout in seconds for each individual connection. This can\n be a float or integer, which sets the timeout for the HTTP request,\n or an instance of :class:`urllib3.util.Timeout` which gives you more\n fine-grained control over request timeouts. After the constructor has\n been parsed, this is always a `urllib3.util.Timeout` object.\n\n :param maxsize:\n Number of connections to save that can be reused. More than 1 is useful\n in multithreaded situations. If ``block`` is set to False, more\n connections will be created but they will not be saved once they've\n been used.\n\n :param block:\n If set to True, no more than ``maxsize`` connections will be used at\n a time. When no free connections are available, the call will block\n until a connection has been released. This is a useful side effect for\n particular multithreaded situations where one does not want to use more\n than maxsize connections per host to prevent flooding.\n\n :param headers:\n Headers to include with all requests, unless other headers are given\n explicitly.\n\n :param retries:\n Retry configuration to use by default with requests in this pool.\n\n :param _proxy:\n Parsed proxy URL, should not be used directly, instead, see\n :class:`urllib3.ProxyManager`\n\n :param _proxy_headers:\n A dictionary with proxy headers, should not be used directly,\n instead, see :class:`urllib3.ProxyManager`\n\n :param \\\\**conn_kw:\n Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,\n :class:`urllib3.connection.HTTPSConnection` instances.\n \"\"\"\n\n scheme = \"http\"\n ConnectionCls: Type[Union[HTTPConnection, HTTPSConnection]] = HTTPConnection\n ResponseCls = HTTPResponse\n\n def __init__(\n self,\n host: str,\n port: Optional[int] = None,\n timeout: Optional[_TYPE_TIMEOUT] = _DEFAULT_TIMEOUT,\n maxsize: int = 1,\n block: bool = False,\n headers: Optional[Mapping[str, str]] = None,\n retries: Optional[Union[Retry, bool, int]] = None,\n _proxy: Optional[Url] = None,\n _proxy_headers: Optional[Mapping[str, str]] = None,\n _proxy_config: Optional[ProxyConfig] = None,\n **conn_kw: Any,\n ):\n ConnectionPool.__init__(self, host, port)\n RequestMethods.__init__(self, headers)\n\n if not isinstance(timeout, Timeout):\n timeout = Timeout.from_float(timeout)\n\n if retries is None:\n retries = Retry.DEFAULT\n\n self.timeout = timeout\n self.retries = retries\n\n self.pool: Optional[queue.LifoQueue[Any]] = self.QueueCls(maxsize)\n self.block = block\n\n self.proxy = _proxy\n self.proxy_headers = _proxy_headers or {}\n self.proxy_config = _proxy_config\n\n # Fill the queue up so that doing get() on it will block properly\n for _ in range(maxsize):\n self.pool.put(None)\n\n # These are mostly for testing and debugging purposes.\n self.num_connections = 0\n self.num_requests = 0\n self.conn_kw = conn_kw\n\n if self.proxy:\n # Enable Nagle's algorithm for proxies, to avoid packet fragmentation.\n # We cannot know if the user has added default socket options, so we cannot replace the\n # list.\n self.conn_kw.setdefault(\"socket_options\", [])\n\n self.conn_kw[\"proxy\"] = self.proxy\n self.conn_kw[\"proxy_config\"] = self.proxy_config\n\n def _new_conn(self) -> HTTPConnection:\n \"\"\"\n Return a fresh :class:`HTTPConnection`.\n \"\"\"\n self.num_connections += 1\n log.debug(\n 
\"Starting new HTTP connection (%d): %s:%s\",\n self.num_connections,\n self.host,\n self.port or \"80\",\n )\n\n conn = self.ConnectionCls(\n host=self.host,\n port=self.port,\n timeout=self.timeout.connect_timeout,\n **self.conn_kw,\n )\n return conn\n\n def _get_conn(self, timeout: Optional[float] = None) -> HTTPConnection:\n \"\"\"\n Get a connection. Will return a pooled connection if one is available.\n\n If no connections are available and :prop:`.block` is ``False``, then a\n fresh connection is returned.\n\n :param timeout:\n Seconds to wait before giving up and raising\n :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and\n :prop:`.block` is ``True``.\n \"\"\"\n conn = None\n\n if self.pool is None:\n raise ClosedPoolError(self, \"Pool is closed.\")\n\n try:\n conn = self.pool.get(block=self.block, timeout=timeout)\n\n except AttributeError: # self.pool is None\n raise ClosedPoolError(self, \"Pool is closed.\") from None # Defensive:\n\n except queue.Empty:\n if self.block:\n raise EmptyPoolError(\n self,\n \"Pool is empty and a new connection can't be opened due to blocking mode.\",\n ) from None\n pass # Oh well, we'll create a new connection then\n\n # If this is a persistent connection, check if it got disconnected\n if conn and is_connection_dropped(conn):\n log.debug(\"Resetting dropped connection: %s\", self.host)\n conn.close()\n if getattr(conn, \"auto_open\", 1) == 0:\n # This is a proxied connection that has been mutated by\n # http.client._tunnel() and cannot be reused (since it would\n # attempt to bypass the proxy)\n conn = None\n\n return conn or self._new_conn()\n\n def _put_conn(self, conn: Optional[HTTPConnection]) -> None:\n \"\"\"\n Put a connection back into the pool.\n\n :param conn:\n Connection object for the current host and port as returned by\n :meth:`._new_conn` or :meth:`._get_conn`.\n\n If the pool is already full, the connection is closed and discarded\n because we exceeded maxsize. If connections are discarded frequently,\n then maxsize should be increased.\n\n If the pool is closed, then the connection will be closed and discarded.\n \"\"\"\n if self.pool is not None:\n try:\n self.pool.put(conn, block=False)\n return # Everything is dandy, done.\n except AttributeError:\n # self.pool is None.\n pass\n except queue.Full:\n\n # Connection never got put back into the pool, close it.\n if conn:\n conn.close()\n\n if self.block:\n # This should never happen if you got the conn from self._get_conn\n raise FullPoolError(\n self,\n \"Pool reached maximum size and no more connections are allowed.\",\n ) from None\n\n log.warning(\n \"Connection pool is full, discarding connection: %s. Connection pool size: %s\",\n self.host,\n self.pool.qsize(),\n )\n\n # Connection never got put back into the pool, close it.\n if conn:\n conn.close()\n\n def _validate_conn(self, conn: HTTPConnection) -> None:\n \"\"\"\n Called right before a request is made, after the socket is created.\n \"\"\"\n pass\n\n def _prepare_proxy(self, conn: HTTPConnection) -> None:\n # Nothing to do for HTTP connections.\n pass\n\n def _get_timeout(self, timeout: _TYPE_TIMEOUT) -> Timeout:\n \"\"\"Helper that always returns a :class:`urllib3.util.Timeout`\"\"\"\n if timeout is _DEFAULT_TIMEOUT:\n return self.timeout.clone()\n\n if isinstance(timeout, Timeout):\n return timeout.clone()\n else:\n # User passed us an int/float. 
This is for backwards compatibility,\n # can be removed later\n return Timeout.from_float(timeout)\n\n def _raise_timeout(\n self,\n err: Union[BaseSSLError, OSError, SocketTimeout],\n url: str,\n timeout_value: Optional[_TYPE_TIMEOUT],\n ) -> None:\n \"\"\"Is the error actually a timeout? Will raise a ReadTimeout or pass\"\"\"\n\n if isinstance(err, SocketTimeout):\n raise ReadTimeoutError(\n self, url, f\"Read timed out. (read timeout={timeout_value})\"\n ) from err\n\n # See the above comment about EAGAIN in Python 3.\n if hasattr(err, \"errno\") and err.errno in _blocking_errnos:\n raise ReadTimeoutError(\n self, url, f\"Read timed out. (read timeout={timeout_value})\"\n ) from err\n\n def _make_request(\n self,\n conn: HTTPConnection,\n method: str,\n url: str,\n timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n chunked: bool = False,\n **httplib_request_kw: Any,\n ) -> _HttplibHTTPResponse:\n \"\"\"\n Perform a request on a given urllib connection object taken from our\n pool.\n\n :param conn:\n a connection from one of our connection pools\n\n :param timeout:\n Socket timeout in seconds for the request. This can be a\n float or integer, which will set the same timeout value for\n the socket connect and the socket read, or an instance of\n :class:`urllib3.util.Timeout`, which gives you more fine-grained\n control over your timeouts.\n \"\"\"\n self.num_requests += 1\n\n timeout_obj = self._get_timeout(timeout)\n timeout_obj.start_connect()\n conn.timeout = timeout_obj.connect_timeout # type: ignore[assignment]\n\n try:\n # Trigger any extra validation we need to do.\n try:\n self._validate_conn(conn)\n except (SocketTimeout, BaseSSLError) as e:\n self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)\n raise\n\n # _validate_conn() starts the connection to an HTTPS proxy\n # so we need to wrap errors with 'ProxyError' here too.\n except (\n OSError,\n NewConnectionError,\n TimeoutError,\n BaseSSLError,\n CertificateError,\n SSLError,\n ) as e:\n new_e: Exception = e\n if isinstance(e, (BaseSSLError, CertificateError)):\n new_e = SSLError(e)\n if isinstance(\n new_e, (OSError, NewConnectionError, TimeoutError, SSLError)\n ) and (conn and conn._connecting_to_proxy and conn.proxy):\n new_e = _wrap_proxy_error(new_e, conn.proxy.scheme)\n raise new_e\n\n # conn.request() calls http.client.*.request, not the method in\n # urllib3.request. It also calls makefile (recv) on the socket.\n try:\n if chunked:\n conn.request_chunked(method, url, **httplib_request_kw)\n else:\n conn.request(method, url, **httplib_request_kw)\n\n # We are swallowing BrokenPipeError (errno.EPIPE) since the server is\n # legitimately able to close the connection after sending a valid response.\n # With this behaviour, the received response is still readable.\n except BrokenPipeError:\n pass\n except OSError as e:\n # MacOS/Linux\n # EPROTOTYPE is needed on macOS\n # https://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/\n if e.errno != errno.EPROTOTYPE:\n raise\n\n # Reset the timeout for the recv() on the socket\n read_timeout = timeout_obj.read_timeout\n\n if conn.sock:\n # In Python 3 socket.py will catch EAGAIN and return None when you\n # try and read into the file pointer created by http.client, which\n # instead raises a BadStatusLine exception. 
Instead of catching\n # the exception and assuming all BadStatusLine exceptions are read\n # timeouts, check for a zero timeout before making the request.\n if read_timeout == 0:\n raise ReadTimeoutError(\n self, url, f\"Read timed out. (read timeout={read_timeout})\"\n )\n conn.sock.settimeout(read_timeout)\n\n # Receive the response from the server\n try:\n httplib_response = conn.getresponse()\n except (BaseSSLError, OSError) as e:\n self._raise_timeout(err=e, url=url, timeout_value=read_timeout)\n raise\n\n log.debug(\n '%s://%s:%s \"%s %s %s\" %s %s',\n self.scheme,\n self.host,\n self.port,\n method,\n url,\n # HTTP version\n conn._http_vsn_str, # type: ignore[attr-defined]\n httplib_response.status,\n httplib_response.length,\n )\n\n try:\n assert_header_parsing(httplib_response.msg)\n except (HeaderParsingError, TypeError) as hpe:\n log.warning(\n \"Failed to parse headers (url=%s): %s\",\n self._absolute_url(url),\n hpe,\n exc_info=True,\n )\n\n return httplib_response\n\n def _absolute_url(self, path: str) -> str:\n return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url\n\n def close(self) -> None:\n \"\"\"\n Close all pooled connections and disable the pool.\n \"\"\"\n if self.pool is None:\n return\n # Disable access to the pool\n old_pool, self.pool = self.pool, None\n\n try:\n while True:\n conn = old_pool.get(block=False)\n if conn:\n conn.close()\n\n except queue.Empty:\n pass # Done.\n\n def is_same_host(self, url: str) -> bool:\n \"\"\"\n Check if the given ``url`` is a member of the same host as this\n connection pool.\n \"\"\"\n if url.startswith(\"/\"):\n return True\n\n # TODO: Add optional support for socket.gethostbyname checking.\n scheme, _, host, port, *_ = parse_url(url)\n scheme = scheme or \"http\"\n if host is not None:\n host = _normalize_host(host, scheme=scheme)\n\n # Use explicit default port for comparison when none is given\n if self.port and not port:\n port = port_by_scheme.get(scheme)\n elif not self.port and port == port_by_scheme.get(scheme):\n port = None\n\n return (scheme, host, port) == (self.scheme, self.host, self.port)\n\n def urlopen( # type: ignore[override]\n self,\n method: str,\n url: str,\n body: Optional[_TYPE_BODY] = None,\n headers: Optional[Mapping[str, str]] = None,\n retries: Optional[Union[Retry, bool, int]] = None,\n redirect: bool = True,\n assert_same_host: bool = True,\n timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n pool_timeout: Optional[int] = None,\n release_conn: Optional[bool] = None,\n chunked: bool = False,\n body_pos: Optional[_TYPE_BODY_POSITION] = None,\n **response_kw: Any,\n ) -> BaseHTTPResponse:\n \"\"\"\n Get a connection from the pool and perform an HTTP request. This is the\n lowest level call for making a request, so you'll need to specify all\n the raw details.\n\n .. note::\n\n More commonly, it's appropriate to use a convenience method\n such as :meth:`request`.\n\n .. note::\n\n `release_conn` will only behave as expected if\n `preload_content=False` because we want to make\n `preload_content=False` the default behaviour someday soon without\n breaking backwards compatibility.\n\n :param method:\n HTTP request method (such as GET, POST, PUT, etc.)\n\n :param url:\n The URL to perform the request on.\n\n :param body:\n Data to send in the request body, either :class:`str`, :class:`bytes`,\n an iterable of :class:`str`/:class:`bytes`, or a file-like object.\n\n :param headers:\n Dictionary of custom headers to send, such as User-Agent,\n If-None-Match, etc. 
If None, pool headers are used. If provided,\n these headers completely replace any pool-specific headers.\n\n :param retries:\n Configure the number of retries to allow before raising a\n :class:`~urllib3.exceptions.MaxRetryError` exception.\n\n Pass ``None`` to retry until you receive a response. Pass a\n :class:`~urllib3.util.retry.Retry` object for fine-grained control\n over different types of retries.\n Pass an integer number to retry connection errors that many times,\n but no other types of errors. Pass zero to never retry.\n\n If ``False``, then retries are disabled and any exception is raised\n immediately. Also, instead of raising a MaxRetryError on redirects,\n the redirect response will be returned.\n\n :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.\n\n :param redirect:\n If True, automatically handle redirects (status codes 301, 302,\n 303, 307, 308). Each redirect counts as a retry. Disabling retries\n will disable redirect, too.\n\n :param assert_same_host:\n If ``True``, will make sure that the host of the pool requests is\n consistent else will raise HostChangedError. When ``False``, you can\n use the pool on an HTTP proxy and request foreign hosts.\n\n :param timeout:\n If specified, overrides the default timeout for this one\n request. It may be a float (in seconds) or an instance of\n :class:`urllib3.util.Timeout`.\n\n :param pool_timeout:\n If set and the pool is set to block=True, then this method will\n block for ``pool_timeout`` seconds and raise EmptyPoolError if no\n connection is available within the time period.\n\n :param release_conn:\n If False, then the urlopen call will not release the connection\n back into the pool once a response is received (but will release if\n you read the entire contents of the response such as when\n `preload_content=True`). This is useful if you're not preloading\n the response's content immediately. You will need to call\n ``r.release_conn()`` on the response ``r`` to return the connection\n back into the pool. If None, it takes the value of\n ``response_kw.get('preload_content', True)``.\n\n :param chunked:\n If True, urllib3 will send the body using chunked transfer\n encoding. Otherwise, urllib3 will send the body using the standard\n content-length form. Defaults to False.\n\n :param int body_pos:\n Position to seek to in file-like body in the event of a retry or\n redirect. Typically this won't need to be set because urllib3 will\n auto-populate the value when needed.\n\n :param \\\\**response_kw:\n Additional parameters are passed to\n :meth:`urllib3.response.HTTPResponse.from_httplib`\n \"\"\"\n\n parsed_url = parse_url(url)\n destination_scheme = parsed_url.scheme\n\n if headers is None:\n headers = self.headers\n\n if not isinstance(retries, Retry):\n retries = Retry.from_int(retries, redirect=redirect, default=self.retries)\n\n if release_conn is None:\n release_conn = response_kw.get(\"preload_content\", True)\n\n # Check host\n if assert_same_host and not self.is_same_host(url):\n raise HostChangedError(self, url, retries)\n\n # Ensure that the URL we're connecting to is properly encoded\n if url.startswith(\"/\"):\n url = to_str(_encode_target(url))\n else:\n url = to_str(parsed_url.url)\n\n conn = None\n\n # Track whether `conn` needs to be released before\n # returning/raising/recursing. Update this variable if necessary, and\n # leave `release_conn` constant throughout the function. 
That way, if\n # the function recurses, the original value of `release_conn` will be\n # passed down into the recursive call, and its value will be respected.\n #\n # See issue #651 [1] for details.\n #\n # [1] <https://github.com/urllib3/urllib3/issues/651>\n release_this_conn = release_conn\n\n http_tunnel_required = connection_requires_http_tunnel(\n self.proxy, self.proxy_config, destination_scheme\n )\n\n # Merge the proxy headers. Only done when not using HTTP CONNECT. We\n # have to copy the headers dict so we can safely change it without those\n # changes being reflected in anyone else's copy.\n if not http_tunnel_required:\n headers = headers.copy() # type: ignore[attr-defined]\n headers.update(self.proxy_headers) # type: ignore[union-attr]\n\n # Must keep the exception bound to a separate variable or else Python 3\n # complains about UnboundLocalError.\n err = None\n\n # Keep track of whether we cleanly exited the except block. This\n # ensures we do proper cleanup in finally.\n clean_exit = False\n\n # Rewind body position, if needed. Record current position\n # for future rewinds in the event of a redirect/retry.\n body_pos = set_file_position(body, body_pos)\n\n try:\n # Request a connection from the queue.\n timeout_obj = self._get_timeout(timeout)\n conn = self._get_conn(timeout=pool_timeout)\n\n conn.timeout = timeout_obj.connect_timeout # type: ignore[assignment]\n\n is_new_proxy_conn = self.proxy is not None and not getattr(\n conn, \"sock\", None\n )\n if is_new_proxy_conn:\n assert isinstance(self.proxy, Url)\n conn._connecting_to_proxy = True\n if http_tunnel_required:\n try:\n self._prepare_proxy(conn)\n except (BaseSSLError, OSError, SocketTimeout) as e:\n self._raise_timeout(\n err=e, url=self.proxy.url, timeout_value=conn.timeout\n )\n raise\n\n # Make the request on the httplib connection object.\n httplib_response = self._make_request(\n conn,\n method,\n url,\n timeout=timeout_obj,\n body=body,\n headers=headers,\n chunked=chunked,\n )\n\n # If we're going to release the connection in ``finally:``, then\n # the response doesn't need to know about the connection. Otherwise\n # it will also try to release it and we'll have a double-release\n # mess.\n response_conn = conn if not release_conn else None\n\n # Pass method to Response for length checking\n response_kw[\"request_method\"] = method\n\n # Import httplib's response into our own wrapper object\n response = self.ResponseCls.from_httplib(\n httplib_response,\n pool=self,\n connection=response_conn,\n retries=retries,\n **response_kw,\n )\n\n # Everything went great!\n clean_exit = True\n\n except EmptyPoolError:\n # Didn't get a connection from the pool, no need to clean up\n clean_exit = True\n release_this_conn = False\n raise\n\n except (\n TimeoutError,\n HTTPException,\n OSError,\n ProtocolError,\n BaseSSLError,\n SSLError,\n CertificateError,\n ProxyError,\n ) as e:\n # Discard the connection for these exceptions. 
It will be\n # replaced during the next _get_conn() call.\n clean_exit = False\n new_e: Exception = e\n if isinstance(e, (BaseSSLError, CertificateError)):\n new_e = SSLError(e)\n if isinstance(\n new_e,\n (\n OSError,\n NewConnectionError,\n TimeoutError,\n SSLError,\n HTTPException,\n ),\n ) and (conn and conn._connecting_to_proxy and conn.proxy):\n new_e = _wrap_proxy_error(new_e, conn.proxy.scheme)\n elif isinstance(new_e, (OSError, HTTPException)):\n new_e = ProtocolError(\"Connection aborted.\", new_e)\n\n retries = retries.increment(\n method, url, error=new_e, _pool=self, _stacktrace=sys.exc_info()[2]\n )\n retries.sleep()\n\n # Keep track of the error for the retry warning.\n err = e\n\n finally:\n if not clean_exit:\n # We hit some kind of exception, handled or otherwise. We need\n # to throw the connection away unless explicitly told not to.\n # Close the connection, set the variable to None, and make sure\n # we put the None back in the pool to avoid leaking it.\n if conn:\n conn.close()\n conn = None\n release_this_conn = True\n\n if release_this_conn:\n # Put the connection back to be reused. If the connection is\n # expired then it will be None, which will get replaced with a\n # fresh connection during _get_conn.\n self._put_conn(conn)\n\n if not conn:\n # Try again\n log.warning(\n \"Retrying (%r) after connection broken by '%r': %s\", retries, err, url\n )\n return self.urlopen(\n method,\n url,\n body,\n headers,\n retries,\n redirect,\n assert_same_host,\n timeout=timeout,\n pool_timeout=pool_timeout,\n release_conn=release_conn,\n chunked=chunked,\n body_pos=body_pos,\n **response_kw,\n )\n\n # Handle redirect?\n redirect_location = redirect and response.get_redirect_location()\n if redirect_location:\n if response.status == 303:\n method = \"GET\"\n\n try:\n retries = retries.increment(method, url, response=response, _pool=self)\n except MaxRetryError:\n if retries.raise_on_redirect:\n response.drain_conn()\n raise\n return response\n\n response.drain_conn()\n retries.sleep_for_retry(response)\n log.debug(\"Redirecting %s -> %s\", url, redirect_location)\n return self.urlopen(\n method,\n redirect_location,\n body,\n headers,\n retries=retries,\n redirect=redirect,\n assert_same_host=assert_same_host,\n timeout=timeout,\n pool_timeout=pool_timeout,\n release_conn=release_conn,\n chunked=chunked,\n body_pos=body_pos,\n **response_kw,\n )\n\n # Check if we should retry the HTTP response.\n has_retry_after = bool(response.getheader(\"Retry-After\"))\n if retries.is_retry(method, response.status, has_retry_after):\n try:\n retries = retries.increment(method, url, response=response, _pool=self)\n except MaxRetryError:\n if retries.raise_on_status:\n response.drain_conn()\n raise\n return response\n\n response.drain_conn()\n retries.sleep(response)\n log.debug(\"Retry: %s\", url)\n return self.urlopen(\n method,\n url,\n body,\n headers,\n retries=retries,\n redirect=redirect,\n assert_same_host=assert_same_host,\n timeout=timeout,\n pool_timeout=pool_timeout,\n release_conn=release_conn,\n chunked=chunked,\n body_pos=body_pos,\n **response_kw,\n )\n\n return response\n\n\nclass HTTPSConnectionPool(HTTPConnectionPool):\n \"\"\"\n Same as :class:`.HTTPConnectionPool`, but HTTPS.\n\n :class:`.HTTPSConnection` uses one of ``assert_fingerprint``,\n ``assert_hostname`` and ``host`` in this order to verify connections.\n If ``assert_hostname`` is False, no verification is done.\n\n The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,\n ``ca_cert_dir``, ``ssl_version``, 
``key_password`` are only used if :mod:`ssl`\n is available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade\n the connection socket into an SSL socket.\n \"\"\"\n\n scheme = \"https\"\n ConnectionCls = HTTPSConnection\n\n def __init__(\n self,\n host: str,\n port: Optional[int] = None,\n timeout: Optional[_TYPE_TIMEOUT] = _DEFAULT_TIMEOUT,\n maxsize: int = 1,\n block: bool = False,\n headers: Optional[Mapping[str, str]] = None,\n retries: Optional[Union[Retry, bool, int]] = None,\n _proxy: Optional[Url] = None,\n _proxy_headers: Optional[Mapping[str, str]] = None,\n key_file: Optional[str] = None,\n cert_file: Optional[str] = None,\n cert_reqs: Optional[Union[int, str]] = None,\n key_password: Optional[str] = None,\n ca_certs: Optional[str] = None,\n ssl_version: Optional[Union[int, str]] = None,\n ssl_minimum_version: Optional[\"ssl.TLSVersion\"] = None,\n ssl_maximum_version: Optional[\"ssl.TLSVersion\"] = None,\n assert_hostname: Optional[Union[str, \"Literal[False]\"]] = None,\n assert_fingerprint: Optional[str] = None,\n ca_cert_dir: Optional[str] = None,\n **conn_kw: Any,\n ) -> None:\n\n super().__init__(\n host,\n port,\n timeout,\n maxsize,\n block,\n headers,\n retries,\n _proxy,\n _proxy_headers,\n **conn_kw,\n )\n\n self.key_file = key_file\n self.cert_file = cert_file\n self.cert_reqs = cert_reqs\n self.key_password = key_password\n self.ca_certs = ca_certs\n self.ca_cert_dir = ca_cert_dir\n self.ssl_version = ssl_version\n self.ssl_minimum_version = ssl_minimum_version\n self.ssl_maximum_version = ssl_maximum_version\n self.assert_hostname = assert_hostname\n self.assert_fingerprint = assert_fingerprint\n\n def _prepare_conn(self, conn: HTTPSConnection) -> HTTPConnection:\n \"\"\"\n Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`\n and establish the tunnel if proxy is used.\n \"\"\"\n\n if isinstance(conn, VerifiedHTTPSConnection):\n conn.set_cert(\n key_file=self.key_file,\n key_password=self.key_password,\n cert_file=self.cert_file,\n cert_reqs=self.cert_reqs,\n ca_certs=self.ca_certs,\n ca_cert_dir=self.ca_cert_dir,\n assert_hostname=self.assert_hostname,\n assert_fingerprint=self.assert_fingerprint,\n )\n conn.ssl_version = self.ssl_version\n conn.ssl_minimum_version = self.ssl_minimum_version\n conn.ssl_maximum_version = self.ssl_maximum_version\n\n return conn\n\n def _prepare_proxy(self, conn: HTTPSConnection) -> None: # type: ignore[override]\n \"\"\"\n Establishes a tunnel connection through HTTP CONNECT.\n\n Tunnel connection is established early because otherwise httplib would\n improperly set Host: header to proxy's IP:port.\n \"\"\"\n conn.set_tunnel(self._proxy_host, self.port, self.proxy_headers)\n\n if self.proxy and self.proxy.scheme == \"https\":\n conn.tls_in_tls_required = True\n\n conn.connect()\n\n def _new_conn(self) -> HTTPConnection:\n \"\"\"\n Return a fresh :class:`urllib3.connection.HTTPConnection`.\n \"\"\"\n self.num_connections += 1\n log.debug(\n \"Starting new HTTPS connection (%d): %s:%s\",\n self.num_connections,\n self.host,\n self.port or \"443\",\n )\n\n if not self.ConnectionCls or self.ConnectionCls is DummyConnection: # type: ignore[comparison-overlap]\n raise ImportError(\n \"Can't connect to HTTPS URL because the SSL module is not available.\"\n )\n\n actual_host: str = self.host\n actual_port = self.port\n if self.proxy is not None and self.proxy.host is not None:\n actual_host = self.proxy.host\n actual_port = self.proxy.port\n\n conn = self.ConnectionCls(\n host=actual_host,\n 
port=actual_port,\n timeout=self.timeout.connect_timeout,\n cert_file=self.cert_file,\n key_file=self.key_file,\n key_password=self.key_password,\n **self.conn_kw,\n )\n\n return self._prepare_conn(conn)\n\n def _validate_conn(self, conn: HTTPConnection) -> None:\n \"\"\"\n Called right before a request is made, after the socket is created.\n \"\"\"\n super()._validate_conn(conn)\n\n # Force connect early to allow us to validate the connection.\n if not conn.sock:\n conn.connect()\n\n if not conn.is_verified:\n warnings.warn(\n (\n f\"Unverified HTTPS request is being made to host '{conn.host}'. \"\n \"Adding certificate verification is strongly advised. See: \"\n \"https://urllib3.readthedocs.io/en/latest/advanced-usage.html\"\n \"#tls-warnings\"\n ),\n InsecureRequestWarning,\n )\n\n\ndef connection_from_url(url: str, **kw: Any) -> HTTPConnectionPool:\n \"\"\"\n Given a url, return an :class:`.ConnectionPool` instance of its host.\n\n This is a shortcut for not having to parse out the scheme, host, and port\n of the url before creating an :class:`.ConnectionPool` instance.\n\n :param url:\n Absolute URL string that must include the scheme. Port is optional.\n\n :param \\\\**kw:\n Passes additional parameters to the constructor of the appropriate\n :class:`.ConnectionPool`. Useful for specifying things like\n timeout, maxsize, headers, etc.\n\n Example::\n\n >>> conn = connection_from_url('http://google.com/')\n >>> r = conn.request('GET', '/')\n \"\"\"\n scheme, _, host, port, *_ = parse_url(url)\n scheme = scheme or \"http\"\n port = port or port_by_scheme.get(scheme, 80)\n if scheme == \"https\":\n return HTTPSConnectionPool(host, port=port, **kw) # type: ignore[arg-type]\n else:\n return HTTPConnectionPool(host, port=port, **kw) # type: ignore[arg-type]\n\n\n@overload\ndef _normalize_host(host: None, scheme: Optional[str]) -> None:\n ...\n\n\n@overload\ndef _normalize_host(host: str, scheme: Optional[str]) -> str:\n ...\n\n\ndef _normalize_host(host: Optional[str], scheme: Optional[str]) -> Optional[str]:\n \"\"\"\n Normalize hosts for comparisons and use with sockets.\n \"\"\"\n\n host = normalize_host(host, scheme)\n\n # httplib doesn't like it when we include brackets in IPv6 addresses\n # Specifically, if we include brackets but also pass the port then\n # httplib crazily doubles up the square brackets on the Host header.\n # Instead, we need to make sure we never pass ``None`` as the port.\n # However, for backward compatibility reasons we can't actually\n # *assert* that. See http://bugs.python.org/issue28539\n if host and host.startswith(\"[\") and host.endswith(\"]\"):\n host = host[1:-1]\n return host\n", "path": "src/urllib3/connectionpool.py"}]} |
gh_patches_debug_1264 | rasdani/github-patches | git_diff | sopel-irc__sopel-611 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[search].duck is horribly broken.
It appears we're scraping the page wrong, since ".duck wikipedia" returns an ad page.
--- END ISSUE ---
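Editorial note (not part of the original report): the module's regex grabs the first `nofollow` anchor on the page, and on DuckDuckGo's HTML results that anchor can be a sponsored link rather than an organic result. A minimal sketch of one plausible direction, assuming the organic results carry a `web-result` class in the page markup of the time:

```python
import re

# Regex copied from the module under discussion.
r_duck = re.compile(r'nofollow" class="[^"]+" href="(.*?)">')

def duck_search_filtered(page_html):
    # Skip everything before the first organic result so the ad block is not matched.
    if 'web-result"' in page_html:
        page_html = page_html.split('web-result"', 1)[1]
    m = r_duck.search(page_html)
    return m.group(1) if m else None
```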
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `willie/modules/search.py`
Content:
```
1 # coding=utf8
2 """
3 search.py - Willie Web Search Module
4 Copyright 2008-9, Sean B. Palmer, inamidst.com
5 Copyright 2012, Edward Powell, embolalia.net
6 Licensed under the Eiffel Forum License 2.
7
8 http://willie.dftba.net
9 """
10 from __future__ import unicode_literals
11
12 import re
13 from willie import web
14 from willie.module import commands, example
15 import json
16 import sys
17 import time
18
19
20 def google_ajax(query):
21 """Search using AjaxSearch, and return its JSON."""
22 uri = 'http://ajax.googleapis.com/ajax/services/search/web'
23 args = '?v=1.0&safe=off&q=' + query
24 bytes = web.get(uri + args)
25 return json.loads(bytes)
26
27
28 def google_search(query):
29 results = google_ajax(query)
30 try:
31 return results['responseData']['results'][0]['unescapedUrl']
32 except IndexError:
33 return None
34 except TypeError:
35 return False
36
37
38 def google_count(query):
39 results = google_ajax(query)
40 if not 'responseData' in results:
41 return '0'
42 if not 'cursor' in results['responseData']:
43 return '0'
44 if not 'estimatedResultCount' in results['responseData']['cursor']:
45 return '0'
46 return results['responseData']['cursor']['estimatedResultCount']
47
48
49 def formatnumber(n):
50 """Format a number with beautiful commas."""
51 parts = list(str(n))
52 for i in range((len(parts) - 3), 0, -3):
53 parts.insert(i, ',')
54 return ''.join(parts)
55
56
57 @commands('g', 'google')
58 @example('.g swhack')
59 def g(bot, trigger):
60 """Queries Google for the specified input."""
61 query = trigger.group(2)
62 if not query:
63 return bot.reply('.g what?')
64 uri = google_search(query)
65 if uri:
66 bot.reply(uri)
67 bot.memory['last_seen_url'][trigger.sender] = uri
68 elif uri is False:
69 bot.reply("Problem getting data from Google.")
70 else:
71 bot.reply("No results found for '%s'." % query)
72
73
74 @commands('gc')
75 @example('.gc extrapolate')
76 def gc(bot, trigger):
77 """Returns the number of Google results for the specified input."""
78 query = trigger.group(2)
79 if not query:
80 return bot.reply('.gc what?')
81 num = formatnumber(google_count(query))
82 bot.say(query + ': ' + num)
83
84 r_query = re.compile(
85 r'\+?"[^"\\]*(?:\\.[^"\\]*)*"|\[[^]\\]*(?:\\.[^]\\]*)*\]|\S+'
86 )
87
88
89 @commands('gcs', 'comp')
90 @example('.gcs foo bar')
91 def gcs(bot, trigger):
92 """Compare the number of Google search results"""
93 if not trigger.group(2):
94 return bot.reply("Nothing to compare.")
95 queries = r_query.findall(trigger.group(2))
96 if len(queries) > 6:
97 return bot.reply('Sorry, can only compare up to six things.')
98
99 results = []
100 for i, query in enumerate(queries):
101 query = query.strip('[]')
102 n = int((formatnumber(google_count(query)) or '0').replace(',', ''))
103 results.append((n, query))
104 if i >= 2:
105 time.sleep(0.25)
106 if i >= 4:
107 time.sleep(0.25)
108
109 results = [(term, n) for (n, term) in reversed(sorted(results))]
110 reply = ', '.join('%s (%s)' % (t, formatnumber(n)) for (t, n) in results)
111 bot.say(reply)
112
113 r_bing = re.compile(r'<h3><a href="([^"]+)"')
114
115
116 def bing_search(query, lang='en-GB'):
117 base = 'http://www.bing.com/search?mkt=%s&q=' % lang
118 bytes = web.get(base + query)
119 m = r_bing.search(bytes)
120 if m:
121 return m.group(1)
122
123 r_duck = re.compile(r'nofollow" class="[^"]+" href="(.*?)">')
124
125
126 def duck_search(query):
127 query = query.replace('!', '')
128 uri = 'http://duckduckgo.com/html/?q=%s&kl=uk-en' % query
129 bytes = web.get(uri)
130 m = r_duck.search(bytes)
131 if m:
132 return web.decode(m.group(1))
133
134
135 def duck_api(query):
136 if '!bang' in query.lower():
137 return 'https://duckduckgo.com/bang.html'
138
139 uri = 'http://api.duckduckgo.com/?q=%s&format=json&no_html=1&no_redirect=1' % query
140 results = json.loads(web.get(uri))
141 if results['Redirect']:
142 return results['Redirect']
143 else:
144 return None
145
146
147 @commands('duck', 'ddg')
148 @example('.duck privacy or .duck !mcwiki obsidian')
149 def duck(bot, trigger):
150 """Queries Duck Duck Go for the specified input."""
151 query = trigger.group(2)
152 if not query:
153 return bot.reply('.ddg what?')
154
155 #If the API gives us something, say it and stop
156 result = duck_api(query)
157 if result:
158 bot.reply(result)
159 return
160
161 #Otherwise, look it up on the HTMl version
162 uri = duck_search(query)
163
164 if uri:
165 bot.reply(uri)
166 bot.memory['last_seen_url'][trigger.sender] = uri
167 else:
168 bot.reply("No results found for '%s'." % query)
169
170
171 @commands('search')
172 @example('.search nerdfighter')
173 def search(bot, trigger):
174 """Searches Google, Bing, and Duck Duck Go."""
175 if not trigger.group(2):
176 return bot.reply('.search for what?')
177 query = trigger.group(2)
178 gu = google_search(query) or '-'
179 bu = bing_search(query) or '-'
180 du = duck_search(query) or '-'
181
182 if (gu == bu) and (bu == du):
183 result = '%s (g, b, d)' % gu
184 elif (gu == bu):
185 result = '%s (g, b), %s (d)' % (gu, du)
186 elif (bu == du):
187 result = '%s (b, d), %s (g)' % (bu, gu)
188 elif (gu == du):
189 result = '%s (g, d), %s (b)' % (gu, bu)
190 else:
191 if len(gu) > 250:
192 gu = '(extremely long link)'
193 if len(bu) > 150:
194 bu = '(extremely long link)'
195 if len(du) > 150:
196 du = '(extremely long link)'
197 result = '%s (g), %s (b), %s (d)' % (gu, bu, du)
198
199 bot.reply(result)
200
201
202 @commands('suggest')
203 def suggest(bot, trigger):
204 """Suggest terms starting with given input"""
205 if not trigger.group(2):
206 return bot.reply("No query term.")
207 query = trigger.group(2)
208 uri = 'http://websitedev.de/temp-bin/suggest.pl?q='
209 answer = web.get(uri+query.replace('+', '%2B'))
210 if answer:
211 bot.say(answer)
212 else:
213 bot.reply('Sorry, no result.')
214
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/willie/modules/search.py b/willie/modules/search.py
--- a/willie/modules/search.py
+++ b/willie/modules/search.py
@@ -127,6 +127,8 @@
query = query.replace('!', '')
uri = 'http://duckduckgo.com/html/?q=%s&kl=uk-en' % query
bytes = web.get(uri)
+ if 'web-result"' in bytes: #filter out the adds on top of the page
+ bytes = bytes.split('web-result"')[1]
m = r_duck.search(bytes)
if m:
return web.decode(m.group(1))
| {"golden_diff": "diff --git a/willie/modules/search.py b/willie/modules/search.py\n--- a/willie/modules/search.py\n+++ b/willie/modules/search.py\n@@ -127,6 +127,8 @@\n query = query.replace('!', '')\n uri = 'http://duckduckgo.com/html/?q=%s&kl=uk-en' % query\n bytes = web.get(uri)\n+ if 'web-result\"' in bytes: #filter out the adds on top of the page\n+ bytes = bytes.split('web-result\"')[1]\n m = r_duck.search(bytes)\n if m:\n return web.decode(m.group(1))\n", "issue": "[search].duck is horribly broken.\nIt appears we're scraping the page wrong, since \".duck wikipedia\" returns an ad page.\n\n", "before_files": [{"content": "# coding=utf8\n\"\"\"\nsearch.py - Willie Web Search Module\nCopyright 2008-9, Sean B. Palmer, inamidst.com\nCopyright 2012, Edward Powell, embolalia.net\nLicensed under the Eiffel Forum License 2.\n\nhttp://willie.dftba.net\n\"\"\"\nfrom __future__ import unicode_literals\n\nimport re\nfrom willie import web\nfrom willie.module import commands, example\nimport json\nimport sys\nimport time\n\n\ndef google_ajax(query):\n \"\"\"Search using AjaxSearch, and return its JSON.\"\"\"\n uri = 'http://ajax.googleapis.com/ajax/services/search/web'\n args = '?v=1.0&safe=off&q=' + query\n bytes = web.get(uri + args)\n return json.loads(bytes)\n\n\ndef google_search(query):\n results = google_ajax(query)\n try:\n return results['responseData']['results'][0]['unescapedUrl']\n except IndexError:\n return None\n except TypeError:\n return False\n\n\ndef google_count(query):\n results = google_ajax(query)\n if not 'responseData' in results:\n return '0'\n if not 'cursor' in results['responseData']:\n return '0'\n if not 'estimatedResultCount' in results['responseData']['cursor']:\n return '0'\n return results['responseData']['cursor']['estimatedResultCount']\n\n\ndef formatnumber(n):\n \"\"\"Format a number with beautiful commas.\"\"\"\n parts = list(str(n))\n for i in range((len(parts) - 3), 0, -3):\n parts.insert(i, ',')\n return ''.join(parts)\n\n\n@commands('g', 'google')\n@example('.g swhack')\ndef g(bot, trigger):\n \"\"\"Queries Google for the specified input.\"\"\"\n query = trigger.group(2)\n if not query:\n return bot.reply('.g what?')\n uri = google_search(query)\n if uri:\n bot.reply(uri)\n bot.memory['last_seen_url'][trigger.sender] = uri\n elif uri is False:\n bot.reply(\"Problem getting data from Google.\")\n else:\n bot.reply(\"No results found for '%s'.\" % query)\n\n\n@commands('gc')\n@example('.gc extrapolate')\ndef gc(bot, trigger):\n \"\"\"Returns the number of Google results for the specified input.\"\"\"\n query = trigger.group(2)\n if not query:\n return bot.reply('.gc what?')\n num = formatnumber(google_count(query))\n bot.say(query + ': ' + num)\n\nr_query = re.compile(\n r'\\+?\"[^\"\\\\]*(?:\\\\.[^\"\\\\]*)*\"|\\[[^]\\\\]*(?:\\\\.[^]\\\\]*)*\\]|\\S+'\n)\n\n\n@commands('gcs', 'comp')\n@example('.gcs foo bar')\ndef gcs(bot, trigger):\n \"\"\"Compare the number of Google search results\"\"\"\n if not trigger.group(2):\n return bot.reply(\"Nothing to compare.\")\n queries = r_query.findall(trigger.group(2))\n if len(queries) > 6:\n return bot.reply('Sorry, can only compare up to six things.')\n\n results = []\n for i, query in enumerate(queries):\n query = query.strip('[]')\n n = int((formatnumber(google_count(query)) or '0').replace(',', ''))\n results.append((n, query))\n if i >= 2:\n time.sleep(0.25)\n if i >= 4:\n time.sleep(0.25)\n\n results = [(term, n) for (n, term) in reversed(sorted(results))]\n reply = ', '.join('%s (%s)' % (t, formatnumber(n)) 
for (t, n) in results)\n bot.say(reply)\n\nr_bing = re.compile(r'<h3><a href=\"([^\"]+)\"')\n\n\ndef bing_search(query, lang='en-GB'):\n base = 'http://www.bing.com/search?mkt=%s&q=' % lang\n bytes = web.get(base + query)\n m = r_bing.search(bytes)\n if m:\n return m.group(1)\n\nr_duck = re.compile(r'nofollow\" class=\"[^\"]+\" href=\"(.*?)\">')\n\n\ndef duck_search(query):\n query = query.replace('!', '')\n uri = 'http://duckduckgo.com/html/?q=%s&kl=uk-en' % query\n bytes = web.get(uri)\n m = r_duck.search(bytes)\n if m:\n return web.decode(m.group(1))\n\n\ndef duck_api(query):\n if '!bang' in query.lower():\n return 'https://duckduckgo.com/bang.html'\n\n uri = 'http://api.duckduckgo.com/?q=%s&format=json&no_html=1&no_redirect=1' % query\n results = json.loads(web.get(uri))\n if results['Redirect']:\n return results['Redirect']\n else:\n return None\n\n\n@commands('duck', 'ddg')\n@example('.duck privacy or .duck !mcwiki obsidian')\ndef duck(bot, trigger):\n \"\"\"Queries Duck Duck Go for the specified input.\"\"\"\n query = trigger.group(2)\n if not query:\n return bot.reply('.ddg what?')\n\n #If the API gives us something, say it and stop\n result = duck_api(query)\n if result:\n bot.reply(result)\n return\n\n #Otherwise, look it up on the HTMl version\n uri = duck_search(query)\n\n if uri:\n bot.reply(uri)\n bot.memory['last_seen_url'][trigger.sender] = uri\n else:\n bot.reply(\"No results found for '%s'.\" % query)\n\n\n@commands('search')\n@example('.search nerdfighter')\ndef search(bot, trigger):\n \"\"\"Searches Google, Bing, and Duck Duck Go.\"\"\"\n if not trigger.group(2):\n return bot.reply('.search for what?')\n query = trigger.group(2)\n gu = google_search(query) or '-'\n bu = bing_search(query) or '-'\n du = duck_search(query) or '-'\n\n if (gu == bu) and (bu == du):\n result = '%s (g, b, d)' % gu\n elif (gu == bu):\n result = '%s (g, b), %s (d)' % (gu, du)\n elif (bu == du):\n result = '%s (b, d), %s (g)' % (bu, gu)\n elif (gu == du):\n result = '%s (g, d), %s (b)' % (gu, bu)\n else:\n if len(gu) > 250:\n gu = '(extremely long link)'\n if len(bu) > 150:\n bu = '(extremely long link)'\n if len(du) > 150:\n du = '(extremely long link)'\n result = '%s (g), %s (b), %s (d)' % (gu, bu, du)\n\n bot.reply(result)\n\n\n@commands('suggest')\ndef suggest(bot, trigger):\n \"\"\"Suggest terms starting with given input\"\"\"\n if not trigger.group(2):\n return bot.reply(\"No query term.\")\n query = trigger.group(2)\n uri = 'http://websitedev.de/temp-bin/suggest.pl?q='\n answer = web.get(uri+query.replace('+', '%2B'))\n if answer:\n bot.say(answer)\n else:\n bot.reply('Sorry, no result.')\n", "path": "willie/modules/search.py"}], "after_files": [{"content": "# coding=utf8\n\"\"\"\nsearch.py - Willie Web Search Module\nCopyright 2008-9, Sean B. 
Palmer, inamidst.com\nCopyright 2012, Edward Powell, embolalia.net\nLicensed under the Eiffel Forum License 2.\n\nhttp://willie.dftba.net\n\"\"\"\nfrom __future__ import unicode_literals\n\nimport re\nfrom willie import web\nfrom willie.module import commands, example\nimport json\nimport sys\nimport time\n\n\ndef google_ajax(query):\n \"\"\"Search using AjaxSearch, and return its JSON.\"\"\"\n uri = 'http://ajax.googleapis.com/ajax/services/search/web'\n args = '?v=1.0&safe=off&q=' + query\n bytes = web.get(uri + args)\n return json.loads(bytes)\n\n\ndef google_search(query):\n results = google_ajax(query)\n try:\n return results['responseData']['results'][0]['unescapedUrl']\n except IndexError:\n return None\n except TypeError:\n return False\n\n\ndef google_count(query):\n results = google_ajax(query)\n if not 'responseData' in results:\n return '0'\n if not 'cursor' in results['responseData']:\n return '0'\n if not 'estimatedResultCount' in results['responseData']['cursor']:\n return '0'\n return results['responseData']['cursor']['estimatedResultCount']\n\n\ndef formatnumber(n):\n \"\"\"Format a number with beautiful commas.\"\"\"\n parts = list(str(n))\n for i in range((len(parts) - 3), 0, -3):\n parts.insert(i, ',')\n return ''.join(parts)\n\n\n@commands('g', 'google')\n@example('.g swhack')\ndef g(bot, trigger):\n \"\"\"Queries Google for the specified input.\"\"\"\n query = trigger.group(2)\n if not query:\n return bot.reply('.g what?')\n uri = google_search(query)\n if uri:\n bot.reply(uri)\n bot.memory['last_seen_url'][trigger.sender] = uri\n elif uri is False:\n bot.reply(\"Problem getting data from Google.\")\n else:\n bot.reply(\"No results found for '%s'.\" % query)\n\n\n@commands('gc')\n@example('.gc extrapolate')\ndef gc(bot, trigger):\n \"\"\"Returns the number of Google results for the specified input.\"\"\"\n query = trigger.group(2)\n if not query:\n return bot.reply('.gc what?')\n num = formatnumber(google_count(query))\n bot.say(query + ': ' + num)\n\nr_query = re.compile(\n r'\\+?\"[^\"\\\\]*(?:\\\\.[^\"\\\\]*)*\"|\\[[^]\\\\]*(?:\\\\.[^]\\\\]*)*\\]|\\S+'\n)\n\n\n@commands('gcs', 'comp')\n@example('.gcs foo bar')\ndef gcs(bot, trigger):\n \"\"\"Compare the number of Google search results\"\"\"\n if not trigger.group(2):\n return bot.reply(\"Nothing to compare.\")\n queries = r_query.findall(trigger.group(2))\n if len(queries) > 6:\n return bot.reply('Sorry, can only compare up to six things.')\n\n results = []\n for i, query in enumerate(queries):\n query = query.strip('[]')\n n = int((formatnumber(google_count(query)) or '0').replace(',', ''))\n results.append((n, query))\n if i >= 2:\n time.sleep(0.25)\n if i >= 4:\n time.sleep(0.25)\n\n results = [(term, n) for (n, term) in reversed(sorted(results))]\n reply = ', '.join('%s (%s)' % (t, formatnumber(n)) for (t, n) in results)\n bot.say(reply)\n\nr_bing = re.compile(r'<h3><a href=\"([^\"]+)\"')\n\n\ndef bing_search(query, lang='en-GB'):\n base = 'http://www.bing.com/search?mkt=%s&q=' % lang\n bytes = web.get(base + query)\n m = r_bing.search(bytes)\n if m:\n return m.group(1)\n\nr_duck = re.compile(r'nofollow\" class=\"[^\"]+\" href=\"(.*?)\">')\n\n\ndef duck_search(query):\n query = query.replace('!', '')\n uri = 'http://duckduckgo.com/html/?q=%s&kl=uk-en' % query\n bytes = web.get(uri)\n if 'web-result\"' in bytes: #filter out the adds on top of the page\n bytes = bytes.split('web-result\"')[1]\n m = r_duck.search(bytes)\n if m:\n return web.decode(m.group(1))\n\n\ndef duck_api(query):\n if '!bang' in 
query.lower():\n return 'https://duckduckgo.com/bang.html'\n\n uri = 'http://api.duckduckgo.com/?q=%s&format=json&no_html=1&no_redirect=1' % query\n results = json.loads(web.get(uri))\n if results['Redirect']:\n return results['Redirect']\n else:\n return None\n\n\n@commands('duck', 'ddg')\n@example('.duck privacy or .duck !mcwiki obsidian')\ndef duck(bot, trigger):\n \"\"\"Queries Duck Duck Go for the specified input.\"\"\"\n query = trigger.group(2)\n if not query:\n return bot.reply('.ddg what?')\n\n #If the API gives us something, say it and stop\n result = duck_api(query)\n if result:\n bot.reply(result)\n return\n\n #Otherwise, look it up on the HTMl version\n uri = duck_search(query)\n\n if uri:\n bot.reply(uri)\n bot.memory['last_seen_url'][trigger.sender] = uri\n else:\n bot.reply(\"No results found for '%s'.\" % query)\n\n\n@commands('search')\n@example('.search nerdfighter')\ndef search(bot, trigger):\n \"\"\"Searches Google, Bing, and Duck Duck Go.\"\"\"\n if not trigger.group(2):\n return bot.reply('.search for what?')\n query = trigger.group(2)\n gu = google_search(query) or '-'\n bu = bing_search(query) or '-'\n du = duck_search(query) or '-'\n\n if (gu == bu) and (bu == du):\n result = '%s (g, b, d)' % gu\n elif (gu == bu):\n result = '%s (g, b), %s (d)' % (gu, du)\n elif (bu == du):\n result = '%s (b, d), %s (g)' % (bu, gu)\n elif (gu == du):\n result = '%s (g, d), %s (b)' % (gu, bu)\n else:\n if len(gu) > 250:\n gu = '(extremely long link)'\n if len(bu) > 150:\n bu = '(extremely long link)'\n if len(du) > 150:\n du = '(extremely long link)'\n result = '%s (g), %s (b), %s (d)' % (gu, bu, du)\n\n bot.reply(result)\n\n\n@commands('suggest')\ndef suggest(bot, trigger):\n \"\"\"Suggest terms starting with given input\"\"\"\n if not trigger.group(2):\n return bot.reply(\"No query term.\")\n query = trigger.group(2)\n uri = 'http://websitedev.de/temp-bin/suggest.pl?q='\n answer = web.get(uri+query.replace('+', '%2B'))\n if answer:\n bot.say(answer)\n else:\n bot.reply('Sorry, no result.')\n", "path": "willie/modules/search.py"}]} |
gh_patches_debug_1265 | rasdani/github-patches | git_diff | cocotb__cocotb-1145 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Packaging: Add python_requires to manifest
Define our Python version requirements in our package manifest, as described here: https://packaging.python.org/guides/distributing-packages-using-setuptools/#python-requires
--- END ISSUE ---
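Editorial note: `python_requires` is a plain keyword argument to `setuptools.setup()`. The sketch below is illustrative only; the metadata and the exact version specifier are assumptions about which interpreters the project means to support, not a statement of cocotb's actual packaging:

```python
from setuptools import setup

setup(
    name="example-package",  # placeholder metadata, not cocotb's real values
    version="0.0.1",
    # pip will refuse to install on interpreters outside this range.
    python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*",
)
```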
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 ###############################################################################
3 # Copyright (c) 2013 Potential Ventures Ltd
4 # Copyright (c) 2013 SolarFlare Communications Inc
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are met:
9 # * Redistributions of source code must retain the above copyright
10 # notice, this list of conditions and the following disclaimer.
11 # * Redistributions in binary form must reproduce the above copyright
12 # notice, this list of conditions and the following disclaimer in the
13 # documentation and/or other materials provided with the distribution.
14 # * Neither the name of Potential Ventures Ltd,
15 # SolarFlare Communications Inc nor the
16 # names of its contributors may be used to endorse or promote products
17 # derived from this software without specific prior written permission.
18 #
19 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
20 # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21 # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 # DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
23 # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 ###############################################################################
30
31 from setuptools import setup
32 from setuptools import find_packages
33 from os import path, walk
34
35 def read_file(fname):
36 return open(path.join(path.dirname(__file__), fname)).read()
37
38 def package_files(directory):
39 paths = []
40 for (fpath, directories, filenames) in walk(directory):
41 for filename in filenames:
42 paths.append(path.join('..', fpath, filename))
43 return paths
44
45 version = read_file('version')[8:].strip()
46
47 setup(
48 name='cocotb',
49 version=version,
50 description='cocotb is a coroutine based cosimulation library for writing VHDL and Verilog testbenches in Python.',
51 url='https://github.com/potentialventures/cocotb',
52 license='BSD',
53 long_description=read_file('README.md'),
54 long_description_content_type='text/markdown',
55 author='Chris Higgs, Stuart Hodgson',
56 author_email='[email protected]',
57 install_requires=[],
58 packages=find_packages(),
59 include_package_data=True,
60 package_data={'cocotb': package_files('cocotb/share')},
61 entry_points={
62 'console_scripts': [
63 'cocotb-config=cocotb.config:main',
64 ]
65 },
66 platforms='any',
67 classifiers=[
68 "Programming Language :: Python :: 2.7",
69 "Programming Language :: Python :: 3",
70 "License :: OSI Approved :: BSD License",
71 "Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)",
72 ],
73 )
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
old mode 100644
new mode 100755
--- a/setup.py
+++ b/setup.py
@@ -55,6 +55,7 @@
author='Chris Higgs, Stuart Hodgson',
author_email='[email protected]',
install_requires=[],
+ python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
packages=find_packages(),
include_package_data=True,
package_data={'cocotb': package_files('cocotb/share')},
| {"golden_diff": "diff --git a/setup.py b/setup.py\nold mode 100644\nnew mode 100755\n--- a/setup.py\n+++ b/setup.py\n@@ -55,6 +55,7 @@\n author='Chris Higgs, Stuart Hodgson',\n author_email='[email protected]',\n install_requires=[],\n+ python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',\n packages=find_packages(),\n include_package_data=True,\n package_data={'cocotb': package_files('cocotb/share')},\n", "issue": "Packaging: Add python_requires to manifest\nDefine our Python version requirements in our package manifest, as described here: https://packaging.python.org/guides/distributing-packages-using-setuptools/#python-requires\n", "before_files": [{"content": "#!/usr/bin/env python\n###############################################################################\n# Copyright (c) 2013 Potential Ventures Ltd\n# Copyright (c) 2013 SolarFlare Communications Inc\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of Potential Ventures Ltd,\n# SolarFlare Communications Inc nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\n\nfrom setuptools import setup\nfrom setuptools import find_packages\nfrom os import path, walk\n\ndef read_file(fname):\n return open(path.join(path.dirname(__file__), fname)).read()\n\ndef package_files(directory):\n paths = []\n for (fpath, directories, filenames) in walk(directory):\n for filename in filenames:\n paths.append(path.join('..', fpath, filename))\n return paths\n\nversion = read_file('version')[8:].strip()\n\nsetup(\n name='cocotb',\n version=version,\n description='cocotb is a coroutine based cosimulation library for writing VHDL and Verilog testbenches in Python.',\n url='https://github.com/potentialventures/cocotb',\n license='BSD',\n long_description=read_file('README.md'),\n long_description_content_type='text/markdown',\n author='Chris Higgs, Stuart Hodgson',\n author_email='[email protected]',\n install_requires=[],\n packages=find_packages(),\n include_package_data=True,\n package_data={'cocotb': package_files('cocotb/share')},\n entry_points={\n 'console_scripts': [\n 'cocotb-config=cocotb.config:main',\n ]\n },\n platforms='any',\n classifiers=[\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: BSD License\",\n \"Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n###############################################################################\n# Copyright (c) 2013 Potential Ventures Ltd\n# Copyright (c) 2013 SolarFlare Communications Inc\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of Potential Ventures Ltd,\n# SolarFlare Communications Inc nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\n\nfrom setuptools import setup\nfrom setuptools import find_packages\nfrom os import path, walk\n\ndef read_file(fname):\n return open(path.join(path.dirname(__file__), fname)).read()\n\ndef package_files(directory):\n paths = []\n for (fpath, directories, filenames) in walk(directory):\n for filename in filenames:\n paths.append(path.join('..', fpath, filename))\n return paths\n\nversion = read_file('version')[8:].strip()\n\nsetup(\n name='cocotb',\n version=version,\n description='cocotb is a coroutine based cosimulation library for writing VHDL and Verilog testbenches in Python.',\n url='https://github.com/potentialventures/cocotb',\n license='BSD',\n long_description=read_file('README.md'),\n long_description_content_type='text/markdown',\n author='Chris Higgs, Stuart Hodgson',\n author_email='[email protected]',\n install_requires=[],\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',\n packages=find_packages(),\n include_package_data=True,\n package_data={'cocotb': package_files('cocotb/share')},\n entry_points={\n 'console_scripts': [\n 'cocotb-config=cocotb.config:main',\n ]\n },\n platforms='any',\n classifiers=[\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: BSD License\",\n \"Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)\",\n ],\n)\n", "path": "setup.py"}]} |
gh_patches_debug_1266 | rasdani/github-patches | git_diff | scikit-hep__pyhf-915 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
cloudpickle v1.5.0 breaks testing
# Description
With the release of [`cloudpickle` `v1.5.0`](https://pypi.org/project/cloudpickle/1.5.0/) on 2020-07-01, the CI is broken in testing, as the following error is raised:
```pytb
ImportError while loading conftest '/home/runner/work/pyhf/pyhf/tests/conftest.py'.
tests/conftest.py:83: in <module>
(pyhf.tensor.tensorflow_backend(), None),
src/pyhf/tensor/__init__.py:44: in __getattr__
e,
E pyhf.exceptions.ImportBackendError: ('There was a problem importing TensorFlow. The tensorflow backend cannot be used.', ImportError("cannot import name 'CloudPickler' from 'cloudpickle.cloudpickle' (/opt/hostedtoolcache/Python/3.7.7/x64/lib/python3.7/site-packages/cloudpickle/cloudpickle.py)"))
##[error]Process completed with exit code 4.
```
`cloudpickle` is a required dependency of TensorFlow Probability and in TFP `v0.10.0` it is set to [`cloudpickle >= 1.2.2`](https://github.com/tensorflow/probability/blob/f051e03dd3cc847d31061803c2b31c564562a993/setup.py#L34).
This has been reported in:
- [TensorFlow Probability Issue 991](https://github.com/tensorflow/probability/issues/991)
- [`cloudpickle` Issue 390](https://github.com/cloudpipe/cloudpickle/issues/390)
# Expected Behavior
For no error to be raised
# Actual Behavior
cf. above
# Steps to Reproduce
This was found in CI, but the minimal test case is just to install TensorFlow and TensorFlow Probability and then try to import TFP:
```
$ python -m pip install tensorflow tensorflow-probability
$ python -c "import tensorflow_probability"
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/home/feickert/.venvs/debug-this/lib/python3.7/site-packages/tensorflow_probability/__init__.py", line 76, in <module>
from tensorflow_probability.python import * # pylint: disable=wildcard-import
File "/home/feickert/.venvs/debug-this/lib/python3.7/site-packages/tensorflow_probability/python/__init__.py", line 23, in <module>
from tensorflow_probability.python import distributions
File "/home/feickert/.venvs/debug-this/lib/python3.7/site-packages/tensorflow_probability/python/distributions/__init__.py", line 88, in <module>
from tensorflow_probability.python.distributions.pixel_cnn import PixelCNN
File "/home/feickert/.venvs/debug-this/lib/python3.7/site-packages/tensorflow_probability/python/distributions/pixel_cnn.py", line 37, in <module>
from tensorflow_probability.python.layers import weight_norm
File "/home/feickert/.venvs/debug-this/lib/python3.7/site-packages/tensorflow_probability/python/layers/__init__.py", line 31, in <module>
from tensorflow_probability.python.layers.distribution_layer import CategoricalMixtureOfOneHotCategorical
File "/home/feickert/.venvs/debug-this/lib/python3.7/site-packages/tensorflow_probability/python/layers/distribution_layer.py", line 28, in <module>
from cloudpickle.cloudpickle import CloudPickler
ImportError: cannot import name 'CloudPickler' from 'cloudpickle.cloudpickle' (/home/feickert/.venvs/debug-this/lib/python3.7/site-packages/cloudpickle/cloudpickle.py)
$ pip list | grep cloudpickle
cloudpickle 1.5.0
```
# Checklist
- [x] Run `git fetch` to get the most up to date version of `master`
- [x] Searched through existing Issues to confirm this is not a duplicate issue
- [x] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue
--- END ISSUE ---
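Editorial note: a common stop-gap for this kind of upstream break is to exclude the known-bad release from the package's own requirements until TensorFlow Probability publishes a fix. The excerpt below is a hedged sketch of such a pin, not the project's confirmed solution:

```python
# Hypothetical excerpt of setup.py extras, excluding the known-bad cloudpickle release.
extras_require = {
    "tensorflow": [
        "tensorflow~=2.0",
        "tensorflow-probability~=0.8",
        "cloudpickle!=1.5.0",  # release that removed the old CloudPickler import path
    ],
}
```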
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import setup
2
3 extras_require = {
4 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],
5 'torch': ['torch~=1.2'],
6 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],
7 'xmlio': ['uproot'],
8 'minuit': ['iminuit'],
9 }
10 extras_require['backends'] = sorted(
11 set(
12 extras_require['tensorflow']
13 + extras_require['torch']
14 + extras_require['jax']
15 + extras_require['minuit']
16 )
17 )
18 extras_require['contrib'] = sorted(set(['matplotlib']))
19 extras_require['lint'] = sorted(set(['pyflakes', 'black']))
20
21 extras_require['test'] = sorted(
22 set(
23 extras_require['backends']
24 + extras_require['xmlio']
25 + extras_require['contrib']
26 + [
27 'pytest~=3.5',
28 'pytest-cov>=2.5.1',
29 'pytest-mock',
30 'pytest-benchmark[histogram]',
31 'pytest-console-scripts',
32 'pytest-mpl',
33 'pydocstyle',
34 'coverage>=4.0', # coveralls
35 'papermill~=2.0',
36 'nteract-scrapbook~=0.2',
37 'jupyter',
38 'uproot~=3.3',
39 'graphviz',
40 'jsonpatch',
41 ]
42 )
43 )
44 extras_require['docs'] = sorted(
45 set(
46 [
47 'sphinx~=3.0.0', # Sphinx v3.1.X regressions break docs
48 'sphinxcontrib-bibtex',
49 'sphinx-click',
50 'sphinx_rtd_theme',
51 'nbsphinx',
52 'ipywidgets',
53 'sphinx-issues',
54 'sphinx-copybutton>0.2.9',
55 ]
56 )
57 )
58 extras_require['develop'] = sorted(
59 set(
60 extras_require['docs']
61 + extras_require['lint']
62 + extras_require['test']
63 + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'check-manifest', 'twine']
64 )
65 )
66 extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
67
68
69 setup(
70 extras_require=extras_require,
71 use_scm_version=lambda: {'local_scheme': lambda version: ''},
72 )
73
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,7 +1,11 @@
from setuptools import setup
extras_require = {
- 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],
+ 'tensorflow': [
+ 'tensorflow~=2.0',
+ 'tensorflow-probability~=0.8',
+ 'cloudpickle!=1.5.0', # TODO: Temp patch until tfp v0.11
+ ],
'torch': ['torch~=1.2'],
'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],
'xmlio': ['uproot'],
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,7 +1,11 @@\n from setuptools import setup\n \n extras_require = {\n- 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],\n+ 'tensorflow': [\n+ 'tensorflow~=2.0',\n+ 'tensorflow-probability~=0.8',\n+ 'cloudpickle!=1.5.0', # TODO: Temp patch until tfp v0.11\n+ ],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n 'xmlio': ['uproot'],\n", "issue": "cloudpickle v1.5.0 breaks testing\n# Description\r\n\r\nWith the release of [`cloudpickle` `v1.5.0`](https://pypi.org/project/cloudpickle/1.5.0/) on 2020-07-01 the CI is broken in testing as the following error is raised\r\n\r\n```pytb\r\nImportError while loading conftest '/home/runner/work/pyhf/pyhf/tests/conftest.py'.\r\ntests/conftest.py:83: in <module>\r\n (pyhf.tensor.tensorflow_backend(), None),\r\nsrc/pyhf/tensor/__init__.py:44: in __getattr__\r\n e,\r\nE pyhf.exceptions.ImportBackendError: ('There was a problem importing TensorFlow. The tensorflow backend cannot be used.', ImportError(\"cannot import name 'CloudPickler' from 'cloudpickle.cloudpickle' (/opt/hostedtoolcache/Python/3.7.7/x64/lib/python3.7/site-packages/cloudpickle/cloudpickle.py)\"))\r\n##[error]Process completed with exit code 4.\r\n```\r\n\r\n`cloudpickle` is a required dependency of TensorFlow Probability and in TFP `v0.10.0` it is set to [`cloudpickle >= 1.2.2`](https://github.com/tensorflow/probability/blob/f051e03dd3cc847d31061803c2b31c564562a993/setup.py#L34).\r\n\r\nThis has been reported in:\r\n- [TensorFlow Probability Issue 991](https://github.com/tensorflow/probability/issues/991)\r\n- [`cloudpickle` Issue 390](https://github.com/cloudpipe/cloudpickle/issues/390)\r\n\r\n# Expected Behavior\r\n\r\nFor no error to be raised\r\n\r\n# Actual Behavior\r\n\r\nc.f. 
above\r\n\r\n# Steps to Reproduce\r\n\r\nThis was found in CI, but the minimal test case is just to install TensorFlow and TensorFlow Probability and then try to import TFP:\r\n\r\n```\r\n$ python -m pip install tensorflow tensorflow-probability\r\n$ python -c \"import tensorflow_probability\"\r\nTraceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\n File \"/home/feickert/.venvs/debug-this/lib/python3.7/site-packages/tensorflow_probability/__init__.py\", line 76, in <module>\r\n from tensorflow_probability.python import * # pylint: disable=wildcard-import\r\n File \"/home/feickert/.venvs/debug-this/lib/python3.7/site-packages/tensorflow_probability/python/__init__.py\", line 23, in <module>\r\n from tensorflow_probability.python import distributions\r\n File \"/home/feickert/.venvs/debug-this/lib/python3.7/site-packages/tensorflow_probability/python/distributions/__init__.py\", line 88, in <module>\r\n from tensorflow_probability.python.distributions.pixel_cnn import PixelCNN\r\n File \"/home/feickert/.venvs/debug-this/lib/python3.7/site-packages/tensorflow_probability/python/distributions/pixel_cnn.py\", line 37, in <module>\r\n from tensorflow_probability.python.layers import weight_norm\r\n File \"/home/feickert/.venvs/debug-this/lib/python3.7/site-packages/tensorflow_probability/python/layers/__init__.py\", line 31, in <module>\r\n from tensorflow_probability.python.layers.distribution_layer import CategoricalMixtureOfOneHotCategorical\r\n File \"/home/feickert/.venvs/debug-this/lib/python3.7/site-packages/tensorflow_probability/python/layers/distribution_layer.py\", line 28, in <module>\r\n from cloudpickle.cloudpickle import CloudPickler\r\nImportError: cannot import name 'CloudPickler' from 'cloudpickle.cloudpickle' (/home/feickert/.venvs/debug-this/lib/python3.7/site-packages/cloudpickle/cloudpickle.py)\r\n$ pip list | grep cloudpickle\r\ncloudpickle 1.5.0\r\n```\r\n\r\n# Checklist\r\n\r\n- [x] Run `git fetch` to get the most up to date version of `master`\r\n- [x] Searched through existing Issues to confirm this is not a duplicate issue\r\n- [x] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue\r\n\n", "before_files": [{"content": "from setuptools import setup\n\nextras_require = {\n 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n 'xmlio': ['uproot'],\n 'minuit': ['iminuit'],\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted(set(['matplotlib']))\nextras_require['lint'] = sorted(set(['pyflakes', 'black']))\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'uproot~=3.3',\n 'graphviz',\n 'jsonpatch',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n [\n 'sphinx~=3.0.0', # Sphinx v3.1.X regressions break docs\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n 
)\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'check-manifest', 'twine']\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup\n\nextras_require = {\n 'tensorflow': [\n 'tensorflow~=2.0',\n 'tensorflow-probability~=0.8',\n 'cloudpickle!=1.5.0', # TODO: Temp patch until tfp v0.11\n ],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n 'xmlio': ['uproot'],\n 'minuit': ['iminuit'],\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted(set(['matplotlib']))\nextras_require['lint'] = sorted(set(['pyflakes', 'black']))\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'uproot~=3.3',\n 'graphviz',\n 'jsonpatch',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n [\n 'sphinx~=3.0.0', # Sphinx v3.1.X regressions break docs\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'check-manifest', 'twine']\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}]} |
gh_patches_debug_1267 | rasdani/github-patches | git_diff | ansible__molecule-4038 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
created: true is ignored in state.yml
# Issue Type
- Bug report
# Molecule and Ansible details
```
ansible --version && molecule --version
ansible [core 2.15.3]
config file = None
configured module search path = ['/home/manu/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /home/manu/.local/share/virtualenvs/molecule-test-IsY3eZIi/lib/python3.11/site-packages/ansible
ansible collection location = /home/manu/.ansible/collections:/usr/share/ansible/collections
executable location = /home/manu/.local/share/virtualenvs/molecule-test-IsY3eZIi/bin/ansible
python version = 3.11.5 (main, Aug 28 2023, 20:02:58) [GCC 13.2.1 20230801] (/home/manu/.local/share/virtualenvs/molecule-test-IsY3eZIi/bin/python)
jinja version = 3.1.2
libyaml = True
molecule 6.0.2 using python 3.11
ansible:2.15.3
default:6.0.2 from molecule
molecule-qemu:0.5.3 from molecule_qemu
```
Molecule installation method: source
Ansible installation method: pip
# Desired Behavior
In molecule v5, the `create` step was automatically skipped when `created: true` was set in `<XDG_CACHE_HOME>/molecule//<scenario>/state.yml`, with the message `WARNING Skipping, instances already created.`. This is the desired behavior.
Here is an example with a simple hello_world role after a `molecule create` execution. The molecule-qemu plugin is used here.
```
❯ molecule converge
INFO default scenario test matrix: dependency, create, prepare, converge
INFO Performing prerun with role_name_check=0...
INFO Set ANSIBLE_LIBRARY=/home/manu/.cache/ansible-compat/35072c/modules:/home/manu/.ansible/plugins/modules:/usr/share/ansible/plugins/modules
INFO Set ANSIBLE_COLLECTIONS_PATH=/home/manu/.cache/ansible-compat/35072c/collections:/home/manu/.ansible/collections:/usr/share/ansible/collections
INFO Set ANSIBLE_ROLES_PATH=/home/manu/.cache/ansible-compat/35072c/roles:/home/manu/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles
INFO Running default > dependency
WARNING Skipping, missing the requirements file.
WARNING Skipping, missing the requirements file.
INFO Running default > create
WARNING Skipping, instances already created.
INFO Running default > prepare
WARNING Skipping, prepare playbook not configured.
INFO Running default > converge
PLAY [all] *********************************************************************
TASK [hello_world : Hello world] ***********************************************
ok: [test-hello-world] => {
"msg": "Hello, world!"
}
PLAY RECAP *********************************************************************
test-hello-world : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
```
# Actual Behaviour
Since molecule v6, the `WARNING Skipping, instances already created.` message is no longer displayed and the create step is executed each time `molecule converge` is called.
Here is an example with the same role, under the same conditions, except that the molecule version is 6.0.2.
```
INFO default scenario test matrix: dependency, create, prepare, converge
INFO Performing prerun with role_name_check=0...
INFO Running default > dependency
WARNING Skipping, missing the requirements file.
WARNING Skipping, missing the requirements file.
INFO Running default > create
PLAY [Create] ******************************************************************
TASK [Gather only necessary facts] *********************************************
ok: [localhost]
TASK [Register VMs data] *******************************************************
ok: [localhost] => (item=test-hello-world)
TASK [Prepare VMs data] ********************************************************
ok: [localhost]
...
TASK [Dump VMs config] *********************************************************
ok: [localhost]
PLAY RECAP *********************************************************************
localhost : ok=31 changed=0 unreachable=0 failed=0 skipped=5 rescued=0 ignored=0
INFO Running default > prepare
WARNING Skipping, prepare playbook not configured.
INFO Running default > converge
PLAY [all] *********************************************************************
TASK [hello_world : Hello world] ***********************************************
ok: [test-hello-world] => {
"msg": "Hello, world!"
}
PLAY RECAP *********************************************************************
test-hello-world : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
```
--- END ISSUE ---
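Editorial note: the v5 behavior described above amounts to an early return in the create action whenever the scenario state already records `created: true`. The sketch below illustrates that guard; the attribute names (`state.created`, `provisioner.create`) are taken from the quoted `create.py` and its diff, but the function itself is a simplification, not the actual molecule source:

```python
import logging

LOG = logging.getLogger(__name__)

def execute_create(config):
    """Sketch of the early-return guard the report says disappeared in v6."""
    config.state.change_state("driver", config.driver.name)
    if config.state.created:  # value read back from state.yml
        LOG.warning("Skipping, instances already created.")
        return
    config.provisioner.create()
    config.state.change_state("created", True)
```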
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/molecule/command/create.py`
Content:
```
1 # Copyright (c) 2015-2018 Cisco Systems, Inc.
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a copy
4 # of this software and associated documentation files (the "Software"), to
5 # deal in the Software without restriction, including without limitation the
6 # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7 # sell copies of the Software, and to permit persons to whom the Software is
8 # furnished to do so, subject to the following conditions:
9 #
10 # The above copyright notice and this permission notice shall be included in
11 # all copies or substantial portions of the Software.
12 #
13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18 # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
19 # DEALINGS IN THE SOFTWARE.
20 """Create Command Module."""
21
22 import logging
23
24 import click
25
26 from molecule.api import drivers
27 from molecule.command import base
28 from molecule.config import DEFAULT_DRIVER
29
30 LOG = logging.getLogger(__name__)
31
32
33 class Create(base.Base):
34 """Create Command Class."""
35
36 def execute(self, action_args=None):
37 """Execute the actions necessary to perform a `molecule create` and \
38 returns None.
39
40 :return: None
41 """
42 self._config.state.change_state("driver", self._config.driver.name)
43
44 self._config.provisioner.create()
45
46 self._config.state.change_state("created", True)
47
48
49 @base.click_command_ex()
50 @click.pass_context
51 @click.option(
52 "--scenario-name",
53 "-s",
54 default=base.MOLECULE_DEFAULT_SCENARIO_NAME,
55 help=f"Name of the scenario to target. ({base.MOLECULE_DEFAULT_SCENARIO_NAME})",
56 )
57 @click.option(
58 "--driver-name",
59 "-d",
60 type=click.Choice([str(s) for s in drivers()]),
61 help=f"Name of driver to use. ({DEFAULT_DRIVER})",
62 )
63 def create(ctx, scenario_name, driver_name): # pragma: no cover
64 """Use the provisioner to start the instances."""
65 args = ctx.obj.get("args")
66 subcommand = base._get_subcommand(__name__)
67 command_args = {"subcommand": subcommand, "driver_name": driver_name}
68
69 base.execute_cmdline_scenarios(scenario_name, args, command_args)
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/molecule/command/create.py b/src/molecule/command/create.py
--- a/src/molecule/command/create.py
+++ b/src/molecule/command/create.py
@@ -41,6 +41,11 @@
"""
self._config.state.change_state("driver", self._config.driver.name)
+ if self._config.state.created:
+ msg = "Skipping, instances already created."
+ LOG.warning(msg)
+ return
+
self._config.provisioner.create()
self._config.state.change_state("created", True)
| {"golden_diff": "diff --git a/src/molecule/command/create.py b/src/molecule/command/create.py\n--- a/src/molecule/command/create.py\n+++ b/src/molecule/command/create.py\n@@ -41,6 +41,11 @@\n \"\"\"\n self._config.state.change_state(\"driver\", self._config.driver.name)\n \n+ if self._config.state.created:\n+ msg = \"Skipping, instances already created.\"\n+ LOG.warning(msg)\n+ return\n+\n self._config.provisioner.create()\n \n self._config.state.change_state(\"created\", True)\n", "issue": "created: true is ignored in state.yml\n# Issue Type\r\n\r\n- Bug report\r\n\r\n# Molecule and Ansible details\r\n\r\n```\r\nansible --version && molecule --version\r\nansible [core 2.15.3]\r\n config file = None\r\n configured module search path = ['/home/manu/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']\r\n ansible python module location = /home/manu/.local/share/virtualenvs/molecule-test-IsY3eZIi/lib/python3.11/site-packages/ansible\r\n ansible collection location = /home/manu/.ansible/collections:/usr/share/ansible/collections\r\n executable location = /home/manu/.local/share/virtualenvs/molecule-test-IsY3eZIi/bin/ansible\r\n python version = 3.11.5 (main, Aug 28 2023, 20:02:58) [GCC 13.2.1 20230801] (/home/manu/.local/share/virtualenvs/molecule-test-IsY3eZIi/bin/python)\r\n jinja version = 3.1.2\r\n libyaml = True\r\nmolecule 6.0.2 using python 3.11\r\n ansible:2.15.3\r\n default:6.0.2 from molecule\r\n molecule-qemu:0.5.3 from molecule_qemu\r\n```\r\n\r\nMolecule installation method: source\r\nAnsible installation method: pip\r\n\r\n# Desired Behavior\r\n\r\nIn molecule v5, the `create` step was automatically skipped when `created: true` in `<XDG_CACHE_HOME>/molecule//<scenario>/state.yml` with the message `WARNING Skipping, instances already created.`. This is the desired behavior.\r\n\r\nHere an example with a simple hello_world role after a `molecule create` execution. 
The molecule-qemu plugin is used here.\r\n```\r\n\u276f molecule converge\r\nINFO default scenario test matrix: dependency, create, prepare, converge\r\nINFO Performing prerun with role_name_check=0...\r\nINFO Set ANSIBLE_LIBRARY=/home/manu/.cache/ansible-compat/35072c/modules:/home/manu/.ansible/plugins/modules:/usr/share/ansible/plugins/modules\r\nINFO Set ANSIBLE_COLLECTIONS_PATH=/home/manu/.cache/ansible-compat/35072c/collections:/home/manu/.ansible/collections:/usr/share/ansible/collections\r\nINFO Set ANSIBLE_ROLES_PATH=/home/manu/.cache/ansible-compat/35072c/roles:/home/manu/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles\r\nINFO Running default > dependency\r\nWARNING Skipping, missing the requirements file.\r\nWARNING Skipping, missing the requirements file.\r\nINFO Running default > create\r\nWARNING Skipping, instances already created.\r\nINFO Running default > prepare\r\nWARNING Skipping, prepare playbook not configured.\r\nINFO Running default > converge\r\n\r\nPLAY [all] *********************************************************************\r\n\r\nTASK [hello_world : Hello world] ***********************************************\r\nok: [test-hello-world] => {\r\n \"msg\": \"Hello, world!\"\r\n}\r\n\r\nPLAY RECAP *********************************************************************\r\ntest-hello-world : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0\r\n```\r\n\r\n# Actual Behaviour\r\n\r\nSince molecule v6, the `WARNING Skipping, instances already created.` message is no longer displayed and the create step is executed each time the `molecule converge` is called.\r\n\r\nHere an example with the same role, with the same conditions except that molecule version is 6.0.2.\r\n```\r\nINFO default scenario test matrix: dependency, create, prepare, converge\r\nINFO Performing prerun with role_name_check=0...\r\nINFO Running default > dependency\r\nWARNING Skipping, missing the requirements file.\r\nWARNING Skipping, missing the requirements file.\r\nINFO Running default > create\r\n\r\nPLAY [Create] ******************************************************************\r\n\r\nTASK [Gather only necessary facts] *********************************************\r\nok: [localhost]\r\n\r\nTASK [Register VMs data] *******************************************************\r\nok: [localhost] => (item=test-hello-world)\r\n\r\nTASK [Prepare VMs data] ********************************************************\r\nok: [localhost]\r\n...\r\nTASK [Dump VMs config] *********************************************************\r\nok: [localhost]\r\n\r\nPLAY RECAP *********************************************************************\r\nlocalhost : ok=31 changed=0 unreachable=0 failed=0 skipped=5 rescued=0 ignored=0\r\n\r\nINFO Running default > prepare\r\nWARNING Skipping, prepare playbook not configured.\r\nINFO Running default > converge\r\n\r\nPLAY [all] *********************************************************************\r\n\r\nTASK [hello_world : Hello world] ***********************************************\r\nok: [test-hello-world] => {\r\n \"msg\": \"Hello, world!\"\r\n}\r\n\r\nPLAY RECAP *********************************************************************\r\ntest-hello-world : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0\r\n```\n", "before_files": [{"content": "# Copyright (c) 2015-2018 Cisco Systems, Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the 
\"Software\"), to\n# deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\"\"\"Create Command Module.\"\"\"\n\nimport logging\n\nimport click\n\nfrom molecule.api import drivers\nfrom molecule.command import base\nfrom molecule.config import DEFAULT_DRIVER\n\nLOG = logging.getLogger(__name__)\n\n\nclass Create(base.Base):\n \"\"\"Create Command Class.\"\"\"\n\n def execute(self, action_args=None):\n \"\"\"Execute the actions necessary to perform a `molecule create` and \\\n returns None.\n\n :return: None\n \"\"\"\n self._config.state.change_state(\"driver\", self._config.driver.name)\n\n self._config.provisioner.create()\n\n self._config.state.change_state(\"created\", True)\n\n\[email protected]_command_ex()\[email protected]_context\[email protected](\n \"--scenario-name\",\n \"-s\",\n default=base.MOLECULE_DEFAULT_SCENARIO_NAME,\n help=f\"Name of the scenario to target. ({base.MOLECULE_DEFAULT_SCENARIO_NAME})\",\n)\[email protected](\n \"--driver-name\",\n \"-d\",\n type=click.Choice([str(s) for s in drivers()]),\n help=f\"Name of driver to use. ({DEFAULT_DRIVER})\",\n)\ndef create(ctx, scenario_name, driver_name): # pragma: no cover\n \"\"\"Use the provisioner to start the instances.\"\"\"\n args = ctx.obj.get(\"args\")\n subcommand = base._get_subcommand(__name__)\n command_args = {\"subcommand\": subcommand, \"driver_name\": driver_name}\n\n base.execute_cmdline_scenarios(scenario_name, args, command_args)\n", "path": "src/molecule/command/create.py"}], "after_files": [{"content": "# Copyright (c) 2015-2018 Cisco Systems, Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to\n# deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\"\"\"Create Command Module.\"\"\"\n\nimport logging\n\nimport click\n\nfrom molecule.api import drivers\nfrom molecule.command import base\nfrom molecule.config import DEFAULT_DRIVER\n\nLOG = logging.getLogger(__name__)\n\n\nclass Create(base.Base):\n \"\"\"Create Command Class.\"\"\"\n\n def execute(self, action_args=None):\n \"\"\"Execute the actions necessary to perform a `molecule create` and \\\n returns None.\n\n :return: None\n \"\"\"\n self._config.state.change_state(\"driver\", self._config.driver.name)\n\n if self._config.state.created:\n msg = \"Skipping, instances already created.\"\n LOG.warning(msg)\n return\n\n self._config.provisioner.create()\n\n self._config.state.change_state(\"created\", True)\n\n\[email protected]_command_ex()\[email protected]_context\[email protected](\n \"--scenario-name\",\n \"-s\",\n default=base.MOLECULE_DEFAULT_SCENARIO_NAME,\n help=f\"Name of the scenario to target. ({base.MOLECULE_DEFAULT_SCENARIO_NAME})\",\n)\[email protected](\n \"--driver-name\",\n \"-d\",\n type=click.Choice([str(s) for s in drivers()]),\n help=f\"Name of driver to use. ({DEFAULT_DRIVER})\",\n)\ndef create(ctx, scenario_name, driver_name): # pragma: no cover\n \"\"\"Use the provisioner to start the instances.\"\"\"\n args = ctx.obj.get(\"args\")\n subcommand = base._get_subcommand(__name__)\n command_args = {\"subcommand\": subcommand, \"driver_name\": driver_name}\n\n base.execute_cmdline_scenarios(scenario_name, args, command_args)\n", "path": "src/molecule/command/create.py"}]} |
gh_patches_debug_1268 | rasdani/github-patches | git_diff | canonical__microk8s-2478 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[dashboard] should be exposed via ingress
When running microk8s on a server, rather than doing port forwarding, it should be possible to access the dashboard via ingress (similar to the kubeflow dashboard).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/wrappers/status.py`
Content:
```
1 #!/usr/bin/python3
2 import os
3 import argparse
4
5 from common.utils import (
6 exit_if_no_permission,
7 exit_if_stopped,
8 is_cluster_locked,
9 is_ha_enabled,
10 get_dqlite_info,
11 wait_for_ready,
12 is_cluster_ready,
13 get_available_addons,
14 get_current_arch,
15 get_addon_by_name,
16 kubectl_get,
17 kubectl_get_clusterroles,
18 )
19
20
21 def is_enabled(addon, item):
22 if addon in item:
23 return True
24 else:
25 filepath = os.path.expandvars(addon)
26 return os.path.isfile(filepath)
27
28 return False
29
30
31 def print_short(isReady, enabled_addons, disabled_addons):
32 if isReady:
33 print("microk8s is running")
34 print("addons:")
35 if enabled_addons and len(enabled_addons) > 0:
36 for enabled in enabled_addons:
37 print("{}: enabled".format(enabled["name"]))
38 if disabled_addons and len(disabled_addons) > 0:
39 for disabled in disabled_addons:
40 print("{}: disabled".format(disabled["name"]))
41 else:
42 print("microk8s is not running. Use microk8s inspect for a deeper inspection.")
43
44
45 def print_pretty(isReady, enabled_addons, disabled_addons):
46 console_formatter = "{:>3} {:<20} # {}"
47 if isReady:
48 print("microk8s is running")
49 if not is_ha_enabled():
50 print("high-availability: no")
51 else:
52 info = get_dqlite_info()
53 if ha_cluster_formed(info):
54 print("high-availability: yes")
55 else:
56 print("high-availability: no")
57
58 masters = "none"
59 standby = "none"
60 for node in info:
61 if node[1] == "voter":
62 if masters == "none":
63 masters = "{}".format(node[0])
64 else:
65 masters = "{} {}".format(masters, node[0])
66 if node[1] == "standby":
67 if standby == "none":
68 standby = "{}".format(node[0])
69 else:
70 standby = "{} {}".format(standby, node[0])
71
72 print("{:>2}{} {}".format("", "datastore master nodes:", masters))
73 print("{:>2}{} {}".format("", "datastore standby nodes:", standby))
74
75 print("addons:")
76 if enabled_addons and len(enabled_addons) > 0:
77 print("{:>2}{}".format("", "enabled:"))
78 for enabled in enabled_addons:
79 print(console_formatter.format("", enabled["name"], enabled["description"]))
80 if disabled_addons and len(disabled_addons) > 0:
81 print("{:>2}{}".format("", "disabled:"))
82 for disabled in disabled_addons:
83 print(console_formatter.format("", disabled["name"], disabled["description"]))
84 else:
85 print("microk8s is not running. Use microk8s inspect for a deeper inspection.")
86
87
88 def print_short_yaml(isReady, enabled_addons, disabled_addons):
89 print("microk8s:")
90 print("{:>2}{} {}".format("", "running:", isReady))
91
92 if isReady:
93 print("addons:")
94 for enabled in enabled_addons:
95 print(" {}: enabled".format(enabled["name"]))
96
97 for disabled in disabled_addons:
98 print(" {}: disabled".format(disabled["name"]))
99 else:
100 print(
101 "{:>2}{} {}".format(
102 "",
103 "message:",
104 "microk8s is not running. Use microk8s inspect for a deeper inspection.",
105 )
106 )
107
108
109 def print_yaml(isReady, enabled_addons, disabled_addons):
110 print("microk8s:")
111 print("{:>2}{} {}".format("", "running:", isReady))
112
113 print("{:>2}".format("high-availability:"))
114 ha_enabled = is_ha_enabled()
115 print("{:>2}{} {}".format("", "enabled:", ha_enabled))
116 if ha_enabled:
117 info = get_dqlite_info()
118 print("{:>2}{}".format("", "nodes:"))
119 for node in info:
120 print("{:>6}address: {:<1}".format("- ", node[0]))
121 print("{:>6}role: {:<1}".format("", node[1]))
122
123 if isReady:
124 print("{:>2}".format("addons:"))
125 for enabled in enabled_addons:
126 print("{:>4}name: {:<1}".format("- ", enabled["name"]))
127 print("{:>4}description: {:<1}".format("", enabled["description"]))
128 print("{:>4}version: {:<1}".format("", enabled["version"]))
129 print("{:>4}status: enabled".format(""))
130
131 for disabled in disabled_addons:
132 print("{:>4}name: {:<1}".format("- ", disabled["name"]))
133 print("{:>4}description: {:<1}".format("", disabled["description"]))
134 print("{:>4}version: {:<1}".format("", disabled["version"]))
135 print("{:>4}status: disabled".format(""))
136 else:
137 print(
138 "{:>2}{} {}".format(
139 "",
140 "message:",
141 "microk8s is not running. Use microk8s inspect for a deeper inspection.",
142 )
143 )
144
145
146 def print_addon_status(enabled):
147 if len(enabled) > 0:
148 print("enabled")
149 else:
150 print("disabled")
151
152
153 def get_status(available_addons, isReady):
154 enabled = []
155 disabled = []
156 if isReady:
157 kube_output = kubectl_get("all")
158 cluster_output = kubectl_get_clusterroles()
159 kube_output = kube_output + cluster_output
160 for addon in available_addons:
161 found = False
162 for row in kube_output.split("\n"):
163 if is_enabled(addon["check_status"], row):
164 enabled.append(addon)
165 found = True
166 break
167 if not found:
168 disabled.append(addon)
169
170 return enabled, disabled
171
172
173 def ha_cluster_formed(info):
174 voters = 0
175 for node in info:
176 if node[1] == "voter":
177 voters += 1
178 ha_formed = False
179 if voters > 2:
180 ha_formed = True
181 return ha_formed
182
183
184 if __name__ == "__main__":
185 exit_if_no_permission()
186 exit_if_stopped()
187 is_cluster_locked()
188
189 # initiate the parser with a description
190 parser = argparse.ArgumentParser(
191 description="Microk8s cluster status check.", prog="microk8s status"
192 )
193 parser.add_argument(
194 "--format",
195 help="print cluster and addon status, output can be in yaml, pretty or short",
196 default="pretty",
197 choices={"pretty", "yaml", "short"},
198 )
199 parser.add_argument(
200 "-w", "--wait-ready", action="store_true", help="wait until the cluster is in ready state"
201 )
202 parser.add_argument(
203 "-t",
204 "--timeout",
205 help="specify a timeout in seconds when waiting for the cluster to be ready.",
206 type=int,
207 default=0,
208 )
209 parser.add_argument("-a", "--addon", help="check the status of an addon.", default="all")
210 parser.add_argument(
211 "--yaml", action="store_true", help="DEPRECATED, use '--format yaml' instead"
212 )
213
214 # read arguments from the command line
215 args = parser.parse_args()
216
217 wait_ready = args.wait_ready
218 timeout = args.timeout
219 yaml_short = args.yaml
220
221 if wait_ready:
222 isReady = wait_for_ready(timeout)
223 else:
224 isReady = is_cluster_ready()
225
226 available_addons = get_available_addons(get_current_arch())
227
228 if args.addon != "all":
229 available_addons = get_addon_by_name(available_addons, args.addon)
230
231 enabled, disabled = get_status(available_addons, isReady)
232
233 if args.addon != "all":
234 print_addon_status(enabled)
235 else:
236 if args.format == "yaml":
237 print_yaml(isReady, enabled, disabled)
238 elif args.format == "short":
239 print_short(isReady, enabled, disabled)
240 else:
241 if yaml_short:
242 print_short_yaml(isReady, enabled, disabled)
243 else:
244 print_pretty(isReady, enabled, disabled)
245
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scripts/wrappers/status.py b/scripts/wrappers/status.py
--- a/scripts/wrappers/status.py
+++ b/scripts/wrappers/status.py
@@ -154,7 +154,8 @@
enabled = []
disabled = []
if isReady:
- kube_output = kubectl_get("all")
+ # 'all' does not include ingress
+ kube_output = kubectl_get("all,ingress")
cluster_output = kubectl_get_clusterroles()
kube_output = kube_output + cluster_output
for addon in available_addons:
| {"golden_diff": "diff --git a/scripts/wrappers/status.py b/scripts/wrappers/status.py\n--- a/scripts/wrappers/status.py\n+++ b/scripts/wrappers/status.py\n@@ -154,7 +154,8 @@\n enabled = []\n disabled = []\n if isReady:\n- kube_output = kubectl_get(\"all\")\n+ # 'all' does not include ingress\n+ kube_output = kubectl_get(\"all,ingress\")\n cluster_output = kubectl_get_clusterroles()\n kube_output = kube_output + cluster_output\n for addon in available_addons:\n", "issue": "[dashboard] should be exposed via ingress\nWhen running microk8s on the server, rather than doing port forwarding it should be possible to access the dashboard via ingress (similar to kubeflow dashboard)\n", "before_files": [{"content": "#!/usr/bin/python3\nimport os\nimport argparse\n\nfrom common.utils import (\n exit_if_no_permission,\n exit_if_stopped,\n is_cluster_locked,\n is_ha_enabled,\n get_dqlite_info,\n wait_for_ready,\n is_cluster_ready,\n get_available_addons,\n get_current_arch,\n get_addon_by_name,\n kubectl_get,\n kubectl_get_clusterroles,\n)\n\n\ndef is_enabled(addon, item):\n if addon in item:\n return True\n else:\n filepath = os.path.expandvars(addon)\n return os.path.isfile(filepath)\n\n return False\n\n\ndef print_short(isReady, enabled_addons, disabled_addons):\n if isReady:\n print(\"microk8s is running\")\n print(\"addons:\")\n if enabled_addons and len(enabled_addons) > 0:\n for enabled in enabled_addons:\n print(\"{}: enabled\".format(enabled[\"name\"]))\n if disabled_addons and len(disabled_addons) > 0:\n for disabled in disabled_addons:\n print(\"{}: disabled\".format(disabled[\"name\"]))\n else:\n print(\"microk8s is not running. Use microk8s inspect for a deeper inspection.\")\n\n\ndef print_pretty(isReady, enabled_addons, disabled_addons):\n console_formatter = \"{:>3} {:<20} # {}\"\n if isReady:\n print(\"microk8s is running\")\n if not is_ha_enabled():\n print(\"high-availability: no\")\n else:\n info = get_dqlite_info()\n if ha_cluster_formed(info):\n print(\"high-availability: yes\")\n else:\n print(\"high-availability: no\")\n\n masters = \"none\"\n standby = \"none\"\n for node in info:\n if node[1] == \"voter\":\n if masters == \"none\":\n masters = \"{}\".format(node[0])\n else:\n masters = \"{} {}\".format(masters, node[0])\n if node[1] == \"standby\":\n if standby == \"none\":\n standby = \"{}\".format(node[0])\n else:\n standby = \"{} {}\".format(standby, node[0])\n\n print(\"{:>2}{} {}\".format(\"\", \"datastore master nodes:\", masters))\n print(\"{:>2}{} {}\".format(\"\", \"datastore standby nodes:\", standby))\n\n print(\"addons:\")\n if enabled_addons and len(enabled_addons) > 0:\n print(\"{:>2}{}\".format(\"\", \"enabled:\"))\n for enabled in enabled_addons:\n print(console_formatter.format(\"\", enabled[\"name\"], enabled[\"description\"]))\n if disabled_addons and len(disabled_addons) > 0:\n print(\"{:>2}{}\".format(\"\", \"disabled:\"))\n for disabled in disabled_addons:\n print(console_formatter.format(\"\", disabled[\"name\"], disabled[\"description\"]))\n else:\n print(\"microk8s is not running. 
Use microk8s inspect for a deeper inspection.\")\n\n\ndef print_short_yaml(isReady, enabled_addons, disabled_addons):\n print(\"microk8s:\")\n print(\"{:>2}{} {}\".format(\"\", \"running:\", isReady))\n\n if isReady:\n print(\"addons:\")\n for enabled in enabled_addons:\n print(\" {}: enabled\".format(enabled[\"name\"]))\n\n for disabled in disabled_addons:\n print(\" {}: disabled\".format(disabled[\"name\"]))\n else:\n print(\n \"{:>2}{} {}\".format(\n \"\",\n \"message:\",\n \"microk8s is not running. Use microk8s inspect for a deeper inspection.\",\n )\n )\n\n\ndef print_yaml(isReady, enabled_addons, disabled_addons):\n print(\"microk8s:\")\n print(\"{:>2}{} {}\".format(\"\", \"running:\", isReady))\n\n print(\"{:>2}\".format(\"high-availability:\"))\n ha_enabled = is_ha_enabled()\n print(\"{:>2}{} {}\".format(\"\", \"enabled:\", ha_enabled))\n if ha_enabled:\n info = get_dqlite_info()\n print(\"{:>2}{}\".format(\"\", \"nodes:\"))\n for node in info:\n print(\"{:>6}address: {:<1}\".format(\"- \", node[0]))\n print(\"{:>6}role: {:<1}\".format(\"\", node[1]))\n\n if isReady:\n print(\"{:>2}\".format(\"addons:\"))\n for enabled in enabled_addons:\n print(\"{:>4}name: {:<1}\".format(\"- \", enabled[\"name\"]))\n print(\"{:>4}description: {:<1}\".format(\"\", enabled[\"description\"]))\n print(\"{:>4}version: {:<1}\".format(\"\", enabled[\"version\"]))\n print(\"{:>4}status: enabled\".format(\"\"))\n\n for disabled in disabled_addons:\n print(\"{:>4}name: {:<1}\".format(\"- \", disabled[\"name\"]))\n print(\"{:>4}description: {:<1}\".format(\"\", disabled[\"description\"]))\n print(\"{:>4}version: {:<1}\".format(\"\", disabled[\"version\"]))\n print(\"{:>4}status: disabled\".format(\"\"))\n else:\n print(\n \"{:>2}{} {}\".format(\n \"\",\n \"message:\",\n \"microk8s is not running. 
Use microk8s inspect for a deeper inspection.\",\n )\n )\n\n\ndef print_addon_status(enabled):\n if len(enabled) > 0:\n print(\"enabled\")\n else:\n print(\"disabled\")\n\n\ndef get_status(available_addons, isReady):\n enabled = []\n disabled = []\n if isReady:\n kube_output = kubectl_get(\"all\")\n cluster_output = kubectl_get_clusterroles()\n kube_output = kube_output + cluster_output\n for addon in available_addons:\n found = False\n for row in kube_output.split(\"\\n\"):\n if is_enabled(addon[\"check_status\"], row):\n enabled.append(addon)\n found = True\n break\n if not found:\n disabled.append(addon)\n\n return enabled, disabled\n\n\ndef ha_cluster_formed(info):\n voters = 0\n for node in info:\n if node[1] == \"voter\":\n voters += 1\n ha_formed = False\n if voters > 2:\n ha_formed = True\n return ha_formed\n\n\nif __name__ == \"__main__\":\n exit_if_no_permission()\n exit_if_stopped()\n is_cluster_locked()\n\n # initiate the parser with a description\n parser = argparse.ArgumentParser(\n description=\"Microk8s cluster status check.\", prog=\"microk8s status\"\n )\n parser.add_argument(\n \"--format\",\n help=\"print cluster and addon status, output can be in yaml, pretty or short\",\n default=\"pretty\",\n choices={\"pretty\", \"yaml\", \"short\"},\n )\n parser.add_argument(\n \"-w\", \"--wait-ready\", action=\"store_true\", help=\"wait until the cluster is in ready state\"\n )\n parser.add_argument(\n \"-t\",\n \"--timeout\",\n help=\"specify a timeout in seconds when waiting for the cluster to be ready.\",\n type=int,\n default=0,\n )\n parser.add_argument(\"-a\", \"--addon\", help=\"check the status of an addon.\", default=\"all\")\n parser.add_argument(\n \"--yaml\", action=\"store_true\", help=\"DEPRECATED, use '--format yaml' instead\"\n )\n\n # read arguments from the command line\n args = parser.parse_args()\n\n wait_ready = args.wait_ready\n timeout = args.timeout\n yaml_short = args.yaml\n\n if wait_ready:\n isReady = wait_for_ready(timeout)\n else:\n isReady = is_cluster_ready()\n\n available_addons = get_available_addons(get_current_arch())\n\n if args.addon != \"all\":\n available_addons = get_addon_by_name(available_addons, args.addon)\n\n enabled, disabled = get_status(available_addons, isReady)\n\n if args.addon != \"all\":\n print_addon_status(enabled)\n else:\n if args.format == \"yaml\":\n print_yaml(isReady, enabled, disabled)\n elif args.format == \"short\":\n print_short(isReady, enabled, disabled)\n else:\n if yaml_short:\n print_short_yaml(isReady, enabled, disabled)\n else:\n print_pretty(isReady, enabled, disabled)\n", "path": "scripts/wrappers/status.py"}], "after_files": [{"content": "#!/usr/bin/python3\nimport os\nimport argparse\n\nfrom common.utils import (\n exit_if_no_permission,\n exit_if_stopped,\n is_cluster_locked,\n is_ha_enabled,\n get_dqlite_info,\n wait_for_ready,\n is_cluster_ready,\n get_available_addons,\n get_current_arch,\n get_addon_by_name,\n kubectl_get,\n kubectl_get_clusterroles,\n)\n\n\ndef is_enabled(addon, item):\n if addon in item:\n return True\n else:\n filepath = os.path.expandvars(addon)\n return os.path.isfile(filepath)\n\n return False\n\n\ndef print_short(isReady, enabled_addons, disabled_addons):\n if isReady:\n print(\"microk8s is running\")\n print(\"addons:\")\n if enabled_addons and len(enabled_addons) > 0:\n for enabled in enabled_addons:\n print(\"{}: enabled\".format(enabled[\"name\"]))\n if disabled_addons and len(disabled_addons) > 0:\n for disabled in disabled_addons:\n print(\"{}: 
disabled\".format(disabled[\"name\"]))\n else:\n print(\"microk8s is not running. Use microk8s inspect for a deeper inspection.\")\n\n\ndef print_pretty(isReady, enabled_addons, disabled_addons):\n console_formatter = \"{:>3} {:<20} # {}\"\n if isReady:\n print(\"microk8s is running\")\n if not is_ha_enabled():\n print(\"high-availability: no\")\n else:\n info = get_dqlite_info()\n if ha_cluster_formed(info):\n print(\"high-availability: yes\")\n else:\n print(\"high-availability: no\")\n\n masters = \"none\"\n standby = \"none\"\n for node in info:\n if node[1] == \"voter\":\n if masters == \"none\":\n masters = \"{}\".format(node[0])\n else:\n masters = \"{} {}\".format(masters, node[0])\n if node[1] == \"standby\":\n if standby == \"none\":\n standby = \"{}\".format(node[0])\n else:\n standby = \"{} {}\".format(standby, node[0])\n\n print(\"{:>2}{} {}\".format(\"\", \"datastore master nodes:\", masters))\n print(\"{:>2}{} {}\".format(\"\", \"datastore standby nodes:\", standby))\n\n print(\"addons:\")\n if enabled_addons and len(enabled_addons) > 0:\n print(\"{:>2}{}\".format(\"\", \"enabled:\"))\n for enabled in enabled_addons:\n print(console_formatter.format(\"\", enabled[\"name\"], enabled[\"description\"]))\n if disabled_addons and len(disabled_addons) > 0:\n print(\"{:>2}{}\".format(\"\", \"disabled:\"))\n for disabled in disabled_addons:\n print(console_formatter.format(\"\", disabled[\"name\"], disabled[\"description\"]))\n else:\n print(\"microk8s is not running. Use microk8s inspect for a deeper inspection.\")\n\n\ndef print_short_yaml(isReady, enabled_addons, disabled_addons):\n print(\"microk8s:\")\n print(\"{:>2}{} {}\".format(\"\", \"running:\", isReady))\n\n if isReady:\n print(\"addons:\")\n for enabled in enabled_addons:\n print(\" {}: enabled\".format(enabled[\"name\"]))\n\n for disabled in disabled_addons:\n print(\" {}: disabled\".format(disabled[\"name\"]))\n else:\n print(\n \"{:>2}{} {}\".format(\n \"\",\n \"message:\",\n \"microk8s is not running. Use microk8s inspect for a deeper inspection.\",\n )\n )\n\n\ndef print_yaml(isReady, enabled_addons, disabled_addons):\n print(\"microk8s:\")\n print(\"{:>2}{} {}\".format(\"\", \"running:\", isReady))\n\n print(\"{:>2}\".format(\"high-availability:\"))\n ha_enabled = is_ha_enabled()\n print(\"{:>2}{} {}\".format(\"\", \"enabled:\", ha_enabled))\n if ha_enabled:\n info = get_dqlite_info()\n print(\"{:>2}{}\".format(\"\", \"nodes:\"))\n for node in info:\n print(\"{:>6}address: {:<1}\".format(\"- \", node[0]))\n print(\"{:>6}role: {:<1}\".format(\"\", node[1]))\n\n if isReady:\n print(\"{:>2}\".format(\"addons:\"))\n for enabled in enabled_addons:\n print(\"{:>4}name: {:<1}\".format(\"- \", enabled[\"name\"]))\n print(\"{:>4}description: {:<1}\".format(\"\", enabled[\"description\"]))\n print(\"{:>4}version: {:<1}\".format(\"\", enabled[\"version\"]))\n print(\"{:>4}status: enabled\".format(\"\"))\n\n for disabled in disabled_addons:\n print(\"{:>4}name: {:<1}\".format(\"- \", disabled[\"name\"]))\n print(\"{:>4}description: {:<1}\".format(\"\", disabled[\"description\"]))\n print(\"{:>4}version: {:<1}\".format(\"\", disabled[\"version\"]))\n print(\"{:>4}status: disabled\".format(\"\"))\n else:\n print(\n \"{:>2}{} {}\".format(\n \"\",\n \"message:\",\n \"microk8s is not running. 
Use microk8s inspect for a deeper inspection.\",\n )\n )\n\n\ndef print_addon_status(enabled):\n if len(enabled) > 0:\n print(\"enabled\")\n else:\n print(\"disabled\")\n\n\ndef get_status(available_addons, isReady):\n enabled = []\n disabled = []\n if isReady:\n # 'all' does not include ingress\n kube_output = kubectl_get(\"all,ingress\")\n cluster_output = kubectl_get_clusterroles()\n kube_output = kube_output + cluster_output\n for addon in available_addons:\n found = False\n for row in kube_output.split(\"\\n\"):\n if is_enabled(addon[\"check_status\"], row):\n enabled.append(addon)\n found = True\n break\n if not found:\n disabled.append(addon)\n\n return enabled, disabled\n\n\ndef ha_cluster_formed(info):\n voters = 0\n for node in info:\n if node[1] == \"voter\":\n voters += 1\n ha_formed = False\n if voters > 2:\n ha_formed = True\n return ha_formed\n\n\nif __name__ == \"__main__\":\n exit_if_no_permission()\n exit_if_stopped()\n is_cluster_locked()\n\n # initiate the parser with a description\n parser = argparse.ArgumentParser(\n description=\"Microk8s cluster status check.\", prog=\"microk8s status\"\n )\n parser.add_argument(\n \"--format\",\n help=\"print cluster and addon status, output can be in yaml, pretty or short\",\n default=\"pretty\",\n choices={\"pretty\", \"yaml\", \"short\"},\n )\n parser.add_argument(\n \"-w\", \"--wait-ready\", action=\"store_true\", help=\"wait until the cluster is in ready state\"\n )\n parser.add_argument(\n \"-t\",\n \"--timeout\",\n help=\"specify a timeout in seconds when waiting for the cluster to be ready.\",\n type=int,\n default=0,\n )\n parser.add_argument(\"-a\", \"--addon\", help=\"check the status of an addon.\", default=\"all\")\n parser.add_argument(\n \"--yaml\", action=\"store_true\", help=\"DEPRECATED, use '--format yaml' instead\"\n )\n\n # read arguments from the command line\n args = parser.parse_args()\n\n wait_ready = args.wait_ready\n timeout = args.timeout\n yaml_short = args.yaml\n\n if wait_ready:\n isReady = wait_for_ready(timeout)\n else:\n isReady = is_cluster_ready()\n\n available_addons = get_available_addons(get_current_arch())\n\n if args.addon != \"all\":\n available_addons = get_addon_by_name(available_addons, args.addon)\n\n enabled, disabled = get_status(available_addons, isReady)\n\n if args.addon != \"all\":\n print_addon_status(enabled)\n else:\n if args.format == \"yaml\":\n print_yaml(isReady, enabled, disabled)\n elif args.format == \"short\":\n print_short(isReady, enabled, disabled)\n else:\n if yaml_short:\n print_short_yaml(isReady, enabled, disabled)\n else:\n print_pretty(isReady, enabled, disabled)\n", "path": "scripts/wrappers/status.py"}]} |
gh_patches_debug_1269 | rasdani/github-patches | git_diff | napari__napari-6226 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
property in labels layer does not understand objects of different lengths
## 🐛 Bug
I am trying to use the `properties` attribute of `add_labels` to add a dictionary of properties that contains a self-defined property whose values are lists of unequal lengths. So every region has an extra property of a different length, similar to how `image` is a property that has a different size for every region.
This is a silly example of what I have:
## To Reproduce
```python
import numpy as np
from skimage import data  # provides data.coins()
from skimage.measure import regionprops_table, label
import napari
def random_length_property(regionmask):
return [0] * np.random.randint(10)
image = data.coins()
viewer = napari.view_image(image)
label_image = label(image > 110)
table_props = regionprops_table(
label_image,
intensity_image=image,
extra_properties=(random_length_property,),
)
label_layer = viewer.add_labels(
label_image, name="segmentation", properties=table_props
)
napari.run()
```
And this makes napari unhappy with:
```bash
label_layer = viewer.add_labels(
File "C:\Users\elena.pascal\AppData\Local\pypoetry\Cache\virtualenvs\image-process-zoTHhXWu-py3.8\lib\site-packages\napari\components\viewer_model.py", line 4, in add_labels
import itertools
File "C:\Users\elena.pascal\AppData\Local\pypoetry\Cache\virtualenvs\image-process-zoTHhXWu-py3.8\lib\site-packages\napari\layers\labels\labels.py", line 328, in __init__
self._feature_table = _FeatureTable.from_layer(
File "C:\Users\elena.pascal\AppData\Local\pypoetry\Cache\virtualenvs\image-process-zoTHhXWu-py3.8\lib\site-packages\napari\layers\utils\layer_utils.py", line 956, in from_layer
return cls(features, defaults=feature_defaults, num_data=num_data)
File "C:\Users\elena.pascal\AppData\Local\pypoetry\Cache\virtualenvs\image-process-zoTHhXWu-py3.8\lib\site-packages\napari\layers\utils\layer_utils.py", line 788, in __init__
self._defaults = _validate_feature_defaults(defaults, self._values)
File "C:\Users\elena.pascal\AppData\Local\pypoetry\Cache\virtualenvs\image-process-zoTHhXWu-py3.8\lib\site-packages\napari\layers\utils\layer_utils.py", line 1015, in _validate_feature_defaults
defaults = {c: _get_default_column(values[c]) for c in values.columns}
File "C:\Users\elena.pascal\AppData\Local\pypoetry\Cache\virtualenvs\image-process-zoTHhXWu-py3.8\lib\site-packages\napari\layers\utils\layer_utils.py", line 1015, in <dictcomp>
defaults = {c: _get_default_column(values[c]) for c in values.columns}
File "C:\Users\elena.pascal\AppData\Local\pypoetry\Cache\virtualenvs\image-process-zoTHhXWu-py3.8\lib\site-packages\napari\layers\utils\layer_utils.py", line 975, in _get_default_column
return pd.Series(data=value, dtype=column.dtype, index=range(1))
File "C:\Users\elena.pascal\AppData\Local\pypoetry\Cache\virtualenvs\image-process-zoTHhXWu-py3.8\lib\site-packages\pandas\core\series.py", line 500, in __init__
com.require_length_match(data, index)
File "C:\Users\elena.pascal\AppData\Local\pypoetry\Cache\virtualenvs\image-process-zoTHhXWu-py3.8\lib\site-packages\pandas\core\common.py", line 576, in require_length_match
raise ValueError(
ValueError: Length of values (8) does not match length of index (1)
```
## Environment
napari: 0.4.17rc4.dev505+g7c9ea89d
Platform: Windows-10-10.0.19045-SP0
Python: 3.8.10 (tags/v3.8.10:3d8993a, May 3 2021, 11:48:03) [MSC v.1928 64 bit (AMD64)]
Qt: 5.15.2
PyQt5: 5.15.9
NumPy: 1.24.4
SciPy: 1.9.1
Dask: 2023.5.0
VisPy: 0.13.0
magicgui: 0.7.2
superqt: unknown
in-n-out: 0.1.8
app-model: 0.2.0
npe2: 0.7.2
OpenGL:
- GL version: 4.6.0 - Build 31.0.101.3959
- MAX_TEXTURE_SIZE: 16384
Screens:
- screen 1: resolution 2560x1440, scale 1.0
- screen 2: resolution 2560x1440, scale 1.0
Settings path:
- C:\Users\elena.pascal\AppData\Local\napari\illumion_033691400d65ecf164fea402e77f284e6b482050\settings.yaml
- Any other relevant information:
## Additional context
@jni answered here: https://forum.image.sc/t/struggle-to-add-property-of-unequal-length-to-labels-layer/86201?u=elena_pascal
--- END ISSUE ---
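Before diving into the files below, note that the ValueError in the traceback can be reproduced with pandas alone. The sketch below is illustrative (the real `_get_default_column` lives in `napari/layers/utils/layer_utils.py`, and how it picks `value` is paraphrased from the traceback): building a one-row default Series from a list-valued feature cell trips pandas' length check.

```python
import pandas as pd

# An object-dtype feature column as produced from regionprops_table with a
# ragged extra property: every cell holds a Python list of arbitrary length.
column = pd.Series([[0, 0], [], [0] * 8])

# Per the traceback, the defaults table is built one row per column via
# pd.Series(data=value, dtype=column.dtype, index=range(1)). With a
# list-valued cell, pandas sees 8 values for a 1-row index:
value = column.iloc[-1]  # e.g. a list of length 8
pd.Series(data=value, dtype=column.dtype, index=range(1))
# ValueError: Length of values (8) does not match length of index (1)
```

Wrapping the value so the Series receives exactly one element (for example `data=[value]`) satisfies the length check; whatever the eventual fix, the defaults row has to end up with exactly one entry per column.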
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `napari/layers/utils/layer_utils.py`
Content:
```
1 from __future__ import annotations
2
3 import functools
4 import inspect
5 import warnings
6 from typing import (
7 TYPE_CHECKING,
8 Any,
9 Dict,
10 List,
11 NamedTuple,
12 Optional,
13 Sequence,
14 Tuple,
15 Union,
16 )
17
18 import dask
19 import numpy as np
20 import pandas as pd
21
22 from napari.utils.action_manager import action_manager
23 from napari.utils.events.custom_types import Array
24 from napari.utils.transforms import Affine
25 from napari.utils.translations import trans
26
27 if TYPE_CHECKING:
28 from typing import Mapping
29
30 import numpy.typing as npt
31
32
33 class Extent(NamedTuple):
34 """Extent of coordinates in a local data space and world space.
35
36 Each extent is a (2, D) array that stores the minimum and maximum coordinate
37 values in each of D dimensions. Both the minimum and maximum coordinates are
38 inclusive so form an axis-aligned, closed interval or a D-dimensional box
39 around all the coordinates.
40
41 Attributes
42 ----------
43 data : (2, D) array of floats
44 The minimum and maximum raw data coordinates ignoring any transforms like
45 translation or scale.
46 world : (2, D) array of floats
47 The minimum and maximum world coordinates after applying a transform to the
48 raw data coordinates that brings them into a potentially shared world space.
49 step : (D,) array of floats
50 The step in each dimension that when taken from the minimum world coordinate,
51 should form a regular grid that eventually hits the maximum world coordinate.
52 """
53
54 data: np.ndarray
55 world: np.ndarray
56 step: np.ndarray
57
58
59 def register_layer_action(
60 keymapprovider,
61 description: str,
62 repeatable: bool = False,
63 shortcuts: Optional[str] = None,
64 ):
65 """
66 Convenient decorator to register an action with the current Layers
67
68 It will use the function name as the action name. We force the description
69 to be given instead of function docstring for translation purpose.
70
71
72 Parameters
73 ----------
74 keymapprovider : KeymapProvider
75 class on which to register the keybindings - this will typically be
76 the instance in focus that will handle the keyboard shortcut.
77 description : str
78 The description of the action, this will typically be translated and
79 will be what will be used in tooltips.
80 repeatable : bool
81 A flag indicating whether the action autorepeats when key is held
82 shortcuts : str | List[str]
83 Shortcut to bind by default to the action we are registering.
84
85 Returns
86 -------
87 function:
88 Actual decorator to apply to a function. Given decorator returns the
89 function unmodified to allow decorator stacking.
90
91 """
92
93 def _inner(func):
94 nonlocal shortcuts
95 name = 'napari:' + func.__name__
96
97 action_manager.register_action(
98 name=name,
99 command=func,
100 description=description,
101 keymapprovider=keymapprovider,
102 repeatable=repeatable,
103 )
104 if shortcuts:
105 if isinstance(shortcuts, str):
106 shortcuts = [shortcuts]
107
108 for shortcut in shortcuts:
109 action_manager.bind_shortcut(name, shortcut)
110 return func
111
112 return _inner
113
114
115 def register_layer_attr_action(
116 keymapprovider,
117 description: str,
118 attribute_name: str,
119 shortcuts=None,
120 ):
121 """
122 Convenient decorator to register an action with the current Layers.
123 This will get and restore attribute from function first argument.
124
125 It will use the function name as the action name. We force the description
126 to be given instead of function docstring for translation purpose.
127
128 Parameters
129 ----------
130 keymapprovider : KeymapProvider
131 class on which to register the keybindings - this will typically be
132 the instance in focus that will handle the keyboard shortcut.
133 description : str
134 The description of the action, this will typically be translated and
135 will be what will be used in tooltips.
136 attribute_name : str
137 The name of the attribute to be restored if key is hold over `get_settings().get_settings().application.hold_button_delay.
138 shortcuts : str | List[str]
139 Shortcut to bind by default to the action we are registering.
140
141 Returns
142 -------
143 function:
144 Actual decorator to apply to a function. Given decorator returns the
145 function unmodified to allow decorator stacking.
146
147 """
148
149 def _handle(func):
150 sig = inspect.signature(func)
151 try:
152 first_variable_name = next(iter(sig.parameters))
153 except StopIteration as e:
154 raise RuntimeError(
155 trans._(
156 "If actions has no arguments there is no way to know what to set the attribute to.",
157 deferred=True,
158 ),
159 ) from e
160
161 @functools.wraps(func)
162 def _wrapper(*args, **kwargs):
163 obj = args[0] if args else kwargs[first_variable_name]
164 prev_mode = getattr(obj, attribute_name)
165 func(*args, **kwargs)
166
167 def _callback():
168 setattr(obj, attribute_name, prev_mode)
169
170 return _callback
171
172 repeatable = False # attribute actions are always non-repeatable
173 register_layer_action(
174 keymapprovider, description, repeatable, shortcuts
175 )(_wrapper)
176 return func
177
178 return _handle
179
180
181 def _nanmin(array):
182 """
183 call np.min but fall back to avoid nan and inf if necessary
184 """
185 min_value = np.min(array)
186 if not np.isfinite(min_value):
187 masked = array[np.isfinite(array)]
188 if masked.size == 0:
189 return 0
190 min_value = np.min(masked)
191 return min_value
192
193
194 def _nanmax(array):
195 """
196 call np.max but fall back to avoid nan and inf if necessary
197 """
198 max_value = np.max(array)
199 if not np.isfinite(max_value):
200 masked = array[np.isfinite(array)]
201 if masked.size == 0:
202 return 1
203 max_value = np.max(masked)
204 return max_value
205
206
207 def calc_data_range(data, rgb=False) -> Tuple[float, float]:
208 """Calculate range of data values. If all values are equal return [0, 1].
209
210 Parameters
211 ----------
212 data : array
213 Data to calculate range of values over.
214 rgb : bool
215 Flag if data is rgb.
216
217 Returns
218 -------
219 values : pair of floats
220 Minimum and maximum values in that order.
221
222 Notes
223 -----
224 If the data type is uint8, no calculation is performed, and 0-255 is
225 returned.
226 """
227 if data.dtype == np.uint8:
228 return (0, 255)
229
230 center: Union[int, List[int]]
231
232 if data.size > 1e7 and (data.ndim == 1 or (rgb and data.ndim == 2)):
233 # If data is very large take the average of start, middle and end.
234 center = int(data.shape[0] // 2)
235 slices = [
236 slice(0, 4096),
237 slice(center - 2048, center + 2048),
238 slice(-4096, None),
239 ]
240 reduced_data = [
241 [_nanmax(data[sl]) for sl in slices],
242 [_nanmin(data[sl]) for sl in slices],
243 ]
244 elif data.size > 1e7:
245 # If data is very large take the average of the top, bottom, and
246 # middle slices
247 offset = 2 + int(rgb)
248 bottom_plane_idx = (0,) * (data.ndim - offset)
249 middle_plane_idx = tuple(s // 2 for s in data.shape[:-offset])
250 top_plane_idx = tuple(s - 1 for s in data.shape[:-offset])
251 idxs = [bottom_plane_idx, middle_plane_idx, top_plane_idx]
252 # If each plane is also very large, look only at a subset of the image
253 if (
254 np.prod(data.shape[-offset:]) > 1e7
255 and data.shape[-offset] > 64
256 and data.shape[-offset + 1] > 64
257 ):
258 # Find a central patch of the image to take
259 center = [int(s // 2) for s in data.shape[-offset:]]
260 central_slice = tuple(slice(c - 31, c + 31) for c in center[:2])
261 reduced_data = [
262 [_nanmax(data[idx + central_slice]) for idx in idxs],
263 [_nanmin(data[idx + central_slice]) for idx in idxs],
264 ]
265 else:
266 reduced_data = [
267 [_nanmax(data[idx]) for idx in idxs],
268 [_nanmin(data[idx]) for idx in idxs],
269 ]
270 # compute everything in one go
271 reduced_data = dask.compute(*reduced_data)
272 else:
273 reduced_data = data
274
275 min_val = _nanmin(reduced_data)
276 max_val = _nanmax(reduced_data)
277
278 if min_val == max_val:
279 min_val = 0
280 max_val = 1
281 return (float(min_val), float(max_val))
282
283
284 def segment_normal(a, b, p=(0, 0, 1)):
285 """Determines the unit normal of the vector from a to b.
286
287 Parameters
288 ----------
289 a : np.ndarray
290 Length 2 array of first point or Nx2 array of points
291 b : np.ndarray
292 Length 2 array of second point or Nx2 array of points
293 p : 3-tuple, optional
294 orthogonal vector for segment calculation in 3D.
295
296 Returns
297 -------
298 unit_norm : np.ndarray
299 Length the unit normal of the vector from a to b. If a == b,
300 then returns [0, 0] or Nx2 array of vectors
301 """
302 d = b - a
303
304 if d.ndim == 1:
305 normal = np.array([d[1], -d[0]]) if len(d) == 2 else np.cross(d, p)
306 norm = np.linalg.norm(normal)
307 if norm == 0:
308 norm = 1
309 else:
310 if d.shape[1] == 2:
311 normal = np.stack([d[:, 1], -d[:, 0]], axis=0).transpose(1, 0)
312 else:
313 normal = np.cross(d, p)
314
315 norm = np.linalg.norm(normal, axis=1, keepdims=True)
316 ind = norm == 0
317 norm[ind] = 1
318 unit_norm = normal / norm
319
320 return unit_norm
321
322
323 def convert_to_uint8(data: np.ndarray) -> Optional[np.ndarray]:
324 """
325 Convert array content to uint8, always returning a copy.
326
327 Based on skimage.util.dtype._convert but limited to an output type uint8,
328 so should be equivalent to skimage.util.dtype.img_as_ubyte.
329
330 If all negative, values are clipped to 0.
331
332 If values are integers and below 256, this simply casts.
333 Otherwise the maximum value for the input data type is determined and
334 output values are proportionally scaled by this value.
335
336 Binary images are converted so that False -> 0, True -> 255.
337
338 Float images are multiplied by 255 and then cast to uint8.
339 """
340 out_dtype = np.dtype(np.uint8)
341 out_max = np.iinfo(out_dtype).max
342 if data.dtype == out_dtype:
343 return data
344 in_kind = data.dtype.kind
345 if in_kind == "b":
346 return data.astype(out_dtype) * 255
347 if in_kind == "f":
348 image_out = np.multiply(data, out_max, dtype=data.dtype)
349 np.rint(image_out, out=image_out)
350 np.clip(image_out, 0, out_max, out=image_out)
351 image_out = np.nan_to_num(image_out, copy=False)
352 return image_out.astype(out_dtype)
353
354 if in_kind in "ui":
355 if in_kind == "u":
356 if data.max() < out_max:
357 return data.astype(out_dtype)
358 return np.right_shift(data, (data.dtype.itemsize - 1) * 8).astype(
359 out_dtype
360 )
361
362 np.maximum(data, 0, out=data, dtype=data.dtype)
363 if data.dtype == np.int8:
364 return (data * 2).astype(np.uint8)
365 if data.max() < out_max:
366 return data.astype(out_dtype)
367 return np.right_shift(data, (data.dtype.itemsize - 1) * 8 - 1).astype(
368 out_dtype
369 )
370 return None
371
372
373 def get_current_properties(
374 properties: Dict[str, np.ndarray],
375 choices: Dict[str, np.ndarray],
376 num_data: int = 0,
377 ) -> Dict[str, Any]:
378 """Get the current property values from the properties or choices.
379
380 Parameters
381 ----------
382 properties : dict[str, np.ndarray]
383 The property values.
384 choices : dict[str, np.ndarray]
385 The property value choices.
386 num_data : int
387 The length of data that the properties represent (e.g. number of points).
388
389 Returns
390 -------
391 dict[str, Any]
392 A dictionary where the key is the property name and the value is the current
393 value of that property.
394 """
395 current_properties = {}
396 if num_data > 0:
397 current_properties = {
398 k: np.asarray([v[-1]]) for k, v in properties.items()
399 }
400 elif num_data == 0 and len(choices) > 0:
401 current_properties = {
402 k: np.asarray([v[0]]) for k, v in choices.items()
403 }
404 return current_properties
405
406
407 def dataframe_to_properties(
408 dataframe: pd.DataFrame,
409 ) -> Dict[str, np.ndarray]:
410 """Convert a dataframe to a properties dictionary.
411 Parameters
412 ----------
413 dataframe : DataFrame
414 The dataframe object to be converted to a properties dictionary
415 Returns
416 -------
417 dict[str, np.ndarray]
418 A properties dictionary where the key is the property name and the value
419 is an ndarray with the property value for each point.
420 """
421 return {col: np.asarray(dataframe[col]) for col in dataframe}
422
423
424 def validate_properties(
425 properties: Optional[Union[Dict[str, Array], pd.DataFrame]],
426 expected_len: Optional[int] = None,
427 ) -> Dict[str, np.ndarray]:
428 """Validate the type and size of properties and coerce values to numpy arrays.
429 Parameters
430 ----------
431 properties : dict[str, Array] or DataFrame
432 The property values.
433 expected_len : int
434 The expected length of each property value array.
435 Returns
436 -------
437 Dict[str, np.ndarray]
438 The property values.
439 """
440 if properties is None or len(properties) == 0:
441 return {}
442
443 if not isinstance(properties, dict):
444 properties = dataframe_to_properties(properties)
445
446 lens = [len(v) for v in properties.values()]
447 if expected_len is None:
448 expected_len = lens[0]
449 if any(v != expected_len for v in lens):
450 raise ValueError(
451 trans._(
452 "the number of items must be equal for all properties",
453 deferred=True,
454 )
455 )
456
457 return {k: np.asarray(v) for k, v in properties.items()}
458
459
460 def _validate_property_choices(property_choices):
461 if property_choices is None:
462 property_choices = {}
463 return {k: np.unique(v) for k, v in property_choices.items()}
464
465
466 def _coerce_current_properties_value(
467 value: Union[float, str, bool, list, tuple, np.ndarray]
468 ) -> np.ndarray:
469 """Coerce a value in a current_properties dictionary into the correct type.
470
471 Parameters
472 ----------
473 value : Union[float, str, int, bool, list, tuple, np.ndarray]
474 The value to be coerced.
475
476 Returns
477 -------
478 coerced_value : np.ndarray
479 The value in a 1D numpy array with length 1.
480 """
481 if isinstance(value, (np.ndarray, list, tuple)):
482 if len(value) != 1:
483 raise ValueError(
484 trans._(
485 'current_properties values should have length 1.',
486 deferred=True,
487 )
488 )
489 coerced_value = np.asarray(value)
490 else:
491 coerced_value = np.array([value])
492
493 return coerced_value
494
495
496 def coerce_current_properties(
497 current_properties: Mapping[
498 str, Union[float, str, int, bool, list, tuple, npt.NDArray]
499 ]
500 ) -> Dict[str, np.ndarray]:
501 """Coerce a current_properties dictionary into the correct type.
502
503
504 Parameters
505 ----------
506 current_properties : Dict[str, Union[float, str, int, bool, list, tuple, np.ndarray]]
507 The current_properties dictionary to be coerced.
508
509 Returns
510 -------
511 coerced_current_properties : Dict[str, np.ndarray]
512 The current_properties dictionary with string keys and 1D numpy array with length 1 values.
513 """
514 coerced_current_properties = {
515 k: _coerce_current_properties_value(v)
516 for k, v in current_properties.items()
517 }
518
519 return coerced_current_properties
520
521
522 def compute_multiscale_level(
523 requested_shape, shape_threshold, downsample_factors
524 ):
525 """Computed desired level of the multiscale given requested field of view.
526
527 The level of the multiscale should be the lowest resolution such that
528 the requested shape is above the shape threshold. By passing a shape
529 threshold corresponding to the shape of the canvas on the screen this
530 ensures that we have at least one data pixel per screen pixel, but no
531 more than we need.
532
533 Parameters
534 ----------
535 requested_shape : tuple
536 Requested shape of field of view in data coordinates
537 shape_threshold : tuple
538 Maximum size of a displayed tile in pixels.
539 downsample_factors : list of tuple
540 Downsampling factors for each level of the multiscale. Must be increasing
541 for each level of the multiscale.
542
543 Returns
544 -------
545 level : int
546 Level of the multiscale to be viewing.
547 """
548 # Scale shape by downsample factors
549 scaled_shape = requested_shape / downsample_factors
550
551 # Find the highest level (lowest resolution) allowed
552 locations = np.argwhere(np.all(scaled_shape > shape_threshold, axis=1))
553 level = locations[-1][0] if len(locations) > 0 else 0
554 return level
555
556
557 def compute_multiscale_level_and_corners(
558 corner_pixels, shape_threshold, downsample_factors
559 ):
560 """Computed desired level and corners of a multiscale view.
561
562 The level of the multiscale should be the lowest resolution such that
563 the requested shape is above the shape threshold. By passing a shape
564 threshold corresponding to the shape of the canvas on the screen this
565 ensures that we have at least one data pixel per screen pixel, but no
566 more than we need.
567
568 Parameters
569 ----------
570 corner_pixels : array (2, D)
571 Requested corner pixels at full resolution.
572 shape_threshold : tuple
573 Maximum size of a displayed tile in pixels.
574 downsample_factors : list of tuple
575 Downsampling factors for each level of the multiscale. Must be increasing
576 for each level of the multiscale.
577
578 Returns
579 -------
580 level : int
581 Level of the multiscale to be viewing.
582 corners : array (2, D)
583 Needed corner pixels at target resolution.
584 """
585 requested_shape = corner_pixels[1] - corner_pixels[0]
586 level = compute_multiscale_level(
587 requested_shape, shape_threshold, downsample_factors
588 )
589
590 corners = corner_pixels / downsample_factors[level]
591 corners = np.array([np.floor(corners[0]), np.ceil(corners[1])]).astype(int)
592
593 return level, corners
594
595
596 def coerce_affine(affine, *, ndim, name=None):
597 """Coerce a user input into an affine transform object.
598
599 If the input is already an affine transform object, that same object is returned
600 with a name change if the given name is not None. If the input is None, an identity
601 affine transform object of the given dimensionality is returned.
602
603 Parameters
604 ----------
605 affine : array-like or napari.utils.transforms.Affine
606 An existing affine transform object or an array-like that is its transform matrix.
607 ndim : int
608 The desired dimensionality of the transform. Ignored is affine is an Affine transform object.
609 name : str
610 The desired name of the transform.
611
612 Returns
613 -------
614 napari.utils.transforms.Affine
615 The input coerced into an affine transform object.
616 """
617 if affine is None:
618 affine = Affine(affine_matrix=np.eye(ndim + 1), ndim=ndim)
619 elif isinstance(affine, np.ndarray):
620 affine = Affine(affine_matrix=affine, ndim=ndim)
621 elif isinstance(affine, list):
622 affine = Affine(affine_matrix=np.array(affine), ndim=ndim)
623 elif not isinstance(affine, Affine):
624 raise TypeError(
625 trans._(
626 'affine input not recognized. must be either napari.utils.transforms.Affine or ndarray. Got {dtype}',
627 deferred=True,
628 dtype=type(affine),
629 )
630 )
631 if name is not None:
632 affine.name = name
633 return affine
634
635
636 def dims_displayed_world_to_layer(
637 dims_displayed_world: List[int],
638 ndim_world: int,
639 ndim_layer: int,
640 ) -> List[int]:
641 """Convert the dims_displayed from world dims to the layer dims.
642
643 This accounts differences in the number of dimensions in the world
644 dims versus the layer and for transpose and rolls.
645
646 Parameters
647 ----------
648 dims_displayed_world : List[int]
649 The dims_displayed in world coordinates (i.e., from viewer.dims.displayed).
650 ndim_world : int
651 The number of dimensions in the world coordinates (i.e., viewer.dims.ndim)
652 ndim_layer : int
653 The number of dimensions in layer the layer (i.e., layer.ndim).
654 """
655 if ndim_world > len(dims_displayed_world):
656 all_dims = list(range(ndim_world))
657 not_in_dims_displayed = [
658 d for d in all_dims if d not in dims_displayed_world
659 ]
660 order = not_in_dims_displayed + dims_displayed_world
661 else:
662 order = dims_displayed_world
663 offset = ndim_world - ndim_layer
664
665 order_arr = np.array(order)
666 if offset <= 0:
667 order = list(range(-offset)) + list(order_arr - offset)
668 else:
669 order = list(order_arr[order_arr >= offset] - offset)
670
671 n_display_world = len(dims_displayed_world)
672 if n_display_world > ndim_layer:
673 n_display_layer = ndim_layer
674 else:
675 n_display_layer = n_display_world
676 dims_displayed = order[-n_display_layer:]
677
678 return dims_displayed
679
680
681 def get_extent_world(data_extent, data_to_world, centered=None):
682 """Range of layer in world coordinates base on provided data_extent
683
684 Parameters
685 ----------
686 data_extent : array, shape (2, D)
687 Extent of layer in data coordinates.
688 data_to_world : napari.utils.transforms.Affine
689 The transform from data to world coordinates.
690
691 Returns
692 -------
693 extent_world : array, shape (2, D)
694 """
695 if centered is not None:
696 warnings.warn(
697 trans._(
698 'The `centered` argument is deprecated. '
699 'Extents are now always centered on data points.',
700 deferred=True,
701 ),
702 stacklevel=2,
703 )
704
705 D = data_extent.shape[1]
706 full_data_extent = np.array(np.meshgrid(*data_extent.T)).T.reshape(-1, D)
707 full_world_extent = data_to_world(full_data_extent)
708 world_extent = np.array(
709 [
710 np.min(full_world_extent, axis=0),
711 np.max(full_world_extent, axis=0),
712 ]
713 )
714 return world_extent
715
716
717 def features_to_pandas_dataframe(features: Any) -> pd.DataFrame:
718 """Coerces a layer's features property to a pandas DataFrame.
719
720 In general, this may copy the data from features into the returned
721 DataFrame so there is no guarantee that changing element values in the
722 returned DataFrame will also change values in the features property.
723
724 Parameters
725 ----------
726 features
727 The features property of a layer.
728
729 Returns
730 -------
731 pd.DataFrame
732 A pandas DataFrame that stores the given features.
733 """
734 return features
735
736
737 class _FeatureTable:
738 """Stores feature values and their defaults.
739
740 Parameters
741 ----------
742 values : Optional[Union[Dict[str, np.ndarray], pd.DataFrame]]
743 The features values, which will be passed to the pandas DataFrame initializer.
744 If this is a pandas DataFrame with a non-default index, that index
745 (except its length) will be ignored.
746 num_data : Optional[int]
747 The number of the elements in the layer calling this, such as
748 the number of points, which is used to check that the features
749 table has the expected number of rows. If None, then the default
750 DataFrame index is used.
751 defaults: Optional[Union[Dict[str, Any], pd.DataFrame]]
752 The default feature values, which if specified should have the same keys
753 as the values provided. If None, will be inferred from the values.
754 """
755
756 def __init__(
757 self,
758 values: Optional[Union[Dict[str, np.ndarray], pd.DataFrame]] = None,
759 *,
760 num_data: Optional[int] = None,
761 defaults: Optional[Union[Dict[str, Any], pd.DataFrame]] = None,
762 ) -> None:
763 self._values = _validate_features(values, num_data=num_data)
764 self._defaults = _validate_feature_defaults(defaults, self._values)
765
766 @property
767 def values(self) -> pd.DataFrame:
768 """The feature values table."""
769 return self._values
770
771 def set_values(self, values, *, num_data=None) -> None:
772 """Sets the feature values table."""
773 self._values = _validate_features(values, num_data=num_data)
774 self._defaults = _validate_feature_defaults(None, self._values)
775
776 @property
777 def defaults(self) -> pd.DataFrame:
778 """The default values one-row table."""
779 return self._defaults
780
781 def set_defaults(
782 self, defaults: Union[Dict[str, Any], pd.DataFrame]
783 ) -> None:
784 """Sets the feature default values."""
785 self._defaults = _validate_feature_defaults(defaults, self._values)
786
787 def properties(self) -> Dict[str, np.ndarray]:
788 """Converts this to a deprecated properties dictionary.
789
790 This will reference the features data when possible, but in general the
791 returned dictionary may contain copies of those data.
792
793 Returns
794 -------
795 Dict[str, np.ndarray]
796 The properties dictionary equivalent to the given features.
797 """
798 return _features_to_properties(self._values)
799
800 def choices(self) -> Dict[str, np.ndarray]:
801 """Converts this to a deprecated property choices dictionary.
802
803 Only categorical features will have corresponding entries in the dictionary.
804
805 Returns
806 -------
807 Dict[str, np.ndarray]
808 The property choices dictionary equivalent to this.
809 """
810 return {
811 name: series.dtype.categories.to_numpy()
812 for name, series in self._values.items()
813 if isinstance(series.dtype, pd.CategoricalDtype)
814 }
815
816 def currents(self) -> Dict[str, np.ndarray]:
817 """Converts the defaults table to a deprecated current properties dictionary."""
818 return _features_to_properties(self._defaults)
819
820 def set_currents(
821 self,
822 currents: Dict[str, npt.NDArray],
823 *,
824 update_indices: Optional[List[int]] = None,
825 ) -> None:
826 """Sets the default values using the deprecated current properties dictionary.
827
828 May also update some of the feature values to be equal to the new default values.
829
830 Parameters
831 ----------
832 currents : Dict[str, np.ndarray]
833 The new current property values.
834 update_indices : Optional[List[int]]
835 If not None, all feature values at the given row indices will be set to
836 the corresponding new current/default feature values.
837 """
838 currents = coerce_current_properties(currents)
839 self._defaults = _validate_features(currents, num_data=1)
840 if update_indices is not None:
841 for k in self._defaults:
842 self._values[k][update_indices] = self._defaults[k][0]
843
844 def resize(
845 self,
846 size: int,
847 ) -> None:
848 """Resize this padding with default values if required.
849
850 Parameters
851 ----------
852 size : int
853 The new size (number of rows) of the features table.
854 """
855 current_size = self._values.shape[0]
856 if size < current_size:
857 self.remove(range(size, current_size))
858 elif size > current_size:
859 to_append = self._defaults.iloc[np.zeros(size - current_size)]
860 self.append(to_append)
861
862 def append(self, to_append: pd.DataFrame) -> None:
863 """Append new feature rows to this.
864
865 Parameters
866 ----------
867 to_append : pd.DataFrame
868 The features to append.
869 """
870 self._values = pd.concat([self._values, to_append], ignore_index=True)
871
872 def remove(self, indices: Any) -> None:
873 """Remove rows from this by index.
874
875 Parameters
876 ----------
877 indices : Any
878 The indices of the rows to remove. Must be usable as the labels parameter
879 to pandas.DataFrame.drop.
880 """
881 self._values = self._values.drop(labels=indices, axis=0).reset_index(
882 drop=True
883 )
884
885 def reorder(self, order: Sequence[int]) -> None:
886 """Reorders the rows of the feature values table."""
887 self._values = self._values.iloc[order].reset_index(drop=True)
888
889 @classmethod
890 def from_layer(
891 cls,
892 *,
893 features: Optional[Union[Dict[str, np.ndarray], pd.DataFrame]] = None,
894 feature_defaults: Optional[Union[Dict[str, Any], pd.DataFrame]] = None,
895 properties: Optional[
896 Union[Dict[str, np.ndarray], pd.DataFrame]
897 ] = None,
898 property_choices: Optional[Dict[str, np.ndarray]] = None,
899 num_data: Optional[int] = None,
900 ) -> _FeatureTable:
901 """Coerces a layer's keyword arguments to a feature manager.
902
903 Parameters
904 ----------
905 features : Optional[Union[Dict[str, np.ndarray], pd.DataFrame]]
906 The features input to a layer.
907 properties : Optional[Union[Dict[str, np.ndarray], pd.DataFrame]]
908 The properties input to a layer.
909 property_choices : Optional[Dict[str, np.ndarray]]
910 The property choices input to a layer.
911 num_data : Optional[int]
912 The number of the elements in the layer calling this, such as
913 the number of points.
914
915 Returns
916 -------
917 _FeatureTable
918 The feature manager created from the given layer keyword arguments.
919
920 Raises
921 ------
922 ValueError
923 If the input property columns are not all the same length, or if
924 that length is not equal to the given num_data.
925 """
926 if properties is not None or property_choices is not None:
927 features = _features_from_properties(
928 properties=properties,
929 property_choices=property_choices,
930 num_data=num_data,
931 )
932 return cls(features, defaults=feature_defaults, num_data=num_data)
933
934
935 def _get_default_column(column: pd.Series) -> pd.Series:
936 """Get the default column of length 1 from a data column."""
937 value = None
938 if column.size > 0:
939 value = column.iloc[-1]
940 elif isinstance(column.dtype, pd.CategoricalDtype):
941 choices = column.dtype.categories
942 if choices.size > 0:
943 value = choices[0]
944 elif isinstance(column.dtype, np.dtype) and np.issubdtype(
945 column.dtype, np.integer
946 ):
947 # For numpy backed columns that store integers there's no way to
948 # store missing values, so passing None creates an np.float64 series
949 # containing NaN. Therefore, use a default of 0 instead.
950 value = 0
951 return pd.Series(data=value, dtype=column.dtype, index=range(1))
952
953
954 def _validate_features(
955 features: Optional[Union[Dict[str, np.ndarray], pd.DataFrame]],
956 *,
957 num_data: Optional[int] = None,
958 ) -> pd.DataFrame:
959 """Validates and coerces feature values into a pandas DataFrame.
960
961 See Also
962 --------
963 :class:`_FeatureTable` : See initialization for parameter descriptions.
964 """
965 if isinstance(features, pd.DataFrame):
966 features = features.reset_index(drop=True)
967 elif isinstance(features, dict):
968 # Convert all array-like objects into a numpy array.
969 # This section was introduced due to an unexpected behavior when using
970 # a pandas Series with mixed indices as input.
971 # This way should handle all array-like objects correctly.
972 # See https://github.com/napari/napari/pull/4755 for more details.
973 features = {
974 key: np.array(value, copy=False) for key, value in features.items()
975 }
976 index = None if num_data is None else range(num_data)
977 return pd.DataFrame(data=features, index=index)
978
979
980 def _validate_feature_defaults(
981 defaults: Optional[Union[Dict[str, Any], pd.DataFrame]],
982 values: pd.DataFrame,
983 ) -> pd.DataFrame:
984 """Validates and coerces feature default values into a pandas DataFrame.
985
986 See Also
987 --------
988 :class:`_FeatureTable` : See initialization for parameter descriptions.
989 """
990 if defaults is None:
991 defaults = {c: _get_default_column(values[c]) for c in values.columns}
992 else:
993 default_columns = set(defaults.keys())
994 value_columns = set(values.keys())
995 extra_defaults = default_columns - value_columns
996 if len(extra_defaults) > 0:
997 raise ValueError(
998 trans._(
999 'Feature defaults contain some extra columns not in feature values: {extra_defaults}',
1000 deferred=True,
1001 extra_defaults=extra_defaults,
1002 )
1003 )
1004 missing_defaults = value_columns - default_columns
1005 if len(missing_defaults) > 0:
1006 raise ValueError(
1007 trans._(
1008 'Feature defaults is missing some columns in feature values: {missing_defaults}',
1009 deferred=True,
1010 missing_defaults=missing_defaults,
1011 )
1012 )
1013 # Convert to series first to capture the per-column dtype from values,
1014 # since the DataFrame initializer does not support passing multiple dtypes.
1015 defaults = {
1016 c: pd.Series(
1017 defaults[c],
1018 dtype=values.dtypes[c],
1019 index=range(1),
1020 )
1021 for c in defaults
1022 }
1023
1024 return pd.DataFrame(defaults, index=range(1))
1025
1026
1027 def _features_from_properties(
1028 *,
1029 properties: Optional[Union[Dict[str, np.ndarray], pd.DataFrame]] = None,
1030 property_choices: Optional[Dict[str, np.ndarray]] = None,
1031 num_data: Optional[int] = None,
1032 ) -> pd.DataFrame:
1033 """Validates and coerces deprecated properties input into a features DataFrame.
1034
1035 See Also
1036 --------
1037 :meth:`_FeatureTable.from_layer`
1038 """
1039 # Create categorical series for any choices provided.
1040 if property_choices is not None:
1041 properties = pd.DataFrame(data=properties)
1042 for name, choices in property_choices.items():
1043 dtype = pd.CategoricalDtype(categories=choices)
1044 num_values = properties.shape[0] if num_data is None else num_data
1045 values = (
1046 properties[name] if name in properties else [None] * num_values
1047 )
1048 properties[name] = pd.Series(values, dtype=dtype)
1049 return _validate_features(properties, num_data=num_data)
1050
1051
1052 def _features_to_properties(features: pd.DataFrame) -> Dict[str, np.ndarray]:
1053 """Converts a features DataFrame to a deprecated properties dictionary.
1054
1055 See Also
1056 --------
1057 :meth:`_FeatureTable.properties`
1058 """
1059 return {name: series.to_numpy() for name, series in features.items()}
1060
1061
1062 def _unique_element(array: Array) -> Optional[Any]:
1063 """
1064 Returns the unique element along the 0th axis, if it exists; otherwise, returns None.
1065
1066 This is faster than np.unique, does not require extra tricks for nD arrays, and
1067 does not fail for non-sortable elements.
1068 """
1069 if len(array) == 0:
1070 return None
1071 el = array[0]
1072 if np.any(array[1:] != el):
1073 return None
1074 return el
1075
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/napari/layers/utils/layer_utils.py b/napari/layers/utils/layer_utils.py
--- a/napari/layers/utils/layer_utils.py
+++ b/napari/layers/utils/layer_utils.py
@@ -948,7 +948,7 @@
         # store missing values, so passing None creates an np.float64 series
         # containing NaN. Therefore, use a default of 0 instead.
         value = 0
-    return pd.Series(data=value, dtype=column.dtype, index=range(1))
+    return pd.Series(data=[value], dtype=column.dtype, index=range(1))
 
 
 def _validate_features(
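Why the single-argument change above is enough, as a minimal sketch grounded in the issue's traceback: a features column built from a `regionprops_table` `extra_properties` function that returns lists of unequal length has object dtype, so `column.iloc[-1]` is itself a list; passing that list directly as `data` against a length-1 index raises the reported `ValueError`, while wrapping it in a list makes it the single element of a one-row Series. The column contents below are illustrative values, not data taken from this record.

```python
import pandas as pd

# Object-dtype features column whose cells are lists of unequal length,
# e.g. the output of a variable-length extra_properties function (illustrative values).
column = pd.Series([[0] * 3, [0] * 8], name="random_length_property")

value = column.iloc[-1]  # the last cell: a plain Python list of length 8

# Pre-patch behaviour: the list is treated as the whole `data` argument,
# so its length (8) must match the length-1 index -> ValueError.
try:
    pd.Series(data=value, dtype=column.dtype, index=range(1))
except ValueError as err:
    print(err)  # Length of values (8) does not match length of index (1)

# Patched behaviour: the list becomes the single element of a one-row Series.
default = pd.Series(data=[value], dtype=column.dtype, index=range(1))
print(default.iloc[0])  # [0, 0, 0, 0, 0, 0, 0, 0]
```

Scalar-valued columns are unaffected by the change, since a length-1 list still broadcasts against the length-1 index exactly as the bare scalar did.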
| {"golden_diff": "diff --git a/napari/layers/utils/layer_utils.py b/napari/layers/utils/layer_utils.py\n--- a/napari/layers/utils/layer_utils.py\n+++ b/napari/layers/utils/layer_utils.py\n@@ -948,7 +948,7 @@\n # store missing values, so passing None creates an np.float64 series\n # containing NaN. Therefore, use a default of 0 instead.\n value = 0\n- return pd.Series(data=value, dtype=column.dtype, index=range(1))\n+ return pd.Series(data=[value], dtype=column.dtype, index=range(1))\n \n \n def _validate_features(\n", "issue": "property in labels layer does not understand objects of different lengths\n## \ud83d\udc1b Bug\r\n\r\nI am trying to use the properties attribute of `add_labels` to add a dictionary of properties that contains a self defined property that happens to be in the form of list but of unequal lengths. So every region has an extra property of different length. Similar to how image is a property that has different lengths for every region.\r\n\r\nThis is a silly example of what I have:\r\n\r\n## To Reproduce\r\n\r\n```python\r\nimport numpy as np\r\nfrom skimage.measure import regionprops_table, label\r\nimport napari\r\n\r\ndef random_length_property(regionmask):\r\n return [0] * np.random.randint(10)\r\n\r\nimage = data.coins()\r\nviewer = napari.view_image(image)\r\n\r\nlabel_image = label(image > 110)\r\ntable_props = regionprops_table(\r\n label_image,\r\n intensity_image=image,\r\n extra_properties=(random_length_property,),\r\n )\r\n\r\nlabel_layer = viewer.add_labels(\r\n label_image, name=\"segmentation\", properties=table_props\r\n )\r\n\r\nnapari.run()\r\n```\r\n\r\nAnd this makes napari unhappy with:\r\n\r\n```bash \r\n label_layer = viewer.add_labels(\r\n File \"C:\\Users\\elena.pascal\\AppData\\Local\\pypoetry\\Cache\\virtualenvs\\image-process-zoTHhXWu-py3.8\\lib\\site-packages\\napari\\components\\viewer_model.py\", line 4, in add_labels\r\n import itertools\r\n File \"C:\\Users\\elena.pascal\\AppData\\Local\\pypoetry\\Cache\\virtualenvs\\image-process-zoTHhXWu-py3.8\\lib\\site-packages\\napari\\layers\\labels\\labels.py\", line 328, in __init__\r\n self._feature_table = _FeatureTable.from_layer(\r\n File \"C:\\Users\\elena.pascal\\AppData\\Local\\pypoetry\\Cache\\virtualenvs\\image-process-zoTHhXWu-py3.8\\lib\\site-packages\\napari\\layers\\utils\\layer_utils.py\", line 956, in from_layer\r\n return cls(features, defaults=feature_defaults, num_data=num_data)\r\n File \"C:\\Users\\elena.pascal\\AppData\\Local\\pypoetry\\Cache\\virtualenvs\\image-process-zoTHhXWu-py3.8\\lib\\site-packages\\napari\\layers\\utils\\layer_utils.py\", line 788, in __init__\r\n self._defaults = _validate_feature_defaults(defaults, self._values)\r\n File \"C:\\Users\\elena.pascal\\AppData\\Local\\pypoetry\\Cache\\virtualenvs\\image-process-zoTHhXWu-py3.8\\lib\\site-packages\\napari\\layers\\utils\\layer_utils.py\", line 1015, in _validate_feature_defaults\r\n defaults = {c: _get_default_column(values[c]) for c in values.columns}\r\n File \"C:\\Users\\elena.pascal\\AppData\\Local\\pypoetry\\Cache\\virtualenvs\\image-process-zoTHhXWu-py3.8\\lib\\site-packages\\napari\\layers\\utils\\layer_utils.py\", line 1015, in <dictcomp>\r\n defaults = {c: _get_default_column(values[c]) for c in values.columns}\r\n File \"C:\\Users\\elena.pascal\\AppData\\Local\\pypoetry\\Cache\\virtualenvs\\image-process-zoTHhXWu-py3.8\\lib\\site-packages\\napari\\layers\\utils\\layer_utils.py\", line 975, in _get_default_column\r\n return pd.Series(data=value, dtype=column.dtype, index=range(1))\r\n File 
\"C:\\Users\\elena.pascal\\AppData\\Local\\pypoetry\\Cache\\virtualenvs\\image-process-zoTHhXWu-py3.8\\lib\\site-packages\\pandas\\core\\series.py\", line 500, in __init__\r\n com.require_length_match(data, index)\r\n File \"C:\\Users\\elena.pascal\\AppData\\Local\\pypoetry\\Cache\\virtualenvs\\image-process-zoTHhXWu-py3.8\\lib\\site-packages\\pandas\\core\\common.py\", line 576, in require_length_match\r\n raise ValueError(\r\nValueError: Length of values (8) does not match length of index (1)\r\n```\r\n\r\n\r\n\r\n## Environment\r\n\r\nnapari: 0.4.17rc4.dev505+g7c9ea89d\r\nPlatform: Windows-10-10.0.19045-SP0\r\nPython: 3.8.10 (tags/v3.8.10:3d8993a, May 3 2021, 11:48:03) [MSC v.1928 64 bit (AMD64)]\r\nQt: 5.15.2\r\nPyQt5: 5.15.9\r\nNumPy: 1.24.4\r\nSciPy: 1.9.1\r\nDask: 2023.5.0\r\nVisPy: 0.13.0\r\nmagicgui: 0.7.2\r\nsuperqt: unknown\r\nin-n-out: 0.1.8\r\napp-model: 0.2.0\r\nnpe2: 0.7.2\r\n\r\nOpenGL:\r\n- GL version: 4.6.0 - Build 31.0.101.3959\r\n- MAX_TEXTURE_SIZE: 16384\r\n\r\nScreens:\r\n- screen 1: resolution 2560x1440, scale 1.0\r\n- screen 2: resolution 2560x1440, scale 1.0\r\n\r\nSettings path:\r\n- C:\\Users\\elena.pascal\\AppData\\Local\\napari\\illumion_033691400d65ecf164fea402e77f284e6b482050\\settings.yaml\r\n\r\n - Any other relevant information:\r\n\r\n## Additional context\r\n\r\n@jni answered here: https://forum.image.sc/t/struggle-to-add-property-of-unequal-length-to-labels-layer/86201?u=elena_pascal\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport functools\nimport inspect\nimport warnings\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Dict,\n List,\n NamedTuple,\n Optional,\n Sequence,\n Tuple,\n Union,\n)\n\nimport dask\nimport numpy as np\nimport pandas as pd\n\nfrom napari.utils.action_manager import action_manager\nfrom napari.utils.events.custom_types import Array\nfrom napari.utils.transforms import Affine\nfrom napari.utils.translations import trans\n\nif TYPE_CHECKING:\n from typing import Mapping\n\n import numpy.typing as npt\n\n\nclass Extent(NamedTuple):\n \"\"\"Extent of coordinates in a local data space and world space.\n\n Each extent is a (2, D) array that stores the minimum and maximum coordinate\n values in each of D dimensions. Both the minimum and maximum coordinates are\n inclusive so form an axis-aligned, closed interval or a D-dimensional box\n around all the coordinates.\n\n Attributes\n ----------\n data : (2, D) array of floats\n The minimum and maximum raw data coordinates ignoring any transforms like\n translation or scale.\n world : (2, D) array of floats\n The minimum and maximum world coordinates after applying a transform to the\n raw data coordinates that brings them into a potentially shared world space.\n step : (D,) array of floats\n The step in each dimension that when taken from the minimum world coordinate,\n should form a regular grid that eventually hits the maximum world coordinate.\n \"\"\"\n\n data: np.ndarray\n world: np.ndarray\n step: np.ndarray\n\n\ndef register_layer_action(\n keymapprovider,\n description: str,\n repeatable: bool = False,\n shortcuts: Optional[str] = None,\n):\n \"\"\"\n Convenient decorator to register an action with the current Layers\n\n It will use the function name as the action name. 
We force the description\n to be given instead of function docstring for translation purpose.\n\n\n Parameters\n ----------\n keymapprovider : KeymapProvider\n class on which to register the keybindings - this will typically be\n the instance in focus that will handle the keyboard shortcut.\n description : str\n The description of the action, this will typically be translated and\n will be what will be used in tooltips.\n repeatable : bool\n A flag indicating whether the action autorepeats when key is held\n shortcuts : str | List[str]\n Shortcut to bind by default to the action we are registering.\n\n Returns\n -------\n function:\n Actual decorator to apply to a function. Given decorator returns the\n function unmodified to allow decorator stacking.\n\n \"\"\"\n\n def _inner(func):\n nonlocal shortcuts\n name = 'napari:' + func.__name__\n\n action_manager.register_action(\n name=name,\n command=func,\n description=description,\n keymapprovider=keymapprovider,\n repeatable=repeatable,\n )\n if shortcuts:\n if isinstance(shortcuts, str):\n shortcuts = [shortcuts]\n\n for shortcut in shortcuts:\n action_manager.bind_shortcut(name, shortcut)\n return func\n\n return _inner\n\n\ndef register_layer_attr_action(\n keymapprovider,\n description: str,\n attribute_name: str,\n shortcuts=None,\n):\n \"\"\"\n Convenient decorator to register an action with the current Layers.\n This will get and restore attribute from function first argument.\n\n It will use the function name as the action name. We force the description\n to be given instead of function docstring for translation purpose.\n\n Parameters\n ----------\n keymapprovider : KeymapProvider\n class on which to register the keybindings - this will typically be\n the instance in focus that will handle the keyboard shortcut.\n description : str\n The description of the action, this will typically be translated and\n will be what will be used in tooltips.\n attribute_name : str\n The name of the attribute to be restored if key is hold over `get_settings().get_settings().application.hold_button_delay.\n shortcuts : str | List[str]\n Shortcut to bind by default to the action we are registering.\n\n Returns\n -------\n function:\n Actual decorator to apply to a function. 
Given decorator returns the\n function unmodified to allow decorator stacking.\n\n \"\"\"\n\n def _handle(func):\n sig = inspect.signature(func)\n try:\n first_variable_name = next(iter(sig.parameters))\n except StopIteration as e:\n raise RuntimeError(\n trans._(\n \"If actions has no arguments there is no way to know what to set the attribute to.\",\n deferred=True,\n ),\n ) from e\n\n @functools.wraps(func)\n def _wrapper(*args, **kwargs):\n obj = args[0] if args else kwargs[first_variable_name]\n prev_mode = getattr(obj, attribute_name)\n func(*args, **kwargs)\n\n def _callback():\n setattr(obj, attribute_name, prev_mode)\n\n return _callback\n\n repeatable = False # attribute actions are always non-repeatable\n register_layer_action(\n keymapprovider, description, repeatable, shortcuts\n )(_wrapper)\n return func\n\n return _handle\n\n\ndef _nanmin(array):\n \"\"\"\n call np.min but fall back to avoid nan and inf if necessary\n \"\"\"\n min_value = np.min(array)\n if not np.isfinite(min_value):\n masked = array[np.isfinite(array)]\n if masked.size == 0:\n return 0\n min_value = np.min(masked)\n return min_value\n\n\ndef _nanmax(array):\n \"\"\"\n call np.max but fall back to avoid nan and inf if necessary\n \"\"\"\n max_value = np.max(array)\n if not np.isfinite(max_value):\n masked = array[np.isfinite(array)]\n if masked.size == 0:\n return 1\n max_value = np.max(masked)\n return max_value\n\n\ndef calc_data_range(data, rgb=False) -> Tuple[float, float]:\n \"\"\"Calculate range of data values. If all values are equal return [0, 1].\n\n Parameters\n ----------\n data : array\n Data to calculate range of values over.\n rgb : bool\n Flag if data is rgb.\n\n Returns\n -------\n values : pair of floats\n Minimum and maximum values in that order.\n\n Notes\n -----\n If the data type is uint8, no calculation is performed, and 0-255 is\n returned.\n \"\"\"\n if data.dtype == np.uint8:\n return (0, 255)\n\n center: Union[int, List[int]]\n\n if data.size > 1e7 and (data.ndim == 1 or (rgb and data.ndim == 2)):\n # If data is very large take the average of start, middle and end.\n center = int(data.shape[0] // 2)\n slices = [\n slice(0, 4096),\n slice(center - 2048, center + 2048),\n slice(-4096, None),\n ]\n reduced_data = [\n [_nanmax(data[sl]) for sl in slices],\n [_nanmin(data[sl]) for sl in slices],\n ]\n elif data.size > 1e7:\n # If data is very large take the average of the top, bottom, and\n # middle slices\n offset = 2 + int(rgb)\n bottom_plane_idx = (0,) * (data.ndim - offset)\n middle_plane_idx = tuple(s // 2 for s in data.shape[:-offset])\n top_plane_idx = tuple(s - 1 for s in data.shape[:-offset])\n idxs = [bottom_plane_idx, middle_plane_idx, top_plane_idx]\n # If each plane is also very large, look only at a subset of the image\n if (\n np.prod(data.shape[-offset:]) > 1e7\n and data.shape[-offset] > 64\n and data.shape[-offset + 1] > 64\n ):\n # Find a central patch of the image to take\n center = [int(s // 2) for s in data.shape[-offset:]]\n central_slice = tuple(slice(c - 31, c + 31) for c in center[:2])\n reduced_data = [\n [_nanmax(data[idx + central_slice]) for idx in idxs],\n [_nanmin(data[idx + central_slice]) for idx in idxs],\n ]\n else:\n reduced_data = [\n [_nanmax(data[idx]) for idx in idxs],\n [_nanmin(data[idx]) for idx in idxs],\n ]\n # compute everything in one go\n reduced_data = dask.compute(*reduced_data)\n else:\n reduced_data = data\n\n min_val = _nanmin(reduced_data)\n max_val = _nanmax(reduced_data)\n\n if min_val == max_val:\n min_val = 0\n max_val = 1\n 
return (float(min_val), float(max_val))\n\n\ndef segment_normal(a, b, p=(0, 0, 1)):\n \"\"\"Determines the unit normal of the vector from a to b.\n\n Parameters\n ----------\n a : np.ndarray\n Length 2 array of first point or Nx2 array of points\n b : np.ndarray\n Length 2 array of second point or Nx2 array of points\n p : 3-tuple, optional\n orthogonal vector for segment calculation in 3D.\n\n Returns\n -------\n unit_norm : np.ndarray\n Length the unit normal of the vector from a to b. If a == b,\n then returns [0, 0] or Nx2 array of vectors\n \"\"\"\n d = b - a\n\n if d.ndim == 1:\n normal = np.array([d[1], -d[0]]) if len(d) == 2 else np.cross(d, p)\n norm = np.linalg.norm(normal)\n if norm == 0:\n norm = 1\n else:\n if d.shape[1] == 2:\n normal = np.stack([d[:, 1], -d[:, 0]], axis=0).transpose(1, 0)\n else:\n normal = np.cross(d, p)\n\n norm = np.linalg.norm(normal, axis=1, keepdims=True)\n ind = norm == 0\n norm[ind] = 1\n unit_norm = normal / norm\n\n return unit_norm\n\n\ndef convert_to_uint8(data: np.ndarray) -> Optional[np.ndarray]:\n \"\"\"\n Convert array content to uint8, always returning a copy.\n\n Based on skimage.util.dtype._convert but limited to an output type uint8,\n so should be equivalent to skimage.util.dtype.img_as_ubyte.\n\n If all negative, values are clipped to 0.\n\n If values are integers and below 256, this simply casts.\n Otherwise the maximum value for the input data type is determined and\n output values are proportionally scaled by this value.\n\n Binary images are converted so that False -> 0, True -> 255.\n\n Float images are multiplied by 255 and then cast to uint8.\n \"\"\"\n out_dtype = np.dtype(np.uint8)\n out_max = np.iinfo(out_dtype).max\n if data.dtype == out_dtype:\n return data\n in_kind = data.dtype.kind\n if in_kind == \"b\":\n return data.astype(out_dtype) * 255\n if in_kind == \"f\":\n image_out = np.multiply(data, out_max, dtype=data.dtype)\n np.rint(image_out, out=image_out)\n np.clip(image_out, 0, out_max, out=image_out)\n image_out = np.nan_to_num(image_out, copy=False)\n return image_out.astype(out_dtype)\n\n if in_kind in \"ui\":\n if in_kind == \"u\":\n if data.max() < out_max:\n return data.astype(out_dtype)\n return np.right_shift(data, (data.dtype.itemsize - 1) * 8).astype(\n out_dtype\n )\n\n np.maximum(data, 0, out=data, dtype=data.dtype)\n if data.dtype == np.int8:\n return (data * 2).astype(np.uint8)\n if data.max() < out_max:\n return data.astype(out_dtype)\n return np.right_shift(data, (data.dtype.itemsize - 1) * 8 - 1).astype(\n out_dtype\n )\n return None\n\n\ndef get_current_properties(\n properties: Dict[str, np.ndarray],\n choices: Dict[str, np.ndarray],\n num_data: int = 0,\n) -> Dict[str, Any]:\n \"\"\"Get the current property values from the properties or choices.\n\n Parameters\n ----------\n properties : dict[str, np.ndarray]\n The property values.\n choices : dict[str, np.ndarray]\n The property value choices.\n num_data : int\n The length of data that the properties represent (e.g. 
number of points).\n\n Returns\n -------\n dict[str, Any]\n A dictionary where the key is the property name and the value is the current\n value of that property.\n \"\"\"\n current_properties = {}\n if num_data > 0:\n current_properties = {\n k: np.asarray([v[-1]]) for k, v in properties.items()\n }\n elif num_data == 0 and len(choices) > 0:\n current_properties = {\n k: np.asarray([v[0]]) for k, v in choices.items()\n }\n return current_properties\n\n\ndef dataframe_to_properties(\n dataframe: pd.DataFrame,\n) -> Dict[str, np.ndarray]:\n \"\"\"Convert a dataframe to a properties dictionary.\n Parameters\n ----------\n dataframe : DataFrame\n The dataframe object to be converted to a properties dictionary\n Returns\n -------\n dict[str, np.ndarray]\n A properties dictionary where the key is the property name and the value\n is an ndarray with the property value for each point.\n \"\"\"\n return {col: np.asarray(dataframe[col]) for col in dataframe}\n\n\ndef validate_properties(\n properties: Optional[Union[Dict[str, Array], pd.DataFrame]],\n expected_len: Optional[int] = None,\n) -> Dict[str, np.ndarray]:\n \"\"\"Validate the type and size of properties and coerce values to numpy arrays.\n Parameters\n ----------\n properties : dict[str, Array] or DataFrame\n The property values.\n expected_len : int\n The expected length of each property value array.\n Returns\n -------\n Dict[str, np.ndarray]\n The property values.\n \"\"\"\n if properties is None or len(properties) == 0:\n return {}\n\n if not isinstance(properties, dict):\n properties = dataframe_to_properties(properties)\n\n lens = [len(v) for v in properties.values()]\n if expected_len is None:\n expected_len = lens[0]\n if any(v != expected_len for v in lens):\n raise ValueError(\n trans._(\n \"the number of items must be equal for all properties\",\n deferred=True,\n )\n )\n\n return {k: np.asarray(v) for k, v in properties.items()}\n\n\ndef _validate_property_choices(property_choices):\n if property_choices is None:\n property_choices = {}\n return {k: np.unique(v) for k, v in property_choices.items()}\n\n\ndef _coerce_current_properties_value(\n value: Union[float, str, bool, list, tuple, np.ndarray]\n) -> np.ndarray:\n \"\"\"Coerce a value in a current_properties dictionary into the correct type.\n\n Parameters\n ----------\n value : Union[float, str, int, bool, list, tuple, np.ndarray]\n The value to be coerced.\n\n Returns\n -------\n coerced_value : np.ndarray\n The value in a 1D numpy array with length 1.\n \"\"\"\n if isinstance(value, (np.ndarray, list, tuple)):\n if len(value) != 1:\n raise ValueError(\n trans._(\n 'current_properties values should have length 1.',\n deferred=True,\n )\n )\n coerced_value = np.asarray(value)\n else:\n coerced_value = np.array([value])\n\n return coerced_value\n\n\ndef coerce_current_properties(\n current_properties: Mapping[\n str, Union[float, str, int, bool, list, tuple, npt.NDArray]\n ]\n) -> Dict[str, np.ndarray]:\n \"\"\"Coerce a current_properties dictionary into the correct type.\n\n\n Parameters\n ----------\n current_properties : Dict[str, Union[float, str, int, bool, list, tuple, np.ndarray]]\n The current_properties dictionary to be coerced.\n\n Returns\n -------\n coerced_current_properties : Dict[str, np.ndarray]\n The current_properties dictionary with string keys and 1D numpy array with length 1 values.\n \"\"\"\n coerced_current_properties = {\n k: _coerce_current_properties_value(v)\n for k, v in current_properties.items()\n }\n\n return 
coerced_current_properties\n\n\ndef compute_multiscale_level(\n requested_shape, shape_threshold, downsample_factors\n):\n \"\"\"Computed desired level of the multiscale given requested field of view.\n\n The level of the multiscale should be the lowest resolution such that\n the requested shape is above the shape threshold. By passing a shape\n threshold corresponding to the shape of the canvas on the screen this\n ensures that we have at least one data pixel per screen pixel, but no\n more than we need.\n\n Parameters\n ----------\n requested_shape : tuple\n Requested shape of field of view in data coordinates\n shape_threshold : tuple\n Maximum size of a displayed tile in pixels.\n downsample_factors : list of tuple\n Downsampling factors for each level of the multiscale. Must be increasing\n for each level of the multiscale.\n\n Returns\n -------\n level : int\n Level of the multiscale to be viewing.\n \"\"\"\n # Scale shape by downsample factors\n scaled_shape = requested_shape / downsample_factors\n\n # Find the highest level (lowest resolution) allowed\n locations = np.argwhere(np.all(scaled_shape > shape_threshold, axis=1))\n level = locations[-1][0] if len(locations) > 0 else 0\n return level\n\n\ndef compute_multiscale_level_and_corners(\n corner_pixels, shape_threshold, downsample_factors\n):\n \"\"\"Computed desired level and corners of a multiscale view.\n\n The level of the multiscale should be the lowest resolution such that\n the requested shape is above the shape threshold. By passing a shape\n threshold corresponding to the shape of the canvas on the screen this\n ensures that we have at least one data pixel per screen pixel, but no\n more than we need.\n\n Parameters\n ----------\n corner_pixels : array (2, D)\n Requested corner pixels at full resolution.\n shape_threshold : tuple\n Maximum size of a displayed tile in pixels.\n downsample_factors : list of tuple\n Downsampling factors for each level of the multiscale. Must be increasing\n for each level of the multiscale.\n\n Returns\n -------\n level : int\n Level of the multiscale to be viewing.\n corners : array (2, D)\n Needed corner pixels at target resolution.\n \"\"\"\n requested_shape = corner_pixels[1] - corner_pixels[0]\n level = compute_multiscale_level(\n requested_shape, shape_threshold, downsample_factors\n )\n\n corners = corner_pixels / downsample_factors[level]\n corners = np.array([np.floor(corners[0]), np.ceil(corners[1])]).astype(int)\n\n return level, corners\n\n\ndef coerce_affine(affine, *, ndim, name=None):\n \"\"\"Coerce a user input into an affine transform object.\n\n If the input is already an affine transform object, that same object is returned\n with a name change if the given name is not None. If the input is None, an identity\n affine transform object of the given dimensionality is returned.\n\n Parameters\n ----------\n affine : array-like or napari.utils.transforms.Affine\n An existing affine transform object or an array-like that is its transform matrix.\n ndim : int\n The desired dimensionality of the transform. 
Ignored is affine is an Affine transform object.\n name : str\n The desired name of the transform.\n\n Returns\n -------\n napari.utils.transforms.Affine\n The input coerced into an affine transform object.\n \"\"\"\n if affine is None:\n affine = Affine(affine_matrix=np.eye(ndim + 1), ndim=ndim)\n elif isinstance(affine, np.ndarray):\n affine = Affine(affine_matrix=affine, ndim=ndim)\n elif isinstance(affine, list):\n affine = Affine(affine_matrix=np.array(affine), ndim=ndim)\n elif not isinstance(affine, Affine):\n raise TypeError(\n trans._(\n 'affine input not recognized. must be either napari.utils.transforms.Affine or ndarray. Got {dtype}',\n deferred=True,\n dtype=type(affine),\n )\n )\n if name is not None:\n affine.name = name\n return affine\n\n\ndef dims_displayed_world_to_layer(\n dims_displayed_world: List[int],\n ndim_world: int,\n ndim_layer: int,\n) -> List[int]:\n \"\"\"Convert the dims_displayed from world dims to the layer dims.\n\n This accounts differences in the number of dimensions in the world\n dims versus the layer and for transpose and rolls.\n\n Parameters\n ----------\n dims_displayed_world : List[int]\n The dims_displayed in world coordinates (i.e., from viewer.dims.displayed).\n ndim_world : int\n The number of dimensions in the world coordinates (i.e., viewer.dims.ndim)\n ndim_layer : int\n The number of dimensions in layer the layer (i.e., layer.ndim).\n \"\"\"\n if ndim_world > len(dims_displayed_world):\n all_dims = list(range(ndim_world))\n not_in_dims_displayed = [\n d for d in all_dims if d not in dims_displayed_world\n ]\n order = not_in_dims_displayed + dims_displayed_world\n else:\n order = dims_displayed_world\n offset = ndim_world - ndim_layer\n\n order_arr = np.array(order)\n if offset <= 0:\n order = list(range(-offset)) + list(order_arr - offset)\n else:\n order = list(order_arr[order_arr >= offset] - offset)\n\n n_display_world = len(dims_displayed_world)\n if n_display_world > ndim_layer:\n n_display_layer = ndim_layer\n else:\n n_display_layer = n_display_world\n dims_displayed = order[-n_display_layer:]\n\n return dims_displayed\n\n\ndef get_extent_world(data_extent, data_to_world, centered=None):\n \"\"\"Range of layer in world coordinates base on provided data_extent\n\n Parameters\n ----------\n data_extent : array, shape (2, D)\n Extent of layer in data coordinates.\n data_to_world : napari.utils.transforms.Affine\n The transform from data to world coordinates.\n\n Returns\n -------\n extent_world : array, shape (2, D)\n \"\"\"\n if centered is not None:\n warnings.warn(\n trans._(\n 'The `centered` argument is deprecated. 
'\n 'Extents are now always centered on data points.',\n deferred=True,\n ),\n stacklevel=2,\n )\n\n D = data_extent.shape[1]\n full_data_extent = np.array(np.meshgrid(*data_extent.T)).T.reshape(-1, D)\n full_world_extent = data_to_world(full_data_extent)\n world_extent = np.array(\n [\n np.min(full_world_extent, axis=0),\n np.max(full_world_extent, axis=0),\n ]\n )\n return world_extent\n\n\ndef features_to_pandas_dataframe(features: Any) -> pd.DataFrame:\n \"\"\"Coerces a layer's features property to a pandas DataFrame.\n\n In general, this may copy the data from features into the returned\n DataFrame so there is no guarantee that changing element values in the\n returned DataFrame will also change values in the features property.\n\n Parameters\n ----------\n features\n The features property of a layer.\n\n Returns\n -------\n pd.DataFrame\n A pandas DataFrame that stores the given features.\n \"\"\"\n return features\n\n\nclass _FeatureTable:\n \"\"\"Stores feature values and their defaults.\n\n Parameters\n ----------\n values : Optional[Union[Dict[str, np.ndarray], pd.DataFrame]]\n The features values, which will be passed to the pandas DataFrame initializer.\n If this is a pandas DataFrame with a non-default index, that index\n (except its length) will be ignored.\n num_data : Optional[int]\n The number of the elements in the layer calling this, such as\n the number of points, which is used to check that the features\n table has the expected number of rows. If None, then the default\n DataFrame index is used.\n defaults: Optional[Union[Dict[str, Any], pd.DataFrame]]\n The default feature values, which if specified should have the same keys\n as the values provided. If None, will be inferred from the values.\n \"\"\"\n\n def __init__(\n self,\n values: Optional[Union[Dict[str, np.ndarray], pd.DataFrame]] = None,\n *,\n num_data: Optional[int] = None,\n defaults: Optional[Union[Dict[str, Any], pd.DataFrame]] = None,\n ) -> None:\n self._values = _validate_features(values, num_data=num_data)\n self._defaults = _validate_feature_defaults(defaults, self._values)\n\n @property\n def values(self) -> pd.DataFrame:\n \"\"\"The feature values table.\"\"\"\n return self._values\n\n def set_values(self, values, *, num_data=None) -> None:\n \"\"\"Sets the feature values table.\"\"\"\n self._values = _validate_features(values, num_data=num_data)\n self._defaults = _validate_feature_defaults(None, self._values)\n\n @property\n def defaults(self) -> pd.DataFrame:\n \"\"\"The default values one-row table.\"\"\"\n return self._defaults\n\n def set_defaults(\n self, defaults: Union[Dict[str, Any], pd.DataFrame]\n ) -> None:\n \"\"\"Sets the feature default values.\"\"\"\n self._defaults = _validate_feature_defaults(defaults, self._values)\n\n def properties(self) -> Dict[str, np.ndarray]:\n \"\"\"Converts this to a deprecated properties dictionary.\n\n This will reference the features data when possible, but in general the\n returned dictionary may contain copies of those data.\n\n Returns\n -------\n Dict[str, np.ndarray]\n The properties dictionary equivalent to the given features.\n \"\"\"\n return _features_to_properties(self._values)\n\n def choices(self) -> Dict[str, np.ndarray]:\n \"\"\"Converts this to a deprecated property choices dictionary.\n\n Only categorical features will have corresponding entries in the dictionary.\n\n Returns\n -------\n Dict[str, np.ndarray]\n The property choices dictionary equivalent to this.\n \"\"\"\n return {\n name: series.dtype.categories.to_numpy()\n for 
name, series in self._values.items()\n if isinstance(series.dtype, pd.CategoricalDtype)\n }\n\n def currents(self) -> Dict[str, np.ndarray]:\n \"\"\"Converts the defaults table to a deprecated current properties dictionary.\"\"\"\n return _features_to_properties(self._defaults)\n\n def set_currents(\n self,\n currents: Dict[str, npt.NDArray],\n *,\n update_indices: Optional[List[int]] = None,\n ) -> None:\n \"\"\"Sets the default values using the deprecated current properties dictionary.\n\n May also update some of the feature values to be equal to the new default values.\n\n Parameters\n ----------\n currents : Dict[str, np.ndarray]\n The new current property values.\n update_indices : Optional[List[int]]\n If not None, the all features values at the given row indices will be set to\n the corresponding new current/default feature values.\n \"\"\"\n currents = coerce_current_properties(currents)\n self._defaults = _validate_features(currents, num_data=1)\n if update_indices is not None:\n for k in self._defaults:\n self._values[k][update_indices] = self._defaults[k][0]\n\n def resize(\n self,\n size: int,\n ) -> None:\n \"\"\"Resize this padding with default values if required.\n\n Parameters\n ----------\n size : int\n The new size (number of rows) of the features table.\n \"\"\"\n current_size = self._values.shape[0]\n if size < current_size:\n self.remove(range(size, current_size))\n elif size > current_size:\n to_append = self._defaults.iloc[np.zeros(size - current_size)]\n self.append(to_append)\n\n def append(self, to_append: pd.DataFrame) -> None:\n \"\"\"Append new feature rows to this.\n\n Parameters\n ----------\n to_append : pd.DataFrame\n The features to append.\n \"\"\"\n self._values = pd.concat([self._values, to_append], ignore_index=True)\n\n def remove(self, indices: Any) -> None:\n \"\"\"Remove rows from this by index.\n\n Parameters\n ----------\n indices : Any\n The indices of the rows to remove. 
Must be usable as the labels parameter\n to pandas.DataFrame.drop.\n \"\"\"\n self._values = self._values.drop(labels=indices, axis=0).reset_index(\n drop=True\n )\n\n def reorder(self, order: Sequence[int]) -> None:\n \"\"\"Reorders the rows of the feature values table.\"\"\"\n self._values = self._values.iloc[order].reset_index(drop=True)\n\n @classmethod\n def from_layer(\n cls,\n *,\n features: Optional[Union[Dict[str, np.ndarray], pd.DataFrame]] = None,\n feature_defaults: Optional[Union[Dict[str, Any], pd.DataFrame]] = None,\n properties: Optional[\n Union[Dict[str, np.ndarray], pd.DataFrame]\n ] = None,\n property_choices: Optional[Dict[str, np.ndarray]] = None,\n num_data: Optional[int] = None,\n ) -> _FeatureTable:\n \"\"\"Coerces a layer's keyword arguments to a feature manager.\n\n Parameters\n ----------\n features : Optional[Union[Dict[str, np.ndarray], pd.DataFrame]]\n The features input to a layer.\n properties : Optional[Union[Dict[str, np.ndarray], pd.DataFrame]]\n The properties input to a layer.\n property_choices : Optional[Dict[str, np.ndarray]]\n The property choices input to a layer.\n num_data : Optional[int]\n The number of the elements in the layer calling this, such as\n the number of points.\n\n Returns\n -------\n _FeatureTable\n The feature manager created from the given layer keyword arguments.\n\n Raises\n ------\n ValueError\n If the input property columns are not all the same length, or if\n that length is not equal to the given num_data.\n \"\"\"\n if properties is not None or property_choices is not None:\n features = _features_from_properties(\n properties=properties,\n property_choices=property_choices,\n num_data=num_data,\n )\n return cls(features, defaults=feature_defaults, num_data=num_data)\n\n\ndef _get_default_column(column: pd.Series) -> pd.Series:\n \"\"\"Get the default column of length 1 from a data column.\"\"\"\n value = None\n if column.size > 0:\n value = column.iloc[-1]\n elif isinstance(column.dtype, pd.CategoricalDtype):\n choices = column.dtype.categories\n if choices.size > 0:\n value = choices[0]\n elif isinstance(column.dtype, np.dtype) and np.issubdtype(\n column.dtype, np.integer\n ):\n # For numpy backed columns that store integers there's no way to\n # store missing values, so passing None creates an np.float64 series\n # containing NaN. 
Therefore, use a default of 0 instead.\n value = 0\n return pd.Series(data=value, dtype=column.dtype, index=range(1))\n\n\ndef _validate_features(\n features: Optional[Union[Dict[str, np.ndarray], pd.DataFrame]],\n *,\n num_data: Optional[int] = None,\n) -> pd.DataFrame:\n \"\"\"Validates and coerces feature values into a pandas DataFrame.\n\n See Also\n --------\n :class:`_FeatureTable` : See initialization for parameter descriptions.\n \"\"\"\n if isinstance(features, pd.DataFrame):\n features = features.reset_index(drop=True)\n elif isinstance(features, dict):\n # Convert all array-like objects into a numpy array.\n # This section was introduced due to an unexpected behavior when using\n # a pandas Series with mixed indices as input.\n # This way should handle all array-like objects correctly.\n # See https://github.com/napari/napari/pull/4755 for more details.\n features = {\n key: np.array(value, copy=False) for key, value in features.items()\n }\n index = None if num_data is None else range(num_data)\n return pd.DataFrame(data=features, index=index)\n\n\ndef _validate_feature_defaults(\n defaults: Optional[Union[Dict[str, Any], pd.DataFrame]],\n values: pd.DataFrame,\n) -> pd.DataFrame:\n \"\"\"Validates and coerces feature default values into a pandas DataFrame.\n\n See Also\n --------\n :class:`_FeatureTable` : See initialization for parameter descriptions.\n \"\"\"\n if defaults is None:\n defaults = {c: _get_default_column(values[c]) for c in values.columns}\n else:\n default_columns = set(defaults.keys())\n value_columns = set(values.keys())\n extra_defaults = default_columns - value_columns\n if len(extra_defaults) > 0:\n raise ValueError(\n trans._(\n 'Feature defaults contain some extra columns not in feature values: {extra_defaults}',\n deferred=True,\n extra_defaults=extra_defaults,\n )\n )\n missing_defaults = value_columns - default_columns\n if len(missing_defaults) > 0:\n raise ValueError(\n trans._(\n 'Feature defaults is missing some columns in feature values: {missing_defaults}',\n deferred=True,\n missing_defaults=missing_defaults,\n )\n )\n # Convert to series first to capture the per-column dtype from values,\n # since the DataFrame initializer does not support passing multiple dtypes.\n defaults = {\n c: pd.Series(\n defaults[c],\n dtype=values.dtypes[c],\n index=range(1),\n )\n for c in defaults\n }\n\n return pd.DataFrame(defaults, index=range(1))\n\n\ndef _features_from_properties(\n *,\n properties: Optional[Union[Dict[str, np.ndarray], pd.DataFrame]] = None,\n property_choices: Optional[Dict[str, np.ndarray]] = None,\n num_data: Optional[int] = None,\n) -> pd.DataFrame:\n \"\"\"Validates and coerces deprecated properties input into a features DataFrame.\n\n See Also\n --------\n :meth:`_FeatureTable.from_layer`\n \"\"\"\n # Create categorical series for any choices provided.\n if property_choices is not None:\n properties = pd.DataFrame(data=properties)\n for name, choices in property_choices.items():\n dtype = pd.CategoricalDtype(categories=choices)\n num_values = properties.shape[0] if num_data is None else num_data\n values = (\n properties[name] if name in properties else [None] * num_values\n )\n properties[name] = pd.Series(values, dtype=dtype)\n return _validate_features(properties, num_data=num_data)\n\n\ndef _features_to_properties(features: pd.DataFrame) -> Dict[str, np.ndarray]:\n \"\"\"Converts a features DataFrame to a deprecated properties dictionary.\n\n See Also\n --------\n :meth:`_FeatureTable.properties`\n \"\"\"\n return {name: 
series.to_numpy() for name, series in features.items()}\n\n\ndef _unique_element(array: Array) -> Optional[Any]:\n \"\"\"\n Returns the unique element along the 0th axis, if it exists; otherwise, returns None.\n\n This is faster than np.unique, does not require extra tricks for nD arrays, and\n does not fail for non-sortable elements.\n \"\"\"\n if len(array) == 0:\n return None\n el = array[0]\n if np.any(array[1:] != el):\n return None\n return el\n", "path": "napari/layers/utils/layer_utils.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport functools\nimport inspect\nimport warnings\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Dict,\n List,\n NamedTuple,\n Optional,\n Sequence,\n Tuple,\n Union,\n)\n\nimport dask\nimport numpy as np\nimport pandas as pd\n\nfrom napari.utils.action_manager import action_manager\nfrom napari.utils.events.custom_types import Array\nfrom napari.utils.transforms import Affine\nfrom napari.utils.translations import trans\n\nif TYPE_CHECKING:\n from typing import Mapping\n\n import numpy.typing as npt\n\n\nclass Extent(NamedTuple):\n \"\"\"Extent of coordinates in a local data space and world space.\n\n Each extent is a (2, D) array that stores the minimum and maximum coordinate\n values in each of D dimensions. Both the minimum and maximum coordinates are\n inclusive so form an axis-aligned, closed interval or a D-dimensional box\n around all the coordinates.\n\n Attributes\n ----------\n data : (2, D) array of floats\n The minimum and maximum raw data coordinates ignoring any transforms like\n translation or scale.\n world : (2, D) array of floats\n The minimum and maximum world coordinates after applying a transform to the\n raw data coordinates that brings them into a potentially shared world space.\n step : (D,) array of floats\n The step in each dimension that when taken from the minimum world coordinate,\n should form a regular grid that eventually hits the maximum world coordinate.\n \"\"\"\n\n data: np.ndarray\n world: np.ndarray\n step: np.ndarray\n\n\ndef register_layer_action(\n keymapprovider,\n description: str,\n repeatable: bool = False,\n shortcuts: Optional[str] = None,\n):\n \"\"\"\n Convenient decorator to register an action with the current Layers\n\n It will use the function name as the action name. We force the description\n to be given instead of function docstring for translation purpose.\n\n\n Parameters\n ----------\n keymapprovider : KeymapProvider\n class on which to register the keybindings - this will typically be\n the instance in focus that will handle the keyboard shortcut.\n description : str\n The description of the action, this will typically be translated and\n will be what will be used in tooltips.\n repeatable : bool\n A flag indicating whether the action autorepeats when key is held\n shortcuts : str | List[str]\n Shortcut to bind by default to the action we are registering.\n\n Returns\n -------\n function:\n Actual decorator to apply to a function. 
Given decorator returns the\n function unmodified to allow decorator stacking.\n\n \"\"\"\n\n def _inner(func):\n nonlocal shortcuts\n name = 'napari:' + func.__name__\n\n action_manager.register_action(\n name=name,\n command=func,\n description=description,\n keymapprovider=keymapprovider,\n repeatable=repeatable,\n )\n if shortcuts:\n if isinstance(shortcuts, str):\n shortcuts = [shortcuts]\n\n for shortcut in shortcuts:\n action_manager.bind_shortcut(name, shortcut)\n return func\n\n return _inner\n\n\ndef register_layer_attr_action(\n keymapprovider,\n description: str,\n attribute_name: str,\n shortcuts=None,\n):\n \"\"\"\n Convenient decorator to register an action with the current Layers.\n This will get and restore attribute from function first argument.\n\n It will use the function name as the action name. We force the description\n to be given instead of function docstring for translation purpose.\n\n Parameters\n ----------\n keymapprovider : KeymapProvider\n class on which to register the keybindings - this will typically be\n the instance in focus that will handle the keyboard shortcut.\n description : str\n The description of the action, this will typically be translated and\n will be what will be used in tooltips.\n attribute_name : str\n The name of the attribute to be restored if key is hold over `get_settings().get_settings().application.hold_button_delay.\n shortcuts : str | List[str]\n Shortcut to bind by default to the action we are registering.\n\n Returns\n -------\n function:\n Actual decorator to apply to a function. Given decorator returns the\n function unmodified to allow decorator stacking.\n\n \"\"\"\n\n def _handle(func):\n sig = inspect.signature(func)\n try:\n first_variable_name = next(iter(sig.parameters))\n except StopIteration as e:\n raise RuntimeError(\n trans._(\n \"If actions has no arguments there is no way to know what to set the attribute to.\",\n deferred=True,\n ),\n ) from e\n\n @functools.wraps(func)\n def _wrapper(*args, **kwargs):\n obj = args[0] if args else kwargs[first_variable_name]\n prev_mode = getattr(obj, attribute_name)\n func(*args, **kwargs)\n\n def _callback():\n setattr(obj, attribute_name, prev_mode)\n\n return _callback\n\n repeatable = False # attribute actions are always non-repeatable\n register_layer_action(\n keymapprovider, description, repeatable, shortcuts\n )(_wrapper)\n return func\n\n return _handle\n\n\ndef _nanmin(array):\n \"\"\"\n call np.min but fall back to avoid nan and inf if necessary\n \"\"\"\n min_value = np.min(array)\n if not np.isfinite(min_value):\n masked = array[np.isfinite(array)]\n if masked.size == 0:\n return 0\n min_value = np.min(masked)\n return min_value\n\n\ndef _nanmax(array):\n \"\"\"\n call np.max but fall back to avoid nan and inf if necessary\n \"\"\"\n max_value = np.max(array)\n if not np.isfinite(max_value):\n masked = array[np.isfinite(array)]\n if masked.size == 0:\n return 1\n max_value = np.max(masked)\n return max_value\n\n\ndef calc_data_range(data, rgb=False) -> Tuple[float, float]:\n \"\"\"Calculate range of data values. 
If all values are equal return [0, 1].\n\n Parameters\n ----------\n data : array\n Data to calculate range of values over.\n rgb : bool\n Flag if data is rgb.\n\n Returns\n -------\n values : pair of floats\n Minimum and maximum values in that order.\n\n Notes\n -----\n If the data type is uint8, no calculation is performed, and 0-255 is\n returned.\n \"\"\"\n if data.dtype == np.uint8:\n return (0, 255)\n\n center: Union[int, List[int]]\n\n if data.size > 1e7 and (data.ndim == 1 or (rgb and data.ndim == 2)):\n # If data is very large take the average of start, middle and end.\n center = int(data.shape[0] // 2)\n slices = [\n slice(0, 4096),\n slice(center - 2048, center + 2048),\n slice(-4096, None),\n ]\n reduced_data = [\n [_nanmax(data[sl]) for sl in slices],\n [_nanmin(data[sl]) for sl in slices],\n ]\n elif data.size > 1e7:\n # If data is very large take the average of the top, bottom, and\n # middle slices\n offset = 2 + int(rgb)\n bottom_plane_idx = (0,) * (data.ndim - offset)\n middle_plane_idx = tuple(s // 2 for s in data.shape[:-offset])\n top_plane_idx = tuple(s - 1 for s in data.shape[:-offset])\n idxs = [bottom_plane_idx, middle_plane_idx, top_plane_idx]\n # If each plane is also very large, look only at a subset of the image\n if (\n np.prod(data.shape[-offset:]) > 1e7\n and data.shape[-offset] > 64\n and data.shape[-offset + 1] > 64\n ):\n # Find a central patch of the image to take\n center = [int(s // 2) for s in data.shape[-offset:]]\n central_slice = tuple(slice(c - 31, c + 31) for c in center[:2])\n reduced_data = [\n [_nanmax(data[idx + central_slice]) for idx in idxs],\n [_nanmin(data[idx + central_slice]) for idx in idxs],\n ]\n else:\n reduced_data = [\n [_nanmax(data[idx]) for idx in idxs],\n [_nanmin(data[idx]) for idx in idxs],\n ]\n # compute everything in one go\n reduced_data = dask.compute(*reduced_data)\n else:\n reduced_data = data\n\n min_val = _nanmin(reduced_data)\n max_val = _nanmax(reduced_data)\n\n if min_val == max_val:\n min_val = 0\n max_val = 1\n return (float(min_val), float(max_val))\n\n\ndef segment_normal(a, b, p=(0, 0, 1)):\n \"\"\"Determines the unit normal of the vector from a to b.\n\n Parameters\n ----------\n a : np.ndarray\n Length 2 array of first point or Nx2 array of points\n b : np.ndarray\n Length 2 array of second point or Nx2 array of points\n p : 3-tuple, optional\n orthogonal vector for segment calculation in 3D.\n\n Returns\n -------\n unit_norm : np.ndarray\n Length the unit normal of the vector from a to b. 
If a == b,\n then returns [0, 0] or Nx2 array of vectors\n \"\"\"\n d = b - a\n\n if d.ndim == 1:\n normal = np.array([d[1], -d[0]]) if len(d) == 2 else np.cross(d, p)\n norm = np.linalg.norm(normal)\n if norm == 0:\n norm = 1\n else:\n if d.shape[1] == 2:\n normal = np.stack([d[:, 1], -d[:, 0]], axis=0).transpose(1, 0)\n else:\n normal = np.cross(d, p)\n\n norm = np.linalg.norm(normal, axis=1, keepdims=True)\n ind = norm == 0\n norm[ind] = 1\n unit_norm = normal / norm\n\n return unit_norm\n\n\ndef convert_to_uint8(data: np.ndarray) -> Optional[np.ndarray]:\n \"\"\"\n Convert array content to uint8, always returning a copy.\n\n Based on skimage.util.dtype._convert but limited to an output type uint8,\n so should be equivalent to skimage.util.dtype.img_as_ubyte.\n\n If all negative, values are clipped to 0.\n\n If values are integers and below 256, this simply casts.\n Otherwise the maximum value for the input data type is determined and\n output values are proportionally scaled by this value.\n\n Binary images are converted so that False -> 0, True -> 255.\n\n Float images are multiplied by 255 and then cast to uint8.\n \"\"\"\n out_dtype = np.dtype(np.uint8)\n out_max = np.iinfo(out_dtype).max\n if data.dtype == out_dtype:\n return data\n in_kind = data.dtype.kind\n if in_kind == \"b\":\n return data.astype(out_dtype) * 255\n if in_kind == \"f\":\n image_out = np.multiply(data, out_max, dtype=data.dtype)\n np.rint(image_out, out=image_out)\n np.clip(image_out, 0, out_max, out=image_out)\n image_out = np.nan_to_num(image_out, copy=False)\n return image_out.astype(out_dtype)\n\n if in_kind in \"ui\":\n if in_kind == \"u\":\n if data.max() < out_max:\n return data.astype(out_dtype)\n return np.right_shift(data, (data.dtype.itemsize - 1) * 8).astype(\n out_dtype\n )\n\n np.maximum(data, 0, out=data, dtype=data.dtype)\n if data.dtype == np.int8:\n return (data * 2).astype(np.uint8)\n if data.max() < out_max:\n return data.astype(out_dtype)\n return np.right_shift(data, (data.dtype.itemsize - 1) * 8 - 1).astype(\n out_dtype\n )\n return None\n\n\ndef get_current_properties(\n properties: Dict[str, np.ndarray],\n choices: Dict[str, np.ndarray],\n num_data: int = 0,\n) -> Dict[str, Any]:\n \"\"\"Get the current property values from the properties or choices.\n\n Parameters\n ----------\n properties : dict[str, np.ndarray]\n The property values.\n choices : dict[str, np.ndarray]\n The property value choices.\n num_data : int\n The length of data that the properties represent (e.g. 
number of points).\n\n Returns\n -------\n dict[str, Any]\n A dictionary where the key is the property name and the value is the current\n value of that property.\n \"\"\"\n current_properties = {}\n if num_data > 0:\n current_properties = {\n k: np.asarray([v[-1]]) for k, v in properties.items()\n }\n elif num_data == 0 and len(choices) > 0:\n current_properties = {\n k: np.asarray([v[0]]) for k, v in choices.items()\n }\n return current_properties\n\n\ndef dataframe_to_properties(\n dataframe: pd.DataFrame,\n) -> Dict[str, np.ndarray]:\n \"\"\"Convert a dataframe to a properties dictionary.\n Parameters\n ----------\n dataframe : DataFrame\n The dataframe object to be converted to a properties dictionary\n Returns\n -------\n dict[str, np.ndarray]\n A properties dictionary where the key is the property name and the value\n is an ndarray with the property value for each point.\n \"\"\"\n return {col: np.asarray(dataframe[col]) for col in dataframe}\n\n\ndef validate_properties(\n properties: Optional[Union[Dict[str, Array], pd.DataFrame]],\n expected_len: Optional[int] = None,\n) -> Dict[str, np.ndarray]:\n \"\"\"Validate the type and size of properties and coerce values to numpy arrays.\n Parameters\n ----------\n properties : dict[str, Array] or DataFrame\n The property values.\n expected_len : int\n The expected length of each property value array.\n Returns\n -------\n Dict[str, np.ndarray]\n The property values.\n \"\"\"\n if properties is None or len(properties) == 0:\n return {}\n\n if not isinstance(properties, dict):\n properties = dataframe_to_properties(properties)\n\n lens = [len(v) for v in properties.values()]\n if expected_len is None:\n expected_len = lens[0]\n if any(v != expected_len for v in lens):\n raise ValueError(\n trans._(\n \"the number of items must be equal for all properties\",\n deferred=True,\n )\n )\n\n return {k: np.asarray(v) for k, v in properties.items()}\n\n\ndef _validate_property_choices(property_choices):\n if property_choices is None:\n property_choices = {}\n return {k: np.unique(v) for k, v in property_choices.items()}\n\n\ndef _coerce_current_properties_value(\n value: Union[float, str, bool, list, tuple, np.ndarray]\n) -> np.ndarray:\n \"\"\"Coerce a value in a current_properties dictionary into the correct type.\n\n Parameters\n ----------\n value : Union[float, str, int, bool, list, tuple, np.ndarray]\n The value to be coerced.\n\n Returns\n -------\n coerced_value : np.ndarray\n The value in a 1D numpy array with length 1.\n \"\"\"\n if isinstance(value, (np.ndarray, list, tuple)):\n if len(value) != 1:\n raise ValueError(\n trans._(\n 'current_properties values should have length 1.',\n deferred=True,\n )\n )\n coerced_value = np.asarray(value)\n else:\n coerced_value = np.array([value])\n\n return coerced_value\n\n\ndef coerce_current_properties(\n current_properties: Mapping[\n str, Union[float, str, int, bool, list, tuple, npt.NDArray]\n ]\n) -> Dict[str, np.ndarray]:\n \"\"\"Coerce a current_properties dictionary into the correct type.\n\n\n Parameters\n ----------\n current_properties : Dict[str, Union[float, str, int, bool, list, tuple, np.ndarray]]\n The current_properties dictionary to be coerced.\n\n Returns\n -------\n coerced_current_properties : Dict[str, np.ndarray]\n The current_properties dictionary with string keys and 1D numpy array with length 1 values.\n \"\"\"\n coerced_current_properties = {\n k: _coerce_current_properties_value(v)\n for k, v in current_properties.items()\n }\n\n return 
coerced_current_properties\n\n\ndef compute_multiscale_level(\n requested_shape, shape_threshold, downsample_factors\n):\n \"\"\"Computed desired level of the multiscale given requested field of view.\n\n The level of the multiscale should be the lowest resolution such that\n the requested shape is above the shape threshold. By passing a shape\n threshold corresponding to the shape of the canvas on the screen this\n ensures that we have at least one data pixel per screen pixel, but no\n more than we need.\n\n Parameters\n ----------\n requested_shape : tuple\n Requested shape of field of view in data coordinates\n shape_threshold : tuple\n Maximum size of a displayed tile in pixels.\n downsample_factors : list of tuple\n Downsampling factors for each level of the multiscale. Must be increasing\n for each level of the multiscale.\n\n Returns\n -------\n level : int\n Level of the multiscale to be viewing.\n \"\"\"\n # Scale shape by downsample factors\n scaled_shape = requested_shape / downsample_factors\n\n # Find the highest level (lowest resolution) allowed\n locations = np.argwhere(np.all(scaled_shape > shape_threshold, axis=1))\n level = locations[-1][0] if len(locations) > 0 else 0\n return level\n\n\ndef compute_multiscale_level_and_corners(\n corner_pixels, shape_threshold, downsample_factors\n):\n \"\"\"Computed desired level and corners of a multiscale view.\n\n The level of the multiscale should be the lowest resolution such that\n the requested shape is above the shape threshold. By passing a shape\n threshold corresponding to the shape of the canvas on the screen this\n ensures that we have at least one data pixel per screen pixel, but no\n more than we need.\n\n Parameters\n ----------\n corner_pixels : array (2, D)\n Requested corner pixels at full resolution.\n shape_threshold : tuple\n Maximum size of a displayed tile in pixels.\n downsample_factors : list of tuple\n Downsampling factors for each level of the multiscale. Must be increasing\n for each level of the multiscale.\n\n Returns\n -------\n level : int\n Level of the multiscale to be viewing.\n corners : array (2, D)\n Needed corner pixels at target resolution.\n \"\"\"\n requested_shape = corner_pixels[1] - corner_pixels[0]\n level = compute_multiscale_level(\n requested_shape, shape_threshold, downsample_factors\n )\n\n corners = corner_pixels / downsample_factors[level]\n corners = np.array([np.floor(corners[0]), np.ceil(corners[1])]).astype(int)\n\n return level, corners\n\n\ndef coerce_affine(affine, *, ndim, name=None):\n \"\"\"Coerce a user input into an affine transform object.\n\n If the input is already an affine transform object, that same object is returned\n with a name change if the given name is not None. If the input is None, an identity\n affine transform object of the given dimensionality is returned.\n\n Parameters\n ----------\n affine : array-like or napari.utils.transforms.Affine\n An existing affine transform object or an array-like that is its transform matrix.\n ndim : int\n The desired dimensionality of the transform. 
Ignored is affine is an Affine transform object.\n name : str\n The desired name of the transform.\n\n Returns\n -------\n napari.utils.transforms.Affine\n The input coerced into an affine transform object.\n \"\"\"\n if affine is None:\n affine = Affine(affine_matrix=np.eye(ndim + 1), ndim=ndim)\n elif isinstance(affine, np.ndarray):\n affine = Affine(affine_matrix=affine, ndim=ndim)\n elif isinstance(affine, list):\n affine = Affine(affine_matrix=np.array(affine), ndim=ndim)\n elif not isinstance(affine, Affine):\n raise TypeError(\n trans._(\n 'affine input not recognized. must be either napari.utils.transforms.Affine or ndarray. Got {dtype}',\n deferred=True,\n dtype=type(affine),\n )\n )\n if name is not None:\n affine.name = name\n return affine\n\n\ndef dims_displayed_world_to_layer(\n dims_displayed_world: List[int],\n ndim_world: int,\n ndim_layer: int,\n) -> List[int]:\n \"\"\"Convert the dims_displayed from world dims to the layer dims.\n\n This accounts differences in the number of dimensions in the world\n dims versus the layer and for transpose and rolls.\n\n Parameters\n ----------\n dims_displayed_world : List[int]\n The dims_displayed in world coordinates (i.e., from viewer.dims.displayed).\n ndim_world : int\n The number of dimensions in the world coordinates (i.e., viewer.dims.ndim)\n ndim_layer : int\n The number of dimensions in layer the layer (i.e., layer.ndim).\n \"\"\"\n if ndim_world > len(dims_displayed_world):\n all_dims = list(range(ndim_world))\n not_in_dims_displayed = [\n d for d in all_dims if d not in dims_displayed_world\n ]\n order = not_in_dims_displayed + dims_displayed_world\n else:\n order = dims_displayed_world\n offset = ndim_world - ndim_layer\n\n order_arr = np.array(order)\n if offset <= 0:\n order = list(range(-offset)) + list(order_arr - offset)\n else:\n order = list(order_arr[order_arr >= offset] - offset)\n\n n_display_world = len(dims_displayed_world)\n if n_display_world > ndim_layer:\n n_display_layer = ndim_layer\n else:\n n_display_layer = n_display_world\n dims_displayed = order[-n_display_layer:]\n\n return dims_displayed\n\n\ndef get_extent_world(data_extent, data_to_world, centered=None):\n \"\"\"Range of layer in world coordinates base on provided data_extent\n\n Parameters\n ----------\n data_extent : array, shape (2, D)\n Extent of layer in data coordinates.\n data_to_world : napari.utils.transforms.Affine\n The transform from data to world coordinates.\n\n Returns\n -------\n extent_world : array, shape (2, D)\n \"\"\"\n if centered is not None:\n warnings.warn(\n trans._(\n 'The `centered` argument is deprecated. 
'\n 'Extents are now always centered on data points.',\n deferred=True,\n ),\n stacklevel=2,\n )\n\n D = data_extent.shape[1]\n full_data_extent = np.array(np.meshgrid(*data_extent.T)).T.reshape(-1, D)\n full_world_extent = data_to_world(full_data_extent)\n world_extent = np.array(\n [\n np.min(full_world_extent, axis=0),\n np.max(full_world_extent, axis=0),\n ]\n )\n return world_extent\n\n\ndef features_to_pandas_dataframe(features: Any) -> pd.DataFrame:\n \"\"\"Coerces a layer's features property to a pandas DataFrame.\n\n In general, this may copy the data from features into the returned\n DataFrame so there is no guarantee that changing element values in the\n returned DataFrame will also change values in the features property.\n\n Parameters\n ----------\n features\n The features property of a layer.\n\n Returns\n -------\n pd.DataFrame\n A pandas DataFrame that stores the given features.\n \"\"\"\n return features\n\n\nclass _FeatureTable:\n \"\"\"Stores feature values and their defaults.\n\n Parameters\n ----------\n values : Optional[Union[Dict[str, np.ndarray], pd.DataFrame]]\n The features values, which will be passed to the pandas DataFrame initializer.\n If this is a pandas DataFrame with a non-default index, that index\n (except its length) will be ignored.\n num_data : Optional[int]\n The number of the elements in the layer calling this, such as\n the number of points, which is used to check that the features\n table has the expected number of rows. If None, then the default\n DataFrame index is used.\n defaults: Optional[Union[Dict[str, Any], pd.DataFrame]]\n The default feature values, which if specified should have the same keys\n as the values provided. If None, will be inferred from the values.\n \"\"\"\n\n def __init__(\n self,\n values: Optional[Union[Dict[str, np.ndarray], pd.DataFrame]] = None,\n *,\n num_data: Optional[int] = None,\n defaults: Optional[Union[Dict[str, Any], pd.DataFrame]] = None,\n ) -> None:\n self._values = _validate_features(values, num_data=num_data)\n self._defaults = _validate_feature_defaults(defaults, self._values)\n\n @property\n def values(self) -> pd.DataFrame:\n \"\"\"The feature values table.\"\"\"\n return self._values\n\n def set_values(self, values, *, num_data=None) -> None:\n \"\"\"Sets the feature values table.\"\"\"\n self._values = _validate_features(values, num_data=num_data)\n self._defaults = _validate_feature_defaults(None, self._values)\n\n @property\n def defaults(self) -> pd.DataFrame:\n \"\"\"The default values one-row table.\"\"\"\n return self._defaults\n\n def set_defaults(\n self, defaults: Union[Dict[str, Any], pd.DataFrame]\n ) -> None:\n \"\"\"Sets the feature default values.\"\"\"\n self._defaults = _validate_feature_defaults(defaults, self._values)\n\n def properties(self) -> Dict[str, np.ndarray]:\n \"\"\"Converts this to a deprecated properties dictionary.\n\n This will reference the features data when possible, but in general the\n returned dictionary may contain copies of those data.\n\n Returns\n -------\n Dict[str, np.ndarray]\n The properties dictionary equivalent to the given features.\n \"\"\"\n return _features_to_properties(self._values)\n\n def choices(self) -> Dict[str, np.ndarray]:\n \"\"\"Converts this to a deprecated property choices dictionary.\n\n Only categorical features will have corresponding entries in the dictionary.\n\n Returns\n -------\n Dict[str, np.ndarray]\n The property choices dictionary equivalent to this.\n \"\"\"\n return {\n name: series.dtype.categories.to_numpy()\n for 
name, series in self._values.items()\n if isinstance(series.dtype, pd.CategoricalDtype)\n }\n\n def currents(self) -> Dict[str, np.ndarray]:\n \"\"\"Converts the defaults table to a deprecated current properties dictionary.\"\"\"\n return _features_to_properties(self._defaults)\n\n def set_currents(\n self,\n currents: Dict[str, npt.NDArray],\n *,\n update_indices: Optional[List[int]] = None,\n ) -> None:\n \"\"\"Sets the default values using the deprecated current properties dictionary.\n\n May also update some of the feature values to be equal to the new default values.\n\n Parameters\n ----------\n currents : Dict[str, np.ndarray]\n The new current property values.\n update_indices : Optional[List[int]]\n If not None, the all features values at the given row indices will be set to\n the corresponding new current/default feature values.\n \"\"\"\n currents = coerce_current_properties(currents)\n self._defaults = _validate_features(currents, num_data=1)\n if update_indices is not None:\n for k in self._defaults:\n self._values[k][update_indices] = self._defaults[k][0]\n\n def resize(\n self,\n size: int,\n ) -> None:\n \"\"\"Resize this padding with default values if required.\n\n Parameters\n ----------\n size : int\n The new size (number of rows) of the features table.\n \"\"\"\n current_size = self._values.shape[0]\n if size < current_size:\n self.remove(range(size, current_size))\n elif size > current_size:\n to_append = self._defaults.iloc[np.zeros(size - current_size)]\n self.append(to_append)\n\n def append(self, to_append: pd.DataFrame) -> None:\n \"\"\"Append new feature rows to this.\n\n Parameters\n ----------\n to_append : pd.DataFrame\n The features to append.\n \"\"\"\n self._values = pd.concat([self._values, to_append], ignore_index=True)\n\n def remove(self, indices: Any) -> None:\n \"\"\"Remove rows from this by index.\n\n Parameters\n ----------\n indices : Any\n The indices of the rows to remove. 
Must be usable as the labels parameter\n to pandas.DataFrame.drop.\n \"\"\"\n self._values = self._values.drop(labels=indices, axis=0).reset_index(\n drop=True\n )\n\n def reorder(self, order: Sequence[int]) -> None:\n \"\"\"Reorders the rows of the feature values table.\"\"\"\n self._values = self._values.iloc[order].reset_index(drop=True)\n\n @classmethod\n def from_layer(\n cls,\n *,\n features: Optional[Union[Dict[str, np.ndarray], pd.DataFrame]] = None,\n feature_defaults: Optional[Union[Dict[str, Any], pd.DataFrame]] = None,\n properties: Optional[\n Union[Dict[str, np.ndarray], pd.DataFrame]\n ] = None,\n property_choices: Optional[Dict[str, np.ndarray]] = None,\n num_data: Optional[int] = None,\n ) -> _FeatureTable:\n \"\"\"Coerces a layer's keyword arguments to a feature manager.\n\n Parameters\n ----------\n features : Optional[Union[Dict[str, np.ndarray], pd.DataFrame]]\n The features input to a layer.\n properties : Optional[Union[Dict[str, np.ndarray], pd.DataFrame]]\n The properties input to a layer.\n property_choices : Optional[Dict[str, np.ndarray]]\n The property choices input to a layer.\n num_data : Optional[int]\n The number of the elements in the layer calling this, such as\n the number of points.\n\n Returns\n -------\n _FeatureTable\n The feature manager created from the given layer keyword arguments.\n\n Raises\n ------\n ValueError\n If the input property columns are not all the same length, or if\n that length is not equal to the given num_data.\n \"\"\"\n if properties is not None or property_choices is not None:\n features = _features_from_properties(\n properties=properties,\n property_choices=property_choices,\n num_data=num_data,\n )\n return cls(features, defaults=feature_defaults, num_data=num_data)\n\n\ndef _get_default_column(column: pd.Series) -> pd.Series:\n \"\"\"Get the default column of length 1 from a data column.\"\"\"\n value = None\n if column.size > 0:\n value = column.iloc[-1]\n elif isinstance(column.dtype, pd.CategoricalDtype):\n choices = column.dtype.categories\n if choices.size > 0:\n value = choices[0]\n elif isinstance(column.dtype, np.dtype) and np.issubdtype(\n column.dtype, np.integer\n ):\n # For numpy backed columns that store integers there's no way to\n # store missing values, so passing None creates an np.float64 series\n # containing NaN. 
Therefore, use a default of 0 instead.\n value = 0\n return pd.Series(data=[value], dtype=column.dtype, index=range(1))\n\n\ndef _validate_features(\n features: Optional[Union[Dict[str, np.ndarray], pd.DataFrame]],\n *,\n num_data: Optional[int] = None,\n) -> pd.DataFrame:\n \"\"\"Validates and coerces feature values into a pandas DataFrame.\n\n See Also\n --------\n :class:`_FeatureTable` : See initialization for parameter descriptions.\n \"\"\"\n if isinstance(features, pd.DataFrame):\n features = features.reset_index(drop=True)\n elif isinstance(features, dict):\n # Convert all array-like objects into a numpy array.\n # This section was introduced due to an unexpected behavior when using\n # a pandas Series with mixed indices as input.\n # This way should handle all array-like objects correctly.\n # See https://github.com/napari/napari/pull/4755 for more details.\n features = {\n key: np.array(value, copy=False) for key, value in features.items()\n }\n index = None if num_data is None else range(num_data)\n return pd.DataFrame(data=features, index=index)\n\n\ndef _validate_feature_defaults(\n defaults: Optional[Union[Dict[str, Any], pd.DataFrame]],\n values: pd.DataFrame,\n) -> pd.DataFrame:\n \"\"\"Validates and coerces feature default values into a pandas DataFrame.\n\n See Also\n --------\n :class:`_FeatureTable` : See initialization for parameter descriptions.\n \"\"\"\n if defaults is None:\n defaults = {c: _get_default_column(values[c]) for c in values.columns}\n else:\n default_columns = set(defaults.keys())\n value_columns = set(values.keys())\n extra_defaults = default_columns - value_columns\n if len(extra_defaults) > 0:\n raise ValueError(\n trans._(\n 'Feature defaults contain some extra columns not in feature values: {extra_defaults}',\n deferred=True,\n extra_defaults=extra_defaults,\n )\n )\n missing_defaults = value_columns - default_columns\n if len(missing_defaults) > 0:\n raise ValueError(\n trans._(\n 'Feature defaults is missing some columns in feature values: {missing_defaults}',\n deferred=True,\n missing_defaults=missing_defaults,\n )\n )\n # Convert to series first to capture the per-column dtype from values,\n # since the DataFrame initializer does not support passing multiple dtypes.\n defaults = {\n c: pd.Series(\n defaults[c],\n dtype=values.dtypes[c],\n index=range(1),\n )\n for c in defaults\n }\n\n return pd.DataFrame(defaults, index=range(1))\n\n\ndef _features_from_properties(\n *,\n properties: Optional[Union[Dict[str, np.ndarray], pd.DataFrame]] = None,\n property_choices: Optional[Dict[str, np.ndarray]] = None,\n num_data: Optional[int] = None,\n) -> pd.DataFrame:\n \"\"\"Validates and coerces deprecated properties input into a features DataFrame.\n\n See Also\n --------\n :meth:`_FeatureTable.from_layer`\n \"\"\"\n # Create categorical series for any choices provided.\n if property_choices is not None:\n properties = pd.DataFrame(data=properties)\n for name, choices in property_choices.items():\n dtype = pd.CategoricalDtype(categories=choices)\n num_values = properties.shape[0] if num_data is None else num_data\n values = (\n properties[name] if name in properties else [None] * num_values\n )\n properties[name] = pd.Series(values, dtype=dtype)\n return _validate_features(properties, num_data=num_data)\n\n\ndef _features_to_properties(features: pd.DataFrame) -> Dict[str, np.ndarray]:\n \"\"\"Converts a features DataFrame to a deprecated properties dictionary.\n\n See Also\n --------\n :meth:`_FeatureTable.properties`\n \"\"\"\n return {name: 
series.to_numpy() for name, series in features.items()}\n\n\ndef _unique_element(array: Array) -> Optional[Any]:\n \"\"\"\n Returns the unique element along the 0th axis, if it exists; otherwise, returns None.\n\n This is faster than np.unique, does not require extra tricks for nD arrays, and\n does not fail for non-sortable elements.\n \"\"\"\n if len(array) == 0:\n return None\n el = array[0]\n if np.any(array[1:] != el):\n return None\n return el\n", "path": "napari/layers/utils/layer_utils.py"}]} |
gh_patches_debug_1270 | rasdani/github-patches | git_diff | vyperlang__vyper-3936 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`vyper-serve` is still lingering in `setup.py`
### Version Information
* vyper Version (output of `vyper --version`): doesn't matter
* OS: doesn't matter
* Python Version (output of `python --version`): doesn't matter
### What's your issue about?
You removed `vyper-serve` with this commit: https://github.com/vyperlang/vyper/commit/98f502baea6385fe25dbf94a70fb4eddc9f02f56, but you forgot to remove `vyper-serve` from `setup.py`:
```python
entry_points={
"console_scripts": [
"vyper=vyper.cli.vyper_compile:_parse_cli_args",
"vyper-serve=vyper.cli.vyper_serve:_parse_cli_args",
"fang=vyper.cli.vyper_ir:_parse_cli_args",
"vyper-json=vyper.cli.vyper_json:_parse_cli_args",
]
},
```
### How can it be fixed?
Remove `vyper-serve` line.
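For illustration, a minimal sketch of the `entry_points` block after dropping the stale line (the remaining entries are copied unchanged from the snippet above; only the removal is assumed):

```python
# setup.py excerpt after removing the dead vyper-serve console script
entry_points={
    "console_scripts": [
        "vyper=vyper.cli.vyper_compile:_parse_cli_args",
        "fang=vyper.cli.vyper_ir:_parse_cli_args",
        "vyper-json=vyper.cli.vyper_json:_parse_cli_args",
    ]
},
```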
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 import os
4 import re
5 import subprocess
6
7 from setuptools import setup
8
9 extras_require = {
10 "test": [
11 "pytest>=8.0,<9.0",
12 "pytest-cov>=4.1,<5.0",
13 "pytest-instafail>=0.4,<1.0",
14 "pytest-xdist>=3.0,<3.4",
15 "pytest-split>=0.7.0,<1.0",
16 "eth-tester[py-evm]>=0.10.0b4,<0.11",
17 "eth_abi>=4.0.0,<5.0.0",
18 "py-evm>=0.10.0b4,<0.11",
19 "web3==6.0.0",
20 "lark==1.1.9",
21 "hypothesis[lark]>=6.0,<7.0",
22 "eth-stdlib==0.2.7",
23 "setuptools",
24 "hexbytes>=1.2",
25 ],
26 "lint": [
27 "black==23.12.0",
28 "flake8==6.1.0",
29 "flake8-bugbear==23.12.2",
30 "flake8-use-fstring==1.4",
31 "isort==5.13.2",
32 "mypy==1.5",
33 ],
34 "dev": ["ipython", "pre-commit", "pyinstaller", "twine"],
35 }
36
37 extras_require["dev"] = extras_require["dev"] + extras_require["test"] + extras_require["lint"]
38
39 with open("README.md", "r") as f:
40 long_description = f.read()
41
42
43 # strip local version
44 def _local_version(version):
45 return ""
46
47
48 def _global_version(version):
49 from setuptools_scm.version import guess_next_dev_version
50
51 # strip `.devN` suffix since it is not semver compatible
52 # minor regex hack to avoid messing too much with setuptools-scm internals
53 version_str = guess_next_dev_version(version)
54 return re.sub(r"\.dev\d+", "", version_str)
55
56
57 hash_file_rel_path = os.path.join("vyper", "vyper_git_commithash.txt")
58 hashfile = os.path.relpath(hash_file_rel_path)
59
60 # there is no way in setuptools-scm to get metadata besides the package
61 # version into version.py. (and we need that version to be PEP440 compliant
62 # in order to get it into pypi). so, add the commit hash to the package
63 # separately, in order so that we can add it to `vyper --version`.
64 try:
65 commithash = subprocess.check_output("git rev-parse --short HEAD".split())
66 commithash_str = commithash.decode("utf-8").strip()
67 with open(hashfile, "w") as fh:
68 fh.write(commithash_str)
69 except subprocess.CalledProcessError:
70 pass
71
72
73 setup(
74 name="vyper",
75 use_scm_version={
76 "local_scheme": _local_version,
77 "version_scheme": _global_version,
78 "write_to": "vyper/version.py",
79 },
80 description="Vyper: the Pythonic Programming Language for the EVM",
81 long_description=long_description,
82 long_description_content_type="text/markdown",
83 author="Vyper Team",
84 author_email="",
85 url="https://github.com/vyperlang/vyper",
86 license="Apache License 2.0",
87 keywords="ethereum evm smart contract language",
88 include_package_data=True,
89 packages=["vyper"],
90 python_requires=">=3.10,<4",
91 py_modules=["vyper"],
92 install_requires=[
93 "cbor2>=5.4.6,<6",
94 "asttokens>=2.0.5,<3",
95 "pycryptodome>=3.5.1,<4",
96 "packaging>=23.1,<24",
97 "importlib-metadata",
98 "wheel",
99 ],
100 setup_requires=["pytest-runner", "setuptools_scm>=7.1.0,<8.0.0"],
101 tests_require=extras_require["test"],
102 extras_require=extras_require,
103 entry_points={
104 "console_scripts": [
105 "vyper=vyper.cli.vyper_compile:_parse_cli_args",
106 "vyper-serve=vyper.cli.vyper_serve:_parse_cli_args",
107 "fang=vyper.cli.vyper_ir:_parse_cli_args",
108 "vyper-json=vyper.cli.vyper_json:_parse_cli_args",
109 ]
110 },
111 classifiers=[
112 "Intended Audience :: Developers",
113 "License :: OSI Approved :: Apache Software License",
114 "Programming Language :: Python :: 3.10",
115 "Programming Language :: Python :: 3.11",
116 "Programming Language :: Python :: 3.12",
117 ],
118 package_data={"vyper.ast": ["grammar.lark"]},
119 data_files=[("", [hash_file_rel_path])],
120 )
121
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -103,7 +103,6 @@
entry_points={
"console_scripts": [
"vyper=vyper.cli.vyper_compile:_parse_cli_args",
- "vyper-serve=vyper.cli.vyper_serve:_parse_cli_args",
"fang=vyper.cli.vyper_ir:_parse_cli_args",
"vyper-json=vyper.cli.vyper_json:_parse_cli_args",
]
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -103,7 +103,6 @@\n entry_points={\n \"console_scripts\": [\n \"vyper=vyper.cli.vyper_compile:_parse_cli_args\",\n- \"vyper-serve=vyper.cli.vyper_serve:_parse_cli_args\",\n \"fang=vyper.cli.vyper_ir:_parse_cli_args\",\n \"vyper-json=vyper.cli.vyper_json:_parse_cli_args\",\n ]\n", "issue": "`vyper-serve` is still lingering in `setup.py`\n### Version Information\r\n\r\n* vyper Version (output of `vyper --version`): doesn't matter\r\n* OS: doesn't matter\r\n* Python Version (output of `python --version`): doesn't matter\r\n\r\n### What's your issue about?\r\n\r\nYou removed `vyper-serve` with this commit: https://github.com/vyperlang/vyper/commit/98f502baea6385fe25dbf94a70fb4eddc9f02f56, but you forgot to remove `vyper-serve` from `setup.py`:\r\n\r\n```python\r\nentry_points={\r\n \"console_scripts\": [\r\n \"vyper=vyper.cli.vyper_compile:_parse_cli_args\",\r\n \"vyper-serve=vyper.cli.vyper_serve:_parse_cli_args\",\r\n \"fang=vyper.cli.vyper_ir:_parse_cli_args\",\r\n \"vyper-json=vyper.cli.vyper_json:_parse_cli_args\",\r\n ]\r\n },\r\n```\r\n\r\n### How can it be fixed?\r\n\r\nRemove `vyper-serve` line.\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport os\nimport re\nimport subprocess\n\nfrom setuptools import setup\n\nextras_require = {\n \"test\": [\n \"pytest>=8.0,<9.0\",\n \"pytest-cov>=4.1,<5.0\",\n \"pytest-instafail>=0.4,<1.0\",\n \"pytest-xdist>=3.0,<3.4\",\n \"pytest-split>=0.7.0,<1.0\",\n \"eth-tester[py-evm]>=0.10.0b4,<0.11\",\n \"eth_abi>=4.0.0,<5.0.0\",\n \"py-evm>=0.10.0b4,<0.11\",\n \"web3==6.0.0\",\n \"lark==1.1.9\",\n \"hypothesis[lark]>=6.0,<7.0\",\n \"eth-stdlib==0.2.7\",\n \"setuptools\",\n \"hexbytes>=1.2\",\n ],\n \"lint\": [\n \"black==23.12.0\",\n \"flake8==6.1.0\",\n \"flake8-bugbear==23.12.2\",\n \"flake8-use-fstring==1.4\",\n \"isort==5.13.2\",\n \"mypy==1.5\",\n ],\n \"dev\": [\"ipython\", \"pre-commit\", \"pyinstaller\", \"twine\"],\n}\n\nextras_require[\"dev\"] = extras_require[\"dev\"] + extras_require[\"test\"] + extras_require[\"lint\"]\n\nwith open(\"README.md\", \"r\") as f:\n long_description = f.read()\n\n\n# strip local version\ndef _local_version(version):\n return \"\"\n\n\ndef _global_version(version):\n from setuptools_scm.version import guess_next_dev_version\n\n # strip `.devN` suffix since it is not semver compatible\n # minor regex hack to avoid messing too much with setuptools-scm internals\n version_str = guess_next_dev_version(version)\n return re.sub(r\"\\.dev\\d+\", \"\", version_str)\n\n\nhash_file_rel_path = os.path.join(\"vyper\", \"vyper_git_commithash.txt\")\nhashfile = os.path.relpath(hash_file_rel_path)\n\n# there is no way in setuptools-scm to get metadata besides the package\n# version into version.py. (and we need that version to be PEP440 compliant\n# in order to get it into pypi). 
so, add the commit hash to the package\n# separately, in order so that we can add it to `vyper --version`.\ntry:\n commithash = subprocess.check_output(\"git rev-parse --short HEAD\".split())\n commithash_str = commithash.decode(\"utf-8\").strip()\n with open(hashfile, \"w\") as fh:\n fh.write(commithash_str)\nexcept subprocess.CalledProcessError:\n pass\n\n\nsetup(\n name=\"vyper\",\n use_scm_version={\n \"local_scheme\": _local_version,\n \"version_scheme\": _global_version,\n \"write_to\": \"vyper/version.py\",\n },\n description=\"Vyper: the Pythonic Programming Language for the EVM\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Vyper Team\",\n author_email=\"\",\n url=\"https://github.com/vyperlang/vyper\",\n license=\"Apache License 2.0\",\n keywords=\"ethereum evm smart contract language\",\n include_package_data=True,\n packages=[\"vyper\"],\n python_requires=\">=3.10,<4\",\n py_modules=[\"vyper\"],\n install_requires=[\n \"cbor2>=5.4.6,<6\",\n \"asttokens>=2.0.5,<3\",\n \"pycryptodome>=3.5.1,<4\",\n \"packaging>=23.1,<24\",\n \"importlib-metadata\",\n \"wheel\",\n ],\n setup_requires=[\"pytest-runner\", \"setuptools_scm>=7.1.0,<8.0.0\"],\n tests_require=extras_require[\"test\"],\n extras_require=extras_require,\n entry_points={\n \"console_scripts\": [\n \"vyper=vyper.cli.vyper_compile:_parse_cli_args\",\n \"vyper-serve=vyper.cli.vyper_serve:_parse_cli_args\",\n \"fang=vyper.cli.vyper_ir:_parse_cli_args\",\n \"vyper-json=vyper.cli.vyper_json:_parse_cli_args\",\n ]\n },\n classifiers=[\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: 3.12\",\n ],\n package_data={\"vyper.ast\": [\"grammar.lark\"]},\n data_files=[(\"\", [hash_file_rel_path])],\n)\n", "path": "setup.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport os\nimport re\nimport subprocess\n\nfrom setuptools import setup\n\nextras_require = {\n \"test\": [\n \"pytest>=8.0,<9.0\",\n \"pytest-cov>=4.1,<5.0\",\n \"pytest-instafail>=0.4,<1.0\",\n \"pytest-xdist>=3.0,<3.4\",\n \"pytest-split>=0.7.0,<1.0\",\n \"eth-tester[py-evm]>=0.10.0b4,<0.11\",\n \"eth_abi>=4.0.0,<5.0.0\",\n \"py-evm>=0.10.0b4,<0.11\",\n \"web3==6.0.0\",\n \"lark==1.1.9\",\n \"hypothesis[lark]>=6.0,<7.0\",\n \"eth-stdlib==0.2.7\",\n \"setuptools\",\n \"hexbytes>=1.2\",\n ],\n \"lint\": [\n \"black==23.12.0\",\n \"flake8==6.1.0\",\n \"flake8-bugbear==23.12.2\",\n \"flake8-use-fstring==1.4\",\n \"isort==5.13.2\",\n \"mypy==1.5\",\n ],\n \"dev\": [\"ipython\", \"pre-commit\", \"pyinstaller\", \"twine\"],\n}\n\nextras_require[\"dev\"] = extras_require[\"dev\"] + extras_require[\"test\"] + extras_require[\"lint\"]\n\nwith open(\"README.md\", \"r\") as f:\n long_description = f.read()\n\n\n# strip local version\ndef _local_version(version):\n return \"\"\n\n\ndef _global_version(version):\n from setuptools_scm.version import guess_next_dev_version\n\n # strip `.devN` suffix since it is not semver compatible\n # minor regex hack to avoid messing too much with setuptools-scm internals\n version_str = guess_next_dev_version(version)\n return re.sub(r\"\\.dev\\d+\", \"\", version_str)\n\n\nhash_file_rel_path = os.path.join(\"vyper\", \"vyper_git_commithash.txt\")\nhashfile = os.path.relpath(hash_file_rel_path)\n\n# there is no way in setuptools-scm to get metadata besides the package\n# version into version.py. 
(and we need that version to be PEP440 compliant\n# in order to get it into pypi). so, add the commit hash to the package\n# separately, in order so that we can add it to `vyper --version`.\ntry:\n commithash = subprocess.check_output(\"git rev-parse --short HEAD\".split())\n commithash_str = commithash.decode(\"utf-8\").strip()\n with open(hashfile, \"w\") as fh:\n fh.write(commithash_str)\nexcept subprocess.CalledProcessError:\n pass\n\n\nsetup(\n name=\"vyper\",\n use_scm_version={\n \"local_scheme\": _local_version,\n \"version_scheme\": _global_version,\n \"write_to\": \"vyper/version.py\",\n },\n description=\"Vyper: the Pythonic Programming Language for the EVM\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Vyper Team\",\n author_email=\"\",\n url=\"https://github.com/vyperlang/vyper\",\n license=\"Apache License 2.0\",\n keywords=\"ethereum evm smart contract language\",\n include_package_data=True,\n packages=[\"vyper\"],\n python_requires=\">=3.10,<4\",\n py_modules=[\"vyper\"],\n install_requires=[\n \"cbor2>=5.4.6,<6\",\n \"asttokens>=2.0.5,<3\",\n \"pycryptodome>=3.5.1,<4\",\n \"packaging>=23.1,<24\",\n \"importlib-metadata\",\n \"wheel\",\n ],\n setup_requires=[\"pytest-runner\", \"setuptools_scm>=7.1.0,<8.0.0\"],\n tests_require=extras_require[\"test\"],\n extras_require=extras_require,\n entry_points={\n \"console_scripts\": [\n \"vyper=vyper.cli.vyper_compile:_parse_cli_args\",\n \"fang=vyper.cli.vyper_ir:_parse_cli_args\",\n \"vyper-json=vyper.cli.vyper_json:_parse_cli_args\",\n ]\n },\n classifiers=[\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: 3.12\",\n ],\n package_data={\"vyper.ast\": [\"grammar.lark\"]},\n data_files=[(\"\", [hash_file_rel_path])],\n)\n", "path": "setup.py"}]} |
gh_patches_debug_1271 | rasdani/github-patches | git_diff | scverse__scanpy-783 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
get.rank_genes_groups() key argument not used
`rank_genes_groups_df` takes `key` as an argument, and the docs say it is the key the differential expression groups were stored under. However, the function does not use that key and instead fetches DE results from the default 'rank_genes_groups' key.
On line 55 of `rank_genes_groups_df()` in scanpy/get.py,
`d[k] = adata.uns["rank_genes_groups"][k][group]` should be changed to `d[k] = adata.uns[key][k][group]`
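For context, a minimal sketch of how the bug surfaces when results are stored under a non-default key (the dataset and argument names below follow the public scanpy API and are only illustrative):

```python
import scanpy as sc

pbmc = sc.datasets.pbmc68k_reduced()
# Store the DE results under a custom key instead of the default one.
sc.tl.rank_genes_groups(pbmc, groupby="louvain", key_added="wilcoxon")
# `key` is accepted but silently ignored: the call below still reads from
# adata.uns["rank_genes_groups"], not from adata.uns["wilcoxon"].
dedf = sc.get.rank_genes_groups_df(pbmc, group="0", key="wilcoxon")
```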
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scanpy/get.py`
Content:
```
1 """This module contains helper functions for accessing data."""
2 from typing import Optional, Iterable, Tuple
3
4 import numpy as np
5 import pandas as pd
6 from scipy.sparse import spmatrix
7
8 from anndata import AnnData
9 # --------------------------------------------------------------------------------
10 # Plotting data helpers
11 # --------------------------------------------------------------------------------
12
13
14 # TODO: implement diffxpy method, make singledispatch
15 def rank_genes_groups_df(
16 adata: AnnData,
17 group: str, # Can this be something other than a str?
18 *,
19 key: str = "rank_genes_groups",
20 pval_cutoff: Optional[float] = None,
21 log2fc_min: Optional[float] = None,
22 log2fc_max: Optional[float] = None,
23 gene_symbols: Optional[str] = None
24 ) -> pd.DataFrame:
25 """
26 :func:`scanpy.tl.rank_genes_groups` results in the form of a :class:`pd.DataFrame`.
27
28 Params
29 ------
30 adata
31 Object to get results from.
32 group
33 Which group (as in :func:`scanpy.tl.rank_genes_groups`'s `groupby`
34 argument) to return results from.
35 key
36 Key differential expression groups were stored under.
37 pval_cutoff
38 Minimum adjusted pval to return.
39 log2fc_min
40 Minumum logfc to return.
41 log2fc_max
42 Maximum logfc to return.
43 gene_symbols
44 Column name in `.var` DataFrame that stores gene symbols. Specifying
45 this will add that column to the returned dataframe.
46
47 Example
48 -------
49 >>> pbmc = sc.datasets.pbmc68k_reduced()
50 >>> sc.tl.rank_genes_groups(pbmc, groupby="louvain", use_raw=True, n_genes=pbmc.shape[1])
51 >>> dedf = sc.get.rank_genes_groups_df(pbmc, group="0")
52 """
53 d = pd.DataFrame()
54 for k in ['scores', 'names', 'logfoldchanges', 'pvals', 'pvals_adj']:
55 d[k] = adata.uns["rank_genes_groups"][k][group]
56 if pval_cutoff is not None:
57 d = d[d["pvals_adj"] < pval_cutoff]
58 if log2fc_min is not None:
59 d = d[d["logfoldchanges"] > log2fc_min]
60 if log2fc_max is not None:
61 d = d[d["logfoldchanges"] < log2fc_max]
62 if gene_symbols is not None:
63 d = d.join(adata.var[gene_symbols], on="names")
64 return d
65
66
67 def obs_df(
68 adata: AnnData,
69 keys: Iterable[str] = (),
70 obsm_keys: Iterable[Tuple[str, int]] = (),
71 *,
72 layer: str = None,
73 gene_symbols: str = None,
74 use_raw: bool = False
75 ) -> pd.DataFrame:
76 """\
77 Return values for observations in adata.
78
79 Params
80 ------
81 adata
82 AnnData object to get values from.
83 keys
84 Keys from either `.var_names`, `.var[gene_symbols]`, or `.obs.columns`.
85 obsm_keys
86 Tuple of `(key from obsm, column index of obsm[key])`.
87 layer
88 Layer of `adata` to use as expression values.
89 gene_symbols
90 Column of `adata.var` to search for `keys` in.
91 use_raw
92 Whether to get expression values from `adata.raw`.
93
94 Returns
95 -------
96 A dataframe with `adata.obs_names` as index, and values specified by `keys`
97 and `obsm_keys`.
98
99 Examples
100 --------
101 Getting value for plotting:
102
103 >>> pbmc = sc.datasets.pbmc68k_reduced()
104 >>> plotdf = sc.get.obs_df(
105 pbmc,
106 keys=["CD8B", "n_genes"],
107 obsm_keys=[("X_umap", 0), ("X_umap", 1)]
108 )
109 >>> plotdf.plot.scatter("X_umap0", "X_umap1", c="CD8B")
110
111 Calculating mean expression for marker genes by cluster:
112
113 >>> pbmc = sc.datasets.pbmc68k_reduced()
114 >>> marker_genes = ['CD79A', 'MS4A1', 'CD8A', 'CD8B', 'LYZ']
115 >>> genedf = sc.get.obs_df(
116 pbmc,
117 keys=["louvain", *marker_genes]
118 )
119 >>> grouped = genedf.groupby("louvain")
120 >>> mean, var = grouped.mean(), grouped.var()
121 """
122 if use_raw:
123 assert layer is None, "Cannot specify use_raw=True and a layer at the same time."
124 if gene_symbols is not None:
125 gene_names = pd.Series(adata.raw.var_names, index=adata.raw.var[gene_symbols])
126 else:
127 gene_names = pd.Series(adata.raw.var_names, index=adata.raw.var_names)
128 else:
129 if gene_symbols is not None:
130 gene_names = pd.Series(adata.var_names, index=adata.var[gene_symbols])
131 else:
132 gene_names = pd.Series(adata.var_names, index=adata.var_names)
133 lookup_keys = []
134 not_found = []
135 for key in keys:
136 if key in adata.obs.columns:
137 lookup_keys.append(key)
138 elif key in gene_names.index:
139 lookup_keys.append(gene_names[key])
140 else:
141 not_found.append(key)
142 if len(not_found) > 0:
143 if use_raw:
144 if gene_symbols is None:
145 gene_error = "`adata.raw.var_names`"
146 else:
147 gene_error = "gene_symbols column `adata.raw.var[{}].values`".format(gene_symbols)
148 else:
149 if gene_symbols is None:
150 gene_error = "`adata.var_names`"
151 else:
152 gene_error = "gene_symbols column `adata.var[{}].values`".format(gene_symbols)
153 raise KeyError(
154 f"Could not find keys '{not_found}' in columns of `adata.obs` or in"
155 f" {gene_error}."
156 )
157
158 # Make df
159 df = pd.DataFrame(index=adata.obs_names)
160 for k, l in zip(keys, lookup_keys):
161 if not use_raw or k in adata.obs.columns:
162 df[k] = adata.obs_vector(l, layer=layer)
163 else:
164 df[k] = adata.raw.obs_vector(l)
165 for k, idx in obsm_keys:
166 added_k = f"{k}-{idx}"
167 val = adata.obsm[k]
168 if isinstance(val, np.ndarray):
169 df[added_k] = np.ravel(val[:, idx])
170 elif isinstance(val, spmatrix):
171 df[added_k] = np.ravel(val[:, idx].toarray())
172 elif isinstance(val, pd.DataFrame):
173 df[added_k] = val.loc[:, idx]
174 return df
175
176
177 def var_df(
178 adata: AnnData,
179 keys: Iterable[str] = (),
180 varm_keys: Iterable[Tuple[str, int]] = (),
181 *,
182 layer: str = None,
183 ) -> pd.DataFrame:
184 """\
185 Return values for observations in adata.
186
187 Params
188 ------
189 adata
190 AnnData object to get values from.
191 keys
192 Keys from either `.obs_names`, or `.var.columns`.
193 varm_keys
194 Tuple of `(key from varm, column index of varm[key])`.
195 layer
196 Layer of `adata` to use as expression values.
197
198 Returns
199 -------
200 A dataframe with `adata.var_names` as index, and values specified by `keys`
201 and `varm_keys`.
202 """
203 # Argument handling
204 lookup_keys = []
205 not_found = []
206 for key in keys:
207 if key in adata.var.columns:
208 lookup_keys.append(key)
209 elif key in adata.obs_names:
210 lookup_keys.append(key)
211 else:
212 not_found.append(key)
213 if len(not_found) > 0:
214 raise KeyError(
215 f"Could not find keys '{not_found}' in columns of `adata.var` or"
216 " in `adata.obs_names`."
217 )
218
219 # Make df
220 df = pd.DataFrame(index=adata.var_names)
221 for k, l in zip(keys, lookup_keys):
222 df[k] = adata.var_vector(l, layer=layer)
223 for k, idx in varm_keys:
224 added_k = f"{k}-{idx}"
225 val = adata.varm[k]
226 if isinstance(val, np.ndarray):
227 df[added_k] = np.ravel(val[:, idx])
228 elif isinstance(val, spmatrix):
229 df[added_k] = np.ravel(val[:, idx].toarray())
230 elif isinstance(val, pd.DataFrame):
231 df[added_k] = val.loc[:, idx]
232 return df
233
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scanpy/get.py b/scanpy/get.py
--- a/scanpy/get.py
+++ b/scanpy/get.py
@@ -52,7 +52,7 @@
"""
d = pd.DataFrame()
for k in ['scores', 'names', 'logfoldchanges', 'pvals', 'pvals_adj']:
- d[k] = adata.uns["rank_genes_groups"][k][group]
+ d[k] = adata.uns[key][k][group]
if pval_cutoff is not None:
d = d[d["pvals_adj"] < pval_cutoff]
if log2fc_min is not None:
| {"golden_diff": "diff --git a/scanpy/get.py b/scanpy/get.py\n--- a/scanpy/get.py\n+++ b/scanpy/get.py\n@@ -52,7 +52,7 @@\n \"\"\"\n d = pd.DataFrame()\n for k in ['scores', 'names', 'logfoldchanges', 'pvals', 'pvals_adj']:\n- d[k] = adata.uns[\"rank_genes_groups\"][k][group]\n+ d[k] = adata.uns[key][k][group]\n if pval_cutoff is not None:\n d = d[d[\"pvals_adj\"] < pval_cutoff]\n if log2fc_min is not None:\n", "issue": "get.rank_genes_groups() key argument not used\n`rank_genes_groups_df` takes `key` as an argument and the docs says it is the key differential expression groups were stored under. However, the function does not use that key and fetches DE results from the default 'rank_genes_groups' key.\r\n\r\nline 55 under `rank_genes_groups_df() ` in scanpy/get.py\r\n`d[k] = adata.uns[\"rank_genes_groups\"][k][group]` should be changed to `d[k] = adata.uns[key][k][group]`\n", "before_files": [{"content": "\"\"\"This module contains helper functions for accessing data.\"\"\"\nfrom typing import Optional, Iterable, Tuple\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.sparse import spmatrix\n\nfrom anndata import AnnData\n# --------------------------------------------------------------------------------\n# Plotting data helpers\n# --------------------------------------------------------------------------------\n\n\n# TODO: implement diffxpy method, make singledispatch\ndef rank_genes_groups_df(\n adata: AnnData,\n group: str, # Can this be something other than a str?\n *,\n key: str = \"rank_genes_groups\",\n pval_cutoff: Optional[float] = None,\n log2fc_min: Optional[float] = None,\n log2fc_max: Optional[float] = None,\n gene_symbols: Optional[str] = None\n) -> pd.DataFrame:\n \"\"\"\n :func:`scanpy.tl.rank_genes_groups` results in the form of a :class:`pd.DataFrame`.\n\n Params\n ------\n adata\n Object to get results from.\n group\n Which group (as in :func:`scanpy.tl.rank_genes_groups`'s `groupby`\n argument) to return results from.\n key\n Key differential expression groups were stored under.\n pval_cutoff\n Minimum adjusted pval to return.\n log2fc_min\n Minumum logfc to return.\n log2fc_max\n Maximum logfc to return.\n gene_symbols\n Column name in `.var` DataFrame that stores gene symbols. 
Specifying\n this will add that column to the returned dataframe.\n\n Example\n -------\n >>> pbmc = sc.datasets.pbmc68k_reduced()\n >>> sc.tl.rank_genes_groups(pbmc, groupby=\"louvain\", use_raw=True, n_genes=pbmc.shape[1])\n >>> dedf = sc.get.rank_genes_groups_df(pbmc, group=\"0\")\n \"\"\"\n d = pd.DataFrame()\n for k in ['scores', 'names', 'logfoldchanges', 'pvals', 'pvals_adj']:\n d[k] = adata.uns[\"rank_genes_groups\"][k][group]\n if pval_cutoff is not None:\n d = d[d[\"pvals_adj\"] < pval_cutoff]\n if log2fc_min is not None:\n d = d[d[\"logfoldchanges\"] > log2fc_min]\n if log2fc_max is not None:\n d = d[d[\"logfoldchanges\"] < log2fc_max]\n if gene_symbols is not None:\n d = d.join(adata.var[gene_symbols], on=\"names\")\n return d\n\n\ndef obs_df(\n adata: AnnData,\n keys: Iterable[str] = (),\n obsm_keys: Iterable[Tuple[str, int]] = (),\n *,\n layer: str = None,\n gene_symbols: str = None,\n use_raw: bool = False\n) -> pd.DataFrame:\n \"\"\"\\\n Return values for observations in adata.\n\n Params\n ------\n adata\n AnnData object to get values from.\n keys\n Keys from either `.var_names`, `.var[gene_symbols]`, or `.obs.columns`.\n obsm_keys\n Tuple of `(key from obsm, column index of obsm[key])`.\n layer\n Layer of `adata` to use as expression values.\n gene_symbols\n Column of `adata.var` to search for `keys` in.\n use_raw\n Whether to get expression values from `adata.raw`.\n\n Returns\n -------\n A dataframe with `adata.obs_names` as index, and values specified by `keys`\n and `obsm_keys`.\n\n Examples\n --------\n Getting value for plotting:\n\n >>> pbmc = sc.datasets.pbmc68k_reduced()\n >>> plotdf = sc.get.obs_df(\n pbmc,\n keys=[\"CD8B\", \"n_genes\"],\n obsm_keys=[(\"X_umap\", 0), (\"X_umap\", 1)]\n )\n >>> plotdf.plot.scatter(\"X_umap0\", \"X_umap1\", c=\"CD8B\")\n\n Calculating mean expression for marker genes by cluster:\n\n >>> pbmc = sc.datasets.pbmc68k_reduced()\n >>> marker_genes = ['CD79A', 'MS4A1', 'CD8A', 'CD8B', 'LYZ']\n >>> genedf = sc.get.obs_df(\n pbmc,\n keys=[\"louvain\", *marker_genes]\n )\n >>> grouped = genedf.groupby(\"louvain\")\n >>> mean, var = grouped.mean(), grouped.var()\n \"\"\"\n if use_raw:\n assert layer is None, \"Cannot specify use_raw=True and a layer at the same time.\"\n if gene_symbols is not None:\n gene_names = pd.Series(adata.raw.var_names, index=adata.raw.var[gene_symbols])\n else:\n gene_names = pd.Series(adata.raw.var_names, index=adata.raw.var_names)\n else:\n if gene_symbols is not None:\n gene_names = pd.Series(adata.var_names, index=adata.var[gene_symbols])\n else:\n gene_names = pd.Series(adata.var_names, index=adata.var_names)\n lookup_keys = []\n not_found = []\n for key in keys:\n if key in adata.obs.columns:\n lookup_keys.append(key)\n elif key in gene_names.index:\n lookup_keys.append(gene_names[key])\n else:\n not_found.append(key)\n if len(not_found) > 0:\n if use_raw:\n if gene_symbols is None:\n gene_error = \"`adata.raw.var_names`\"\n else:\n gene_error = \"gene_symbols column `adata.raw.var[{}].values`\".format(gene_symbols)\n else:\n if gene_symbols is None:\n gene_error = \"`adata.var_names`\"\n else:\n gene_error = \"gene_symbols column `adata.var[{}].values`\".format(gene_symbols)\n raise KeyError(\n f\"Could not find keys '{not_found}' in columns of `adata.obs` or in\"\n f\" {gene_error}.\"\n )\n\n # Make df\n df = pd.DataFrame(index=adata.obs_names)\n for k, l in zip(keys, lookup_keys):\n if not use_raw or k in adata.obs.columns:\n df[k] = adata.obs_vector(l, layer=layer)\n else:\n df[k] = 
adata.raw.obs_vector(l)\n for k, idx in obsm_keys:\n added_k = f\"{k}-{idx}\"\n val = adata.obsm[k]\n if isinstance(val, np.ndarray):\n df[added_k] = np.ravel(val[:, idx])\n elif isinstance(val, spmatrix):\n df[added_k] = np.ravel(val[:, idx].toarray())\n elif isinstance(val, pd.DataFrame):\n df[added_k] = val.loc[:, idx]\n return df\n\n\ndef var_df(\n adata: AnnData,\n keys: Iterable[str] = (),\n varm_keys: Iterable[Tuple[str, int]] = (),\n *,\n layer: str = None,\n) -> pd.DataFrame:\n \"\"\"\\\n Return values for observations in adata.\n\n Params\n ------\n adata\n AnnData object to get values from.\n keys\n Keys from either `.obs_names`, or `.var.columns`.\n varm_keys\n Tuple of `(key from varm, column index of varm[key])`.\n layer\n Layer of `adata` to use as expression values.\n\n Returns\n -------\n A dataframe with `adata.var_names` as index, and values specified by `keys`\n and `varm_keys`.\n \"\"\"\n # Argument handling\n lookup_keys = []\n not_found = []\n for key in keys:\n if key in adata.var.columns:\n lookup_keys.append(key)\n elif key in adata.obs_names:\n lookup_keys.append(key)\n else:\n not_found.append(key)\n if len(not_found) > 0:\n raise KeyError(\n f\"Could not find keys '{not_found}' in columns of `adata.var` or\"\n \" in `adata.obs_names`.\"\n )\n\n # Make df\n df = pd.DataFrame(index=adata.var_names)\n for k, l in zip(keys, lookup_keys):\n df[k] = adata.var_vector(l, layer=layer)\n for k, idx in varm_keys:\n added_k = f\"{k}-{idx}\"\n val = adata.varm[k]\n if isinstance(val, np.ndarray):\n df[added_k] = np.ravel(val[:, idx])\n elif isinstance(val, spmatrix):\n df[added_k] = np.ravel(val[:, idx].toarray())\n elif isinstance(val, pd.DataFrame):\n df[added_k] = val.loc[:, idx]\n return df\n", "path": "scanpy/get.py"}], "after_files": [{"content": "\"\"\"This module contains helper functions for accessing data.\"\"\"\nfrom typing import Optional, Iterable, Tuple\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.sparse import spmatrix\n\nfrom anndata import AnnData\n# --------------------------------------------------------------------------------\n# Plotting data helpers\n# --------------------------------------------------------------------------------\n\n\n# TODO: implement diffxpy method, make singledispatch\ndef rank_genes_groups_df(\n adata: AnnData,\n group: str, # Can this be something other than a str?\n *,\n key: str = \"rank_genes_groups\",\n pval_cutoff: Optional[float] = None,\n log2fc_min: Optional[float] = None,\n log2fc_max: Optional[float] = None,\n gene_symbols: Optional[str] = None\n) -> pd.DataFrame:\n \"\"\"\n :func:`scanpy.tl.rank_genes_groups` results in the form of a :class:`pd.DataFrame`.\n\n Params\n ------\n adata\n Object to get results from.\n group\n Which group (as in :func:`scanpy.tl.rank_genes_groups`'s `groupby`\n argument) to return results from.\n key\n Key differential expression groups were stored under.\n pval_cutoff\n Minimum adjusted pval to return.\n log2fc_min\n Minumum logfc to return.\n log2fc_max\n Maximum logfc to return.\n gene_symbols\n Column name in `.var` DataFrame that stores gene symbols. 
Specifying\n this will add that column to the returned dataframe.\n\n Example\n -------\n >>> pbmc = sc.datasets.pbmc68k_reduced()\n >>> sc.tl.rank_genes_groups(pbmc, groupby=\"louvain\", use_raw=True, n_genes=pbmc.shape[1])\n >>> dedf = sc.get.rank_genes_groups_df(pbmc, group=\"0\")\n \"\"\"\n d = pd.DataFrame()\n for k in ['scores', 'names', 'logfoldchanges', 'pvals', 'pvals_adj']:\n d[k] = adata.uns[key][k][group]\n if pval_cutoff is not None:\n d = d[d[\"pvals_adj\"] < pval_cutoff]\n if log2fc_min is not None:\n d = d[d[\"logfoldchanges\"] > log2fc_min]\n if log2fc_max is not None:\n d = d[d[\"logfoldchanges\"] < log2fc_max]\n if gene_symbols is not None:\n d = d.join(adata.var[gene_symbols], on=\"names\")\n return d\n\n\ndef obs_df(\n adata: AnnData,\n keys: Iterable[str] = (),\n obsm_keys: Iterable[Tuple[str, int]] = (),\n *,\n layer: str = None,\n gene_symbols: str = None,\n use_raw: bool = False\n) -> pd.DataFrame:\n \"\"\"\\\n Return values for observations in adata.\n\n Params\n ------\n adata\n AnnData object to get values from.\n keys\n Keys from either `.var_names`, `.var[gene_symbols]`, or `.obs.columns`.\n obsm_keys\n Tuple of `(key from obsm, column index of obsm[key])`.\n layer\n Layer of `adata` to use as expression values.\n gene_symbols\n Column of `adata.var` to search for `keys` in.\n use_raw\n Whether to get expression values from `adata.raw`.\n\n Returns\n -------\n A dataframe with `adata.obs_names` as index, and values specified by `keys`\n and `obsm_keys`.\n\n Examples\n --------\n Getting value for plotting:\n\n >>> pbmc = sc.datasets.pbmc68k_reduced()\n >>> plotdf = sc.get.obs_df(\n pbmc,\n keys=[\"CD8B\", \"n_genes\"],\n obsm_keys=[(\"X_umap\", 0), (\"X_umap\", 1)]\n )\n >>> plotdf.plot.scatter(\"X_umap0\", \"X_umap1\", c=\"CD8B\")\n\n Calculating mean expression for marker genes by cluster:\n\n >>> pbmc = sc.datasets.pbmc68k_reduced()\n >>> marker_genes = ['CD79A', 'MS4A1', 'CD8A', 'CD8B', 'LYZ']\n >>> genedf = sc.get.obs_df(\n pbmc,\n keys=[\"louvain\", *marker_genes]\n )\n >>> grouped = genedf.groupby(\"louvain\")\n >>> mean, var = grouped.mean(), grouped.var()\n \"\"\"\n if use_raw:\n assert layer is None, \"Cannot specify use_raw=True and a layer at the same time.\"\n if gene_symbols is not None:\n gene_names = pd.Series(adata.raw.var_names, index=adata.raw.var[gene_symbols])\n else:\n gene_names = pd.Series(adata.raw.var_names, index=adata.raw.var_names)\n else:\n if gene_symbols is not None:\n gene_names = pd.Series(adata.var_names, index=adata.var[gene_symbols])\n else:\n gene_names = pd.Series(adata.var_names, index=adata.var_names)\n lookup_keys = []\n not_found = []\n for key in keys:\n if key in adata.obs.columns:\n lookup_keys.append(key)\n elif key in gene_names.index:\n lookup_keys.append(gene_names[key])\n else:\n not_found.append(key)\n if len(not_found) > 0:\n if use_raw:\n if gene_symbols is None:\n gene_error = \"`adata.raw.var_names`\"\n else:\n gene_error = \"gene_symbols column `adata.raw.var[{}].values`\".format(gene_symbols)\n else:\n if gene_symbols is None:\n gene_error = \"`adata.var_names`\"\n else:\n gene_error = \"gene_symbols column `adata.var[{}].values`\".format(gene_symbols)\n raise KeyError(\n f\"Could not find keys '{not_found}' in columns of `adata.obs` or in\"\n f\" {gene_error}.\"\n )\n\n # Make df\n df = pd.DataFrame(index=adata.obs_names)\n for k, l in zip(keys, lookup_keys):\n if not use_raw or k in adata.obs.columns:\n df[k] = adata.obs_vector(l, layer=layer)\n else:\n df[k] = adata.raw.obs_vector(l)\n for k, idx in 
obsm_keys:\n added_k = f\"{k}-{idx}\"\n val = adata.obsm[k]\n if isinstance(val, np.ndarray):\n df[added_k] = np.ravel(val[:, idx])\n elif isinstance(val, spmatrix):\n df[added_k] = np.ravel(val[:, idx].toarray())\n elif isinstance(val, pd.DataFrame):\n df[added_k] = val.loc[:, idx]\n return df\n\n\ndef var_df(\n adata: AnnData,\n keys: Iterable[str] = (),\n varm_keys: Iterable[Tuple[str, int]] = (),\n *,\n layer: str = None,\n) -> pd.DataFrame:\n \"\"\"\\\n Return values for observations in adata.\n\n Params\n ------\n adata\n AnnData object to get values from.\n keys\n Keys from either `.obs_names`, or `.var.columns`.\n varm_keys\n Tuple of `(key from varm, column index of varm[key])`.\n layer\n Layer of `adata` to use as expression values.\n\n Returns\n -------\n A dataframe with `adata.var_names` as index, and values specified by `keys`\n and `varm_keys`.\n \"\"\"\n # Argument handling\n lookup_keys = []\n not_found = []\n for key in keys:\n if key in adata.var.columns:\n lookup_keys.append(key)\n elif key in adata.obs_names:\n lookup_keys.append(key)\n else:\n not_found.append(key)\n if len(not_found) > 0:\n raise KeyError(\n f\"Could not find keys '{not_found}' in columns of `adata.var` or\"\n \" in `adata.obs_names`.\"\n )\n\n # Make df\n df = pd.DataFrame(index=adata.var_names)\n for k, l in zip(keys, lookup_keys):\n df[k] = adata.var_vector(l, layer=layer)\n for k, idx in varm_keys:\n added_k = f\"{k}-{idx}\"\n val = adata.varm[k]\n if isinstance(val, np.ndarray):\n df[added_k] = np.ravel(val[:, idx])\n elif isinstance(val, spmatrix):\n df[added_k] = np.ravel(val[:, idx].toarray())\n elif isinstance(val, pd.DataFrame):\n df[added_k] = val.loc[:, idx]\n return df\n", "path": "scanpy/get.py"}]} |
gh_patches_debug_1272 | rasdani/github-patches | git_diff | doccano__doccano-841 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Signup verification email not received
How to reproduce the behaviour
---------
I set up the project using the AWS one-click deployment button. Everything works fine, but when a new user signs up, the email verification message is never received. I believe I have to set up the email host configuration in `settings.py`. How do I set it up now that the project has already been deployed? Does it go in the `/env.list` file, or does the AWS one-click deployment handle this automatically?
```
# necessary for email verification of new accounts
EMAIL_USE_TLS = env.bool('EMAIL_USE_TLS', False)
EMAIL_HOST = env('EMAIL_HOST', None)
EMAIL_HOST_USER = env('EMAIL_HOST_USER', None)
EMAIL_HOST_PASSWORD = env('EMAIL_HOST_PASSWORD', None)
EMAIL_PORT = env.int('EMAIL_PORT', 587)
```
Your Environment
---------
<!-- Include details of your environment. -->
* Operating System: AWS ubuntu
* Python Version Used: 3.6
* When you install doccano: Mar 30, 2020
* How did you install doccano (Heroku button etc): AWS one-click deployment
---------
Also, when I deployed the project with docker-compose after pulling the GitHub repository, the UI looks older than the demo at http://doccano.herokuapp.com/. Why is that? Am I missing something here?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/app/settings.py`
Content:
```
1 """
2 Django settings for app project.
3
4 For more information on this file, see
5 https://docs.djangoproject.com/en/2.0/topics/settings/
6
7 For the full list of settings and their values, see
8 https://docs.djangoproject.com/en/2.0/ref/settings/
9
10 Any setting that is configured via an environment variable may
11 also be set in a `.env` file in the project base directory.
12 """
13 from os import path
14
15 import django_heroku
16 import dj_database_url
17 from environs import Env
18 from furl import furl
19
20
21 # Build paths inside the project like this: path.join(BASE_DIR, ...)
22 BASE_DIR = path.dirname(path.dirname(path.abspath(__file__)))
23
24 env = Env()
25 env.read_env(path.join(BASE_DIR, '.env'), recurse=False)
26
27
28 # Quick-start development settings - unsuitable for production
29 # See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
30
31 # SECURITY WARNING: keep the secret key used in production secret!
32 SECRET_KEY = env('SECRET_KEY',
33 'v8sk33sy82!uw3ty=!jjv5vp7=s2phrzw(m(hrn^f7e_#1h2al')
34
35 # SECURITY WARNING: don't run with debug turned on in production!
36 DEBUG = env.bool('DEBUG', True)
37
38 # True if you want to allow users to be able to create an account
39 ALLOW_SIGNUP = env.bool('ALLOW_SIGNUP', True)
40
41 # ALLOWED_HOSTS = []
42
43
44 # Application definition
45
46 INSTALLED_APPS = [
47 'whitenoise.runserver_nostatic',
48 'django.contrib.admin',
49 'django.contrib.auth',
50 'django.contrib.contenttypes',
51 'django.contrib.sessions',
52 'django.contrib.messages',
53 'django.contrib.staticfiles',
54 'server.apps.ServerConfig',
55 'api.apps.ApiConfig',
56 'widget_tweaks',
57 'rest_framework',
58 'rest_framework.authtoken',
59 'django_filters',
60 'social_django',
61 'polymorphic',
62 'webpack_loader',
63 'corsheaders',
64 'drf_yasg'
65 ]
66
67 CLOUD_BROWSER_APACHE_LIBCLOUD_PROVIDER = env('CLOUD_BROWSER_LIBCLOUD_PROVIDER', None)
68 CLOUD_BROWSER_APACHE_LIBCLOUD_ACCOUNT = env('CLOUD_BROWSER_LIBCLOUD_ACCOUNT', None)
69 CLOUD_BROWSER_APACHE_LIBCLOUD_SECRET_KEY = env('CLOUD_BROWSER_LIBCLOUD_KEY', None)
70
71 if CLOUD_BROWSER_APACHE_LIBCLOUD_PROVIDER:
72 CLOUD_BROWSER_DATASTORE = 'ApacheLibcloud'
73 CLOUD_BROWSER_OBJECT_REDIRECT_URL = '/v1/cloud-upload'
74 INSTALLED_APPS.append('cloud_browser')
75
76 MIDDLEWARE = [
77 'django.middleware.security.SecurityMiddleware',
78 'whitenoise.middleware.WhiteNoiseMiddleware',
79 'django.contrib.sessions.middleware.SessionMiddleware',
80 'django.middleware.common.CommonMiddleware',
81 'django.middleware.csrf.CsrfViewMiddleware',
82 'django.contrib.auth.middleware.AuthenticationMiddleware',
83 'django.contrib.messages.middleware.MessageMiddleware',
84 'django.middleware.clickjacking.XFrameOptionsMiddleware',
85 'social_django.middleware.SocialAuthExceptionMiddleware',
86 'applicationinsights.django.ApplicationInsightsMiddleware',
87 'corsheaders.middleware.CorsMiddleware',
88 ]
89
90 ROOT_URLCONF = 'app.urls'
91
92 TEMPLATES = [
93 {
94 'BACKEND': 'django.template.backends.django.DjangoTemplates',
95 'DIRS': [path.join(BASE_DIR, 'server/templates'), path.join(BASE_DIR, 'authentification/templates')],
96 'APP_DIRS': True,
97 'OPTIONS': {
98 'context_processors': [
99 'django.template.context_processors.debug',
100 'django.template.context_processors.request',
101 'django.contrib.auth.context_processors.auth',
102 'django.contrib.messages.context_processors.messages',
103 'social_django.context_processors.backends',
104 'social_django.context_processors.login_redirect',
105 ],
106 'libraries': {
107 'analytics': 'server.templatetags.analytics',
108 'utils_templating': 'authentification.templatetags.utils_templating',
109 },
110 },
111 },
112 ]
113
114 # Static files (CSS, JavaScript, Images)
115 # https://docs.djangoproject.com/en/2.0/howto/static-files/
116
117 STATIC_URL = '/static/'
118 STATIC_ROOT = path.join(BASE_DIR, 'staticfiles')
119
120 STATICFILES_DIRS = [
121 static_path
122 for static_path in (
123 path.join(BASE_DIR, 'server', 'static', 'assets'),
124 path.join(BASE_DIR, 'server', 'static', 'static'),
125 )
126 if path.isdir(static_path)
127 ]
128
129 STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
130
131 WEBPACK_LOADER = {
132 'DEFAULT': {
133 'CACHE': not DEBUG,
134 'BUNDLE_DIR_NAME': 'bundle/',
135 'STATS_FILE': path.join(BASE_DIR, 'server', 'static', 'webpack-stats.json'),
136 'POLL_INTERVAL': 0.1,
137 'TIMEOUT': None,
138 'IGNORE': [r'.*\.hot-update.js', r'.+\.map']
139 }
140 }
141
142 WSGI_APPLICATION = 'app.wsgi.application'
143
144 AUTHENTICATION_BACKENDS = [
145 'social_core.backends.github.GithubOAuth2',
146 'social_core.backends.azuread_tenant.AzureADTenantOAuth2',
147 'django.contrib.auth.backends.ModelBackend',
148 ]
149
150 HEADER_AUTH_USER_NAME = env('HEADER_AUTH_USER_NAME', '')
151 HEADER_AUTH_USER_GROUPS = env('HEADER_AUTH_USER_GROUPS', '')
152 HEADER_AUTH_ADMIN_GROUP_NAME = env('HEADER_AUTH_ADMIN_GROUP_NAME', '')
153 HEADER_AUTH_GROUPS_SEPERATOR = env('HEADER_AUTH_GROUPS_SEPERATOR', default=',')
154
155 if HEADER_AUTH_USER_NAME and HEADER_AUTH_USER_GROUPS and HEADER_AUTH_ADMIN_GROUP_NAME:
156 MIDDLEWARE.append('server.middleware.HeaderAuthMiddleware')
157 AUTHENTICATION_BACKENDS.append('django.contrib.auth.backends.RemoteUserBackend')
158
159 SOCIAL_AUTH_GITHUB_KEY = env('OAUTH_GITHUB_KEY', None)
160 SOCIAL_AUTH_GITHUB_SECRET = env('OAUTH_GITHUB_SECRET', None)
161 GITHUB_ADMIN_ORG_NAME = env('GITHUB_ADMIN_ORG_NAME', None)
162 GITHUB_ADMIN_TEAM_NAME = env('GITHUB_ADMIN_TEAM_NAME', None)
163
164 if GITHUB_ADMIN_ORG_NAME and GITHUB_ADMIN_TEAM_NAME:
165 SOCIAL_AUTH_GITHUB_SCOPE = ['read:org']
166
167 SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_KEY = env('OAUTH_AAD_KEY', None)
168 SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_SECRET = env('OAUTH_AAD_SECRET', None)
169 SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_TENANT_ID = env('OAUTH_AAD_TENANT', None)
170 AZUREAD_ADMIN_GROUP_ID = env('AZUREAD_ADMIN_GROUP_ID', None)
171
172 if AZUREAD_ADMIN_GROUP_ID:
173 SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_RESOURCE = 'https://graph.microsoft.com/'
174 SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_SCOPE = ['Directory.Read.All']
175
176 SOCIAL_AUTH_PIPELINE = [
177 'social_core.pipeline.social_auth.social_details',
178 'social_core.pipeline.social_auth.social_uid',
179 'social_core.pipeline.social_auth.auth_allowed',
180 'social_core.pipeline.social_auth.social_user',
181 'social_core.pipeline.user.get_username',
182 'social_core.pipeline.user.create_user',
183 'social_core.pipeline.social_auth.associate_user',
184 'social_core.pipeline.social_auth.load_extra_data',
185 'social_core.pipeline.user.user_details',
186 'server.social_auth.fetch_github_permissions',
187 'server.social_auth.fetch_azuread_permissions',
188 ]
189
190 ROLE_PROJECT_ADMIN = env('ROLE_PROJECT_ADMIN', 'project_admin')
191 ROLE_ANNOTATOR = env('ROLE_ANNOTATOR', 'annotator')
192 ROLE_ANNOTATION_APPROVER = env('ROLE_ANNOTATION_APPROVER', 'annotation_approver')
193
194 # Database
195 # https://docs.djangoproject.com/en/2.0/ref/settings/#databases
196
197 DATABASES = {
198 'default': {
199 'ENGINE': 'django.db.backends.sqlite3',
200 'NAME': path.join(BASE_DIR, 'db.sqlite3'),
201 }
202 }
203
204
205 # Password validation
206 # https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
207
208 AUTH_PASSWORD_VALIDATORS = [
209 {
210 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
211 },
212 {
213 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
214 },
215 {
216 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
217 },
218 {
219 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
220 },
221 ]
222
223 REST_FRAMEWORK = {
224 # Use Django's standard `django.contrib.auth` permissions,
225 # or allow read-only access for unauthenticated users.
226 'DEFAULT_PERMISSION_CLASSES': [
227 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly',
228 'rest_framework.permissions.IsAuthenticated',
229 ],
230 'DEFAULT_AUTHENTICATION_CLASSES': (
231 'rest_framework.authentication.SessionAuthentication',
232 'rest_framework.authentication.TokenAuthentication',
233 ),
234 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
235 'PAGE_SIZE': env.int('DOCCANO_PAGE_SIZE', default=5),
236 'DEFAULT_FILTER_BACKENDS': ('django_filters.rest_framework.DjangoFilterBackend',),
237 'SEARCH_PARAM': 'q',
238 'DEFAULT_RENDERER_CLASSES': (
239 'rest_framework.renderers.JSONRenderer',
240 'rest_framework.renderers.BrowsableAPIRenderer',
241 'rest_framework_xml.renderers.XMLRenderer'
242 )
243 }
244
245 # Internationalization
246 # https://docs.djangoproject.com/en/2.0/topics/i18n/
247
248 LANGUAGE_CODE = 'en-us'
249
250 TIME_ZONE = 'UTC'
251
252 USE_I18N = True
253
254 USE_L10N = True
255
256 USE_TZ = True
257
258 TEST_RUNNER = 'xmlrunner.extra.djangotestrunner.XMLTestRunner'
259 TEST_OUTPUT_DIR = path.join(BASE_DIR, 'junitxml')
260
261 LOGIN_URL = '/login/'
262 LOGIN_REDIRECT_URL = '/projects/'
263 LOGOUT_REDIRECT_URL = '/'
264
265 django_heroku.settings(locals(), test_runner=False)
266
267 # Change 'default' database configuration with $DATABASE_URL.
268 DATABASES['default'].update(dj_database_url.config(
269 env='DATABASE_URL',
270 conn_max_age=env.int('DATABASE_CONN_MAX_AGE', 500),
271 ssl_require='sslmode' not in furl(env('DATABASE_URL', '')).args,
272 ))
273
274 # work-around for dj-database-url: explicitly disable ssl for sqlite
275 if DATABASES['default'].get('ENGINE') == 'django.db.backends.sqlite3':
276 DATABASES['default'].get('OPTIONS', {}).pop('sslmode', None)
277
278 # work-around for dj-database-url: patch ssl for mysql
279 if DATABASES['default'].get('ENGINE') == 'django.db.backends.mysql':
280 DATABASES['default'].get('OPTIONS', {}).pop('sslmode', None)
281 if env('MYSQL_SSL_CA', None):
282 DATABASES['default'].setdefault('OPTIONS', {})\
283 .setdefault('ssl', {}).setdefault('ca', env('MYSQL_SSL_CA', None))
284
285 # default to a sensible modern driver for Azure SQL
286 if DATABASES['default'].get('ENGINE') == 'sql_server.pyodbc':
287 DATABASES['default'].setdefault('OPTIONS', {})\
288 .setdefault('driver', 'ODBC Driver 17 for SQL Server')
289
290 # Honor the 'X-Forwarded-Proto' header for request.is_secure()
291 SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
292 SESSION_COOKIE_SECURE = env.bool('SESSION_COOKIE_SECURE', False)
293 CSRF_COOKIE_SECURE = env.bool('CSRF_COOKIE_SECURE', False)
294 CSRF_TRUSTED_ORIGINS = env.list('CSRF_TRUSTED_ORIGINS', [])
295
296 # Allow all host headers
297 # ALLOWED_HOSTS = ['*']
298
299 # Size of the batch for creating documents
300 # on the import phase
301 IMPORT_BATCH_SIZE = env.int('IMPORT_BATCH_SIZE', 500)
302
303 GOOGLE_TRACKING_ID = env('GOOGLE_TRACKING_ID', 'UA-125643874-2').strip()
304
305 AZURE_APPINSIGHTS_IKEY = env('AZURE_APPINSIGHTS_IKEY', None)
306 APPLICATION_INSIGHTS = {
307 'ikey': AZURE_APPINSIGHTS_IKEY if AZURE_APPINSIGHTS_IKEY else None,
308 'endpoint': env('AZURE_APPINSIGHTS_ENDPOINT', None),
309 }
310
311 # necessary for email verification of new accounts
312 EMAIL_USE_TLS = env.bool('EMAIL_USE_TLS', False)
313 EMAIL_HOST = env('EMAIL_HOST', None)
314 EMAIL_HOST_USER = env('EMAIL_HOST_USER', None)
315 EMAIL_HOST_PASSWORD = env('EMAIL_HOST_PASSWORD', None)
316 EMAIL_PORT = env.int('EMAIL_PORT', 587)
317
318 if not EMAIL_HOST:
319 EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
320
321
322 if DEBUG:
323 CORS_ORIGIN_WHITELIST = (
324 'http://127.0.0.1:3000',
325 'http://0.0.0.0:3000',
326 'http://localhost:3000'
327 )
328
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/app/settings.py b/app/app/settings.py
--- a/app/app/settings.py
+++ b/app/app/settings.py
@@ -314,6 +314,7 @@
EMAIL_HOST_USER = env('EMAIL_HOST_USER', None)
EMAIL_HOST_PASSWORD = env('EMAIL_HOST_PASSWORD', None)
EMAIL_PORT = env.int('EMAIL_PORT', 587)
+DEFAULT_FROM_EMAIL = env('DEFAULT_FROM_EMAIL', 'webmaster@localhost')
if not EMAIL_HOST:
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
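A note on the patch above: it only supplies a `DEFAULT_FROM_EMAIL` fallback; the SMTP host, credentials and port still have to arrive through the environment variables read in `settings.py` (for the Docker/AWS deployments discussed in the issue, typically via `env.list`). A hedged way to confirm that a deployed instance can actually send the verification mail is to run something like the following from the Django shell — the recipient address is a placeholder, not taken from the report:

```python
# Sketch: check the configured mail settings from `python manage.py shell` on the deployed instance.
from django.core.mail import send_mail

send_mail(
    subject="doccano email test",
    message="If this arrives, EMAIL_HOST / EMAIL_HOST_USER / EMAIL_HOST_PASSWORD are set correctly.",
    from_email=None,  # None falls back to DEFAULT_FROM_EMAIL, the setting added by the patch
    recipient_list=["you@example.com"],  # placeholder address
)
```

If `EMAIL_HOST` is unset, the console backend is still active and the message is only printed to the server log instead of being delivered, which matches the silent behaviour described in the issue.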
| {"golden_diff": "diff --git a/app/app/settings.py b/app/app/settings.py\n--- a/app/app/settings.py\n+++ b/app/app/settings.py\n@@ -314,6 +314,7 @@\n EMAIL_HOST_USER = env('EMAIL_HOST_USER', None)\n EMAIL_HOST_PASSWORD = env('EMAIL_HOST_PASSWORD', None)\n EMAIL_PORT = env.int('EMAIL_PORT', 587)\n+DEFAULT_FROM_EMAIL = env('DEFAULT_FROM_EMAIL', 'webmaster@localhost')\n \n if not EMAIL_HOST:\n EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n", "issue": "Signup verification email not received\nHow to reproduce the behaviour\r\n---------\r\nI setup the project using AWS one-click deployment button. Everything works fine, but when a new user sign ups, email verification is not received. I believe I have to set up a email host configurations in `settings.py`. How do I set it up as the project has already been deployed? Is it in the `/env.list` file, or the AWS one-click automatically does this?\r\n\r\n```\r\n# necessary for email verification of new accounts\r\nEMAIL_USE_TLS = env.bool('EMAIL_USE_TLS', False)\r\nEMAIL_HOST = env('EMAIL_HOST', None)\r\nEMAIL_HOST_USER = env('EMAIL_HOST_USER', None)\r\nEMAIL_HOST_PASSWORD = env('EMAIL_HOST_PASSWORD', None)\r\nEMAIL_PORT = env.int('EMAIL_PORT', 587)\r\n```\r\nYour Environment\r\n---------\r\n<!-- Include details of your environment. -->\r\n\r\n* Operating System: AWS ubuntu\r\n* Python Version Used: 3.6\r\n* When you install doccano: Mar 30, 2020\r\n* How did you install doccano (Heroku button etc): AWS one-click deployment\r\n\r\n---------\r\nAlso, when I deployed the project using docker-compose by pulling the github project, the project looks older and not as in demo http://doccano.herokuapp.com/. Why is that? Am I missing something here?\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nDjango settings for app project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/2.0/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/2.0/ref/settings/\n\nAny setting that is configured via an environment variable may\nalso be set in a `.env` file in the project base directory.\n\"\"\"\nfrom os import path\n\nimport django_heroku\nimport dj_database_url\nfrom environs import Env\nfrom furl import furl\n\n\n# Build paths inside the project like this: path.join(BASE_DIR, ...)\nBASE_DIR = path.dirname(path.dirname(path.abspath(__file__)))\n\nenv = Env()\nenv.read_env(path.join(BASE_DIR, '.env'), recurse=False)\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = env('SECRET_KEY',\n 'v8sk33sy82!uw3ty=!jjv5vp7=s2phrzw(m(hrn^f7e_#1h2al')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = env.bool('DEBUG', True)\n\n# True if you want to allow users to be able to create an account\nALLOW_SIGNUP = env.bool('ALLOW_SIGNUP', True)\n\n# ALLOWED_HOSTS = []\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'whitenoise.runserver_nostatic',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'server.apps.ServerConfig',\n 'api.apps.ApiConfig',\n 'widget_tweaks',\n 'rest_framework',\n 'rest_framework.authtoken',\n 'django_filters',\n 'social_django',\n 'polymorphic',\n 'webpack_loader',\n 'corsheaders',\n 
'drf_yasg'\n]\n\nCLOUD_BROWSER_APACHE_LIBCLOUD_PROVIDER = env('CLOUD_BROWSER_LIBCLOUD_PROVIDER', None)\nCLOUD_BROWSER_APACHE_LIBCLOUD_ACCOUNT = env('CLOUD_BROWSER_LIBCLOUD_ACCOUNT', None)\nCLOUD_BROWSER_APACHE_LIBCLOUD_SECRET_KEY = env('CLOUD_BROWSER_LIBCLOUD_KEY', None)\n\nif CLOUD_BROWSER_APACHE_LIBCLOUD_PROVIDER:\n CLOUD_BROWSER_DATASTORE = 'ApacheLibcloud'\n CLOUD_BROWSER_OBJECT_REDIRECT_URL = '/v1/cloud-upload'\n INSTALLED_APPS.append('cloud_browser')\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'whitenoise.middleware.WhiteNoiseMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'social_django.middleware.SocialAuthExceptionMiddleware',\n 'applicationinsights.django.ApplicationInsightsMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n]\n\nROOT_URLCONF = 'app.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [path.join(BASE_DIR, 'server/templates'), path.join(BASE_DIR, 'authentification/templates')],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'social_django.context_processors.backends',\n 'social_django.context_processors.login_redirect',\n ],\n 'libraries': {\n 'analytics': 'server.templatetags.analytics',\n 'utils_templating': 'authentification.templatetags.utils_templating',\n },\n },\n },\n]\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/2.0/howto/static-files/\n\nSTATIC_URL = '/static/'\nSTATIC_ROOT = path.join(BASE_DIR, 'staticfiles')\n\nSTATICFILES_DIRS = [\n static_path\n for static_path in (\n path.join(BASE_DIR, 'server', 'static', 'assets'),\n path.join(BASE_DIR, 'server', 'static', 'static'),\n )\n if path.isdir(static_path)\n]\n\nSTATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'\n\nWEBPACK_LOADER = {\n 'DEFAULT': {\n 'CACHE': not DEBUG,\n 'BUNDLE_DIR_NAME': 'bundle/',\n 'STATS_FILE': path.join(BASE_DIR, 'server', 'static', 'webpack-stats.json'),\n 'POLL_INTERVAL': 0.1,\n 'TIMEOUT': None,\n 'IGNORE': [r'.*\\.hot-update.js', r'.+\\.map']\n }\n}\n\nWSGI_APPLICATION = 'app.wsgi.application'\n\nAUTHENTICATION_BACKENDS = [\n 'social_core.backends.github.GithubOAuth2',\n 'social_core.backends.azuread_tenant.AzureADTenantOAuth2',\n 'django.contrib.auth.backends.ModelBackend',\n]\n\nHEADER_AUTH_USER_NAME = env('HEADER_AUTH_USER_NAME', '')\nHEADER_AUTH_USER_GROUPS = env('HEADER_AUTH_USER_GROUPS', '')\nHEADER_AUTH_ADMIN_GROUP_NAME = env('HEADER_AUTH_ADMIN_GROUP_NAME', '')\nHEADER_AUTH_GROUPS_SEPERATOR = env('HEADER_AUTH_GROUPS_SEPERATOR', default=',')\n\nif HEADER_AUTH_USER_NAME and HEADER_AUTH_USER_GROUPS and HEADER_AUTH_ADMIN_GROUP_NAME:\n MIDDLEWARE.append('server.middleware.HeaderAuthMiddleware')\n AUTHENTICATION_BACKENDS.append('django.contrib.auth.backends.RemoteUserBackend')\n\nSOCIAL_AUTH_GITHUB_KEY = env('OAUTH_GITHUB_KEY', None)\nSOCIAL_AUTH_GITHUB_SECRET = env('OAUTH_GITHUB_SECRET', None)\nGITHUB_ADMIN_ORG_NAME = env('GITHUB_ADMIN_ORG_NAME', None)\nGITHUB_ADMIN_TEAM_NAME = env('GITHUB_ADMIN_TEAM_NAME', None)\n\nif 
GITHUB_ADMIN_ORG_NAME and GITHUB_ADMIN_TEAM_NAME:\n SOCIAL_AUTH_GITHUB_SCOPE = ['read:org']\n\nSOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_KEY = env('OAUTH_AAD_KEY', None)\nSOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_SECRET = env('OAUTH_AAD_SECRET', None)\nSOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_TENANT_ID = env('OAUTH_AAD_TENANT', None)\nAZUREAD_ADMIN_GROUP_ID = env('AZUREAD_ADMIN_GROUP_ID', None)\n\nif AZUREAD_ADMIN_GROUP_ID:\n SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_RESOURCE = 'https://graph.microsoft.com/'\n SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_SCOPE = ['Directory.Read.All']\n\nSOCIAL_AUTH_PIPELINE = [\n 'social_core.pipeline.social_auth.social_details',\n 'social_core.pipeline.social_auth.social_uid',\n 'social_core.pipeline.social_auth.auth_allowed',\n 'social_core.pipeline.social_auth.social_user',\n 'social_core.pipeline.user.get_username',\n 'social_core.pipeline.user.create_user',\n 'social_core.pipeline.social_auth.associate_user',\n 'social_core.pipeline.social_auth.load_extra_data',\n 'social_core.pipeline.user.user_details',\n 'server.social_auth.fetch_github_permissions',\n 'server.social_auth.fetch_azuread_permissions',\n]\n\nROLE_PROJECT_ADMIN = env('ROLE_PROJECT_ADMIN', 'project_admin')\nROLE_ANNOTATOR = env('ROLE_ANNOTATOR', 'annotator')\nROLE_ANNOTATION_APPROVER = env('ROLE_ANNOTATION_APPROVER', 'annotation_approver')\n\n# Database\n# https://docs.djangoproject.com/en/2.0/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n\n# Password validation\n# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\nREST_FRAMEWORK = {\n # Use Django's standard `django.contrib.auth` permissions,\n # or allow read-only access for unauthenticated users.\n 'DEFAULT_PERMISSION_CLASSES': [\n 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly',\n 'rest_framework.permissions.IsAuthenticated',\n ],\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'rest_framework.authentication.SessionAuthentication',\n 'rest_framework.authentication.TokenAuthentication',\n ),\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',\n 'PAGE_SIZE': env.int('DOCCANO_PAGE_SIZE', default=5),\n 'DEFAULT_FILTER_BACKENDS': ('django_filters.rest_framework.DjangoFilterBackend',),\n 'SEARCH_PARAM': 'q',\n 'DEFAULT_RENDERER_CLASSES': (\n 'rest_framework.renderers.JSONRenderer',\n 'rest_framework.renderers.BrowsableAPIRenderer',\n 'rest_framework_xml.renderers.XMLRenderer'\n )\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/2.0/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\nTEST_RUNNER = 'xmlrunner.extra.djangotestrunner.XMLTestRunner'\nTEST_OUTPUT_DIR = path.join(BASE_DIR, 'junitxml')\n\nLOGIN_URL = '/login/'\nLOGIN_REDIRECT_URL = '/projects/'\nLOGOUT_REDIRECT_URL = '/'\n\ndjango_heroku.settings(locals(), test_runner=False)\n\n# Change 'default' database configuration with $DATABASE_URL.\nDATABASES['default'].update(dj_database_url.config(\n env='DATABASE_URL',\n conn_max_age=env.int('DATABASE_CONN_MAX_AGE', 500),\n 
ssl_require='sslmode' not in furl(env('DATABASE_URL', '')).args,\n))\n\n# work-around for dj-database-url: explicitly disable ssl for sqlite\nif DATABASES['default'].get('ENGINE') == 'django.db.backends.sqlite3':\n DATABASES['default'].get('OPTIONS', {}).pop('sslmode', None)\n\n# work-around for dj-database-url: patch ssl for mysql\nif DATABASES['default'].get('ENGINE') == 'django.db.backends.mysql':\n DATABASES['default'].get('OPTIONS', {}).pop('sslmode', None)\n if env('MYSQL_SSL_CA', None):\n DATABASES['default'].setdefault('OPTIONS', {})\\\n .setdefault('ssl', {}).setdefault('ca', env('MYSQL_SSL_CA', None))\n\n# default to a sensible modern driver for Azure SQL\nif DATABASES['default'].get('ENGINE') == 'sql_server.pyodbc':\n DATABASES['default'].setdefault('OPTIONS', {})\\\n .setdefault('driver', 'ODBC Driver 17 for SQL Server')\n\n# Honor the 'X-Forwarded-Proto' header for request.is_secure()\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\nSESSION_COOKIE_SECURE = env.bool('SESSION_COOKIE_SECURE', False)\nCSRF_COOKIE_SECURE = env.bool('CSRF_COOKIE_SECURE', False)\nCSRF_TRUSTED_ORIGINS = env.list('CSRF_TRUSTED_ORIGINS', [])\n\n# Allow all host headers\n# ALLOWED_HOSTS = ['*']\n\n# Size of the batch for creating documents\n# on the import phase\nIMPORT_BATCH_SIZE = env.int('IMPORT_BATCH_SIZE', 500)\n\nGOOGLE_TRACKING_ID = env('GOOGLE_TRACKING_ID', 'UA-125643874-2').strip()\n\nAZURE_APPINSIGHTS_IKEY = env('AZURE_APPINSIGHTS_IKEY', None)\nAPPLICATION_INSIGHTS = {\n 'ikey': AZURE_APPINSIGHTS_IKEY if AZURE_APPINSIGHTS_IKEY else None,\n 'endpoint': env('AZURE_APPINSIGHTS_ENDPOINT', None),\n}\n\n# necessary for email verification of new accounts\nEMAIL_USE_TLS = env.bool('EMAIL_USE_TLS', False)\nEMAIL_HOST = env('EMAIL_HOST', None)\nEMAIL_HOST_USER = env('EMAIL_HOST_USER', None)\nEMAIL_HOST_PASSWORD = env('EMAIL_HOST_PASSWORD', None)\nEMAIL_PORT = env.int('EMAIL_PORT', 587)\n\nif not EMAIL_HOST:\n EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\n\nif DEBUG:\n CORS_ORIGIN_WHITELIST = (\n 'http://127.0.0.1:3000',\n 'http://0.0.0.0:3000',\n 'http://localhost:3000'\n )\n", "path": "app/app/settings.py"}], "after_files": [{"content": "\"\"\"\nDjango settings for app project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/2.0/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/2.0/ref/settings/\n\nAny setting that is configured via an environment variable may\nalso be set in a `.env` file in the project base directory.\n\"\"\"\nfrom os import path\n\nimport django_heroku\nimport dj_database_url\nfrom environs import Env\nfrom furl import furl\n\n\n# Build paths inside the project like this: path.join(BASE_DIR, ...)\nBASE_DIR = path.dirname(path.dirname(path.abspath(__file__)))\n\nenv = Env()\nenv.read_env(path.join(BASE_DIR, '.env'), recurse=False)\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = env('SECRET_KEY',\n 'v8sk33sy82!uw3ty=!jjv5vp7=s2phrzw(m(hrn^f7e_#1h2al')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = env.bool('DEBUG', True)\n\n# True if you want to allow users to be able to create an account\nALLOW_SIGNUP = env.bool('ALLOW_SIGNUP', True)\n\n# ALLOWED_HOSTS = []\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'whitenoise.runserver_nostatic',\n 
'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'server.apps.ServerConfig',\n 'api.apps.ApiConfig',\n 'widget_tweaks',\n 'rest_framework',\n 'rest_framework.authtoken',\n 'django_filters',\n 'social_django',\n 'polymorphic',\n 'webpack_loader',\n 'corsheaders',\n 'drf_yasg'\n]\n\nCLOUD_BROWSER_APACHE_LIBCLOUD_PROVIDER = env('CLOUD_BROWSER_LIBCLOUD_PROVIDER', None)\nCLOUD_BROWSER_APACHE_LIBCLOUD_ACCOUNT = env('CLOUD_BROWSER_LIBCLOUD_ACCOUNT', None)\nCLOUD_BROWSER_APACHE_LIBCLOUD_SECRET_KEY = env('CLOUD_BROWSER_LIBCLOUD_KEY', None)\n\nif CLOUD_BROWSER_APACHE_LIBCLOUD_PROVIDER:\n CLOUD_BROWSER_DATASTORE = 'ApacheLibcloud'\n CLOUD_BROWSER_OBJECT_REDIRECT_URL = '/v1/cloud-upload'\n INSTALLED_APPS.append('cloud_browser')\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'whitenoise.middleware.WhiteNoiseMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'social_django.middleware.SocialAuthExceptionMiddleware',\n 'applicationinsights.django.ApplicationInsightsMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n]\n\nROOT_URLCONF = 'app.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [path.join(BASE_DIR, 'server/templates'), path.join(BASE_DIR, 'authentification/templates')],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'social_django.context_processors.backends',\n 'social_django.context_processors.login_redirect',\n ],\n 'libraries': {\n 'analytics': 'server.templatetags.analytics',\n 'utils_templating': 'authentification.templatetags.utils_templating',\n },\n },\n },\n]\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/2.0/howto/static-files/\n\nSTATIC_URL = '/static/'\nSTATIC_ROOT = path.join(BASE_DIR, 'staticfiles')\n\nSTATICFILES_DIRS = [\n static_path\n for static_path in (\n path.join(BASE_DIR, 'server', 'static', 'assets'),\n path.join(BASE_DIR, 'server', 'static', 'static'),\n )\n if path.isdir(static_path)\n]\n\nSTATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'\n\nWEBPACK_LOADER = {\n 'DEFAULT': {\n 'CACHE': not DEBUG,\n 'BUNDLE_DIR_NAME': 'bundle/',\n 'STATS_FILE': path.join(BASE_DIR, 'server', 'static', 'webpack-stats.json'),\n 'POLL_INTERVAL': 0.1,\n 'TIMEOUT': None,\n 'IGNORE': [r'.*\\.hot-update.js', r'.+\\.map']\n }\n}\n\nWSGI_APPLICATION = 'app.wsgi.application'\n\nAUTHENTICATION_BACKENDS = [\n 'social_core.backends.github.GithubOAuth2',\n 'social_core.backends.azuread_tenant.AzureADTenantOAuth2',\n 'django.contrib.auth.backends.ModelBackend',\n]\n\nHEADER_AUTH_USER_NAME = env('HEADER_AUTH_USER_NAME', '')\nHEADER_AUTH_USER_GROUPS = env('HEADER_AUTH_USER_GROUPS', '')\nHEADER_AUTH_ADMIN_GROUP_NAME = env('HEADER_AUTH_ADMIN_GROUP_NAME', '')\nHEADER_AUTH_GROUPS_SEPERATOR = env('HEADER_AUTH_GROUPS_SEPERATOR', default=',')\n\nif HEADER_AUTH_USER_NAME and HEADER_AUTH_USER_GROUPS and HEADER_AUTH_ADMIN_GROUP_NAME:\n 
MIDDLEWARE.append('server.middleware.HeaderAuthMiddleware')\n AUTHENTICATION_BACKENDS.append('django.contrib.auth.backends.RemoteUserBackend')\n\nSOCIAL_AUTH_GITHUB_KEY = env('OAUTH_GITHUB_KEY', None)\nSOCIAL_AUTH_GITHUB_SECRET = env('OAUTH_GITHUB_SECRET', None)\nGITHUB_ADMIN_ORG_NAME = env('GITHUB_ADMIN_ORG_NAME', None)\nGITHUB_ADMIN_TEAM_NAME = env('GITHUB_ADMIN_TEAM_NAME', None)\n\nif GITHUB_ADMIN_ORG_NAME and GITHUB_ADMIN_TEAM_NAME:\n SOCIAL_AUTH_GITHUB_SCOPE = ['read:org']\n\nSOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_KEY = env('OAUTH_AAD_KEY', None)\nSOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_SECRET = env('OAUTH_AAD_SECRET', None)\nSOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_TENANT_ID = env('OAUTH_AAD_TENANT', None)\nAZUREAD_ADMIN_GROUP_ID = env('AZUREAD_ADMIN_GROUP_ID', None)\n\nif AZUREAD_ADMIN_GROUP_ID:\n SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_RESOURCE = 'https://graph.microsoft.com/'\n SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_SCOPE = ['Directory.Read.All']\n\nSOCIAL_AUTH_PIPELINE = [\n 'social_core.pipeline.social_auth.social_details',\n 'social_core.pipeline.social_auth.social_uid',\n 'social_core.pipeline.social_auth.auth_allowed',\n 'social_core.pipeline.social_auth.social_user',\n 'social_core.pipeline.user.get_username',\n 'social_core.pipeline.user.create_user',\n 'social_core.pipeline.social_auth.associate_user',\n 'social_core.pipeline.social_auth.load_extra_data',\n 'social_core.pipeline.user.user_details',\n 'server.social_auth.fetch_github_permissions',\n 'server.social_auth.fetch_azuread_permissions',\n]\n\nROLE_PROJECT_ADMIN = env('ROLE_PROJECT_ADMIN', 'project_admin')\nROLE_ANNOTATOR = env('ROLE_ANNOTATOR', 'annotator')\nROLE_ANNOTATION_APPROVER = env('ROLE_ANNOTATION_APPROVER', 'annotation_approver')\n\n# Database\n# https://docs.djangoproject.com/en/2.0/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n\n# Password validation\n# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\nREST_FRAMEWORK = {\n # Use Django's standard `django.contrib.auth` permissions,\n # or allow read-only access for unauthenticated users.\n 'DEFAULT_PERMISSION_CLASSES': [\n 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly',\n 'rest_framework.permissions.IsAuthenticated',\n ],\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'rest_framework.authentication.SessionAuthentication',\n 'rest_framework.authentication.TokenAuthentication',\n ),\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',\n 'PAGE_SIZE': env.int('DOCCANO_PAGE_SIZE', default=5),\n 'DEFAULT_FILTER_BACKENDS': ('django_filters.rest_framework.DjangoFilterBackend',),\n 'SEARCH_PARAM': 'q',\n 'DEFAULT_RENDERER_CLASSES': (\n 'rest_framework.renderers.JSONRenderer',\n 'rest_framework.renderers.BrowsableAPIRenderer',\n 'rest_framework_xml.renderers.XMLRenderer'\n )\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/2.0/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\nTEST_RUNNER = 'xmlrunner.extra.djangotestrunner.XMLTestRunner'\nTEST_OUTPUT_DIR 
= path.join(BASE_DIR, 'junitxml')\n\nLOGIN_URL = '/login/'\nLOGIN_REDIRECT_URL = '/projects/'\nLOGOUT_REDIRECT_URL = '/'\n\ndjango_heroku.settings(locals(), test_runner=False)\n\n# Change 'default' database configuration with $DATABASE_URL.\nDATABASES['default'].update(dj_database_url.config(\n env='DATABASE_URL',\n conn_max_age=env.int('DATABASE_CONN_MAX_AGE', 500),\n ssl_require='sslmode' not in furl(env('DATABASE_URL', '')).args,\n))\n\n# work-around for dj-database-url: explicitly disable ssl for sqlite\nif DATABASES['default'].get('ENGINE') == 'django.db.backends.sqlite3':\n DATABASES['default'].get('OPTIONS', {}).pop('sslmode', None)\n\n# work-around for dj-database-url: patch ssl for mysql\nif DATABASES['default'].get('ENGINE') == 'django.db.backends.mysql':\n DATABASES['default'].get('OPTIONS', {}).pop('sslmode', None)\n if env('MYSQL_SSL_CA', None):\n DATABASES['default'].setdefault('OPTIONS', {})\\\n .setdefault('ssl', {}).setdefault('ca', env('MYSQL_SSL_CA', None))\n\n# default to a sensible modern driver for Azure SQL\nif DATABASES['default'].get('ENGINE') == 'sql_server.pyodbc':\n DATABASES['default'].setdefault('OPTIONS', {})\\\n .setdefault('driver', 'ODBC Driver 17 for SQL Server')\n\n# Honor the 'X-Forwarded-Proto' header for request.is_secure()\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\nSESSION_COOKIE_SECURE = env.bool('SESSION_COOKIE_SECURE', False)\nCSRF_COOKIE_SECURE = env.bool('CSRF_COOKIE_SECURE', False)\nCSRF_TRUSTED_ORIGINS = env.list('CSRF_TRUSTED_ORIGINS', [])\n\n# Allow all host headers\n# ALLOWED_HOSTS = ['*']\n\n# Size of the batch for creating documents\n# on the import phase\nIMPORT_BATCH_SIZE = env.int('IMPORT_BATCH_SIZE', 500)\n\nGOOGLE_TRACKING_ID = env('GOOGLE_TRACKING_ID', 'UA-125643874-2').strip()\n\nAZURE_APPINSIGHTS_IKEY = env('AZURE_APPINSIGHTS_IKEY', None)\nAPPLICATION_INSIGHTS = {\n 'ikey': AZURE_APPINSIGHTS_IKEY if AZURE_APPINSIGHTS_IKEY else None,\n 'endpoint': env('AZURE_APPINSIGHTS_ENDPOINT', None),\n}\n\n# necessary for email verification of new accounts\nEMAIL_USE_TLS = env.bool('EMAIL_USE_TLS', False)\nEMAIL_HOST = env('EMAIL_HOST', None)\nEMAIL_HOST_USER = env('EMAIL_HOST_USER', None)\nEMAIL_HOST_PASSWORD = env('EMAIL_HOST_PASSWORD', None)\nEMAIL_PORT = env.int('EMAIL_PORT', 587)\nDEFAULT_FROM_EMAIL = env('DEFAULT_FROM_EMAIL', 'webmaster@localhost')\n\nif not EMAIL_HOST:\n EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\n\nif DEBUG:\n CORS_ORIGIN_WHITELIST = (\n 'http://127.0.0.1:3000',\n 'http://0.0.0.0:3000',\n 'http://localhost:3000'\n )\n", "path": "app/app/settings.py"}]} |
gh_patches_debug_1273 | rasdani/github-patches | git_diff | streamlink__streamlink-2229 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
powerapp.py No plugin can handle URL
## Error Report
- [X] This is a bug report and I have read the Posting Guidelines.
### Description
Streamlink should be able to play the stations on powerapp.com.tr.
### Expected / actual behavior
Passing the page URL to Streamlink does not play the stream. In my web browser (Firefox), the picture and sound play fine.
### Reproduction steps / Explicit stream URLs to test
1. www.powerapp.com.tr/tvs/powertv/
streamlink http://www.powerapp.com.tr/tvs/powertv best
### log output
> streamlink http://www.powerapp.com.tr/tvs/powertv best
error: No plugin can handle URL: http://www.powerapp.com.tr/tvs/powertv
> error: No plugin can handle URL: http://www.powerapp.com.tr/tvs/powertv
error:: The term "error:" was not used as the name of a cmdlet, a function, a script file, or a
recognized executable program. Check the spelling of the name, or if the path is correct (provided
contain) and repeat the process.
In line: 1 character: 1
+ error: No plugin can handle URL: http://www.powerapp.com.tr/tvs/power ...
+~~~~~
+ CategoryInfo: ObjectNotFound: (error :: String) [], CommandNotFoundException
+ FullyQualifiedErrorId: CommandNotFoundException
### Additional comments, screenshots, etc.
Screenshot
https://i.ibb.co/g99nXC0/france.jpg
--- END ISSUE ---
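Before digging into the plugin source below, the failure is easy to localize from the report alone: the stream pages now live under `/tvs/`, while the plugin's URL pattern (shown in full in the file below) only accepts `/tv/`. A minimal standalone check — illustration only, not part of the plugin — makes the mismatch visible:

```python
# Illustration: the plugin's URL pattern matches /tv/ paths but not the /tvs/
# path from the report, hence "No plugin can handle URL".
import re

url_re = re.compile(r"https?://(?:www.)?powerapp.com.tr/tv/(\w+)")

print(url_re.match("http://www.powerapp.com.tr/tv/powertv"))    # <re.Match ...> -> handled
print(url_re.match("http://www.powerapp.com.tr/tvs/powertv"))   # None -> rejected
```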
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/powerapp.py`
Content:
```
1 from __future__ import print_function
2 import re
3
4 from streamlink.plugin import Plugin
5 from streamlink.plugin.api import validate
6 from streamlink.stream import HLSStream
7
8
9 class PowerApp(Plugin):
10 url_re = re.compile(r"https?://(?:www.)?powerapp.com.tr/tv/(\w+)")
11 api_url = "http://api.powergroup.com.tr/Channels/{0}/?appRef=iPowerWeb&apiVersion=11"
12 api_schema = validate.Schema(validate.all({
13 "errorCode": 0,
14 "response": {
15 "channel_stream_url": validate.url()
16 }
17 }, validate.get("response")))
18
19 @classmethod
20 def can_handle_url(cls, url):
21 return cls.url_re.match(url) is not None
22
23 def _get_streams(self):
24 channel = self.url_re.match(self.url).group(1)
25
26 res = self.session.http.get(self.api_url.format(channel))
27 data = self.session.http.json(res, schema=self.api_schema)
28
29 return HLSStream.parse_variant_playlist(self.session, data["channel_stream_url"])
30
31
32 __plugin__ = PowerApp
33
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/streamlink/plugins/powerapp.py b/src/streamlink/plugins/powerapp.py
--- a/src/streamlink/plugins/powerapp.py
+++ b/src/streamlink/plugins/powerapp.py
@@ -7,7 +7,7 @@
class PowerApp(Plugin):
- url_re = re.compile(r"https?://(?:www.)?powerapp.com.tr/tv/(\w+)")
+ url_re = re.compile(r"https?://(?:www.)?powerapp.com.tr/tvs?/(\w+)")
api_url = "http://api.powergroup.com.tr/Channels/{0}/?appRef=iPowerWeb&apiVersion=11"
api_schema = validate.Schema(validate.all({
"errorCode": 0,
| {"golden_diff": "diff --git a/src/streamlink/plugins/powerapp.py b/src/streamlink/plugins/powerapp.py\n--- a/src/streamlink/plugins/powerapp.py\n+++ b/src/streamlink/plugins/powerapp.py\n@@ -7,7 +7,7 @@\n \n \n class PowerApp(Plugin):\n- url_re = re.compile(r\"https?://(?:www.)?powerapp.com.tr/tv/(\\w+)\")\n+ url_re = re.compile(r\"https?://(?:www.)?powerapp.com.tr/tvs?/(\\w+)\")\n api_url = \"http://api.powergroup.com.tr/Channels/{0}/?appRef=iPowerWeb&apiVersion=11\"\n api_schema = validate.Schema(validate.all({\n \"errorCode\": 0,\n", "issue": "powerapp.py No plugin can handle URL\n## Error Report\r\n\r\n- [X] This is a bug report and I have read the Posting Guidelines.\r\n\r\n### Description\r\n\r\n powerapp.com.tr should be able to play the stations\r\n\r\n\r\n### Expected / actual behavior\r\n\r\nInserting the page in the streamlink does not play the stream. About my web browser Firefox I see the picture and hear the sound synonymous\r\n\r\n### Reproduction steps / Explicit stream URLs to test\r\n\r\n1.www.powerapp.com.tr/tvs/powertv/\r\n\r\nstreamlink http://www.powerapp.com.tr/tvs/powertv best\r\n\r\n### log output\r\n\r\n> streamlink http://www.powerapp.com.tr/tvs/powertv best\r\nerror: No plugin can handle URL: http://www.powerapp.com.tr/tvs/powertv\r\n> error: No plugin can handle URL: http://www.powerapp.com.tr/tvs/powertv\r\nerror:: The term \"error:\" was not used as the name of a cmdlet, a function, a script file, or a\r\nrecognized executable program. Check the spelling of the name, or if the path is correct (provided\r\ncontain) and repeat the process.\r\nIn line: 1 character: 1\r\n+ error: No plugin can handle URL: http://www.powerapp.com.tr/tvs/power ...\r\n+~~~~~\r\n + CategoryInfo: ObjectNotFound: (error :: String) [], CommandNotFoundException\r\n + FullyQualifiedErrorId: CommandNotFoundException\r\n\r\n\r\n### Additional comments, screenshots, etc.\r\n\r\n Screenshot\r\n\r\nhttps://i.ibb.co/g99nXC0/france.jpg\n", "before_files": [{"content": "from __future__ import print_function\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream import HLSStream\n\n\nclass PowerApp(Plugin):\n url_re = re.compile(r\"https?://(?:www.)?powerapp.com.tr/tv/(\\w+)\")\n api_url = \"http://api.powergroup.com.tr/Channels/{0}/?appRef=iPowerWeb&apiVersion=11\"\n api_schema = validate.Schema(validate.all({\n \"errorCode\": 0,\n \"response\": {\n \"channel_stream_url\": validate.url()\n }\n }, validate.get(\"response\")))\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def _get_streams(self):\n channel = self.url_re.match(self.url).group(1)\n\n res = self.session.http.get(self.api_url.format(channel))\n data = self.session.http.json(res, schema=self.api_schema)\n\n return HLSStream.parse_variant_playlist(self.session, data[\"channel_stream_url\"])\n\n\n__plugin__ = PowerApp\n", "path": "src/streamlink/plugins/powerapp.py"}], "after_files": [{"content": "from __future__ import print_function\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream import HLSStream\n\n\nclass PowerApp(Plugin):\n url_re = re.compile(r\"https?://(?:www.)?powerapp.com.tr/tvs?/(\\w+)\")\n api_url = \"http://api.powergroup.com.tr/Channels/{0}/?appRef=iPowerWeb&apiVersion=11\"\n api_schema = validate.Schema(validate.all({\n \"errorCode\": 0,\n \"response\": {\n \"channel_stream_url\": validate.url()\n }\n }, validate.get(\"response\")))\n\n 
@classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def _get_streams(self):\n channel = self.url_re.match(self.url).group(1)\n\n res = self.session.http.get(self.api_url.format(channel))\n data = self.session.http.json(res, schema=self.api_schema)\n\n return HLSStream.parse_variant_playlist(self.session, data[\"channel_stream_url\"])\n\n\n__plugin__ = PowerApp\n", "path": "src/streamlink/plugins/powerapp.py"}]} |
gh_patches_debug_1274 | rasdani/github-patches | git_diff | getredash__redash-784 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AttributeError: 'datetime.timedelta' object has no attribute 'isoformat'
On the latest 0.9.2-rc:
```
[2016-01-21 14:30:36,838: ERROR/MainProcess] Task redash.tasks.execute_query[766d3f9f-68a6-4a64-8cd9-b7e4e18bf2af] raised unexpected: AttributeError("'datetime.timedelta' object has no attribute 'isoformat'",)
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/celery/app/trace.py", line 240, in trace_task
R = retval = fun(*args, **kwargs)
File "/opt/redash/redash/tasks.py", line 31, in __call__
return super(BaseTask, self).__call__(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/celery/app/trace.py", line 437, in __protected_call__
return self.run(*args, **kwargs)
File "/opt/redash/redash/tasks.py", line 286, in execute_query
data, error = query_runner.run_query(annotated_query)
File "/opt/redash/redash/query_runner/pg.py", line 132, in run_query
json_data = json.dumps(data, cls=JSONEncoder)
File "/usr/lib/python2.7/json/__init__.py", line 250, in dumps
sort_keys=sort_keys, **kw).encode(obj)
File "/usr/lib/python2.7/json/encoder.py", line 207, in encode
chunks = self.iterencode(o, _one_shot=True)
File "/usr/lib/python2.7/json/encoder.py", line 270, in iterencode
return _iterencode(o, 0)
File "/opt/redash/redash/utils.py", line 57, in default
return o.isoformat()
AttributeError: 'datetime.timedelta' object has no attribute 'isoformat'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redash/utils.py`
Content:
```
1 import cStringIO
2 import csv
3 import codecs
4 import decimal
5 import datetime
6 import json
7 import random
8 import re
9 import hashlib
10 import pytz
11
12 COMMENTS_REGEX = re.compile("/\*.*?\*/")
13
14
15 def utcnow():
16 """Return datetime.now value with timezone specified.
17
18 Without the timezone data, when the timestamp stored to the database it gets the current timezone of the server,
19 which leads to errors in calculations.
20 """
21 return datetime.datetime.now(pytz.utc)
22
23
24 def slugify(s):
25 return re.sub('[^a-z0-9_\-]+', '-', s.lower())
26
27
28 def gen_query_hash(sql):
29 """Returns hash of the given query after stripping all comments, line breaks and multiple
30 spaces, and lower casing all text.
31
32 TODO: possible issue - the following queries will get the same id:
33 1. SELECT 1 FROM table WHERE column='Value';
34 2. SELECT 1 FROM table where column='value';
35 """
36 sql = COMMENTS_REGEX.sub("", sql)
37 sql = "".join(sql.split()).lower()
38 return hashlib.md5(sql.encode('utf-8')).hexdigest()
39
40
41 def generate_token(length):
42 chars = ('abcdefghijklmnopqrstuvwxyz'
43 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
44 '0123456789')
45
46 rand = random.SystemRandom()
47 return ''.join(rand.choice(chars) for x in range(length))
48
49 class JSONEncoder(json.JSONEncoder):
50 """Custom JSON encoding class, to handle Decimal and datetime.date instances.
51 """
52 def default(self, o):
53 if isinstance(o, decimal.Decimal):
54 return float(o)
55
56 if isinstance(o, (datetime.date, datetime.time, datetime.timedelta)):
57 return o.isoformat()
58
59 super(JSONEncoder, self).default(o)
60
61
62 def json_dumps(data):
63 return json.dumps(data, cls=JSONEncoder)
64
65
66 def build_url(request, host, path):
67 parts = request.host.split(':')
68 if len(parts) > 1:
69 port = parts[1]
70 if (port, request.scheme) not in (('80', 'http'), ('443', 'https')):
71 host = '{}:{}'.format(host, port)
72
73 return "{}://{}{}".format(request.scheme, host, path)
74
75
76 class UnicodeWriter:
77 """
78 A CSV writer which will write rows to CSV file "f",
79 which is encoded in the given encoding.
80 """
81 def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
82 # Redirect output to a queue
83 self.queue = cStringIO.StringIO()
84 self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
85 self.stream = f
86 self.encoder = codecs.getincrementalencoder(encoding)()
87
88 def _encode_utf8(self, val):
89 if isinstance(val, (unicode, str)):
90 return val.encode('utf-8')
91
92 return val
93
94 def writerow(self, row):
95 self.writer.writerow([self._encode_utf8(s) for s in row])
96 # Fetch UTF-8 output from the queue ...
97 data = self.queue.getvalue()
98 data = data.decode("utf-8")
99 # ... and reencode it into the target encoding
100 data = self.encoder.encode(data)
101 # write to the target stream
102 self.stream.write(data)
103 # empty queue
104 self.queue.truncate(0)
105
106 def writerows(self, rows):
107 for row in rows:
108 self.writerow(row)
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/redash/utils.py b/redash/utils.py
--- a/redash/utils.py
+++ b/redash/utils.py
@@ -53,9 +53,12 @@
if isinstance(o, decimal.Decimal):
return float(o)
- if isinstance(o, (datetime.date, datetime.time, datetime.timedelta)):
+ if isinstance(o, (datetime.date, datetime.time)):
return o.isoformat()
-
+
+ if isinstance(o, datetime.timedelta):
+ return str(o)
+
super(JSONEncoder, self).default(o)
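For context on the patch above: `datetime.timedelta` has no `isoformat()` method at all, so the old combined `isinstance` branch could never serialize it — hence the `AttributeError` in the traceback. The patched branch falls back to `str()`, whose output looks like this (illustration only):

```python
# Illustration: the patched encoder serializes timedelta values with str().
import datetime

delta = datetime.timedelta(days=1, hours=2, minutes=3, seconds=4)
# delta.isoformat()   # would raise AttributeError, as in the traceback
print(str(delta))     # -> "1 day, 2:03:04", the value the patched encoder returns
```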
| {"golden_diff": "diff --git a/redash/utils.py b/redash/utils.py\n--- a/redash/utils.py\n+++ b/redash/utils.py\n@@ -53,9 +53,12 @@\n if isinstance(o, decimal.Decimal):\n return float(o)\n \n- if isinstance(o, (datetime.date, datetime.time, datetime.timedelta)):\n+ if isinstance(o, (datetime.date, datetime.time)):\n return o.isoformat()\n- \n+\n+ if isinstance(o, datetime.timedelta):\n+ return str(o)\n+\n super(JSONEncoder, self).default(o)\n", "issue": "AttributeError: 'datetime.timedelta' object has no attribute 'isoformat'\nOn the latest 0.9.2-rc:\n\n```\n[2016-01-21 14:30:36,838: ERROR/MainProcess] Task redash.tasks.execute_query[766d3f9f-68a6-4a64-8cd9-b7e4e18bf2af] raised unexpected: AttributeError(\"'datetime.timedelta' object has no attribute 'isoformat'\",)\nTraceback (most recent call last):\n File \"/usr/local/lib/python2.7/dist-packages/celery/app/trace.py\", line 240, in trace_task\n R = retval = fun(*args, **kwargs)\n File \"/opt/redash/redash/tasks.py\", line 31, in __call__\n return super(BaseTask, self).__call__(*args, **kwargs)\n File \"/usr/local/lib/python2.7/dist-packages/celery/app/trace.py\", line 437, in __protected_call__\n return self.run(*args, **kwargs)\n File \"/opt/redash/redash/tasks.py\", line 286, in execute_query\n data, error = query_runner.run_query(annotated_query)\n File \"/opt/redash/redash/query_runner/pg.py\", line 132, in run_query\n json_data = json.dumps(data, cls=JSONEncoder)\n File \"/usr/lib/python2.7/json/__init__.py\", line 250, in dumps\n sort_keys=sort_keys, **kw).encode(obj)\n File \"/usr/lib/python2.7/json/encoder.py\", line 207, in encode\n chunks = self.iterencode(o, _one_shot=True)\n File \"/usr/lib/python2.7/json/encoder.py\", line 270, in iterencode\n return _iterencode(o, 0)\n File \"/opt/redash/redash/utils.py\", line 57, in default\n return o.isoformat()\nAttributeError: 'datetime.timedelta' object has no attribute 'isoformat'\n```\n\n", "before_files": [{"content": "import cStringIO\nimport csv\nimport codecs\nimport decimal\nimport datetime\nimport json\nimport random\nimport re\nimport hashlib\nimport pytz\n\nCOMMENTS_REGEX = re.compile(\"/\\*.*?\\*/\")\n\n\ndef utcnow():\n \"\"\"Return datetime.now value with timezone specified.\n\n Without the timezone data, when the timestamp stored to the database it gets the current timezone of the server,\n which leads to errors in calculations.\n \"\"\"\n return datetime.datetime.now(pytz.utc)\n\n\ndef slugify(s):\n return re.sub('[^a-z0-9_\\-]+', '-', s.lower())\n\n\ndef gen_query_hash(sql):\n \"\"\"Returns hash of the given query after stripping all comments, line breaks and multiple\n spaces, and lower casing all text.\n\n TODO: possible issue - the following queries will get the same id:\n 1. SELECT 1 FROM table WHERE column='Value';\n 2. 
SELECT 1 FROM table where column='value';\n \"\"\"\n sql = COMMENTS_REGEX.sub(\"\", sql)\n sql = \"\".join(sql.split()).lower()\n return hashlib.md5(sql.encode('utf-8')).hexdigest()\n\n\ndef generate_token(length):\n chars = ('abcdefghijklmnopqrstuvwxyz'\n 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n '0123456789')\n\n rand = random.SystemRandom()\n return ''.join(rand.choice(chars) for x in range(length))\n\nclass JSONEncoder(json.JSONEncoder):\n \"\"\"Custom JSON encoding class, to handle Decimal and datetime.date instances.\n \"\"\"\n def default(self, o):\n if isinstance(o, decimal.Decimal):\n return float(o)\n\n if isinstance(o, (datetime.date, datetime.time, datetime.timedelta)):\n return o.isoformat()\n \n super(JSONEncoder, self).default(o)\n\n\ndef json_dumps(data):\n return json.dumps(data, cls=JSONEncoder)\n\n\ndef build_url(request, host, path):\n parts = request.host.split(':')\n if len(parts) > 1:\n port = parts[1]\n if (port, request.scheme) not in (('80', 'http'), ('443', 'https')):\n host = '{}:{}'.format(host, port)\n\n return \"{}://{}{}\".format(request.scheme, host, path)\n\n\nclass UnicodeWriter:\n \"\"\"\n A CSV writer which will write rows to CSV file \"f\",\n which is encoded in the given encoding.\n \"\"\"\n def __init__(self, f, dialect=csv.excel, encoding=\"utf-8\", **kwds):\n # Redirect output to a queue\n self.queue = cStringIO.StringIO()\n self.writer = csv.writer(self.queue, dialect=dialect, **kwds)\n self.stream = f\n self.encoder = codecs.getincrementalencoder(encoding)()\n\n def _encode_utf8(self, val):\n if isinstance(val, (unicode, str)):\n return val.encode('utf-8')\n\n return val\n\n def writerow(self, row):\n self.writer.writerow([self._encode_utf8(s) for s in row])\n # Fetch UTF-8 output from the queue ...\n data = self.queue.getvalue()\n data = data.decode(\"utf-8\")\n # ... and reencode it into the target encoding\n data = self.encoder.encode(data)\n # write to the target stream\n self.stream.write(data)\n # empty queue\n self.queue.truncate(0)\n\n def writerows(self, rows):\n for row in rows:\n self.writerow(row)\n", "path": "redash/utils.py"}], "after_files": [{"content": "import cStringIO\nimport csv\nimport codecs\nimport decimal\nimport datetime\nimport json\nimport random\nimport re\nimport hashlib\nimport pytz\n\nCOMMENTS_REGEX = re.compile(\"/\\*.*?\\*/\")\n\n\ndef utcnow():\n \"\"\"Return datetime.now value with timezone specified.\n\n Without the timezone data, when the timestamp stored to the database it gets the current timezone of the server,\n which leads to errors in calculations.\n \"\"\"\n return datetime.datetime.now(pytz.utc)\n\n\ndef slugify(s):\n return re.sub('[^a-z0-9_\\-]+', '-', s.lower())\n\n\ndef gen_query_hash(sql):\n \"\"\"Returns hash of the given query after stripping all comments, line breaks and multiple\n spaces, and lower casing all text.\n\n TODO: possible issue - the following queries will get the same id:\n 1. SELECT 1 FROM table WHERE column='Value';\n 2. 
SELECT 1 FROM table where column='value';\n \"\"\"\n sql = COMMENTS_REGEX.sub(\"\", sql)\n sql = \"\".join(sql.split()).lower()\n return hashlib.md5(sql.encode('utf-8')).hexdigest()\n\n\ndef generate_token(length):\n chars = ('abcdefghijklmnopqrstuvwxyz'\n 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n '0123456789')\n\n rand = random.SystemRandom()\n return ''.join(rand.choice(chars) for x in range(length))\n\nclass JSONEncoder(json.JSONEncoder):\n \"\"\"Custom JSON encoding class, to handle Decimal and datetime.date instances.\n \"\"\"\n def default(self, o):\n if isinstance(o, decimal.Decimal):\n return float(o)\n\n if isinstance(o, (datetime.date, datetime.time)):\n return o.isoformat()\n\n if isinstance(o, datetime.timedelta):\n return str(o)\n\n super(JSONEncoder, self).default(o)\n\n\ndef json_dumps(data):\n return json.dumps(data, cls=JSONEncoder)\n\n\ndef build_url(request, host, path):\n parts = request.host.split(':')\n if len(parts) > 1:\n port = parts[1]\n if (port, request.scheme) not in (('80', 'http'), ('443', 'https')):\n host = '{}:{}'.format(host, port)\n\n return \"{}://{}{}\".format(request.scheme, host, path)\n\n\nclass UnicodeWriter:\n \"\"\"\n A CSV writer which will write rows to CSV file \"f\",\n which is encoded in the given encoding.\n \"\"\"\n def __init__(self, f, dialect=csv.excel, encoding=\"utf-8\", **kwds):\n # Redirect output to a queue\n self.queue = cStringIO.StringIO()\n self.writer = csv.writer(self.queue, dialect=dialect, **kwds)\n self.stream = f\n self.encoder = codecs.getincrementalencoder(encoding)()\n\n def _encode_utf8(self, val):\n if isinstance(val, (unicode, str)):\n return val.encode('utf-8')\n\n return val\n\n def writerow(self, row):\n self.writer.writerow([self._encode_utf8(s) for s in row])\n # Fetch UTF-8 output from the queue ...\n data = self.queue.getvalue()\n data = data.decode(\"utf-8\")\n # ... and reencode it into the target encoding\n data = self.encoder.encode(data)\n # write to the target stream\n self.stream.write(data)\n # empty queue\n self.queue.truncate(0)\n\n def writerows(self, rows):\n for row in rows:\n self.writerow(row)\n", "path": "redash/utils.py"}]} |
gh_patches_debug_1275 | rasdani/github-patches | git_diff | pypa__setuptools-2538 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Newlines in the `description` field produce a malformed PKG-INFO
We discovered this accidentally by way of https://github.com/zopefoundation/zc.relation/issues/4#issuecomment-397532224: if you pass a string containing newlines to the `description` argument of `setup()`, setuptools will generate a malformed PKG-INFO.
To reproduce:
```
# setup.py
from setuptools import setup
setup(
name='test-package',
version='0.1',
author='Blah Blah',
author_email='[email protected]',
description='description\n\n',
py_modules=['blah'],
)
```
(The contents of `blah.py` do not matter, but the file should exist.)
Run `python setup.py sdist` and then inspect `test_package.egg-info/PKG-INFO`. For me, with setuptools 39.1.0, it looks like this:
```
Metadata-Version: 1.0
Name: test-package
Version: 0.1
Summary: description
Home-page: UNKNOWN
Author: Blah Blah
Author-email: [email protected]
License: UNKNOWN
Description: UNKNOWN
Platform: UNKNOWN
```
The extra newlines lead tools to treat the rest of the PKG-INFO as a long_description.
I would expect `setuptools` to complain about the newlines in the `description` field, or at least escape them properly (i.e. prepend whitespace, like it does for the `long_description` field).
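For illustration, a helper along these lines (purely a sketch, not existing setuptools code; the name `sanitize_description` is made up) could validate or collapse the value before it is written out:

```
# Hypothetical sketch, not part of setuptools: normalize a summary to a single line.
import warnings

def sanitize_description(value):
    if '\n' in value:
        warnings.warn("newlines are not allowed in 'description'; collapsing to one line")
        value = ' '.join(value.split())
    return value

print(sanitize_description('description\n\n'))  # -> 'description'
```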
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setuptools/dist.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 __all__ = ['Distribution']
3
4 import io
5 import sys
6 import re
7 import os
8 import warnings
9 import numbers
10 import distutils.log
11 import distutils.core
12 import distutils.cmd
13 import distutils.dist
14 from distutils.util import strtobool
15 from distutils.debug import DEBUG
16 from distutils.fancy_getopt import translate_longopt
17 import itertools
18
19 from collections import defaultdict
20 from email import message_from_file
21
22 from distutils.errors import DistutilsOptionError, DistutilsSetupError
23 from distutils.util import rfc822_escape
24 from distutils.version import StrictVersion
25
26 from setuptools.extern import packaging
27 from setuptools.extern import ordered_set
28
29 from . import SetuptoolsDeprecationWarning
30
31 import setuptools
32 from setuptools import windows_support
33 from setuptools.monkey import get_unpatched
34 from setuptools.config import parse_configuration
35 import pkg_resources
36
37 __import__('setuptools.extern.packaging.specifiers')
38 __import__('setuptools.extern.packaging.version')
39
40
41 def _get_unpatched(cls):
42 warnings.warn("Do not call this function", DistDeprecationWarning)
43 return get_unpatched(cls)
44
45
46 def get_metadata_version(self):
47 mv = getattr(self, 'metadata_version', None)
48
49 if mv is None:
50 if self.long_description_content_type or self.provides_extras:
51 mv = StrictVersion('2.1')
52 elif (self.maintainer is not None or
53 self.maintainer_email is not None or
54 getattr(self, 'python_requires', None) is not None or
55 self.project_urls):
56 mv = StrictVersion('1.2')
57 elif (self.provides or self.requires or self.obsoletes or
58 self.classifiers or self.download_url):
59 mv = StrictVersion('1.1')
60 else:
61 mv = StrictVersion('1.0')
62
63 self.metadata_version = mv
64
65 return mv
66
67
68 def read_pkg_file(self, file):
69 """Reads the metadata values from a file object."""
70 msg = message_from_file(file)
71
72 def _read_field(name):
73 value = msg[name]
74 if value == 'UNKNOWN':
75 return None
76 return value
77
78 def _read_list(name):
79 values = msg.get_all(name, None)
80 if values == []:
81 return None
82 return values
83
84 self.metadata_version = StrictVersion(msg['metadata-version'])
85 self.name = _read_field('name')
86 self.version = _read_field('version')
87 self.description = _read_field('summary')
88 # we are filling author only.
89 self.author = _read_field('author')
90 self.maintainer = None
91 self.author_email = _read_field('author-email')
92 self.maintainer_email = None
93 self.url = _read_field('home-page')
94 self.license = _read_field('license')
95
96 if 'download-url' in msg:
97 self.download_url = _read_field('download-url')
98 else:
99 self.download_url = None
100
101 self.long_description = _read_field('description')
102 self.description = _read_field('summary')
103
104 if 'keywords' in msg:
105 self.keywords = _read_field('keywords').split(',')
106
107 self.platforms = _read_list('platform')
108 self.classifiers = _read_list('classifier')
109
110 # PEP 314 - these fields only exist in 1.1
111 if self.metadata_version == StrictVersion('1.1'):
112 self.requires = _read_list('requires')
113 self.provides = _read_list('provides')
114 self.obsoletes = _read_list('obsoletes')
115 else:
116 self.requires = None
117 self.provides = None
118 self.obsoletes = None
119
120
121 def single_line(val):
122 # quick and dirty validation for description pypa/setuptools#1390
123 if '\n' in val:
124 raise ValueError("newlines not allowed")
125 return val
126
127
128 # Based on Python 3.5 version
129 def write_pkg_file(self, file): # noqa: C901 # is too complex (14) # FIXME
130 """Write the PKG-INFO format data to a file object.
131 """
132 version = self.get_metadata_version()
133
134 def write_field(key, value):
135 file.write("%s: %s\n" % (key, value))
136
137 write_field('Metadata-Version', str(version))
138 write_field('Name', self.get_name())
139 write_field('Version', self.get_version())
140 write_field('Summary', single_line(self.get_description()))
141 write_field('Home-page', self.get_url())
142
143 if version < StrictVersion('1.2'):
144 write_field('Author', self.get_contact())
145 write_field('Author-email', self.get_contact_email())
146 else:
147 optional_fields = (
148 ('Author', 'author'),
149 ('Author-email', 'author_email'),
150 ('Maintainer', 'maintainer'),
151 ('Maintainer-email', 'maintainer_email'),
152 )
153
154 for field, attr in optional_fields:
155 attr_val = getattr(self, attr)
156
157 if attr_val is not None:
158 write_field(field, attr_val)
159
160 write_field('License', self.get_license())
161 if self.download_url:
162 write_field('Download-URL', self.download_url)
163 for project_url in self.project_urls.items():
164 write_field('Project-URL', '%s, %s' % project_url)
165
166 long_desc = rfc822_escape(self.get_long_description())
167 write_field('Description', long_desc)
168
169 keywords = ','.join(self.get_keywords())
170 if keywords:
171 write_field('Keywords', keywords)
172
173 if version >= StrictVersion('1.2'):
174 for platform in self.get_platforms():
175 write_field('Platform', platform)
176 else:
177 self._write_list(file, 'Platform', self.get_platforms())
178
179 self._write_list(file, 'Classifier', self.get_classifiers())
180
181 # PEP 314
182 self._write_list(file, 'Requires', self.get_requires())
183 self._write_list(file, 'Provides', self.get_provides())
184 self._write_list(file, 'Obsoletes', self.get_obsoletes())
185
186 # Setuptools specific for PEP 345
187 if hasattr(self, 'python_requires'):
188 write_field('Requires-Python', self.python_requires)
189
190 # PEP 566
191 if self.long_description_content_type:
192 write_field(
193 'Description-Content-Type',
194 self.long_description_content_type
195 )
196 if self.provides_extras:
197 for extra in self.provides_extras:
198 write_field('Provides-Extra', extra)
199
200
201 sequence = tuple, list
202
203
204 def check_importable(dist, attr, value):
205 try:
206 ep = pkg_resources.EntryPoint.parse('x=' + value)
207 assert not ep.extras
208 except (TypeError, ValueError, AttributeError, AssertionError) as e:
209 raise DistutilsSetupError(
210 "%r must be importable 'module:attrs' string (got %r)"
211 % (attr, value)
212 ) from e
213
214
215 def assert_string_list(dist, attr, value):
216 """Verify that value is a string list"""
217 try:
218 # verify that value is a list or tuple to exclude unordered
219 # or single-use iterables
220 assert isinstance(value, (list, tuple))
221 # verify that elements of value are strings
222 assert ''.join(value) != value
223 except (TypeError, ValueError, AttributeError, AssertionError) as e:
224 raise DistutilsSetupError(
225 "%r must be a list of strings (got %r)" % (attr, value)
226 ) from e
227
228
229 def check_nsp(dist, attr, value):
230 """Verify that namespace packages are valid"""
231 ns_packages = value
232 assert_string_list(dist, attr, ns_packages)
233 for nsp in ns_packages:
234 if not dist.has_contents_for(nsp):
235 raise DistutilsSetupError(
236 "Distribution contains no modules or packages for " +
237 "namespace package %r" % nsp
238 )
239 parent, sep, child = nsp.rpartition('.')
240 if parent and parent not in ns_packages:
241 distutils.log.warn(
242 "WARNING: %r is declared as a package namespace, but %r"
243 " is not: please correct this in setup.py", nsp, parent
244 )
245
246
247 def check_extras(dist, attr, value):
248 """Verify that extras_require mapping is valid"""
249 try:
250 list(itertools.starmap(_check_extra, value.items()))
251 except (TypeError, ValueError, AttributeError) as e:
252 raise DistutilsSetupError(
253 "'extras_require' must be a dictionary whose values are "
254 "strings or lists of strings containing valid project/version "
255 "requirement specifiers."
256 ) from e
257
258
259 def _check_extra(extra, reqs):
260 name, sep, marker = extra.partition(':')
261 if marker and pkg_resources.invalid_marker(marker):
262 raise DistutilsSetupError("Invalid environment marker: " + marker)
263 list(pkg_resources.parse_requirements(reqs))
264
265
266 def assert_bool(dist, attr, value):
267 """Verify that value is True, False, 0, or 1"""
268 if bool(value) != value:
269 tmpl = "{attr!r} must be a boolean value (got {value!r})"
270 raise DistutilsSetupError(tmpl.format(attr=attr, value=value))
271
272
273 def check_requirements(dist, attr, value):
274 """Verify that install_requires is a valid requirements list"""
275 try:
276 list(pkg_resources.parse_requirements(value))
277 if isinstance(value, (dict, set)):
278 raise TypeError("Unordered types are not allowed")
279 except (TypeError, ValueError) as error:
280 tmpl = (
281 "{attr!r} must be a string or list of strings "
282 "containing valid project/version requirement specifiers; {error}"
283 )
284 raise DistutilsSetupError(
285 tmpl.format(attr=attr, error=error)
286 ) from error
287
288
289 def check_specifier(dist, attr, value):
290 """Verify that value is a valid version specifier"""
291 try:
292 packaging.specifiers.SpecifierSet(value)
293 except packaging.specifiers.InvalidSpecifier as error:
294 tmpl = (
295 "{attr!r} must be a string "
296 "containing valid version specifiers; {error}"
297 )
298 raise DistutilsSetupError(
299 tmpl.format(attr=attr, error=error)
300 ) from error
301
302
303 def check_entry_points(dist, attr, value):
304 """Verify that entry_points map is parseable"""
305 try:
306 pkg_resources.EntryPoint.parse_map(value)
307 except ValueError as e:
308 raise DistutilsSetupError(e) from e
309
310
311 def check_test_suite(dist, attr, value):
312 if not isinstance(value, str):
313 raise DistutilsSetupError("test_suite must be a string")
314
315
316 def check_package_data(dist, attr, value):
317 """Verify that value is a dictionary of package names to glob lists"""
318 if not isinstance(value, dict):
319 raise DistutilsSetupError(
320 "{!r} must be a dictionary mapping package names to lists of "
321 "string wildcard patterns".format(attr))
322 for k, v in value.items():
323 if not isinstance(k, str):
324 raise DistutilsSetupError(
325 "keys of {!r} dict must be strings (got {!r})"
326 .format(attr, k)
327 )
328 assert_string_list(dist, 'values of {!r} dict'.format(attr), v)
329
330
331 def check_packages(dist, attr, value):
332 for pkgname in value:
333 if not re.match(r'\w+(\.\w+)*', pkgname):
334 distutils.log.warn(
335 "WARNING: %r not a valid package name; please use only "
336 ".-separated package names in setup.py", pkgname
337 )
338
339
340 _Distribution = get_unpatched(distutils.core.Distribution)
341
342
343 class Distribution(_Distribution):
344 """Distribution with support for tests and package data
345
346 This is an enhanced version of 'distutils.dist.Distribution' that
347 effectively adds the following new optional keyword arguments to 'setup()':
348
349 'install_requires' -- a string or sequence of strings specifying project
350 versions that the distribution requires when installed, in the format
351 used by 'pkg_resources.require()'. They will be installed
352 automatically when the package is installed. If you wish to use
353 packages that are not available in PyPI, or want to give your users an
354 alternate download location, you can add a 'find_links' option to the
355 '[easy_install]' section of your project's 'setup.cfg' file, and then
356 setuptools will scan the listed web pages for links that satisfy the
357 requirements.
358
359 'extras_require' -- a dictionary mapping names of optional "extras" to the
360 additional requirement(s) that using those extras incurs. For example,
361 this::
362
363 extras_require = dict(reST = ["docutils>=0.3", "reSTedit"])
364
365 indicates that the distribution can optionally provide an extra
366 capability called "reST", but it can only be used if docutils and
367 reSTedit are installed. If the user installs your package using
368 EasyInstall and requests one of your extras, the corresponding
369 additional requirements will be installed if needed.
370
371 'test_suite' -- the name of a test suite to run for the 'test' command.
372 If the user runs 'python setup.py test', the package will be installed,
373 and the named test suite will be run. The format is the same as
374 would be used on a 'unittest.py' command line. That is, it is the
375 dotted name of an object to import and call to generate a test suite.
376
377 'package_data' -- a dictionary mapping package names to lists of filenames
378 or globs to use to find data files contained in the named packages.
379 If the dictionary has filenames or globs listed under '""' (the empty
380 string), those names will be searched for in every package, in addition
381 to any names for the specific package. Data files found using these
382 names/globs will be installed along with the package, in the same
383 location as the package. Note that globs are allowed to reference
384 the contents of non-package subdirectories, as long as you use '/' as
385 a path separator. (Globs are automatically converted to
386 platform-specific paths at runtime.)
387
388 In addition to these new keywords, this class also has several new methods
389 for manipulating the distribution's contents. For example, the 'include()'
390 and 'exclude()' methods can be thought of as in-place add and subtract
391 commands that add or remove packages, modules, extensions, and so on from
392 the distribution.
393 """
394
395 _DISTUTILS_UNSUPPORTED_METADATA = {
396 'long_description_content_type': None,
397 'project_urls': dict,
398 'provides_extras': ordered_set.OrderedSet,
399 'license_files': ordered_set.OrderedSet,
400 }
401
402 _patched_dist = None
403
404 def patch_missing_pkg_info(self, attrs):
405 # Fake up a replacement for the data that would normally come from
406 # PKG-INFO, but which might not yet be built if this is a fresh
407 # checkout.
408 #
409 if not attrs or 'name' not in attrs or 'version' not in attrs:
410 return
411 key = pkg_resources.safe_name(str(attrs['name'])).lower()
412 dist = pkg_resources.working_set.by_key.get(key)
413 if dist is not None and not dist.has_metadata('PKG-INFO'):
414 dist._version = pkg_resources.safe_version(str(attrs['version']))
415 self._patched_dist = dist
416
417 def __init__(self, attrs=None):
418 have_package_data = hasattr(self, "package_data")
419 if not have_package_data:
420 self.package_data = {}
421 attrs = attrs or {}
422 self.dist_files = []
423 # Filter-out setuptools' specific options.
424 self.src_root = attrs.pop("src_root", None)
425 self.patch_missing_pkg_info(attrs)
426 self.dependency_links = attrs.pop('dependency_links', [])
427 self.setup_requires = attrs.pop('setup_requires', [])
428 for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
429 vars(self).setdefault(ep.name, None)
430 _Distribution.__init__(self, {
431 k: v for k, v in attrs.items()
432 if k not in self._DISTUTILS_UNSUPPORTED_METADATA
433 })
434
435 # Fill-in missing metadata fields not supported by distutils.
436 # Note some fields may have been set by other tools (e.g. pbr)
437 # above; they are taken preferrentially to setup() arguments
438 for option, default in self._DISTUTILS_UNSUPPORTED_METADATA.items():
439 for source in self.metadata.__dict__, attrs:
440 if option in source:
441 value = source[option]
442 break
443 else:
444 value = default() if default else None
445 setattr(self.metadata, option, value)
446
447 self.metadata.version = self._normalize_version(
448 self._validate_version(self.metadata.version))
449 self._finalize_requires()
450
451 @staticmethod
452 def _normalize_version(version):
453 if isinstance(version, setuptools.sic) or version is None:
454 return version
455
456 normalized = str(packaging.version.Version(version))
457 if version != normalized:
458 tmpl = "Normalizing '{version}' to '{normalized}'"
459 warnings.warn(tmpl.format(**locals()))
460 return normalized
461 return version
462
463 @staticmethod
464 def _validate_version(version):
465 if isinstance(version, numbers.Number):
466 # Some people apparently take "version number" too literally :)
467 version = str(version)
468
469 if version is not None:
470 try:
471 packaging.version.Version(version)
472 except (packaging.version.InvalidVersion, TypeError):
473 warnings.warn(
474 "The version specified (%r) is an invalid version, this "
475 "may not work as expected with newer versions of "
476 "setuptools, pip, and PyPI. Please see PEP 440 for more "
477 "details." % version
478 )
479 return setuptools.sic(version)
480 return version
481
482 def _finalize_requires(self):
483 """
484 Set `metadata.python_requires` and fix environment markers
485 in `install_requires` and `extras_require`.
486 """
487 if getattr(self, 'python_requires', None):
488 self.metadata.python_requires = self.python_requires
489
490 if getattr(self, 'extras_require', None):
491 for extra in self.extras_require.keys():
492 # Since this gets called multiple times at points where the
493 # keys have become 'converted' extras, ensure that we are only
494 # truly adding extras we haven't seen before here.
495 extra = extra.split(':')[0]
496 if extra:
497 self.metadata.provides_extras.add(extra)
498
499 self._convert_extras_requirements()
500 self._move_install_requirements_markers()
501
502 def _convert_extras_requirements(self):
503 """
504 Convert requirements in `extras_require` of the form
505 `"extra": ["barbazquux; {marker}"]` to
506 `"extra:{marker}": ["barbazquux"]`.
507 """
508 spec_ext_reqs = getattr(self, 'extras_require', None) or {}
509 self._tmp_extras_require = defaultdict(list)
510 for section, v in spec_ext_reqs.items():
511 # Do not strip empty sections.
512 self._tmp_extras_require[section]
513 for r in pkg_resources.parse_requirements(v):
514 suffix = self._suffix_for(r)
515 self._tmp_extras_require[section + suffix].append(r)
516
517 @staticmethod
518 def _suffix_for(req):
519 """
520 For a requirement, return the 'extras_require' suffix for
521 that requirement.
522 """
523 return ':' + str(req.marker) if req.marker else ''
524
525 def _move_install_requirements_markers(self):
526 """
527 Move requirements in `install_requires` that are using environment
528 markers `extras_require`.
529 """
530
531 # divide the install_requires into two sets, simple ones still
532 # handled by install_requires and more complex ones handled
533 # by extras_require.
534
535 def is_simple_req(req):
536 return not req.marker
537
538 spec_inst_reqs = getattr(self, 'install_requires', None) or ()
539 inst_reqs = list(pkg_resources.parse_requirements(spec_inst_reqs))
540 simple_reqs = filter(is_simple_req, inst_reqs)
541 complex_reqs = itertools.filterfalse(is_simple_req, inst_reqs)
542 self.install_requires = list(map(str, simple_reqs))
543
544 for r in complex_reqs:
545 self._tmp_extras_require[':' + str(r.marker)].append(r)
546 self.extras_require = dict(
547 (k, [str(r) for r in map(self._clean_req, v)])
548 for k, v in self._tmp_extras_require.items()
549 )
550
551 def _clean_req(self, req):
552 """
553 Given a Requirement, remove environment markers and return it.
554 """
555 req.marker = None
556 return req
557
558 # FIXME: 'Distribution._parse_config_files' is too complex (14)
559 def _parse_config_files(self, filenames=None): # noqa: C901
560 """
561 Adapted from distutils.dist.Distribution.parse_config_files,
562 this method provides the same functionality in subtly-improved
563 ways.
564 """
565 from configparser import ConfigParser
566
567 # Ignore install directory options if we have a venv
568 ignore_options = [] if sys.prefix == sys.base_prefix else [
569 'install-base', 'install-platbase', 'install-lib',
570 'install-platlib', 'install-purelib', 'install-headers',
571 'install-scripts', 'install-data', 'prefix', 'exec-prefix',
572 'home', 'user', 'root',
573 ]
574
575 ignore_options = frozenset(ignore_options)
576
577 if filenames is None:
578 filenames = self.find_config_files()
579
580 if DEBUG:
581 self.announce("Distribution.parse_config_files():")
582
583 parser = ConfigParser()
584 for filename in filenames:
585 with io.open(filename, encoding='utf-8') as reader:
586 if DEBUG:
587 self.announce(" reading {filename}".format(**locals()))
588 parser.read_file(reader)
589 for section in parser.sections():
590 options = parser.options(section)
591 opt_dict = self.get_option_dict(section)
592
593 for opt in options:
594 if opt == '__name__' or opt in ignore_options:
595 continue
596
597 val = parser.get(section, opt)
598 opt = opt.replace('-', '_')
599 opt_dict[opt] = (filename, val)
600
601 # Make the ConfigParser forget everything (so we retain
602 # the original filenames that options come from)
603 parser.__init__()
604
605 if 'global' not in self.command_options:
606 return
607
608 # If there was a "global" section in the config file, use it
609 # to set Distribution options.
610
611 for (opt, (src, val)) in self.command_options['global'].items():
612 alias = self.negative_opt.get(opt)
613 if alias:
614 val = not strtobool(val)
615 elif opt in ('verbose', 'dry_run'): # ugh!
616 val = strtobool(val)
617
618 try:
619 setattr(self, alias or opt, val)
620 except ValueError as e:
621 raise DistutilsOptionError(e) from e
622
623 # FIXME: 'Distribution._set_command_options' is too complex (14)
624 def _set_command_options(self, command_obj, option_dict=None): # noqa: C901
625 """
626 Set the options for 'command_obj' from 'option_dict'. Basically
627 this means copying elements of a dictionary ('option_dict') to
628 attributes of an instance ('command').
629
630 'command_obj' must be a Command instance. If 'option_dict' is not
631 supplied, uses the standard option dictionary for this command
632 (from 'self.command_options').
633
634 (Adopted from distutils.dist.Distribution._set_command_options)
635 """
636 command_name = command_obj.get_command_name()
637 if option_dict is None:
638 option_dict = self.get_option_dict(command_name)
639
640 if DEBUG:
641 self.announce(" setting options for '%s' command:" % command_name)
642 for (option, (source, value)) in option_dict.items():
643 if DEBUG:
644 self.announce(" %s = %s (from %s)" % (option, value,
645 source))
646 try:
647 bool_opts = [translate_longopt(o)
648 for o in command_obj.boolean_options]
649 except AttributeError:
650 bool_opts = []
651 try:
652 neg_opt = command_obj.negative_opt
653 except AttributeError:
654 neg_opt = {}
655
656 try:
657 is_string = isinstance(value, str)
658 if option in neg_opt and is_string:
659 setattr(command_obj, neg_opt[option], not strtobool(value))
660 elif option in bool_opts and is_string:
661 setattr(command_obj, option, strtobool(value))
662 elif hasattr(command_obj, option):
663 setattr(command_obj, option, value)
664 else:
665 raise DistutilsOptionError(
666 "error in %s: command '%s' has no such option '%s'"
667 % (source, command_name, option))
668 except ValueError as e:
669 raise DistutilsOptionError(e) from e
670
671 def parse_config_files(self, filenames=None, ignore_option_errors=False):
672 """Parses configuration files from various levels
673 and loads configuration.
674
675 """
676 self._parse_config_files(filenames=filenames)
677
678 parse_configuration(self, self.command_options,
679 ignore_option_errors=ignore_option_errors)
680 self._finalize_requires()
681
682 def fetch_build_eggs(self, requires):
683 """Resolve pre-setup requirements"""
684 resolved_dists = pkg_resources.working_set.resolve(
685 pkg_resources.parse_requirements(requires),
686 installer=self.fetch_build_egg,
687 replace_conflicting=True,
688 )
689 for dist in resolved_dists:
690 pkg_resources.working_set.add(dist, replace=True)
691 return resolved_dists
692
693 def finalize_options(self):
694 """
695 Allow plugins to apply arbitrary operations to the
696 distribution. Each hook may optionally define a 'order'
697 to influence the order of execution. Smaller numbers
698 go first and the default is 0.
699 """
700 group = 'setuptools.finalize_distribution_options'
701
702 def by_order(hook):
703 return getattr(hook, 'order', 0)
704 eps = map(lambda e: e.load(), pkg_resources.iter_entry_points(group))
705 for ep in sorted(eps, key=by_order):
706 ep(self)
707
708 def _finalize_setup_keywords(self):
709 for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
710 value = getattr(self, ep.name, None)
711 if value is not None:
712 ep.require(installer=self.fetch_build_egg)
713 ep.load()(self, ep.name, value)
714
715 def _finalize_2to3_doctests(self):
716 if getattr(self, 'convert_2to3_doctests', None):
717 # XXX may convert to set here when we can rely on set being builtin
718 self.convert_2to3_doctests = [
719 os.path.abspath(p)
720 for p in self.convert_2to3_doctests
721 ]
722 else:
723 self.convert_2to3_doctests = []
724
725 def get_egg_cache_dir(self):
726 egg_cache_dir = os.path.join(os.curdir, '.eggs')
727 if not os.path.exists(egg_cache_dir):
728 os.mkdir(egg_cache_dir)
729 windows_support.hide_file(egg_cache_dir)
730 readme_txt_filename = os.path.join(egg_cache_dir, 'README.txt')
731 with open(readme_txt_filename, 'w') as f:
732 f.write('This directory contains eggs that were downloaded '
733 'by setuptools to build, test, and run plug-ins.\n\n')
734 f.write('This directory caches those eggs to prevent '
735 'repeated downloads.\n\n')
736 f.write('However, it is safe to delete this directory.\n\n')
737
738 return egg_cache_dir
739
740 def fetch_build_egg(self, req):
741 """Fetch an egg needed for building"""
742 from setuptools.installer import fetch_build_egg
743 return fetch_build_egg(self, req)
744
745 def get_command_class(self, command):
746 """Pluggable version of get_command_class()"""
747 if command in self.cmdclass:
748 return self.cmdclass[command]
749
750 eps = pkg_resources.iter_entry_points('distutils.commands', command)
751 for ep in eps:
752 ep.require(installer=self.fetch_build_egg)
753 self.cmdclass[command] = cmdclass = ep.load()
754 return cmdclass
755 else:
756 return _Distribution.get_command_class(self, command)
757
758 def print_commands(self):
759 for ep in pkg_resources.iter_entry_points('distutils.commands'):
760 if ep.name not in self.cmdclass:
761 # don't require extras as the commands won't be invoked
762 cmdclass = ep.resolve()
763 self.cmdclass[ep.name] = cmdclass
764 return _Distribution.print_commands(self)
765
766 def get_command_list(self):
767 for ep in pkg_resources.iter_entry_points('distutils.commands'):
768 if ep.name not in self.cmdclass:
769 # don't require extras as the commands won't be invoked
770 cmdclass = ep.resolve()
771 self.cmdclass[ep.name] = cmdclass
772 return _Distribution.get_command_list(self)
773
774 def include(self, **attrs):
775 """Add items to distribution that are named in keyword arguments
776
777 For example, 'dist.include(py_modules=["x"])' would add 'x' to
778 the distribution's 'py_modules' attribute, if it was not already
779 there.
780
781 Currently, this method only supports inclusion for attributes that are
782 lists or tuples. If you need to add support for adding to other
783 attributes in this or a subclass, you can add an '_include_X' method,
784 where 'X' is the name of the attribute. The method will be called with
785 the value passed to 'include()'. So, 'dist.include(foo={"bar":"baz"})'
786 will try to call 'dist._include_foo({"bar":"baz"})', which can then
787 handle whatever special inclusion logic is needed.
788 """
789 for k, v in attrs.items():
790 include = getattr(self, '_include_' + k, None)
791 if include:
792 include(v)
793 else:
794 self._include_misc(k, v)
795
796 def exclude_package(self, package):
797 """Remove packages, modules, and extensions in named package"""
798
799 pfx = package + '.'
800 if self.packages:
801 self.packages = [
802 p for p in self.packages
803 if p != package and not p.startswith(pfx)
804 ]
805
806 if self.py_modules:
807 self.py_modules = [
808 p for p in self.py_modules
809 if p != package and not p.startswith(pfx)
810 ]
811
812 if self.ext_modules:
813 self.ext_modules = [
814 p for p in self.ext_modules
815 if p.name != package and not p.name.startswith(pfx)
816 ]
817
818 def has_contents_for(self, package):
819 """Return true if 'exclude_package(package)' would do something"""
820
821 pfx = package + '.'
822
823 for p in self.iter_distribution_names():
824 if p == package or p.startswith(pfx):
825 return True
826
827 def _exclude_misc(self, name, value):
828 """Handle 'exclude()' for list/tuple attrs without a special handler"""
829 if not isinstance(value, sequence):
830 raise DistutilsSetupError(
831 "%s: setting must be a list or tuple (%r)" % (name, value)
832 )
833 try:
834 old = getattr(self, name)
835 except AttributeError as e:
836 raise DistutilsSetupError(
837 "%s: No such distribution setting" % name
838 ) from e
839 if old is not None and not isinstance(old, sequence):
840 raise DistutilsSetupError(
841 name + ": this setting cannot be changed via include/exclude"
842 )
843 elif old:
844 setattr(self, name, [item for item in old if item not in value])
845
846 def _include_misc(self, name, value):
847 """Handle 'include()' for list/tuple attrs without a special handler"""
848
849 if not isinstance(value, sequence):
850 raise DistutilsSetupError(
851 "%s: setting must be a list (%r)" % (name, value)
852 )
853 try:
854 old = getattr(self, name)
855 except AttributeError as e:
856 raise DistutilsSetupError(
857 "%s: No such distribution setting" % name
858 ) from e
859 if old is None:
860 setattr(self, name, value)
861 elif not isinstance(old, sequence):
862 raise DistutilsSetupError(
863 name + ": this setting cannot be changed via include/exclude"
864 )
865 else:
866 new = [item for item in value if item not in old]
867 setattr(self, name, old + new)
868
869 def exclude(self, **attrs):
870 """Remove items from distribution that are named in keyword arguments
871
872 For example, 'dist.exclude(py_modules=["x"])' would remove 'x' from
873 the distribution's 'py_modules' attribute. Excluding packages uses
874 the 'exclude_package()' method, so all of the package's contained
875 packages, modules, and extensions are also excluded.
876
877 Currently, this method only supports exclusion from attributes that are
878 lists or tuples. If you need to add support for excluding from other
879 attributes in this or a subclass, you can add an '_exclude_X' method,
880 where 'X' is the name of the attribute. The method will be called with
881 the value passed to 'exclude()'. So, 'dist.exclude(foo={"bar":"baz"})'
882 will try to call 'dist._exclude_foo({"bar":"baz"})', which can then
883 handle whatever special exclusion logic is needed.
884 """
885 for k, v in attrs.items():
886 exclude = getattr(self, '_exclude_' + k, None)
887 if exclude:
888 exclude(v)
889 else:
890 self._exclude_misc(k, v)
891
892 def _exclude_packages(self, packages):
893 if not isinstance(packages, sequence):
894 raise DistutilsSetupError(
895 "packages: setting must be a list or tuple (%r)" % (packages,)
896 )
897 list(map(self.exclude_package, packages))
898
899 def _parse_command_opts(self, parser, args):
900 # Remove --with-X/--without-X options when processing command args
901 self.global_options = self.__class__.global_options
902 self.negative_opt = self.__class__.negative_opt
903
904 # First, expand any aliases
905 command = args[0]
906 aliases = self.get_option_dict('aliases')
907 while command in aliases:
908 src, alias = aliases[command]
909 del aliases[command] # ensure each alias can expand only once!
910 import shlex
911 args[:1] = shlex.split(alias, True)
912 command = args[0]
913
914 nargs = _Distribution._parse_command_opts(self, parser, args)
915
916 # Handle commands that want to consume all remaining arguments
917 cmd_class = self.get_command_class(command)
918 if getattr(cmd_class, 'command_consumes_arguments', None):
919 self.get_option_dict(command)['args'] = ("command line", nargs)
920 if nargs is not None:
921 return []
922
923 return nargs
924
925 def get_cmdline_options(self):
926 """Return a '{cmd: {opt:val}}' map of all command-line options
927
928 Option names are all long, but do not include the leading '--', and
929 contain dashes rather than underscores. If the option doesn't take
930 an argument (e.g. '--quiet'), the 'val' is 'None'.
931
932 Note that options provided by config files are intentionally excluded.
933 """
934
935 d = {}
936
937 for cmd, opts in self.command_options.items():
938
939 for opt, (src, val) in opts.items():
940
941 if src != "command line":
942 continue
943
944 opt = opt.replace('_', '-')
945
946 if val == 0:
947 cmdobj = self.get_command_obj(cmd)
948 neg_opt = self.negative_opt.copy()
949 neg_opt.update(getattr(cmdobj, 'negative_opt', {}))
950 for neg, pos in neg_opt.items():
951 if pos == opt:
952 opt = neg
953 val = None
954 break
955 else:
956 raise AssertionError("Shouldn't be able to get here")
957
958 elif val == 1:
959 val = None
960
961 d.setdefault(cmd, {})[opt] = val
962
963 return d
964
965 def iter_distribution_names(self):
966 """Yield all packages, modules, and extension names in distribution"""
967
968 for pkg in self.packages or ():
969 yield pkg
970
971 for module in self.py_modules or ():
972 yield module
973
974 for ext in self.ext_modules or ():
975 if isinstance(ext, tuple):
976 name, buildinfo = ext
977 else:
978 name = ext.name
979 if name.endswith('module'):
980 name = name[:-6]
981 yield name
982
983 def handle_display_options(self, option_order):
984 """If there were any non-global "display-only" options
985 (--help-commands or the metadata display options) on the command
986 line, display the requested info and return true; else return
987 false.
988 """
989 import sys
990
991 if self.help_commands:
992 return _Distribution.handle_display_options(self, option_order)
993
994 # Stdout may be StringIO (e.g. in tests)
995 if not isinstance(sys.stdout, io.TextIOWrapper):
996 return _Distribution.handle_display_options(self, option_order)
997
998 # Don't wrap stdout if utf-8 is already the encoding. Provides
999 # workaround for #334.
1000 if sys.stdout.encoding.lower() in ('utf-8', 'utf8'):
1001 return _Distribution.handle_display_options(self, option_order)
1002
1003 # Print metadata in UTF-8 no matter the platform
1004 encoding = sys.stdout.encoding
1005 errors = sys.stdout.errors
1006 newline = sys.platform != 'win32' and '\n' or None
1007 line_buffering = sys.stdout.line_buffering
1008
1009 sys.stdout = io.TextIOWrapper(
1010 sys.stdout.detach(), 'utf-8', errors, newline, line_buffering)
1011 try:
1012 return _Distribution.handle_display_options(self, option_order)
1013 finally:
1014 sys.stdout = io.TextIOWrapper(
1015 sys.stdout.detach(), encoding, errors, newline, line_buffering)
1016
1017
1018 class DistDeprecationWarning(SetuptoolsDeprecationWarning):
1019 """Class for warning about deprecations in dist in
1020 setuptools. Not ignored by default, unlike DeprecationWarning."""
1021
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setuptools/dist.py b/setuptools/dist.py
--- a/setuptools/dist.py
+++ b/setuptools/dist.py
@@ -121,7 +121,9 @@
def single_line(val):
# quick and dirty validation for description pypa/setuptools#1390
if '\n' in val:
- raise ValueError("newlines not allowed")
+ # TODO after 2021-07-31: Replace with `raise ValueError("newlines not allowed")`
+ warnings.UserWarning("newlines not allowed and will break in the future")
+ val = val.replace('\n', ' ')
return val
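With this change, `single_line` replaces embedded newlines with spaces instead of passing them through to PKG-INFO. A quick illustration, assuming the patched `setuptools.dist` module is importable:

```
# Illustration only: behaviour of the patched helper.
from setuptools.dist import single_line

single_line("one line summary")        # returned unchanged
single_line("summary\nwith newlines")  # newlines replaced by spaces instead of raising ValueError
```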
| {"golden_diff": "diff --git a/setuptools/dist.py b/setuptools/dist.py\n--- a/setuptools/dist.py\n+++ b/setuptools/dist.py\n@@ -121,7 +121,9 @@\n def single_line(val):\n # quick and dirty validation for description pypa/setuptools#1390\n if '\\n' in val:\n- raise ValueError(\"newlines not allowed\")\n+ # TODO after 2021-07-31: Replace with `raise ValueError(\"newlines not allowed\")`\n+ warnings.UserWarning(\"newlines not allowed and will break in the future\")\n+ val = val.replace('\\n', ' ')\n return val\n", "issue": "Newlines in the `description` field produce a malformed PKG-INFO\nWe discovered this accidentally by way of https://github.com/zopefoundation/zc.relation/issues/4#issuecomment-397532224: if you pass a string containing newlines to the `description` argument of `setup()`, setuptools will generate a malformed PKG-INFO.\r\n\r\nTo reproduce:\r\n\r\n```\r\n# setup.py\r\nfrom setuptools import setup\r\nsetup(\r\n name='test-package',\r\n version='0.1',\r\n author='Blah Blah',\r\n author_email='[email protected]',\r\n description='description\\n\\n',\r\n py_modules=['blah'],\r\n)\r\n```\r\n(The contents of `blah.py` do not matter, but the file should exist.)\r\n\r\nRun `python setup.py sdist` and the inspect `test_package.egg-info/PKG-INFO`. For me, with setuptools 39.1.0, it looks like this:\r\n\r\n```\r\nMetadata-Version: 1.0\r\nName: test-package\r\nVersion: 0.1\r\nSummary: description\r\n\r\n\r\nHome-page: UNKNOWN\r\nAuthor: Blah Blah\r\nAuthor-email: [email protected]\r\nLicense: UNKNOWN\r\nDescription: UNKNOWN\r\nPlatform: UNKNOWN\r\n```\r\n\r\nThe extra newlines lead tools to treat the rest of the PKG-INFO as a long_description.\r\n\r\nI would expect `setuptools` to complain about the newlines in the `description` field, or at least escape them properly (i.e. prepend whitespace, like it does for the `long_description` field).\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n__all__ = ['Distribution']\n\nimport io\nimport sys\nimport re\nimport os\nimport warnings\nimport numbers\nimport distutils.log\nimport distutils.core\nimport distutils.cmd\nimport distutils.dist\nfrom distutils.util import strtobool\nfrom distutils.debug import DEBUG\nfrom distutils.fancy_getopt import translate_longopt\nimport itertools\n\nfrom collections import defaultdict\nfrom email import message_from_file\n\nfrom distutils.errors import DistutilsOptionError, DistutilsSetupError\nfrom distutils.util import rfc822_escape\nfrom distutils.version import StrictVersion\n\nfrom setuptools.extern import packaging\nfrom setuptools.extern import ordered_set\n\nfrom . 
import SetuptoolsDeprecationWarning\n\nimport setuptools\nfrom setuptools import windows_support\nfrom setuptools.monkey import get_unpatched\nfrom setuptools.config import parse_configuration\nimport pkg_resources\n\n__import__('setuptools.extern.packaging.specifiers')\n__import__('setuptools.extern.packaging.version')\n\n\ndef _get_unpatched(cls):\n warnings.warn(\"Do not call this function\", DistDeprecationWarning)\n return get_unpatched(cls)\n\n\ndef get_metadata_version(self):\n mv = getattr(self, 'metadata_version', None)\n\n if mv is None:\n if self.long_description_content_type or self.provides_extras:\n mv = StrictVersion('2.1')\n elif (self.maintainer is not None or\n self.maintainer_email is not None or\n getattr(self, 'python_requires', None) is not None or\n self.project_urls):\n mv = StrictVersion('1.2')\n elif (self.provides or self.requires or self.obsoletes or\n self.classifiers or self.download_url):\n mv = StrictVersion('1.1')\n else:\n mv = StrictVersion('1.0')\n\n self.metadata_version = mv\n\n return mv\n\n\ndef read_pkg_file(self, file):\n \"\"\"Reads the metadata values from a file object.\"\"\"\n msg = message_from_file(file)\n\n def _read_field(name):\n value = msg[name]\n if value == 'UNKNOWN':\n return None\n return value\n\n def _read_list(name):\n values = msg.get_all(name, None)\n if values == []:\n return None\n return values\n\n self.metadata_version = StrictVersion(msg['metadata-version'])\n self.name = _read_field('name')\n self.version = _read_field('version')\n self.description = _read_field('summary')\n # we are filling author only.\n self.author = _read_field('author')\n self.maintainer = None\n self.author_email = _read_field('author-email')\n self.maintainer_email = None\n self.url = _read_field('home-page')\n self.license = _read_field('license')\n\n if 'download-url' in msg:\n self.download_url = _read_field('download-url')\n else:\n self.download_url = None\n\n self.long_description = _read_field('description')\n self.description = _read_field('summary')\n\n if 'keywords' in msg:\n self.keywords = _read_field('keywords').split(',')\n\n self.platforms = _read_list('platform')\n self.classifiers = _read_list('classifier')\n\n # PEP 314 - these fields only exist in 1.1\n if self.metadata_version == StrictVersion('1.1'):\n self.requires = _read_list('requires')\n self.provides = _read_list('provides')\n self.obsoletes = _read_list('obsoletes')\n else:\n self.requires = None\n self.provides = None\n self.obsoletes = None\n\n\ndef single_line(val):\n # quick and dirty validation for description pypa/setuptools#1390\n if '\\n' in val:\n raise ValueError(\"newlines not allowed\")\n return val\n\n\n# Based on Python 3.5 version\ndef write_pkg_file(self, file): # noqa: C901 # is too complex (14) # FIXME\n \"\"\"Write the PKG-INFO format data to a file object.\n \"\"\"\n version = self.get_metadata_version()\n\n def write_field(key, value):\n file.write(\"%s: %s\\n\" % (key, value))\n\n write_field('Metadata-Version', str(version))\n write_field('Name', self.get_name())\n write_field('Version', self.get_version())\n write_field('Summary', single_line(self.get_description()))\n write_field('Home-page', self.get_url())\n\n if version < StrictVersion('1.2'):\n write_field('Author', self.get_contact())\n write_field('Author-email', self.get_contact_email())\n else:\n optional_fields = (\n ('Author', 'author'),\n ('Author-email', 'author_email'),\n ('Maintainer', 'maintainer'),\n ('Maintainer-email', 'maintainer_email'),\n )\n\n for field, attr in 
optional_fields:\n attr_val = getattr(self, attr)\n\n if attr_val is not None:\n write_field(field, attr_val)\n\n write_field('License', self.get_license())\n if self.download_url:\n write_field('Download-URL', self.download_url)\n for project_url in self.project_urls.items():\n write_field('Project-URL', '%s, %s' % project_url)\n\n long_desc = rfc822_escape(self.get_long_description())\n write_field('Description', long_desc)\n\n keywords = ','.join(self.get_keywords())\n if keywords:\n write_field('Keywords', keywords)\n\n if version >= StrictVersion('1.2'):\n for platform in self.get_platforms():\n write_field('Platform', platform)\n else:\n self._write_list(file, 'Platform', self.get_platforms())\n\n self._write_list(file, 'Classifier', self.get_classifiers())\n\n # PEP 314\n self._write_list(file, 'Requires', self.get_requires())\n self._write_list(file, 'Provides', self.get_provides())\n self._write_list(file, 'Obsoletes', self.get_obsoletes())\n\n # Setuptools specific for PEP 345\n if hasattr(self, 'python_requires'):\n write_field('Requires-Python', self.python_requires)\n\n # PEP 566\n if self.long_description_content_type:\n write_field(\n 'Description-Content-Type',\n self.long_description_content_type\n )\n if self.provides_extras:\n for extra in self.provides_extras:\n write_field('Provides-Extra', extra)\n\n\nsequence = tuple, list\n\n\ndef check_importable(dist, attr, value):\n try:\n ep = pkg_resources.EntryPoint.parse('x=' + value)\n assert not ep.extras\n except (TypeError, ValueError, AttributeError, AssertionError) as e:\n raise DistutilsSetupError(\n \"%r must be importable 'module:attrs' string (got %r)\"\n % (attr, value)\n ) from e\n\n\ndef assert_string_list(dist, attr, value):\n \"\"\"Verify that value is a string list\"\"\"\n try:\n # verify that value is a list or tuple to exclude unordered\n # or single-use iterables\n assert isinstance(value, (list, tuple))\n # verify that elements of value are strings\n assert ''.join(value) != value\n except (TypeError, ValueError, AttributeError, AssertionError) as e:\n raise DistutilsSetupError(\n \"%r must be a list of strings (got %r)\" % (attr, value)\n ) from e\n\n\ndef check_nsp(dist, attr, value):\n \"\"\"Verify that namespace packages are valid\"\"\"\n ns_packages = value\n assert_string_list(dist, attr, ns_packages)\n for nsp in ns_packages:\n if not dist.has_contents_for(nsp):\n raise DistutilsSetupError(\n \"Distribution contains no modules or packages for \" +\n \"namespace package %r\" % nsp\n )\n parent, sep, child = nsp.rpartition('.')\n if parent and parent not in ns_packages:\n distutils.log.warn(\n \"WARNING: %r is declared as a package namespace, but %r\"\n \" is not: please correct this in setup.py\", nsp, parent\n )\n\n\ndef check_extras(dist, attr, value):\n \"\"\"Verify that extras_require mapping is valid\"\"\"\n try:\n list(itertools.starmap(_check_extra, value.items()))\n except (TypeError, ValueError, AttributeError) as e:\n raise DistutilsSetupError(\n \"'extras_require' must be a dictionary whose values are \"\n \"strings or lists of strings containing valid project/version \"\n \"requirement specifiers.\"\n ) from e\n\n\ndef _check_extra(extra, reqs):\n name, sep, marker = extra.partition(':')\n if marker and pkg_resources.invalid_marker(marker):\n raise DistutilsSetupError(\"Invalid environment marker: \" + marker)\n list(pkg_resources.parse_requirements(reqs))\n\n\ndef assert_bool(dist, attr, value):\n \"\"\"Verify that value is True, False, 0, or 1\"\"\"\n if bool(value) != value:\n tmpl = 
\"{attr!r} must be a boolean value (got {value!r})\"\n raise DistutilsSetupError(tmpl.format(attr=attr, value=value))\n\n\ndef check_requirements(dist, attr, value):\n \"\"\"Verify that install_requires is a valid requirements list\"\"\"\n try:\n list(pkg_resources.parse_requirements(value))\n if isinstance(value, (dict, set)):\n raise TypeError(\"Unordered types are not allowed\")\n except (TypeError, ValueError) as error:\n tmpl = (\n \"{attr!r} must be a string or list of strings \"\n \"containing valid project/version requirement specifiers; {error}\"\n )\n raise DistutilsSetupError(\n tmpl.format(attr=attr, error=error)\n ) from error\n\n\ndef check_specifier(dist, attr, value):\n \"\"\"Verify that value is a valid version specifier\"\"\"\n try:\n packaging.specifiers.SpecifierSet(value)\n except packaging.specifiers.InvalidSpecifier as error:\n tmpl = (\n \"{attr!r} must be a string \"\n \"containing valid version specifiers; {error}\"\n )\n raise DistutilsSetupError(\n tmpl.format(attr=attr, error=error)\n ) from error\n\n\ndef check_entry_points(dist, attr, value):\n \"\"\"Verify that entry_points map is parseable\"\"\"\n try:\n pkg_resources.EntryPoint.parse_map(value)\n except ValueError as e:\n raise DistutilsSetupError(e) from e\n\n\ndef check_test_suite(dist, attr, value):\n if not isinstance(value, str):\n raise DistutilsSetupError(\"test_suite must be a string\")\n\n\ndef check_package_data(dist, attr, value):\n \"\"\"Verify that value is a dictionary of package names to glob lists\"\"\"\n if not isinstance(value, dict):\n raise DistutilsSetupError(\n \"{!r} must be a dictionary mapping package names to lists of \"\n \"string wildcard patterns\".format(attr))\n for k, v in value.items():\n if not isinstance(k, str):\n raise DistutilsSetupError(\n \"keys of {!r} dict must be strings (got {!r})\"\n .format(attr, k)\n )\n assert_string_list(dist, 'values of {!r} dict'.format(attr), v)\n\n\ndef check_packages(dist, attr, value):\n for pkgname in value:\n if not re.match(r'\\w+(\\.\\w+)*', pkgname):\n distutils.log.warn(\n \"WARNING: %r not a valid package name; please use only \"\n \".-separated package names in setup.py\", pkgname\n )\n\n\n_Distribution = get_unpatched(distutils.core.Distribution)\n\n\nclass Distribution(_Distribution):\n \"\"\"Distribution with support for tests and package data\n\n This is an enhanced version of 'distutils.dist.Distribution' that\n effectively adds the following new optional keyword arguments to 'setup()':\n\n 'install_requires' -- a string or sequence of strings specifying project\n versions that the distribution requires when installed, in the format\n used by 'pkg_resources.require()'. They will be installed\n automatically when the package is installed. If you wish to use\n packages that are not available in PyPI, or want to give your users an\n alternate download location, you can add a 'find_links' option to the\n '[easy_install]' section of your project's 'setup.cfg' file, and then\n setuptools will scan the listed web pages for links that satisfy the\n requirements.\n\n 'extras_require' -- a dictionary mapping names of optional \"extras\" to the\n additional requirement(s) that using those extras incurs. For example,\n this::\n\n extras_require = dict(reST = [\"docutils>=0.3\", \"reSTedit\"])\n\n indicates that the distribution can optionally provide an extra\n capability called \"reST\", but it can only be used if docutils and\n reSTedit are installed. 
If the user installs your package using\n EasyInstall and requests one of your extras, the corresponding\n additional requirements will be installed if needed.\n\n 'test_suite' -- the name of a test suite to run for the 'test' command.\n If the user runs 'python setup.py test', the package will be installed,\n and the named test suite will be run. The format is the same as\n would be used on a 'unittest.py' command line. That is, it is the\n dotted name of an object to import and call to generate a test suite.\n\n 'package_data' -- a dictionary mapping package names to lists of filenames\n or globs to use to find data files contained in the named packages.\n If the dictionary has filenames or globs listed under '\"\"' (the empty\n string), those names will be searched for in every package, in addition\n to any names for the specific package. Data files found using these\n names/globs will be installed along with the package, in the same\n location as the package. Note that globs are allowed to reference\n the contents of non-package subdirectories, as long as you use '/' as\n a path separator. (Globs are automatically converted to\n platform-specific paths at runtime.)\n\n In addition to these new keywords, this class also has several new methods\n for manipulating the distribution's contents. For example, the 'include()'\n and 'exclude()' methods can be thought of as in-place add and subtract\n commands that add or remove packages, modules, extensions, and so on from\n the distribution.\n \"\"\"\n\n _DISTUTILS_UNSUPPORTED_METADATA = {\n 'long_description_content_type': None,\n 'project_urls': dict,\n 'provides_extras': ordered_set.OrderedSet,\n 'license_files': ordered_set.OrderedSet,\n }\n\n _patched_dist = None\n\n def patch_missing_pkg_info(self, attrs):\n # Fake up a replacement for the data that would normally come from\n # PKG-INFO, but which might not yet be built if this is a fresh\n # checkout.\n #\n if not attrs or 'name' not in attrs or 'version' not in attrs:\n return\n key = pkg_resources.safe_name(str(attrs['name'])).lower()\n dist = pkg_resources.working_set.by_key.get(key)\n if dist is not None and not dist.has_metadata('PKG-INFO'):\n dist._version = pkg_resources.safe_version(str(attrs['version']))\n self._patched_dist = dist\n\n def __init__(self, attrs=None):\n have_package_data = hasattr(self, \"package_data\")\n if not have_package_data:\n self.package_data = {}\n attrs = attrs or {}\n self.dist_files = []\n # Filter-out setuptools' specific options.\n self.src_root = attrs.pop(\"src_root\", None)\n self.patch_missing_pkg_info(attrs)\n self.dependency_links = attrs.pop('dependency_links', [])\n self.setup_requires = attrs.pop('setup_requires', [])\n for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):\n vars(self).setdefault(ep.name, None)\n _Distribution.__init__(self, {\n k: v for k, v in attrs.items()\n if k not in self._DISTUTILS_UNSUPPORTED_METADATA\n })\n\n # Fill-in missing metadata fields not supported by distutils.\n # Note some fields may have been set by other tools (e.g. 
pbr)\n # above; they are taken preferrentially to setup() arguments\n for option, default in self._DISTUTILS_UNSUPPORTED_METADATA.items():\n for source in self.metadata.__dict__, attrs:\n if option in source:\n value = source[option]\n break\n else:\n value = default() if default else None\n setattr(self.metadata, option, value)\n\n self.metadata.version = self._normalize_version(\n self._validate_version(self.metadata.version))\n self._finalize_requires()\n\n @staticmethod\n def _normalize_version(version):\n if isinstance(version, setuptools.sic) or version is None:\n return version\n\n normalized = str(packaging.version.Version(version))\n if version != normalized:\n tmpl = \"Normalizing '{version}' to '{normalized}'\"\n warnings.warn(tmpl.format(**locals()))\n return normalized\n return version\n\n @staticmethod\n def _validate_version(version):\n if isinstance(version, numbers.Number):\n # Some people apparently take \"version number\" too literally :)\n version = str(version)\n\n if version is not None:\n try:\n packaging.version.Version(version)\n except (packaging.version.InvalidVersion, TypeError):\n warnings.warn(\n \"The version specified (%r) is an invalid version, this \"\n \"may not work as expected with newer versions of \"\n \"setuptools, pip, and PyPI. Please see PEP 440 for more \"\n \"details.\" % version\n )\n return setuptools.sic(version)\n return version\n\n def _finalize_requires(self):\n \"\"\"\n Set `metadata.python_requires` and fix environment markers\n in `install_requires` and `extras_require`.\n \"\"\"\n if getattr(self, 'python_requires', None):\n self.metadata.python_requires = self.python_requires\n\n if getattr(self, 'extras_require', None):\n for extra in self.extras_require.keys():\n # Since this gets called multiple times at points where the\n # keys have become 'converted' extras, ensure that we are only\n # truly adding extras we haven't seen before here.\n extra = extra.split(':')[0]\n if extra:\n self.metadata.provides_extras.add(extra)\n\n self._convert_extras_requirements()\n self._move_install_requirements_markers()\n\n def _convert_extras_requirements(self):\n \"\"\"\n Convert requirements in `extras_require` of the form\n `\"extra\": [\"barbazquux; {marker}\"]` to\n `\"extra:{marker}\": [\"barbazquux\"]`.\n \"\"\"\n spec_ext_reqs = getattr(self, 'extras_require', None) or {}\n self._tmp_extras_require = defaultdict(list)\n for section, v in spec_ext_reqs.items():\n # Do not strip empty sections.\n self._tmp_extras_require[section]\n for r in pkg_resources.parse_requirements(v):\n suffix = self._suffix_for(r)\n self._tmp_extras_require[section + suffix].append(r)\n\n @staticmethod\n def _suffix_for(req):\n \"\"\"\n For a requirement, return the 'extras_require' suffix for\n that requirement.\n \"\"\"\n return ':' + str(req.marker) if req.marker else ''\n\n def _move_install_requirements_markers(self):\n \"\"\"\n Move requirements in `install_requires` that are using environment\n markers `extras_require`.\n \"\"\"\n\n # divide the install_requires into two sets, simple ones still\n # handled by install_requires and more complex ones handled\n # by extras_require.\n\n def is_simple_req(req):\n return not req.marker\n\n spec_inst_reqs = getattr(self, 'install_requires', None) or ()\n inst_reqs = list(pkg_resources.parse_requirements(spec_inst_reqs))\n simple_reqs = filter(is_simple_req, inst_reqs)\n complex_reqs = itertools.filterfalse(is_simple_req, inst_reqs)\n self.install_requires = list(map(str, simple_reqs))\n\n for r in complex_reqs:\n 
self._tmp_extras_require[':' + str(r.marker)].append(r)\n self.extras_require = dict(\n (k, [str(r) for r in map(self._clean_req, v)])\n for k, v in self._tmp_extras_require.items()\n )\n\n def _clean_req(self, req):\n \"\"\"\n Given a Requirement, remove environment markers and return it.\n \"\"\"\n req.marker = None\n return req\n\n # FIXME: 'Distribution._parse_config_files' is too complex (14)\n def _parse_config_files(self, filenames=None): # noqa: C901\n \"\"\"\n Adapted from distutils.dist.Distribution.parse_config_files,\n this method provides the same functionality in subtly-improved\n ways.\n \"\"\"\n from configparser import ConfigParser\n\n # Ignore install directory options if we have a venv\n ignore_options = [] if sys.prefix == sys.base_prefix else [\n 'install-base', 'install-platbase', 'install-lib',\n 'install-platlib', 'install-purelib', 'install-headers',\n 'install-scripts', 'install-data', 'prefix', 'exec-prefix',\n 'home', 'user', 'root',\n ]\n\n ignore_options = frozenset(ignore_options)\n\n if filenames is None:\n filenames = self.find_config_files()\n\n if DEBUG:\n self.announce(\"Distribution.parse_config_files():\")\n\n parser = ConfigParser()\n for filename in filenames:\n with io.open(filename, encoding='utf-8') as reader:\n if DEBUG:\n self.announce(\" reading {filename}\".format(**locals()))\n parser.read_file(reader)\n for section in parser.sections():\n options = parser.options(section)\n opt_dict = self.get_option_dict(section)\n\n for opt in options:\n if opt == '__name__' or opt in ignore_options:\n continue\n\n val = parser.get(section, opt)\n opt = opt.replace('-', '_')\n opt_dict[opt] = (filename, val)\n\n # Make the ConfigParser forget everything (so we retain\n # the original filenames that options come from)\n parser.__init__()\n\n if 'global' not in self.command_options:\n return\n\n # If there was a \"global\" section in the config file, use it\n # to set Distribution options.\n\n for (opt, (src, val)) in self.command_options['global'].items():\n alias = self.negative_opt.get(opt)\n if alias:\n val = not strtobool(val)\n elif opt in ('verbose', 'dry_run'): # ugh!\n val = strtobool(val)\n\n try:\n setattr(self, alias or opt, val)\n except ValueError as e:\n raise DistutilsOptionError(e) from e\n\n # FIXME: 'Distribution._set_command_options' is too complex (14)\n def _set_command_options(self, command_obj, option_dict=None): # noqa: C901\n \"\"\"\n Set the options for 'command_obj' from 'option_dict'. Basically\n this means copying elements of a dictionary ('option_dict') to\n attributes of an instance ('command').\n\n 'command_obj' must be a Command instance. 
If 'option_dict' is not\n supplied, uses the standard option dictionary for this command\n (from 'self.command_options').\n\n (Adopted from distutils.dist.Distribution._set_command_options)\n \"\"\"\n command_name = command_obj.get_command_name()\n if option_dict is None:\n option_dict = self.get_option_dict(command_name)\n\n if DEBUG:\n self.announce(\" setting options for '%s' command:\" % command_name)\n for (option, (source, value)) in option_dict.items():\n if DEBUG:\n self.announce(\" %s = %s (from %s)\" % (option, value,\n source))\n try:\n bool_opts = [translate_longopt(o)\n for o in command_obj.boolean_options]\n except AttributeError:\n bool_opts = []\n try:\n neg_opt = command_obj.negative_opt\n except AttributeError:\n neg_opt = {}\n\n try:\n is_string = isinstance(value, str)\n if option in neg_opt and is_string:\n setattr(command_obj, neg_opt[option], not strtobool(value))\n elif option in bool_opts and is_string:\n setattr(command_obj, option, strtobool(value))\n elif hasattr(command_obj, option):\n setattr(command_obj, option, value)\n else:\n raise DistutilsOptionError(\n \"error in %s: command '%s' has no such option '%s'\"\n % (source, command_name, option))\n except ValueError as e:\n raise DistutilsOptionError(e) from e\n\n def parse_config_files(self, filenames=None, ignore_option_errors=False):\n \"\"\"Parses configuration files from various levels\n and loads configuration.\n\n \"\"\"\n self._parse_config_files(filenames=filenames)\n\n parse_configuration(self, self.command_options,\n ignore_option_errors=ignore_option_errors)\n self._finalize_requires()\n\n def fetch_build_eggs(self, requires):\n \"\"\"Resolve pre-setup requirements\"\"\"\n resolved_dists = pkg_resources.working_set.resolve(\n pkg_resources.parse_requirements(requires),\n installer=self.fetch_build_egg,\n replace_conflicting=True,\n )\n for dist in resolved_dists:\n pkg_resources.working_set.add(dist, replace=True)\n return resolved_dists\n\n def finalize_options(self):\n \"\"\"\n Allow plugins to apply arbitrary operations to the\n distribution. Each hook may optionally define a 'order'\n to influence the order of execution. 
Smaller numbers\n go first and the default is 0.\n \"\"\"\n group = 'setuptools.finalize_distribution_options'\n\n def by_order(hook):\n return getattr(hook, 'order', 0)\n eps = map(lambda e: e.load(), pkg_resources.iter_entry_points(group))\n for ep in sorted(eps, key=by_order):\n ep(self)\n\n def _finalize_setup_keywords(self):\n for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):\n value = getattr(self, ep.name, None)\n if value is not None:\n ep.require(installer=self.fetch_build_egg)\n ep.load()(self, ep.name, value)\n\n def _finalize_2to3_doctests(self):\n if getattr(self, 'convert_2to3_doctests', None):\n # XXX may convert to set here when we can rely on set being builtin\n self.convert_2to3_doctests = [\n os.path.abspath(p)\n for p in self.convert_2to3_doctests\n ]\n else:\n self.convert_2to3_doctests = []\n\n def get_egg_cache_dir(self):\n egg_cache_dir = os.path.join(os.curdir, '.eggs')\n if not os.path.exists(egg_cache_dir):\n os.mkdir(egg_cache_dir)\n windows_support.hide_file(egg_cache_dir)\n readme_txt_filename = os.path.join(egg_cache_dir, 'README.txt')\n with open(readme_txt_filename, 'w') as f:\n f.write('This directory contains eggs that were downloaded '\n 'by setuptools to build, test, and run plug-ins.\\n\\n')\n f.write('This directory caches those eggs to prevent '\n 'repeated downloads.\\n\\n')\n f.write('However, it is safe to delete this directory.\\n\\n')\n\n return egg_cache_dir\n\n def fetch_build_egg(self, req):\n \"\"\"Fetch an egg needed for building\"\"\"\n from setuptools.installer import fetch_build_egg\n return fetch_build_egg(self, req)\n\n def get_command_class(self, command):\n \"\"\"Pluggable version of get_command_class()\"\"\"\n if command in self.cmdclass:\n return self.cmdclass[command]\n\n eps = pkg_resources.iter_entry_points('distutils.commands', command)\n for ep in eps:\n ep.require(installer=self.fetch_build_egg)\n self.cmdclass[command] = cmdclass = ep.load()\n return cmdclass\n else:\n return _Distribution.get_command_class(self, command)\n\n def print_commands(self):\n for ep in pkg_resources.iter_entry_points('distutils.commands'):\n if ep.name not in self.cmdclass:\n # don't require extras as the commands won't be invoked\n cmdclass = ep.resolve()\n self.cmdclass[ep.name] = cmdclass\n return _Distribution.print_commands(self)\n\n def get_command_list(self):\n for ep in pkg_resources.iter_entry_points('distutils.commands'):\n if ep.name not in self.cmdclass:\n # don't require extras as the commands won't be invoked\n cmdclass = ep.resolve()\n self.cmdclass[ep.name] = cmdclass\n return _Distribution.get_command_list(self)\n\n def include(self, **attrs):\n \"\"\"Add items to distribution that are named in keyword arguments\n\n For example, 'dist.include(py_modules=[\"x\"])' would add 'x' to\n the distribution's 'py_modules' attribute, if it was not already\n there.\n\n Currently, this method only supports inclusion for attributes that are\n lists or tuples. If you need to add support for adding to other\n attributes in this or a subclass, you can add an '_include_X' method,\n where 'X' is the name of the attribute. The method will be called with\n the value passed to 'include()'. 
So, 'dist.include(foo={\"bar\":\"baz\"})'\n will try to call 'dist._include_foo({\"bar\":\"baz\"})', which can then\n handle whatever special inclusion logic is needed.\n \"\"\"\n for k, v in attrs.items():\n include = getattr(self, '_include_' + k, None)\n if include:\n include(v)\n else:\n self._include_misc(k, v)\n\n def exclude_package(self, package):\n \"\"\"Remove packages, modules, and extensions in named package\"\"\"\n\n pfx = package + '.'\n if self.packages:\n self.packages = [\n p for p in self.packages\n if p != package and not p.startswith(pfx)\n ]\n\n if self.py_modules:\n self.py_modules = [\n p for p in self.py_modules\n if p != package and not p.startswith(pfx)\n ]\n\n if self.ext_modules:\n self.ext_modules = [\n p for p in self.ext_modules\n if p.name != package and not p.name.startswith(pfx)\n ]\n\n def has_contents_for(self, package):\n \"\"\"Return true if 'exclude_package(package)' would do something\"\"\"\n\n pfx = package + '.'\n\n for p in self.iter_distribution_names():\n if p == package or p.startswith(pfx):\n return True\n\n def _exclude_misc(self, name, value):\n \"\"\"Handle 'exclude()' for list/tuple attrs without a special handler\"\"\"\n if not isinstance(value, sequence):\n raise DistutilsSetupError(\n \"%s: setting must be a list or tuple (%r)\" % (name, value)\n )\n try:\n old = getattr(self, name)\n except AttributeError as e:\n raise DistutilsSetupError(\n \"%s: No such distribution setting\" % name\n ) from e\n if old is not None and not isinstance(old, sequence):\n raise DistutilsSetupError(\n name + \": this setting cannot be changed via include/exclude\"\n )\n elif old:\n setattr(self, name, [item for item in old if item not in value])\n\n def _include_misc(self, name, value):\n \"\"\"Handle 'include()' for list/tuple attrs without a special handler\"\"\"\n\n if not isinstance(value, sequence):\n raise DistutilsSetupError(\n \"%s: setting must be a list (%r)\" % (name, value)\n )\n try:\n old = getattr(self, name)\n except AttributeError as e:\n raise DistutilsSetupError(\n \"%s: No such distribution setting\" % name\n ) from e\n if old is None:\n setattr(self, name, value)\n elif not isinstance(old, sequence):\n raise DistutilsSetupError(\n name + \": this setting cannot be changed via include/exclude\"\n )\n else:\n new = [item for item in value if item not in old]\n setattr(self, name, old + new)\n\n def exclude(self, **attrs):\n \"\"\"Remove items from distribution that are named in keyword arguments\n\n For example, 'dist.exclude(py_modules=[\"x\"])' would remove 'x' from\n the distribution's 'py_modules' attribute. Excluding packages uses\n the 'exclude_package()' method, so all of the package's contained\n packages, modules, and extensions are also excluded.\n\n Currently, this method only supports exclusion from attributes that are\n lists or tuples. If you need to add support for excluding from other\n attributes in this or a subclass, you can add an '_exclude_X' method,\n where 'X' is the name of the attribute. The method will be called with\n the value passed to 'exclude()'. 
So, 'dist.exclude(foo={\"bar\":\"baz\"})'\n will try to call 'dist._exclude_foo({\"bar\":\"baz\"})', which can then\n handle whatever special exclusion logic is needed.\n \"\"\"\n for k, v in attrs.items():\n exclude = getattr(self, '_exclude_' + k, None)\n if exclude:\n exclude(v)\n else:\n self._exclude_misc(k, v)\n\n def _exclude_packages(self, packages):\n if not isinstance(packages, sequence):\n raise DistutilsSetupError(\n \"packages: setting must be a list or tuple (%r)\" % (packages,)\n )\n list(map(self.exclude_package, packages))\n\n def _parse_command_opts(self, parser, args):\n # Remove --with-X/--without-X options when processing command args\n self.global_options = self.__class__.global_options\n self.negative_opt = self.__class__.negative_opt\n\n # First, expand any aliases\n command = args[0]\n aliases = self.get_option_dict('aliases')\n while command in aliases:\n src, alias = aliases[command]\n del aliases[command] # ensure each alias can expand only once!\n import shlex\n args[:1] = shlex.split(alias, True)\n command = args[0]\n\n nargs = _Distribution._parse_command_opts(self, parser, args)\n\n # Handle commands that want to consume all remaining arguments\n cmd_class = self.get_command_class(command)\n if getattr(cmd_class, 'command_consumes_arguments', None):\n self.get_option_dict(command)['args'] = (\"command line\", nargs)\n if nargs is not None:\n return []\n\n return nargs\n\n def get_cmdline_options(self):\n \"\"\"Return a '{cmd: {opt:val}}' map of all command-line options\n\n Option names are all long, but do not include the leading '--', and\n contain dashes rather than underscores. If the option doesn't take\n an argument (e.g. '--quiet'), the 'val' is 'None'.\n\n Note that options provided by config files are intentionally excluded.\n \"\"\"\n\n d = {}\n\n for cmd, opts in self.command_options.items():\n\n for opt, (src, val) in opts.items():\n\n if src != \"command line\":\n continue\n\n opt = opt.replace('_', '-')\n\n if val == 0:\n cmdobj = self.get_command_obj(cmd)\n neg_opt = self.negative_opt.copy()\n neg_opt.update(getattr(cmdobj, 'negative_opt', {}))\n for neg, pos in neg_opt.items():\n if pos == opt:\n opt = neg\n val = None\n break\n else:\n raise AssertionError(\"Shouldn't be able to get here\")\n\n elif val == 1:\n val = None\n\n d.setdefault(cmd, {})[opt] = val\n\n return d\n\n def iter_distribution_names(self):\n \"\"\"Yield all packages, modules, and extension names in distribution\"\"\"\n\n for pkg in self.packages or ():\n yield pkg\n\n for module in self.py_modules or ():\n yield module\n\n for ext in self.ext_modules or ():\n if isinstance(ext, tuple):\n name, buildinfo = ext\n else:\n name = ext.name\n if name.endswith('module'):\n name = name[:-6]\n yield name\n\n def handle_display_options(self, option_order):\n \"\"\"If there were any non-global \"display-only\" options\n (--help-commands or the metadata display options) on the command\n line, display the requested info and return true; else return\n false.\n \"\"\"\n import sys\n\n if self.help_commands:\n return _Distribution.handle_display_options(self, option_order)\n\n # Stdout may be StringIO (e.g. in tests)\n if not isinstance(sys.stdout, io.TextIOWrapper):\n return _Distribution.handle_display_options(self, option_order)\n\n # Don't wrap stdout if utf-8 is already the encoding. 
Provides\n # workaround for #334.\n if sys.stdout.encoding.lower() in ('utf-8', 'utf8'):\n return _Distribution.handle_display_options(self, option_order)\n\n # Print metadata in UTF-8 no matter the platform\n encoding = sys.stdout.encoding\n errors = sys.stdout.errors\n newline = sys.platform != 'win32' and '\\n' or None\n line_buffering = sys.stdout.line_buffering\n\n sys.stdout = io.TextIOWrapper(\n sys.stdout.detach(), 'utf-8', errors, newline, line_buffering)\n try:\n return _Distribution.handle_display_options(self, option_order)\n finally:\n sys.stdout = io.TextIOWrapper(\n sys.stdout.detach(), encoding, errors, newline, line_buffering)\n\n\nclass DistDeprecationWarning(SetuptoolsDeprecationWarning):\n \"\"\"Class for warning about deprecations in dist in\n setuptools. Not ignored by default, unlike DeprecationWarning.\"\"\"\n", "path": "setuptools/dist.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n__all__ = ['Distribution']\n\nimport io\nimport sys\nimport re\nimport os\nimport warnings\nimport numbers\nimport distutils.log\nimport distutils.core\nimport distutils.cmd\nimport distutils.dist\nfrom distutils.util import strtobool\nfrom distutils.debug import DEBUG\nfrom distutils.fancy_getopt import translate_longopt\nimport itertools\n\nfrom collections import defaultdict\nfrom email import message_from_file\n\nfrom distutils.errors import DistutilsOptionError, DistutilsSetupError\nfrom distutils.util import rfc822_escape\nfrom distutils.version import StrictVersion\n\nfrom setuptools.extern import packaging\nfrom setuptools.extern import ordered_set\n\nfrom . import SetuptoolsDeprecationWarning\n\nimport setuptools\nfrom setuptools import windows_support\nfrom setuptools.monkey import get_unpatched\nfrom setuptools.config import parse_configuration\nimport pkg_resources\n\n__import__('setuptools.extern.packaging.specifiers')\n__import__('setuptools.extern.packaging.version')\n\n\ndef _get_unpatched(cls):\n warnings.warn(\"Do not call this function\", DistDeprecationWarning)\n return get_unpatched(cls)\n\n\ndef get_metadata_version(self):\n mv = getattr(self, 'metadata_version', None)\n\n if mv is None:\n if self.long_description_content_type or self.provides_extras:\n mv = StrictVersion('2.1')\n elif (self.maintainer is not None or\n self.maintainer_email is not None or\n getattr(self, 'python_requires', None) is not None or\n self.project_urls):\n mv = StrictVersion('1.2')\n elif (self.provides or self.requires or self.obsoletes or\n self.classifiers or self.download_url):\n mv = StrictVersion('1.1')\n else:\n mv = StrictVersion('1.0')\n\n self.metadata_version = mv\n\n return mv\n\n\ndef read_pkg_file(self, file):\n \"\"\"Reads the metadata values from a file object.\"\"\"\n msg = message_from_file(file)\n\n def _read_field(name):\n value = msg[name]\n if value == 'UNKNOWN':\n return None\n return value\n\n def _read_list(name):\n values = msg.get_all(name, None)\n if values == []:\n return None\n return values\n\n self.metadata_version = StrictVersion(msg['metadata-version'])\n self.name = _read_field('name')\n self.version = _read_field('version')\n self.description = _read_field('summary')\n # we are filling author only.\n self.author = _read_field('author')\n self.maintainer = None\n self.author_email = _read_field('author-email')\n self.maintainer_email = None\n self.url = _read_field('home-page')\n self.license = _read_field('license')\n\n if 'download-url' in msg:\n self.download_url = _read_field('download-url')\n else:\n self.download_url = None\n\n 
self.long_description = _read_field('description')\n self.description = _read_field('summary')\n\n if 'keywords' in msg:\n self.keywords = _read_field('keywords').split(',')\n\n self.platforms = _read_list('platform')\n self.classifiers = _read_list('classifier')\n\n # PEP 314 - these fields only exist in 1.1\n if self.metadata_version == StrictVersion('1.1'):\n self.requires = _read_list('requires')\n self.provides = _read_list('provides')\n self.obsoletes = _read_list('obsoletes')\n else:\n self.requires = None\n self.provides = None\n self.obsoletes = None\n\n\ndef single_line(val):\n # quick and dirty validation for description pypa/setuptools#1390\n if '\\n' in val:\n # TODO after 2021-07-31: Replace with `raise ValueError(\"newlines not allowed\")`\n warnings.UserWarning(\"newlines not allowed and will break in the future\")\n val = val.replace('\\n', ' ')\n return val\n\n\n# Based on Python 3.5 version\ndef write_pkg_file(self, file): # noqa: C901 # is too complex (14) # FIXME\n \"\"\"Write the PKG-INFO format data to a file object.\n \"\"\"\n version = self.get_metadata_version()\n\n def write_field(key, value):\n file.write(\"%s: %s\\n\" % (key, value))\n\n write_field('Metadata-Version', str(version))\n write_field('Name', self.get_name())\n write_field('Version', self.get_version())\n write_field('Summary', single_line(self.get_description()))\n write_field('Home-page', self.get_url())\n\n if version < StrictVersion('1.2'):\n write_field('Author', self.get_contact())\n write_field('Author-email', self.get_contact_email())\n else:\n optional_fields = (\n ('Author', 'author'),\n ('Author-email', 'author_email'),\n ('Maintainer', 'maintainer'),\n ('Maintainer-email', 'maintainer_email'),\n )\n\n for field, attr in optional_fields:\n attr_val = getattr(self, attr)\n\n if attr_val is not None:\n write_field(field, attr_val)\n\n write_field('License', self.get_license())\n if self.download_url:\n write_field('Download-URL', self.download_url)\n for project_url in self.project_urls.items():\n write_field('Project-URL', '%s, %s' % project_url)\n\n long_desc = rfc822_escape(self.get_long_description())\n write_field('Description', long_desc)\n\n keywords = ','.join(self.get_keywords())\n if keywords:\n write_field('Keywords', keywords)\n\n if version >= StrictVersion('1.2'):\n for platform in self.get_platforms():\n write_field('Platform', platform)\n else:\n self._write_list(file, 'Platform', self.get_platforms())\n\n self._write_list(file, 'Classifier', self.get_classifiers())\n\n # PEP 314\n self._write_list(file, 'Requires', self.get_requires())\n self._write_list(file, 'Provides', self.get_provides())\n self._write_list(file, 'Obsoletes', self.get_obsoletes())\n\n # Setuptools specific for PEP 345\n if hasattr(self, 'python_requires'):\n write_field('Requires-Python', self.python_requires)\n\n # PEP 566\n if self.long_description_content_type:\n write_field(\n 'Description-Content-Type',\n self.long_description_content_type\n )\n if self.provides_extras:\n for extra in self.provides_extras:\n write_field('Provides-Extra', extra)\n\n\nsequence = tuple, list\n\n\ndef check_importable(dist, attr, value):\n try:\n ep = pkg_resources.EntryPoint.parse('x=' + value)\n assert not ep.extras\n except (TypeError, ValueError, AttributeError, AssertionError) as e:\n raise DistutilsSetupError(\n \"%r must be importable 'module:attrs' string (got %r)\"\n % (attr, value)\n ) from e\n\n\ndef assert_string_list(dist, attr, value):\n \"\"\"Verify that value is a string list\"\"\"\n try:\n # verify 
that value is a list or tuple to exclude unordered\n # or single-use iterables\n assert isinstance(value, (list, tuple))\n # verify that elements of value are strings\n assert ''.join(value) != value\n except (TypeError, ValueError, AttributeError, AssertionError) as e:\n raise DistutilsSetupError(\n \"%r must be a list of strings (got %r)\" % (attr, value)\n ) from e\n\n\ndef check_nsp(dist, attr, value):\n \"\"\"Verify that namespace packages are valid\"\"\"\n ns_packages = value\n assert_string_list(dist, attr, ns_packages)\n for nsp in ns_packages:\n if not dist.has_contents_for(nsp):\n raise DistutilsSetupError(\n \"Distribution contains no modules or packages for \" +\n \"namespace package %r\" % nsp\n )\n parent, sep, child = nsp.rpartition('.')\n if parent and parent not in ns_packages:\n distutils.log.warn(\n \"WARNING: %r is declared as a package namespace, but %r\"\n \" is not: please correct this in setup.py\", nsp, parent\n )\n\n\ndef check_extras(dist, attr, value):\n \"\"\"Verify that extras_require mapping is valid\"\"\"\n try:\n list(itertools.starmap(_check_extra, value.items()))\n except (TypeError, ValueError, AttributeError) as e:\n raise DistutilsSetupError(\n \"'extras_require' must be a dictionary whose values are \"\n \"strings or lists of strings containing valid project/version \"\n \"requirement specifiers.\"\n ) from e\n\n\ndef _check_extra(extra, reqs):\n name, sep, marker = extra.partition(':')\n if marker and pkg_resources.invalid_marker(marker):\n raise DistutilsSetupError(\"Invalid environment marker: \" + marker)\n list(pkg_resources.parse_requirements(reqs))\n\n\ndef assert_bool(dist, attr, value):\n \"\"\"Verify that value is True, False, 0, or 1\"\"\"\n if bool(value) != value:\n tmpl = \"{attr!r} must be a boolean value (got {value!r})\"\n raise DistutilsSetupError(tmpl.format(attr=attr, value=value))\n\n\ndef check_requirements(dist, attr, value):\n \"\"\"Verify that install_requires is a valid requirements list\"\"\"\n try:\n list(pkg_resources.parse_requirements(value))\n if isinstance(value, (dict, set)):\n raise TypeError(\"Unordered types are not allowed\")\n except (TypeError, ValueError) as error:\n tmpl = (\n \"{attr!r} must be a string or list of strings \"\n \"containing valid project/version requirement specifiers; {error}\"\n )\n raise DistutilsSetupError(\n tmpl.format(attr=attr, error=error)\n ) from error\n\n\ndef check_specifier(dist, attr, value):\n \"\"\"Verify that value is a valid version specifier\"\"\"\n try:\n packaging.specifiers.SpecifierSet(value)\n except packaging.specifiers.InvalidSpecifier as error:\n tmpl = (\n \"{attr!r} must be a string \"\n \"containing valid version specifiers; {error}\"\n )\n raise DistutilsSetupError(\n tmpl.format(attr=attr, error=error)\n ) from error\n\n\ndef check_entry_points(dist, attr, value):\n \"\"\"Verify that entry_points map is parseable\"\"\"\n try:\n pkg_resources.EntryPoint.parse_map(value)\n except ValueError as e:\n raise DistutilsSetupError(e) from e\n\n\ndef check_test_suite(dist, attr, value):\n if not isinstance(value, str):\n raise DistutilsSetupError(\"test_suite must be a string\")\n\n\ndef check_package_data(dist, attr, value):\n \"\"\"Verify that value is a dictionary of package names to glob lists\"\"\"\n if not isinstance(value, dict):\n raise DistutilsSetupError(\n \"{!r} must be a dictionary mapping package names to lists of \"\n \"string wildcard patterns\".format(attr))\n for k, v in value.items():\n if not isinstance(k, str):\n raise DistutilsSetupError(\n \"keys 
of {!r} dict must be strings (got {!r})\"\n .format(attr, k)\n )\n assert_string_list(dist, 'values of {!r} dict'.format(attr), v)\n\n\ndef check_packages(dist, attr, value):\n for pkgname in value:\n if not re.match(r'\\w+(\\.\\w+)*', pkgname):\n distutils.log.warn(\n \"WARNING: %r not a valid package name; please use only \"\n \".-separated package names in setup.py\", pkgname\n )\n\n\n_Distribution = get_unpatched(distutils.core.Distribution)\n\n\nclass Distribution(_Distribution):\n \"\"\"Distribution with support for tests and package data\n\n This is an enhanced version of 'distutils.dist.Distribution' that\n effectively adds the following new optional keyword arguments to 'setup()':\n\n 'install_requires' -- a string or sequence of strings specifying project\n versions that the distribution requires when installed, in the format\n used by 'pkg_resources.require()'. They will be installed\n automatically when the package is installed. If you wish to use\n packages that are not available in PyPI, or want to give your users an\n alternate download location, you can add a 'find_links' option to the\n '[easy_install]' section of your project's 'setup.cfg' file, and then\n setuptools will scan the listed web pages for links that satisfy the\n requirements.\n\n 'extras_require' -- a dictionary mapping names of optional \"extras\" to the\n additional requirement(s) that using those extras incurs. For example,\n this::\n\n extras_require = dict(reST = [\"docutils>=0.3\", \"reSTedit\"])\n\n indicates that the distribution can optionally provide an extra\n capability called \"reST\", but it can only be used if docutils and\n reSTedit are installed. If the user installs your package using\n EasyInstall and requests one of your extras, the corresponding\n additional requirements will be installed if needed.\n\n 'test_suite' -- the name of a test suite to run for the 'test' command.\n If the user runs 'python setup.py test', the package will be installed,\n and the named test suite will be run. The format is the same as\n would be used on a 'unittest.py' command line. That is, it is the\n dotted name of an object to import and call to generate a test suite.\n\n 'package_data' -- a dictionary mapping package names to lists of filenames\n or globs to use to find data files contained in the named packages.\n If the dictionary has filenames or globs listed under '\"\"' (the empty\n string), those names will be searched for in every package, in addition\n to any names for the specific package. Data files found using these\n names/globs will be installed along with the package, in the same\n location as the package. Note that globs are allowed to reference\n the contents of non-package subdirectories, as long as you use '/' as\n a path separator. (Globs are automatically converted to\n platform-specific paths at runtime.)\n\n In addition to these new keywords, this class also has several new methods\n for manipulating the distribution's contents. 
For example, the 'include()'\n and 'exclude()' methods can be thought of as in-place add and subtract\n commands that add or remove packages, modules, extensions, and so on from\n the distribution.\n \"\"\"\n\n _DISTUTILS_UNSUPPORTED_METADATA = {\n 'long_description_content_type': None,\n 'project_urls': dict,\n 'provides_extras': ordered_set.OrderedSet,\n 'license_files': ordered_set.OrderedSet,\n }\n\n _patched_dist = None\n\n def patch_missing_pkg_info(self, attrs):\n # Fake up a replacement for the data that would normally come from\n # PKG-INFO, but which might not yet be built if this is a fresh\n # checkout.\n #\n if not attrs or 'name' not in attrs or 'version' not in attrs:\n return\n key = pkg_resources.safe_name(str(attrs['name'])).lower()\n dist = pkg_resources.working_set.by_key.get(key)\n if dist is not None and not dist.has_metadata('PKG-INFO'):\n dist._version = pkg_resources.safe_version(str(attrs['version']))\n self._patched_dist = dist\n\n def __init__(self, attrs=None):\n have_package_data = hasattr(self, \"package_data\")\n if not have_package_data:\n self.package_data = {}\n attrs = attrs or {}\n self.dist_files = []\n # Filter-out setuptools' specific options.\n self.src_root = attrs.pop(\"src_root\", None)\n self.patch_missing_pkg_info(attrs)\n self.dependency_links = attrs.pop('dependency_links', [])\n self.setup_requires = attrs.pop('setup_requires', [])\n for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):\n vars(self).setdefault(ep.name, None)\n _Distribution.__init__(self, {\n k: v for k, v in attrs.items()\n if k not in self._DISTUTILS_UNSUPPORTED_METADATA\n })\n\n # Fill-in missing metadata fields not supported by distutils.\n # Note some fields may have been set by other tools (e.g. pbr)\n # above; they are taken preferrentially to setup() arguments\n for option, default in self._DISTUTILS_UNSUPPORTED_METADATA.items():\n for source in self.metadata.__dict__, attrs:\n if option in source:\n value = source[option]\n break\n else:\n value = default() if default else None\n setattr(self.metadata, option, value)\n\n self.metadata.version = self._normalize_version(\n self._validate_version(self.metadata.version))\n self._finalize_requires()\n\n @staticmethod\n def _normalize_version(version):\n if isinstance(version, setuptools.sic) or version is None:\n return version\n\n normalized = str(packaging.version.Version(version))\n if version != normalized:\n tmpl = \"Normalizing '{version}' to '{normalized}'\"\n warnings.warn(tmpl.format(**locals()))\n return normalized\n return version\n\n @staticmethod\n def _validate_version(version):\n if isinstance(version, numbers.Number):\n # Some people apparently take \"version number\" too literally :)\n version = str(version)\n\n if version is not None:\n try:\n packaging.version.Version(version)\n except (packaging.version.InvalidVersion, TypeError):\n warnings.warn(\n \"The version specified (%r) is an invalid version, this \"\n \"may not work as expected with newer versions of \"\n \"setuptools, pip, and PyPI. 
Please see PEP 440 for more \"\n \"details.\" % version\n )\n return setuptools.sic(version)\n return version\n\n def _finalize_requires(self):\n \"\"\"\n Set `metadata.python_requires` and fix environment markers\n in `install_requires` and `extras_require`.\n \"\"\"\n if getattr(self, 'python_requires', None):\n self.metadata.python_requires = self.python_requires\n\n if getattr(self, 'extras_require', None):\n for extra in self.extras_require.keys():\n # Since this gets called multiple times at points where the\n # keys have become 'converted' extras, ensure that we are only\n # truly adding extras we haven't seen before here.\n extra = extra.split(':')[0]\n if extra:\n self.metadata.provides_extras.add(extra)\n\n self._convert_extras_requirements()\n self._move_install_requirements_markers()\n\n def _convert_extras_requirements(self):\n \"\"\"\n Convert requirements in `extras_require` of the form\n `\"extra\": [\"barbazquux; {marker}\"]` to\n `\"extra:{marker}\": [\"barbazquux\"]`.\n \"\"\"\n spec_ext_reqs = getattr(self, 'extras_require', None) or {}\n self._tmp_extras_require = defaultdict(list)\n for section, v in spec_ext_reqs.items():\n # Do not strip empty sections.\n self._tmp_extras_require[section]\n for r in pkg_resources.parse_requirements(v):\n suffix = self._suffix_for(r)\n self._tmp_extras_require[section + suffix].append(r)\n\n @staticmethod\n def _suffix_for(req):\n \"\"\"\n For a requirement, return the 'extras_require' suffix for\n that requirement.\n \"\"\"\n return ':' + str(req.marker) if req.marker else ''\n\n def _move_install_requirements_markers(self):\n \"\"\"\n Move requirements in `install_requires` that are using environment\n markers `extras_require`.\n \"\"\"\n\n # divide the install_requires into two sets, simple ones still\n # handled by install_requires and more complex ones handled\n # by extras_require.\n\n def is_simple_req(req):\n return not req.marker\n\n spec_inst_reqs = getattr(self, 'install_requires', None) or ()\n inst_reqs = list(pkg_resources.parse_requirements(spec_inst_reqs))\n simple_reqs = filter(is_simple_req, inst_reqs)\n complex_reqs = itertools.filterfalse(is_simple_req, inst_reqs)\n self.install_requires = list(map(str, simple_reqs))\n\n for r in complex_reqs:\n self._tmp_extras_require[':' + str(r.marker)].append(r)\n self.extras_require = dict(\n (k, [str(r) for r in map(self._clean_req, v)])\n for k, v in self._tmp_extras_require.items()\n )\n\n def _clean_req(self, req):\n \"\"\"\n Given a Requirement, remove environment markers and return it.\n \"\"\"\n req.marker = None\n return req\n\n # FIXME: 'Distribution._parse_config_files' is too complex (14)\n def _parse_config_files(self, filenames=None): # noqa: C901\n \"\"\"\n Adapted from distutils.dist.Distribution.parse_config_files,\n this method provides the same functionality in subtly-improved\n ways.\n \"\"\"\n from configparser import ConfigParser\n\n # Ignore install directory options if we have a venv\n ignore_options = [] if sys.prefix == sys.base_prefix else [\n 'install-base', 'install-platbase', 'install-lib',\n 'install-platlib', 'install-purelib', 'install-headers',\n 'install-scripts', 'install-data', 'prefix', 'exec-prefix',\n 'home', 'user', 'root',\n ]\n\n ignore_options = frozenset(ignore_options)\n\n if filenames is None:\n filenames = self.find_config_files()\n\n if DEBUG:\n self.announce(\"Distribution.parse_config_files():\")\n\n parser = ConfigParser()\n for filename in filenames:\n with io.open(filename, encoding='utf-8') as reader:\n if DEBUG:\n 
self.announce(\" reading {filename}\".format(**locals()))\n parser.read_file(reader)\n for section in parser.sections():\n options = parser.options(section)\n opt_dict = self.get_option_dict(section)\n\n for opt in options:\n if opt == '__name__' or opt in ignore_options:\n continue\n\n val = parser.get(section, opt)\n opt = opt.replace('-', '_')\n opt_dict[opt] = (filename, val)\n\n # Make the ConfigParser forget everything (so we retain\n # the original filenames that options come from)\n parser.__init__()\n\n if 'global' not in self.command_options:\n return\n\n # If there was a \"global\" section in the config file, use it\n # to set Distribution options.\n\n for (opt, (src, val)) in self.command_options['global'].items():\n alias = self.negative_opt.get(opt)\n if alias:\n val = not strtobool(val)\n elif opt in ('verbose', 'dry_run'): # ugh!\n val = strtobool(val)\n\n try:\n setattr(self, alias or opt, val)\n except ValueError as e:\n raise DistutilsOptionError(e) from e\n\n # FIXME: 'Distribution._set_command_options' is too complex (14)\n def _set_command_options(self, command_obj, option_dict=None): # noqa: C901\n \"\"\"\n Set the options for 'command_obj' from 'option_dict'. Basically\n this means copying elements of a dictionary ('option_dict') to\n attributes of an instance ('command').\n\n 'command_obj' must be a Command instance. If 'option_dict' is not\n supplied, uses the standard option dictionary for this command\n (from 'self.command_options').\n\n (Adopted from distutils.dist.Distribution._set_command_options)\n \"\"\"\n command_name = command_obj.get_command_name()\n if option_dict is None:\n option_dict = self.get_option_dict(command_name)\n\n if DEBUG:\n self.announce(\" setting options for '%s' command:\" % command_name)\n for (option, (source, value)) in option_dict.items():\n if DEBUG:\n self.announce(\" %s = %s (from %s)\" % (option, value,\n source))\n try:\n bool_opts = [translate_longopt(o)\n for o in command_obj.boolean_options]\n except AttributeError:\n bool_opts = []\n try:\n neg_opt = command_obj.negative_opt\n except AttributeError:\n neg_opt = {}\n\n try:\n is_string = isinstance(value, str)\n if option in neg_opt and is_string:\n setattr(command_obj, neg_opt[option], not strtobool(value))\n elif option in bool_opts and is_string:\n setattr(command_obj, option, strtobool(value))\n elif hasattr(command_obj, option):\n setattr(command_obj, option, value)\n else:\n raise DistutilsOptionError(\n \"error in %s: command '%s' has no such option '%s'\"\n % (source, command_name, option))\n except ValueError as e:\n raise DistutilsOptionError(e) from e\n\n def parse_config_files(self, filenames=None, ignore_option_errors=False):\n \"\"\"Parses configuration files from various levels\n and loads configuration.\n\n \"\"\"\n self._parse_config_files(filenames=filenames)\n\n parse_configuration(self, self.command_options,\n ignore_option_errors=ignore_option_errors)\n self._finalize_requires()\n\n def fetch_build_eggs(self, requires):\n \"\"\"Resolve pre-setup requirements\"\"\"\n resolved_dists = pkg_resources.working_set.resolve(\n pkg_resources.parse_requirements(requires),\n installer=self.fetch_build_egg,\n replace_conflicting=True,\n )\n for dist in resolved_dists:\n pkg_resources.working_set.add(dist, replace=True)\n return resolved_dists\n\n def finalize_options(self):\n \"\"\"\n Allow plugins to apply arbitrary operations to the\n distribution. Each hook may optionally define a 'order'\n to influence the order of execution. 
Smaller numbers\n go first and the default is 0.\n \"\"\"\n group = 'setuptools.finalize_distribution_options'\n\n def by_order(hook):\n return getattr(hook, 'order', 0)\n eps = map(lambda e: e.load(), pkg_resources.iter_entry_points(group))\n for ep in sorted(eps, key=by_order):\n ep(self)\n\n def _finalize_setup_keywords(self):\n for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):\n value = getattr(self, ep.name, None)\n if value is not None:\n ep.require(installer=self.fetch_build_egg)\n ep.load()(self, ep.name, value)\n\n def _finalize_2to3_doctests(self):\n if getattr(self, 'convert_2to3_doctests', None):\n # XXX may convert to set here when we can rely on set being builtin\n self.convert_2to3_doctests = [\n os.path.abspath(p)\n for p in self.convert_2to3_doctests\n ]\n else:\n self.convert_2to3_doctests = []\n\n def get_egg_cache_dir(self):\n egg_cache_dir = os.path.join(os.curdir, '.eggs')\n if not os.path.exists(egg_cache_dir):\n os.mkdir(egg_cache_dir)\n windows_support.hide_file(egg_cache_dir)\n readme_txt_filename = os.path.join(egg_cache_dir, 'README.txt')\n with open(readme_txt_filename, 'w') as f:\n f.write('This directory contains eggs that were downloaded '\n 'by setuptools to build, test, and run plug-ins.\\n\\n')\n f.write('This directory caches those eggs to prevent '\n 'repeated downloads.\\n\\n')\n f.write('However, it is safe to delete this directory.\\n\\n')\n\n return egg_cache_dir\n\n def fetch_build_egg(self, req):\n \"\"\"Fetch an egg needed for building\"\"\"\n from setuptools.installer import fetch_build_egg\n return fetch_build_egg(self, req)\n\n def get_command_class(self, command):\n \"\"\"Pluggable version of get_command_class()\"\"\"\n if command in self.cmdclass:\n return self.cmdclass[command]\n\n eps = pkg_resources.iter_entry_points('distutils.commands', command)\n for ep in eps:\n ep.require(installer=self.fetch_build_egg)\n self.cmdclass[command] = cmdclass = ep.load()\n return cmdclass\n else:\n return _Distribution.get_command_class(self, command)\n\n def print_commands(self):\n for ep in pkg_resources.iter_entry_points('distutils.commands'):\n if ep.name not in self.cmdclass:\n # don't require extras as the commands won't be invoked\n cmdclass = ep.resolve()\n self.cmdclass[ep.name] = cmdclass\n return _Distribution.print_commands(self)\n\n def get_command_list(self):\n for ep in pkg_resources.iter_entry_points('distutils.commands'):\n if ep.name not in self.cmdclass:\n # don't require extras as the commands won't be invoked\n cmdclass = ep.resolve()\n self.cmdclass[ep.name] = cmdclass\n return _Distribution.get_command_list(self)\n\n def include(self, **attrs):\n \"\"\"Add items to distribution that are named in keyword arguments\n\n For example, 'dist.include(py_modules=[\"x\"])' would add 'x' to\n the distribution's 'py_modules' attribute, if it was not already\n there.\n\n Currently, this method only supports inclusion for attributes that are\n lists or tuples. If you need to add support for adding to other\n attributes in this or a subclass, you can add an '_include_X' method,\n where 'X' is the name of the attribute. The method will be called with\n the value passed to 'include()'. 
So, 'dist.include(foo={\"bar\":\"baz\"})'\n will try to call 'dist._include_foo({\"bar\":\"baz\"})', which can then\n handle whatever special inclusion logic is needed.\n \"\"\"\n for k, v in attrs.items():\n include = getattr(self, '_include_' + k, None)\n if include:\n include(v)\n else:\n self._include_misc(k, v)\n\n def exclude_package(self, package):\n \"\"\"Remove packages, modules, and extensions in named package\"\"\"\n\n pfx = package + '.'\n if self.packages:\n self.packages = [\n p for p in self.packages\n if p != package and not p.startswith(pfx)\n ]\n\n if self.py_modules:\n self.py_modules = [\n p for p in self.py_modules\n if p != package and not p.startswith(pfx)\n ]\n\n if self.ext_modules:\n self.ext_modules = [\n p for p in self.ext_modules\n if p.name != package and not p.name.startswith(pfx)\n ]\n\n def has_contents_for(self, package):\n \"\"\"Return true if 'exclude_package(package)' would do something\"\"\"\n\n pfx = package + '.'\n\n for p in self.iter_distribution_names():\n if p == package or p.startswith(pfx):\n return True\n\n def _exclude_misc(self, name, value):\n \"\"\"Handle 'exclude()' for list/tuple attrs without a special handler\"\"\"\n if not isinstance(value, sequence):\n raise DistutilsSetupError(\n \"%s: setting must be a list or tuple (%r)\" % (name, value)\n )\n try:\n old = getattr(self, name)\n except AttributeError as e:\n raise DistutilsSetupError(\n \"%s: No such distribution setting\" % name\n ) from e\n if old is not None and not isinstance(old, sequence):\n raise DistutilsSetupError(\n name + \": this setting cannot be changed via include/exclude\"\n )\n elif old:\n setattr(self, name, [item for item in old if item not in value])\n\n def _include_misc(self, name, value):\n \"\"\"Handle 'include()' for list/tuple attrs without a special handler\"\"\"\n\n if not isinstance(value, sequence):\n raise DistutilsSetupError(\n \"%s: setting must be a list (%r)\" % (name, value)\n )\n try:\n old = getattr(self, name)\n except AttributeError as e:\n raise DistutilsSetupError(\n \"%s: No such distribution setting\" % name\n ) from e\n if old is None:\n setattr(self, name, value)\n elif not isinstance(old, sequence):\n raise DistutilsSetupError(\n name + \": this setting cannot be changed via include/exclude\"\n )\n else:\n new = [item for item in value if item not in old]\n setattr(self, name, old + new)\n\n def exclude(self, **attrs):\n \"\"\"Remove items from distribution that are named in keyword arguments\n\n For example, 'dist.exclude(py_modules=[\"x\"])' would remove 'x' from\n the distribution's 'py_modules' attribute. Excluding packages uses\n the 'exclude_package()' method, so all of the package's contained\n packages, modules, and extensions are also excluded.\n\n Currently, this method only supports exclusion from attributes that are\n lists or tuples. If you need to add support for excluding from other\n attributes in this or a subclass, you can add an '_exclude_X' method,\n where 'X' is the name of the attribute. The method will be called with\n the value passed to 'exclude()'. 
So, 'dist.exclude(foo={\"bar\":\"baz\"})'\n will try to call 'dist._exclude_foo({\"bar\":\"baz\"})', which can then\n handle whatever special exclusion logic is needed.\n \"\"\"\n for k, v in attrs.items():\n exclude = getattr(self, '_exclude_' + k, None)\n if exclude:\n exclude(v)\n else:\n self._exclude_misc(k, v)\n\n def _exclude_packages(self, packages):\n if not isinstance(packages, sequence):\n raise DistutilsSetupError(\n \"packages: setting must be a list or tuple (%r)\" % (packages,)\n )\n list(map(self.exclude_package, packages))\n\n def _parse_command_opts(self, parser, args):\n # Remove --with-X/--without-X options when processing command args\n self.global_options = self.__class__.global_options\n self.negative_opt = self.__class__.negative_opt\n\n # First, expand any aliases\n command = args[0]\n aliases = self.get_option_dict('aliases')\n while command in aliases:\n src, alias = aliases[command]\n del aliases[command] # ensure each alias can expand only once!\n import shlex\n args[:1] = shlex.split(alias, True)\n command = args[0]\n\n nargs = _Distribution._parse_command_opts(self, parser, args)\n\n # Handle commands that want to consume all remaining arguments\n cmd_class = self.get_command_class(command)\n if getattr(cmd_class, 'command_consumes_arguments', None):\n self.get_option_dict(command)['args'] = (\"command line\", nargs)\n if nargs is not None:\n return []\n\n return nargs\n\n def get_cmdline_options(self):\n \"\"\"Return a '{cmd: {opt:val}}' map of all command-line options\n\n Option names are all long, but do not include the leading '--', and\n contain dashes rather than underscores. If the option doesn't take\n an argument (e.g. '--quiet'), the 'val' is 'None'.\n\n Note that options provided by config files are intentionally excluded.\n \"\"\"\n\n d = {}\n\n for cmd, opts in self.command_options.items():\n\n for opt, (src, val) in opts.items():\n\n if src != \"command line\":\n continue\n\n opt = opt.replace('_', '-')\n\n if val == 0:\n cmdobj = self.get_command_obj(cmd)\n neg_opt = self.negative_opt.copy()\n neg_opt.update(getattr(cmdobj, 'negative_opt', {}))\n for neg, pos in neg_opt.items():\n if pos == opt:\n opt = neg\n val = None\n break\n else:\n raise AssertionError(\"Shouldn't be able to get here\")\n\n elif val == 1:\n val = None\n\n d.setdefault(cmd, {})[opt] = val\n\n return d\n\n def iter_distribution_names(self):\n \"\"\"Yield all packages, modules, and extension names in distribution\"\"\"\n\n for pkg in self.packages or ():\n yield pkg\n\n for module in self.py_modules or ():\n yield module\n\n for ext in self.ext_modules or ():\n if isinstance(ext, tuple):\n name, buildinfo = ext\n else:\n name = ext.name\n if name.endswith('module'):\n name = name[:-6]\n yield name\n\n def handle_display_options(self, option_order):\n \"\"\"If there were any non-global \"display-only\" options\n (--help-commands or the metadata display options) on the command\n line, display the requested info and return true; else return\n false.\n \"\"\"\n import sys\n\n if self.help_commands:\n return _Distribution.handle_display_options(self, option_order)\n\n # Stdout may be StringIO (e.g. in tests)\n if not isinstance(sys.stdout, io.TextIOWrapper):\n return _Distribution.handle_display_options(self, option_order)\n\n # Don't wrap stdout if utf-8 is already the encoding. 
Provides\n # workaround for #334.\n if sys.stdout.encoding.lower() in ('utf-8', 'utf8'):\n return _Distribution.handle_display_options(self, option_order)\n\n # Print metadata in UTF-8 no matter the platform\n encoding = sys.stdout.encoding\n errors = sys.stdout.errors\n newline = sys.platform != 'win32' and '\\n' or None\n line_buffering = sys.stdout.line_buffering\n\n sys.stdout = io.TextIOWrapper(\n sys.stdout.detach(), 'utf-8', errors, newline, line_buffering)\n try:\n return _Distribution.handle_display_options(self, option_order)\n finally:\n sys.stdout = io.TextIOWrapper(\n sys.stdout.detach(), encoding, errors, newline, line_buffering)\n\n\nclass DistDeprecationWarning(SetuptoolsDeprecationWarning):\n \"\"\"Class for warning about deprecations in dist in\n setuptools. Not ignored by default, unlike DeprecationWarning.\"\"\"\n", "path": "setuptools/dist.py"}]} |
gh_patches_debug_1276 | rasdani/github-patches | git_diff | encode__django-rest-framework-2948 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`max_decimal_places` in Decimal field are wrong calculated
We got an issue when number is formatted as `decimal.Decimal('2E+9')`.
How `DecimalField` counts decimals:
```
sign, digittuple, exponent = value.as_tuple()
decimals = abs(exponent)
```
However result of `decimal.Decimal('2E+9').as_tuple()[2]` is **9**, which is ok, but there are no decimal places in this number.
My solution is to not do `abs` and instead multiply by `-1`.
I can prepare PR tonight if you think it is valid.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rest_framework/fields.py`
Content:
```
1 from __future__ import unicode_literals
2 from django.conf import settings
3 from django.core.exceptions import ObjectDoesNotExist
4 from django.core.exceptions import ValidationError as DjangoValidationError
5 from django.core.validators import RegexValidator
6 from django.forms import ImageField as DjangoImageField
7 from django.utils import six, timezone
8 from django.utils.dateparse import parse_date, parse_datetime, parse_time
9 from django.utils.encoding import is_protected_type, smart_text
10 from django.utils.translation import ugettext_lazy as _
11 from rest_framework import ISO_8601
12 from rest_framework.compat import (
13 EmailValidator, MinValueValidator, MaxValueValidator,
14 MinLengthValidator, MaxLengthValidator, URLValidator, OrderedDict,
15 unicode_repr, unicode_to_repr
16 )
17 from rest_framework.exceptions import ValidationError
18 from rest_framework.settings import api_settings
19 from rest_framework.utils import html, representation, humanize_datetime
20 import collections
21 import copy
22 import datetime
23 import decimal
24 import inspect
25 import re
26 import uuid
27
28
29 class empty:
30 """
31 This class is used to represent no data being provided for a given input
32 or output value.
33
34 It is required because `None` may be a valid input or output value.
35 """
36 pass
37
38
39 def is_simple_callable(obj):
40 """
41 True if the object is a callable that takes no arguments.
42 """
43 function = inspect.isfunction(obj)
44 method = inspect.ismethod(obj)
45
46 if not (function or method):
47 return False
48
49 args, _, _, defaults = inspect.getargspec(obj)
50 len_args = len(args) if function else len(args) - 1
51 len_defaults = len(defaults) if defaults else 0
52 return len_args <= len_defaults
53
54
55 def get_attribute(instance, attrs):
56 """
57 Similar to Python's built in `getattr(instance, attr)`,
58 but takes a list of nested attributes, instead of a single attribute.
59
60 Also accepts either attribute lookup on objects or dictionary lookups.
61 """
62 for attr in attrs:
63 if instance is None:
64 # Break out early if we get `None` at any point in a nested lookup.
65 return None
66 try:
67 if isinstance(instance, collections.Mapping):
68 instance = instance[attr]
69 else:
70 instance = getattr(instance, attr)
71 except ObjectDoesNotExist:
72 return None
73 if is_simple_callable(instance):
74 try:
75 instance = instance()
76 except (AttributeError, KeyError) as exc:
77 # If we raised an Attribute or KeyError here it'd get treated
78 # as an omitted field in `Field.get_attribute()`. Instead we
79 # raise a ValueError to ensure the exception is not masked.
80 raise ValueError('Exception raised in callable attribute "{0}"; original exception was: {1}'.format(attr, exc))
81
82 return instance
83
84
85 def set_value(dictionary, keys, value):
86 """
87 Similar to Python's built in `dictionary[key] = value`,
88 but takes a list of nested keys instead of a single key.
89
90 set_value({'a': 1}, [], {'b': 2}) -> {'a': 1, 'b': 2}
91 set_value({'a': 1}, ['x'], 2) -> {'a': 1, 'x': 2}
92 set_value({'a': 1}, ['x', 'y'], 2) -> {'a': 1, 'x': {'y': 2}}
93 """
94 if not keys:
95 dictionary.update(value)
96 return
97
98 for key in keys[:-1]:
99 if key not in dictionary:
100 dictionary[key] = {}
101 dictionary = dictionary[key]
102
103 dictionary[keys[-1]] = value
104
105
106 class CreateOnlyDefault(object):
107 """
108 This class may be used to provide default values that are only used
109 for create operations, but that do not return any value for update
110 operations.
111 """
112 def __init__(self, default):
113 self.default = default
114
115 def set_context(self, serializer_field):
116 self.is_update = serializer_field.parent.instance is not None
117 if callable(self.default) and hasattr(self.default, 'set_context') and not self.is_update:
118 self.default.set_context(serializer_field)
119
120 def __call__(self):
121 if self.is_update:
122 raise SkipField()
123 if callable(self.default):
124 return self.default()
125 return self.default
126
127 def __repr__(self):
128 return unicode_to_repr(
129 '%s(%s)' % (self.__class__.__name__, unicode_repr(self.default))
130 )
131
132
133 class CurrentUserDefault(object):
134 def set_context(self, serializer_field):
135 self.user = serializer_field.context['request'].user
136
137 def __call__(self):
138 return self.user
139
140 def __repr__(self):
141 return unicode_to_repr('%s()' % self.__class__.__name__)
142
143
144 class SkipField(Exception):
145 pass
146
147
148 NOT_READ_ONLY_WRITE_ONLY = 'May not set both `read_only` and `write_only`'
149 NOT_READ_ONLY_REQUIRED = 'May not set both `read_only` and `required`'
150 NOT_REQUIRED_DEFAULT = 'May not set both `required` and `default`'
151 USE_READONLYFIELD = 'Field(read_only=True) should be ReadOnlyField'
152 MISSING_ERROR_MESSAGE = (
153 'ValidationError raised by `{class_name}`, but error key `{key}` does '
154 'not exist in the `error_messages` dictionary.'
155 )
156
157
158 class Field(object):
159 _creation_counter = 0
160
161 default_error_messages = {
162 'required': _('This field is required.'),
163 'null': _('This field may not be null.')
164 }
165 default_validators = []
166 default_empty_html = empty
167 initial = None
168
169 def __init__(self, read_only=False, write_only=False,
170 required=None, default=empty, initial=empty, source=None,
171 label=None, help_text=None, style=None,
172 error_messages=None, validators=None, allow_null=False):
173 self._creation_counter = Field._creation_counter
174 Field._creation_counter += 1
175
176 # If `required` is unset, then use `True` unless a default is provided.
177 if required is None:
178 required = default is empty and not read_only
179
180 # Some combinations of keyword arguments do not make sense.
181 assert not (read_only and write_only), NOT_READ_ONLY_WRITE_ONLY
182 assert not (read_only and required), NOT_READ_ONLY_REQUIRED
183 assert not (required and default is not empty), NOT_REQUIRED_DEFAULT
184 assert not (read_only and self.__class__ == Field), USE_READONLYFIELD
185
186 self.read_only = read_only
187 self.write_only = write_only
188 self.required = required
189 self.default = default
190 self.source = source
191 self.initial = self.initial if (initial is empty) else initial
192 self.label = label
193 self.help_text = help_text
194 self.style = {} if style is None else style
195 self.allow_null = allow_null
196
197 if self.default_empty_html is not empty:
198 if not required:
199 self.default_empty_html = empty
200 elif default is not empty:
201 self.default_empty_html = default
202
203 if validators is not None:
204 self.validators = validators[:]
205
206 # These are set up by `.bind()` when the field is added to a serializer.
207 self.field_name = None
208 self.parent = None
209
210 # Collect default error message from self and parent classes
211 messages = {}
212 for cls in reversed(self.__class__.__mro__):
213 messages.update(getattr(cls, 'default_error_messages', {}))
214 messages.update(error_messages or {})
215 self.error_messages = messages
216
217 def bind(self, field_name, parent):
218 """
219 Initializes the field name and parent for the field instance.
220 Called when a field is added to the parent serializer instance.
221 """
222
223 # In order to enforce a consistent style, we error if a redundant
224 # 'source' argument has been used. For example:
225 # my_field = serializer.CharField(source='my_field')
226 assert self.source != field_name, (
227 "It is redundant to specify `source='%s'` on field '%s' in "
228 "serializer '%s', because it is the same as the field name. "
229 "Remove the `source` keyword argument." %
230 (field_name, self.__class__.__name__, parent.__class__.__name__)
231 )
232
233 self.field_name = field_name
234 self.parent = parent
235
236 # `self.label` should default to being based on the field name.
237 if self.label is None:
238 self.label = field_name.replace('_', ' ').capitalize()
239
240 # self.source should default to being the same as the field name.
241 if self.source is None:
242 self.source = field_name
243
244 # self.source_attrs is a list of attributes that need to be looked up
245 # when serializing the instance, or populating the validated data.
246 if self.source == '*':
247 self.source_attrs = []
248 else:
249 self.source_attrs = self.source.split('.')
250
251 # .validators is a lazily loaded property, that gets its default
252 # value from `get_validators`.
253 @property
254 def validators(self):
255 if not hasattr(self, '_validators'):
256 self._validators = self.get_validators()
257 return self._validators
258
259 @validators.setter
260 def validators(self, validators):
261 self._validators = validators
262
263 def get_validators(self):
264 return self.default_validators[:]
265
266 def get_initial(self):
267 """
268 Return a value to use when the field is being returned as a primitive
269 value, without any object instance.
270 """
271 return self.initial
272
273 def get_value(self, dictionary):
274 """
275 Given the *incoming* primitive data, return the value for this field
276 that should be validated and transformed to a native value.
277 """
278 if html.is_html_input(dictionary):
279 # HTML forms will represent empty fields as '', and cannot
280 # represent None or False values directly.
281 if self.field_name not in dictionary:
282 if getattr(self.root, 'partial', False):
283 return empty
284 return self.default_empty_html
285 ret = dictionary[self.field_name]
286 if ret == '' and self.allow_null:
287 # If the field is blank, and null is a valid value then
288 # determine if we should use null instead.
289 return '' if getattr(self, 'allow_blank', False) else None
290 return ret
291 return dictionary.get(self.field_name, empty)
292
293 def get_attribute(self, instance):
294 """
295 Given the *outgoing* object instance, return the primitive value
296 that should be used for this field.
297 """
298 try:
299 return get_attribute(instance, self.source_attrs)
300 except (KeyError, AttributeError) as exc:
301 if not self.required and self.default is empty:
302 raise SkipField()
303 msg = (
304 'Got {exc_type} when attempting to get a value for field '
305 '`{field}` on serializer `{serializer}`.\nThe serializer '
306 'field might be named incorrectly and not match '
307 'any attribute or key on the `{instance}` instance.\n'
308 'Original exception text was: {exc}.'.format(
309 exc_type=type(exc).__name__,
310 field=self.field_name,
311 serializer=self.parent.__class__.__name__,
312 instance=instance.__class__.__name__,
313 exc=exc
314 )
315 )
316 raise type(exc)(msg)
317
318 def get_default(self):
319 """
320 Return the default value to use when validating data if no input
321 is provided for this field.
322
323 If a default has not been set for this field then this will simply
324 return `empty`, indicating that no value should be set in the
325 validated data for this field.
326 """
327 if self.default is empty:
328 raise SkipField()
329 if callable(self.default):
330 if hasattr(self.default, 'set_context'):
331 self.default.set_context(self)
332 return self.default()
333 return self.default
334
335 def validate_empty_values(self, data):
336 """
337 Validate empty values, and either:
338
339 * Raise `ValidationError`, indicating invalid data.
340 * Raise `SkipField`, indicating that the field should be ignored.
341 * Return (True, data), indicating an empty value that should be
342 returned without any further validation being applied.
343 * Return (False, data), indicating a non-empty value, that should
344 have validation applied as normal.
345 """
346 if self.read_only:
347 return (True, self.get_default())
348
349 if data is empty:
350 if getattr(self.root, 'partial', False):
351 raise SkipField()
352 if self.required:
353 self.fail('required')
354 return (True, self.get_default())
355
356 if data is None:
357 if not self.allow_null:
358 self.fail('null')
359 return (True, None)
360
361 return (False, data)
362
363 def run_validation(self, data=empty):
364 """
365 Validate a simple representation and return the internal value.
366
367 The provided data may be `empty` if no representation was included
368 in the input.
369
370 May raise `SkipField` if the field should not be included in the
371 validated data.
372 """
373 (is_empty_value, data) = self.validate_empty_values(data)
374 if is_empty_value:
375 return data
376 value = self.to_internal_value(data)
377 self.run_validators(value)
378 return value
379
380 def run_validators(self, value):
381 """
382 Test the given value against all the validators on the field,
383 and either raise a `ValidationError` or simply return.
384 """
385 errors = []
386 for validator in self.validators:
387 if hasattr(validator, 'set_context'):
388 validator.set_context(self)
389
390 try:
391 validator(value)
392 except ValidationError as exc:
393 # If the validation error contains a mapping of fields to
394 # errors then simply raise it immediately rather than
395 # attempting to accumulate a list of errors.
396 if isinstance(exc.detail, dict):
397 raise
398 errors.extend(exc.detail)
399 except DjangoValidationError as exc:
400 errors.extend(exc.messages)
401 if errors:
402 raise ValidationError(errors)
403
404 def to_internal_value(self, data):
405 """
406 Transform the *incoming* primitive data into a native value.
407 """
408 raise NotImplementedError(
409 '{cls}.to_internal_value() must be implemented.'.format(
410 cls=self.__class__.__name__
411 )
412 )
413
414 def to_representation(self, value):
415 """
416 Transform the *outgoing* native value into primitive data.
417 """
418 raise NotImplementedError(
419 '{cls}.to_representation() must be implemented.\n'
420 'If you are upgrading from REST framework version 2 '
421 'you might want `ReadOnlyField`.'.format(
422 cls=self.__class__.__name__
423 )
424 )
425
426 def fail(self, key, **kwargs):
427 """
428 A helper method that simply raises a validation error.
429 """
430 try:
431 msg = self.error_messages[key]
432 except KeyError:
433 class_name = self.__class__.__name__
434 msg = MISSING_ERROR_MESSAGE.format(class_name=class_name, key=key)
435 raise AssertionError(msg)
436 message_string = msg.format(**kwargs)
437 raise ValidationError(message_string)
438
439 @property
440 def root(self):
441 """
442 Returns the top-level serializer for this field.
443 """
444 root = self
445 while root.parent is not None:
446 root = root.parent
447 return root
448
449 @property
450 def context(self):
451 """
452 Returns the context as passed to the root serializer on initialization.
453 """
454 return getattr(self.root, '_context', {})
455
456 def __new__(cls, *args, **kwargs):
457 """
458 When a field is instantiated, we store the arguments that were used,
459 so that we can present a helpful representation of the object.
460 """
461 instance = super(Field, cls).__new__(cls)
462 instance._args = args
463 instance._kwargs = kwargs
464 return instance
465
466 def __deepcopy__(self, memo):
467 """
468 When cloning fields we instantiate using the arguments it was
469 originally created with, rather than copying the complete state.
470 """
471 args = copy.deepcopy(self._args)
472 kwargs = dict(self._kwargs)
473 # Bit ugly, but we need to special case 'validators' as Django's
474 # RegexValidator does not support deepcopy.
475 # We treat validator callables as immutable objects.
476 # See https://github.com/tomchristie/django-rest-framework/issues/1954
477 validators = kwargs.pop('validators', None)
478 kwargs = copy.deepcopy(kwargs)
479 if validators is not None:
480 kwargs['validators'] = validators
481 return self.__class__(*args, **kwargs)
482
483 def __repr__(self):
484 """
485 Fields are represented using their initial calling arguments.
486 This allows us to create descriptive representations for serializer
487 instances that show all the declared fields on the serializer.
488 """
489 return unicode_to_repr(representation.field_repr(self))
490
491
492 # Boolean types...
493
494 class BooleanField(Field):
495 default_error_messages = {
496 'invalid': _('"{input}" is not a valid boolean.')
497 }
498 default_empty_html = False
499 initial = False
500 TRUE_VALUES = set(('t', 'T', 'true', 'True', 'TRUE', '1', 1, True))
501 FALSE_VALUES = set(('f', 'F', 'false', 'False', 'FALSE', '0', 0, 0.0, False))
502
503 def __init__(self, **kwargs):
504 assert 'allow_null' not in kwargs, '`allow_null` is not a valid option. Use `NullBooleanField` instead.'
505 super(BooleanField, self).__init__(**kwargs)
506
507 def to_internal_value(self, data):
508 if data in self.TRUE_VALUES:
509 return True
510 elif data in self.FALSE_VALUES:
511 return False
512 self.fail('invalid', input=data)
513
514 def to_representation(self, value):
515 if value in self.TRUE_VALUES:
516 return True
517 elif value in self.FALSE_VALUES:
518 return False
519 return bool(value)
520
521
522 class NullBooleanField(Field):
523 default_error_messages = {
524 'invalid': _('"{input}" is not a valid boolean.')
525 }
526 initial = None
527 TRUE_VALUES = set(('t', 'T', 'true', 'True', 'TRUE', '1', 1, True))
528 FALSE_VALUES = set(('f', 'F', 'false', 'False', 'FALSE', '0', 0, 0.0, False))
529 NULL_VALUES = set(('n', 'N', 'null', 'Null', 'NULL', '', None))
530
531 def __init__(self, **kwargs):
532 assert 'allow_null' not in kwargs, '`allow_null` is not a valid option.'
533 kwargs['allow_null'] = True
534 super(NullBooleanField, self).__init__(**kwargs)
535
536 def to_internal_value(self, data):
537 if data in self.TRUE_VALUES:
538 return True
539 elif data in self.FALSE_VALUES:
540 return False
541 elif data in self.NULL_VALUES:
542 return None
543 self.fail('invalid', input=data)
544
545 def to_representation(self, value):
546 if value in self.NULL_VALUES:
547 return None
548 if value in self.TRUE_VALUES:
549 return True
550 elif value in self.FALSE_VALUES:
551 return False
552 return bool(value)
553
554
555 # String types...
556
557 class CharField(Field):
558 default_error_messages = {
559 'blank': _('This field may not be blank.'),
560 'max_length': _('Ensure this field has no more than {max_length} characters.'),
561 'min_length': _('Ensure this field has at least {min_length} characters.')
562 }
563 initial = ''
564
565 def __init__(self, **kwargs):
566 self.allow_blank = kwargs.pop('allow_blank', False)
567 self.trim_whitespace = kwargs.pop('trim_whitespace', True)
568 self.max_length = kwargs.pop('max_length', None)
569 self.min_length = kwargs.pop('min_length', None)
570 super(CharField, self).__init__(**kwargs)
571 if self.max_length is not None:
572 message = self.error_messages['max_length'].format(max_length=self.max_length)
573 self.validators.append(MaxLengthValidator(self.max_length, message=message))
574 if self.min_length is not None:
575 message = self.error_messages['min_length'].format(min_length=self.min_length)
576 self.validators.append(MinLengthValidator(self.min_length, message=message))
577
578 def run_validation(self, data=empty):
579 # Test for the empty string here so that it does not get validated,
580 # and so that subclasses do not need to handle it explicitly
581 # inside the `to_internal_value()` method.
582 if data == '':
583 if not self.allow_blank:
584 self.fail('blank')
585 return ''
586 return super(CharField, self).run_validation(data)
587
588 def to_internal_value(self, data):
589 value = six.text_type(data)
590 return value.strip() if self.trim_whitespace else value
591
592 def to_representation(self, value):
593 return six.text_type(value)
594
595
596 class EmailField(CharField):
597 default_error_messages = {
598 'invalid': _('Enter a valid email address.')
599 }
600
601 def __init__(self, **kwargs):
602 super(EmailField, self).__init__(**kwargs)
603 validator = EmailValidator(message=self.error_messages['invalid'])
604 self.validators.append(validator)
605
606
607 class RegexField(CharField):
608 default_error_messages = {
609 'invalid': _('This value does not match the required pattern.')
610 }
611
612 def __init__(self, regex, **kwargs):
613 super(RegexField, self).__init__(**kwargs)
614 validator = RegexValidator(regex, message=self.error_messages['invalid'])
615 self.validators.append(validator)
616
617
618 class SlugField(CharField):
619 default_error_messages = {
620 'invalid': _('Enter a valid "slug" consisting of letters, numbers, underscores or hyphens.')
621 }
622
623 def __init__(self, **kwargs):
624 super(SlugField, self).__init__(**kwargs)
625 slug_regex = re.compile(r'^[-a-zA-Z0-9_]+$')
626 validator = RegexValidator(slug_regex, message=self.error_messages['invalid'])
627 self.validators.append(validator)
628
629
630 class URLField(CharField):
631 default_error_messages = {
632 'invalid': _('Enter a valid URL.')
633 }
634
635 def __init__(self, **kwargs):
636 super(URLField, self).__init__(**kwargs)
637 validator = URLValidator(message=self.error_messages['invalid'])
638 self.validators.append(validator)
639
640
641 class UUIDField(Field):
642 default_error_messages = {
643 'invalid': _('"{value}" is not a valid UUID.'),
644 }
645
646 def to_internal_value(self, data):
647 if not isinstance(data, uuid.UUID):
648 try:
649 return uuid.UUID(data)
650 except (ValueError, TypeError):
651 self.fail('invalid', value=data)
652 return data
653
654 def to_representation(self, value):
655 return str(value)
656
657
658 # Number types...
659
660 class IntegerField(Field):
661 default_error_messages = {
662 'invalid': _('A valid integer is required.'),
663 'max_value': _('Ensure this value is less than or equal to {max_value}.'),
664 'min_value': _('Ensure this value is greater than or equal to {min_value}.'),
665 'max_string_length': _('String value too large.')
666 }
667 MAX_STRING_LENGTH = 1000 # Guard against malicious string inputs.
668 re_decimal = re.compile(r'\.0*\s*$') # allow e.g. '1.0' as an int, but not '1.2'
669
670 def __init__(self, **kwargs):
671 self.max_value = kwargs.pop('max_value', None)
672 self.min_value = kwargs.pop('min_value', None)
673 super(IntegerField, self).__init__(**kwargs)
674 if self.max_value is not None:
675 message = self.error_messages['max_value'].format(max_value=self.max_value)
676 self.validators.append(MaxValueValidator(self.max_value, message=message))
677 if self.min_value is not None:
678 message = self.error_messages['min_value'].format(min_value=self.min_value)
679 self.validators.append(MinValueValidator(self.min_value, message=message))
680
681 def to_internal_value(self, data):
682 if isinstance(data, six.text_type) and len(data) > self.MAX_STRING_LENGTH:
683 self.fail('max_string_length')
684
685 try:
686 data = int(self.re_decimal.sub('', str(data)))
687 except (ValueError, TypeError):
688 self.fail('invalid')
689 return data
690
691 def to_representation(self, value):
692 return int(value)
693
694
695 class FloatField(Field):
696 default_error_messages = {
697 'invalid': _('A valid number is required.'),
698 'max_value': _('Ensure this value is less than or equal to {max_value}.'),
699 'min_value': _('Ensure this value is greater than or equal to {min_value}.'),
700 'max_string_length': _('String value too large.')
701 }
702 MAX_STRING_LENGTH = 1000 # Guard against malicious string inputs.
703
704 def __init__(self, **kwargs):
705 self.max_value = kwargs.pop('max_value', None)
706 self.min_value = kwargs.pop('min_value', None)
707 super(FloatField, self).__init__(**kwargs)
708 if self.max_value is not None:
709 message = self.error_messages['max_value'].format(max_value=self.max_value)
710 self.validators.append(MaxValueValidator(self.max_value, message=message))
711 if self.min_value is not None:
712 message = self.error_messages['min_value'].format(min_value=self.min_value)
713 self.validators.append(MinValueValidator(self.min_value, message=message))
714
715 def to_internal_value(self, data):
716 if isinstance(data, six.text_type) and len(data) > self.MAX_STRING_LENGTH:
717 self.fail('max_string_length')
718
719 try:
720 return float(data)
721 except (TypeError, ValueError):
722 self.fail('invalid')
723
724 def to_representation(self, value):
725 return float(value)
726
727
728 class DecimalField(Field):
729 default_error_messages = {
730 'invalid': _('A valid number is required.'),
731 'max_value': _('Ensure this value is less than or equal to {max_value}.'),
732 'min_value': _('Ensure this value is greater than or equal to {min_value}.'),
733 'max_digits': _('Ensure that there are no more than {max_digits} digits in total.'),
734 'max_decimal_places': _('Ensure that there are no more than {max_decimal_places} decimal places.'),
735 'max_whole_digits': _('Ensure that there are no more than {max_whole_digits} digits before the decimal point.'),
736 'max_string_length': _('String value too large.')
737 }
738 MAX_STRING_LENGTH = 1000 # Guard against malicious string inputs.
739
740 coerce_to_string = api_settings.COERCE_DECIMAL_TO_STRING
741
742 def __init__(self, max_digits, decimal_places, coerce_to_string=None, max_value=None, min_value=None, **kwargs):
743 self.max_digits = max_digits
744 self.decimal_places = decimal_places
745 self.coerce_to_string = coerce_to_string if (coerce_to_string is not None) else self.coerce_to_string
746
747 self.max_value = max_value
748 self.min_value = min_value
749
750 super(DecimalField, self).__init__(**kwargs)
751
752 if self.max_value is not None:
753 message = self.error_messages['max_value'].format(max_value=self.max_value)
754 self.validators.append(MaxValueValidator(self.max_value, message=message))
755 if self.min_value is not None:
756 message = self.error_messages['min_value'].format(min_value=self.min_value)
757 self.validators.append(MinValueValidator(self.min_value, message=message))
758
759 def to_internal_value(self, data):
760 """
761 Validates that the input is a decimal number. Returns a Decimal
762 instance. Returns None for empty values. Ensures that there are no more
763 than max_digits in the number, and no more than decimal_places digits
764 after the decimal point.
765 """
766 data = smart_text(data).strip()
767 if len(data) > self.MAX_STRING_LENGTH:
768 self.fail('max_string_length')
769
770 try:
771 value = decimal.Decimal(data)
772 except decimal.DecimalException:
773 self.fail('invalid')
774
775 # Check for NaN. It is the only value that isn't equal to itself,
776 # so we can use this to identify NaN values.
777 if value != value:
778 self.fail('invalid')
779
780 # Check for infinity and negative infinity.
781 if value in (decimal.Decimal('Inf'), decimal.Decimal('-Inf')):
782 self.fail('invalid')
783
784 sign, digittuple, exponent = value.as_tuple()
785 decimals = abs(exponent)
786 # digittuple doesn't include any leading zeros.
787 digits = len(digittuple)
788 if decimals > digits:
789 # We have leading zeros up to or past the decimal point. Count
790 # everything past the decimal point as a digit. We do not count
791 # 0 before the decimal point as a digit since that would mean
792 # we would not allow max_digits = decimal_places.
793 digits = decimals
794 whole_digits = digits - decimals
795
796 if self.max_digits is not None and digits > self.max_digits:
797 self.fail('max_digits', max_digits=self.max_digits)
798 if self.decimal_places is not None and decimals > self.decimal_places:
799 self.fail('max_decimal_places', max_decimal_places=self.decimal_places)
800 if self.max_digits is not None and self.decimal_places is not None and whole_digits > (self.max_digits - self.decimal_places):
801 self.fail('max_whole_digits', max_whole_digits=self.max_digits - self.decimal_places)
802
803 return value
804
805 def to_representation(self, value):
806 if not isinstance(value, decimal.Decimal):
807 value = decimal.Decimal(six.text_type(value).strip())
808
809 context = decimal.getcontext().copy()
810 context.prec = self.max_digits
811 quantized = value.quantize(
812 decimal.Decimal('.1') ** self.decimal_places,
813 context=context
814 )
815 if not self.coerce_to_string:
816 return quantized
817 return '{0:f}'.format(quantized)
818
819
820 # Date & time fields...
821
822 class DateTimeField(Field):
823 default_error_messages = {
824 'invalid': _('Datetime has wrong format. Use one of these formats instead: {format}.'),
825 'date': _('Expected a datetime but got a date.'),
826 }
827 format = api_settings.DATETIME_FORMAT
828 input_formats = api_settings.DATETIME_INPUT_FORMATS
829 default_timezone = timezone.get_default_timezone() if settings.USE_TZ else None
830
831 def __init__(self, format=empty, input_formats=None, default_timezone=None, *args, **kwargs):
832 self.format = format if format is not empty else self.format
833 self.input_formats = input_formats if input_formats is not None else self.input_formats
834 self.default_timezone = default_timezone if default_timezone is not None else self.default_timezone
835 super(DateTimeField, self).__init__(*args, **kwargs)
836
837 def enforce_timezone(self, value):
838 """
839 When `self.default_timezone` is `None`, always return naive datetimes.
840 When `self.default_timezone` is not `None`, always return aware datetimes.
841 """
842 if (self.default_timezone is not None) and not timezone.is_aware(value):
843 return timezone.make_aware(value, self.default_timezone)
844 elif (self.default_timezone is None) and timezone.is_aware(value):
845 return timezone.make_naive(value, timezone.UTC())
846 return value
847
848 def to_internal_value(self, value):
849 if isinstance(value, datetime.date) and not isinstance(value, datetime.datetime):
850 self.fail('date')
851
852 if isinstance(value, datetime.datetime):
853 return self.enforce_timezone(value)
854
855 for format in self.input_formats:
856 if format.lower() == ISO_8601:
857 try:
858 parsed = parse_datetime(value)
859 except (ValueError, TypeError):
860 pass
861 else:
862 if parsed is not None:
863 return self.enforce_timezone(parsed)
864 else:
865 try:
866 parsed = datetime.datetime.strptime(value, format)
867 except (ValueError, TypeError):
868 pass
869 else:
870 return self.enforce_timezone(parsed)
871
872 humanized_format = humanize_datetime.datetime_formats(self.input_formats)
873 self.fail('invalid', format=humanized_format)
874
875 def to_representation(self, value):
876 if self.format is None:
877 return value
878
879 if self.format.lower() == ISO_8601:
880 value = value.isoformat()
881 if value.endswith('+00:00'):
882 value = value[:-6] + 'Z'
883 return value
884 return value.strftime(self.format)
885
886
887 class DateField(Field):
888 default_error_messages = {
889 'invalid': _('Date has wrong format. Use one of these formats instead: {format}.'),
890 'datetime': _('Expected a date but got a datetime.'),
891 }
892 format = api_settings.DATE_FORMAT
893 input_formats = api_settings.DATE_INPUT_FORMATS
894
895 def __init__(self, format=empty, input_formats=None, *args, **kwargs):
896 self.format = format if format is not empty else self.format
897 self.input_formats = input_formats if input_formats is not None else self.input_formats
898 super(DateField, self).__init__(*args, **kwargs)
899
900 def to_internal_value(self, value):
901 if isinstance(value, datetime.datetime):
902 self.fail('datetime')
903
904 if isinstance(value, datetime.date):
905 return value
906
907 for format in self.input_formats:
908 if format.lower() == ISO_8601:
909 try:
910 parsed = parse_date(value)
911 except (ValueError, TypeError):
912 pass
913 else:
914 if parsed is not None:
915 return parsed
916 else:
917 try:
918 parsed = datetime.datetime.strptime(value, format)
919 except (ValueError, TypeError):
920 pass
921 else:
922 return parsed.date()
923
924 humanized_format = humanize_datetime.date_formats(self.input_formats)
925 self.fail('invalid', format=humanized_format)
926
927 def to_representation(self, value):
928 if not value:
929 return None
930
931 if self.format is None:
932 return value
933
934 # Applying a `DateField` to a datetime value is almost always
935 # not a sensible thing to do, as it means naively dropping
936 # any explicit or implicit timezone info.
937 assert not isinstance(value, datetime.datetime), (
938 'Expected a `date`, but got a `datetime`. Refusing to coerce, '
939 'as this may mean losing timezone information. Use a custom '
940 'read-only field and deal with timezone issues explicitly.'
941 )
942
943 if self.format.lower() == ISO_8601:
944 if (isinstance(value, str)):
945 value = datetime.datetime.strptime(value, '%Y-%m-%d').date()
946 return value.isoformat()
947
948 return value.strftime(self.format)
949
950
951 class TimeField(Field):
952 default_error_messages = {
953 'invalid': _('Time has wrong format. Use one of these formats instead: {format}.'),
954 }
955 format = api_settings.TIME_FORMAT
956 input_formats = api_settings.TIME_INPUT_FORMATS
957
958 def __init__(self, format=empty, input_formats=None, *args, **kwargs):
959 self.format = format if format is not empty else self.format
960 self.input_formats = input_formats if input_formats is not None else self.input_formats
961 super(TimeField, self).__init__(*args, **kwargs)
962
963 def to_internal_value(self, value):
964 if isinstance(value, datetime.time):
965 return value
966
967 for format in self.input_formats:
968 if format.lower() == ISO_8601:
969 try:
970 parsed = parse_time(value)
971 except (ValueError, TypeError):
972 pass
973 else:
974 if parsed is not None:
975 return parsed
976 else:
977 try:
978 parsed = datetime.datetime.strptime(value, format)
979 except (ValueError, TypeError):
980 pass
981 else:
982 return parsed.time()
983
984 humanized_format = humanize_datetime.time_formats(self.input_formats)
985 self.fail('invalid', format=humanized_format)
986
987 def to_representation(self, value):
988 if self.format is None:
989 return value
990
991 # Applying a `TimeField` to a datetime value is almost always
992 # not a sensible thing to do, as it means naively dropping
993 # any explicit or implicit timezone info.
994 assert not isinstance(value, datetime.datetime), (
995 'Expected a `time`, but got a `datetime`. Refusing to coerce, '
996 'as this may mean losing timezone information. Use a custom '
997 'read-only field and deal with timezone issues explicitly.'
998 )
999
1000 if self.format.lower() == ISO_8601:
1001 return value.isoformat()
1002 return value.strftime(self.format)
1003
1004
1005 # Choice types...
1006
1007 class ChoiceField(Field):
1008 default_error_messages = {
1009 'invalid_choice': _('"{input}" is not a valid choice.')
1010 }
1011
1012 def __init__(self, choices, **kwargs):
1013 # Allow either single or paired choices style:
1014 # choices = [1, 2, 3]
1015 # choices = [(1, 'First'), (2, 'Second'), (3, 'Third')]
1016 pairs = [
1017 isinstance(item, (list, tuple)) and len(item) == 2
1018 for item in choices
1019 ]
1020 if all(pairs):
1021 self.choices = OrderedDict([(key, display_value) for key, display_value in choices])
1022 else:
1023 self.choices = OrderedDict([(item, item) for item in choices])
1024
1025 # Map the string representation of choices to the underlying value.
1026 # Allows us to deal with eg. integer choices while supporting either
1027 # integer or string input, but still get the correct datatype out.
1028 self.choice_strings_to_values = dict([
1029 (six.text_type(key), key) for key in self.choices.keys()
1030 ])
1031
1032 self.allow_blank = kwargs.pop('allow_blank', False)
1033
1034 super(ChoiceField, self).__init__(**kwargs)
1035
1036 def to_internal_value(self, data):
1037 if data == '' and self.allow_blank:
1038 return ''
1039
1040 try:
1041 return self.choice_strings_to_values[six.text_type(data)]
1042 except KeyError:
1043 self.fail('invalid_choice', input=data)
1044
1045 def to_representation(self, value):
1046 if value in ('', None):
1047 return value
1048 return self.choice_strings_to_values.get(six.text_type(value), value)
1049
1050
1051 class MultipleChoiceField(ChoiceField):
1052 default_error_messages = {
1053 'invalid_choice': _('"{input}" is not a valid choice.'),
1054 'not_a_list': _('Expected a list of items but got type "{input_type}".')
1055 }
1056 default_empty_html = []
1057
1058 def get_value(self, dictionary):
1059 # We override the default field access in order to support
1060 # lists in HTML forms.
1061 if html.is_html_input(dictionary):
1062 return dictionary.getlist(self.field_name)
1063 return dictionary.get(self.field_name, empty)
1064
1065 def to_internal_value(self, data):
1066 if isinstance(data, type('')) or not hasattr(data, '__iter__'):
1067 self.fail('not_a_list', input_type=type(data).__name__)
1068
1069 return set([
1070 super(MultipleChoiceField, self).to_internal_value(item)
1071 for item in data
1072 ])
1073
1074 def to_representation(self, value):
1075 return set([
1076 self.choice_strings_to_values.get(six.text_type(item), item) for item in value
1077 ])
1078
1079
1080 # File types...
1081
1082 class FileField(Field):
1083 default_error_messages = {
1084 'required': _('No file was submitted.'),
1085 'invalid': _('The submitted data was not a file. Check the encoding type on the form.'),
1086 'no_name': _('No filename could be determined.'),
1087 'empty': _('The submitted file is empty.'),
1088 'max_length': _('Ensure this filename has at most {max_length} characters (it has {length}).'),
1089 }
1090 use_url = api_settings.UPLOADED_FILES_USE_URL
1091
1092 def __init__(self, *args, **kwargs):
1093 self.max_length = kwargs.pop('max_length', None)
1094 self.allow_empty_file = kwargs.pop('allow_empty_file', False)
1095 self.use_url = kwargs.pop('use_url', self.use_url)
1096 super(FileField, self).__init__(*args, **kwargs)
1097
1098 def to_internal_value(self, data):
1099 try:
1100 # `UploadedFile` objects should have name and size attributes.
1101 file_name = data.name
1102 file_size = data.size
1103 except AttributeError:
1104 self.fail('invalid')
1105
1106 if not file_name:
1107 self.fail('no_name')
1108 if not self.allow_empty_file and not file_size:
1109 self.fail('empty')
1110 if self.max_length and len(file_name) > self.max_length:
1111 self.fail('max_length', max_length=self.max_length, length=len(file_name))
1112
1113 return data
1114
1115 def to_representation(self, value):
1116 if self.use_url:
1117 if not value:
1118 return None
1119 url = value.url
1120 request = self.context.get('request', None)
1121 if request is not None:
1122 return request.build_absolute_uri(url)
1123 return url
1124 return value.name
1125
1126
1127 class ImageField(FileField):
1128 default_error_messages = {
1129 'invalid_image': _(
1130 'Upload a valid image. The file you uploaded was either not an image or a corrupted image.'
1131 ),
1132 }
1133
1134 def __init__(self, *args, **kwargs):
1135 self._DjangoImageField = kwargs.pop('_DjangoImageField', DjangoImageField)
1136 super(ImageField, self).__init__(*args, **kwargs)
1137
1138 def to_internal_value(self, data):
1139 # Image validation is a bit grungy, so we'll just outright
1140 # defer to Django's implementation so we don't need to
1141 # consider it, or treat PIL as a test dependency.
1142 file_object = super(ImageField, self).to_internal_value(data)
1143 django_field = self._DjangoImageField()
1144 django_field.error_messages = self.error_messages
1145 django_field.to_python(file_object)
1146 return file_object
1147
1148
1149 # Composite field types...
1150
1151 class _UnvalidatedField(Field):
1152 def __init__(self, *args, **kwargs):
1153 super(_UnvalidatedField, self).__init__(*args, **kwargs)
1154 self.allow_blank = True
1155 self.allow_null = True
1156
1157 def to_internal_value(self, data):
1158 return data
1159
1160 def to_representation(self, value):
1161 return value
1162
1163
1164 class ListField(Field):
1165 child = _UnvalidatedField()
1166 initial = []
1167 default_error_messages = {
1168 'not_a_list': _('Expected a list of items but got type "{input_type}".')
1169 }
1170
1171 def __init__(self, *args, **kwargs):
1172 self.child = kwargs.pop('child', copy.deepcopy(self.child))
1173 assert not inspect.isclass(self.child), '`child` has not been instantiated.'
1174 super(ListField, self).__init__(*args, **kwargs)
1175 self.child.bind(field_name='', parent=self)
1176
1177 def get_value(self, dictionary):
1178 # We override the default field access in order to support
1179 # lists in HTML forms.
1180 if html.is_html_input(dictionary):
1181 return html.parse_html_list(dictionary, prefix=self.field_name)
1182 return dictionary.get(self.field_name, empty)
1183
1184 def to_internal_value(self, data):
1185 """
1186 List of dicts of native values <- List of dicts of primitive datatypes.
1187 """
1188 if html.is_html_input(data):
1189 data = html.parse_html_list(data)
1190 if isinstance(data, type('')) or not hasattr(data, '__iter__'):
1191 self.fail('not_a_list', input_type=type(data).__name__)
1192 return [self.child.run_validation(item) for item in data]
1193
1194 def to_representation(self, data):
1195 """
1196 List of object instances -> List of dicts of primitive datatypes.
1197 """
1198 return [self.child.to_representation(item) for item in data]
1199
1200
1201 class DictField(Field):
1202 child = _UnvalidatedField()
1203 initial = {}
1204 default_error_messages = {
1205 'not_a_dict': _('Expected a dictionary of items but got type "{input_type}".')
1206 }
1207
1208 def __init__(self, *args, **kwargs):
1209 self.child = kwargs.pop('child', copy.deepcopy(self.child))
1210 assert not inspect.isclass(self.child), '`child` has not been instantiated.'
1211 super(DictField, self).__init__(*args, **kwargs)
1212 self.child.bind(field_name='', parent=self)
1213
1214 def get_value(self, dictionary):
1215 # We override the default field access in order to support
1216 # dictionaries in HTML forms.
1217 if html.is_html_input(dictionary):
1218 return html.parse_html_dict(dictionary, prefix=self.field_name)
1219 return dictionary.get(self.field_name, empty)
1220
1221 def to_internal_value(self, data):
1222 """
1223 Dicts of native values <- Dicts of primitive datatypes.
1224 """
1225 if html.is_html_input(data):
1226 data = html.parse_html_dict(data)
1227 if not isinstance(data, dict):
1228 self.fail('not_a_dict', input_type=type(data).__name__)
1229 return dict([
1230 (six.text_type(key), self.child.run_validation(value))
1231 for key, value in data.items()
1232 ])
1233
1234 def to_representation(self, value):
1235 """
1236 List of object instances -> List of dicts of primitive datatypes.
1237 """
1238 return dict([
1239 (six.text_type(key), self.child.to_representation(val))
1240 for key, val in value.items()
1241 ])
1242
1243
1244 # Miscellaneous field types...
1245
1246 class ReadOnlyField(Field):
1247 """
1248 A read-only field that simply returns the field value.
1249
1250 If the field is a method with no parameters, the method will be called
1251 and it's return value used as the representation.
1252
1253 For example, the following would call `get_expiry_date()` on the object:
1254
1255 class ExampleSerializer(self):
1256 expiry_date = ReadOnlyField(source='get_expiry_date')
1257 """
1258
1259 def __init__(self, **kwargs):
1260 kwargs['read_only'] = True
1261 super(ReadOnlyField, self).__init__(**kwargs)
1262
1263 def to_representation(self, value):
1264 return value
1265
1266
1267 class HiddenField(Field):
1268 """
1269 A hidden field does not take input from the user, or present any output,
1270 but it does populate a field in `validated_data`, based on its default
1271 value. This is particularly useful when we have a `unique_for_date`
1272 constraint on a pair of fields, as we need some way to include the date in
1273 the validated data.
1274 """
1275 def __init__(self, **kwargs):
1276 assert 'default' in kwargs, 'default is a required argument.'
1277 kwargs['write_only'] = True
1278 super(HiddenField, self).__init__(**kwargs)
1279
1280 def get_value(self, dictionary):
1281 # We always use the default value for `HiddenField`.
1282 # User input is never provided or accepted.
1283 return empty
1284
1285 def to_internal_value(self, data):
1286 return data
1287
1288
1289 class SerializerMethodField(Field):
1290 """
1291 A read-only field that get its representation from calling a method on the
1292 parent serializer class. The method called will be of the form
1293 "get_{field_name}", and should take a single argument, which is the
1294 object being serialized.
1295
1296 For example:
1297
1298 class ExampleSerializer(self):
1299 extra_info = SerializerMethodField()
1300
1301 def get_extra_info(self, obj):
1302 return ... # Calculate some data to return.
1303 """
1304 def __init__(self, method_name=None, **kwargs):
1305 self.method_name = method_name
1306 kwargs['source'] = '*'
1307 kwargs['read_only'] = True
1308 super(SerializerMethodField, self).__init__(**kwargs)
1309
1310 def bind(self, field_name, parent):
1311 # In order to enforce a consistent style, we error if a redundant
1312 # 'method_name' argument has been used. For example:
1313 # my_field = serializer.CharField(source='my_field')
1314 default_method_name = 'get_{field_name}'.format(field_name=field_name)
1315 assert self.method_name != default_method_name, (
1316 "It is redundant to specify `%s` on SerializerMethodField '%s' in "
1317 "serializer '%s', because it is the same as the default method name. "
1318 "Remove the `method_name` argument." %
1319 (self.method_name, field_name, parent.__class__.__name__)
1320 )
1321
1322 # The method name should default to `get_{field_name}`.
1323 if self.method_name is None:
1324 self.method_name = default_method_name
1325
1326 super(SerializerMethodField, self).bind(field_name, parent)
1327
1328 def to_representation(self, value):
1329 method = getattr(self.parent, self.method_name)
1330 return method(value)
1331
1332
1333 class ModelField(Field):
1334 """
1335 A generic field that can be used against an arbitrary model field.
1336
1337 This is used by `ModelSerializer` when dealing with custom model fields,
1338 that do not have a serializer field to be mapped to.
1339 """
1340 default_error_messages = {
1341 'max_length': _('Ensure this field has no more than {max_length} characters.'),
1342 }
1343
1344 def __init__(self, model_field, **kwargs):
1345 self.model_field = model_field
1346 # The `max_length` option is supported by Django's base `Field` class,
1347 # so we'd better support it here.
1348 max_length = kwargs.pop('max_length', None)
1349 super(ModelField, self).__init__(**kwargs)
1350 if max_length is not None:
1351 message = self.error_messages['max_length'].format(max_length=max_length)
1352 self.validators.append(MaxLengthValidator(max_length, message=message))
1353
1354 def to_internal_value(self, data):
1355 rel = getattr(self.model_field, 'rel', None)
1356 if rel is not None:
1357 return rel.to._meta.get_field(rel.field_name).to_python(data)
1358 return self.model_field.to_python(data)
1359
1360 def get_attribute(self, obj):
1361 # We pass the object instance onto `to_representation`,
1362 # not just the field attribute.
1363 return obj
1364
1365 def to_representation(self, obj):
1366 value = self.model_field._get_val_from_obj(obj)
1367 if is_protected_type(value):
1368 return value
1369 return self.model_field.value_to_string(obj)
1370
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch in the `git diff` format, fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
golden_diff:
```diff
diff --git a/rest_framework/fields.py b/rest_framework/fields.py
--- a/rest_framework/fields.py
+++ b/rest_framework/fields.py
@@ -782,7 +782,8 @@
         self.fail('invalid')
 
         sign, digittuple, exponent = value.as_tuple()
-        decimals = abs(exponent)
+        decimals = exponent * decimal.Decimal(-1) if exponent < 0 else 0
+
         # digittuple doesn't include any leading zeros.
         digits = len(digittuple)
         if decimals > digits:
```
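For illustration only (not part of the patch above), here is a minimal standalone sketch of why `abs(exponent)` miscounts decimal places for values such as `decimal.Decimal('2E+9')`, and what the patched expression computes instead. The helper name `count_places` is hypothetical and exists only for this example.

```python
import decimal

def count_places(value):
    # Mirrors the two computations side by side: the buggy `abs(exponent)`
    # versus the patched "only negative exponents contribute decimal places" logic.
    sign, digittuple, exponent = value.as_tuple()
    buggy = abs(exponent)
    patched = exponent * decimal.Decimal(-1) if exponent < 0 else 0
    return buggy, patched

# Decimal('2E+9').as_tuple() == DecimalTuple(sign=0, digits=(2,), exponent=9):
# the exponent is positive, so the number has no decimal places at all.
print(count_places(decimal.Decimal('2E+9')))  # (9, 0)            -> only the patched value is right
print(count_places(decimal.Decimal('1.23')))  # (2, Decimal('2')) -> both agree for real fractions
```

Under the old computation, `Decimal('2E+9')` would be treated as having nine decimal places and could spuriously fail the `max_decimal_places` / `max_digits` checks; the patched computation leaves whole numbers with zero decimal places.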
| {"golden_diff": "diff --git a/rest_framework/fields.py b/rest_framework/fields.py\n--- a/rest_framework/fields.py\n+++ b/rest_framework/fields.py\n@@ -782,7 +782,8 @@\n self.fail('invalid')\n \n sign, digittuple, exponent = value.as_tuple()\n- decimals = abs(exponent)\n+ decimals = exponent * decimal.Decimal(-1) if exponent < 0 else 0\n+\n # digittuple doesn't include any leading zeros.\n digits = len(digittuple)\n if decimals > digits:\n", "issue": "`max_decimal_places` in Decimal field are wrong calculated\nWe got an issue when number is formatted as `decimal.Decimal('2E+9')`.\n\nHow `DecimalField` counts decimals:\n\n```\n sign, digittuple, exponent = value.as_tuple()\n decimals = abs(exponent)\n```\n\nHowever result of `decimal.Decimal('2E+9').as_tuple()[2]` is **9**, which is ok, but there are no decimal places in this number.\n\nMy solution is to not do `abs` and instead multiply by `-1`. \n\nI can prepare PR tonight if you think it is valid. \n\n", "before_files": [{"content": "from __future__ import unicode_literals\nfrom django.conf import settings\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.exceptions import ValidationError as DjangoValidationError\nfrom django.core.validators import RegexValidator\nfrom django.forms import ImageField as DjangoImageField\nfrom django.utils import six, timezone\nfrom django.utils.dateparse import parse_date, parse_datetime, parse_time\nfrom django.utils.encoding import is_protected_type, smart_text\nfrom django.utils.translation import ugettext_lazy as _\nfrom rest_framework import ISO_8601\nfrom rest_framework.compat import (\n EmailValidator, MinValueValidator, MaxValueValidator,\n MinLengthValidator, MaxLengthValidator, URLValidator, OrderedDict,\n unicode_repr, unicode_to_repr\n)\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.settings import api_settings\nfrom rest_framework.utils import html, representation, humanize_datetime\nimport collections\nimport copy\nimport datetime\nimport decimal\nimport inspect\nimport re\nimport uuid\n\n\nclass empty:\n \"\"\"\n This class is used to represent no data being provided for a given input\n or output value.\n\n It is required because `None` may be a valid input or output value.\n \"\"\"\n pass\n\n\ndef is_simple_callable(obj):\n \"\"\"\n True if the object is a callable that takes no arguments.\n \"\"\"\n function = inspect.isfunction(obj)\n method = inspect.ismethod(obj)\n\n if not (function or method):\n return False\n\n args, _, _, defaults = inspect.getargspec(obj)\n len_args = len(args) if function else len(args) - 1\n len_defaults = len(defaults) if defaults else 0\n return len_args <= len_defaults\n\n\ndef get_attribute(instance, attrs):\n \"\"\"\n Similar to Python's built in `getattr(instance, attr)`,\n but takes a list of nested attributes, instead of a single attribute.\n\n Also accepts either attribute lookup on objects or dictionary lookups.\n \"\"\"\n for attr in attrs:\n if instance is None:\n # Break out early if we get `None` at any point in a nested lookup.\n return None\n try:\n if isinstance(instance, collections.Mapping):\n instance = instance[attr]\n else:\n instance = getattr(instance, attr)\n except ObjectDoesNotExist:\n return None\n if is_simple_callable(instance):\n try:\n instance = instance()\n except (AttributeError, KeyError) as exc:\n # If we raised an Attribute or KeyError here it'd get treated\n # as an omitted field in `Field.get_attribute()`. 
Instead we\n # raise a ValueError to ensure the exception is not masked.\n raise ValueError('Exception raised in callable attribute \"{0}\"; original exception was: {1}'.format(attr, exc))\n\n return instance\n\n\ndef set_value(dictionary, keys, value):\n \"\"\"\n Similar to Python's built in `dictionary[key] = value`,\n but takes a list of nested keys instead of a single key.\n\n set_value({'a': 1}, [], {'b': 2}) -> {'a': 1, 'b': 2}\n set_value({'a': 1}, ['x'], 2) -> {'a': 1, 'x': 2}\n set_value({'a': 1}, ['x', 'y'], 2) -> {'a': 1, 'x': {'y': 2}}\n \"\"\"\n if not keys:\n dictionary.update(value)\n return\n\n for key in keys[:-1]:\n if key not in dictionary:\n dictionary[key] = {}\n dictionary = dictionary[key]\n\n dictionary[keys[-1]] = value\n\n\nclass CreateOnlyDefault(object):\n \"\"\"\n This class may be used to provide default values that are only used\n for create operations, but that do not return any value for update\n operations.\n \"\"\"\n def __init__(self, default):\n self.default = default\n\n def set_context(self, serializer_field):\n self.is_update = serializer_field.parent.instance is not None\n if callable(self.default) and hasattr(self.default, 'set_context') and not self.is_update:\n self.default.set_context(serializer_field)\n\n def __call__(self):\n if self.is_update:\n raise SkipField()\n if callable(self.default):\n return self.default()\n return self.default\n\n def __repr__(self):\n return unicode_to_repr(\n '%s(%s)' % (self.__class__.__name__, unicode_repr(self.default))\n )\n\n\nclass CurrentUserDefault(object):\n def set_context(self, serializer_field):\n self.user = serializer_field.context['request'].user\n\n def __call__(self):\n return self.user\n\n def __repr__(self):\n return unicode_to_repr('%s()' % self.__class__.__name__)\n\n\nclass SkipField(Exception):\n pass\n\n\nNOT_READ_ONLY_WRITE_ONLY = 'May not set both `read_only` and `write_only`'\nNOT_READ_ONLY_REQUIRED = 'May not set both `read_only` and `required`'\nNOT_REQUIRED_DEFAULT = 'May not set both `required` and `default`'\nUSE_READONLYFIELD = 'Field(read_only=True) should be ReadOnlyField'\nMISSING_ERROR_MESSAGE = (\n 'ValidationError raised by `{class_name}`, but error key `{key}` does '\n 'not exist in the `error_messages` dictionary.'\n)\n\n\nclass Field(object):\n _creation_counter = 0\n\n default_error_messages = {\n 'required': _('This field is required.'),\n 'null': _('This field may not be null.')\n }\n default_validators = []\n default_empty_html = empty\n initial = None\n\n def __init__(self, read_only=False, write_only=False,\n required=None, default=empty, initial=empty, source=None,\n label=None, help_text=None, style=None,\n error_messages=None, validators=None, allow_null=False):\n self._creation_counter = Field._creation_counter\n Field._creation_counter += 1\n\n # If `required` is unset, then use `True` unless a default is provided.\n if required is None:\n required = default is empty and not read_only\n\n # Some combinations of keyword arguments do not make sense.\n assert not (read_only and write_only), NOT_READ_ONLY_WRITE_ONLY\n assert not (read_only and required), NOT_READ_ONLY_REQUIRED\n assert not (required and default is not empty), NOT_REQUIRED_DEFAULT\n assert not (read_only and self.__class__ == Field), USE_READONLYFIELD\n\n self.read_only = read_only\n self.write_only = write_only\n self.required = required\n self.default = default\n self.source = source\n self.initial = self.initial if (initial is empty) else initial\n self.label = label\n self.help_text = help_text\n 
self.style = {} if style is None else style\n self.allow_null = allow_null\n\n if self.default_empty_html is not empty:\n if not required:\n self.default_empty_html = empty\n elif default is not empty:\n self.default_empty_html = default\n\n if validators is not None:\n self.validators = validators[:]\n\n # These are set up by `.bind()` when the field is added to a serializer.\n self.field_name = None\n self.parent = None\n\n # Collect default error message from self and parent classes\n messages = {}\n for cls in reversed(self.__class__.__mro__):\n messages.update(getattr(cls, 'default_error_messages', {}))\n messages.update(error_messages or {})\n self.error_messages = messages\n\n def bind(self, field_name, parent):\n \"\"\"\n Initializes the field name and parent for the field instance.\n Called when a field is added to the parent serializer instance.\n \"\"\"\n\n # In order to enforce a consistent style, we error if a redundant\n # 'source' argument has been used. For example:\n # my_field = serializer.CharField(source='my_field')\n assert self.source != field_name, (\n \"It is redundant to specify `source='%s'` on field '%s' in \"\n \"serializer '%s', because it is the same as the field name. \"\n \"Remove the `source` keyword argument.\" %\n (field_name, self.__class__.__name__, parent.__class__.__name__)\n )\n\n self.field_name = field_name\n self.parent = parent\n\n # `self.label` should default to being based on the field name.\n if self.label is None:\n self.label = field_name.replace('_', ' ').capitalize()\n\n # self.source should default to being the same as the field name.\n if self.source is None:\n self.source = field_name\n\n # self.source_attrs is a list of attributes that need to be looked up\n # when serializing the instance, or populating the validated data.\n if self.source == '*':\n self.source_attrs = []\n else:\n self.source_attrs = self.source.split('.')\n\n # .validators is a lazily loaded property, that gets its default\n # value from `get_validators`.\n @property\n def validators(self):\n if not hasattr(self, '_validators'):\n self._validators = self.get_validators()\n return self._validators\n\n @validators.setter\n def validators(self, validators):\n self._validators = validators\n\n def get_validators(self):\n return self.default_validators[:]\n\n def get_initial(self):\n \"\"\"\n Return a value to use when the field is being returned as a primitive\n value, without any object instance.\n \"\"\"\n return self.initial\n\n def get_value(self, dictionary):\n \"\"\"\n Given the *incoming* primitive data, return the value for this field\n that should be validated and transformed to a native value.\n \"\"\"\n if html.is_html_input(dictionary):\n # HTML forms will represent empty fields as '', and cannot\n # represent None or False values directly.\n if self.field_name not in dictionary:\n if getattr(self.root, 'partial', False):\n return empty\n return self.default_empty_html\n ret = dictionary[self.field_name]\n if ret == '' and self.allow_null:\n # If the field is blank, and null is a valid value then\n # determine if we should use null instead.\n return '' if getattr(self, 'allow_blank', False) else None\n return ret\n return dictionary.get(self.field_name, empty)\n\n def get_attribute(self, instance):\n \"\"\"\n Given the *outgoing* object instance, return the primitive value\n that should be used for this field.\n \"\"\"\n try:\n return get_attribute(instance, self.source_attrs)\n except (KeyError, AttributeError) as exc:\n if not self.required and 
self.default is empty:\n raise SkipField()\n msg = (\n 'Got {exc_type} when attempting to get a value for field '\n '`{field}` on serializer `{serializer}`.\\nThe serializer '\n 'field might be named incorrectly and not match '\n 'any attribute or key on the `{instance}` instance.\\n'\n 'Original exception text was: {exc}.'.format(\n exc_type=type(exc).__name__,\n field=self.field_name,\n serializer=self.parent.__class__.__name__,\n instance=instance.__class__.__name__,\n exc=exc\n )\n )\n raise type(exc)(msg)\n\n def get_default(self):\n \"\"\"\n Return the default value to use when validating data if no input\n is provided for this field.\n\n If a default has not been set for this field then this will simply\n return `empty`, indicating that no value should be set in the\n validated data for this field.\n \"\"\"\n if self.default is empty:\n raise SkipField()\n if callable(self.default):\n if hasattr(self.default, 'set_context'):\n self.default.set_context(self)\n return self.default()\n return self.default\n\n def validate_empty_values(self, data):\n \"\"\"\n Validate empty values, and either:\n\n * Raise `ValidationError`, indicating invalid data.\n * Raise `SkipField`, indicating that the field should be ignored.\n * Return (True, data), indicating an empty value that should be\n returned without any further validation being applied.\n * Return (False, data), indicating a non-empty value, that should\n have validation applied as normal.\n \"\"\"\n if self.read_only:\n return (True, self.get_default())\n\n if data is empty:\n if getattr(self.root, 'partial', False):\n raise SkipField()\n if self.required:\n self.fail('required')\n return (True, self.get_default())\n\n if data is None:\n if not self.allow_null:\n self.fail('null')\n return (True, None)\n\n return (False, data)\n\n def run_validation(self, data=empty):\n \"\"\"\n Validate a simple representation and return the internal value.\n\n The provided data may be `empty` if no representation was included\n in the input.\n\n May raise `SkipField` if the field should not be included in the\n validated data.\n \"\"\"\n (is_empty_value, data) = self.validate_empty_values(data)\n if is_empty_value:\n return data\n value = self.to_internal_value(data)\n self.run_validators(value)\n return value\n\n def run_validators(self, value):\n \"\"\"\n Test the given value against all the validators on the field,\n and either raise a `ValidationError` or simply return.\n \"\"\"\n errors = []\n for validator in self.validators:\n if hasattr(validator, 'set_context'):\n validator.set_context(self)\n\n try:\n validator(value)\n except ValidationError as exc:\n # If the validation error contains a mapping of fields to\n # errors then simply raise it immediately rather than\n # attempting to accumulate a list of errors.\n if isinstance(exc.detail, dict):\n raise\n errors.extend(exc.detail)\n except DjangoValidationError as exc:\n errors.extend(exc.messages)\n if errors:\n raise ValidationError(errors)\n\n def to_internal_value(self, data):\n \"\"\"\n Transform the *incoming* primitive data into a native value.\n \"\"\"\n raise NotImplementedError(\n '{cls}.to_internal_value() must be implemented.'.format(\n cls=self.__class__.__name__\n )\n )\n\n def to_representation(self, value):\n \"\"\"\n Transform the *outgoing* native value into primitive data.\n \"\"\"\n raise NotImplementedError(\n '{cls}.to_representation() must be implemented.\\n'\n 'If you are upgrading from REST framework version 2 '\n 'you might want `ReadOnlyField`.'.format(\n 
cls=self.__class__.__name__\n )\n )\n\n def fail(self, key, **kwargs):\n \"\"\"\n A helper method that simply raises a validation error.\n \"\"\"\n try:\n msg = self.error_messages[key]\n except KeyError:\n class_name = self.__class__.__name__\n msg = MISSING_ERROR_MESSAGE.format(class_name=class_name, key=key)\n raise AssertionError(msg)\n message_string = msg.format(**kwargs)\n raise ValidationError(message_string)\n\n @property\n def root(self):\n \"\"\"\n Returns the top-level serializer for this field.\n \"\"\"\n root = self\n while root.parent is not None:\n root = root.parent\n return root\n\n @property\n def context(self):\n \"\"\"\n Returns the context as passed to the root serializer on initialization.\n \"\"\"\n return getattr(self.root, '_context', {})\n\n def __new__(cls, *args, **kwargs):\n \"\"\"\n When a field is instantiated, we store the arguments that were used,\n so that we can present a helpful representation of the object.\n \"\"\"\n instance = super(Field, cls).__new__(cls)\n instance._args = args\n instance._kwargs = kwargs\n return instance\n\n def __deepcopy__(self, memo):\n \"\"\"\n When cloning fields we instantiate using the arguments it was\n originally created with, rather than copying the complete state.\n \"\"\"\n args = copy.deepcopy(self._args)\n kwargs = dict(self._kwargs)\n # Bit ugly, but we need to special case 'validators' as Django's\n # RegexValidator does not support deepcopy.\n # We treat validator callables as immutable objects.\n # See https://github.com/tomchristie/django-rest-framework/issues/1954\n validators = kwargs.pop('validators', None)\n kwargs = copy.deepcopy(kwargs)\n if validators is not None:\n kwargs['validators'] = validators\n return self.__class__(*args, **kwargs)\n\n def __repr__(self):\n \"\"\"\n Fields are represented using their initial calling arguments.\n This allows us to create descriptive representations for serializer\n instances that show all the declared fields on the serializer.\n \"\"\"\n return unicode_to_repr(representation.field_repr(self))\n\n\n# Boolean types...\n\nclass BooleanField(Field):\n default_error_messages = {\n 'invalid': _('\"{input}\" is not a valid boolean.')\n }\n default_empty_html = False\n initial = False\n TRUE_VALUES = set(('t', 'T', 'true', 'True', 'TRUE', '1', 1, True))\n FALSE_VALUES = set(('f', 'F', 'false', 'False', 'FALSE', '0', 0, 0.0, False))\n\n def __init__(self, **kwargs):\n assert 'allow_null' not in kwargs, '`allow_null` is not a valid option. 
Use `NullBooleanField` instead.'\n super(BooleanField, self).__init__(**kwargs)\n\n def to_internal_value(self, data):\n if data in self.TRUE_VALUES:\n return True\n elif data in self.FALSE_VALUES:\n return False\n self.fail('invalid', input=data)\n\n def to_representation(self, value):\n if value in self.TRUE_VALUES:\n return True\n elif value in self.FALSE_VALUES:\n return False\n return bool(value)\n\n\nclass NullBooleanField(Field):\n default_error_messages = {\n 'invalid': _('\"{input}\" is not a valid boolean.')\n }\n initial = None\n TRUE_VALUES = set(('t', 'T', 'true', 'True', 'TRUE', '1', 1, True))\n FALSE_VALUES = set(('f', 'F', 'false', 'False', 'FALSE', '0', 0, 0.0, False))\n NULL_VALUES = set(('n', 'N', 'null', 'Null', 'NULL', '', None))\n\n def __init__(self, **kwargs):\n assert 'allow_null' not in kwargs, '`allow_null` is not a valid option.'\n kwargs['allow_null'] = True\n super(NullBooleanField, self).__init__(**kwargs)\n\n def to_internal_value(self, data):\n if data in self.TRUE_VALUES:\n return True\n elif data in self.FALSE_VALUES:\n return False\n elif data in self.NULL_VALUES:\n return None\n self.fail('invalid', input=data)\n\n def to_representation(self, value):\n if value in self.NULL_VALUES:\n return None\n if value in self.TRUE_VALUES:\n return True\n elif value in self.FALSE_VALUES:\n return False\n return bool(value)\n\n\n# String types...\n\nclass CharField(Field):\n default_error_messages = {\n 'blank': _('This field may not be blank.'),\n 'max_length': _('Ensure this field has no more than {max_length} characters.'),\n 'min_length': _('Ensure this field has at least {min_length} characters.')\n }\n initial = ''\n\n def __init__(self, **kwargs):\n self.allow_blank = kwargs.pop('allow_blank', False)\n self.trim_whitespace = kwargs.pop('trim_whitespace', True)\n self.max_length = kwargs.pop('max_length', None)\n self.min_length = kwargs.pop('min_length', None)\n super(CharField, self).__init__(**kwargs)\n if self.max_length is not None:\n message = self.error_messages['max_length'].format(max_length=self.max_length)\n self.validators.append(MaxLengthValidator(self.max_length, message=message))\n if self.min_length is not None:\n message = self.error_messages['min_length'].format(min_length=self.min_length)\n self.validators.append(MinLengthValidator(self.min_length, message=message))\n\n def run_validation(self, data=empty):\n # Test for the empty string here so that it does not get validated,\n # and so that subclasses do not need to handle it explicitly\n # inside the `to_internal_value()` method.\n if data == '':\n if not self.allow_blank:\n self.fail('blank')\n return ''\n return super(CharField, self).run_validation(data)\n\n def to_internal_value(self, data):\n value = six.text_type(data)\n return value.strip() if self.trim_whitespace else value\n\n def to_representation(self, value):\n return six.text_type(value)\n\n\nclass EmailField(CharField):\n default_error_messages = {\n 'invalid': _('Enter a valid email address.')\n }\n\n def __init__(self, **kwargs):\n super(EmailField, self).__init__(**kwargs)\n validator = EmailValidator(message=self.error_messages['invalid'])\n self.validators.append(validator)\n\n\nclass RegexField(CharField):\n default_error_messages = {\n 'invalid': _('This value does not match the required pattern.')\n }\n\n def __init__(self, regex, **kwargs):\n super(RegexField, self).__init__(**kwargs)\n validator = RegexValidator(regex, message=self.error_messages['invalid'])\n self.validators.append(validator)\n\n\nclass 
SlugField(CharField):\n default_error_messages = {\n 'invalid': _('Enter a valid \"slug\" consisting of letters, numbers, underscores or hyphens.')\n }\n\n def __init__(self, **kwargs):\n super(SlugField, self).__init__(**kwargs)\n slug_regex = re.compile(r'^[-a-zA-Z0-9_]+$')\n validator = RegexValidator(slug_regex, message=self.error_messages['invalid'])\n self.validators.append(validator)\n\n\nclass URLField(CharField):\n default_error_messages = {\n 'invalid': _('Enter a valid URL.')\n }\n\n def __init__(self, **kwargs):\n super(URLField, self).__init__(**kwargs)\n validator = URLValidator(message=self.error_messages['invalid'])\n self.validators.append(validator)\n\n\nclass UUIDField(Field):\n default_error_messages = {\n 'invalid': _('\"{value}\" is not a valid UUID.'),\n }\n\n def to_internal_value(self, data):\n if not isinstance(data, uuid.UUID):\n try:\n return uuid.UUID(data)\n except (ValueError, TypeError):\n self.fail('invalid', value=data)\n return data\n\n def to_representation(self, value):\n return str(value)\n\n\n# Number types...\n\nclass IntegerField(Field):\n default_error_messages = {\n 'invalid': _('A valid integer is required.'),\n 'max_value': _('Ensure this value is less than or equal to {max_value}.'),\n 'min_value': _('Ensure this value is greater than or equal to {min_value}.'),\n 'max_string_length': _('String value too large.')\n }\n MAX_STRING_LENGTH = 1000 # Guard against malicious string inputs.\n re_decimal = re.compile(r'\\.0*\\s*$') # allow e.g. '1.0' as an int, but not '1.2'\n\n def __init__(self, **kwargs):\n self.max_value = kwargs.pop('max_value', None)\n self.min_value = kwargs.pop('min_value', None)\n super(IntegerField, self).__init__(**kwargs)\n if self.max_value is not None:\n message = self.error_messages['max_value'].format(max_value=self.max_value)\n self.validators.append(MaxValueValidator(self.max_value, message=message))\n if self.min_value is not None:\n message = self.error_messages['min_value'].format(min_value=self.min_value)\n self.validators.append(MinValueValidator(self.min_value, message=message))\n\n def to_internal_value(self, data):\n if isinstance(data, six.text_type) and len(data) > self.MAX_STRING_LENGTH:\n self.fail('max_string_length')\n\n try:\n data = int(self.re_decimal.sub('', str(data)))\n except (ValueError, TypeError):\n self.fail('invalid')\n return data\n\n def to_representation(self, value):\n return int(value)\n\n\nclass FloatField(Field):\n default_error_messages = {\n 'invalid': _('A valid number is required.'),\n 'max_value': _('Ensure this value is less than or equal to {max_value}.'),\n 'min_value': _('Ensure this value is greater than or equal to {min_value}.'),\n 'max_string_length': _('String value too large.')\n }\n MAX_STRING_LENGTH = 1000 # Guard against malicious string inputs.\n\n def __init__(self, **kwargs):\n self.max_value = kwargs.pop('max_value', None)\n self.min_value = kwargs.pop('min_value', None)\n super(FloatField, self).__init__(**kwargs)\n if self.max_value is not None:\n message = self.error_messages['max_value'].format(max_value=self.max_value)\n self.validators.append(MaxValueValidator(self.max_value, message=message))\n if self.min_value is not None:\n message = self.error_messages['min_value'].format(min_value=self.min_value)\n self.validators.append(MinValueValidator(self.min_value, message=message))\n\n def to_internal_value(self, data):\n if isinstance(data, six.text_type) and len(data) > self.MAX_STRING_LENGTH:\n self.fail('max_string_length')\n\n try:\n return float(data)\n 
except (TypeError, ValueError):\n self.fail('invalid')\n\n def to_representation(self, value):\n return float(value)\n\n\nclass DecimalField(Field):\n default_error_messages = {\n 'invalid': _('A valid number is required.'),\n 'max_value': _('Ensure this value is less than or equal to {max_value}.'),\n 'min_value': _('Ensure this value is greater than or equal to {min_value}.'),\n 'max_digits': _('Ensure that there are no more than {max_digits} digits in total.'),\n 'max_decimal_places': _('Ensure that there are no more than {max_decimal_places} decimal places.'),\n 'max_whole_digits': _('Ensure that there are no more than {max_whole_digits} digits before the decimal point.'),\n 'max_string_length': _('String value too large.')\n }\n MAX_STRING_LENGTH = 1000 # Guard against malicious string inputs.\n\n coerce_to_string = api_settings.COERCE_DECIMAL_TO_STRING\n\n def __init__(self, max_digits, decimal_places, coerce_to_string=None, max_value=None, min_value=None, **kwargs):\n self.max_digits = max_digits\n self.decimal_places = decimal_places\n self.coerce_to_string = coerce_to_string if (coerce_to_string is not None) else self.coerce_to_string\n\n self.max_value = max_value\n self.min_value = min_value\n\n super(DecimalField, self).__init__(**kwargs)\n\n if self.max_value is not None:\n message = self.error_messages['max_value'].format(max_value=self.max_value)\n self.validators.append(MaxValueValidator(self.max_value, message=message))\n if self.min_value is not None:\n message = self.error_messages['min_value'].format(min_value=self.min_value)\n self.validators.append(MinValueValidator(self.min_value, message=message))\n\n def to_internal_value(self, data):\n \"\"\"\n Validates that the input is a decimal number. Returns a Decimal\n instance. Returns None for empty values. Ensures that there are no more\n than max_digits in the number, and no more than decimal_places digits\n after the decimal point.\n \"\"\"\n data = smart_text(data).strip()\n if len(data) > self.MAX_STRING_LENGTH:\n self.fail('max_string_length')\n\n try:\n value = decimal.Decimal(data)\n except decimal.DecimalException:\n self.fail('invalid')\n\n # Check for NaN. It is the only value that isn't equal to itself,\n # so we can use this to identify NaN values.\n if value != value:\n self.fail('invalid')\n\n # Check for infinity and negative infinity.\n if value in (decimal.Decimal('Inf'), decimal.Decimal('-Inf')):\n self.fail('invalid')\n\n sign, digittuple, exponent = value.as_tuple()\n decimals = abs(exponent)\n # digittuple doesn't include any leading zeros.\n digits = len(digittuple)\n if decimals > digits:\n # We have leading zeros up to or past the decimal point. Count\n # everything past the decimal point as a digit. 
We do not count\n # 0 before the decimal point as a digit since that would mean\n # we would not allow max_digits = decimal_places.\n digits = decimals\n whole_digits = digits - decimals\n\n if self.max_digits is not None and digits > self.max_digits:\n self.fail('max_digits', max_digits=self.max_digits)\n if self.decimal_places is not None and decimals > self.decimal_places:\n self.fail('max_decimal_places', max_decimal_places=self.decimal_places)\n if self.max_digits is not None and self.decimal_places is not None and whole_digits > (self.max_digits - self.decimal_places):\n self.fail('max_whole_digits', max_whole_digits=self.max_digits - self.decimal_places)\n\n return value\n\n def to_representation(self, value):\n if not isinstance(value, decimal.Decimal):\n value = decimal.Decimal(six.text_type(value).strip())\n\n context = decimal.getcontext().copy()\n context.prec = self.max_digits\n quantized = value.quantize(\n decimal.Decimal('.1') ** self.decimal_places,\n context=context\n )\n if not self.coerce_to_string:\n return quantized\n return '{0:f}'.format(quantized)\n\n\n# Date & time fields...\n\nclass DateTimeField(Field):\n default_error_messages = {\n 'invalid': _('Datetime has wrong format. Use one of these formats instead: {format}.'),\n 'date': _('Expected a datetime but got a date.'),\n }\n format = api_settings.DATETIME_FORMAT\n input_formats = api_settings.DATETIME_INPUT_FORMATS\n default_timezone = timezone.get_default_timezone() if settings.USE_TZ else None\n\n def __init__(self, format=empty, input_formats=None, default_timezone=None, *args, **kwargs):\n self.format = format if format is not empty else self.format\n self.input_formats = input_formats if input_formats is not None else self.input_formats\n self.default_timezone = default_timezone if default_timezone is not None else self.default_timezone\n super(DateTimeField, self).__init__(*args, **kwargs)\n\n def enforce_timezone(self, value):\n \"\"\"\n When `self.default_timezone` is `None`, always return naive datetimes.\n When `self.default_timezone` is not `None`, always return aware datetimes.\n \"\"\"\n if (self.default_timezone is not None) and not timezone.is_aware(value):\n return timezone.make_aware(value, self.default_timezone)\n elif (self.default_timezone is None) and timezone.is_aware(value):\n return timezone.make_naive(value, timezone.UTC())\n return value\n\n def to_internal_value(self, value):\n if isinstance(value, datetime.date) and not isinstance(value, datetime.datetime):\n self.fail('date')\n\n if isinstance(value, datetime.datetime):\n return self.enforce_timezone(value)\n\n for format in self.input_formats:\n if format.lower() == ISO_8601:\n try:\n parsed = parse_datetime(value)\n except (ValueError, TypeError):\n pass\n else:\n if parsed is not None:\n return self.enforce_timezone(parsed)\n else:\n try:\n parsed = datetime.datetime.strptime(value, format)\n except (ValueError, TypeError):\n pass\n else:\n return self.enforce_timezone(parsed)\n\n humanized_format = humanize_datetime.datetime_formats(self.input_formats)\n self.fail('invalid', format=humanized_format)\n\n def to_representation(self, value):\n if self.format is None:\n return value\n\n if self.format.lower() == ISO_8601:\n value = value.isoformat()\n if value.endswith('+00:00'):\n value = value[:-6] + 'Z'\n return value\n return value.strftime(self.format)\n\n\nclass DateField(Field):\n default_error_messages = {\n 'invalid': _('Date has wrong format. 
Use one of these formats instead: {format}.'),\n 'datetime': _('Expected a date but got a datetime.'),\n }\n format = api_settings.DATE_FORMAT\n input_formats = api_settings.DATE_INPUT_FORMATS\n\n def __init__(self, format=empty, input_formats=None, *args, **kwargs):\n self.format = format if format is not empty else self.format\n self.input_formats = input_formats if input_formats is not None else self.input_formats\n super(DateField, self).__init__(*args, **kwargs)\n\n def to_internal_value(self, value):\n if isinstance(value, datetime.datetime):\n self.fail('datetime')\n\n if isinstance(value, datetime.date):\n return value\n\n for format in self.input_formats:\n if format.lower() == ISO_8601:\n try:\n parsed = parse_date(value)\n except (ValueError, TypeError):\n pass\n else:\n if parsed is not None:\n return parsed\n else:\n try:\n parsed = datetime.datetime.strptime(value, format)\n except (ValueError, TypeError):\n pass\n else:\n return parsed.date()\n\n humanized_format = humanize_datetime.date_formats(self.input_formats)\n self.fail('invalid', format=humanized_format)\n\n def to_representation(self, value):\n if not value:\n return None\n\n if self.format is None:\n return value\n\n # Applying a `DateField` to a datetime value is almost always\n # not a sensible thing to do, as it means naively dropping\n # any explicit or implicit timezone info.\n assert not isinstance(value, datetime.datetime), (\n 'Expected a `date`, but got a `datetime`. Refusing to coerce, '\n 'as this may mean losing timezone information. Use a custom '\n 'read-only field and deal with timezone issues explicitly.'\n )\n\n if self.format.lower() == ISO_8601:\n if (isinstance(value, str)):\n value = datetime.datetime.strptime(value, '%Y-%m-%d').date()\n return value.isoformat()\n\n return value.strftime(self.format)\n\n\nclass TimeField(Field):\n default_error_messages = {\n 'invalid': _('Time has wrong format. Use one of these formats instead: {format}.'),\n }\n format = api_settings.TIME_FORMAT\n input_formats = api_settings.TIME_INPUT_FORMATS\n\n def __init__(self, format=empty, input_formats=None, *args, **kwargs):\n self.format = format if format is not empty else self.format\n self.input_formats = input_formats if input_formats is not None else self.input_formats\n super(TimeField, self).__init__(*args, **kwargs)\n\n def to_internal_value(self, value):\n if isinstance(value, datetime.time):\n return value\n\n for format in self.input_formats:\n if format.lower() == ISO_8601:\n try:\n parsed = parse_time(value)\n except (ValueError, TypeError):\n pass\n else:\n if parsed is not None:\n return parsed\n else:\n try:\n parsed = datetime.datetime.strptime(value, format)\n except (ValueError, TypeError):\n pass\n else:\n return parsed.time()\n\n humanized_format = humanize_datetime.time_formats(self.input_formats)\n self.fail('invalid', format=humanized_format)\n\n def to_representation(self, value):\n if self.format is None:\n return value\n\n # Applying a `TimeField` to a datetime value is almost always\n # not a sensible thing to do, as it means naively dropping\n # any explicit or implicit timezone info.\n assert not isinstance(value, datetime.datetime), (\n 'Expected a `time`, but got a `datetime`. Refusing to coerce, '\n 'as this may mean losing timezone information. 
Use a custom '\n 'read-only field and deal with timezone issues explicitly.'\n )\n\n if self.format.lower() == ISO_8601:\n return value.isoformat()\n return value.strftime(self.format)\n\n\n# Choice types...\n\nclass ChoiceField(Field):\n default_error_messages = {\n 'invalid_choice': _('\"{input}\" is not a valid choice.')\n }\n\n def __init__(self, choices, **kwargs):\n # Allow either single or paired choices style:\n # choices = [1, 2, 3]\n # choices = [(1, 'First'), (2, 'Second'), (3, 'Third')]\n pairs = [\n isinstance(item, (list, tuple)) and len(item) == 2\n for item in choices\n ]\n if all(pairs):\n self.choices = OrderedDict([(key, display_value) for key, display_value in choices])\n else:\n self.choices = OrderedDict([(item, item) for item in choices])\n\n # Map the string representation of choices to the underlying value.\n # Allows us to deal with eg. integer choices while supporting either\n # integer or string input, but still get the correct datatype out.\n self.choice_strings_to_values = dict([\n (six.text_type(key), key) for key in self.choices.keys()\n ])\n\n self.allow_blank = kwargs.pop('allow_blank', False)\n\n super(ChoiceField, self).__init__(**kwargs)\n\n def to_internal_value(self, data):\n if data == '' and self.allow_blank:\n return ''\n\n try:\n return self.choice_strings_to_values[six.text_type(data)]\n except KeyError:\n self.fail('invalid_choice', input=data)\n\n def to_representation(self, value):\n if value in ('', None):\n return value\n return self.choice_strings_to_values.get(six.text_type(value), value)\n\n\nclass MultipleChoiceField(ChoiceField):\n default_error_messages = {\n 'invalid_choice': _('\"{input}\" is not a valid choice.'),\n 'not_a_list': _('Expected a list of items but got type \"{input_type}\".')\n }\n default_empty_html = []\n\n def get_value(self, dictionary):\n # We override the default field access in order to support\n # lists in HTML forms.\n if html.is_html_input(dictionary):\n return dictionary.getlist(self.field_name)\n return dictionary.get(self.field_name, empty)\n\n def to_internal_value(self, data):\n if isinstance(data, type('')) or not hasattr(data, '__iter__'):\n self.fail('not_a_list', input_type=type(data).__name__)\n\n return set([\n super(MultipleChoiceField, self).to_internal_value(item)\n for item in data\n ])\n\n def to_representation(self, value):\n return set([\n self.choice_strings_to_values.get(six.text_type(item), item) for item in value\n ])\n\n\n# File types...\n\nclass FileField(Field):\n default_error_messages = {\n 'required': _('No file was submitted.'),\n 'invalid': _('The submitted data was not a file. 
Check the encoding type on the form.'),\n 'no_name': _('No filename could be determined.'),\n 'empty': _('The submitted file is empty.'),\n 'max_length': _('Ensure this filename has at most {max_length} characters (it has {length}).'),\n }\n use_url = api_settings.UPLOADED_FILES_USE_URL\n\n def __init__(self, *args, **kwargs):\n self.max_length = kwargs.pop('max_length', None)\n self.allow_empty_file = kwargs.pop('allow_empty_file', False)\n self.use_url = kwargs.pop('use_url', self.use_url)\n super(FileField, self).__init__(*args, **kwargs)\n\n def to_internal_value(self, data):\n try:\n # `UploadedFile` objects should have name and size attributes.\n file_name = data.name\n file_size = data.size\n except AttributeError:\n self.fail('invalid')\n\n if not file_name:\n self.fail('no_name')\n if not self.allow_empty_file and not file_size:\n self.fail('empty')\n if self.max_length and len(file_name) > self.max_length:\n self.fail('max_length', max_length=self.max_length, length=len(file_name))\n\n return data\n\n def to_representation(self, value):\n if self.use_url:\n if not value:\n return None\n url = value.url\n request = self.context.get('request', None)\n if request is not None:\n return request.build_absolute_uri(url)\n return url\n return value.name\n\n\nclass ImageField(FileField):\n default_error_messages = {\n 'invalid_image': _(\n 'Upload a valid image. The file you uploaded was either not an image or a corrupted image.'\n ),\n }\n\n def __init__(self, *args, **kwargs):\n self._DjangoImageField = kwargs.pop('_DjangoImageField', DjangoImageField)\n super(ImageField, self).__init__(*args, **kwargs)\n\n def to_internal_value(self, data):\n # Image validation is a bit grungy, so we'll just outright\n # defer to Django's implementation so we don't need to\n # consider it, or treat PIL as a test dependency.\n file_object = super(ImageField, self).to_internal_value(data)\n django_field = self._DjangoImageField()\n django_field.error_messages = self.error_messages\n django_field.to_python(file_object)\n return file_object\n\n\n# Composite field types...\n\nclass _UnvalidatedField(Field):\n def __init__(self, *args, **kwargs):\n super(_UnvalidatedField, self).__init__(*args, **kwargs)\n self.allow_blank = True\n self.allow_null = True\n\n def to_internal_value(self, data):\n return data\n\n def to_representation(self, value):\n return value\n\n\nclass ListField(Field):\n child = _UnvalidatedField()\n initial = []\n default_error_messages = {\n 'not_a_list': _('Expected a list of items but got type \"{input_type}\".')\n }\n\n def __init__(self, *args, **kwargs):\n self.child = kwargs.pop('child', copy.deepcopy(self.child))\n assert not inspect.isclass(self.child), '`child` has not been instantiated.'\n super(ListField, self).__init__(*args, **kwargs)\n self.child.bind(field_name='', parent=self)\n\n def get_value(self, dictionary):\n # We override the default field access in order to support\n # lists in HTML forms.\n if html.is_html_input(dictionary):\n return html.parse_html_list(dictionary, prefix=self.field_name)\n return dictionary.get(self.field_name, empty)\n\n def to_internal_value(self, data):\n \"\"\"\n List of dicts of native values <- List of dicts of primitive datatypes.\n \"\"\"\n if html.is_html_input(data):\n data = html.parse_html_list(data)\n if isinstance(data, type('')) or not hasattr(data, '__iter__'):\n self.fail('not_a_list', input_type=type(data).__name__)\n return [self.child.run_validation(item) for item in data]\n\n def to_representation(self, data):\n \"\"\"\n 
List of object instances -> List of dicts of primitive datatypes.\n \"\"\"\n return [self.child.to_representation(item) for item in data]\n\n\nclass DictField(Field):\n child = _UnvalidatedField()\n initial = {}\n default_error_messages = {\n 'not_a_dict': _('Expected a dictionary of items but got type \"{input_type}\".')\n }\n\n def __init__(self, *args, **kwargs):\n self.child = kwargs.pop('child', copy.deepcopy(self.child))\n assert not inspect.isclass(self.child), '`child` has not been instantiated.'\n super(DictField, self).__init__(*args, **kwargs)\n self.child.bind(field_name='', parent=self)\n\n def get_value(self, dictionary):\n # We override the default field access in order to support\n # dictionaries in HTML forms.\n if html.is_html_input(dictionary):\n return html.parse_html_dict(dictionary, prefix=self.field_name)\n return dictionary.get(self.field_name, empty)\n\n def to_internal_value(self, data):\n \"\"\"\n Dicts of native values <- Dicts of primitive datatypes.\n \"\"\"\n if html.is_html_input(data):\n data = html.parse_html_dict(data)\n if not isinstance(data, dict):\n self.fail('not_a_dict', input_type=type(data).__name__)\n return dict([\n (six.text_type(key), self.child.run_validation(value))\n for key, value in data.items()\n ])\n\n def to_representation(self, value):\n \"\"\"\n List of object instances -> List of dicts of primitive datatypes.\n \"\"\"\n return dict([\n (six.text_type(key), self.child.to_representation(val))\n for key, val in value.items()\n ])\n\n\n# Miscellaneous field types...\n\nclass ReadOnlyField(Field):\n \"\"\"\n A read-only field that simply returns the field value.\n\n If the field is a method with no parameters, the method will be called\n and it's return value used as the representation.\n\n For example, the following would call `get_expiry_date()` on the object:\n\n class ExampleSerializer(self):\n expiry_date = ReadOnlyField(source='get_expiry_date')\n \"\"\"\n\n def __init__(self, **kwargs):\n kwargs['read_only'] = True\n super(ReadOnlyField, self).__init__(**kwargs)\n\n def to_representation(self, value):\n return value\n\n\nclass HiddenField(Field):\n \"\"\"\n A hidden field does not take input from the user, or present any output,\n but it does populate a field in `validated_data`, based on its default\n value. This is particularly useful when we have a `unique_for_date`\n constraint on a pair of fields, as we need some way to include the date in\n the validated data.\n \"\"\"\n def __init__(self, **kwargs):\n assert 'default' in kwargs, 'default is a required argument.'\n kwargs['write_only'] = True\n super(HiddenField, self).__init__(**kwargs)\n\n def get_value(self, dictionary):\n # We always use the default value for `HiddenField`.\n # User input is never provided or accepted.\n return empty\n\n def to_internal_value(self, data):\n return data\n\n\nclass SerializerMethodField(Field):\n \"\"\"\n A read-only field that get its representation from calling a method on the\n parent serializer class. The method called will be of the form\n \"get_{field_name}\", and should take a single argument, which is the\n object being serialized.\n\n For example:\n\n class ExampleSerializer(self):\n extra_info = SerializerMethodField()\n\n def get_extra_info(self, obj):\n return ... 
# Calculate some data to return.\n \"\"\"\n def __init__(self, method_name=None, **kwargs):\n self.method_name = method_name\n kwargs['source'] = '*'\n kwargs['read_only'] = True\n super(SerializerMethodField, self).__init__(**kwargs)\n\n def bind(self, field_name, parent):\n # In order to enforce a consistent style, we error if a redundant\n # 'method_name' argument has been used. For example:\n # my_field = serializer.CharField(source='my_field')\n default_method_name = 'get_{field_name}'.format(field_name=field_name)\n assert self.method_name != default_method_name, (\n \"It is redundant to specify `%s` on SerializerMethodField '%s' in \"\n \"serializer '%s', because it is the same as the default method name. \"\n \"Remove the `method_name` argument.\" %\n (self.method_name, field_name, parent.__class__.__name__)\n )\n\n # The method name should default to `get_{field_name}`.\n if self.method_name is None:\n self.method_name = default_method_name\n\n super(SerializerMethodField, self).bind(field_name, parent)\n\n def to_representation(self, value):\n method = getattr(self.parent, self.method_name)\n return method(value)\n\n\nclass ModelField(Field):\n \"\"\"\n A generic field that can be used against an arbitrary model field.\n\n This is used by `ModelSerializer` when dealing with custom model fields,\n that do not have a serializer field to be mapped to.\n \"\"\"\n default_error_messages = {\n 'max_length': _('Ensure this field has no more than {max_length} characters.'),\n }\n\n def __init__(self, model_field, **kwargs):\n self.model_field = model_field\n # The `max_length` option is supported by Django's base `Field` class,\n # so we'd better support it here.\n max_length = kwargs.pop('max_length', None)\n super(ModelField, self).__init__(**kwargs)\n if max_length is not None:\n message = self.error_messages['max_length'].format(max_length=max_length)\n self.validators.append(MaxLengthValidator(max_length, message=message))\n\n def to_internal_value(self, data):\n rel = getattr(self.model_field, 'rel', None)\n if rel is not None:\n return rel.to._meta.get_field(rel.field_name).to_python(data)\n return self.model_field.to_python(data)\n\n def get_attribute(self, obj):\n # We pass the object instance onto `to_representation`,\n # not just the field attribute.\n return obj\n\n def to_representation(self, obj):\n value = self.model_field._get_val_from_obj(obj)\n if is_protected_type(value):\n return value\n return self.model_field.value_to_string(obj)\n", "path": "rest_framework/fields.py"}], "after_files": [{"content": "from __future__ import unicode_literals\nfrom django.conf import settings\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.exceptions import ValidationError as DjangoValidationError\nfrom django.core.validators import RegexValidator\nfrom django.forms import ImageField as DjangoImageField\nfrom django.utils import six, timezone\nfrom django.utils.dateparse import parse_date, parse_datetime, parse_time\nfrom django.utils.encoding import is_protected_type, smart_text\nfrom django.utils.translation import ugettext_lazy as _\nfrom rest_framework import ISO_8601\nfrom rest_framework.compat import (\n EmailValidator, MinValueValidator, MaxValueValidator,\n MinLengthValidator, MaxLengthValidator, URLValidator, OrderedDict,\n unicode_repr, unicode_to_repr\n)\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.settings import api_settings\nfrom rest_framework.utils import html, representation, humanize_datetime\nimport 
collections\nimport copy\nimport datetime\nimport decimal\nimport inspect\nimport re\nimport uuid\n\n\nclass empty:\n \"\"\"\n This class is used to represent no data being provided for a given input\n or output value.\n\n It is required because `None` may be a valid input or output value.\n \"\"\"\n pass\n\n\ndef is_simple_callable(obj):\n \"\"\"\n True if the object is a callable that takes no arguments.\n \"\"\"\n function = inspect.isfunction(obj)\n method = inspect.ismethod(obj)\n\n if not (function or method):\n return False\n\n args, _, _, defaults = inspect.getargspec(obj)\n len_args = len(args) if function else len(args) - 1\n len_defaults = len(defaults) if defaults else 0\n return len_args <= len_defaults\n\n\ndef get_attribute(instance, attrs):\n \"\"\"\n Similar to Python's built in `getattr(instance, attr)`,\n but takes a list of nested attributes, instead of a single attribute.\n\n Also accepts either attribute lookup on objects or dictionary lookups.\n \"\"\"\n for attr in attrs:\n if instance is None:\n # Break out early if we get `None` at any point in a nested lookup.\n return None\n try:\n if isinstance(instance, collections.Mapping):\n instance = instance[attr]\n else:\n instance = getattr(instance, attr)\n except ObjectDoesNotExist:\n return None\n if is_simple_callable(instance):\n try:\n instance = instance()\n except (AttributeError, KeyError) as exc:\n # If we raised an Attribute or KeyError here it'd get treated\n # as an omitted field in `Field.get_attribute()`. Instead we\n # raise a ValueError to ensure the exception is not masked.\n raise ValueError('Exception raised in callable attribute \"{0}\"; original exception was: {1}'.format(attr, exc))\n\n return instance\n\n\ndef set_value(dictionary, keys, value):\n \"\"\"\n Similar to Python's built in `dictionary[key] = value`,\n but takes a list of nested keys instead of a single key.\n\n set_value({'a': 1}, [], {'b': 2}) -> {'a': 1, 'b': 2}\n set_value({'a': 1}, ['x'], 2) -> {'a': 1, 'x': 2}\n set_value({'a': 1}, ['x', 'y'], 2) -> {'a': 1, 'x': {'y': 2}}\n \"\"\"\n if not keys:\n dictionary.update(value)\n return\n\n for key in keys[:-1]:\n if key not in dictionary:\n dictionary[key] = {}\n dictionary = dictionary[key]\n\n dictionary[keys[-1]] = value\n\n\nclass CreateOnlyDefault(object):\n \"\"\"\n This class may be used to provide default values that are only used\n for create operations, but that do not return any value for update\n operations.\n \"\"\"\n def __init__(self, default):\n self.default = default\n\n def set_context(self, serializer_field):\n self.is_update = serializer_field.parent.instance is not None\n if callable(self.default) and hasattr(self.default, 'set_context') and not self.is_update:\n self.default.set_context(serializer_field)\n\n def __call__(self):\n if self.is_update:\n raise SkipField()\n if callable(self.default):\n return self.default()\n return self.default\n\n def __repr__(self):\n return unicode_to_repr(\n '%s(%s)' % (self.__class__.__name__, unicode_repr(self.default))\n )\n\n\nclass CurrentUserDefault(object):\n def set_context(self, serializer_field):\n self.user = serializer_field.context['request'].user\n\n def __call__(self):\n return self.user\n\n def __repr__(self):\n return unicode_to_repr('%s()' % self.__class__.__name__)\n\n\nclass SkipField(Exception):\n pass\n\n\nNOT_READ_ONLY_WRITE_ONLY = 'May not set both `read_only` and `write_only`'\nNOT_READ_ONLY_REQUIRED = 'May not set both `read_only` and `required`'\nNOT_REQUIRED_DEFAULT = 'May not set both `required` and 
`default`'\nUSE_READONLYFIELD = 'Field(read_only=True) should be ReadOnlyField'\nMISSING_ERROR_MESSAGE = (\n 'ValidationError raised by `{class_name}`, but error key `{key}` does '\n 'not exist in the `error_messages` dictionary.'\n)\n\n\nclass Field(object):\n _creation_counter = 0\n\n default_error_messages = {\n 'required': _('This field is required.'),\n 'null': _('This field may not be null.')\n }\n default_validators = []\n default_empty_html = empty\n initial = None\n\n def __init__(self, read_only=False, write_only=False,\n required=None, default=empty, initial=empty, source=None,\n label=None, help_text=None, style=None,\n error_messages=None, validators=None, allow_null=False):\n self._creation_counter = Field._creation_counter\n Field._creation_counter += 1\n\n # If `required` is unset, then use `True` unless a default is provided.\n if required is None:\n required = default is empty and not read_only\n\n # Some combinations of keyword arguments do not make sense.\n assert not (read_only and write_only), NOT_READ_ONLY_WRITE_ONLY\n assert not (read_only and required), NOT_READ_ONLY_REQUIRED\n assert not (required and default is not empty), NOT_REQUIRED_DEFAULT\n assert not (read_only and self.__class__ == Field), USE_READONLYFIELD\n\n self.read_only = read_only\n self.write_only = write_only\n self.required = required\n self.default = default\n self.source = source\n self.initial = self.initial if (initial is empty) else initial\n self.label = label\n self.help_text = help_text\n self.style = {} if style is None else style\n self.allow_null = allow_null\n\n if self.default_empty_html is not empty:\n if not required:\n self.default_empty_html = empty\n elif default is not empty:\n self.default_empty_html = default\n\n if validators is not None:\n self.validators = validators[:]\n\n # These are set up by `.bind()` when the field is added to a serializer.\n self.field_name = None\n self.parent = None\n\n # Collect default error message from self and parent classes\n messages = {}\n for cls in reversed(self.__class__.__mro__):\n messages.update(getattr(cls, 'default_error_messages', {}))\n messages.update(error_messages or {})\n self.error_messages = messages\n\n def bind(self, field_name, parent):\n \"\"\"\n Initializes the field name and parent for the field instance.\n Called when a field is added to the parent serializer instance.\n \"\"\"\n\n # In order to enforce a consistent style, we error if a redundant\n # 'source' argument has been used. For example:\n # my_field = serializer.CharField(source='my_field')\n assert self.source != field_name, (\n \"It is redundant to specify `source='%s'` on field '%s' in \"\n \"serializer '%s', because it is the same as the field name. 
\"\n \"Remove the `source` keyword argument.\" %\n (field_name, self.__class__.__name__, parent.__class__.__name__)\n )\n\n self.field_name = field_name\n self.parent = parent\n\n # `self.label` should default to being based on the field name.\n if self.label is None:\n self.label = field_name.replace('_', ' ').capitalize()\n\n # self.source should default to being the same as the field name.\n if self.source is None:\n self.source = field_name\n\n # self.source_attrs is a list of attributes that need to be looked up\n # when serializing the instance, or populating the validated data.\n if self.source == '*':\n self.source_attrs = []\n else:\n self.source_attrs = self.source.split('.')\n\n # .validators is a lazily loaded property, that gets its default\n # value from `get_validators`.\n @property\n def validators(self):\n if not hasattr(self, '_validators'):\n self._validators = self.get_validators()\n return self._validators\n\n @validators.setter\n def validators(self, validators):\n self._validators = validators\n\n def get_validators(self):\n return self.default_validators[:]\n\n def get_initial(self):\n \"\"\"\n Return a value to use when the field is being returned as a primitive\n value, without any object instance.\n \"\"\"\n return self.initial\n\n def get_value(self, dictionary):\n \"\"\"\n Given the *incoming* primitive data, return the value for this field\n that should be validated and transformed to a native value.\n \"\"\"\n if html.is_html_input(dictionary):\n # HTML forms will represent empty fields as '', and cannot\n # represent None or False values directly.\n if self.field_name not in dictionary:\n if getattr(self.root, 'partial', False):\n return empty\n return self.default_empty_html\n ret = dictionary[self.field_name]\n if ret == '' and self.allow_null:\n # If the field is blank, and null is a valid value then\n # determine if we should use null instead.\n return '' if getattr(self, 'allow_blank', False) else None\n return ret\n return dictionary.get(self.field_name, empty)\n\n def get_attribute(self, instance):\n \"\"\"\n Given the *outgoing* object instance, return the primitive value\n that should be used for this field.\n \"\"\"\n try:\n return get_attribute(instance, self.source_attrs)\n except (KeyError, AttributeError) as exc:\n if not self.required and self.default is empty:\n raise SkipField()\n msg = (\n 'Got {exc_type} when attempting to get a value for field '\n '`{field}` on serializer `{serializer}`.\\nThe serializer '\n 'field might be named incorrectly and not match '\n 'any attribute or key on the `{instance}` instance.\\n'\n 'Original exception text was: {exc}.'.format(\n exc_type=type(exc).__name__,\n field=self.field_name,\n serializer=self.parent.__class__.__name__,\n instance=instance.__class__.__name__,\n exc=exc\n )\n )\n raise type(exc)(msg)\n\n def get_default(self):\n \"\"\"\n Return the default value to use when validating data if no input\n is provided for this field.\n\n If a default has not been set for this field then this will simply\n return `empty`, indicating that no value should be set in the\n validated data for this field.\n \"\"\"\n if self.default is empty:\n raise SkipField()\n if callable(self.default):\n if hasattr(self.default, 'set_context'):\n self.default.set_context(self)\n return self.default()\n return self.default\n\n def validate_empty_values(self, data):\n \"\"\"\n Validate empty values, and either:\n\n * Raise `ValidationError`, indicating invalid data.\n * Raise `SkipField`, indicating that the field should be 
ignored.\n * Return (True, data), indicating an empty value that should be\n returned without any further validation being applied.\n * Return (False, data), indicating a non-empty value, that should\n have validation applied as normal.\n \"\"\"\n if self.read_only:\n return (True, self.get_default())\n\n if data is empty:\n if getattr(self.root, 'partial', False):\n raise SkipField()\n if self.required:\n self.fail('required')\n return (True, self.get_default())\n\n if data is None:\n if not self.allow_null:\n self.fail('null')\n return (True, None)\n\n return (False, data)\n\n def run_validation(self, data=empty):\n \"\"\"\n Validate a simple representation and return the internal value.\n\n The provided data may be `empty` if no representation was included\n in the input.\n\n May raise `SkipField` if the field should not be included in the\n validated data.\n \"\"\"\n (is_empty_value, data) = self.validate_empty_values(data)\n if is_empty_value:\n return data\n value = self.to_internal_value(data)\n self.run_validators(value)\n return value\n\n def run_validators(self, value):\n \"\"\"\n Test the given value against all the validators on the field,\n and either raise a `ValidationError` or simply return.\n \"\"\"\n errors = []\n for validator in self.validators:\n if hasattr(validator, 'set_context'):\n validator.set_context(self)\n\n try:\n validator(value)\n except ValidationError as exc:\n # If the validation error contains a mapping of fields to\n # errors then simply raise it immediately rather than\n # attempting to accumulate a list of errors.\n if isinstance(exc.detail, dict):\n raise\n errors.extend(exc.detail)\n except DjangoValidationError as exc:\n errors.extend(exc.messages)\n if errors:\n raise ValidationError(errors)\n\n def to_internal_value(self, data):\n \"\"\"\n Transform the *incoming* primitive data into a native value.\n \"\"\"\n raise NotImplementedError(\n '{cls}.to_internal_value() must be implemented.'.format(\n cls=self.__class__.__name__\n )\n )\n\n def to_representation(self, value):\n \"\"\"\n Transform the *outgoing* native value into primitive data.\n \"\"\"\n raise NotImplementedError(\n '{cls}.to_representation() must be implemented.\\n'\n 'If you are upgrading from REST framework version 2 '\n 'you might want `ReadOnlyField`.'.format(\n cls=self.__class__.__name__\n )\n )\n\n def fail(self, key, **kwargs):\n \"\"\"\n A helper method that simply raises a validation error.\n \"\"\"\n try:\n msg = self.error_messages[key]\n except KeyError:\n class_name = self.__class__.__name__\n msg = MISSING_ERROR_MESSAGE.format(class_name=class_name, key=key)\n raise AssertionError(msg)\n message_string = msg.format(**kwargs)\n raise ValidationError(message_string)\n\n @property\n def root(self):\n \"\"\"\n Returns the top-level serializer for this field.\n \"\"\"\n root = self\n while root.parent is not None:\n root = root.parent\n return root\n\n @property\n def context(self):\n \"\"\"\n Returns the context as passed to the root serializer on initialization.\n \"\"\"\n return getattr(self.root, '_context', {})\n\n def __new__(cls, *args, **kwargs):\n \"\"\"\n When a field is instantiated, we store the arguments that were used,\n so that we can present a helpful representation of the object.\n \"\"\"\n instance = super(Field, cls).__new__(cls)\n instance._args = args\n instance._kwargs = kwargs\n return instance\n\n def __deepcopy__(self, memo):\n \"\"\"\n When cloning fields we instantiate using the arguments it was\n originally created with, rather than copying the 
complete state.\n \"\"\"\n args = copy.deepcopy(self._args)\n kwargs = dict(self._kwargs)\n # Bit ugly, but we need to special case 'validators' as Django's\n # RegexValidator does not support deepcopy.\n # We treat validator callables as immutable objects.\n # See https://github.com/tomchristie/django-rest-framework/issues/1954\n validators = kwargs.pop('validators', None)\n kwargs = copy.deepcopy(kwargs)\n if validators is not None:\n kwargs['validators'] = validators\n return self.__class__(*args, **kwargs)\n\n def __repr__(self):\n \"\"\"\n Fields are represented using their initial calling arguments.\n This allows us to create descriptive representations for serializer\n instances that show all the declared fields on the serializer.\n \"\"\"\n return unicode_to_repr(representation.field_repr(self))\n\n\n# Boolean types...\n\nclass BooleanField(Field):\n default_error_messages = {\n 'invalid': _('\"{input}\" is not a valid boolean.')\n }\n default_empty_html = False\n initial = False\n TRUE_VALUES = set(('t', 'T', 'true', 'True', 'TRUE', '1', 1, True))\n FALSE_VALUES = set(('f', 'F', 'false', 'False', 'FALSE', '0', 0, 0.0, False))\n\n def __init__(self, **kwargs):\n assert 'allow_null' not in kwargs, '`allow_null` is not a valid option. Use `NullBooleanField` instead.'\n super(BooleanField, self).__init__(**kwargs)\n\n def to_internal_value(self, data):\n if data in self.TRUE_VALUES:\n return True\n elif data in self.FALSE_VALUES:\n return False\n self.fail('invalid', input=data)\n\n def to_representation(self, value):\n if value in self.TRUE_VALUES:\n return True\n elif value in self.FALSE_VALUES:\n return False\n return bool(value)\n\n\nclass NullBooleanField(Field):\n default_error_messages = {\n 'invalid': _('\"{input}\" is not a valid boolean.')\n }\n initial = None\n TRUE_VALUES = set(('t', 'T', 'true', 'True', 'TRUE', '1', 1, True))\n FALSE_VALUES = set(('f', 'F', 'false', 'False', 'FALSE', '0', 0, 0.0, False))\n NULL_VALUES = set(('n', 'N', 'null', 'Null', 'NULL', '', None))\n\n def __init__(self, **kwargs):\n assert 'allow_null' not in kwargs, '`allow_null` is not a valid option.'\n kwargs['allow_null'] = True\n super(NullBooleanField, self).__init__(**kwargs)\n\n def to_internal_value(self, data):\n if data in self.TRUE_VALUES:\n return True\n elif data in self.FALSE_VALUES:\n return False\n elif data in self.NULL_VALUES:\n return None\n self.fail('invalid', input=data)\n\n def to_representation(self, value):\n if value in self.NULL_VALUES:\n return None\n if value in self.TRUE_VALUES:\n return True\n elif value in self.FALSE_VALUES:\n return False\n return bool(value)\n\n\n# String types...\n\nclass CharField(Field):\n default_error_messages = {\n 'blank': _('This field may not be blank.'),\n 'max_length': _('Ensure this field has no more than {max_length} characters.'),\n 'min_length': _('Ensure this field has at least {min_length} characters.')\n }\n initial = ''\n\n def __init__(self, **kwargs):\n self.allow_blank = kwargs.pop('allow_blank', False)\n self.trim_whitespace = kwargs.pop('trim_whitespace', True)\n self.max_length = kwargs.pop('max_length', None)\n self.min_length = kwargs.pop('min_length', None)\n super(CharField, self).__init__(**kwargs)\n if self.max_length is not None:\n message = self.error_messages['max_length'].format(max_length=self.max_length)\n self.validators.append(MaxLengthValidator(self.max_length, message=message))\n if self.min_length is not None:\n message = self.error_messages['min_length'].format(min_length=self.min_length)\n 
self.validators.append(MinLengthValidator(self.min_length, message=message))\n\n def run_validation(self, data=empty):\n # Test for the empty string here so that it does not get validated,\n # and so that subclasses do not need to handle it explicitly\n # inside the `to_internal_value()` method.\n if data == '':\n if not self.allow_blank:\n self.fail('blank')\n return ''\n return super(CharField, self).run_validation(data)\n\n def to_internal_value(self, data):\n value = six.text_type(data)\n return value.strip() if self.trim_whitespace else value\n\n def to_representation(self, value):\n return six.text_type(value)\n\n\nclass EmailField(CharField):\n default_error_messages = {\n 'invalid': _('Enter a valid email address.')\n }\n\n def __init__(self, **kwargs):\n super(EmailField, self).__init__(**kwargs)\n validator = EmailValidator(message=self.error_messages['invalid'])\n self.validators.append(validator)\n\n\nclass RegexField(CharField):\n default_error_messages = {\n 'invalid': _('This value does not match the required pattern.')\n }\n\n def __init__(self, regex, **kwargs):\n super(RegexField, self).__init__(**kwargs)\n validator = RegexValidator(regex, message=self.error_messages['invalid'])\n self.validators.append(validator)\n\n\nclass SlugField(CharField):\n default_error_messages = {\n 'invalid': _('Enter a valid \"slug\" consisting of letters, numbers, underscores or hyphens.')\n }\n\n def __init__(self, **kwargs):\n super(SlugField, self).__init__(**kwargs)\n slug_regex = re.compile(r'^[-a-zA-Z0-9_]+$')\n validator = RegexValidator(slug_regex, message=self.error_messages['invalid'])\n self.validators.append(validator)\n\n\nclass URLField(CharField):\n default_error_messages = {\n 'invalid': _('Enter a valid URL.')\n }\n\n def __init__(self, **kwargs):\n super(URLField, self).__init__(**kwargs)\n validator = URLValidator(message=self.error_messages['invalid'])\n self.validators.append(validator)\n\n\nclass UUIDField(Field):\n default_error_messages = {\n 'invalid': _('\"{value}\" is not a valid UUID.'),\n }\n\n def to_internal_value(self, data):\n if not isinstance(data, uuid.UUID):\n try:\n return uuid.UUID(data)\n except (ValueError, TypeError):\n self.fail('invalid', value=data)\n return data\n\n def to_representation(self, value):\n return str(value)\n\n\n# Number types...\n\nclass IntegerField(Field):\n default_error_messages = {\n 'invalid': _('A valid integer is required.'),\n 'max_value': _('Ensure this value is less than or equal to {max_value}.'),\n 'min_value': _('Ensure this value is greater than or equal to {min_value}.'),\n 'max_string_length': _('String value too large.')\n }\n MAX_STRING_LENGTH = 1000 # Guard against malicious string inputs.\n re_decimal = re.compile(r'\\.0*\\s*$') # allow e.g. 
'1.0' as an int, but not '1.2'\n\n def __init__(self, **kwargs):\n self.max_value = kwargs.pop('max_value', None)\n self.min_value = kwargs.pop('min_value', None)\n super(IntegerField, self).__init__(**kwargs)\n if self.max_value is not None:\n message = self.error_messages['max_value'].format(max_value=self.max_value)\n self.validators.append(MaxValueValidator(self.max_value, message=message))\n if self.min_value is not None:\n message = self.error_messages['min_value'].format(min_value=self.min_value)\n self.validators.append(MinValueValidator(self.min_value, message=message))\n\n def to_internal_value(self, data):\n if isinstance(data, six.text_type) and len(data) > self.MAX_STRING_LENGTH:\n self.fail('max_string_length')\n\n try:\n data = int(self.re_decimal.sub('', str(data)))\n except (ValueError, TypeError):\n self.fail('invalid')\n return data\n\n def to_representation(self, value):\n return int(value)\n\n\nclass FloatField(Field):\n default_error_messages = {\n 'invalid': _('A valid number is required.'),\n 'max_value': _('Ensure this value is less than or equal to {max_value}.'),\n 'min_value': _('Ensure this value is greater than or equal to {min_value}.'),\n 'max_string_length': _('String value too large.')\n }\n MAX_STRING_LENGTH = 1000 # Guard against malicious string inputs.\n\n def __init__(self, **kwargs):\n self.max_value = kwargs.pop('max_value', None)\n self.min_value = kwargs.pop('min_value', None)\n super(FloatField, self).__init__(**kwargs)\n if self.max_value is not None:\n message = self.error_messages['max_value'].format(max_value=self.max_value)\n self.validators.append(MaxValueValidator(self.max_value, message=message))\n if self.min_value is not None:\n message = self.error_messages['min_value'].format(min_value=self.min_value)\n self.validators.append(MinValueValidator(self.min_value, message=message))\n\n def to_internal_value(self, data):\n if isinstance(data, six.text_type) and len(data) > self.MAX_STRING_LENGTH:\n self.fail('max_string_length')\n\n try:\n return float(data)\n except (TypeError, ValueError):\n self.fail('invalid')\n\n def to_representation(self, value):\n return float(value)\n\n\nclass DecimalField(Field):\n default_error_messages = {\n 'invalid': _('A valid number is required.'),\n 'max_value': _('Ensure this value is less than or equal to {max_value}.'),\n 'min_value': _('Ensure this value is greater than or equal to {min_value}.'),\n 'max_digits': _('Ensure that there are no more than {max_digits} digits in total.'),\n 'max_decimal_places': _('Ensure that there are no more than {max_decimal_places} decimal places.'),\n 'max_whole_digits': _('Ensure that there are no more than {max_whole_digits} digits before the decimal point.'),\n 'max_string_length': _('String value too large.')\n }\n MAX_STRING_LENGTH = 1000 # Guard against malicious string inputs.\n\n coerce_to_string = api_settings.COERCE_DECIMAL_TO_STRING\n\n def __init__(self, max_digits, decimal_places, coerce_to_string=None, max_value=None, min_value=None, **kwargs):\n self.max_digits = max_digits\n self.decimal_places = decimal_places\n self.coerce_to_string = coerce_to_string if (coerce_to_string is not None) else self.coerce_to_string\n\n self.max_value = max_value\n self.min_value = min_value\n\n super(DecimalField, self).__init__(**kwargs)\n\n if self.max_value is not None:\n message = self.error_messages['max_value'].format(max_value=self.max_value)\n self.validators.append(MaxValueValidator(self.max_value, message=message))\n if self.min_value is not None:\n message = 
self.error_messages['min_value'].format(min_value=self.min_value)\n self.validators.append(MinValueValidator(self.min_value, message=message))\n\n def to_internal_value(self, data):\n \"\"\"\n Validates that the input is a decimal number. Returns a Decimal\n instance. Returns None for empty values. Ensures that there are no more\n than max_digits in the number, and no more than decimal_places digits\n after the decimal point.\n \"\"\"\n data = smart_text(data).strip()\n if len(data) > self.MAX_STRING_LENGTH:\n self.fail('max_string_length')\n\n try:\n value = decimal.Decimal(data)\n except decimal.DecimalException:\n self.fail('invalid')\n\n # Check for NaN. It is the only value that isn't equal to itself,\n # so we can use this to identify NaN values.\n if value != value:\n self.fail('invalid')\n\n # Check for infinity and negative infinity.\n if value in (decimal.Decimal('Inf'), decimal.Decimal('-Inf')):\n self.fail('invalid')\n\n sign, digittuple, exponent = value.as_tuple()\n decimals = exponent * decimal.Decimal(-1) if exponent < 0 else 0\n\n # digittuple doesn't include any leading zeros.\n digits = len(digittuple)\n if decimals > digits:\n # We have leading zeros up to or past the decimal point. Count\n # everything past the decimal point as a digit. We do not count\n # 0 before the decimal point as a digit since that would mean\n # we would not allow max_digits = decimal_places.\n digits = decimals\n whole_digits = digits - decimals\n\n if self.max_digits is not None and digits > self.max_digits:\n self.fail('max_digits', max_digits=self.max_digits)\n if self.decimal_places is not None and decimals > self.decimal_places:\n self.fail('max_decimal_places', max_decimal_places=self.decimal_places)\n if self.max_digits is not None and self.decimal_places is not None and whole_digits > (self.max_digits - self.decimal_places):\n self.fail('max_whole_digits', max_whole_digits=self.max_digits - self.decimal_places)\n\n return value\n\n def to_representation(self, value):\n if not isinstance(value, decimal.Decimal):\n value = decimal.Decimal(six.text_type(value).strip())\n\n context = decimal.getcontext().copy()\n context.prec = self.max_digits\n quantized = value.quantize(\n decimal.Decimal('.1') ** self.decimal_places,\n context=context\n )\n if not self.coerce_to_string:\n return quantized\n return '{0:f}'.format(quantized)\n\n\n# Date & time fields...\n\nclass DateTimeField(Field):\n default_error_messages = {\n 'invalid': _('Datetime has wrong format. 
Use one of these formats instead: {format}.'),\n 'date': _('Expected a datetime but got a date.'),\n }\n format = api_settings.DATETIME_FORMAT\n input_formats = api_settings.DATETIME_INPUT_FORMATS\n default_timezone = timezone.get_default_timezone() if settings.USE_TZ else None\n\n def __init__(self, format=empty, input_formats=None, default_timezone=None, *args, **kwargs):\n self.format = format if format is not empty else self.format\n self.input_formats = input_formats if input_formats is not None else self.input_formats\n self.default_timezone = default_timezone if default_timezone is not None else self.default_timezone\n super(DateTimeField, self).__init__(*args, **kwargs)\n\n def enforce_timezone(self, value):\n \"\"\"\n When `self.default_timezone` is `None`, always return naive datetimes.\n When `self.default_timezone` is not `None`, always return aware datetimes.\n \"\"\"\n if (self.default_timezone is not None) and not timezone.is_aware(value):\n return timezone.make_aware(value, self.default_timezone)\n elif (self.default_timezone is None) and timezone.is_aware(value):\n return timezone.make_naive(value, timezone.UTC())\n return value\n\n def to_internal_value(self, value):\n if isinstance(value, datetime.date) and not isinstance(value, datetime.datetime):\n self.fail('date')\n\n if isinstance(value, datetime.datetime):\n return self.enforce_timezone(value)\n\n for format in self.input_formats:\n if format.lower() == ISO_8601:\n try:\n parsed = parse_datetime(value)\n except (ValueError, TypeError):\n pass\n else:\n if parsed is not None:\n return self.enforce_timezone(parsed)\n else:\n try:\n parsed = datetime.datetime.strptime(value, format)\n except (ValueError, TypeError):\n pass\n else:\n return self.enforce_timezone(parsed)\n\n humanized_format = humanize_datetime.datetime_formats(self.input_formats)\n self.fail('invalid', format=humanized_format)\n\n def to_representation(self, value):\n if self.format is None:\n return value\n\n if self.format.lower() == ISO_8601:\n value = value.isoformat()\n if value.endswith('+00:00'):\n value = value[:-6] + 'Z'\n return value\n return value.strftime(self.format)\n\n\nclass DateField(Field):\n default_error_messages = {\n 'invalid': _('Date has wrong format. 
Use one of these formats instead: {format}.'),\n 'datetime': _('Expected a date but got a datetime.'),\n }\n format = api_settings.DATE_FORMAT\n input_formats = api_settings.DATE_INPUT_FORMATS\n\n def __init__(self, format=empty, input_formats=None, *args, **kwargs):\n self.format = format if format is not empty else self.format\n self.input_formats = input_formats if input_formats is not None else self.input_formats\n super(DateField, self).__init__(*args, **kwargs)\n\n def to_internal_value(self, value):\n if isinstance(value, datetime.datetime):\n self.fail('datetime')\n\n if isinstance(value, datetime.date):\n return value\n\n for format in self.input_formats:\n if format.lower() == ISO_8601:\n try:\n parsed = parse_date(value)\n except (ValueError, TypeError):\n pass\n else:\n if parsed is not None:\n return parsed\n else:\n try:\n parsed = datetime.datetime.strptime(value, format)\n except (ValueError, TypeError):\n pass\n else:\n return parsed.date()\n\n humanized_format = humanize_datetime.date_formats(self.input_formats)\n self.fail('invalid', format=humanized_format)\n\n def to_representation(self, value):\n if not value:\n return None\n\n if self.format is None:\n return value\n\n # Applying a `DateField` to a datetime value is almost always\n # not a sensible thing to do, as it means naively dropping\n # any explicit or implicit timezone info.\n assert not isinstance(value, datetime.datetime), (\n 'Expected a `date`, but got a `datetime`. Refusing to coerce, '\n 'as this may mean losing timezone information. Use a custom '\n 'read-only field and deal with timezone issues explicitly.'\n )\n\n if self.format.lower() == ISO_8601:\n if (isinstance(value, str)):\n value = datetime.datetime.strptime(value, '%Y-%m-%d').date()\n return value.isoformat()\n\n return value.strftime(self.format)\n\n\nclass TimeField(Field):\n default_error_messages = {\n 'invalid': _('Time has wrong format. Use one of these formats instead: {format}.'),\n }\n format = api_settings.TIME_FORMAT\n input_formats = api_settings.TIME_INPUT_FORMATS\n\n def __init__(self, format=empty, input_formats=None, *args, **kwargs):\n self.format = format if format is not empty else self.format\n self.input_formats = input_formats if input_formats is not None else self.input_formats\n super(TimeField, self).__init__(*args, **kwargs)\n\n def to_internal_value(self, value):\n if isinstance(value, datetime.time):\n return value\n\n for format in self.input_formats:\n if format.lower() == ISO_8601:\n try:\n parsed = parse_time(value)\n except (ValueError, TypeError):\n pass\n else:\n if parsed is not None:\n return parsed\n else:\n try:\n parsed = datetime.datetime.strptime(value, format)\n except (ValueError, TypeError):\n pass\n else:\n return parsed.time()\n\n humanized_format = humanize_datetime.time_formats(self.input_formats)\n self.fail('invalid', format=humanized_format)\n\n def to_representation(self, value):\n if self.format is None:\n return value\n\n # Applying a `TimeField` to a datetime value is almost always\n # not a sensible thing to do, as it means naively dropping\n # any explicit or implicit timezone info.\n assert not isinstance(value, datetime.datetime), (\n 'Expected a `time`, but got a `datetime`. Refusing to coerce, '\n 'as this may mean losing timezone information. 
Use a custom '\n 'read-only field and deal with timezone issues explicitly.'\n )\n\n if self.format.lower() == ISO_8601:\n return value.isoformat()\n return value.strftime(self.format)\n\n\n# Choice types...\n\nclass ChoiceField(Field):\n default_error_messages = {\n 'invalid_choice': _('\"{input}\" is not a valid choice.')\n }\n\n def __init__(self, choices, **kwargs):\n # Allow either single or paired choices style:\n # choices = [1, 2, 3]\n # choices = [(1, 'First'), (2, 'Second'), (3, 'Third')]\n pairs = [\n isinstance(item, (list, tuple)) and len(item) == 2\n for item in choices\n ]\n if all(pairs):\n self.choices = OrderedDict([(key, display_value) for key, display_value in choices])\n else:\n self.choices = OrderedDict([(item, item) for item in choices])\n\n # Map the string representation of choices to the underlying value.\n # Allows us to deal with eg. integer choices while supporting either\n # integer or string input, but still get the correct datatype out.\n self.choice_strings_to_values = dict([\n (six.text_type(key), key) for key in self.choices.keys()\n ])\n\n self.allow_blank = kwargs.pop('allow_blank', False)\n\n super(ChoiceField, self).__init__(**kwargs)\n\n def to_internal_value(self, data):\n if data == '' and self.allow_blank:\n return ''\n\n try:\n return self.choice_strings_to_values[six.text_type(data)]\n except KeyError:\n self.fail('invalid_choice', input=data)\n\n def to_representation(self, value):\n if value in ('', None):\n return value\n return self.choice_strings_to_values.get(six.text_type(value), value)\n\n\nclass MultipleChoiceField(ChoiceField):\n default_error_messages = {\n 'invalid_choice': _('\"{input}\" is not a valid choice.'),\n 'not_a_list': _('Expected a list of items but got type \"{input_type}\".')\n }\n default_empty_html = []\n\n def get_value(self, dictionary):\n # We override the default field access in order to support\n # lists in HTML forms.\n if html.is_html_input(dictionary):\n return dictionary.getlist(self.field_name)\n return dictionary.get(self.field_name, empty)\n\n def to_internal_value(self, data):\n if isinstance(data, type('')) or not hasattr(data, '__iter__'):\n self.fail('not_a_list', input_type=type(data).__name__)\n\n return set([\n super(MultipleChoiceField, self).to_internal_value(item)\n for item in data\n ])\n\n def to_representation(self, value):\n return set([\n self.choice_strings_to_values.get(six.text_type(item), item) for item in value\n ])\n\n\n# File types...\n\nclass FileField(Field):\n default_error_messages = {\n 'required': _('No file was submitted.'),\n 'invalid': _('The submitted data was not a file. 
Check the encoding type on the form.'),\n 'no_name': _('No filename could be determined.'),\n 'empty': _('The submitted file is empty.'),\n 'max_length': _('Ensure this filename has at most {max_length} characters (it has {length}).'),\n }\n use_url = api_settings.UPLOADED_FILES_USE_URL\n\n def __init__(self, *args, **kwargs):\n self.max_length = kwargs.pop('max_length', None)\n self.allow_empty_file = kwargs.pop('allow_empty_file', False)\n self.use_url = kwargs.pop('use_url', self.use_url)\n super(FileField, self).__init__(*args, **kwargs)\n\n def to_internal_value(self, data):\n try:\n # `UploadedFile` objects should have name and size attributes.\n file_name = data.name\n file_size = data.size\n except AttributeError:\n self.fail('invalid')\n\n if not file_name:\n self.fail('no_name')\n if not self.allow_empty_file and not file_size:\n self.fail('empty')\n if self.max_length and len(file_name) > self.max_length:\n self.fail('max_length', max_length=self.max_length, length=len(file_name))\n\n return data\n\n def to_representation(self, value):\n if self.use_url:\n if not value:\n return None\n url = value.url\n request = self.context.get('request', None)\n if request is not None:\n return request.build_absolute_uri(url)\n return url\n return value.name\n\n\nclass ImageField(FileField):\n default_error_messages = {\n 'invalid_image': _(\n 'Upload a valid image. The file you uploaded was either not an image or a corrupted image.'\n ),\n }\n\n def __init__(self, *args, **kwargs):\n self._DjangoImageField = kwargs.pop('_DjangoImageField', DjangoImageField)\n super(ImageField, self).__init__(*args, **kwargs)\n\n def to_internal_value(self, data):\n # Image validation is a bit grungy, so we'll just outright\n # defer to Django's implementation so we don't need to\n # consider it, or treat PIL as a test dependency.\n file_object = super(ImageField, self).to_internal_value(data)\n django_field = self._DjangoImageField()\n django_field.error_messages = self.error_messages\n django_field.to_python(file_object)\n return file_object\n\n\n# Composite field types...\n\nclass _UnvalidatedField(Field):\n def __init__(self, *args, **kwargs):\n super(_UnvalidatedField, self).__init__(*args, **kwargs)\n self.allow_blank = True\n self.allow_null = True\n\n def to_internal_value(self, data):\n return data\n\n def to_representation(self, value):\n return value\n\n\nclass ListField(Field):\n child = _UnvalidatedField()\n initial = []\n default_error_messages = {\n 'not_a_list': _('Expected a list of items but got type \"{input_type}\".')\n }\n\n def __init__(self, *args, **kwargs):\n self.child = kwargs.pop('child', copy.deepcopy(self.child))\n assert not inspect.isclass(self.child), '`child` has not been instantiated.'\n super(ListField, self).__init__(*args, **kwargs)\n self.child.bind(field_name='', parent=self)\n\n def get_value(self, dictionary):\n # We override the default field access in order to support\n # lists in HTML forms.\n if html.is_html_input(dictionary):\n return html.parse_html_list(dictionary, prefix=self.field_name)\n return dictionary.get(self.field_name, empty)\n\n def to_internal_value(self, data):\n \"\"\"\n List of dicts of native values <- List of dicts of primitive datatypes.\n \"\"\"\n if html.is_html_input(data):\n data = html.parse_html_list(data)\n if isinstance(data, type('')) or not hasattr(data, '__iter__'):\n self.fail('not_a_list', input_type=type(data).__name__)\n return [self.child.run_validation(item) for item in data]\n\n def to_representation(self, data):\n \"\"\"\n 
List of object instances -> List of dicts of primitive datatypes.\n \"\"\"\n return [self.child.to_representation(item) for item in data]\n\n\nclass DictField(Field):\n child = _UnvalidatedField()\n initial = {}\n default_error_messages = {\n 'not_a_dict': _('Expected a dictionary of items but got type \"{input_type}\".')\n }\n\n def __init__(self, *args, **kwargs):\n self.child = kwargs.pop('child', copy.deepcopy(self.child))\n assert not inspect.isclass(self.child), '`child` has not been instantiated.'\n super(DictField, self).__init__(*args, **kwargs)\n self.child.bind(field_name='', parent=self)\n\n def get_value(self, dictionary):\n # We override the default field access in order to support\n # dictionaries in HTML forms.\n if html.is_html_input(dictionary):\n return html.parse_html_dict(dictionary, prefix=self.field_name)\n return dictionary.get(self.field_name, empty)\n\n def to_internal_value(self, data):\n \"\"\"\n Dicts of native values <- Dicts of primitive datatypes.\n \"\"\"\n if html.is_html_input(data):\n data = html.parse_html_dict(data)\n if not isinstance(data, dict):\n self.fail('not_a_dict', input_type=type(data).__name__)\n return dict([\n (six.text_type(key), self.child.run_validation(value))\n for key, value in data.items()\n ])\n\n def to_representation(self, value):\n \"\"\"\n List of object instances -> List of dicts of primitive datatypes.\n \"\"\"\n return dict([\n (six.text_type(key), self.child.to_representation(val))\n for key, val in value.items()\n ])\n\n\n# Miscellaneous field types...\n\nclass ReadOnlyField(Field):\n \"\"\"\n A read-only field that simply returns the field value.\n\n If the field is a method with no parameters, the method will be called\n and it's return value used as the representation.\n\n For example, the following would call `get_expiry_date()` on the object:\n\n class ExampleSerializer(self):\n expiry_date = ReadOnlyField(source='get_expiry_date')\n \"\"\"\n\n def __init__(self, **kwargs):\n kwargs['read_only'] = True\n super(ReadOnlyField, self).__init__(**kwargs)\n\n def to_representation(self, value):\n return value\n\n\nclass HiddenField(Field):\n \"\"\"\n A hidden field does not take input from the user, or present any output,\n but it does populate a field in `validated_data`, based on its default\n value. This is particularly useful when we have a `unique_for_date`\n constraint on a pair of fields, as we need some way to include the date in\n the validated data.\n \"\"\"\n def __init__(self, **kwargs):\n assert 'default' in kwargs, 'default is a required argument.'\n kwargs['write_only'] = True\n super(HiddenField, self).__init__(**kwargs)\n\n def get_value(self, dictionary):\n # We always use the default value for `HiddenField`.\n # User input is never provided or accepted.\n return empty\n\n def to_internal_value(self, data):\n return data\n\n\nclass SerializerMethodField(Field):\n \"\"\"\n A read-only field that get its representation from calling a method on the\n parent serializer class. The method called will be of the form\n \"get_{field_name}\", and should take a single argument, which is the\n object being serialized.\n\n For example:\n\n class ExampleSerializer(self):\n extra_info = SerializerMethodField()\n\n def get_extra_info(self, obj):\n return ... 
# Calculate some data to return.\n \"\"\"\n def __init__(self, method_name=None, **kwargs):\n self.method_name = method_name\n kwargs['source'] = '*'\n kwargs['read_only'] = True\n super(SerializerMethodField, self).__init__(**kwargs)\n\n def bind(self, field_name, parent):\n # In order to enforce a consistent style, we error if a redundant\n # 'method_name' argument has been used. For example:\n # my_field = serializer.CharField(source='my_field')\n default_method_name = 'get_{field_name}'.format(field_name=field_name)\n assert self.method_name != default_method_name, (\n \"It is redundant to specify `%s` on SerializerMethodField '%s' in \"\n \"serializer '%s', because it is the same as the default method name. \"\n \"Remove the `method_name` argument.\" %\n (self.method_name, field_name, parent.__class__.__name__)\n )\n\n # The method name should default to `get_{field_name}`.\n if self.method_name is None:\n self.method_name = default_method_name\n\n super(SerializerMethodField, self).bind(field_name, parent)\n\n def to_representation(self, value):\n method = getattr(self.parent, self.method_name)\n return method(value)\n\n\nclass ModelField(Field):\n \"\"\"\n A generic field that can be used against an arbitrary model field.\n\n This is used by `ModelSerializer` when dealing with custom model fields,\n that do not have a serializer field to be mapped to.\n \"\"\"\n default_error_messages = {\n 'max_length': _('Ensure this field has no more than {max_length} characters.'),\n }\n\n def __init__(self, model_field, **kwargs):\n self.model_field = model_field\n # The `max_length` option is supported by Django's base `Field` class,\n # so we'd better support it here.\n max_length = kwargs.pop('max_length', None)\n super(ModelField, self).__init__(**kwargs)\n if max_length is not None:\n message = self.error_messages['max_length'].format(max_length=max_length)\n self.validators.append(MaxLengthValidator(max_length, message=message))\n\n def to_internal_value(self, data):\n rel = getattr(self.model_field, 'rel', None)\n if rel is not None:\n return rel.to._meta.get_field(rel.field_name).to_python(data)\n return self.model_field.to_python(data)\n\n def get_attribute(self, obj):\n # We pass the object instance onto `to_representation`,\n # not just the field attribute.\n return obj\n\n def to_representation(self, obj):\n value = self.model_field._get_val_from_obj(obj)\n if is_protected_type(value):\n return value\n return self.model_field.value_to_string(obj)\n", "path": "rest_framework/fields.py"}]} |
gh_patches_debug_1277 | rasdani/github-patches | git_diff | bokeh__bokeh-6380 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
remove the hover menu item, and keep the hover function working
Feature request.
I would like to remove the hover tool's entry from the toolbar menu while keeping the hover functionality working.
--- END ISSUE ---
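For context, a minimal sketch of the set-up being described — hover tooltips added to a plot in the usual way. This is illustrative only: at the time of the report there was no supported option for hiding the hover entry from the toolbar's inspectors menu, which is precisely what is being requested.

```python
from bokeh.plotting import figure, show
from bokeh.models import HoverTool

p = figure(tools="pan,wheel_zoom")
p.circle([1, 2, 3], [4, 5, 6], size=10)

# Hover tooltips work as expected...
p.add_tools(HoverTool(tooltips=[("x", "$x"), ("y", "$y")]))

# ...but there is no argument here to suppress the hover item in the
# toolbar's inspectors menu while keeping the tooltips active.
show(p)
```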
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bokeh/models/tools.py`
Content:
```
1 ''' Bokeh comes with a number of interactive tools.
2
3 There are five types of tool interactions:
4
5 .. hlist::
6 :columns: 5
7
8 * Pan/Drag
9 * Click/Tap
10 * Scroll/Pinch
11 * Actions
12 * Inspectors
13
14 The first three comprise the category of gesture tools, and only
15 one tool for each gesture can be active at any given time. The active
16 tool is indicated on the toolbar by a highlight next to the tool.
17 Actions are immediate or modal operations that are only activated when
18 their button in the toolbar is pressed. Inspectors are passive tools that
19 merely report information or annotate the plot in some way, and may
20 always be active regardless of what other tools are currently active.
21
22 '''
23 from __future__ import absolute_import
24
25 from ..core.enums import accept_left_right_center, Anchor, DeprecatedAnchor, Dimension, Dimensions, Location, TooltipFieldFormatter
26 from ..core.has_props import abstract
27 from ..core.properties import (
28 Any, Auto, Bool, Color, Dict, Either, Enum, Float, Percent, Instance, List,
29 Override, Seq, String, Tuple
30 )
31 from ..model import Model
32
33 from .annotations import BoxAnnotation, PolyAnnotation
34 from .callbacks import Callback
35 from .renderers import Renderer
36 from .layouts import Box, LayoutDOM
37
38 class ToolEvents(Model):
39 ''' A class for reporting tools geometries from BokehJS.
40
41 .. warning::
42 This class will be superseded by a new general events system in the
43 near future.
44
45 '''
46
47 geometries = List(Dict(String, Any))
48
49 @abstract
50 class Tool(Model):
51 ''' A base class for all interactive tool types.
52
53 '''
54
55 plot = Instance(".models.plots.Plot", help="""
56 The Plot that this tool will act on.
57 """)
58
59 @abstract
60 class Action(Tool):
61 ''' A base class for tools that are buttons in the toolbar.
62
63 '''
64 pass
65
66 @abstract
67 class Drag(Tool):
68 ''' A base class for tools that respond to drag events.
69
70 '''
71 pass
72
73 @abstract
74 class Scroll(Tool):
75 ''' A base class for tools that respond to scroll events.
76
77 '''
78 pass
79
80 @abstract
81 class Tap(Tool):
82 ''' A base class for tools that respond to tap/click events.
83
84 '''
85 pass
86
87
88 @abstract
89 class Inspection(Tool):
90 ''' A base class for tools that perform "inspections", e.g. ``HoverTool``.
91
92 '''
93 pass
94
95 @abstract
96 class ToolbarBase(LayoutDOM):
97 ''' A base class for different toolbars.
98
99 '''
100
101 logo = Enum("normal", "grey", help="""
102 What version of the Bokeh logo to display on the toolbar. If
103 set to None, no logo will be displayed.
104 """)
105
106 tools = List(Instance(Tool), help="""
107 A list of tools to add to the plot.
108 """)
109
110 # This is an odd case. The sizing is custom handled. In the future we will
111 # probably set it as `stretch_width` or `stretch_height` depending on its
112 # orientation.
113 sizing_mode = Override(default=None)
114
115
116 class Toolbar(ToolbarBase):
117 ''' Collect tools to display for a single plot.
118
119 '''
120
121 active_drag = Either(Auto, Instance(Drag), help="""
122 Specify a drag tool to be active when the plot is displayed.
123 """)
124
125 active_inspect = Either(Auto, Instance(Inspection), Seq(Instance(Inspection)), help="""
126 Specify an inspection tool or sequence of inspection tools to be active when
127 the plot is displayed.
128 """)
129
130 active_scroll = Either(Auto, Instance(Scroll), help="""
131 Specify a scroll/pinch tool to be active when the plot is displayed.
132 """)
133
134 active_tap = Either(Auto, Instance(Tap), help="""
135 Specify a tap/click tool to be active when the plot is displayed.
136 """)
137
138
139 class ToolbarBox(Box):
140 ''' A layoutable toolbar that can accept the tools of multiple plots, and
141 can merge the tools into a single button for convenience.
142
143 '''
144 def _check_empty_layout(self):
145 # Overriding the children check from Box. As toolbarbox's children
146 # are normally set JS side.
147 return None
148
149 toolbar_location = Enum(Location, default='right', help="""
150 Should the toolbar be presented as if it was stuck to the `above`, `right`, `left`, `below`
151 edge of a plot. Default is `right`.
152 """)
153
154 tools = List(Instance(Tool), help="""
155 A list of tools to add to the plot.
156 """)
157
158 merge_tools = Bool(default=True, help="""
159 Merge all the tools together so there is one tool to control all the plots.
160 """)
161
162 logo = Enum("normal", "grey", help="""
163 What version of the Bokeh logo to display on the toolbar. If
164 set to None, no logo will be displayed.
165 """)
166
167
168 class PanTool(Drag):
169 ''' *toolbar icon*: |pan_icon|
170
171 The pan tool allows the user to pan a Plot by left-dragging
172 a mouse, or on touch devices by dragging a finger or stylus, across
173 the plot region.
174
175 The pan tool also activates the border regions of a Plot for "single
176 axis" panning. For instance, dragging in the vertical border or axis
177 will effect a pan in the vertical direction only, with the horizontal
178 dimension kept fixed.
179
180 .. |pan_icon| image:: /_images/icons/Pan.png
181 :height: 18pt
182
183 '''
184
185 dimensions = Enum(Dimensions, default="both", help="""
186 Which dimensions the pan tool is constrained to act in. By default
187 the pan tool will pan in any dimension, but can be configured to only
188 pan horizontally across the width of the plot, or vertically across the
189 height of the plot.
190 """)
191
192 class WheelPanTool(Scroll):
193 ''' *toolbar icon*: |wheel_pan_icon|
194
195 The wheel pan tool allows the user to pan the plot along the configured
196 dimension using the scroll wheel.
197
198 .. |wheel_pan_icon| image:: /_images/icons/WheelPan.png
199 :height: 18pt
200
201 '''
202
203 dimension = Enum(Dimension, default="width", help="""
204 Which dimension the wheel pan tool is constrained to act in. By
205 default the wheel pan tool will pan the plot along the x-axis.
206 """)
207
208
209 class WheelZoomTool(Scroll):
210 ''' *toolbar icon*: |wheel_zoom_icon|
211
212 The wheel zoom tool will zoom the plot in and out, centered on the
213 current mouse location.
214
215 The wheel zoom tool also activates the border regions of a Plot for
216 "single axis" zooming. For instance, zooming in the vertical border or
217 axis will effect a zoom in the vertical direction only, with the
218 horizontal dimension kept fixed.
219
220 .. |wheel_zoom_icon| image:: /_images/icons/WheelZoom.png
221 :height: 18pt
222
223 '''
224
225 dimensions = Enum(Dimensions, default="both", help="""
226 Which dimensions the wheel zoom tool is constrained to act in. By
227 default the wheel zoom tool will zoom in any dimension, but can be
228 configured to only zoom horizontally across the width of the plot, or
229 vertically across the height of the plot.
230 """)
231
232
233 class SaveTool(Action):
234 ''' *toolbar icon*: |save_icon|
235
236 The save tool is an action. When activated, the tool opens a download dialog
237 which allows saving an image reproduction of the plot in PNG format. If
238 automatic download is not supported by a web browser, the tool falls back to
239 opening the generated image in a new tab or window. The user can then manually
240 save it by right clicking on the image and choosing "Save As" (or similar)
241 menu item.
242
243 .. |save_icon| image:: /_images/icons/Save.png
244 :height: 18pt
245
246 '''
247
248
249 class ResetTool(Action):
250 ''' *toolbar icon*: |reset_icon|
251
252 The reset tool is an action. When activated in the toolbar, the tool
253 resets the data bounds of the plot to their values when the plot was
254 initially created.
255
256 Optionally, the reset tool also resets the plot canvas dimensions to
257 their original size.
258
259 .. |reset_icon| image:: /_images/icons/Reset.png
260 :height: 18pt
261
262 '''
263
264 reset_size = Bool(default=True, help="""
265 Whether activating the Reset tool should also reset the plot's canvas
266 dimensions to their original size.
267 """)
268
269
270 class ResizeTool(Drag):
271 ''' *toolbar icon*: |resize_icon|
272
273 The resize tool allows the user to left-drag a mouse or drag a finger
274 to resize the entire plot area on the screen.
275
276 .. |resize_icon| image:: /_images/icons/Resize.png
277 :height: 18pt
278
279 '''
280
281
282 class TapTool(Tap):
283 ''' *toolbar icon*: |tap_select_icon|
284
285 The tap selection tool allows the user to select at single points by
286 left-clicking a mouse, or tapping with a finger.
287
288 See :ref:`userguide_styling_selected_unselected_glyphs` for information
289 on styling selected and unselected glyphs.
290
291 .. |tap_select_icon| image:: /_images/icons/TapSelect.png
292 :height: 18pt
293
294 .. note::
295 Selections can be comprised of multiple regions, even those
296 made by different selection tools. Hold down the <<shift>> key
297 while making a selection to append the new selection to any
298 previous selection that might exist.
299
300 '''
301
302 names = List(String, help="""
303 A list of names to query for. If set, only renderers that
304 have a matching value for their ``name`` attribute will be used.
305 """)
306
307 renderers = List(Instance(Renderer), help="""
308 An explicit list of renderers to hit test against. If unset,
309 defaults to all renderers on a plot.
310 """)
311
312 behavior = Enum("select", "inspect", default="select", help="""
313 This tool can be configured to either make selections or inspections
314 on associated data sources. The difference is that selection changes
315 propagate across bokeh, and other components (e.g. the selection glyph)
316 will be notified. Inspections don't act like this, so it's useful to
317 configure `callback` when setting `behavior='inspect'`.
318 """)
319
320 callback = Instance(Callback, help="""
321 A client-side action specification, like opening a URL, showing
322 a dialog box, etc. See :class:`~bokeh.models.actions.Action` for details.
323 """)
324
325
326
327
328 class CrosshairTool(Inspection):
329 ''' *toolbar icon*: |crosshair_icon|
330
331 The crosshair tool is a passive inspector tool. It is generally on
332 at all times, but can be configured in the inspector's menu
333 associated with the *toolbar icon* shown above.
334
335 The crosshair tool draws a crosshair annotation over the plot,
336 centered on the current mouse position. The crosshair tool may be
337 configured to draw across only one dimension by setting the
338 ``dimension`` property to only ``width`` or ``height``.
339
340 .. |crosshair_icon| image:: /_images/icons/Crosshair.png
341 :height: 18pt
342
343 '''
344
345 dimensions = Enum(Dimensions, default="both", help="""
346 Which dimensions the crosshair tool is to track. By default, both a
347 vertical and horizontal line will be drawn. If only "width" is supplied,
348 only a horizontal line will be drawn. If only "height" is supplied,
349 only a vertical line will be drawn.
350 """)
351
352 line_color = Color(default="black", help="""
353 A color to use to stroke paths with.
354
355 Acceptable values are:
356
357 - any of the 147 named `CSS colors`_, e.g ``'green'``, ``'indigo'``
358 - an RGB(A) hex value, e.g., ``'#FF0000'``, ``'#44444444'``
359 - a 3-tuple of integers (r,g,b) between 0 and 255
360 - a 4-tuple of (r,g,b,a) where r,g,b are integers between 0..255 and a is between 0..1
361
362 .. _CSS colors: http://www.w3schools.com/cssref/css_colornames.asp
363
364 """)
365
366 line_width = Float(default=1, help="""
367 Stroke width in units of pixels.
368 """)
369
370 line_alpha = Float(default=1.0, help="""
371 An alpha value to use to stroke paths with.
372
373 Acceptable values are floating point numbers between 0 (transparent)
374 and 1 (opaque).
375
376 """)
377
378 DEFAULT_BOX_OVERLAY = lambda: BoxAnnotation(
379 level="overlay",
380 render_mode="css",
381 top_units="screen",
382 left_units="screen",
383 bottom_units="screen",
384 right_units="screen",
385 fill_color="lightgrey",
386 fill_alpha=0.5,
387 line_color="black",
388 line_alpha=1.0,
389 line_width=2,
390 line_dash=[4, 4]
391 )
392
393 class BoxZoomTool(Drag):
394 ''' *toolbar icon*: |box_zoom_icon|
395
396 The box zoom tool allows users to define a rectangular
397 region of a Plot to zoom to by dragging the mouse or a
398 finger over the plot region. The end of the drag
399 event indicates the selection region is ready.
400
401 .. |box_zoom_icon| image:: /_images/icons/BoxZoom.png
402 :height: 18pt
403
404 '''
405
406 dimensions = Enum(Dimensions, default="both", help="""
407 Which dimensions the zoom box is to be free in. By default,
408 users may freely draw zoom boxes with any dimensions. If only
409 "width" is supplied, the box will be constrained to span the entire
410 vertical space of the plot, only the horizontal dimension can be
411 controlled. If only "height" is supplied, the box will be constrained
412 to span the entire horizontal space of the plot, and the vertical
413 dimension can be controlled.
414 """)
415
416 overlay = Instance(BoxAnnotation, default=DEFAULT_BOX_OVERLAY, help="""
417 A shaded annotation drawn to indicate the selection region.
418 """)
419
420 match_aspect = Bool(default=False, help="""
421 Whether the box zoom region should be restricted to have the same
422 aspect ratio as the plot region.
423
424 .. note::
425 If the tool is restricted to one dimension, this value has
426 no effect.
427
428 """)
429
430 class ZoomInTool(Action):
431 ''' *toolbar icon*: |zoom_in_icon|
432
433 The zoom-in tool allows users to click a button to zoom in
434 by a fixed amount.
435
436 .. |zoom_in_icon| image:: /_images/icons/ZoomIn.png
437 :height: 18pt
438
439 '''
440 # TODO ZoomInTool dimensions should probably be constrained to be the same as ZoomOutTool
441 dimensions = Enum(Dimensions, default="both", help="""
442 Which dimensions the zoom-in tool is constrained to act in. By
443 default the zoom-in tool will zoom in any dimension, but can be
444 configured to only zoom horizontally across the width of the plot, or
445 vertically across the height of the plot.
446 """)
447
448 factor = Percent(default=0.1, help="""
449 Percentage to zoom for each click of the zoom-in tool.
450 """)
451
452 class ZoomOutTool(Action):
453 ''' *toolbar icon*: |zoom_out_icon|
454
455 The zoom-out tool allows users to click a button to zoom out
456 by a fixed amount.
457
458 .. |zoom_out_icon| image:: /_images/icons/ZoomOut.png
459 :height: 18pt
460
461 '''
462 dimensions = Enum(Dimensions, default="both", help="""
463 Which dimensions the zoom-out tool is constrained to act in. By
464 default the zoom-out tool will zoom in any dimension, but can be
465 configured to only zoom horizontally across the width of the plot, or
466 vertically across the height of the plot.
467 """)
468
469 factor = Percent(default=0.1, help="""
470 Percentage to zoom for each click of the zoom-out tool.
471 """)
472
473
474 class BoxSelectTool(Drag):
475 ''' *toolbar icon*: |box_select_icon|
476
477 The box selection tool allows users to make selections on a
478 Plot by indicating a rectangular region by dragging the
479 mouse or a finger over the plot region. The end of the drag
480 event indicates the selection region is ready.
481
482 See :ref:`userguide_styling_selected_unselected_glyphs` for information
483 on styling selected and unselected glyphs.
484
485
486 .. |box_select_icon| image:: /_images/icons/BoxSelect.png
487 :height: 18pt
488
489 '''
490
491 names = List(String, help="""
492 A list of names to query for. If set, only renderers that
493 have a matching value for their ``name`` attribute will be used.
494 """)
495
496 renderers = List(Instance(Renderer), help="""
497 An explicit list of renderers to hit test against. If unset,
498 defaults to all renderers on a plot.
499 """)
500
501 select_every_mousemove = Bool(False, help="""
502 Whether a selection computation should happen on every mouse
503 event, or only once, when the selection region is completed. Default: False
504 """)
505
506 dimensions = Enum(Dimensions, default="both", help="""
507 Which dimensions the box selection is to be free in. By default,
508 users may freely draw selections boxes with any dimensions. If only
509 "width" is supplied, the box will be constrained to span the entire
510 vertical space of the plot, only the horizontal dimension can be
511 controlled. If only "height" is supplied, the box will be constrained
512 to span the entire horizontal space of the plot, and the vertical
513 dimension can be controlled.
514 """)
515
516 callback = Instance(Callback, help="""
517 A callback to run in the browser on completion of drawing a selection box.
518 The cb_data parameter that is available to the Callback code will contain
519 one BoxSelectTool-specific field:
520
521 :geometry: object containing the coordinates of the selection box
522 """)
523
524 overlay = Instance(BoxAnnotation, default=DEFAULT_BOX_OVERLAY, help="""
525 A shaded annotation drawn to indicate the selection region.
526 """)
527
528 DEFAULT_POLY_OVERLAY = lambda: PolyAnnotation(
529 level="overlay",
530 xs_units="screen",
531 ys_units="screen",
532 fill_color="lightgrey",
533 fill_alpha=0.5,
534 line_color="black",
535 line_alpha=1.0,
536 line_width=2,
537 line_dash=[4, 4]
538 )
539
540 class LassoSelectTool(Drag):
541 ''' *toolbar icon*: |lasso_select_icon|
542
543 The lasso selection tool allows users to make selections on a
544 Plot by indicating a free-drawn "lasso" region by dragging the
545 mouse or a finger over the plot region. The end of the drag
546 event indicates the selection region is ready.
547
548 See :ref:`userguide_styling_selected_unselected_glyphs` for information
549 on styling selected and unselected glyphs.
550
551 .. note::
552 Selections can be comprised of multiple regions, even those
553 made by different selection tools. Hold down the <<shift>> key
554 while making a selection to append the new selection to any
555 previous selection that might exist.
556
557 .. |lasso_select_icon| image:: /_images/icons/LassoSelect.png
558 :height: 18pt
559
560 '''
561
562 names = List(String, help="""
563 A list of names to query for. If set, only renderers that
564 have a matching value for their ``name`` attribute will be used.
565 """)
566
567 renderers = List(Instance(Renderer), help="""
568 An explicit list of renderers to hit test against. If unset,
569 defaults to all renderers on a plot.
570 """)
571
572 select_every_mousemove = Bool(True, help="""
573 Whether a selection computation should happen on every mouse
574 event, or only once, when the selection region is completed. Default: True
575 """)
576
577 callback = Instance(Callback, help="""
578 A callback to run in the browser on every selection of a lasso area.
579 The cb_data parameter that is available to the Callback code will contain
580 one LassoSelectTool-specific field:
581
582 :geometry: object containing the coordinates of the lasso area
583 """)
584
585 overlay = Instance(PolyAnnotation, default=DEFAULT_POLY_OVERLAY, help="""
586 A shaded annotation drawn to indicate the selection region.
587 """)
588
589
590 class PolySelectTool(Tap):
591 ''' *toolbar icon*: |poly_select_icon|
592
593 The polygon selection tool allows users to make selections on a
594 Plot by indicating a polygonal region with mouse clicks. Single
595 clicks (or taps) add successive points to the definition of the
596 polygon, and a double click (or tap) indicates the selection
597 region is ready.
598
599 See :ref:`userguide_styling_selected_unselected_glyphs` for information
600 on styling selected and unselected glyphs.
601
602 .. note::
603 Selections can be comprised of multiple regions, even those
604 made by different selection tools. Hold down the <<shift>> key
605 while making a selection to append the new selection to any
606 previous selection that might exist.
607
608 .. |poly_select_icon| image:: /_images/icons/PolygonSelect.png
609 :height: 18pt
610
611 '''
612
613 names = List(String, help="""
614 A list of names to query for. If set, only renderers that
615 have a matching value for their ``name`` attribute will be used.
616 """)
617
618 renderers = List(Instance(Renderer), help="""
619 An explicit list of renderers to hit test against. If unset,
620 defaults to all renderers on a plot.
621 """)
622
623 overlay = Instance(PolyAnnotation, default=DEFAULT_POLY_OVERLAY, help="""
624 A shaded annotation drawn to indicate the selection region.
625 """)
626
627 class HoverTool(Inspection):
628 ''' *toolbar icon*: |hover_icon|
629
630 The hover tool is a passive inspector tool. It is generally on at
631 all times, but can be configured in the inspector's menu associated
632 with the *toolbar icon* shown above.
633
634 By default, the hover tool displays informational tooltips whenever
635 the cursor is directly over a glyph. The data to show comes from the
636 glyph's data source, and what is to be displayed is configurable with
637 the ``tooltips`` attribute that maps display names to columns in the
638 data source, or to special known variables.
639
640 Here is an example of how to configure and use the hover tool::
641
642 # Add tooltip (name, field) pairs to the tool. See below for a
643 # description of possible field values.
644 hover.tooltips = [
645 ("index", "$index"),
646 ("(x,y)", "($x, $y)"),
647 ("radius", "@radius"),
648 ("fill color", "$color[hex, swatch]:fill_color"),
649 ("foo", "@foo"),
650 ("bar", "@bar"),
651 ("baz", "@baz{safe}"),
652 ("total", "@total{$0,0.00}"
653 ]
654
655 You can also supply a ``Callback`` to the HoverTool, to build custom
656 interactions on hover. In this case you may want to turn the tooltips
657 off by setting ``tooltips=None``.
658
659 .. warning::
660
661 Hover tool does not currently work with the following glyphs:
662
663 .. hlist::
664 :columns: 3
665
666 * annulus
667 * arc
668 * bezier
669 * image
670 * image_rgba
671 * image_url
672 * oval
673 * patch
674 * quadratic
675 * ray
676 * text
677
678 .. |hover_icon| image:: /_images/icons/Hover.png
679 :height: 18pt
680
681 '''
682
683 names = List(String, help="""
684 A list of names to query for. If set, only renderers that
685 have a matching value for their ``name`` attribute will be used.
686 """)
687
688 renderers = List(Instance(Renderer), help="""
689 An explicit list of renderers to hit test against. If unset,
690 defaults to all renderers on a plot.
691 """)
692
693 callback = Instance(Callback, help="""
694 A callback to run in the browser whenever the input's value changes. The
695 cb_data parameter that is available to the Callback code will contain two
696 HoverTool specific fields:
697
698 :index: object containing the indices of the hovered points in the data source
699 :geometry: object containing the coordinates of the hover cursor
700 """)
701
702 tooltips = Either(String, List(Tuple(String, String)),
703 default=[
704 ("index","$index"),
705 ("data (x, y)","($x, $y)"),
706 ("canvas (x, y)","($sx, $sy)"),
707 ], help="""
708 The (name, field) pairs describing what the hover tool should
709 display when there is a hit.
710
711 Field names starting with "@" are interpreted as columns on the
712 data source. For instance, "@temp" would look up values to display
713 from the "temp" column of the data source.
714
715 Field names starting with "$" are special, known fields:
716
717 :$index: index of selected point in the data source
718 :$x: x-coordinate under the cursor in data space
719 :$y: y-coordinate under the cursor in data space
720 :$sx: x-coordinate under the cursor in screen (canvas) space
721 :$sy: y-coordinate under the cursor in screen (canvas) space
722 :$color: color data from data source, with the syntax:
723 ``$color[options]:field_name``. The available options
724 are: 'hex' (to display the color as a hex value), and
725 'swatch' to also display a small color swatch.
726
727 Field names that begin with ``@`` are associated with columns in a
728 ``ColumnDataSource``. For instance the field name ``"@price"`` will
729 display values from the ``"price"`` column whenever a hover is triggered.
730 If the hover is for the 17th glyph, then the hover tooltip will
731 correspondingly display the 17th price value.
732
733 Note that if a column name contains spaces, then it must be supplied by
734 surrounding it in curly braces, e.g. ``@{adjusted close}`` will display
735 values from a column named ``"adjusted close"``.
736
737 By default, values for fields (e.g. ``@foo``) are displayed in a basic
738 numeric format. However it is possible to control the formatting of values
739 more precisely. Fields can be modified by appending a format specifier to
740 the end in curly braces. Some examples are below.
741
742 .. code-block:: python
743
744 "@foo{0,0.000}" # formats 10000.1234 as: 10,000.123
745
746 "@foo{(.00)}" # formats -10000.1234 as: (10000.123)
747
748 "@foo{($ 0.00 a)}" # formats 1230974 as: $ 1.23 m
749
750 Specifying a format ``{safe}`` after a field name will override automatic
751 escaping of the tooltip data source. Any HTML tags in the data will
752 be rendered as HTML in the resulting HoverTool output. See
753 :ref:`custom_hover_tooltip` for a more detailed example.
754
755 ``None`` is also a valid value for tooltips. This turns off the
756 rendering of tooltips. This is mostly useful when supplying other
757 actions on hover via the callback property.
758
759 .. note::
760 The tooltips attribute can also be configured with a mapping type,
761 e.g. ``dict`` or ``OrderedDict``. However, if a ``dict`` is used,
762 the visual presentation order is unspecified.
763
764 """).accepts(Dict(String, String), lambda d: list(d.items()))
765
766 formatters = Dict(String, Enum(TooltipFieldFormatter), default=lambda: dict(), help="""
767 Specify the formatting scheme for data source columns, e.g.
768
769 .. code-block:: python
770
771 tool.formatters = dict(date="datetime")
772
773 will cause format specifications for the "date" column to be interpreted
774 according to the "datetime" formatting scheme. The following schemes are
775 available:
776
777 :``"numeral"``:
778 Provides a wide variety of formats for numbers, currency, bytes, times,
779 and percentages. The full set of formats can be found in the
780 |NumeralTickFormatter| reference documentation.
781
782 :``"datetime"``:
783 Provides formats for date and time values. The full set of formats is
784 listed in the |DatetimeTickFormatter| reference documentation.
785
786 :``"printf"``:
787 Provides formats similar to C-style "printf" type specifiers. See the
788 |PrintfTickFormatter| reference documentation for complete details.
789
790 If no formatter is specified for a column name, the default ``"numeral"``
791 formatter is assumed.
792
793 .. |NumeralTickFormatter| replace:: :class:`~bokeh.models.formatters.NumeralTickFormatter`
794 .. |DatetimeTickFormatter| replace:: :class:`~bokeh.models.formatters.DatetimeTickFormatter`
795 .. |PrintfTickFormatter| replace:: :class:`~bokeh.models.formatters.PrintfTickFormatter`
796
797 """)
798
799 mode = Enum("mouse", "hline", "vline", help="""
800 Whether to consider hover pointer as a point (x/y values), or a
801 span on h or v directions.
802 """)
803
804 point_policy = Enum("snap_to_data", "follow_mouse", "none", help="""
805 Whether the tooltip position should snap to the "center" (or other anchor)
806 position of the associated glyph, or always follow the current mouse cursor
807 position.
808 """)
809
810 line_policy = Enum("prev", "next", "nearest", "interp", "none",
811 default="nearest", help="""
812 When showing tooltips for lines, designates whether the tooltip position
813 should be the "previous" or "next" points on the line, the "nearest" point
814 to the current mouse position, or "interpolate" along the line to the
815 current mouse position.
816 """)
817
818 anchor = Enum(Anchor, default="center", help="""
819 If point policy is set to `"snap_to_data"`, `anchor` defines the attachment
820 point of a tooltip. The default is to attach to the center of a glyph.
821 """).accepts(Enum(DeprecatedAnchor), accept_left_right_center)
822
823 attachment = Enum("horizontal", "vertical", help="""
824 Whether tooltip's arrow should appear in the horizontal or vertical dimension.
825 """)
826
827 show_arrow = Bool(default=True, help="""
828 Whether the tooltip's arrow should be shown.
829 """)
830
831 DEFAULT_HELP_TIP = "Click the question mark to learn more about Bokeh plot tools."
832 DEFAULT_HELP_URL = "http://bokeh.pydata.org/en/latest/docs/user_guide/tools.html#built-in-tools"
833
834 class HelpTool(Action):
835 ''' A button tool to provide a "help" link to users.
836
837 The hover text can be customized through the ``help_tooltip`` attribute
838 and the redirect site overridden as well.
839
840 '''
841
842 help_tooltip = String(default=DEFAULT_HELP_TIP, help="""
843 Tooltip displayed when hovering over the help icon.
844 """)
845
846 redirect = String(default=DEFAULT_HELP_URL, help="""
847 Site to be redirected through upon click.
848 """)
849
850 class UndoTool(Action):
851 ''' *toolbar icon*: |undo_icon|
852
853 The undo tool allows restoring the previous state of the plot.
854
855 .. |undo_icon| image:: /_images/icons/Undo.png
856 :height: 18pt
857
858 '''
859
860 class RedoTool(Action):
861 ''' *toolbar icon*: |redo_icon|
862
863 The redo tool reverses the last action performed by the undo tool.
864
865 .. |redo_icon| image:: /_images/icons/Redo.png
866 :height: 18pt
867
868 '''
869
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bokeh/models/tools.py b/bokeh/models/tools.py
--- a/bokeh/models/tools.py
+++ b/bokeh/models/tools.py
@@ -90,7 +90,11 @@
''' A base class for tools that perform "inspections", e.g. ``HoverTool``.
'''
- pass
+ toggleable = Bool(True, help="""
+ Whether an on/off toggle button should appear in the toolbar for this
+ inpection tool. If ``False``, the viewers of a plot will not be able to
+ toggle the inspector on or off using the toolbar.
+ """)
@abstract
class ToolbarBase(LayoutDOM):
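Assuming the patch above is applied, a plausible usage sketch of the new ``toggleable`` flag (illustrative only, not part of the original record) might look like this:

```python
from bokeh.plotting import figure, show
from bokeh.models import HoverTool

p = figure(tools="pan,wheel_zoom")
p.line([1, 2, 3], [4, 5, 6])

# The inspector stays active, but with toggleable=False no on/off entry
# for it is rendered in the toolbar's inspectors menu.
p.add_tools(HoverTool(tooltips=[("x", "$x"), ("y", "$y")], toggleable=False))

show(p)
```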
| {"golden_diff": "diff --git a/bokeh/models/tools.py b/bokeh/models/tools.py\n--- a/bokeh/models/tools.py\n+++ b/bokeh/models/tools.py\n@@ -90,7 +90,11 @@\n ''' A base class for tools that perform \"inspections\", e.g. ``HoverTool``.\n \n '''\n- pass\n+ toggleable = Bool(True, help=\"\"\"\n+ Whether an on/off toggle button should appear in the toolbar for this\n+ inpection tool. If ``False``, the viewers of a plot will not be able to\n+ toggle the inspector on or off using the toolbar.\n+ \"\"\")\n \n @abstract\n class ToolbarBase(LayoutDOM):\n", "issue": "remove the hover menu item, and keep the hover function working\n\r\nFeature request.\r\n\r\nI would like to remove the hover menu item, and keep the hover function working\r\n\r\n\n", "before_files": [{"content": "''' Bokeh comes with a number of interactive tools.\n\nThere are five types of tool interactions:\n\n.. hlist::\n :columns: 5\n\n * Pan/Drag\n * Click/Tap\n * Scroll/Pinch\n * Actions\n * Inspectors\n\nFor the first three comprise the category of gesture tools, and only\none tool for each gesture can be active at any given time. The active\ntool is indicated on the toolbar by a highlight next to to the tool.\nActions are immediate or modal operations that are only activated when\ntheir button in the toolbar is pressed. Inspectors are passive tools that\nmerely report information or annotate the plot in some way, and may\nalways be active regardless of what other tools are currently active.\n\n'''\nfrom __future__ import absolute_import\n\nfrom ..core.enums import accept_left_right_center, Anchor, DeprecatedAnchor, Dimension, Dimensions, Location, TooltipFieldFormatter\nfrom ..core.has_props import abstract\nfrom ..core.properties import (\n Any, Auto, Bool, Color, Dict, Either, Enum, Float, Percent, Instance, List,\n Override, Seq, String, Tuple\n)\nfrom ..model import Model\n\nfrom .annotations import BoxAnnotation, PolyAnnotation\nfrom .callbacks import Callback\nfrom .renderers import Renderer\nfrom .layouts import Box, LayoutDOM\n\nclass ToolEvents(Model):\n ''' A class for reporting tools geometries from BokehJS.\n\n .. warning::\n This class will be superceded by a new general events system in the\n near future.\n\n '''\n\n geometries = List(Dict(String, Any))\n\n@abstract\nclass Tool(Model):\n ''' A base class for all interactive tool types.\n\n '''\n\n plot = Instance(\".models.plots.Plot\", help=\"\"\"\n The Plot that this tool will act on.\n \"\"\")\n\n@abstract\nclass Action(Tool):\n ''' A base class for tools that are buttons in the toolbar.\n\n '''\n pass\n\n@abstract\nclass Drag(Tool):\n ''' A base class for tools that respond to drag events.\n\n '''\n pass\n\n@abstract\nclass Scroll(Tool):\n ''' A base class for tools that respond to scroll events.\n\n '''\n pass\n\n@abstract\nclass Tap(Tool):\n ''' A base class for tools that respond to tap/click events.\n\n '''\n pass\n\n\n@abstract\nclass Inspection(Tool):\n ''' A base class for tools that perform \"inspections\", e.g. ``HoverTool``.\n\n '''\n pass\n\n@abstract\nclass ToolbarBase(LayoutDOM):\n ''' A base class for different toolbars.\n\n '''\n\n logo = Enum(\"normal\", \"grey\", help=\"\"\"\n What version of the Bokeh logo to display on the toolbar. If\n set to None, no logo will be displayed.\n \"\"\")\n\n tools = List(Instance(Tool), help=\"\"\"\n A list of tools to add to the plot.\n \"\"\")\n\n # This is an odd case. The sizing is custom handled. 
In the future we will\n # probably set it as `stretch_width` or `stretch_height` depending on its\n # orientation.\n sizing_mode = Override(default=None)\n\n\nclass Toolbar(ToolbarBase):\n ''' Collect tools to display for a single plot.\n\n '''\n\n active_drag = Either(Auto, Instance(Drag), help=\"\"\"\n Specify a drag tool to be active when the plot is displayed.\n \"\"\")\n\n active_inspect = Either(Auto, Instance(Inspection), Seq(Instance(Inspection)), help=\"\"\"\n Specify an inspection tool or sequence of inspection tools to be active when\n the plot is displayed.\n \"\"\")\n\n active_scroll = Either(Auto, Instance(Scroll), help=\"\"\"\n Specify a scroll/pinch tool to be active when the plot is displayed.\n \"\"\")\n\n active_tap = Either(Auto, Instance(Tap), help=\"\"\"\n Specify a tap/click tool to be active when the plot is displayed.\n \"\"\")\n\n\nclass ToolbarBox(Box):\n ''' A layoutable toolbar that can accept the tools of multiple plots, and\n can merge the tools into a single button for convenience.\n\n '''\n def _check_empty_layout(self):\n # Overriding the children check from Box. As toolbarbox's children\n # are normally set JS side.\n return None\n\n toolbar_location = Enum(Location, default='right', help=\"\"\"\n Should the toolbar be presented as if it was stuck to the `above`, `right`, `left`, `below`\n edge of a plot. Default is `right`.\n \"\"\")\n\n tools = List(Instance(Tool), help=\"\"\"\n A list of tools to add to the plot.\n \"\"\")\n\n merge_tools = Bool(default=True, help=\"\"\"\n Merge all the tools together so there is one tool to control all the plots.\n \"\"\")\n\n logo = Enum(\"normal\", \"grey\", help=\"\"\"\n What version of the Bokeh logo to display on the toolbar. If\n set to None, no logo will be displayed.\n \"\"\")\n\n\nclass PanTool(Drag):\n ''' *toolbar icon*: |pan_icon|\n\n The pan tool allows the user to pan a Plot by left-dragging\n a mouse, or on touch devices by dragging a finger or stylus, across\n the plot region.\n\n The pan tool also activates the border regions of a Plot for \"single\n axis\" panning. For instance, dragging in the vertical border or axis\n will effect a pan in the vertical direction only, with the horizontal\n dimension kept fixed.\n\n .. |pan_icon| image:: /_images/icons/Pan.png\n :height: 18pt\n\n '''\n\n dimensions = Enum(Dimensions, default=\"both\", help=\"\"\"\n Which dimensions the pan tool is constrained to act in. By default\n the pan tool will pan in any dimension, but can be configured to only\n pan horizontally across the width of the plot, or vertically across the\n height of the plot.\n \"\"\")\n\nclass WheelPanTool(Scroll):\n ''' *toolbar icon*: |wheel_pan_icon|\n\n The wheel pan tool allows the user to pan the plot along the configured\n dimension using the scroll wheel.\n\n .. |wheel_pan_icon| image:: /_images/icons/WheelPan.png\n :height: 18pt\n\n '''\n\n dimension = Enum(Dimension, default=\"width\", help=\"\"\"\n Which dimension the wheel pan tool is constrained to act in. By\n default the wheel pan tool will pan the plot along the x-axis.\n \"\"\")\n\n\nclass WheelZoomTool(Scroll):\n ''' *toolbar icon*: |wheel_zoom_icon|\n\n The wheel zoom tool will zoom the plot in and out, centered on the\n current mouse location.\n\n The wheel zoom tool also activates the border regions of a Plot for\n \"single axis\" zooming. For instance, zooming in the vertical border or\n axis will effect a zoom in the vertical direction only, with the\n horizontal dimension kept fixed.\n\n .. 
|wheel_zoom_icon| image:: /_images/icons/WheelZoom.png\n :height: 18pt\n\n '''\n\n dimensions = Enum(Dimensions, default=\"both\", help=\"\"\"\n Which dimensions the wheel zoom tool is constrained to act in. By\n default the wheel zoom tool will zoom in any dimension, but can be\n configured to only zoom horizontally across the width of the plot, or\n vertically across the height of the plot.\n \"\"\")\n\n\nclass SaveTool(Action):\n ''' *toolbar icon*: |save_icon|\n\n The save tool is an action. When activated, the tool opens a download dialog\n which allows to save an image reproduction of the plot in PNG format. If\n automatic download is not support by a web browser, the tool falls back to\n opening the generated image in a new tab or window. User then can manually\n save it by right clicking on the image and choosing \"Save As\" (or similar)\n menu item.\n\n .. |save_icon| image:: /_images/icons/Save.png\n :height: 18pt\n\n '''\n\n\nclass ResetTool(Action):\n ''' *toolbar icon*: |reset_icon|\n\n The reset tool is an action. When activated in the toolbar, the tool\n resets the data bounds of the plot to their values when the plot was\n initially created.\n\n Optionally, the reset tool also resets the plat canvas dimensions to\n their original size\n\n .. |reset_icon| image:: /_images/icons/Reset.png\n :height: 18pt\n\n '''\n\n reset_size = Bool(default=True, help=\"\"\"\n Whether activating the Reset tool should also reset the plot's canvas\n dimensions to their original size.\n \"\"\")\n\n\nclass ResizeTool(Drag):\n ''' *toolbar icon*: |resize_icon|\n\n The resize tool allows the user to left-drag a mouse or drag a finger\n to resize the entire plot area on the screen.\n\n .. |resize_icon| image:: /_images/icons/Resize.png\n :height: 18pt\n\n '''\n\n\nclass TapTool(Tap):\n ''' *toolbar icon*: |tap_select_icon|\n\n The tap selection tool allows the user to select at single points by\n left-clicking a mouse, or tapping with a finger.\n\n See :ref:`userguide_styling_selected_unselected_glyphs` for information\n on styling selected and unselected glyphs.\n\n .. |tap_select_icon| image:: /_images/icons/TapSelect.png\n :height: 18pt\n\n .. note::\n Selections can be comprised of multiple regions, even those\n made by different selection tools. Hold down the <<shift>> key\n while making a selection to append the new selection to any\n previous selection that might exist.\n\n '''\n\n names = List(String, help=\"\"\"\n A list of names to query for. If set, only renderers that\n have a matching value for their ``name`` attribute will be used.\n \"\"\")\n\n renderers = List(Instance(Renderer), help=\"\"\"\n An explicit list of renderers to hit test again. If unset,\n defaults to all renderers on a plot.\n \"\"\")\n\n behavior = Enum(\"select\", \"inspect\", default=\"select\", help=\"\"\"\n This tool can be configured to either make selections or inspections\n on associated data sources. The difference is that selection changes\n propagate across bokeh and other components (e.g. selection glyph)\n will be notified. Inspecions don't act like this, so it's useful to\n configure `callback` when setting `behavior='inspect'`.\n \"\"\")\n\n callback = Instance(Callback, help=\"\"\"\n A client-side action specification, like opening a URL, showing\n a dialog box, etc. See :class:`~bokeh.models.actions.Action` for details.\n \"\"\")\n\n\n\n\nclass CrosshairTool(Inspection):\n ''' *toolbar icon*: |crosshair_icon|\n\n The crosshair tool is a passive inspector tool. 
It is generally on\n at all times, but can be configured in the inspector's menu\n associated with the *toolbar icon* shown above.\n\n The crosshair tool draws a crosshair annotation over the plot,\n centered on the current mouse position. The crosshair tool may be\n configured to draw across only one dimension by setting the\n ``dimension`` property to only ``width`` or ``height``.\n\n .. |crosshair_icon| image:: /_images/icons/Crosshair.png\n :height: 18pt\n\n '''\n\n dimensions = Enum(Dimensions, default=\"both\", help=\"\"\"\n Which dimensions the crosshair tool is to track. By default, both a\n vertical and horizontal line will be drawn. If only \"width\" is supplied,\n only a horizontal line will be drawn. If only \"height\" is supplied,\n only a vertical line will be drawn.\n \"\"\")\n\n line_color = Color(default=\"black\", help=\"\"\"\n A color to use to stroke paths with.\n\n Acceptable values are:\n\n - any of the 147 named `CSS colors`_, e.g ``'green'``, ``'indigo'``\n - an RGB(A) hex value, e.g., ``'#FF0000'``, ``'#44444444'``\n - a 3-tuple of integers (r,g,b) between 0 and 255\n - a 4-tuple of (r,g,b,a) where r,g,b are integers between 0..255 and a is between 0..1\n\n .. _CSS colors: http://www.w3schools.com/cssref/css_colornames.asp\n\n \"\"\")\n\n line_width = Float(default=1, help=\"\"\"\n Stroke width in units of pixels.\n \"\"\")\n\n line_alpha = Float(default=1.0, help=\"\"\"\n An alpha value to use to stroke paths with.\n\n Acceptable values are floating point numbers between 0 (transparent)\n and 1 (opaque).\n\n \"\"\")\n\nDEFAULT_BOX_OVERLAY = lambda: BoxAnnotation(\n level=\"overlay\",\n render_mode=\"css\",\n top_units=\"screen\",\n left_units=\"screen\",\n bottom_units=\"screen\",\n right_units=\"screen\",\n fill_color=\"lightgrey\",\n fill_alpha=0.5,\n line_color=\"black\",\n line_alpha=1.0,\n line_width=2,\n line_dash=[4, 4]\n)\n\nclass BoxZoomTool(Drag):\n ''' *toolbar icon*: |box_zoom_icon|\n\n The box zoom tool allows users to define a rectangular\n region of a Plot to zoom to by dragging he mouse or a\n finger over the plot region. The end of the drag\n event indicates the selection region is ready.\n\n .. |box_zoom_icon| image:: /_images/icons/BoxZoom.png\n :height: 18pt\n\n '''\n\n dimensions = Enum(Dimensions, default=\"both\", help=\"\"\"\n Which dimensions the zoom box is to be free in. By default,\n users may freely draw zoom boxes with any dimensions. If only\n \"width\" is supplied, the box will be constrained to span the entire\n vertical space of the plot, only the horizontal dimension can be\n controlled. If only \"height\" is supplied, the box will be constrained\n to span the entire horizontal space of the plot, and the vertical\n dimension can be controlled.\n \"\"\")\n\n overlay = Instance(BoxAnnotation, default=DEFAULT_BOX_OVERLAY, help=\"\"\"\n A shaded annotation drawn to indicate the selection region.\n \"\"\")\n\n match_aspect = Bool(default=False, help=\"\"\"\n Whether the box zoom region should be restricted to have the same\n aspect ratio as the plot region.\n\n .. note::\n If the tool is restricted to one dimension, this value has\n no effect.\n\n \"\"\")\n\nclass ZoomInTool(Action):\n ''' *toolbar icon*: |zoom_in_icon|\n\n The zoom-in tool allows users to click a button to zoom in\n by a fixed amount.\n\n .. 
|zoom_in_icon| image:: /_images/icons/ZoomIn.png\n :height: 18pt\n\n '''\n # TODO ZoomInTool dimensions should probably be constrained to be the same as ZoomOutTool\n dimensions = Enum(Dimensions, default=\"both\", help=\"\"\"\n Which dimensions the zoom-in tool is constrained to act in. By\n default the zoom-in zoom tool will zoom in any dimension, but can be\n configured to only zoom horizontally across the width of the plot, or\n vertically across the height of the plot.\n \"\"\")\n\n factor = Percent(default=0.1, help=\"\"\"\n Percentage to zoom for each click of the zoom-in tool.\n \"\"\")\n\nclass ZoomOutTool(Action):\n ''' *toolbar icon*: |zoom_out_icon|\n\n The zoom-out tool allows users to click a button to zoom out\n by a fixed amount.\n\n .. |zoom_out_icon| image:: /_images/icons/ZoomOut.png\n :height: 18pt\n\n '''\n dimensions = Enum(Dimensions, default=\"both\", help=\"\"\"\n Which dimensions the zoom-out tool is constrained to act in. By\n default the zoom-out tool will zoom in any dimension, but can be\n configured to only zoom horizontally across the width of the plot, or\n vertically across the height of the plot.\n \"\"\")\n\n factor = Percent(default=0.1, help=\"\"\"\n Percentage to zoom for each click of the zoom-in tool.\n \"\"\")\n\n\nclass BoxSelectTool(Drag):\n ''' *toolbar icon*: |box_select_icon|\n\n The box selection tool allows users to make selections on a\n Plot by indicating a rectangular region by dragging the\n mouse or a finger over the plot region. The end of the drag\n event indicates the selection region is ready.\n\n See :ref:`userguide_styling_selected_unselected_glyphs` for information\n on styling selected and unselected glyphs.\n\n\n .. |box_select_icon| image:: /_images/icons/BoxSelect.png\n :height: 18pt\n\n '''\n\n names = List(String, help=\"\"\"\n A list of names to query for. If set, only renderers that\n have a matching value for their ``name`` attribute will be used.\n \"\"\")\n\n renderers = List(Instance(Renderer), help=\"\"\"\n An explicit list of renderers to hit test again. If unset,\n defaults to all renderers on a plot.\n \"\"\")\n\n select_every_mousemove = Bool(False, help=\"\"\"\n Whether a selection computation should happen on every mouse\n event, or only once, when the selection region is completed. Default: False\n \"\"\")\n\n dimensions = Enum(Dimensions, default=\"both\", help=\"\"\"\n Which dimensions the box selection is to be free in. By default,\n users may freely draw selections boxes with any dimensions. If only\n \"width\" is supplied, the box will be constrained to span the entire\n vertical space of the plot, only the horizontal dimension can be\n controlled. 
If only \"height\" is supplied, the box will be constrained\n to span the entire horizontal space of the plot, and the vertical\n dimension can be controlled.\n \"\"\")\n\n callback = Instance(Callback, help=\"\"\"\n A callback to run in the browser on completion of drawing a selection box.\n The cb_data parameter that is available to the Callback code will contain\n one BoxSelectTool-specific field:\n\n :geometry: object containing the coordinates of the selection box\n \"\"\")\n\n overlay = Instance(BoxAnnotation, default=DEFAULT_BOX_OVERLAY, help=\"\"\"\n A shaded annotation drawn to indicate the selection region.\n \"\"\")\n\nDEFAULT_POLY_OVERLAY = lambda: PolyAnnotation(\n level=\"overlay\",\n xs_units=\"screen\",\n ys_units=\"screen\",\n fill_color=\"lightgrey\",\n fill_alpha=0.5,\n line_color=\"black\",\n line_alpha=1.0,\n line_width=2,\n line_dash=[4, 4]\n)\n\nclass LassoSelectTool(Drag):\n ''' *toolbar icon*: |lasso_select_icon|\n\n The lasso selection tool allows users to make selections on a\n Plot by indicating a free-drawn \"lasso\" region by dragging the\n mouse or a finger over the plot region. The end of the drag\n event indicates the selection region is ready.\n\n See :ref:`userguide_styling_selected_unselected_glyphs` for information\n on styling selected and unselected glyphs.\n\n .. note::\n Selections can be comprised of multiple regions, even those\n made by different selection tools. Hold down the <<shift>> key\n while making a selection to append the new selection to any\n previous selection that might exist.\n\n .. |lasso_select_icon| image:: /_images/icons/LassoSelect.png\n :height: 18pt\n\n '''\n\n names = List(String, help=\"\"\"\n A list of names to query for. If set, only renderers that\n have a matching value for their ``name`` attribute will be used.\n \"\"\")\n\n renderers = List(Instance(Renderer), help=\"\"\"\n An explicit list of renderers to hit test again. If unset,\n defaults to all renderers on a plot.\n \"\"\")\n\n select_every_mousemove = Bool(True, help=\"\"\"\n Whether a selection computation should happen on every mouse\n event, or only once, when the selection region is completed. Default: True\n \"\"\")\n\n callback = Instance(Callback, help=\"\"\"\n A callback to run in the browser on every selection of a lasso area.\n The cb_data parameter that is available to the Callback code will contain\n one LassoSelectTool-specific field:\n\n :geometry: object containing the coordinates of the lasso area\n \"\"\")\n\n overlay = Instance(PolyAnnotation, default=DEFAULT_POLY_OVERLAY, help=\"\"\"\n A shaded annotation drawn to indicate the selection region.\n \"\"\")\n\n\nclass PolySelectTool(Tap):\n ''' *toolbar icon*: |poly_select_icon|\n\n The polygon selection tool allows users to make selections on a\n Plot by indicating a polygonal region with mouse clicks. single\n clicks (or taps) add successive points to the definition of the\n polygon, and a double click (or tap) indicates the selection\n region is ready.\n\n See :ref:`userguide_styling_selected_unselected_glyphs` for information\n on styling selected and unselected glyphs.\n\n .. note::\n Selections can be comprised of multiple regions, even those\n made by different selection tools. Hold down the <<shift>> key\n while making a selection to append the new selection to any\n previous selection that might exist.\n\n .. |poly_select_icon| image:: /_images/icons/PolygonSelect.png\n :height: 18pt\n\n '''\n\n names = List(String, help=\"\"\"\n A list of names to query for. 
If set, only renderers that\n have a matching value for their ``name`` attribute will be used.\n \"\"\")\n\n renderers = List(Instance(Renderer), help=\"\"\"\n An explicit list of renderers to hit test again. If unset,\n defaults to all renderers on a plot.\n \"\"\")\n\n overlay = Instance(PolyAnnotation, default=DEFAULT_POLY_OVERLAY, help=\"\"\"\n A shaded annotation drawn to indicate the selection region.\n \"\"\")\n\nclass HoverTool(Inspection):\n ''' *toolbar icon*: |crosshair_icon|\n\n The hover tool is a passive inspector tool. It is generally on at\n all times, but can be configured in the inspector's menu associated\n with the *toolbar icon* shown above.\n\n By default, the hover tool displays informational tooltips whenever\n the cursor is directly over a glyph. The data to show comes from the\n glyph's data source, and what is to be displayed is configurable with\n the ``tooltips`` attribute that maps display names to columns in the\n data source, or to special known variables.\n\n Here is an example of how to configure and use the hover tool::\n\n # Add tooltip (name, field) pairs to the tool. See below for a\n # description of possible field values.\n hover.tooltips = [\n (\"index\", \"$index\"),\n (\"(x,y)\", \"($x, $y)\"),\n (\"radius\", \"@radius\"),\n (\"fill color\", \"$color[hex, swatch]:fill_color\"),\n (\"foo\", \"@foo\"),\n (\"bar\", \"@bar\"),\n (\"baz\", \"@baz{safe}\"),\n (\"total\", \"@total{$0,0.00}\"\n ]\n\n You can also supply a ``Callback`` to the HoverTool, to build custom\n interactions on hover. In this case you may want to turn the tooltips\n off by setting ``tooltips=None``.\n\n .. warning::\n\n Hover tool does not currently work with the following glyphs:\n\n .. hlist::\n :columns: 3\n\n * annulus\n * arc\n * bezier\n * image\n * image_rgba\n * image_url\n * oval\n * patch\n * quadratic\n * ray\n * text\n\n .. |hover_icon| image:: /_images/icons/Hover.png\n :height: 18pt\n\n '''\n\n names = List(String, help=\"\"\"\n A list of names to query for. If set, only renderers that\n have a matching value for their ``name`` attribute will be used.\n \"\"\")\n\n renderers = List(Instance(Renderer), help=\"\"\"\n An explicit list of renderers to hit test again. If unset,\n defaults to all renderers on a plot.\n \"\"\")\n\n callback = Instance(Callback, help=\"\"\"\n A callback to run in the browser whenever the input's value changes. The\n cb_data parameter that is available to the Callback code will contain two\n HoverTool specific fields:\n\n :index: object containing the indices of the hovered points in the data source\n :geometry: object containing the coordinates of the hover cursor\n \"\"\")\n\n tooltips = Either(String, List(Tuple(String, String)),\n default=[\n (\"index\",\"$index\"),\n (\"data (x, y)\",\"($x, $y)\"),\n (\"canvas (x, y)\",\"($sx, $sy)\"),\n ], help=\"\"\"\n The (name, field) pairs describing what the hover tool should\n display when there is a hit.\n\n Field names starting with \"@\" are interpreted as columns on the\n data source. 
For instance, \"@temp\" would look up values to display\n from the \"temp\" column of the data source.\n\n Field names starting with \"$\" are special, known fields:\n\n :$index: index of selected point in the data source\n :$x: x-coordinate under the cursor in data space\n :$y: y-coordinate under the cursor in data space\n :$sx: x-coordinate under the cursor in screen (canvas) space\n :$sy: y-coordinate under the cursor in screen (canvas) space\n :$color: color data from data source, with the syntax:\n ``$color[options]:field_name``. The available options\n are: 'hex' (to display the color as a hex value), and\n 'swatch' to also display a small color swatch.\n\n Field names that begin with ``@`` are associated with columns in a\n ``ColumnDataSource``. For instance the field name ``\"@price\"`` will\n display values from the ``\"price\"`` column whenever a hover is triggered.\n If the hover is for the 17th glyph, then the hover tooltip will\n correspondingly display the 17th price value.\n\n Note that if a column name contains spaces, the it must be supplied by\n surrounding it in curly braces, e.g. ``@{adjusted close}`` will display\n values from a column named ``\"adjusted close\"``.\n\n By default, values for fields (e.g. ``@foo``) are displayed in a basic\n numeric format. However it is possible to control the formatting of values\n more precisely. Fields can be modified by appending a format specified to\n the end in curly braces. Some examples are below.\n\n .. code-block:: python\n\n \"@foo{0,0.000}\" # formats 10000.1234 as: 10,000.123\n\n \"@foo{(.00)}\" # formats -10000.1234 as: (10000.123)\n\n \"@foo{($ 0.00 a)}\" # formats 1230974 as: $ 1.23 m\n\n Specifying a format ``{safe}`` after a field name will override automatic\n escaping of the tooltip data source. Any HTML tags in the data tags will\n be rendered as HTML in the resulting HoverTool output. See\n :ref:`custom_hover_tooltip` for a more detailed example.\n\n ``None`` is also a valid value for tooltips. This turns off the\n rendering of tooltips. This is mostly useful when supplying other\n actions on hover via the callback property.\n\n .. note::\n The tooltips attribute can also be configured with a mapping type,\n e.g. ``dict`` or ``OrderedDict``. However, if a ``dict`` is used,\n the visual presentation order is unspecified.\n\n \"\"\").accepts(Dict(String, String), lambda d: list(d.items()))\n\n formatters = Dict(String, Enum(TooltipFieldFormatter), default=lambda: dict(), help=\"\"\"\n Specify the formatting scheme for data source columns, e.g.\n\n .. code-block:: python\n\n tool.formatters = dict(date=\"datetime\")\n\n will cause format specifications for the \"date\" column to be interpreted\n according to the \"datetime\" formatting scheme. The following schemed are\n available:\n\n :``\"numeral\"``:\n Provides a wide variety of formats for numbers, currency, bytes, times,\n and percentages. The full set of formats can be found in the\n |NumeralTickFormatter| reference documentation.\n\n :``\"datetime\"``:\n Provides formats for date and time values. The full set of formats is\n listed in the |DatetimeTickFormatter| reference documentation.\n\n :``\"printf\"``:\n Provides formats similar to C-style \"printf\" type specifiers. See the\n |PrintfTickFormatter| reference documentation for complete details.\n\n If no formatter is specified for a column name, the default ``\"numeral\"``\n formatter is assumed.\n\n .. |NumeralTickFormatter| replace:: :class:`~bokeh.models.formatters.NumeralTickFormatter`\n .. 
|DatetimeTickFormatter| replace:: :class:`~bokeh.models.formatters.DatetimeTickFormatter`\n .. |PrintfTickFormatter| replace:: :class:`~bokeh.models.formatters.PrintfTickFormatter`\n\n \"\"\")\n\n mode = Enum(\"mouse\", \"hline\", \"vline\", help=\"\"\"\n Whether to consider hover pointer as a point (x/y values), or a\n span on h or v directions.\n \"\"\")\n\n point_policy = Enum(\"snap_to_data\", \"follow_mouse\", \"none\", help=\"\"\"\n Whether the tooltip position should snap to the \"center\" (or other anchor)\n position of the associated glyph, or always follow the current mouse cursor\n position.\n \"\"\")\n\n line_policy = Enum(\"prev\", \"next\", \"nearest\", \"interp\", \"none\",\n default=\"nearest\", help=\"\"\"\n When showing tooltips for lines, designates whether the tooltip position\n should be the \"previous\" or \"next\" points on the line, the \"nearest\" point\n to the current mouse position, or \"interpolate\" along the line to the\n current mouse position.\n \"\"\")\n\n anchor = Enum(Anchor, default=\"center\", help=\"\"\"\n If point policy is set to `\"snap_to_data\"`, `anchor` defines the attachment\n point of a tooltip. The default is to attach to the center of a glyph.\n \"\"\").accepts(Enum(DeprecatedAnchor), accept_left_right_center)\n\n attachment = Enum(\"horizontal\", \"vertical\", help=\"\"\"\n Whether tooltip's arrow should appear in the horizontal or vertical dimension.\n \"\"\")\n\n show_arrow = Bool(default=True, help=\"\"\"\n Whether tooltip's arrow should be showed.\n \"\"\")\n\nDEFAULT_HELP_TIP = \"Click the question mark to learn more about Bokeh plot tools.\"\nDEFAULT_HELP_URL = \"http://bokeh.pydata.org/en/latest/docs/user_guide/tools.html#built-in-tools\"\n\nclass HelpTool(Action):\n ''' A button tool to provide a \"help\" link to users.\n\n The hover text can be customized through the ``help_tooltip`` attribute\n and the redirect site overridden as well.\n\n '''\n\n help_tooltip = String(default=DEFAULT_HELP_TIP, help=\"\"\"\n Tooltip displayed when hovering over the help icon.\n \"\"\")\n\n redirect = String(default=DEFAULT_HELP_URL, help=\"\"\"\n Site to be redirected through upon click.\n \"\"\")\n\nclass UndoTool(Action):\n ''' *toolbar icon*: |undo_icon|\n\n Undo tool allows to restore previous state of the plot.\n\n .. |undo_icon| image:: /_images/icons/Undo.png\n :height: 18pt\n\n '''\n\nclass RedoTool(Action):\n ''' *toolbar icon*: |redo_icon|\n\n Redo tool reverses the last action performed by undo tool.\n\n .. |redo_icon| image:: /_images/icons/Redo.png\n :height: 18pt\n\n '''\n", "path": "bokeh/models/tools.py"}], "after_files": [{"content": "''' Bokeh comes with a number of interactive tools.\n\nThere are five types of tool interactions:\n\n.. hlist::\n :columns: 5\n\n * Pan/Drag\n * Click/Tap\n * Scroll/Pinch\n * Actions\n * Inspectors\n\nFor the first three comprise the category of gesture tools, and only\none tool for each gesture can be active at any given time. The active\ntool is indicated on the toolbar by a highlight next to to the tool.\nActions are immediate or modal operations that are only activated when\ntheir button in the toolbar is pressed. 
Inspectors are passive tools that\nmerely report information or annotate the plot in some way, and may\nalways be active regardless of what other tools are currently active.\n\n'''\nfrom __future__ import absolute_import\n\nfrom ..core.enums import accept_left_right_center, Anchor, DeprecatedAnchor, Dimension, Dimensions, Location, TooltipFieldFormatter\nfrom ..core.has_props import abstract\nfrom ..core.properties import (\n Any, Auto, Bool, Color, Dict, Either, Enum, Float, Percent, Instance, List,\n Override, Seq, String, Tuple\n)\nfrom ..model import Model\n\nfrom .annotations import BoxAnnotation, PolyAnnotation\nfrom .callbacks import Callback\nfrom .renderers import Renderer\nfrom .layouts import Box, LayoutDOM\n\nclass ToolEvents(Model):\n ''' A class for reporting tools geometries from BokehJS.\n\n .. warning::\n This class will be superceded by a new general events system in the\n near future.\n\n '''\n\n geometries = List(Dict(String, Any))\n\n@abstract\nclass Tool(Model):\n ''' A base class for all interactive tool types.\n\n '''\n\n plot = Instance(\".models.plots.Plot\", help=\"\"\"\n The Plot that this tool will act on.\n \"\"\")\n\n@abstract\nclass Action(Tool):\n ''' A base class for tools that are buttons in the toolbar.\n\n '''\n pass\n\n@abstract\nclass Drag(Tool):\n ''' A base class for tools that respond to drag events.\n\n '''\n pass\n\n@abstract\nclass Scroll(Tool):\n ''' A base class for tools that respond to scroll events.\n\n '''\n pass\n\n@abstract\nclass Tap(Tool):\n ''' A base class for tools that respond to tap/click events.\n\n '''\n pass\n\n\n@abstract\nclass Inspection(Tool):\n ''' A base class for tools that perform \"inspections\", e.g. ``HoverTool``.\n\n '''\n toggleable = Bool(True, help=\"\"\"\n Whether an on/off toggle button should appear in the toolbar for this\n inpection tool. If ``False``, the viewers of a plot will not be able to\n toggle the inspector on or off using the toolbar.\n \"\"\")\n\n@abstract\nclass ToolbarBase(LayoutDOM):\n ''' A base class for different toolbars.\n\n '''\n\n logo = Enum(\"normal\", \"grey\", help=\"\"\"\n What version of the Bokeh logo to display on the toolbar. If\n set to None, no logo will be displayed.\n \"\"\")\n\n tools = List(Instance(Tool), help=\"\"\"\n A list of tools to add to the plot.\n \"\"\")\n\n # This is an odd case. The sizing is custom handled. In the future we will\n # probably set it as `stretch_width` or `stretch_height` depending on its\n # orientation.\n sizing_mode = Override(default=None)\n\n\nclass Toolbar(ToolbarBase):\n ''' Collect tools to display for a single plot.\n\n '''\n\n active_drag = Either(Auto, Instance(Drag), help=\"\"\"\n Specify a drag tool to be active when the plot is displayed.\n \"\"\")\n\n active_inspect = Either(Auto, Instance(Inspection), Seq(Instance(Inspection)), help=\"\"\"\n Specify an inspection tool or sequence of inspection tools to be active when\n the plot is displayed.\n \"\"\")\n\n active_scroll = Either(Auto, Instance(Scroll), help=\"\"\"\n Specify a scroll/pinch tool to be active when the plot is displayed.\n \"\"\")\n\n active_tap = Either(Auto, Instance(Tap), help=\"\"\"\n Specify a tap/click tool to be active when the plot is displayed.\n \"\"\")\n\n\nclass ToolbarBox(Box):\n ''' A layoutable toolbar that can accept the tools of multiple plots, and\n can merge the tools into a single button for convenience.\n\n '''\n def _check_empty_layout(self):\n # Overriding the children check from Box. 
As toolbarbox's children\n # are normally set JS side.\n return None\n\n toolbar_location = Enum(Location, default='right', help=\"\"\"\n Should the toolbar be presented as if it was stuck to the `above`, `right`, `left`, `below`\n edge of a plot. Default is `right`.\n \"\"\")\n\n tools = List(Instance(Tool), help=\"\"\"\n A list of tools to add to the plot.\n \"\"\")\n\n merge_tools = Bool(default=True, help=\"\"\"\n Merge all the tools together so there is one tool to control all the plots.\n \"\"\")\n\n logo = Enum(\"normal\", \"grey\", help=\"\"\"\n What version of the Bokeh logo to display on the toolbar. If\n set to None, no logo will be displayed.\n \"\"\")\n\n\nclass PanTool(Drag):\n ''' *toolbar icon*: |pan_icon|\n\n The pan tool allows the user to pan a Plot by left-dragging\n a mouse, or on touch devices by dragging a finger or stylus, across\n the plot region.\n\n The pan tool also activates the border regions of a Plot for \"single\n axis\" panning. For instance, dragging in the vertical border or axis\n will effect a pan in the vertical direction only, with the horizontal\n dimension kept fixed.\n\n .. |pan_icon| image:: /_images/icons/Pan.png\n :height: 18pt\n\n '''\n\n dimensions = Enum(Dimensions, default=\"both\", help=\"\"\"\n Which dimensions the pan tool is constrained to act in. By default\n the pan tool will pan in any dimension, but can be configured to only\n pan horizontally across the width of the plot, or vertically across the\n height of the plot.\n \"\"\")\n\nclass WheelPanTool(Scroll):\n ''' *toolbar icon*: |wheel_pan_icon|\n\n The wheel pan tool allows the user to pan the plot along the configured\n dimension using the scroll wheel.\n\n .. |wheel_pan_icon| image:: /_images/icons/WheelPan.png\n :height: 18pt\n\n '''\n\n dimension = Enum(Dimension, default=\"width\", help=\"\"\"\n Which dimension the wheel pan tool is constrained to act in. By\n default the wheel pan tool will pan the plot along the x-axis.\n \"\"\")\n\n\nclass WheelZoomTool(Scroll):\n ''' *toolbar icon*: |wheel_zoom_icon|\n\n The wheel zoom tool will zoom the plot in and out, centered on the\n current mouse location.\n\n The wheel zoom tool also activates the border regions of a Plot for\n \"single axis\" zooming. For instance, zooming in the vertical border or\n axis will effect a zoom in the vertical direction only, with the\n horizontal dimension kept fixed.\n\n .. |wheel_zoom_icon| image:: /_images/icons/WheelZoom.png\n :height: 18pt\n\n '''\n\n dimensions = Enum(Dimensions, default=\"both\", help=\"\"\"\n Which dimensions the wheel zoom tool is constrained to act in. By\n default the wheel zoom tool will zoom in any dimension, but can be\n configured to only zoom horizontally across the width of the plot, or\n vertically across the height of the plot.\n \"\"\")\n\n\nclass SaveTool(Action):\n ''' *toolbar icon*: |save_icon|\n\n The save tool is an action. When activated, the tool opens a download dialog\n which allows to save an image reproduction of the plot in PNG format. If\n automatic download is not support by a web browser, the tool falls back to\n opening the generated image in a new tab or window. User then can manually\n save it by right clicking on the image and choosing \"Save As\" (or similar)\n menu item.\n\n .. |save_icon| image:: /_images/icons/Save.png\n :height: 18pt\n\n '''\n\n\nclass ResetTool(Action):\n ''' *toolbar icon*: |reset_icon|\n\n The reset tool is an action. 
When activated in the toolbar, the tool\n resets the data bounds of the plot to their values when the plot was\n initially created.\n\n Optionally, the reset tool also resets the plat canvas dimensions to\n their original size\n\n .. |reset_icon| image:: /_images/icons/Reset.png\n :height: 18pt\n\n '''\n\n reset_size = Bool(default=True, help=\"\"\"\n Whether activating the Reset tool should also reset the plot's canvas\n dimensions to their original size.\n \"\"\")\n\n\nclass ResizeTool(Drag):\n ''' *toolbar icon*: |resize_icon|\n\n The resize tool allows the user to left-drag a mouse or drag a finger\n to resize the entire plot area on the screen.\n\n .. |resize_icon| image:: /_images/icons/Resize.png\n :height: 18pt\n\n '''\n\n\nclass TapTool(Tap):\n ''' *toolbar icon*: |tap_select_icon|\n\n The tap selection tool allows the user to select at single points by\n left-clicking a mouse, or tapping with a finger.\n\n See :ref:`userguide_styling_selected_unselected_glyphs` for information\n on styling selected and unselected glyphs.\n\n .. |tap_select_icon| image:: /_images/icons/TapSelect.png\n :height: 18pt\n\n .. note::\n Selections can be comprised of multiple regions, even those\n made by different selection tools. Hold down the <<shift>> key\n while making a selection to append the new selection to any\n previous selection that might exist.\n\n '''\n\n names = List(String, help=\"\"\"\n A list of names to query for. If set, only renderers that\n have a matching value for their ``name`` attribute will be used.\n \"\"\")\n\n renderers = List(Instance(Renderer), help=\"\"\"\n An explicit list of renderers to hit test again. If unset,\n defaults to all renderers on a plot.\n \"\"\")\n\n behavior = Enum(\"select\", \"inspect\", default=\"select\", help=\"\"\"\n This tool can be configured to either make selections or inspections\n on associated data sources. The difference is that selection changes\n propagate across bokeh and other components (e.g. selection glyph)\n will be notified. Inspecions don't act like this, so it's useful to\n configure `callback` when setting `behavior='inspect'`.\n \"\"\")\n\n callback = Instance(Callback, help=\"\"\"\n A client-side action specification, like opening a URL, showing\n a dialog box, etc. See :class:`~bokeh.models.actions.Action` for details.\n \"\"\")\n\n\n\n\nclass CrosshairTool(Inspection):\n ''' *toolbar icon*: |crosshair_icon|\n\n The crosshair tool is a passive inspector tool. It is generally on\n at all times, but can be configured in the inspector's menu\n associated with the *toolbar icon* shown above.\n\n The crosshair tool draws a crosshair annotation over the plot,\n centered on the current mouse position. The crosshair tool may be\n configured to draw across only one dimension by setting the\n ``dimension`` property to only ``width`` or ``height``.\n\n .. |crosshair_icon| image:: /_images/icons/Crosshair.png\n :height: 18pt\n\n '''\n\n dimensions = Enum(Dimensions, default=\"both\", help=\"\"\"\n Which dimensions the crosshair tool is to track. By default, both a\n vertical and horizontal line will be drawn. If only \"width\" is supplied,\n only a horizontal line will be drawn. 
If only \"height\" is supplied,\n only a vertical line will be drawn.\n \"\"\")\n\n line_color = Color(default=\"black\", help=\"\"\"\n A color to use to stroke paths with.\n\n Acceptable values are:\n\n - any of the 147 named `CSS colors`_, e.g ``'green'``, ``'indigo'``\n - an RGB(A) hex value, e.g., ``'#FF0000'``, ``'#44444444'``\n - a 3-tuple of integers (r,g,b) between 0 and 255\n - a 4-tuple of (r,g,b,a) where r,g,b are integers between 0..255 and a is between 0..1\n\n .. _CSS colors: http://www.w3schools.com/cssref/css_colornames.asp\n\n \"\"\")\n\n line_width = Float(default=1, help=\"\"\"\n Stroke width in units of pixels.\n \"\"\")\n\n line_alpha = Float(default=1.0, help=\"\"\"\n An alpha value to use to stroke paths with.\n\n Acceptable values are floating point numbers between 0 (transparent)\n and 1 (opaque).\n\n \"\"\")\n\nDEFAULT_BOX_OVERLAY = lambda: BoxAnnotation(\n level=\"overlay\",\n render_mode=\"css\",\n top_units=\"screen\",\n left_units=\"screen\",\n bottom_units=\"screen\",\n right_units=\"screen\",\n fill_color=\"lightgrey\",\n fill_alpha=0.5,\n line_color=\"black\",\n line_alpha=1.0,\n line_width=2,\n line_dash=[4, 4]\n)\n\nclass BoxZoomTool(Drag):\n ''' *toolbar icon*: |box_zoom_icon|\n\n The box zoom tool allows users to define a rectangular\n region of a Plot to zoom to by dragging he mouse or a\n finger over the plot region. The end of the drag\n event indicates the selection region is ready.\n\n .. |box_zoom_icon| image:: /_images/icons/BoxZoom.png\n :height: 18pt\n\n '''\n\n dimensions = Enum(Dimensions, default=\"both\", help=\"\"\"\n Which dimensions the zoom box is to be free in. By default,\n users may freely draw zoom boxes with any dimensions. If only\n \"width\" is supplied, the box will be constrained to span the entire\n vertical space of the plot, only the horizontal dimension can be\n controlled. If only \"height\" is supplied, the box will be constrained\n to span the entire horizontal space of the plot, and the vertical\n dimension can be controlled.\n \"\"\")\n\n overlay = Instance(BoxAnnotation, default=DEFAULT_BOX_OVERLAY, help=\"\"\"\n A shaded annotation drawn to indicate the selection region.\n \"\"\")\n\n match_aspect = Bool(default=False, help=\"\"\"\n Whether the box zoom region should be restricted to have the same\n aspect ratio as the plot region.\n\n .. note::\n If the tool is restricted to one dimension, this value has\n no effect.\n\n \"\"\")\n\nclass ZoomInTool(Action):\n ''' *toolbar icon*: |zoom_in_icon|\n\n The zoom-in tool allows users to click a button to zoom in\n by a fixed amount.\n\n .. |zoom_in_icon| image:: /_images/icons/ZoomIn.png\n :height: 18pt\n\n '''\n # TODO ZoomInTool dimensions should probably be constrained to be the same as ZoomOutTool\n dimensions = Enum(Dimensions, default=\"both\", help=\"\"\"\n Which dimensions the zoom-in tool is constrained to act in. By\n default the zoom-in zoom tool will zoom in any dimension, but can be\n configured to only zoom horizontally across the width of the plot, or\n vertically across the height of the plot.\n \"\"\")\n\n factor = Percent(default=0.1, help=\"\"\"\n Percentage to zoom for each click of the zoom-in tool.\n \"\"\")\n\nclass ZoomOutTool(Action):\n ''' *toolbar icon*: |zoom_out_icon|\n\n The zoom-out tool allows users to click a button to zoom out\n by a fixed amount.\n\n .. 
|zoom_out_icon| image:: /_images/icons/ZoomOut.png\n :height: 18pt\n\n '''\n dimensions = Enum(Dimensions, default=\"both\", help=\"\"\"\n Which dimensions the zoom-out tool is constrained to act in. By\n default the zoom-out tool will zoom in any dimension, but can be\n configured to only zoom horizontally across the width of the plot, or\n vertically across the height of the plot.\n \"\"\")\n\n factor = Percent(default=0.1, help=\"\"\"\n Percentage to zoom for each click of the zoom-in tool.\n \"\"\")\n\n\nclass BoxSelectTool(Drag):\n ''' *toolbar icon*: |box_select_icon|\n\n The box selection tool allows users to make selections on a\n Plot by indicating a rectangular region by dragging the\n mouse or a finger over the plot region. The end of the drag\n event indicates the selection region is ready.\n\n See :ref:`userguide_styling_selected_unselected_glyphs` for information\n on styling selected and unselected glyphs.\n\n\n .. |box_select_icon| image:: /_images/icons/BoxSelect.png\n :height: 18pt\n\n '''\n\n names = List(String, help=\"\"\"\n A list of names to query for. If set, only renderers that\n have a matching value for their ``name`` attribute will be used.\n \"\"\")\n\n renderers = List(Instance(Renderer), help=\"\"\"\n An explicit list of renderers to hit test again. If unset,\n defaults to all renderers on a plot.\n \"\"\")\n\n select_every_mousemove = Bool(False, help=\"\"\"\n Whether a selection computation should happen on every mouse\n event, or only once, when the selection region is completed. Default: False\n \"\"\")\n\n dimensions = Enum(Dimensions, default=\"both\", help=\"\"\"\n Which dimensions the box selection is to be free in. By default,\n users may freely draw selections boxes with any dimensions. If only\n \"width\" is supplied, the box will be constrained to span the entire\n vertical space of the plot, only the horizontal dimension can be\n controlled. If only \"height\" is supplied, the box will be constrained\n to span the entire horizontal space of the plot, and the vertical\n dimension can be controlled.\n \"\"\")\n\n callback = Instance(Callback, help=\"\"\"\n A callback to run in the browser on completion of drawing a selection box.\n The cb_data parameter that is available to the Callback code will contain\n one BoxSelectTool-specific field:\n\n :geometry: object containing the coordinates of the selection box\n \"\"\")\n\n overlay = Instance(BoxAnnotation, default=DEFAULT_BOX_OVERLAY, help=\"\"\"\n A shaded annotation drawn to indicate the selection region.\n \"\"\")\n\nDEFAULT_POLY_OVERLAY = lambda: PolyAnnotation(\n level=\"overlay\",\n xs_units=\"screen\",\n ys_units=\"screen\",\n fill_color=\"lightgrey\",\n fill_alpha=0.5,\n line_color=\"black\",\n line_alpha=1.0,\n line_width=2,\n line_dash=[4, 4]\n)\n\nclass LassoSelectTool(Drag):\n ''' *toolbar icon*: |lasso_select_icon|\n\n The lasso selection tool allows users to make selections on a\n Plot by indicating a free-drawn \"lasso\" region by dragging the\n mouse or a finger over the plot region. The end of the drag\n event indicates the selection region is ready.\n\n See :ref:`userguide_styling_selected_unselected_glyphs` for information\n on styling selected and unselected glyphs.\n\n .. note::\n Selections can be comprised of multiple regions, even those\n made by different selection tools. Hold down the <<shift>> key\n while making a selection to append the new selection to any\n previous selection that might exist.\n\n .. 
|lasso_select_icon| image:: /_images/icons/LassoSelect.png\n :height: 18pt\n\n '''\n\n names = List(String, help=\"\"\"\n A list of names to query for. If set, only renderers that\n have a matching value for their ``name`` attribute will be used.\n \"\"\")\n\n renderers = List(Instance(Renderer), help=\"\"\"\n An explicit list of renderers to hit test again. If unset,\n defaults to all renderers on a plot.\n \"\"\")\n\n select_every_mousemove = Bool(True, help=\"\"\"\n Whether a selection computation should happen on every mouse\n event, or only once, when the selection region is completed. Default: True\n \"\"\")\n\n callback = Instance(Callback, help=\"\"\"\n A callback to run in the browser on every selection of a lasso area.\n The cb_data parameter that is available to the Callback code will contain\n one LassoSelectTool-specific field:\n\n :geometry: object containing the coordinates of the lasso area\n \"\"\")\n\n overlay = Instance(PolyAnnotation, default=DEFAULT_POLY_OVERLAY, help=\"\"\"\n A shaded annotation drawn to indicate the selection region.\n \"\"\")\n\n\nclass PolySelectTool(Tap):\n ''' *toolbar icon*: |poly_select_icon|\n\n The polygon selection tool allows users to make selections on a\n Plot by indicating a polygonal region with mouse clicks. single\n clicks (or taps) add successive points to the definition of the\n polygon, and a double click (or tap) indicates the selection\n region is ready.\n\n See :ref:`userguide_styling_selected_unselected_glyphs` for information\n on styling selected and unselected glyphs.\n\n .. note::\n Selections can be comprised of multiple regions, even those\n made by different selection tools. Hold down the <<shift>> key\n while making a selection to append the new selection to any\n previous selection that might exist.\n\n .. |poly_select_icon| image:: /_images/icons/PolygonSelect.png\n :height: 18pt\n\n '''\n\n names = List(String, help=\"\"\"\n A list of names to query for. If set, only renderers that\n have a matching value for their ``name`` attribute will be used.\n \"\"\")\n\n renderers = List(Instance(Renderer), help=\"\"\"\n An explicit list of renderers to hit test again. If unset,\n defaults to all renderers on a plot.\n \"\"\")\n\n overlay = Instance(PolyAnnotation, default=DEFAULT_POLY_OVERLAY, help=\"\"\"\n A shaded annotation drawn to indicate the selection region.\n \"\"\")\n\nclass HoverTool(Inspection):\n ''' *toolbar icon*: |crosshair_icon|\n\n The hover tool is a passive inspector tool. It is generally on at\n all times, but can be configured in the inspector's menu associated\n with the *toolbar icon* shown above.\n\n By default, the hover tool displays informational tooltips whenever\n the cursor is directly over a glyph. The data to show comes from the\n glyph's data source, and what is to be displayed is configurable with\n the ``tooltips`` attribute that maps display names to columns in the\n data source, or to special known variables.\n\n Here is an example of how to configure and use the hover tool::\n\n # Add tooltip (name, field) pairs to the tool. See below for a\n # description of possible field values.\n hover.tooltips = [\n (\"index\", \"$index\"),\n (\"(x,y)\", \"($x, $y)\"),\n (\"radius\", \"@radius\"),\n (\"fill color\", \"$color[hex, swatch]:fill_color\"),\n (\"foo\", \"@foo\"),\n (\"bar\", \"@bar\"),\n (\"baz\", \"@baz{safe}\"),\n (\"total\", \"@total{$0,0.00}\"\n ]\n\n You can also supply a ``Callback`` to the HoverTool, to build custom\n interactions on hover. 
In this case you may want to turn the tooltips\n off by setting ``tooltips=None``.\n\n .. warning::\n\n Hover tool does not currently work with the following glyphs:\n\n .. hlist::\n :columns: 3\n\n * annulus\n * arc\n * bezier\n * image\n * image_rgba\n * image_url\n * oval\n * patch\n * quadratic\n * ray\n * text\n\n .. |hover_icon| image:: /_images/icons/Hover.png\n :height: 18pt\n\n '''\n\n names = List(String, help=\"\"\"\n A list of names to query for. If set, only renderers that\n have a matching value for their ``name`` attribute will be used.\n \"\"\")\n\n renderers = List(Instance(Renderer), help=\"\"\"\n An explicit list of renderers to hit test again. If unset,\n defaults to all renderers on a plot.\n \"\"\")\n\n callback = Instance(Callback, help=\"\"\"\n A callback to run in the browser whenever the input's value changes. The\n cb_data parameter that is available to the Callback code will contain two\n HoverTool specific fields:\n\n :index: object containing the indices of the hovered points in the data source\n :geometry: object containing the coordinates of the hover cursor\n \"\"\")\n\n tooltips = Either(String, List(Tuple(String, String)),\n default=[\n (\"index\",\"$index\"),\n (\"data (x, y)\",\"($x, $y)\"),\n (\"canvas (x, y)\",\"($sx, $sy)\"),\n ], help=\"\"\"\n The (name, field) pairs describing what the hover tool should\n display when there is a hit.\n\n Field names starting with \"@\" are interpreted as columns on the\n data source. For instance, \"@temp\" would look up values to display\n from the \"temp\" column of the data source.\n\n Field names starting with \"$\" are special, known fields:\n\n :$index: index of selected point in the data source\n :$x: x-coordinate under the cursor in data space\n :$y: y-coordinate under the cursor in data space\n :$sx: x-coordinate under the cursor in screen (canvas) space\n :$sy: y-coordinate under the cursor in screen (canvas) space\n :$color: color data from data source, with the syntax:\n ``$color[options]:field_name``. The available options\n are: 'hex' (to display the color as a hex value), and\n 'swatch' to also display a small color swatch.\n\n Field names that begin with ``@`` are associated with columns in a\n ``ColumnDataSource``. For instance the field name ``\"@price\"`` will\n display values from the ``\"price\"`` column whenever a hover is triggered.\n If the hover is for the 17th glyph, then the hover tooltip will\n correspondingly display the 17th price value.\n\n Note that if a column name contains spaces, the it must be supplied by\n surrounding it in curly braces, e.g. ``@{adjusted close}`` will display\n values from a column named ``\"adjusted close\"``.\n\n By default, values for fields (e.g. ``@foo``) are displayed in a basic\n numeric format. However it is possible to control the formatting of values\n more precisely. Fields can be modified by appending a format specified to\n the end in curly braces. Some examples are below.\n\n .. code-block:: python\n\n \"@foo{0,0.000}\" # formats 10000.1234 as: 10,000.123\n\n \"@foo{(.00)}\" # formats -10000.1234 as: (10000.123)\n\n \"@foo{($ 0.00 a)}\" # formats 1230974 as: $ 1.23 m\n\n Specifying a format ``{safe}`` after a field name will override automatic\n escaping of the tooltip data source. Any HTML tags in the data tags will\n be rendered as HTML in the resulting HoverTool output. See\n :ref:`custom_hover_tooltip` for a more detailed example.\n\n ``None`` is also a valid value for tooltips. This turns off the\n rendering of tooltips. 
This is mostly useful when supplying other\n actions on hover via the callback property.\n\n .. note::\n The tooltips attribute can also be configured with a mapping type,\n e.g. ``dict`` or ``OrderedDict``. However, if a ``dict`` is used,\n the visual presentation order is unspecified.\n\n \"\"\").accepts(Dict(String, String), lambda d: list(d.items()))\n\n formatters = Dict(String, Enum(TooltipFieldFormatter), default=lambda: dict(), help=\"\"\"\n Specify the formatting scheme for data source columns, e.g.\n\n .. code-block:: python\n\n tool.formatters = dict(date=\"datetime\")\n\n will cause format specifications for the \"date\" column to be interpreted\n according to the \"datetime\" formatting scheme. The following schemed are\n available:\n\n :``\"numeral\"``:\n Provides a wide variety of formats for numbers, currency, bytes, times,\n and percentages. The full set of formats can be found in the\n |NumeralTickFormatter| reference documentation.\n\n :``\"datetime\"``:\n Provides formats for date and time values. The full set of formats is\n listed in the |DatetimeTickFormatter| reference documentation.\n\n :``\"printf\"``:\n Provides formats similar to C-style \"printf\" type specifiers. See the\n |PrintfTickFormatter| reference documentation for complete details.\n\n If no formatter is specified for a column name, the default ``\"numeral\"``\n formatter is assumed.\n\n .. |NumeralTickFormatter| replace:: :class:`~bokeh.models.formatters.NumeralTickFormatter`\n .. |DatetimeTickFormatter| replace:: :class:`~bokeh.models.formatters.DatetimeTickFormatter`\n .. |PrintfTickFormatter| replace:: :class:`~bokeh.models.formatters.PrintfTickFormatter`\n\n \"\"\")\n\n mode = Enum(\"mouse\", \"hline\", \"vline\", help=\"\"\"\n Whether to consider hover pointer as a point (x/y values), or a\n span on h or v directions.\n \"\"\")\n\n point_policy = Enum(\"snap_to_data\", \"follow_mouse\", \"none\", help=\"\"\"\n Whether the tooltip position should snap to the \"center\" (or other anchor)\n position of the associated glyph, or always follow the current mouse cursor\n position.\n \"\"\")\n\n line_policy = Enum(\"prev\", \"next\", \"nearest\", \"interp\", \"none\",\n default=\"nearest\", help=\"\"\"\n When showing tooltips for lines, designates whether the tooltip position\n should be the \"previous\" or \"next\" points on the line, the \"nearest\" point\n to the current mouse position, or \"interpolate\" along the line to the\n current mouse position.\n \"\"\")\n\n anchor = Enum(Anchor, default=\"center\", help=\"\"\"\n If point policy is set to `\"snap_to_data\"`, `anchor` defines the attachment\n point of a tooltip. 
The default is to attach to the center of a glyph.\n \"\"\").accepts(Enum(DeprecatedAnchor), accept_left_right_center)\n\n attachment = Enum(\"horizontal\", \"vertical\", help=\"\"\"\n Whether tooltip's arrow should appear in the horizontal or vertical dimension.\n \"\"\")\n\n show_arrow = Bool(default=True, help=\"\"\"\n Whether tooltip's arrow should be showed.\n \"\"\")\n\nDEFAULT_HELP_TIP = \"Click the question mark to learn more about Bokeh plot tools.\"\nDEFAULT_HELP_URL = \"http://bokeh.pydata.org/en/latest/docs/user_guide/tools.html#built-in-tools\"\n\nclass HelpTool(Action):\n ''' A button tool to provide a \"help\" link to users.\n\n The hover text can be customized through the ``help_tooltip`` attribute\n and the redirect site overridden as well.\n\n '''\n\n help_tooltip = String(default=DEFAULT_HELP_TIP, help=\"\"\"\n Tooltip displayed when hovering over the help icon.\n \"\"\")\n\n redirect = String(default=DEFAULT_HELP_URL, help=\"\"\"\n Site to be redirected through upon click.\n \"\"\")\n\nclass UndoTool(Action):\n ''' *toolbar icon*: |undo_icon|\n\n Undo tool allows to restore previous state of the plot.\n\n .. |undo_icon| image:: /_images/icons/Undo.png\n :height: 18pt\n\n '''\n\nclass RedoTool(Action):\n ''' *toolbar icon*: |redo_icon|\n\n Redo tool reverses the last action performed by undo tool.\n\n .. |redo_icon| image:: /_images/icons/Redo.png\n :height: 18pt\n\n '''\n", "path": "bokeh/models/tools.py"}]} |
gh_patches_debug_1278 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3328 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider longhorn_steakhouse is broken
During the global build at 2021-10-20-14-42-48, spider **longhorn_steakhouse** failed with **0 features** and **0 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/logs/longhorn_steakhouse.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/longhorn_steakhouse.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/longhorn_steakhouse.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/longhorn_steakhouse.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import json
3 import re
4
5 import scrapy
6
7 from locations.items import GeojsonPointItem
8 from locations.hours import OpeningHours
9
10
11 class LongHornSteakhouseSpider(scrapy.Spider):
12 name = "longhorn_steakhouse"
13 item_attributes = {'brand': 'LongHorn Steakhouse', 'brand_wikidata': "Q3259007"}
14 allowed_domains = []
15 start_urls = [
16 'https://www.longhornsteakhouse.com/locations-sitemap.xml',
17 ]
18 custom_settings = {
19 'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',
20 }
21 download_delay = 5
22
23 def parse_hours(self, hours):
24 opening_hours = OpeningHours()
25
26 for hour in hours:
27 day, open_close = hour.split(' ')
28 open_time, close_time = open_close.split('-')
29 opening_hours.add_range(day=day, open_time=open_time, close_time=close_time, time_format='%H:%M')
30 return opening_hours.as_opening_hours()
31
32 def parse(self, response):
33 response.selector.remove_namespaces()
34 urls = response.xpath('//url/loc/text()').extract()
35 for url in urls:
36 yield scrapy.Request(url=url, callback=self.parse_store)
37
38 def parse_store(self, response):
39 store_data = response.xpath('//script[@type="application/ld+json" and contains(text(), "streetAddress")]/text()').extract_first()
40 if store_data:
41 data = json.loads(store_data)
42 ref = re.search(r'.+/(.+?)/?(?:\.html|$)', response.url).group(1)
43
44 # Handle store pages that are missing the application/ld+json data
45 addr, city_state_zip, phone = response.xpath('//p[@id="info-link-webhead"]/text()').extract()
46 city, state, postcode = re.search(r'(.*?),\s([A-Z]{2})\s([\d-]+)$', city_state_zip).groups()
47
48 properties = {
49 'name': data.get("name") or response.xpath('//h1[@class="style_h1"]/text()').extract_first().strip(),
50 'ref': data["branchCode"] or ref,
51 'addr_full': data["address"]["streetAddress"].strip() or addr.strip(),
52 'city': data["address"]["addressLocality"] or city,
53 'state': data["address"]["addressRegion"] or state,
54 'postcode': data["address"]["postalCode"] or postcode,
55 'country': data["address"]["addressCountry"],
56 'phone': data.get("telephone") or phone.strip(),
57 'website': data.get("url") or response.url,
58 'lat': float(data["geo"]["latitude"]),
59 'lon': float(data["geo"]["longitude"]),
60 }
61
62 hours = data.get("openingHours")
63 if hours:
64 store_hours = self.parse_hours(hours)
65 properties["opening_hours"] = store_hours
66
67 yield GeojsonPointItem(**properties)
68
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/longhorn_steakhouse.py b/locations/spiders/longhorn_steakhouse.py
--- a/locations/spiders/longhorn_steakhouse.py
+++ b/locations/spiders/longhorn_steakhouse.py
@@ -18,7 +18,7 @@
custom_settings = {
'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',
}
- download_delay = 5
+ download_delay = 1
def parse_hours(self, hours):
opening_hours = OpeningHours()
| {"golden_diff": "diff --git a/locations/spiders/longhorn_steakhouse.py b/locations/spiders/longhorn_steakhouse.py\n--- a/locations/spiders/longhorn_steakhouse.py\n+++ b/locations/spiders/longhorn_steakhouse.py\n@@ -18,7 +18,7 @@\n custom_settings = {\n 'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',\n }\n- download_delay = 5\n+ download_delay = 1\n \n def parse_hours(self, hours):\n opening_hours = OpeningHours()\n", "issue": "Spider longhorn_steakhouse is broken\nDuring the global build at 2021-10-20-14-42-48, spider **longhorn_steakhouse** failed with **0 features** and **0 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/logs/longhorn_steakhouse.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/longhorn_steakhouse.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/longhorn_steakhouse.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nimport re\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nclass LongHornSteakhouseSpider(scrapy.Spider):\n name = \"longhorn_steakhouse\"\n item_attributes = {'brand': 'LongHorn Steakhouse', 'brand_wikidata': \"Q3259007\"}\n allowed_domains = []\n start_urls = [\n 'https://www.longhornsteakhouse.com/locations-sitemap.xml',\n ]\n custom_settings = {\n 'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',\n }\n download_delay = 5\n\n def parse_hours(self, hours):\n opening_hours = OpeningHours()\n\n for hour in hours:\n day, open_close = hour.split(' ')\n open_time, close_time = open_close.split('-')\n opening_hours.add_range(day=day, open_time=open_time, close_time=close_time, time_format='%H:%M')\n return opening_hours.as_opening_hours()\n\n def parse(self, response):\n response.selector.remove_namespaces()\n urls = response.xpath('//url/loc/text()').extract()\n for url in urls:\n yield scrapy.Request(url=url, callback=self.parse_store)\n\n def parse_store(self, response):\n store_data = response.xpath('//script[@type=\"application/ld+json\" and contains(text(), \"streetAddress\")]/text()').extract_first()\n if store_data:\n data = json.loads(store_data)\n ref = re.search(r'.+/(.+?)/?(?:\\.html|$)', response.url).group(1)\n\n # Handle store pages that are missing the application/ld+json data\n addr, city_state_zip, phone = response.xpath('//p[@id=\"info-link-webhead\"]/text()').extract()\n city, state, postcode = re.search(r'(.*?),\\s([A-Z]{2})\\s([\\d-]+)$', city_state_zip).groups()\n\n properties = {\n 'name': data.get(\"name\") or response.xpath('//h1[@class=\"style_h1\"]/text()').extract_first().strip(),\n 'ref': data[\"branchCode\"] or ref,\n 'addr_full': data[\"address\"][\"streetAddress\"].strip() or addr.strip(),\n 'city': data[\"address\"][\"addressLocality\"] or city,\n 'state': data[\"address\"][\"addressRegion\"] or state,\n 'postcode': data[\"address\"][\"postalCode\"] or postcode,\n 'country': data[\"address\"][\"addressCountry\"],\n 'phone': data.get(\"telephone\") or phone.strip(),\n 'website': data.get(\"url\") or response.url,\n 'lat': float(data[\"geo\"][\"latitude\"]),\n 'lon': float(data[\"geo\"][\"longitude\"]),\n }\n\n hours = data.get(\"openingHours\")\n if hours:\n store_hours = self.parse_hours(hours)\n properties[\"opening_hours\"] = store_hours\n\n 
yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/longhorn_steakhouse.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nimport re\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nclass LongHornSteakhouseSpider(scrapy.Spider):\n name = \"longhorn_steakhouse\"\n item_attributes = {'brand': 'LongHorn Steakhouse', 'brand_wikidata': \"Q3259007\"}\n allowed_domains = []\n start_urls = [\n 'https://www.longhornsteakhouse.com/locations-sitemap.xml',\n ]\n custom_settings = {\n 'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',\n }\n download_delay = 1\n\n def parse_hours(self, hours):\n opening_hours = OpeningHours()\n\n for hour in hours:\n day, open_close = hour.split(' ')\n open_time, close_time = open_close.split('-')\n opening_hours.add_range(day=day, open_time=open_time, close_time=close_time, time_format='%H:%M')\n return opening_hours.as_opening_hours()\n\n def parse(self, response):\n response.selector.remove_namespaces()\n urls = response.xpath('//url/loc/text()').extract()\n for url in urls:\n yield scrapy.Request(url=url, callback=self.parse_store)\n\n def parse_store(self, response):\n store_data = response.xpath('//script[@type=\"application/ld+json\" and contains(text(), \"streetAddress\")]/text()').extract_first()\n if store_data:\n data = json.loads(store_data)\n ref = re.search(r'.+/(.+?)/?(?:\\.html|$)', response.url).group(1)\n\n # Handle store pages that are missing the application/ld+json data\n addr, city_state_zip, phone = response.xpath('//p[@id=\"info-link-webhead\"]/text()').extract()\n city, state, postcode = re.search(r'(.*?),\\s([A-Z]{2})\\s([\\d-]+)$', city_state_zip).groups()\n\n properties = {\n 'name': data.get(\"name\") or response.xpath('//h1[@class=\"style_h1\"]/text()').extract_first().strip(),\n 'ref': data[\"branchCode\"] or ref,\n 'addr_full': data[\"address\"][\"streetAddress\"].strip() or addr.strip(),\n 'city': data[\"address\"][\"addressLocality\"] or city,\n 'state': data[\"address\"][\"addressRegion\"] or state,\n 'postcode': data[\"address\"][\"postalCode\"] or postcode,\n 'country': data[\"address\"][\"addressCountry\"],\n 'phone': data.get(\"telephone\") or phone.strip(),\n 'website': data.get(\"url\") or response.url,\n 'lat': float(data[\"geo\"][\"latitude\"]),\n 'lon': float(data[\"geo\"][\"longitude\"]),\n }\n\n hours = data.get(\"openingHours\")\n if hours:\n store_hours = self.parse_hours(hours)\n properties[\"opening_hours\"] = store_hours\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/longhorn_steakhouse.py"}]} |
gh_patches_debug_1279 | rasdani/github-patches | git_diff | carltongibson__django-filter-199 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Incompatible with django-debug-toolbar versions panel
django-debug-toolbar versions panel expects a parameterless get_version() method.
https://github.com/django-debug-toolbar/django-debug-toolbar/blob/master/debug_toolbar/panels/versions.py#L67
Your get_version method requires a parameter (version)
https://github.com/alex/django-filter/blob/develop/django_filters/__init__.py#L9
Is there a way you could change your get_version method signature to be compatible? Been running django-debug-toolbar for a long time and I have never run into this with any other 3rd party apps.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `django_filters/__init__.py`
Content:
```
1 # flake8: noqa
2 from __future__ import absolute_import
3 from .filterset import FilterSet
4 from .filters import *
5
6 __version__ = '0.9.0'
7
8
9 def get_version(version):
10 '''
11 '0.1.2-dev' -> (0, 1, 2, 'dev')
12 '0.1.2' -> (0, 1, 2)
13 '''
14 v = version.split('.')
15 v = v[:-1] + v[-1].split('-')
16 ret = []
17 for p in v:
18 if p.isdigit():
19 ret.append(int(p))
20 else:
21 ret.append(p)
22 return tuple(ret)
23
24 VERSION = get_version(__version__)
25
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/django_filters/__init__.py b/django_filters/__init__.py
--- a/django_filters/__init__.py
+++ b/django_filters/__init__.py
@@ -6,7 +6,7 @@
__version__ = '0.9.0'
-def get_version(version):
+def parse_version(version):
'''
'0.1.2-dev' -> (0, 1, 2, 'dev')
'0.1.2' -> (0, 1, 2)
@@ -21,4 +21,4 @@
ret.append(p)
return tuple(ret)
-VERSION = get_version(__version__)
+VERSION = parse_version(__version__)
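
The fix is purely a rename: `get_version(version)` becomes `parse_version(version)` and the parsing logic is untouched, so tools such as django-debug-toolbar that probe packages for a zero-argument `get_version()` callable no longer hit a `TypeError`. A minimal sketch of the behaviour after the patch; the sample values come from the function's docstring, and it assumes the patched `django_filters/__init__.py` is the version installed:

```python
# Sketch: the renamed helper still turns a version string into a tuple.
from django_filters import VERSION, parse_version

assert parse_version('0.9.0') == (0, 9, 0)
assert parse_version('0.1.2-dev') == (0, 1, 2, 'dev')
assert VERSION == (0, 9, 0)
```
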
| {"golden_diff": "diff --git a/django_filters/__init__.py b/django_filters/__init__.py\n--- a/django_filters/__init__.py\n+++ b/django_filters/__init__.py\n@@ -6,7 +6,7 @@\n __version__ = '0.9.0'\n \n \n-def get_version(version):\n+def parse_version(version):\n '''\n '0.1.2-dev' -> (0, 1, 2, 'dev')\n '0.1.2' -> (0, 1, 2)\n@@ -21,4 +21,4 @@\n ret.append(p)\n return tuple(ret)\n \n-VERSION = get_version(__version__)\n+VERSION = parse_version(__version__)\n", "issue": "Incompatible with django-debug-toolbar versions panel\ndjango-debug-toolbar versions panel expects a parameterless get_version() method.\n\nhttps://github.com/django-debug-toolbar/django-debug-toolbar/blob/master/debug_toolbar/panels/versions.py#L67\n\nYour get_version method requires a parameter (version)\n\nhttps://github.com/alex/django-filter/blob/develop/django_filters/__init__.py#L9\n\nIs there a way you could change your get_version method signature to be compatible? Been running django-debug-toolbar for a long time and I have never run into this with any other 3rd party apps.\n\n", "before_files": [{"content": "# flake8: noqa\nfrom __future__ import absolute_import\nfrom .filterset import FilterSet\nfrom .filters import *\n\n__version__ = '0.9.0'\n\n\ndef get_version(version):\n '''\n '0.1.2-dev' -> (0, 1, 2, 'dev')\n '0.1.2' -> (0, 1, 2)\n '''\n v = version.split('.')\n v = v[:-1] + v[-1].split('-')\n ret = []\n for p in v:\n if p.isdigit():\n ret.append(int(p))\n else:\n ret.append(p)\n return tuple(ret)\n\nVERSION = get_version(__version__)\n", "path": "django_filters/__init__.py"}], "after_files": [{"content": "# flake8: noqa\nfrom __future__ import absolute_import\nfrom .filterset import FilterSet\nfrom .filters import *\n\n__version__ = '0.9.0'\n\n\ndef parse_version(version):\n '''\n '0.1.2-dev' -> (0, 1, 2, 'dev')\n '0.1.2' -> (0, 1, 2)\n '''\n v = version.split('.')\n v = v[:-1] + v[-1].split('-')\n ret = []\n for p in v:\n if p.isdigit():\n ret.append(int(p))\n else:\n ret.append(p)\n return tuple(ret)\n\nVERSION = parse_version(__version__)\n", "path": "django_filters/__init__.py"}]} |
gh_patches_debug_1280 | rasdani/github-patches | git_diff | Lightning-Universe__lightning-flash-1243 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Docs page describing Beta meaning
## 📚 Documentation
Add a page in our docs describing that beta means that one or all of the following are true:
- the feature has unstable dependencies
- the feature may change without notice in future versions
- the feature is not compatible with other flash / pl features
- the performance of the feature has not been verified
Anything else?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/extensions/stability.py`
Content:
```
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from docutils import nodes
15 from docutils.parsers.rst import Directive
16 from docutils.statemachine import StringList
17
18 ADMONITION_TEMPLATE = """
19 .. raw:: html
20
21 <div class="admonition warning {type}">
22 <p class="admonition-title">{title}</p>
23 <p>This {scope} is currently in Beta. The interfaces and functionality may change without warning in future
24 releases.</p>
25 </div>
26 """
27
28
29 class Beta(Directive):
30 has_content = True
31 required_arguments = 1
32 optional_arguments = 0
33
34 def run(self):
35
36 scope = self.arguments[0]
37
38 admonition_rst = ADMONITION_TEMPLATE.format(type="beta", title="Beta", scope=scope)
39 admonition_list = StringList(admonition_rst.split("\n"))
40 admonition = nodes.paragraph()
41 self.state.nested_parse(admonition_list, self.content_offset, admonition)
42 return [admonition]
43
44
45 def setup(app):
46 app.add_directive("beta", Beta)
47
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/extensions/stability.py b/docs/extensions/stability.py
--- a/docs/extensions/stability.py
+++ b/docs/extensions/stability.py
@@ -20,8 +20,14 @@
<div class="admonition warning {type}">
<p class="admonition-title">{title}</p>
- <p>This {scope} is currently in Beta. The interfaces and functionality may change without warning in future
- releases.</p>
+ <p>
+
+This {scope} is currently in Beta. The API and functionality may change without warning in future
+releases. :ref:`More details <stability>`.
+
+.. raw:: html
+
+ </p>
</div>
"""
| {"golden_diff": "diff --git a/docs/extensions/stability.py b/docs/extensions/stability.py\n--- a/docs/extensions/stability.py\n+++ b/docs/extensions/stability.py\n@@ -20,8 +20,14 @@\n \n <div class=\"admonition warning {type}\">\n <p class=\"admonition-title\">{title}</p>\n- <p>This {scope} is currently in Beta. The interfaces and functionality may change without warning in future\n- releases.</p>\n+ <p>\n+\n+This {scope} is currently in Beta. The API and functionality may change without warning in future\n+releases. :ref:`More details <stability>`.\n+\n+.. raw:: html\n+\n+ </p>\n </div>\n \"\"\"\n", "issue": "Docs page describing Beta meaning\n## \ud83d\udcda Documentation\r\n\r\nAdd a page in our docs describing that beta means that one or all of the following are true:\r\n- the feature has unstable dependencies\r\n- the feature may change without notice in future versions\r\n- the feature is not compatible with other flash / pl features\r\n- the performance of the feature has not been verified\r\n\r\nAnything else?\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom docutils import nodes\nfrom docutils.parsers.rst import Directive\nfrom docutils.statemachine import StringList\n\nADMONITION_TEMPLATE = \"\"\"\n.. raw:: html\n\n <div class=\"admonition warning {type}\">\n <p class=\"admonition-title\">{title}</p>\n <p>This {scope} is currently in Beta. The interfaces and functionality may change without warning in future\n releases.</p>\n </div>\n\"\"\"\n\n\nclass Beta(Directive):\n has_content = True\n required_arguments = 1\n optional_arguments = 0\n\n def run(self):\n\n scope = self.arguments[0]\n\n admonition_rst = ADMONITION_TEMPLATE.format(type=\"beta\", title=\"Beta\", scope=scope)\n admonition_list = StringList(admonition_rst.split(\"\\n\"))\n admonition = nodes.paragraph()\n self.state.nested_parse(admonition_list, self.content_offset, admonition)\n return [admonition]\n\n\ndef setup(app):\n app.add_directive(\"beta\", Beta)\n", "path": "docs/extensions/stability.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom docutils import nodes\nfrom docutils.parsers.rst import Directive\nfrom docutils.statemachine import StringList\n\nADMONITION_TEMPLATE = \"\"\"\n.. raw:: html\n\n <div class=\"admonition warning {type}\">\n <p class=\"admonition-title\">{title}</p>\n <p>\n\nThis {scope} is currently in Beta. 
The API and functionality may change without warning in future\nreleases. :ref:`More details <stability>`.\n\n.. raw:: html\n\n </p>\n </div>\n\"\"\"\n\n\nclass Beta(Directive):\n has_content = True\n required_arguments = 1\n optional_arguments = 0\n\n def run(self):\n\n scope = self.arguments[0]\n\n admonition_rst = ADMONITION_TEMPLATE.format(type=\"beta\", title=\"Beta\", scope=scope)\n admonition_list = StringList(admonition_rst.split(\"\\n\"))\n admonition = nodes.paragraph()\n self.state.nested_parse(admonition_list, self.content_offset, admonition)\n return [admonition]\n\n\ndef setup(app):\n app.add_directive(\"beta\", Beta)\n", "path": "docs/extensions/stability.py"}]} |
gh_patches_debug_1281 | rasdani/github-patches | git_diff | iterative__dvc-1325 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
gitignore: use unambiguous paths
E.g. `/dir` instead of `dir`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dvc/scm.py`
Content:
```
1 import os
2
3 from dvc.exceptions import DvcException
4 from dvc.logger import Logger
5 from dvc.utils import fix_env
6
7
8 class SCMError(DvcException):
9 pass
10
11
12 class FileNotInRepoError(DvcException):
13 pass
14
15
16 class Base(object):
17 def __init__(self, root_dir=os.curdir, project=None):
18 self.project = project
19 self.root_dir = root_dir
20
21 @property
22 def dir(self):
23 return None
24
25 @staticmethod
26 def is_repo(root_dir):
27 return True
28
29 @staticmethod
30 def is_submodule(root_dir):
31 return True
32
33 def ignore(self, path):
34 pass
35
36 def ignore_remove(self, path):
37 pass
38
39 def ignore_file(self):
40 pass
41
42 def ignore_list(self, p_list):
43 return [self.ignore(path) for path in p_list]
44
45 def add(self, paths):
46 pass
47
48 def commit(self, msg):
49 pass
50
51 def checkout(self, branch):
52 pass
53
54 def branch(self, branch):
55 pass
56
57 def tag(self, tag):
58 pass
59
60 def brancher(self,
61 branches=None,
62 all_branches=False,
63 tags=None,
64 all_tags=False):
65 if not branches and not all_branches \
66 and not tags and not all_tags:
67 yield ''
68 return
69
70 saved = self.active_branch()
71 revs = []
72
73 if all_branches:
74 branches = self.list_branches()
75
76 if all_tags:
77 tags = self.list_tags()
78
79 if branches is None:
80 revs.extend([saved])
81 else:
82 revs.extend(branches)
83
84 if tags is not None:
85 revs.extend(tags)
86
87 for rev in revs:
88 self.checkout(rev)
89 yield rev
90
91 self.checkout(saved)
92
93 def untracked_files(self):
94 pass
95
96 def is_tracked(self, path):
97 pass
98
99 def active_branch(self):
100 pass
101
102 def list_branches(self):
103 pass
104
105 def list_tags(self):
106 pass
107
108 def install(self):
109 pass
110
111
112 class Git(Base):
113 GITIGNORE = '.gitignore'
114 GIT_DIR = '.git'
115
116 def __init__(self, root_dir=os.curdir, project=None):
117 super(Git, self).__init__(root_dir, project=project)
118
119 import git
120 from git.exc import InvalidGitRepositoryError
121 try:
122 self.repo = git.Repo(root_dir)
123 except InvalidGitRepositoryError:
124 msg = '{} is not a git repository'
125 raise SCMError(msg.format(root_dir))
126
127 # NOTE: fixing LD_LIBRARY_PATH for binary built by PyInstaller.
128 # http://pyinstaller.readthedocs.io/en/stable/runtime-information.html
129 env = fix_env(None)
130 lp = env.get('LD_LIBRARY_PATH', None)
131 self.repo.git.update_environment(LD_LIBRARY_PATH=lp)
132
133 @staticmethod
134 def is_repo(root_dir):
135 return os.path.isdir(Git._get_git_dir(root_dir))
136
137 @staticmethod
138 def is_submodule(root_dir):
139 return os.path.isfile(Git._get_git_dir(root_dir))
140
141 @staticmethod
142 def _get_git_dir(root_dir):
143 return os.path.join(root_dir, Git.GIT_DIR)
144
145 @property
146 def dir(self):
147 return self.repo.git_dir
148
149 def ignore_file(self):
150 return self.GITIGNORE
151
152 def _get_gitignore(self, path):
153 assert os.path.isabs(path)
154 entry = os.path.basename(path)
155 gitignore = os.path.join(os.path.dirname(path), self.GITIGNORE)
156
157 if not gitignore.startswith(self.root_dir):
158 raise FileNotInRepoError(path)
159
160 return entry, gitignore
161
162 def ignore(self, path):
163 entry, gitignore = self._get_gitignore(path)
164
165 ignore_list = []
166 if os.path.exists(gitignore):
167 ignore_list = open(gitignore, 'r').readlines()
168 filtered = list(filter(lambda x: x.strip() == entry.strip(),
169 ignore_list))
170 if len(filtered) != 0:
171 return
172
173 msg = "Adding '{}' to '{}'.".format(os.path.relpath(path),
174 os.path.relpath(gitignore))
175 Logger.info(msg)
176
177 content = entry
178 if len(ignore_list) > 0:
179 content = '\n' + content
180
181 with open(gitignore, 'a') as fd:
182 fd.write(content)
183
184 if self.project is not None:
185 self.project._files_to_git_add.append(os.path.relpath(gitignore))
186
187 def ignore_remove(self, path):
188 entry, gitignore = self._get_gitignore(path)
189
190 if not os.path.exists(gitignore):
191 return
192
193 with open(gitignore, 'r') as fd:
194 lines = fd.readlines()
195
196 filtered = list(filter(lambda x: x.strip() != entry.strip(), lines))
197
198 with open(gitignore, 'w') as fd:
199 fd.writelines(filtered)
200
201 if self.project is not None:
202 self.project._files_to_git_add.append(os.path.relpath(gitignore))
203
204 def add(self, paths):
205 # NOTE: GitPython is not currently able to handle index version >= 3.
206 # See https://github.com/iterative/dvc/issues/610 for more details.
207 try:
208 self.repo.index.add(paths)
209 except AssertionError as exc:
210 msg = 'Failed to add \'{}\' to git. You can add those files '
211 msg += 'manually using \'git add\'. '
212 msg += 'See \'https://github.com/iterative/dvc/issues/610\' '
213 msg += 'for more details.'
214 Logger.error(msg.format(str(paths)), exc)
215
216 def commit(self, msg):
217 self.repo.index.commit(msg)
218
219 def checkout(self, branch, create_new=False):
220 if create_new:
221 self.repo.git.checkout('HEAD', b=branch)
222 else:
223 self.repo.git.checkout(branch)
224
225 def branch(self, branch):
226 self.repo.git.branch(branch)
227
228 def tag(self, tag):
229 self.repo.git.tag(tag)
230
231 def untracked_files(self):
232 files = self.repo.untracked_files
233 return [os.path.join(self.repo.working_dir, fname) for fname in files]
234
235 def is_tracked(self, path):
236 return len(self.repo.git.ls_files(path)) != 0
237
238 def active_branch(self):
239 return self.repo.active_branch.name
240
241 def list_branches(self):
242 return [h.name for h in self.repo.heads]
243
244 def list_tags(self):
245 return [t.name for t in self.repo.tags]
246
247 def _install_hook(self, name, cmd):
248 hook = os.path.join(self.root_dir,
249 self.GIT_DIR,
250 'hooks',
251 name)
252 if os.path.isfile(hook):
253 msg = 'Git hook \'{}\' already exists.'
254 raise SCMError(msg.format(os.path.relpath(hook)))
255 with open(hook, 'w+') as fd:
256 fd.write('#!/bin/sh\nexec dvc {}\n'.format(cmd))
257 os.chmod(hook, 0o777)
258
259 def install(self):
260 self._install_hook('post-checkout', 'checkout')
261 self._install_hook('pre-commit', 'status')
262
263
264 def SCM(root_dir, no_scm=False, project=None):
265 if Git.is_repo(root_dir) or Git.is_submodule(root_dir):
266 return Git(root_dir, project=project)
267
268 return Base(root_dir, project=project)
269
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dvc/scm.py b/dvc/scm.py
--- a/dvc/scm.py
+++ b/dvc/scm.py
@@ -151,7 +151,8 @@
def _get_gitignore(self, path):
assert os.path.isabs(path)
- entry = os.path.basename(path)
+ # NOTE: using '/' prefix to make path unambiguous
+ entry = '/' + os.path.basename(path)
gitignore = os.path.join(os.path.dirname(path), self.GITIGNORE)
if not gitignore.startswith(self.root_dir):
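
The one-character change leans on a Git pattern rule: a bare `dir` entry in a `.gitignore` matches a path component at any depth below that file, while a leading `/` anchors the pattern to the directory the `.gitignore` lives in. A hedged illustration of the entries DVC writes after the patch; the helper below is a stand-in for the patched `_get_gitignore`, not the real method:

```python
import os

def gitignore_entry(path: str) -> str:
    # Anchored entry, as in the patch: only the sibling named <basename> is ignored.
    return '/' + os.path.basename(path)

print(gitignore_entry('/repo/data/model.pkl'))  # -> '/model.pkl'
# The old bare 'model.pkl' entry would also have matched nested paths
# such as 'sub/dir/model.pkl' below the same .gitignore.
```
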
| {"golden_diff": "diff --git a/dvc/scm.py b/dvc/scm.py\n--- a/dvc/scm.py\n+++ b/dvc/scm.py\n@@ -151,7 +151,8 @@\n \n def _get_gitignore(self, path):\n assert os.path.isabs(path)\n- entry = os.path.basename(path)\n+ # NOTE: using '/' prefix to make path unambiguous\n+ entry = '/' + os.path.basename(path)\n gitignore = os.path.join(os.path.dirname(path), self.GITIGNORE)\n \n if not gitignore.startswith(self.root_dir):\n", "issue": "gitignore: use unambiguous paths\nE.g. `/dir` instead of `dir`.\n", "before_files": [{"content": "import os\n\nfrom dvc.exceptions import DvcException\nfrom dvc.logger import Logger\nfrom dvc.utils import fix_env\n\n\nclass SCMError(DvcException):\n pass\n\n\nclass FileNotInRepoError(DvcException):\n pass\n\n\nclass Base(object):\n def __init__(self, root_dir=os.curdir, project=None):\n self.project = project\n self.root_dir = root_dir\n\n @property\n def dir(self):\n return None\n\n @staticmethod\n def is_repo(root_dir):\n return True\n\n @staticmethod\n def is_submodule(root_dir):\n return True\n\n def ignore(self, path):\n pass\n\n def ignore_remove(self, path):\n pass\n\n def ignore_file(self):\n pass\n\n def ignore_list(self, p_list):\n return [self.ignore(path) for path in p_list]\n\n def add(self, paths):\n pass\n\n def commit(self, msg):\n pass\n\n def checkout(self, branch):\n pass\n\n def branch(self, branch):\n pass\n\n def tag(self, tag):\n pass\n\n def brancher(self,\n branches=None,\n all_branches=False,\n tags=None,\n all_tags=False):\n if not branches and not all_branches \\\n and not tags and not all_tags:\n yield ''\n return\n\n saved = self.active_branch()\n revs = []\n\n if all_branches:\n branches = self.list_branches()\n\n if all_tags:\n tags = self.list_tags()\n\n if branches is None:\n revs.extend([saved])\n else:\n revs.extend(branches)\n\n if tags is not None:\n revs.extend(tags)\n\n for rev in revs:\n self.checkout(rev)\n yield rev\n\n self.checkout(saved)\n\n def untracked_files(self):\n pass\n\n def is_tracked(self, path):\n pass\n\n def active_branch(self):\n pass\n\n def list_branches(self):\n pass\n\n def list_tags(self):\n pass\n\n def install(self):\n pass\n\n\nclass Git(Base):\n GITIGNORE = '.gitignore'\n GIT_DIR = '.git'\n\n def __init__(self, root_dir=os.curdir, project=None):\n super(Git, self).__init__(root_dir, project=project)\n\n import git\n from git.exc import InvalidGitRepositoryError\n try:\n self.repo = git.Repo(root_dir)\n except InvalidGitRepositoryError:\n msg = '{} is not a git repository'\n raise SCMError(msg.format(root_dir))\n\n # NOTE: fixing LD_LIBRARY_PATH for binary built by PyInstaller.\n # http://pyinstaller.readthedocs.io/en/stable/runtime-information.html\n env = fix_env(None)\n lp = env.get('LD_LIBRARY_PATH', None)\n self.repo.git.update_environment(LD_LIBRARY_PATH=lp)\n\n @staticmethod\n def is_repo(root_dir):\n return os.path.isdir(Git._get_git_dir(root_dir))\n\n @staticmethod\n def is_submodule(root_dir):\n return os.path.isfile(Git._get_git_dir(root_dir))\n\n @staticmethod\n def _get_git_dir(root_dir):\n return os.path.join(root_dir, Git.GIT_DIR)\n\n @property\n def dir(self):\n return self.repo.git_dir\n\n def ignore_file(self):\n return self.GITIGNORE\n\n def _get_gitignore(self, path):\n assert os.path.isabs(path)\n entry = os.path.basename(path)\n gitignore = os.path.join(os.path.dirname(path), self.GITIGNORE)\n\n if not gitignore.startswith(self.root_dir):\n raise FileNotInRepoError(path)\n\n return entry, gitignore\n\n def ignore(self, path):\n entry, gitignore = 
self._get_gitignore(path)\n\n ignore_list = []\n if os.path.exists(gitignore):\n ignore_list = open(gitignore, 'r').readlines()\n filtered = list(filter(lambda x: x.strip() == entry.strip(),\n ignore_list))\n if len(filtered) != 0:\n return\n\n msg = \"Adding '{}' to '{}'.\".format(os.path.relpath(path),\n os.path.relpath(gitignore))\n Logger.info(msg)\n\n content = entry\n if len(ignore_list) > 0:\n content = '\\n' + content\n\n with open(gitignore, 'a') as fd:\n fd.write(content)\n\n if self.project is not None:\n self.project._files_to_git_add.append(os.path.relpath(gitignore))\n\n def ignore_remove(self, path):\n entry, gitignore = self._get_gitignore(path)\n\n if not os.path.exists(gitignore):\n return\n\n with open(gitignore, 'r') as fd:\n lines = fd.readlines()\n\n filtered = list(filter(lambda x: x.strip() != entry.strip(), lines))\n\n with open(gitignore, 'w') as fd:\n fd.writelines(filtered)\n\n if self.project is not None:\n self.project._files_to_git_add.append(os.path.relpath(gitignore))\n\n def add(self, paths):\n # NOTE: GitPython is not currently able to handle index version >= 3.\n # See https://github.com/iterative/dvc/issues/610 for more details.\n try:\n self.repo.index.add(paths)\n except AssertionError as exc:\n msg = 'Failed to add \\'{}\\' to git. You can add those files '\n msg += 'manually using \\'git add\\'. '\n msg += 'See \\'https://github.com/iterative/dvc/issues/610\\' '\n msg += 'for more details.'\n Logger.error(msg.format(str(paths)), exc)\n\n def commit(self, msg):\n self.repo.index.commit(msg)\n\n def checkout(self, branch, create_new=False):\n if create_new:\n self.repo.git.checkout('HEAD', b=branch)\n else:\n self.repo.git.checkout(branch)\n\n def branch(self, branch):\n self.repo.git.branch(branch)\n\n def tag(self, tag):\n self.repo.git.tag(tag)\n\n def untracked_files(self):\n files = self.repo.untracked_files\n return [os.path.join(self.repo.working_dir, fname) for fname in files]\n\n def is_tracked(self, path):\n return len(self.repo.git.ls_files(path)) != 0\n\n def active_branch(self):\n return self.repo.active_branch.name\n\n def list_branches(self):\n return [h.name for h in self.repo.heads]\n\n def list_tags(self):\n return [t.name for t in self.repo.tags]\n\n def _install_hook(self, name, cmd):\n hook = os.path.join(self.root_dir,\n self.GIT_DIR,\n 'hooks',\n name)\n if os.path.isfile(hook):\n msg = 'Git hook \\'{}\\' already exists.'\n raise SCMError(msg.format(os.path.relpath(hook)))\n with open(hook, 'w+') as fd:\n fd.write('#!/bin/sh\\nexec dvc {}\\n'.format(cmd))\n os.chmod(hook, 0o777)\n\n def install(self):\n self._install_hook('post-checkout', 'checkout')\n self._install_hook('pre-commit', 'status')\n\n\ndef SCM(root_dir, no_scm=False, project=None):\n if Git.is_repo(root_dir) or Git.is_submodule(root_dir):\n return Git(root_dir, project=project)\n\n return Base(root_dir, project=project)\n", "path": "dvc/scm.py"}], "after_files": [{"content": "import os\n\nfrom dvc.exceptions import DvcException\nfrom dvc.logger import Logger\nfrom dvc.utils import fix_env\n\n\nclass SCMError(DvcException):\n pass\n\n\nclass FileNotInRepoError(DvcException):\n pass\n\n\nclass Base(object):\n def __init__(self, root_dir=os.curdir, project=None):\n self.project = project\n self.root_dir = root_dir\n\n @property\n def dir(self):\n return None\n\n @staticmethod\n def is_repo(root_dir):\n return True\n\n @staticmethod\n def is_submodule(root_dir):\n return True\n\n def ignore(self, path):\n pass\n\n def ignore_remove(self, path):\n pass\n\n def 
ignore_file(self):\n pass\n\n def ignore_list(self, p_list):\n return [self.ignore(path) for path in p_list]\n\n def add(self, paths):\n pass\n\n def commit(self, msg):\n pass\n\n def checkout(self, branch):\n pass\n\n def branch(self, branch):\n pass\n\n def tag(self, tag):\n pass\n\n def brancher(self,\n branches=None,\n all_branches=False,\n tags=None,\n all_tags=False):\n if not branches and not all_branches \\\n and not tags and not all_tags:\n yield ''\n return\n\n saved = self.active_branch()\n revs = []\n\n if all_branches:\n branches = self.list_branches()\n\n if all_tags:\n tags = self.list_tags()\n\n if branches is None:\n revs.extend([saved])\n else:\n revs.extend(branches)\n\n if tags is not None:\n revs.extend(tags)\n\n for rev in revs:\n self.checkout(rev)\n yield rev\n\n self.checkout(saved)\n\n def untracked_files(self):\n pass\n\n def is_tracked(self, path):\n pass\n\n def active_branch(self):\n pass\n\n def list_branches(self):\n pass\n\n def list_tags(self):\n pass\n\n def install(self):\n pass\n\n\nclass Git(Base):\n GITIGNORE = '.gitignore'\n GIT_DIR = '.git'\n\n def __init__(self, root_dir=os.curdir, project=None):\n super(Git, self).__init__(root_dir, project=project)\n\n import git\n from git.exc import InvalidGitRepositoryError\n try:\n self.repo = git.Repo(root_dir)\n except InvalidGitRepositoryError:\n msg = '{} is not a git repository'\n raise SCMError(msg.format(root_dir))\n\n # NOTE: fixing LD_LIBRARY_PATH for binary built by PyInstaller.\n # http://pyinstaller.readthedocs.io/en/stable/runtime-information.html\n env = fix_env(None)\n lp = env.get('LD_LIBRARY_PATH', None)\n self.repo.git.update_environment(LD_LIBRARY_PATH=lp)\n\n @staticmethod\n def is_repo(root_dir):\n return os.path.isdir(Git._get_git_dir(root_dir))\n\n @staticmethod\n def is_submodule(root_dir):\n return os.path.isfile(Git._get_git_dir(root_dir))\n\n @staticmethod\n def _get_git_dir(root_dir):\n return os.path.join(root_dir, Git.GIT_DIR)\n\n @property\n def dir(self):\n return self.repo.git_dir\n\n def ignore_file(self):\n return self.GITIGNORE\n\n def _get_gitignore(self, path):\n assert os.path.isabs(path)\n # NOTE: using '/' prefix to make path unambiguous\n entry = '/' + os.path.basename(path)\n gitignore = os.path.join(os.path.dirname(path), self.GITIGNORE)\n\n if not gitignore.startswith(self.root_dir):\n raise FileNotInRepoError(path)\n\n return entry, gitignore\n\n def ignore(self, path):\n entry, gitignore = self._get_gitignore(path)\n\n ignore_list = []\n if os.path.exists(gitignore):\n ignore_list = open(gitignore, 'r').readlines()\n filtered = list(filter(lambda x: x.strip() == entry.strip(),\n ignore_list))\n if len(filtered) != 0:\n return\n\n msg = \"Adding '{}' to '{}'.\".format(os.path.relpath(path),\n os.path.relpath(gitignore))\n Logger.info(msg)\n\n content = entry\n if len(ignore_list) > 0:\n content = '\\n' + content\n\n with open(gitignore, 'a') as fd:\n fd.write(content)\n\n if self.project is not None:\n self.project._files_to_git_add.append(os.path.relpath(gitignore))\n\n def ignore_remove(self, path):\n entry, gitignore = self._get_gitignore(path)\n\n if not os.path.exists(gitignore):\n return\n\n with open(gitignore, 'r') as fd:\n lines = fd.readlines()\n\n filtered = list(filter(lambda x: x.strip() != entry.strip(), lines))\n\n with open(gitignore, 'w') as fd:\n fd.writelines(filtered)\n\n if self.project is not None:\n self.project._files_to_git_add.append(os.path.relpath(gitignore))\n\n def add(self, paths):\n # NOTE: GitPython is not currently able to handle 
index version >= 3.\n # See https://github.com/iterative/dvc/issues/610 for more details.\n try:\n self.repo.index.add(paths)\n except AssertionError as exc:\n msg = 'Failed to add \\'{}\\' to git. You can add those files '\n msg += 'manually using \\'git add\\'. '\n msg += 'See \\'https://github.com/iterative/dvc/issues/610\\' '\n msg += 'for more details.'\n Logger.error(msg.format(str(paths)), exc)\n\n def commit(self, msg):\n self.repo.index.commit(msg)\n\n def checkout(self, branch, create_new=False):\n if create_new:\n self.repo.git.checkout('HEAD', b=branch)\n else:\n self.repo.git.checkout(branch)\n\n def branch(self, branch):\n self.repo.git.branch(branch)\n\n def tag(self, tag):\n self.repo.git.tag(tag)\n\n def untracked_files(self):\n files = self.repo.untracked_files\n return [os.path.join(self.repo.working_dir, fname) for fname in files]\n\n def is_tracked(self, path):\n return len(self.repo.git.ls_files(path)) != 0\n\n def active_branch(self):\n return self.repo.active_branch.name\n\n def list_branches(self):\n return [h.name for h in self.repo.heads]\n\n def list_tags(self):\n return [t.name for t in self.repo.tags]\n\n def _install_hook(self, name, cmd):\n hook = os.path.join(self.root_dir,\n self.GIT_DIR,\n 'hooks',\n name)\n if os.path.isfile(hook):\n msg = 'Git hook \\'{}\\' already exists.'\n raise SCMError(msg.format(os.path.relpath(hook)))\n with open(hook, 'w+') as fd:\n fd.write('#!/bin/sh\\nexec dvc {}\\n'.format(cmd))\n os.chmod(hook, 0o777)\n\n def install(self):\n self._install_hook('post-checkout', 'checkout')\n self._install_hook('pre-commit', 'status')\n\n\ndef SCM(root_dir, no_scm=False, project=None):\n if Git.is_repo(root_dir) or Git.is_submodule(root_dir):\n return Git(root_dir, project=project)\n\n return Base(root_dir, project=project)\n", "path": "dvc/scm.py"}]} |
gh_patches_debug_1282 | rasdani/github-patches | git_diff | fidals__shopelectro-415 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove the menu item
Remove the mp3 speakers from the menu. There won't be any more of them.
I went into the admin panel and turned the category off. Hope I did that right )
http://prntscr.com/k553lt
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `shopelectro/templatetags/se_extras.py`
Content:
```
1 import datetime
2 import math
3
4 from django import template
5 from django.conf import settings
6 from django.contrib.humanize.templatetags.humanize import intcomma
7 from django.template.defaultfilters import floatformat
8 from django.urls import reverse
9
10 from images.models import ImageMixin
11 from pages.models import Page
12
13 from shopelectro import config
14 from shopelectro.models import Category
15
16 register = template.Library()
17
18
19 # TODO - move it in catalog. Inspired by lp_electric
20 @register.simple_tag
21 def roots():
22 return sorted(
23 Category.objects
24 .select_related('page')
25 .get_cached_trees(), # https://goo.gl/rFKiku
26 key=lambda x: x.page.position
27 )
28
29
30 @register.simple_tag
31 def footer_links():
32 return config.FOOTER_LINKS
33
34
35 # TODO - move in pages. Inspired by LP electric
36 @register.filter
37 def class_name(model):
38 """Return Model name."""
39 return type(model).__name__
40
41
42 @register.simple_tag
43 def time_to_call():
44 def is_weekend(t):
45 return t.weekday() > 4
46
47 def is_friday(t):
48 return t.weekday() == 4
49
50 def not_yet_opened(t):
51 current_time = (t.hour, t.minute)
52 open_time = (10, 00)
53 return current_time < open_time and not is_weekend(t)
54
55 def is_closed(t):
56 current_time = (t.hour, t.minute)
57 closing_time = (16, 30) if is_friday(t) else (17, 30)
58 return current_time > closing_time
59
60 when_we_call = {
61 lambda now: is_weekend(now) or (is_friday(now) and is_closed(now)): 'В понедельник в 10:30',
62 lambda now: not_yet_opened(now): 'Сегодня в 10:30',
63 lambda now: is_closed(now) and not (is_friday(now) or is_weekend(now)): 'Завтра в 10:30',
64 lambda _: True: 'В течение 30 минут'
65 }
66
67 time_ = datetime.datetime.now()
68 call = ' позвонит менеджер и обсудит детали доставки.'
69 for condition, time in when_we_call.items():
70 if condition(time_):
71 return time + call
72
73
74 # TODO - move it in pages.
75 @register.simple_tag
76 def full_url(url_name, *args):
77 return settings.BASE_URL + reverse(url_name, args=args)
78
79
80 @register.filter
81 def humanize_price(price):
82 return intcomma(floatformat(price, 0))
83
84
85 # Not good code, but duker at 06/10/2016 don't know how to fix it.
86 # It makes Image model very complex.
87 @register.simple_tag
88 def get_img_alt(entity: ImageMixin):
89 product_alt = 'Фотография {}'
90 logo_alt = 'Логотип компании Shopelectro'
91
92 if not isinstance(entity, Page):
93 return logo_alt
94
95 # try one of this attributes to get pages name
96 name_attrs = ['h1', 'title', 'name']
97 entity_name = next(
98 getattr(entity, attr)
99 for attr in name_attrs
100 if getattr(entity, attr)
101 )
102 return product_alt.format(entity_name)
103
104
105 @register.simple_tag
106 def main_image_or_logo(page: Page):
107 """Used for microdata."""
108 if hasattr(page, 'main_image') and page.main_image:
109 return page.main_image.url
110 else:
111 return settings.STATIC_URL + 'images/logo.png'
112
113
114 @register.inclusion_tag('catalog/product_feedbacks_icons.html')
115 def icon_stars(rating=0):
116 """Render set of rating icons based on 1 through 5 rating values."""
117 full_icons = int(math.floor(rating))
118 half_icons = 0 if rating == int(rating) else 1
119 empty_icons = 5 - full_icons - half_icons
120
121 return {
122 'full_icons': range(full_icons),
123 'half_icons': range(half_icons),
124 'empty_icons': range(empty_icons),
125 }
126
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/shopelectro/templatetags/se_extras.py b/shopelectro/templatetags/se_extras.py
--- a/shopelectro/templatetags/se_extras.py
+++ b/shopelectro/templatetags/se_extras.py
@@ -20,10 +20,13 @@
@register.simple_tag
def roots():
return sorted(
- Category.objects
- .select_related('page')
- .get_cached_trees(), # https://goo.gl/rFKiku
- key=lambda x: x.page.position
+ filter(
+ lambda x: x.page.is_active,
+ Category.objects # https://goo.gl/rFKiku
+ .select_related('page')
+ .get_cached_trees()
+ ),
+ key=lambda x: x.page.position,
)
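
In effect the template tag now honours the admin toggle from the issue: root categories whose page is inactive are filtered out before sorting, so switching a category off in the admin removes it from the menu without touching the data. A rough equivalent of the patched query, written outside the template tag (a sketch that assumes the Django models imported in the file above):

```python
# Sketch: only active root categories, ordered by page position.
from shopelectro.models import Category

roots = sorted(
    (c for c in Category.objects.select_related('page').get_cached_trees()
     if c.page.is_active),
    key=lambda c: c.page.position,
)
```
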
| {"golden_diff": "diff --git a/shopelectro/templatetags/se_extras.py b/shopelectro/templatetags/se_extras.py\n--- a/shopelectro/templatetags/se_extras.py\n+++ b/shopelectro/templatetags/se_extras.py\n@@ -20,10 +20,13 @@\n @register.simple_tag\n def roots():\n return sorted(\n- Category.objects\n- .select_related('page')\n- .get_cached_trees(), # https://goo.gl/rFKiku\n- key=lambda x: x.page.position\n+ filter(\n+ lambda x: x.page.is_active,\n+ Category.objects # https://goo.gl/rFKiku\n+ .select_related('page')\n+ .get_cached_trees()\n+ ),\n+ key=lambda x: x.page.position,\n )\n", "issue": "\u0423\u0431\u0435\u0440\u0438 \u043f\u0443\u043d\u043a\u0442 \u043c\u0435\u043d\u044e\n\u0423\u0431\u0435\u0440\u0438 mp3 \u043a\u043e\u043b\u043e\u043d\u043a\u0438 \u0438\u0437 \u043c\u0435\u043d\u044e. \u0418\u0445 \u0431\u043e\u043b\u044c\u0448\u0435 \u043d\u0435 \u0431\u0443\u0434\u0435\u0442.\r\n\u042f \u0437\u0430\u0448\u0435\u043b \u0432 \u0430\u0434\u043c\u0438\u043d\u043a\u0443 \u0438 \u0432\u044b\u043a\u043b\u044e\u0447\u0438\u043b \u043a\u0430\u0442\u0435\u0433\u043e\u0440\u0438\u044e. \u041d\u0430\u0434\u0435\u044e\u0441\u044c \u043f\u0440\u0430\u0432\u0438\u043b\u044c\u043d\u043e ) \r\nhttp://prntscr.com/k553lt\n", "before_files": [{"content": "import datetime\nimport math\n\nfrom django import template\nfrom django.conf import settings\nfrom django.contrib.humanize.templatetags.humanize import intcomma\nfrom django.template.defaultfilters import floatformat\nfrom django.urls import reverse\n\nfrom images.models import ImageMixin\nfrom pages.models import Page\n\nfrom shopelectro import config\nfrom shopelectro.models import Category\n\nregister = template.Library()\n\n\n# TODO - move it in catalog. Inspired by lp_electric\[email protected]_tag\ndef roots():\n return sorted(\n Category.objects\n .select_related('page')\n .get_cached_trees(), # https://goo.gl/rFKiku\n key=lambda x: x.page.position\n )\n\n\[email protected]_tag\ndef footer_links():\n return config.FOOTER_LINKS\n\n\n# TODO - move in pages. 
Inspired by LP electric\[email protected]\ndef class_name(model):\n \"\"\"Return Model name.\"\"\"\n return type(model).__name__\n\n\[email protected]_tag\ndef time_to_call():\n def is_weekend(t):\n return t.weekday() > 4\n\n def is_friday(t):\n return t.weekday() == 4\n\n def not_yet_opened(t):\n current_time = (t.hour, t.minute)\n open_time = (10, 00)\n return current_time < open_time and not is_weekend(t)\n\n def is_closed(t):\n current_time = (t.hour, t.minute)\n closing_time = (16, 30) if is_friday(t) else (17, 30)\n return current_time > closing_time\n\n when_we_call = {\n lambda now: is_weekend(now) or (is_friday(now) and is_closed(now)): '\u0412 \u043f\u043e\u043d\u0435\u0434\u0435\u043b\u044c\u043d\u0438\u043a \u0432 10:30',\n lambda now: not_yet_opened(now): '\u0421\u0435\u0433\u043e\u0434\u043d\u044f \u0432 10:30',\n lambda now: is_closed(now) and not (is_friday(now) or is_weekend(now)): '\u0417\u0430\u0432\u0442\u0440\u0430 \u0432 10:30',\n lambda _: True: '\u0412 \u0442\u0435\u0447\u0435\u043d\u0438\u0435 30 \u043c\u0438\u043d\u0443\u0442'\n }\n\n time_ = datetime.datetime.now()\n call = ' \u043f\u043e\u0437\u0432\u043e\u043d\u0438\u0442 \u043c\u0435\u043d\u0435\u0434\u0436\u0435\u0440 \u0438 \u043e\u0431\u0441\u0443\u0434\u0438\u0442 \u0434\u0435\u0442\u0430\u043b\u0438 \u0434\u043e\u0441\u0442\u0430\u0432\u043a\u0438.'\n for condition, time in when_we_call.items():\n if condition(time_):\n return time + call\n\n\n# TODO - move it in pages.\[email protected]_tag\ndef full_url(url_name, *args):\n return settings.BASE_URL + reverse(url_name, args=args)\n\n\[email protected]\ndef humanize_price(price):\n return intcomma(floatformat(price, 0))\n\n\n# Not good code, but duker at 06/10/2016 don't know how to fix it.\n# It makes Image model very complex.\[email protected]_tag\ndef get_img_alt(entity: ImageMixin):\n product_alt = '\u0424\u043e\u0442\u043e\u0433\u0440\u0430\u0444\u0438\u044f {}'\n logo_alt = '\u041b\u043e\u0433\u043e\u0442\u0438\u043f \u043a\u043e\u043c\u043f\u0430\u043d\u0438\u0438 Shopelectro'\n\n if not isinstance(entity, Page):\n return logo_alt\n\n # try one of this attributes to get pages name\n name_attrs = ['h1', 'title', 'name']\n entity_name = next(\n getattr(entity, attr)\n for attr in name_attrs\n if getattr(entity, attr)\n )\n return product_alt.format(entity_name)\n\n\[email protected]_tag\ndef main_image_or_logo(page: Page):\n \"\"\"Used for microdata.\"\"\"\n if hasattr(page, 'main_image') and page.main_image:\n return page.main_image.url\n else:\n return settings.STATIC_URL + 'images/logo.png'\n\n\[email protected]_tag('catalog/product_feedbacks_icons.html')\ndef icon_stars(rating=0):\n \"\"\"Render set of rating icons based on 1 through 5 rating values.\"\"\"\n full_icons = int(math.floor(rating))\n half_icons = 0 if rating == int(rating) else 1\n empty_icons = 5 - full_icons - half_icons\n\n return {\n 'full_icons': range(full_icons),\n 'half_icons': range(half_icons),\n 'empty_icons': range(empty_icons),\n }\n", "path": "shopelectro/templatetags/se_extras.py"}], "after_files": [{"content": "import datetime\nimport math\n\nfrom django import template\nfrom django.conf import settings\nfrom django.contrib.humanize.templatetags.humanize import intcomma\nfrom django.template.defaultfilters import floatformat\nfrom django.urls import reverse\n\nfrom images.models import ImageMixin\nfrom pages.models import Page\n\nfrom shopelectro import config\nfrom shopelectro.models import Category\n\nregister = template.Library()\n\n\n# TODO - move it in catalog. 
Inspired by lp_electric\[email protected]_tag\ndef roots():\n return sorted(\n filter(\n lambda x: x.page.is_active,\n Category.objects # https://goo.gl/rFKiku\n .select_related('page')\n .get_cached_trees()\n ),\n key=lambda x: x.page.position,\n )\n\n\[email protected]_tag\ndef footer_links():\n return config.FOOTER_LINKS\n\n\n# TODO - move in pages. Inspired by LP electric\[email protected]\ndef class_name(model):\n \"\"\"Return Model name.\"\"\"\n return type(model).__name__\n\n\[email protected]_tag\ndef time_to_call():\n def is_weekend(t):\n return t.weekday() > 4\n\n def is_friday(t):\n return t.weekday() == 4\n\n def not_yet_opened(t):\n current_time = (t.hour, t.minute)\n open_time = (10, 00)\n return current_time < open_time and not is_weekend(t)\n\n def is_closed(t):\n current_time = (t.hour, t.minute)\n closing_time = (16, 30) if is_friday(t) else (17, 30)\n return current_time > closing_time\n\n when_we_call = {\n lambda now: is_weekend(now) or (is_friday(now) and is_closed(now)): '\u0412 \u043f\u043e\u043d\u0435\u0434\u0435\u043b\u044c\u043d\u0438\u043a \u0432 10:30',\n lambda now: not_yet_opened(now): '\u0421\u0435\u0433\u043e\u0434\u043d\u044f \u0432 10:30',\n lambda now: is_closed(now) and not (is_friday(now) or is_weekend(now)): '\u0417\u0430\u0432\u0442\u0440\u0430 \u0432 10:30',\n lambda _: True: '\u0412 \u0442\u0435\u0447\u0435\u043d\u0438\u0435 30 \u043c\u0438\u043d\u0443\u0442'\n }\n\n time_ = datetime.datetime.now()\n call = ' \u043f\u043e\u0437\u0432\u043e\u043d\u0438\u0442 \u043c\u0435\u043d\u0435\u0434\u0436\u0435\u0440 \u0438 \u043e\u0431\u0441\u0443\u0434\u0438\u0442 \u0434\u0435\u0442\u0430\u043b\u0438 \u0434\u043e\u0441\u0442\u0430\u0432\u043a\u0438.'\n for condition, time in when_we_call.items():\n if condition(time_):\n return time + call\n\n\n# TODO - move it in pages.\[email protected]_tag\ndef full_url(url_name, *args):\n return settings.BASE_URL + reverse(url_name, args=args)\n\n\[email protected]\ndef humanize_price(price):\n return intcomma(floatformat(price, 0))\n\n\n# Not good code, but duker at 06/10/2016 don't know how to fix it.\n# It makes Image model very complex.\[email protected]_tag\ndef get_img_alt(entity: ImageMixin):\n product_alt = '\u0424\u043e\u0442\u043e\u0433\u0440\u0430\u0444\u0438\u044f {}'\n logo_alt = '\u041b\u043e\u0433\u043e\u0442\u0438\u043f \u043a\u043e\u043c\u043f\u0430\u043d\u0438\u0438 Shopelectro'\n\n if not isinstance(entity, Page):\n return logo_alt\n\n # try one of this attributes to get pages name\n name_attrs = ['h1', 'title', 'name']\n entity_name = next(\n getattr(entity, attr)\n for attr in name_attrs\n if getattr(entity, attr)\n )\n return product_alt.format(entity_name)\n\n\[email protected]_tag\ndef main_image_or_logo(page: Page):\n \"\"\"Used for microdata.\"\"\"\n if hasattr(page, 'main_image') and page.main_image:\n return page.main_image.url\n else:\n return settings.STATIC_URL + 'images/logo.png'\n\n\[email protected]_tag('catalog/product_feedbacks_icons.html')\ndef icon_stars(rating=0):\n \"\"\"Render set of rating icons based on 1 through 5 rating values.\"\"\"\n full_icons = int(math.floor(rating))\n half_icons = 0 if rating == int(rating) else 1\n empty_icons = 5 - full_icons - half_icons\n\n return {\n 'full_icons': range(full_icons),\n 'half_icons': range(half_icons),\n 'empty_icons': range(empty_icons),\n }\n", "path": "shopelectro/templatetags/se_extras.py"}]} |
gh_patches_debug_1283 | rasdani/github-patches | git_diff | Lightning-AI__torchmetrics-1288 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
inplace operation in pairwise_cosine_similarity
## 🐛 Bug
Hello !
The x, y values are modified inplace in the `pairwise_cosine_similarity` function.
This is not documented and may cause bugs that are difficult to find.
Thank you.
### To Reproduce
```python
import torch
from torchmetrics.functional import pairwise_cosine_similarity
x = torch.tensor([[2, 3], [3, 5], [5, 8]], dtype=torch.float32)
y = torch.tensor([[1, 0], [2, 1]], dtype=torch.float32)
print("Result:", pairwise_cosine_similarity(x, y))
print("X:", x)
print("Y:", y)
"""Out[0]
Result: tensor([[0.5547, 0.8682],
[0.5145, 0.8437],
[0.5300, 0.8533]])
X: tensor([[0.5547, 0.8321],
[0.5145, 0.8575],
[0.5300, 0.8480]])
Y: tensor([[1.0000, 0.0000],
[0.8944, 0.4472]])
"""
```
### Environment
torchmetrics==0.10.0
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/torchmetrics/functional/pairwise/cosine.py`
Content:
```
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import Optional
15
16 import torch
17 from torch import Tensor
18 from typing_extensions import Literal
19
20 from torchmetrics.functional.pairwise.helpers import _check_input, _reduce_distance_matrix
21 from torchmetrics.utilities.compute import _safe_matmul
22
23
24 def _pairwise_cosine_similarity_update(
25 x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None
26 ) -> Tensor:
27 """Calculates the pairwise cosine similarity matrix.
28
29 Args:
30 x: tensor of shape ``[N,d]``
31 y: tensor of shape ``[M,d]``
32 zero_diagonal: determines if the diagonal of the distance matrix should be set to zero
33 """
34 x, y, zero_diagonal = _check_input(x, y, zero_diagonal)
35
36 norm = torch.norm(x, p=2, dim=1)
37 x /= norm.unsqueeze(1)
38 norm = torch.norm(y, p=2, dim=1)
39 y /= norm.unsqueeze(1)
40
41 distance = _safe_matmul(x, y)
42 if zero_diagonal:
43 distance.fill_diagonal_(0)
44 return distance
45
46
47 def pairwise_cosine_similarity(
48 x: Tensor,
49 y: Optional[Tensor] = None,
50 reduction: Literal["mean", "sum", "none", None] = None,
51 zero_diagonal: Optional[bool] = None,
52 ) -> Tensor:
53 r"""Calculates pairwise cosine similarity:
54
55 .. math::
56 s_{cos}(x,y) = \frac{<x,y>}{||x|| \cdot ||y||}
57 = \frac{\sum_{d=1}^D x_d \cdot y_d }{\sqrt{\sum_{d=1}^D x_i^2} \cdot \sqrt{\sum_{d=1}^D y_i^2}}
58
59 If both :math:`x` and :math:`y` are passed in, the calculation will be performed pairwise
60 between the rows of :math:`x` and :math:`y`.
61 If only :math:`x` is passed in, the calculation will be performed between the rows of :math:`x`.
62
63 Args:
64 x: Tensor with shape ``[N, d]``
65 y: Tensor with shape ``[M, d]``, optional
66 reduction: reduction to apply along the last dimension. Choose between `'mean'`, `'sum'`
67 (applied along column dimension) or `'none'`, `None` for no reduction
68 zero_diagonal: if the diagonal of the distance matrix should be set to 0. If only :math:`x` is given
69 this defaults to ``True`` else if :math:`y` is also given it defaults to ``False``
70
71 Returns:
72 A ``[N,N]`` matrix of distances if only ``x`` is given, else a ``[N,M]`` matrix
73
74 Example:
75 >>> import torch
76 >>> from torchmetrics.functional import pairwise_cosine_similarity
77 >>> x = torch.tensor([[2, 3], [3, 5], [5, 8]], dtype=torch.float32)
78 >>> y = torch.tensor([[1, 0], [2, 1]], dtype=torch.float32)
79 >>> pairwise_cosine_similarity(x, y)
80 tensor([[0.5547, 0.8682],
81 [0.5145, 0.8437],
82 [0.5300, 0.8533]])
83 >>> pairwise_cosine_similarity(x)
84 tensor([[0.0000, 0.9989, 0.9996],
85 [0.9989, 0.0000, 0.9998],
86 [0.9996, 0.9998, 0.0000]])
87 """
88 distance = _pairwise_cosine_similarity_update(x, y, zero_diagonal)
89 return _reduce_distance_matrix(distance, reduction)
90
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/torchmetrics/functional/pairwise/cosine.py b/src/torchmetrics/functional/pairwise/cosine.py
--- a/src/torchmetrics/functional/pairwise/cosine.py
+++ b/src/torchmetrics/functional/pairwise/cosine.py
@@ -34,9 +34,9 @@
x, y, zero_diagonal = _check_input(x, y, zero_diagonal)
norm = torch.norm(x, p=2, dim=1)
- x /= norm.unsqueeze(1)
+ x = x / norm.unsqueeze(1)
norm = torch.norm(y, p=2, dim=1)
- y /= norm.unsqueeze(1)
+ y = y / norm.unsqueeze(1)
distance = _safe_matmul(x, y)
if zero_diagonal:
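
The fix replaces the augmented assignments (`x /= ...`, `y /= ...`) with plain division, which rebinds the local names instead of writing into the caller's tensors. A small check in the spirit of the issue's reproduction, as a sketch that assumes a torchmetrics build containing this patch:

```python
import torch
from torchmetrics.functional import pairwise_cosine_similarity

x = torch.tensor([[2.0, 3.0], [3.0, 5.0], [5.0, 8.0]])
y = torch.tensor([[1.0, 0.0], [2.0, 1.0]])
x_before, y_before = x.clone(), y.clone()

pairwise_cosine_similarity(x, y)

# With out-of-place division the inputs are no longer normalised in place.
assert torch.equal(x, x_before) and torch.equal(y, y_before)
```
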
| {"golden_diff": "diff --git a/src/torchmetrics/functional/pairwise/cosine.py b/src/torchmetrics/functional/pairwise/cosine.py\n--- a/src/torchmetrics/functional/pairwise/cosine.py\n+++ b/src/torchmetrics/functional/pairwise/cosine.py\n@@ -34,9 +34,9 @@\n x, y, zero_diagonal = _check_input(x, y, zero_diagonal)\n \n norm = torch.norm(x, p=2, dim=1)\n- x /= norm.unsqueeze(1)\n+ x = x / norm.unsqueeze(1)\n norm = torch.norm(y, p=2, dim=1)\n- y /= norm.unsqueeze(1)\n+ y = y / norm.unsqueeze(1)\n \n distance = _safe_matmul(x, y)\n if zero_diagonal:\n", "issue": "inplace operation in pairwise_cosine_similarity\n## \ud83d\udc1b Bug\r\nHello !\r\nThe x, y values are modified inplace in the `pairwise_cosine_similarity` function. \r\nThis is not documented and may cause bugs that are difficult to find. \r\nThank you.\r\n\r\n### To Reproduce\r\n\r\n```python\r\nimport torch\r\nfrom torchmetrics.functional import pairwise_cosine_similarity\r\nx = torch.tensor([[2, 3], [3, 5], [5, 8]], dtype=torch.float32)\r\ny = torch.tensor([[1, 0], [2, 1]], dtype=torch.float32)\r\nprint(\"Result:\", pairwise_cosine_similarity(x, y))\r\nprint(\"X:\", x)\r\nprint(\"Y:\", y)\r\n\"\"\"Out[0]\r\nResult: tensor([[0.5547, 0.8682],\r\n [0.5145, 0.8437],\r\n [0.5300, 0.8533]])\r\nX: tensor([[0.5547, 0.8321],\r\n [0.5145, 0.8575],\r\n [0.5300, 0.8480]])\r\nY: tensor([[1.0000, 0.0000],\r\n [0.8944, 0.4472]])\r\n\"\"\"\r\n```\r\n\r\n### Environment\r\ntorchmetrics==0.10.0\r\n\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Optional\n\nimport torch\nfrom torch import Tensor\nfrom typing_extensions import Literal\n\nfrom torchmetrics.functional.pairwise.helpers import _check_input, _reduce_distance_matrix\nfrom torchmetrics.utilities.compute import _safe_matmul\n\n\ndef _pairwise_cosine_similarity_update(\n x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None\n) -> Tensor:\n \"\"\"Calculates the pairwise cosine similarity matrix.\n\n Args:\n x: tensor of shape ``[N,d]``\n y: tensor of shape ``[M,d]``\n zero_diagonal: determines if the diagonal of the distance matrix should be set to zero\n \"\"\"\n x, y, zero_diagonal = _check_input(x, y, zero_diagonal)\n\n norm = torch.norm(x, p=2, dim=1)\n x /= norm.unsqueeze(1)\n norm = torch.norm(y, p=2, dim=1)\n y /= norm.unsqueeze(1)\n\n distance = _safe_matmul(x, y)\n if zero_diagonal:\n distance.fill_diagonal_(0)\n return distance\n\n\ndef pairwise_cosine_similarity(\n x: Tensor,\n y: Optional[Tensor] = None,\n reduction: Literal[\"mean\", \"sum\", \"none\", None] = None,\n zero_diagonal: Optional[bool] = None,\n) -> Tensor:\n r\"\"\"Calculates pairwise cosine similarity:\n\n .. 
math::\n s_{cos}(x,y) = \\frac{<x,y>}{||x|| \\cdot ||y||}\n = \\frac{\\sum_{d=1}^D x_d \\cdot y_d }{\\sqrt{\\sum_{d=1}^D x_i^2} \\cdot \\sqrt{\\sum_{d=1}^D y_i^2}}\n\n If both :math:`x` and :math:`y` are passed in, the calculation will be performed pairwise\n between the rows of :math:`x` and :math:`y`.\n If only :math:`x` is passed in, the calculation will be performed between the rows of :math:`x`.\n\n Args:\n x: Tensor with shape ``[N, d]``\n y: Tensor with shape ``[M, d]``, optional\n reduction: reduction to apply along the last dimension. Choose between `'mean'`, `'sum'`\n (applied along column dimension) or `'none'`, `None` for no reduction\n zero_diagonal: if the diagonal of the distance matrix should be set to 0. If only :math:`x` is given\n this defaults to ``True`` else if :math:`y` is also given it defaults to ``False``\n\n Returns:\n A ``[N,N]`` matrix of distances if only ``x`` is given, else a ``[N,M]`` matrix\n\n Example:\n >>> import torch\n >>> from torchmetrics.functional import pairwise_cosine_similarity\n >>> x = torch.tensor([[2, 3], [3, 5], [5, 8]], dtype=torch.float32)\n >>> y = torch.tensor([[1, 0], [2, 1]], dtype=torch.float32)\n >>> pairwise_cosine_similarity(x, y)\n tensor([[0.5547, 0.8682],\n [0.5145, 0.8437],\n [0.5300, 0.8533]])\n >>> pairwise_cosine_similarity(x)\n tensor([[0.0000, 0.9989, 0.9996],\n [0.9989, 0.0000, 0.9998],\n [0.9996, 0.9998, 0.0000]])\n \"\"\"\n distance = _pairwise_cosine_similarity_update(x, y, zero_diagonal)\n return _reduce_distance_matrix(distance, reduction)\n", "path": "src/torchmetrics/functional/pairwise/cosine.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Optional\n\nimport torch\nfrom torch import Tensor\nfrom typing_extensions import Literal\n\nfrom torchmetrics.functional.pairwise.helpers import _check_input, _reduce_distance_matrix\nfrom torchmetrics.utilities.compute import _safe_matmul\n\n\ndef _pairwise_cosine_similarity_update(\n x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None\n) -> Tensor:\n \"\"\"Calculates the pairwise cosine similarity matrix.\n\n Args:\n x: tensor of shape ``[N,d]``\n y: tensor of shape ``[M,d]``\n zero_diagonal: determines if the diagonal of the distance matrix should be set to zero\n \"\"\"\n x, y, zero_diagonal = _check_input(x, y, zero_diagonal)\n\n norm = torch.norm(x, p=2, dim=1)\n x = x / norm.unsqueeze(1)\n norm = torch.norm(y, p=2, dim=1)\n y = y / norm.unsqueeze(1)\n\n distance = _safe_matmul(x, y)\n if zero_diagonal:\n distance.fill_diagonal_(0)\n return distance\n\n\ndef pairwise_cosine_similarity(\n x: Tensor,\n y: Optional[Tensor] = None,\n reduction: Literal[\"mean\", \"sum\", \"none\", None] = None,\n zero_diagonal: Optional[bool] = None,\n) -> Tensor:\n r\"\"\"Calculates pairwise cosine similarity:\n\n .. 
math::\n s_{cos}(x,y) = \\frac{<x,y>}{||x|| \\cdot ||y||}\n = \\frac{\\sum_{d=1}^D x_d \\cdot y_d }{\\sqrt{\\sum_{d=1}^D x_i^2} \\cdot \\sqrt{\\sum_{d=1}^D y_i^2}}\n\n If both :math:`x` and :math:`y` are passed in, the calculation will be performed pairwise\n between the rows of :math:`x` and :math:`y`.\n If only :math:`x` is passed in, the calculation will be performed between the rows of :math:`x`.\n\n Args:\n x: Tensor with shape ``[N, d]``\n y: Tensor with shape ``[M, d]``, optional\n reduction: reduction to apply along the last dimension. Choose between `'mean'`, `'sum'`\n (applied along column dimension) or `'none'`, `None` for no reduction\n zero_diagonal: if the diagonal of the distance matrix should be set to 0. If only :math:`x` is given\n this defaults to ``True`` else if :math:`y` is also given it defaults to ``False``\n\n Returns:\n A ``[N,N]`` matrix of distances if only ``x`` is given, else a ``[N,M]`` matrix\n\n Example:\n >>> import torch\n >>> from torchmetrics.functional import pairwise_cosine_similarity\n >>> x = torch.tensor([[2, 3], [3, 5], [5, 8]], dtype=torch.float32)\n >>> y = torch.tensor([[1, 0], [2, 1]], dtype=torch.float32)\n >>> pairwise_cosine_similarity(x, y)\n tensor([[0.5547, 0.8682],\n [0.5145, 0.8437],\n [0.5300, 0.8533]])\n >>> pairwise_cosine_similarity(x)\n tensor([[0.0000, 0.9989, 0.9996],\n [0.9989, 0.0000, 0.9998],\n [0.9996, 0.9998, 0.0000]])\n \"\"\"\n distance = _pairwise_cosine_similarity_update(x, y, zero_diagonal)\n return _reduce_distance_matrix(distance, reduction)\n", "path": "src/torchmetrics/functional/pairwise/cosine.py"}]} |
gh_patches_debug_1284 | rasdani/github-patches | git_diff | Pycord-Development__pycord-576 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SlashCommand Groups Issues
This issue is to keep track of the issues since we reworked groups.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/app_commands/slash_groups.py`
Content:
```
1 import discord
2
3 bot = discord.Bot()
4
5 # If you use commands.Bot, @bot.slash_command should be used for
6 # slash commands. You can use @bot.slash_command with discord.Bot as well
7
8 math = bot.command_group(
9 "math", "Commands related to mathematics."
10 ) # create a slash command group
11
12
13 @math.command(guild_ids=[...]) # create a slash command
14 async def add(ctx, num1: int, num2: int):
15 """Get the sum of 2 integers."""
16 await ctx.respond(f"The sum of these numbers is **{num1+num2}**")
17
18
19 # another way, creating the class manually
20
21 from discord.commands import SlashCommandGroup
22
23 math = SlashCommandGroup("math", "Commands related to mathematics.")
24
25
26 @math.command(guild_ids=[...])
27 async def add(ctx, num1: int, num2: int):
28 ...
29
30
31 bot.add_application_command(math)
32
33 bot.run("TOKEN")
34
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/app_commands/slash_groups.py b/examples/app_commands/slash_groups.py
--- a/examples/app_commands/slash_groups.py
+++ b/examples/app_commands/slash_groups.py
@@ -5,7 +5,7 @@
# If you use commands.Bot, @bot.slash_command should be used for
# slash commands. You can use @bot.slash_command with discord.Bot as well
-math = bot.command_group(
+math = bot.create_group(
"math", "Commands related to mathematics."
) # create a slash command group
| {"golden_diff": "diff --git a/examples/app_commands/slash_groups.py b/examples/app_commands/slash_groups.py\n--- a/examples/app_commands/slash_groups.py\n+++ b/examples/app_commands/slash_groups.py\n@@ -5,7 +5,7 @@\n # If you use commands.Bot, @bot.slash_command should be used for\r\n # slash commands. You can use @bot.slash_command with discord.Bot as well\r\n \r\n-math = bot.command_group(\r\n+math = bot.create_group(\r\n \"math\", \"Commands related to mathematics.\"\r\n ) # create a slash command group\n", "issue": "SlashCommand Groups Issues\nThis issue is to keep track of the issues since we reworked groups.\n", "before_files": [{"content": "import discord\r\n\r\nbot = discord.Bot()\r\n\r\n# If you use commands.Bot, @bot.slash_command should be used for\r\n# slash commands. You can use @bot.slash_command with discord.Bot as well\r\n\r\nmath = bot.command_group(\r\n \"math\", \"Commands related to mathematics.\"\r\n) # create a slash command group\r\n\r\n\r\[email protected](guild_ids=[...]) # create a slash command\r\nasync def add(ctx, num1: int, num2: int):\r\n \"\"\"Get the sum of 2 integers.\"\"\"\r\n await ctx.respond(f\"The sum of these numbers is **{num1+num2}**\")\r\n\r\n\r\n# another way, creating the class manually\r\n\r\nfrom discord.commands import SlashCommandGroup\r\n\r\nmath = SlashCommandGroup(\"math\", \"Commands related to mathematics.\")\r\n\r\n\r\[email protected](guild_ids=[...])\r\nasync def add(ctx, num1: int, num2: int):\r\n ...\r\n\r\n\r\nbot.add_application_command(math)\r\n\r\nbot.run(\"TOKEN\")\r\n", "path": "examples/app_commands/slash_groups.py"}], "after_files": [{"content": "import discord\r\n\r\nbot = discord.Bot()\r\n\r\n# If you use commands.Bot, @bot.slash_command should be used for\r\n# slash commands. You can use @bot.slash_command with discord.Bot as well\r\n\r\nmath = bot.create_group(\r\n \"math\", \"Commands related to mathematics.\"\r\n) # create a slash command group\r\n\r\n\r\[email protected](guild_ids=[...]) # create a slash command\r\nasync def add(ctx, num1: int, num2: int):\r\n \"\"\"Get the sum of 2 integers.\"\"\"\r\n await ctx.respond(f\"The sum of these numbers is **{num1+num2}**\")\r\n\r\n\r\n# another way, creating the class manually\r\n\r\nfrom discord.commands import SlashCommandGroup\r\n\r\nmath = SlashCommandGroup(\"math\", \"Commands related to mathematics.\")\r\n\r\n\r\[email protected](guild_ids=[...])\r\nasync def add(ctx, num1: int, num2: int):\r\n ...\r\n\r\n\r\nbot.add_application_command(math)\r\n\r\nbot.run(\"TOKEN\")\r\n", "path": "examples/app_commands/slash_groups.py"}]} |
gh_patches_debug_1285 | rasdani/github-patches | git_diff | deis__deis-3403 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
deis run cannot handle large output
I need to get a large generated text file from deis. I do the following:
`deis run -- cat foo`
If foo is large enough (in my case, 7468 lines, 26438 words, 186989 bytes), then `deis run` hangs and no output gets displayed.
Running `deis logs` afterwards shows the full output of the `cat` command in the logs.
If I run `deis run -- "cat foo | head -n 20"`, everything works OK.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `controller/scheduler/fleet.py`
Content:
```
1 import cStringIO
2 import base64
3 import copy
4 import httplib
5 import json
6 import paramiko
7 import socket
8 import re
9 import time
10
11 from django.conf import settings
12
13 from .states import JobState
14
15
16 MATCH = re.compile(
17 '(?P<app>[a-z0-9-]+)_?(?P<version>v[0-9]+)?\.?(?P<c_type>[a-z-_]+)?.(?P<c_num>[0-9]+)')
18 RETRIES = 3
19
20
21 class UHTTPConnection(httplib.HTTPConnection):
22 """Subclass of Python library HTTPConnection that uses a Unix domain socket.
23 """
24
25 def __init__(self, path):
26 httplib.HTTPConnection.__init__(self, 'localhost')
27 self.path = path
28
29 def connect(self):
30 sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
31 sock.connect(self.path)
32 self.sock = sock
33
34
35 class FleetHTTPClient(object):
36
37 def __init__(self, target, auth, options, pkey):
38 self.target = target
39 self.auth = auth
40 self.options = options
41 self.pkey = pkey
42 # single global connection
43 self.conn = UHTTPConnection(self.target)
44
45 # connection helpers
46
47 def _request_unit(self, method, name, body=None):
48 headers = {'Content-Type': 'application/json'}
49 self.conn.request(method, '/v1-alpha/units/{name}.service'.format(**locals()),
50 headers=headers, body=json.dumps(body))
51 return self.conn.getresponse()
52
53 def _get_unit(self, name):
54 for attempt in range(RETRIES):
55 try:
56 resp = self._request_unit('GET', name)
57 data = resp.read()
58 if not 200 <= resp.status <= 299:
59 errmsg = "Failed to retrieve unit: {} {} - {}".format(
60 resp.status, resp.reason, data)
61 raise RuntimeError(errmsg)
62 return data
63 except:
64 if attempt >= (RETRIES - 1):
65 raise
66
67 def _put_unit(self, name, body):
68 for attempt in range(RETRIES):
69 try:
70 resp = self._request_unit('PUT', name, body)
71 data = resp.read()
72 if not 200 <= resp.status <= 299:
73 errmsg = "Failed to create unit: {} {} - {}".format(
74 resp.status, resp.reason, data)
75 raise RuntimeError(errmsg)
76 return data
77 except:
78 if attempt >= (RETRIES - 1):
79 raise
80
81 def _delete_unit(self, name):
82 headers = {'Content-Type': 'application/json'}
83 self.conn.request('DELETE', '/v1-alpha/units/{name}.service'.format(**locals()),
84 headers=headers)
85 resp = self.conn.getresponse()
86 data = resp.read()
87 if resp.status not in (404, 204):
88 errmsg = "Failed to delete unit: {} {} - {}".format(
89 resp.status, resp.reason, data)
90 raise RuntimeError(errmsg)
91 return data
92
93 def _get_state(self, name=None):
94 headers = {'Content-Type': 'application/json'}
95 url = '/v1-alpha/state'
96 if name:
97 url += '?unitName={name}.service'.format(**locals())
98 self.conn.request('GET', url, headers=headers)
99 resp = self.conn.getresponse()
100 data = resp.read()
101 if resp.status not in (200,):
102 errmsg = "Failed to retrieve state: {} {} - {}".format(
103 resp.status, resp.reason, data)
104 raise RuntimeError(errmsg)
105 return json.loads(data)
106
107 def _get_machines(self):
108 headers = {'Content-Type': 'application/json'}
109 url = '/v1-alpha/machines'
110 self.conn.request('GET', url, headers=headers)
111 resp = self.conn.getresponse()
112 data = resp.read()
113 if resp.status not in (200,):
114 errmsg = "Failed to retrieve machines: {} {} - {}".format(
115 resp.status, resp.reason, data)
116 raise RuntimeError(errmsg)
117 return json.loads(data)
118
119 # container api
120
121 def create(self, name, image, command='', template=None, **kwargs):
122 """Create a container"""
123 self._create_container(name, image, command,
124 template or copy.deepcopy(CONTAINER_TEMPLATE), **kwargs)
125
126 def _create_container(self, name, image, command, unit, **kwargs):
127 l = locals().copy()
128 l.update(re.match(MATCH, name).groupdict())
129 # prepare memory limit for the container type
130 mem = kwargs.get('memory', {}).get(l['c_type'], None)
131 if mem:
132 l.update({'memory': '-m {}'.format(mem.lower())})
133 else:
134 l.update({'memory': ''})
135 # prepare memory limit for the container type
136 cpu = kwargs.get('cpu', {}).get(l['c_type'], None)
137 if cpu:
138 l.update({'cpu': '-c {}'.format(cpu)})
139 else:
140 l.update({'cpu': ''})
141 # set unit hostname
142 l.update({'hostname': self._get_hostname(name)})
143 # should a special entrypoint be used
144 entrypoint = kwargs.get('entrypoint')
145 if entrypoint:
146 l.update({'entrypoint': '{}'.format(entrypoint)})
147 # encode command as utf-8
148 if isinstance(l.get('command'), basestring):
149 l['command'] = l['command'].encode('utf-8')
150 # construct unit from template
151 for f in unit:
152 f['value'] = f['value'].format(**l)
153 # prepare tags only if one was provided
154 tags = kwargs.get('tags', {})
155 if tags:
156 tagset = ' '.join(['"{}={}"'.format(k, v) for k, v in tags.items()])
157 unit.append({"section": "X-Fleet", "name": "MachineMetadata",
158 "value": tagset})
159 # post unit to fleet
160 self._put_unit(name, {"desiredState": "loaded", "options": unit})
161
162 def _get_hostname(self, application_name):
163 hostname = settings.UNIT_HOSTNAME
164 if hostname == "default":
165 return ''
166 elif hostname == "application":
167 # replace underscore with dots, since underscore is not valid in DNS hostnames
168 dns_name = application_name.replace("_", ".")
169 return '-h ' + dns_name
170 elif hostname == "server":
171 return '-h %H'
172 else:
173 raise RuntimeError('Unsupported hostname: ' + hostname)
174
175 def start(self, name):
176 """Start a container"""
177 self._put_unit(name, {'desiredState': 'launched'})
178 self._wait_for_container_running(name)
179
180 def _wait_for_container_state(self, name):
181 # wait for container to get scheduled
182 for _ in range(30):
183 states = self._get_state(name)
184 if states and len(states.get('states', [])) == 1:
185 return states.get('states')[0]
186 time.sleep(1)
187 else:
188 raise RuntimeError('container timeout while retrieving state')
189
190 def _wait_for_container_running(self, name):
191 # we bump to 20 minutes here to match the timeout on the router and in the app unit files
192 for _ in range(1200):
193 if self.state(name) == JobState.up:
194 return
195 time.sleep(1)
196 else:
197 raise RuntimeError('container failed to start')
198
199 def _wait_for_destroy(self, name):
200 for _ in range(30):
201 if not self._get_state(name):
202 break
203 time.sleep(1)
204 else:
205 raise RuntimeError('timeout on container destroy')
206
207 def stop(self, name):
208 """Stop a container"""
209 raise NotImplementedError
210
211 def destroy(self, name):
212 """Destroy a container"""
213 # call all destroy functions, ignoring any errors
214 try:
215 self._destroy_container(name)
216 except:
217 pass
218 self._wait_for_destroy(name)
219
220 def _destroy_container(self, name):
221 for attempt in range(RETRIES):
222 try:
223 self._delete_unit(name)
224 break
225 except:
226 if attempt == (RETRIES - 1): # account for 0 indexing
227 raise
228
229 def run(self, name, image, entrypoint, command): # noqa
230 """Run a one-off command"""
231 self._create_container(name, image, command, copy.deepcopy(RUN_TEMPLATE),
232 entrypoint=entrypoint)
233 # launch the container
234 self._put_unit(name, {'desiredState': 'launched'})
235 # wait for the container to get scheduled
236 state = self._wait_for_container_state(name)
237
238 try:
239 machineID = state.get('machineID')
240
241 # find the machine
242 machines = self._get_machines()
243 if not machines:
244 raise RuntimeError('no available hosts to run command')
245
246 # find the machine's primaryIP
247 primaryIP = None
248 for m in machines.get('machines', []):
249 if m['id'] == machineID:
250 primaryIP = m['primaryIP']
251 if not primaryIP:
252 raise RuntimeError('could not find host')
253
254 # prepare ssh key
255 file_obj = cStringIO.StringIO(base64.b64decode(self.pkey))
256 pkey = paramiko.RSAKey(file_obj=file_obj)
257
258 # grab output via docker logs over SSH
259 ssh = paramiko.SSHClient()
260 ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
261 ssh.connect(primaryIP, username="core", pkey=pkey)
262 # share a transport
263 tran = ssh.get_transport()
264
265 def _do_ssh(cmd):
266 chan = tran.open_session()
267 # get a pty so stdout/stderr look right
268 chan.get_pty()
269 out = chan.makefile()
270 chan.exec_command(cmd)
271 rc, output = chan.recv_exit_status(), out.read()
272 return rc, output
273
274 # wait for container to launch
275 # we loop indefinitely here, as we have no idea how long the docker pull will take
276 while True:
277 rc, _ = _do_ssh('docker inspect {name}'.format(**locals()))
278 if rc == 0:
279 break
280 time.sleep(1)
281 else:
282 raise RuntimeError('failed to create container')
283
284 # wait for container to start
285 for _ in range(2):
286 _rc, _output = _do_ssh('docker inspect {name}'.format(**locals()))
287 if _rc != 0:
288 raise RuntimeError('failed to inspect container')
289 _container = json.loads(_output)
290 started_at = _container[0]["State"]["StartedAt"]
291 if not started_at.startswith('0001'):
292 break
293 time.sleep(1)
294 else:
295 raise RuntimeError('container failed to start')
296
297 # wait for container to complete
298 for _ in range(1200):
299 _rc, _output = _do_ssh('docker inspect {name}'.format(**locals()))
300 if _rc != 0:
301 raise RuntimeError('failed to inspect container')
302 _container = json.loads(_output)
303 finished_at = _container[0]["State"]["FinishedAt"]
304 if not finished_at.startswith('0001'):
305 break
306 time.sleep(1)
307 else:
308 raise RuntimeError('container timed out')
309
310 # gather container output
311 _rc, output = _do_ssh('docker logs {name}'.format(**locals()))
312 if _rc != 0:
313 raise RuntimeError('could not attach to container')
314
315 # determine container exit code
316 _rc, _output = _do_ssh('docker inspect {name}'.format(**locals()))
317 if _rc != 0:
318 raise RuntimeError('could not determine exit code')
319 container = json.loads(_output)
320 rc = container[0]["State"]["ExitCode"]
321
322 finally:
323 # cleanup
324 self._destroy_container(name)
325 self._wait_for_destroy(name)
326
327 # return rc and output
328 return rc, output
329
330 def state(self, name):
331 systemdActiveStateMap = {
332 "active": "up",
333 "reloading": "down",
334 "inactive": "created",
335 "failed": "crashed",
336 "activating": "down",
337 "deactivating": "down",
338 }
339 try:
340 # NOTE (bacongobbler): this call to ._get_unit() acts as a pre-emptive check to
341 # determine if the job no longer exists (will raise a RuntimeError on 404)
342 self._get_unit(name)
343 state = self._wait_for_container_state(name)
344 activeState = state['systemdActiveState']
345 # FIXME (bacongobbler): when fleet loads a job, sometimes it'll automatically start and
346 # stop the container, which in our case will return as 'failed', even though
347 # the container is perfectly fine.
348 if activeState == 'failed':
349 if state['systemdLoadState'] == 'loaded':
350 return JobState.created
351 return getattr(JobState, systemdActiveStateMap[activeState])
352 except KeyError:
353 # failed retrieving a proper response from the fleet API
354 return JobState.error
355 except RuntimeError:
356 # failed to retrieve a response from the fleet API,
357 # which means it does not exist
358 return JobState.destroyed
359
360 def attach(self, name):
361 """
362 Attach to a job's stdin, stdout and stderr
363 """
364 raise NotImplementedError
365
366 SchedulerClient = FleetHTTPClient
367
368
369 CONTAINER_TEMPLATE = [
370 {"section": "Unit", "name": "Description", "value": "{name}"},
371 {"section": "Service", "name": "ExecStartPre", "value": '''/bin/sh -c "IMAGE=$(etcdctl get /deis/registry/host 2>&1):$(etcdctl get /deis/registry/port 2>&1)/{image}; docker pull $IMAGE"'''}, # noqa
372 {"section": "Service", "name": "ExecStartPre", "value": '''/bin/sh -c "docker inspect {name} >/dev/null 2>&1 && docker rm -f {name} || true"'''}, # noqa
373 {"section": "Service", "name": "ExecStart", "value": '''/bin/sh -c "IMAGE=$(etcdctl get /deis/registry/host 2>&1):$(etcdctl get /deis/registry/port 2>&1)/{image}; port=$(docker inspect -f '{{{{range $k, $v := .ContainerConfig.ExposedPorts }}}}{{{{$k}}}}{{{{end}}}}' $IMAGE | cut -d/ -f1) ; docker run --name {name} {memory} {cpu} {hostname} -P -e PORT=$port $IMAGE {command}"'''}, # noqa
374 {"section": "Service", "name": "ExecStop", "value": '''/usr/bin/docker stop {name}'''},
375 {"section": "Service", "name": "ExecStop", "value": '''/usr/bin/docker rm -f {name}'''},
376 {"section": "Service", "name": "TimeoutStartSec", "value": "20m"},
377 {"section": "Service", "name": "TimeoutStopSec", "value": "10"},
378 {"section": "Service", "name": "RestartSec", "value": "5"},
379 {"section": "Service", "name": "Restart", "value": "on-failure"},
380 ]
381
382
383 RUN_TEMPLATE = [
384 {"section": "Unit", "name": "Description", "value": "{name} admin command"},
385 {"section": "Service", "name": "ExecStartPre", "value": '''/bin/sh -c "IMAGE=$(etcdctl get /deis/registry/host 2>&1):$(etcdctl get /deis/registry/port 2>&1)/{image}; docker pull $IMAGE"'''}, # noqa
386 {"section": "Service", "name": "ExecStartPre", "value": '''/bin/sh -c "docker inspect {name} >/dev/null 2>&1 && docker rm -f {name} || true"'''}, # noqa
387 {"section": "Service", "name": "ExecStart", "value": '''/bin/sh -c "IMAGE=$(etcdctl get /deis/registry/host 2>&1):$(etcdctl get /deis/registry/port 2>&1)/{image}; docker run --name {name} --entrypoint={entrypoint} -a stdout -a stderr $IMAGE {command}"'''}, # noqa
388 {"section": "Service", "name": "TimeoutStartSec", "value": "20m"},
389 ]
390
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/controller/scheduler/fleet.py b/controller/scheduler/fleet.py
--- a/controller/scheduler/fleet.py
+++ b/controller/scheduler/fleet.py
@@ -268,7 +268,8 @@
chan.get_pty()
out = chan.makefile()
chan.exec_command(cmd)
- rc, output = chan.recv_exit_status(), out.read()
+ output = out.read()
+ rc = chan.recv_exit_status()
return rc, output
# wait for container to launch
| {"golden_diff": "diff --git a/controller/scheduler/fleet.py b/controller/scheduler/fleet.py\n--- a/controller/scheduler/fleet.py\n+++ b/controller/scheduler/fleet.py\n@@ -268,7 +268,8 @@\n chan.get_pty()\n out = chan.makefile()\n chan.exec_command(cmd)\n- rc, output = chan.recv_exit_status(), out.read()\n+ output = out.read()\n+ rc = chan.recv_exit_status()\n return rc, output\n \n # wait for container to launch\n", "issue": "deis run cannot handle large output\nI need to get a large generated text file from deis. I do the following\n\n`deis run -- cat foo`\n\nIf foo is large enough, in my case is (7468 lines 26438 words 186989 bytes) , then `deis run` hangs and no output gets displayed.\n\nRunning `deis logs` afterwards shows the full output of the cat command in the logs.\n\nIf I run `deis run -- \"cat foo | head -n 20\"` everything works OK.\n\n", "before_files": [{"content": "import cStringIO\nimport base64\nimport copy\nimport httplib\nimport json\nimport paramiko\nimport socket\nimport re\nimport time\n\nfrom django.conf import settings\n\nfrom .states import JobState\n\n\nMATCH = re.compile(\n '(?P<app>[a-z0-9-]+)_?(?P<version>v[0-9]+)?\\.?(?P<c_type>[a-z-_]+)?.(?P<c_num>[0-9]+)')\nRETRIES = 3\n\n\nclass UHTTPConnection(httplib.HTTPConnection):\n \"\"\"Subclass of Python library HTTPConnection that uses a Unix domain socket.\n \"\"\"\n\n def __init__(self, path):\n httplib.HTTPConnection.__init__(self, 'localhost')\n self.path = path\n\n def connect(self):\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n sock.connect(self.path)\n self.sock = sock\n\n\nclass FleetHTTPClient(object):\n\n def __init__(self, target, auth, options, pkey):\n self.target = target\n self.auth = auth\n self.options = options\n self.pkey = pkey\n # single global connection\n self.conn = UHTTPConnection(self.target)\n\n # connection helpers\n\n def _request_unit(self, method, name, body=None):\n headers = {'Content-Type': 'application/json'}\n self.conn.request(method, '/v1-alpha/units/{name}.service'.format(**locals()),\n headers=headers, body=json.dumps(body))\n return self.conn.getresponse()\n\n def _get_unit(self, name):\n for attempt in range(RETRIES):\n try:\n resp = self._request_unit('GET', name)\n data = resp.read()\n if not 200 <= resp.status <= 299:\n errmsg = \"Failed to retrieve unit: {} {} - {}\".format(\n resp.status, resp.reason, data)\n raise RuntimeError(errmsg)\n return data\n except:\n if attempt >= (RETRIES - 1):\n raise\n\n def _put_unit(self, name, body):\n for attempt in range(RETRIES):\n try:\n resp = self._request_unit('PUT', name, body)\n data = resp.read()\n if not 200 <= resp.status <= 299:\n errmsg = \"Failed to create unit: {} {} - {}\".format(\n resp.status, resp.reason, data)\n raise RuntimeError(errmsg)\n return data\n except:\n if attempt >= (RETRIES - 1):\n raise\n\n def _delete_unit(self, name):\n headers = {'Content-Type': 'application/json'}\n self.conn.request('DELETE', '/v1-alpha/units/{name}.service'.format(**locals()),\n headers=headers)\n resp = self.conn.getresponse()\n data = resp.read()\n if resp.status not in (404, 204):\n errmsg = \"Failed to delete unit: {} {} - {}\".format(\n resp.status, resp.reason, data)\n raise RuntimeError(errmsg)\n return data\n\n def _get_state(self, name=None):\n headers = {'Content-Type': 'application/json'}\n url = '/v1-alpha/state'\n if name:\n url += '?unitName={name}.service'.format(**locals())\n self.conn.request('GET', url, headers=headers)\n resp = self.conn.getresponse()\n data = resp.read()\n if resp.status not 
in (200,):\n errmsg = \"Failed to retrieve state: {} {} - {}\".format(\n resp.status, resp.reason, data)\n raise RuntimeError(errmsg)\n return json.loads(data)\n\n def _get_machines(self):\n headers = {'Content-Type': 'application/json'}\n url = '/v1-alpha/machines'\n self.conn.request('GET', url, headers=headers)\n resp = self.conn.getresponse()\n data = resp.read()\n if resp.status not in (200,):\n errmsg = \"Failed to retrieve machines: {} {} - {}\".format(\n resp.status, resp.reason, data)\n raise RuntimeError(errmsg)\n return json.loads(data)\n\n # container api\n\n def create(self, name, image, command='', template=None, **kwargs):\n \"\"\"Create a container\"\"\"\n self._create_container(name, image, command,\n template or copy.deepcopy(CONTAINER_TEMPLATE), **kwargs)\n\n def _create_container(self, name, image, command, unit, **kwargs):\n l = locals().copy()\n l.update(re.match(MATCH, name).groupdict())\n # prepare memory limit for the container type\n mem = kwargs.get('memory', {}).get(l['c_type'], None)\n if mem:\n l.update({'memory': '-m {}'.format(mem.lower())})\n else:\n l.update({'memory': ''})\n # prepare memory limit for the container type\n cpu = kwargs.get('cpu', {}).get(l['c_type'], None)\n if cpu:\n l.update({'cpu': '-c {}'.format(cpu)})\n else:\n l.update({'cpu': ''})\n # set unit hostname\n l.update({'hostname': self._get_hostname(name)})\n # should a special entrypoint be used\n entrypoint = kwargs.get('entrypoint')\n if entrypoint:\n l.update({'entrypoint': '{}'.format(entrypoint)})\n # encode command as utf-8\n if isinstance(l.get('command'), basestring):\n l['command'] = l['command'].encode('utf-8')\n # construct unit from template\n for f in unit:\n f['value'] = f['value'].format(**l)\n # prepare tags only if one was provided\n tags = kwargs.get('tags', {})\n if tags:\n tagset = ' '.join(['\"{}={}\"'.format(k, v) for k, v in tags.items()])\n unit.append({\"section\": \"X-Fleet\", \"name\": \"MachineMetadata\",\n \"value\": tagset})\n # post unit to fleet\n self._put_unit(name, {\"desiredState\": \"loaded\", \"options\": unit})\n\n def _get_hostname(self, application_name):\n hostname = settings.UNIT_HOSTNAME\n if hostname == \"default\":\n return ''\n elif hostname == \"application\":\n # replace underscore with dots, since underscore is not valid in DNS hostnames\n dns_name = application_name.replace(\"_\", \".\")\n return '-h ' + dns_name\n elif hostname == \"server\":\n return '-h %H'\n else:\n raise RuntimeError('Unsupported hostname: ' + hostname)\n\n def start(self, name):\n \"\"\"Start a container\"\"\"\n self._put_unit(name, {'desiredState': 'launched'})\n self._wait_for_container_running(name)\n\n def _wait_for_container_state(self, name):\n # wait for container to get scheduled\n for _ in range(30):\n states = self._get_state(name)\n if states and len(states.get('states', [])) == 1:\n return states.get('states')[0]\n time.sleep(1)\n else:\n raise RuntimeError('container timeout while retrieving state')\n\n def _wait_for_container_running(self, name):\n # we bump to 20 minutes here to match the timeout on the router and in the app unit files\n for _ in range(1200):\n if self.state(name) == JobState.up:\n return\n time.sleep(1)\n else:\n raise RuntimeError('container failed to start')\n\n def _wait_for_destroy(self, name):\n for _ in range(30):\n if not self._get_state(name):\n break\n time.sleep(1)\n else:\n raise RuntimeError('timeout on container destroy')\n\n def stop(self, name):\n \"\"\"Stop a container\"\"\"\n raise NotImplementedError\n\n def 
destroy(self, name):\n \"\"\"Destroy a container\"\"\"\n # call all destroy functions, ignoring any errors\n try:\n self._destroy_container(name)\n except:\n pass\n self._wait_for_destroy(name)\n\n def _destroy_container(self, name):\n for attempt in range(RETRIES):\n try:\n self._delete_unit(name)\n break\n except:\n if attempt == (RETRIES - 1): # account for 0 indexing\n raise\n\n def run(self, name, image, entrypoint, command): # noqa\n \"\"\"Run a one-off command\"\"\"\n self._create_container(name, image, command, copy.deepcopy(RUN_TEMPLATE),\n entrypoint=entrypoint)\n # launch the container\n self._put_unit(name, {'desiredState': 'launched'})\n # wait for the container to get scheduled\n state = self._wait_for_container_state(name)\n\n try:\n machineID = state.get('machineID')\n\n # find the machine\n machines = self._get_machines()\n if not machines:\n raise RuntimeError('no available hosts to run command')\n\n # find the machine's primaryIP\n primaryIP = None\n for m in machines.get('machines', []):\n if m['id'] == machineID:\n primaryIP = m['primaryIP']\n if not primaryIP:\n raise RuntimeError('could not find host')\n\n # prepare ssh key\n file_obj = cStringIO.StringIO(base64.b64decode(self.pkey))\n pkey = paramiko.RSAKey(file_obj=file_obj)\n\n # grab output via docker logs over SSH\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(primaryIP, username=\"core\", pkey=pkey)\n # share a transport\n tran = ssh.get_transport()\n\n def _do_ssh(cmd):\n chan = tran.open_session()\n # get a pty so stdout/stderr look right\n chan.get_pty()\n out = chan.makefile()\n chan.exec_command(cmd)\n rc, output = chan.recv_exit_status(), out.read()\n return rc, output\n\n # wait for container to launch\n # we loop indefinitely here, as we have no idea how long the docker pull will take\n while True:\n rc, _ = _do_ssh('docker inspect {name}'.format(**locals()))\n if rc == 0:\n break\n time.sleep(1)\n else:\n raise RuntimeError('failed to create container')\n\n # wait for container to start\n for _ in range(2):\n _rc, _output = _do_ssh('docker inspect {name}'.format(**locals()))\n if _rc != 0:\n raise RuntimeError('failed to inspect container')\n _container = json.loads(_output)\n started_at = _container[0][\"State\"][\"StartedAt\"]\n if not started_at.startswith('0001'):\n break\n time.sleep(1)\n else:\n raise RuntimeError('container failed to start')\n\n # wait for container to complete\n for _ in range(1200):\n _rc, _output = _do_ssh('docker inspect {name}'.format(**locals()))\n if _rc != 0:\n raise RuntimeError('failed to inspect container')\n _container = json.loads(_output)\n finished_at = _container[0][\"State\"][\"FinishedAt\"]\n if not finished_at.startswith('0001'):\n break\n time.sleep(1)\n else:\n raise RuntimeError('container timed out')\n\n # gather container output\n _rc, output = _do_ssh('docker logs {name}'.format(**locals()))\n if _rc != 0:\n raise RuntimeError('could not attach to container')\n\n # determine container exit code\n _rc, _output = _do_ssh('docker inspect {name}'.format(**locals()))\n if _rc != 0:\n raise RuntimeError('could not determine exit code')\n container = json.loads(_output)\n rc = container[0][\"State\"][\"ExitCode\"]\n\n finally:\n # cleanup\n self._destroy_container(name)\n self._wait_for_destroy(name)\n\n # return rc and output\n return rc, output\n\n def state(self, name):\n systemdActiveStateMap = {\n \"active\": \"up\",\n \"reloading\": \"down\",\n \"inactive\": \"created\",\n \"failed\": \"crashed\",\n 
\"activating\": \"down\",\n \"deactivating\": \"down\",\n }\n try:\n # NOTE (bacongobbler): this call to ._get_unit() acts as a pre-emptive check to\n # determine if the job no longer exists (will raise a RuntimeError on 404)\n self._get_unit(name)\n state = self._wait_for_container_state(name)\n activeState = state['systemdActiveState']\n # FIXME (bacongobbler): when fleet loads a job, sometimes it'll automatically start and\n # stop the container, which in our case will return as 'failed', even though\n # the container is perfectly fine.\n if activeState == 'failed':\n if state['systemdLoadState'] == 'loaded':\n return JobState.created\n return getattr(JobState, systemdActiveStateMap[activeState])\n except KeyError:\n # failed retrieving a proper response from the fleet API\n return JobState.error\n except RuntimeError:\n # failed to retrieve a response from the fleet API,\n # which means it does not exist\n return JobState.destroyed\n\n def attach(self, name):\n \"\"\"\n Attach to a job's stdin, stdout and stderr\n \"\"\"\n raise NotImplementedError\n\nSchedulerClient = FleetHTTPClient\n\n\nCONTAINER_TEMPLATE = [\n {\"section\": \"Unit\", \"name\": \"Description\", \"value\": \"{name}\"},\n {\"section\": \"Service\", \"name\": \"ExecStartPre\", \"value\": '''/bin/sh -c \"IMAGE=$(etcdctl get /deis/registry/host 2>&1):$(etcdctl get /deis/registry/port 2>&1)/{image}; docker pull $IMAGE\"'''}, # noqa\n {\"section\": \"Service\", \"name\": \"ExecStartPre\", \"value\": '''/bin/sh -c \"docker inspect {name} >/dev/null 2>&1 && docker rm -f {name} || true\"'''}, # noqa\n {\"section\": \"Service\", \"name\": \"ExecStart\", \"value\": '''/bin/sh -c \"IMAGE=$(etcdctl get /deis/registry/host 2>&1):$(etcdctl get /deis/registry/port 2>&1)/{image}; port=$(docker inspect -f '{{{{range $k, $v := .ContainerConfig.ExposedPorts }}}}{{{{$k}}}}{{{{end}}}}' $IMAGE | cut -d/ -f1) ; docker run --name {name} {memory} {cpu} {hostname} -P -e PORT=$port $IMAGE {command}\"'''}, # noqa\n {\"section\": \"Service\", \"name\": \"ExecStop\", \"value\": '''/usr/bin/docker stop {name}'''},\n {\"section\": \"Service\", \"name\": \"ExecStop\", \"value\": '''/usr/bin/docker rm -f {name}'''},\n {\"section\": \"Service\", \"name\": \"TimeoutStartSec\", \"value\": \"20m\"},\n {\"section\": \"Service\", \"name\": \"TimeoutStopSec\", \"value\": \"10\"},\n {\"section\": \"Service\", \"name\": \"RestartSec\", \"value\": \"5\"},\n {\"section\": \"Service\", \"name\": \"Restart\", \"value\": \"on-failure\"},\n]\n\n\nRUN_TEMPLATE = [\n {\"section\": \"Unit\", \"name\": \"Description\", \"value\": \"{name} admin command\"},\n {\"section\": \"Service\", \"name\": \"ExecStartPre\", \"value\": '''/bin/sh -c \"IMAGE=$(etcdctl get /deis/registry/host 2>&1):$(etcdctl get /deis/registry/port 2>&1)/{image}; docker pull $IMAGE\"'''}, # noqa\n {\"section\": \"Service\", \"name\": \"ExecStartPre\", \"value\": '''/bin/sh -c \"docker inspect {name} >/dev/null 2>&1 && docker rm -f {name} || true\"'''}, # noqa\n {\"section\": \"Service\", \"name\": \"ExecStart\", \"value\": '''/bin/sh -c \"IMAGE=$(etcdctl get /deis/registry/host 2>&1):$(etcdctl get /deis/registry/port 2>&1)/{image}; docker run --name {name} --entrypoint={entrypoint} -a stdout -a stderr $IMAGE {command}\"'''}, # noqa\n {\"section\": \"Service\", \"name\": \"TimeoutStartSec\", \"value\": \"20m\"},\n]\n", "path": "controller/scheduler/fleet.py"}], "after_files": [{"content": "import cStringIO\nimport base64\nimport copy\nimport httplib\nimport json\nimport paramiko\nimport socket\nimport 
re\nimport time\n\nfrom django.conf import settings\n\nfrom .states import JobState\n\n\nMATCH = re.compile(\n '(?P<app>[a-z0-9-]+)_?(?P<version>v[0-9]+)?\\.?(?P<c_type>[a-z-_]+)?.(?P<c_num>[0-9]+)')\nRETRIES = 3\n\n\nclass UHTTPConnection(httplib.HTTPConnection):\n \"\"\"Subclass of Python library HTTPConnection that uses a Unix domain socket.\n \"\"\"\n\n def __init__(self, path):\n httplib.HTTPConnection.__init__(self, 'localhost')\n self.path = path\n\n def connect(self):\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n sock.connect(self.path)\n self.sock = sock\n\n\nclass FleetHTTPClient(object):\n\n def __init__(self, target, auth, options, pkey):\n self.target = target\n self.auth = auth\n self.options = options\n self.pkey = pkey\n # single global connection\n self.conn = UHTTPConnection(self.target)\n\n # connection helpers\n\n def _request_unit(self, method, name, body=None):\n headers = {'Content-Type': 'application/json'}\n self.conn.request(method, '/v1-alpha/units/{name}.service'.format(**locals()),\n headers=headers, body=json.dumps(body))\n return self.conn.getresponse()\n\n def _get_unit(self, name):\n for attempt in range(RETRIES):\n try:\n resp = self._request_unit('GET', name)\n data = resp.read()\n if not 200 <= resp.status <= 299:\n errmsg = \"Failed to retrieve unit: {} {} - {}\".format(\n resp.status, resp.reason, data)\n raise RuntimeError(errmsg)\n return data\n except:\n if attempt >= (RETRIES - 1):\n raise\n\n def _put_unit(self, name, body):\n for attempt in range(RETRIES):\n try:\n resp = self._request_unit('PUT', name, body)\n data = resp.read()\n if not 200 <= resp.status <= 299:\n errmsg = \"Failed to create unit: {} {} - {}\".format(\n resp.status, resp.reason, data)\n raise RuntimeError(errmsg)\n return data\n except:\n if attempt >= (RETRIES - 1):\n raise\n\n def _delete_unit(self, name):\n headers = {'Content-Type': 'application/json'}\n self.conn.request('DELETE', '/v1-alpha/units/{name}.service'.format(**locals()),\n headers=headers)\n resp = self.conn.getresponse()\n data = resp.read()\n if resp.status not in (404, 204):\n errmsg = \"Failed to delete unit: {} {} - {}\".format(\n resp.status, resp.reason, data)\n raise RuntimeError(errmsg)\n return data\n\n def _get_state(self, name=None):\n headers = {'Content-Type': 'application/json'}\n url = '/v1-alpha/state'\n if name:\n url += '?unitName={name}.service'.format(**locals())\n self.conn.request('GET', url, headers=headers)\n resp = self.conn.getresponse()\n data = resp.read()\n if resp.status not in (200,):\n errmsg = \"Failed to retrieve state: {} {} - {}\".format(\n resp.status, resp.reason, data)\n raise RuntimeError(errmsg)\n return json.loads(data)\n\n def _get_machines(self):\n headers = {'Content-Type': 'application/json'}\n url = '/v1-alpha/machines'\n self.conn.request('GET', url, headers=headers)\n resp = self.conn.getresponse()\n data = resp.read()\n if resp.status not in (200,):\n errmsg = \"Failed to retrieve machines: {} {} - {}\".format(\n resp.status, resp.reason, data)\n raise RuntimeError(errmsg)\n return json.loads(data)\n\n # container api\n\n def create(self, name, image, command='', template=None, **kwargs):\n \"\"\"Create a container\"\"\"\n self._create_container(name, image, command,\n template or copy.deepcopy(CONTAINER_TEMPLATE), **kwargs)\n\n def _create_container(self, name, image, command, unit, **kwargs):\n l = locals().copy()\n l.update(re.match(MATCH, name).groupdict())\n # prepare memory limit for the container type\n mem = kwargs.get('memory', 
{}).get(l['c_type'], None)\n if mem:\n l.update({'memory': '-m {}'.format(mem.lower())})\n else:\n l.update({'memory': ''})\n # prepare memory limit for the container type\n cpu = kwargs.get('cpu', {}).get(l['c_type'], None)\n if cpu:\n l.update({'cpu': '-c {}'.format(cpu)})\n else:\n l.update({'cpu': ''})\n # set unit hostname\n l.update({'hostname': self._get_hostname(name)})\n # should a special entrypoint be used\n entrypoint = kwargs.get('entrypoint')\n if entrypoint:\n l.update({'entrypoint': '{}'.format(entrypoint)})\n # encode command as utf-8\n if isinstance(l.get('command'), basestring):\n l['command'] = l['command'].encode('utf-8')\n # construct unit from template\n for f in unit:\n f['value'] = f['value'].format(**l)\n # prepare tags only if one was provided\n tags = kwargs.get('tags', {})\n if tags:\n tagset = ' '.join(['\"{}={}\"'.format(k, v) for k, v in tags.items()])\n unit.append({\"section\": \"X-Fleet\", \"name\": \"MachineMetadata\",\n \"value\": tagset})\n # post unit to fleet\n self._put_unit(name, {\"desiredState\": \"loaded\", \"options\": unit})\n\n def _get_hostname(self, application_name):\n hostname = settings.UNIT_HOSTNAME\n if hostname == \"default\":\n return ''\n elif hostname == \"application\":\n # replace underscore with dots, since underscore is not valid in DNS hostnames\n dns_name = application_name.replace(\"_\", \".\")\n return '-h ' + dns_name\n elif hostname == \"server\":\n return '-h %H'\n else:\n raise RuntimeError('Unsupported hostname: ' + hostname)\n\n def start(self, name):\n \"\"\"Start a container\"\"\"\n self._put_unit(name, {'desiredState': 'launched'})\n self._wait_for_container_running(name)\n\n def _wait_for_container_state(self, name):\n # wait for container to get scheduled\n for _ in range(30):\n states = self._get_state(name)\n if states and len(states.get('states', [])) == 1:\n return states.get('states')[0]\n time.sleep(1)\n else:\n raise RuntimeError('container timeout while retrieving state')\n\n def _wait_for_container_running(self, name):\n # we bump to 20 minutes here to match the timeout on the router and in the app unit files\n for _ in range(1200):\n if self.state(name) == JobState.up:\n return\n time.sleep(1)\n else:\n raise RuntimeError('container failed to start')\n\n def _wait_for_destroy(self, name):\n for _ in range(30):\n if not self._get_state(name):\n break\n time.sleep(1)\n else:\n raise RuntimeError('timeout on container destroy')\n\n def stop(self, name):\n \"\"\"Stop a container\"\"\"\n raise NotImplementedError\n\n def destroy(self, name):\n \"\"\"Destroy a container\"\"\"\n # call all destroy functions, ignoring any errors\n try:\n self._destroy_container(name)\n except:\n pass\n self._wait_for_destroy(name)\n\n def _destroy_container(self, name):\n for attempt in range(RETRIES):\n try:\n self._delete_unit(name)\n break\n except:\n if attempt == (RETRIES - 1): # account for 0 indexing\n raise\n\n def run(self, name, image, entrypoint, command): # noqa\n \"\"\"Run a one-off command\"\"\"\n self._create_container(name, image, command, copy.deepcopy(RUN_TEMPLATE),\n entrypoint=entrypoint)\n # launch the container\n self._put_unit(name, {'desiredState': 'launched'})\n # wait for the container to get scheduled\n state = self._wait_for_container_state(name)\n\n try:\n machineID = state.get('machineID')\n\n # find the machine\n machines = self._get_machines()\n if not machines:\n raise RuntimeError('no available hosts to run command')\n\n # find the machine's primaryIP\n primaryIP = None\n for m in 
machines.get('machines', []):\n if m['id'] == machineID:\n primaryIP = m['primaryIP']\n if not primaryIP:\n raise RuntimeError('could not find host')\n\n # prepare ssh key\n file_obj = cStringIO.StringIO(base64.b64decode(self.pkey))\n pkey = paramiko.RSAKey(file_obj=file_obj)\n\n # grab output via docker logs over SSH\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(primaryIP, username=\"core\", pkey=pkey)\n # share a transport\n tran = ssh.get_transport()\n\n def _do_ssh(cmd):\n chan = tran.open_session()\n # get a pty so stdout/stderr look right\n chan.get_pty()\n out = chan.makefile()\n chan.exec_command(cmd)\n output = out.read()\n rc = chan.recv_exit_status()\n return rc, output\n\n # wait for container to launch\n # we loop indefinitely here, as we have no idea how long the docker pull will take\n while True:\n rc, _ = _do_ssh('docker inspect {name}'.format(**locals()))\n if rc == 0:\n break\n time.sleep(1)\n else:\n raise RuntimeError('failed to create container')\n\n # wait for container to start\n for _ in range(2):\n _rc, _output = _do_ssh('docker inspect {name}'.format(**locals()))\n if _rc != 0:\n raise RuntimeError('failed to inspect container')\n _container = json.loads(_output)\n started_at = _container[0][\"State\"][\"StartedAt\"]\n if not started_at.startswith('0001'):\n break\n time.sleep(1)\n else:\n raise RuntimeError('container failed to start')\n\n # wait for container to complete\n for _ in range(1200):\n _rc, _output = _do_ssh('docker inspect {name}'.format(**locals()))\n if _rc != 0:\n raise RuntimeError('failed to inspect container')\n _container = json.loads(_output)\n finished_at = _container[0][\"State\"][\"FinishedAt\"]\n if not finished_at.startswith('0001'):\n break\n time.sleep(1)\n else:\n raise RuntimeError('container timed out')\n\n # gather container output\n _rc, output = _do_ssh('docker logs {name}'.format(**locals()))\n if _rc != 0:\n raise RuntimeError('could not attach to container')\n\n # determine container exit code\n _rc, _output = _do_ssh('docker inspect {name}'.format(**locals()))\n if _rc != 0:\n raise RuntimeError('could not determine exit code')\n container = json.loads(_output)\n rc = container[0][\"State\"][\"ExitCode\"]\n\n finally:\n # cleanup\n self._destroy_container(name)\n self._wait_for_destroy(name)\n\n # return rc and output\n return rc, output\n\n def state(self, name):\n systemdActiveStateMap = {\n \"active\": \"up\",\n \"reloading\": \"down\",\n \"inactive\": \"created\",\n \"failed\": \"crashed\",\n \"activating\": \"down\",\n \"deactivating\": \"down\",\n }\n try:\n # NOTE (bacongobbler): this call to ._get_unit() acts as a pre-emptive check to\n # determine if the job no longer exists (will raise a RuntimeError on 404)\n self._get_unit(name)\n state = self._wait_for_container_state(name)\n activeState = state['systemdActiveState']\n # FIXME (bacongobbler): when fleet loads a job, sometimes it'll automatically start and\n # stop the container, which in our case will return as 'failed', even though\n # the container is perfectly fine.\n if activeState == 'failed':\n if state['systemdLoadState'] == 'loaded':\n return JobState.created\n return getattr(JobState, systemdActiveStateMap[activeState])\n except KeyError:\n # failed retrieving a proper response from the fleet API\n return JobState.error\n except RuntimeError:\n # failed to retrieve a response from the fleet API,\n # which means it does not exist\n return JobState.destroyed\n\n def attach(self, name):\n \"\"\"\n 
Attach to a job's stdin, stdout and stderr\n \"\"\"\n raise NotImplementedError\n\nSchedulerClient = FleetHTTPClient\n\n\nCONTAINER_TEMPLATE = [\n {\"section\": \"Unit\", \"name\": \"Description\", \"value\": \"{name}\"},\n {\"section\": \"Service\", \"name\": \"ExecStartPre\", \"value\": '''/bin/sh -c \"IMAGE=$(etcdctl get /deis/registry/host 2>&1):$(etcdctl get /deis/registry/port 2>&1)/{image}; docker pull $IMAGE\"'''}, # noqa\n {\"section\": \"Service\", \"name\": \"ExecStartPre\", \"value\": '''/bin/sh -c \"docker inspect {name} >/dev/null 2>&1 && docker rm -f {name} || true\"'''}, # noqa\n {\"section\": \"Service\", \"name\": \"ExecStart\", \"value\": '''/bin/sh -c \"IMAGE=$(etcdctl get /deis/registry/host 2>&1):$(etcdctl get /deis/registry/port 2>&1)/{image}; port=$(docker inspect -f '{{{{range $k, $v := .ContainerConfig.ExposedPorts }}}}{{{{$k}}}}{{{{end}}}}' $IMAGE | cut -d/ -f1) ; docker run --name {name} {memory} {cpu} {hostname} -P -e PORT=$port $IMAGE {command}\"'''}, # noqa\n {\"section\": \"Service\", \"name\": \"ExecStop\", \"value\": '''/usr/bin/docker stop {name}'''},\n {\"section\": \"Service\", \"name\": \"ExecStop\", \"value\": '''/usr/bin/docker rm -f {name}'''},\n {\"section\": \"Service\", \"name\": \"TimeoutStartSec\", \"value\": \"20m\"},\n {\"section\": \"Service\", \"name\": \"TimeoutStopSec\", \"value\": \"10\"},\n {\"section\": \"Service\", \"name\": \"RestartSec\", \"value\": \"5\"},\n {\"section\": \"Service\", \"name\": \"Restart\", \"value\": \"on-failure\"},\n]\n\n\nRUN_TEMPLATE = [\n {\"section\": \"Unit\", \"name\": \"Description\", \"value\": \"{name} admin command\"},\n {\"section\": \"Service\", \"name\": \"ExecStartPre\", \"value\": '''/bin/sh -c \"IMAGE=$(etcdctl get /deis/registry/host 2>&1):$(etcdctl get /deis/registry/port 2>&1)/{image}; docker pull $IMAGE\"'''}, # noqa\n {\"section\": \"Service\", \"name\": \"ExecStartPre\", \"value\": '''/bin/sh -c \"docker inspect {name} >/dev/null 2>&1 && docker rm -f {name} || true\"'''}, # noqa\n {\"section\": \"Service\", \"name\": \"ExecStart\", \"value\": '''/bin/sh -c \"IMAGE=$(etcdctl get /deis/registry/host 2>&1):$(etcdctl get /deis/registry/port 2>&1)/{image}; docker run --name {name} --entrypoint={entrypoint} -a stdout -a stderr $IMAGE {command}\"'''}, # noqa\n {\"section\": \"Service\", \"name\": \"TimeoutStartSec\", \"value\": \"20m\"},\n]\n", "path": "controller/scheduler/fleet.py"}]} |
gh_patches_debug_1286 | rasdani/github-patches | git_diff | interlegis__sapl-3164 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Do not allow the rotulo_prefixo_texto and rotulo_sufixo_texto fields to be changed via the admin interface
<!--- Provide a general summary of the _issue_ in the title above -->
## Expected Behavior
<!--- If you are describing a _bug_, tell us what should happen. -->
<!--- If you are suggesting a change/improvement, tell us how it should work. -->
## Current Behavior
<!--- If you are describing a bug, tell us what happens instead of the expected behavior. -->
<!--- If you are suggesting a change/improvement, explain the difference from the current behavior. -->
## Possible Solution
<!--- Not mandatory, but suggest a possible fix/reason for the bug -->
<!--- or ideas on how to implement the addition/change. -->
## Steps to Reproduce (for bugs)
<!--- Provide a link to an example, or an unambiguous set of steps -->
<!--- to reproduce this bug. Include code to reproduce, if relevant. -->
1.
2.
3.
4.
## Context
<!--- How does this problem affect you? What are you trying to accomplish? -->
<!--- Providing context helps us find a solution that is more useful in the real world -->
## Images of the Issue
<!--- Visual representation of the issue in video or image form -->
<!--- If you are describing a bug, post images or videos reproducing the bug, if applicable -->
## Your Environment
<!--- Include relevant details about the environment in which you experienced the bug. -->
* Version used (_Release_):
* Browser name and version:
* Operating System name and version (desktop or mobile):
* Link to your project (if this is a fork of this project):
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sapl/compilacao/admin.py`
Content:
```
1 from sapl.utils import register_all_models_in_admin
2
3 register_all_models_in_admin(__name__)
4
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sapl/compilacao/admin.py b/sapl/compilacao/admin.py
--- a/sapl/compilacao/admin.py
+++ b/sapl/compilacao/admin.py
@@ -1,3 +1,12 @@
+from django.contrib import admin
+from sapl.compilacao.models import TipoDispositivo
from sapl.utils import register_all_models_in_admin
register_all_models_in_admin(__name__)
+admin.site.unregister(TipoDispositivo)
+
+
+@admin.register(TipoDispositivo)
+class TipoDispositivoAdmin(admin.ModelAdmin):
+ readonly_fields = ("rotulo_prefixo_texto", "rotulo_sufixo_texto",)
+ list_display = [f.name for f in TipoDispositivo._meta.fields if f.name != 'id']
| {"golden_diff": "diff --git a/sapl/compilacao/admin.py b/sapl/compilacao/admin.py\n--- a/sapl/compilacao/admin.py\n+++ b/sapl/compilacao/admin.py\n@@ -1,3 +1,12 @@\n+from django.contrib import admin\n+from sapl.compilacao.models import TipoDispositivo\n from sapl.utils import register_all_models_in_admin\n \n register_all_models_in_admin(__name__)\n+admin.site.unregister(TipoDispositivo)\n+\n+\[email protected](TipoDispositivo)\n+class TipoDispositivoAdmin(admin.ModelAdmin):\n+ readonly_fields = (\"rotulo_prefixo_texto\", \"rotulo_sufixo_texto\",)\n+ list_display = [f.name for f in TipoDispositivo._meta.fields if f.name != 'id']\n", "issue": "N\u00e3o permitir que se altere campos rotulo_prefixo_texto e rotulo_sufixo_texto via interface admin\n<!--- Forne\u00e7a um resumo geral da _issue_ no t\u00edtulo acima -->\r\n\r\n## Comportamento Esperado\r\n<!--- Se voc\u00ea est\u00e1 descrevendo um _bug_, conte-nos o que deveria acontecer. -->\r\n<!--- Se voc\u00ea est\u00e1 sugerindo uma mudan\u00e7a/melhoria, conte-nos como deve funcionar. -->\r\n\r\n## Comportamento Atual\r\n<!--- Se est\u00e1 descrevendo um bug, conte-nos o que acontece em vez do comportamento esperado. -->\r\n<!--- Se est\u00e1 sugerindo uma mudan\u00e7a/melhoria, explique a diferen\u00e7a com o comportamento atual. -->\r\n\r\n## Poss\u00edvel Solu\u00e7\u00e3o\r\n<!--- N\u00e3o \u00e9 obrigat\u00f3rio, mas sugira uma poss\u00edvel corre\u00e7\u00e3o/raz\u00e3o para o bug -->\r\n<!--- ou ideias de como implementar a adi\u00e7\u00e3o/mudan\u00e7a. -->\r\n\r\n## Passos para Reproduzir (para bugs)\r\n<!--- Forne\u00e7a um link para um exemplo, ou um conjunto de passos inequ\u00edvocos -->\r\n<!--- para reproduzir esse bug. Inclua c\u00f3digo para reproduzir, se relevante. -->\r\n1.\r\n2.\r\n3.\r\n4.\r\n\r\n## Contexto\r\n<!--- Como esse problema o afeta? O que voc\u00ea est\u00e1 tentando realizar? -->\r\n<!--- Fornecer o contexto nos ajuda a encontrar uma solu\u00e7\u00e3o que seja mais \u00fatil no mundo real -->\r\n\r\n## Imagens do Ocorrido\r\n<!--- Representa\u00e7\u00e3o visual em v\u00eddeo ou imagem do ocorrido -->\r\n<!--- Se est\u00e1 descrevendo um bug poste imagens ou v\u00eddeos na reprodu\u00e7\u00e3o do bug citado, caso se aplique -->\r\n\r\n## Seu Ambiente\r\n<!--- Inclua detalhes relevantes sobre o ambiente em que voc\u00ea presenciou/experienciou o bug. -->\r\n* Vers\u00e3o usada (_Release_):\r\n* Nome e vers\u00e3o do navegador:\r\n* Nome e vers\u00e3o do Sistema Operacional (desktop ou mobile):\r\n* Link para o seu projeto (Caso de fork deste projeto):\r\n\n", "before_files": [{"content": "from sapl.utils import register_all_models_in_admin\n\nregister_all_models_in_admin(__name__)\n", "path": "sapl/compilacao/admin.py"}], "after_files": [{"content": "from django.contrib import admin\nfrom sapl.compilacao.models import TipoDispositivo\nfrom sapl.utils import register_all_models_in_admin\n\nregister_all_models_in_admin(__name__)\nadmin.site.unregister(TipoDispositivo)\n\n\[email protected](TipoDispositivo)\nclass TipoDispositivoAdmin(admin.ModelAdmin):\n readonly_fields = (\"rotulo_prefixo_texto\", \"rotulo_sufixo_texto\",)\n list_display = [f.name for f in TipoDispositivo._meta.fields if f.name != 'id']\n", "path": "sapl/compilacao/admin.py"}]} |
gh_patches_debug_1287 | rasdani/github-patches | git_diff | tobymao__sqlglot-3482 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[bug] pgsql to mysql special character || Semantic mismatch
**Before you file an issue**
- Make sure you specify the "read" dialect eg. `parse_one(sql, read="spark")`
- Make sure you specify the "write" dialect eg. `ast.sql(dialect="duckdb")`
- Check if the issue still exists on main
**Fully reproducible code snippet**
Please include a fully reproducible code snippet or the input sql, dialect, and expected output.
**Official Documentation**
Please include links to official SQL documentation related to your issue.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sqlglot/dialects/mysql.py`
Content:
```
1 from __future__ import annotations
2
3 import typing as t
4
5 from sqlglot import exp, generator, parser, tokens, transforms
6 from sqlglot.dialects.dialect import (
7 Dialect,
8 NormalizationStrategy,
9 arrow_json_extract_sql,
10 date_add_interval_sql,
11 datestrtodate_sql,
12 build_formatted_time,
13 isnull_to_is_null,
14 locate_to_strposition,
15 max_or_greatest,
16 min_or_least,
17 no_ilike_sql,
18 no_paren_current_date_sql,
19 no_pivot_sql,
20 no_tablesample_sql,
21 no_trycast_sql,
22 build_date_delta,
23 build_date_delta_with_interval,
24 rename_func,
25 strposition_to_locate_sql,
26 unit_to_var,
27 )
28 from sqlglot.helper import seq_get
29 from sqlglot.tokens import TokenType
30
31
32 def _show_parser(*args: t.Any, **kwargs: t.Any) -> t.Callable[[MySQL.Parser], exp.Show]:
33 def _parse(self: MySQL.Parser) -> exp.Show:
34 return self._parse_show_mysql(*args, **kwargs)
35
36 return _parse
37
38
39 def _date_trunc_sql(self: MySQL.Generator, expression: exp.DateTrunc) -> str:
40 expr = self.sql(expression, "this")
41 unit = expression.text("unit").upper()
42
43 if unit == "WEEK":
44 concat = f"CONCAT(YEAR({expr}), ' ', WEEK({expr}, 1), ' 1')"
45 date_format = "%Y %u %w"
46 elif unit == "MONTH":
47 concat = f"CONCAT(YEAR({expr}), ' ', MONTH({expr}), ' 1')"
48 date_format = "%Y %c %e"
49 elif unit == "QUARTER":
50 concat = f"CONCAT(YEAR({expr}), ' ', QUARTER({expr}) * 3 - 2, ' 1')"
51 date_format = "%Y %c %e"
52 elif unit == "YEAR":
53 concat = f"CONCAT(YEAR({expr}), ' 1 1')"
54 date_format = "%Y %c %e"
55 else:
56 if unit != "DAY":
57 self.unsupported(f"Unexpected interval unit: {unit}")
58 return self.func("DATE", expr)
59
60 return self.func("STR_TO_DATE", concat, f"'{date_format}'")
61
62
63 # All specifiers for time parts (as opposed to date parts)
64 # https://dev.mysql.com/doc/refman/8.0/en/date-and-time-functions.html#function_date-format
65 TIME_SPECIFIERS = {"f", "H", "h", "I", "i", "k", "l", "p", "r", "S", "s", "T"}
66
67
68 def _has_time_specifier(date_format: str) -> bool:
69 i = 0
70 length = len(date_format)
71
72 while i < length:
73 if date_format[i] == "%":
74 i += 1
75 if i < length and date_format[i] in TIME_SPECIFIERS:
76 return True
77 i += 1
78 return False
79
80
81 def _str_to_date(args: t.List) -> exp.StrToDate | exp.StrToTime:
82 mysql_date_format = seq_get(args, 1)
83 date_format = MySQL.format_time(mysql_date_format)
84 this = seq_get(args, 0)
85
86 if mysql_date_format and _has_time_specifier(mysql_date_format.name):
87 return exp.StrToTime(this=this, format=date_format)
88
89 return exp.StrToDate(this=this, format=date_format)
90
91
92 def _str_to_date_sql(
93 self: MySQL.Generator, expression: exp.StrToDate | exp.StrToTime | exp.TsOrDsToDate
94 ) -> str:
95 return self.func("STR_TO_DATE", expression.this, self.format_time(expression))
96
97
98 def _trim_sql(self: MySQL.Generator, expression: exp.Trim) -> str:
99 target = self.sql(expression, "this")
100 trim_type = self.sql(expression, "position")
101 remove_chars = self.sql(expression, "expression")
102
103 # Use TRIM/LTRIM/RTRIM syntax if the expression isn't mysql-specific
104 if not remove_chars:
105 return self.trim_sql(expression)
106
107 trim_type = f"{trim_type} " if trim_type else ""
108 remove_chars = f"{remove_chars} " if remove_chars else ""
109 from_part = "FROM " if trim_type or remove_chars else ""
110 return f"TRIM({trim_type}{remove_chars}{from_part}{target})"
111
112
113 def date_add_sql(
114 kind: str,
115 ) -> t.Callable[[generator.Generator, exp.Expression], str]:
116 def func(self: generator.Generator, expression: exp.Expression) -> str:
117 return self.func(
118 f"DATE_{kind}",
119 expression.this,
120 exp.Interval(this=expression.expression, unit=unit_to_var(expression)),
121 )
122
123 return func
124
125
126 def _ts_or_ds_to_date_sql(self: MySQL.Generator, expression: exp.TsOrDsToDate) -> str:
127 time_format = expression.args.get("format")
128 return _str_to_date_sql(self, expression) if time_format else self.func("DATE", expression.this)
129
130
131 def _remove_ts_or_ds_to_date(
132 to_sql: t.Optional[t.Callable[[MySQL.Generator, exp.Expression], str]] = None,
133 args: t.Tuple[str, ...] = ("this",),
134 ) -> t.Callable[[MySQL.Generator, exp.Func], str]:
135 def func(self: MySQL.Generator, expression: exp.Func) -> str:
136 for arg_key in args:
137 arg = expression.args.get(arg_key)
138 if isinstance(arg, exp.TsOrDsToDate) and not arg.args.get("format"):
139 expression.set(arg_key, arg.this)
140
141 return to_sql(self, expression) if to_sql else self.function_fallback_sql(expression)
142
143 return func
144
145
146 class MySQL(Dialect):
147 # https://dev.mysql.com/doc/refman/8.0/en/identifiers.html
148 IDENTIFIERS_CAN_START_WITH_DIGIT = True
149
150 # We default to treating all identifiers as case-sensitive, since it matches MySQL's
151 # behavior on Linux systems. For MacOS and Windows systems, one can override this
152 # setting by specifying `dialect="mysql, normalization_strategy = lowercase"`.
153 #
154 # See also https://dev.mysql.com/doc/refman/8.2/en/identifier-case-sensitivity.html
155 NORMALIZATION_STRATEGY = NormalizationStrategy.CASE_SENSITIVE
156
157 TIME_FORMAT = "'%Y-%m-%d %T'"
158 DPIPE_IS_STRING_CONCAT = False
159 SUPPORTS_USER_DEFINED_TYPES = False
160 SUPPORTS_SEMI_ANTI_JOIN = False
161 SAFE_DIVISION = True
162
163 # https://prestodb.io/docs/current/functions/datetime.html#mysql-date-functions
164 TIME_MAPPING = {
165 "%M": "%B",
166 "%c": "%-m",
167 "%e": "%-d",
168 "%h": "%I",
169 "%i": "%M",
170 "%s": "%S",
171 "%u": "%W",
172 "%k": "%-H",
173 "%l": "%-I",
174 "%T": "%H:%M:%S",
175 "%W": "%a",
176 }
177
178 class Tokenizer(tokens.Tokenizer):
179 QUOTES = ["'", '"']
180 COMMENTS = ["--", "#", ("/*", "*/")]
181 IDENTIFIERS = ["`"]
182 STRING_ESCAPES = ["'", '"', "\\"]
183 BIT_STRINGS = [("b'", "'"), ("B'", "'"), ("0b", "")]
184 HEX_STRINGS = [("x'", "'"), ("X'", "'"), ("0x", "")]
185
186 KEYWORDS = {
187 **tokens.Tokenizer.KEYWORDS,
188 "CHARSET": TokenType.CHARACTER_SET,
189 "FORCE": TokenType.FORCE,
190 "IGNORE": TokenType.IGNORE,
191 "LOCK TABLES": TokenType.COMMAND,
192 "LONGBLOB": TokenType.LONGBLOB,
193 "LONGTEXT": TokenType.LONGTEXT,
194 "MEDIUMBLOB": TokenType.MEDIUMBLOB,
195 "TINYBLOB": TokenType.TINYBLOB,
196 "TINYTEXT": TokenType.TINYTEXT,
197 "MEDIUMTEXT": TokenType.MEDIUMTEXT,
198 "MEDIUMINT": TokenType.MEDIUMINT,
199 "MEMBER OF": TokenType.MEMBER_OF,
200 "SEPARATOR": TokenType.SEPARATOR,
201 "START": TokenType.BEGIN,
202 "SIGNED": TokenType.BIGINT,
203 "SIGNED INTEGER": TokenType.BIGINT,
204 "UNLOCK TABLES": TokenType.COMMAND,
205 "UNSIGNED": TokenType.UBIGINT,
206 "UNSIGNED INTEGER": TokenType.UBIGINT,
207 "YEAR": TokenType.YEAR,
208 "_ARMSCII8": TokenType.INTRODUCER,
209 "_ASCII": TokenType.INTRODUCER,
210 "_BIG5": TokenType.INTRODUCER,
211 "_BINARY": TokenType.INTRODUCER,
212 "_CP1250": TokenType.INTRODUCER,
213 "_CP1251": TokenType.INTRODUCER,
214 "_CP1256": TokenType.INTRODUCER,
215 "_CP1257": TokenType.INTRODUCER,
216 "_CP850": TokenType.INTRODUCER,
217 "_CP852": TokenType.INTRODUCER,
218 "_CP866": TokenType.INTRODUCER,
219 "_CP932": TokenType.INTRODUCER,
220 "_DEC8": TokenType.INTRODUCER,
221 "_EUCJPMS": TokenType.INTRODUCER,
222 "_EUCKR": TokenType.INTRODUCER,
223 "_GB18030": TokenType.INTRODUCER,
224 "_GB2312": TokenType.INTRODUCER,
225 "_GBK": TokenType.INTRODUCER,
226 "_GEOSTD8": TokenType.INTRODUCER,
227 "_GREEK": TokenType.INTRODUCER,
228 "_HEBREW": TokenType.INTRODUCER,
229 "_HP8": TokenType.INTRODUCER,
230 "_KEYBCS2": TokenType.INTRODUCER,
231 "_KOI8R": TokenType.INTRODUCER,
232 "_KOI8U": TokenType.INTRODUCER,
233 "_LATIN1": TokenType.INTRODUCER,
234 "_LATIN2": TokenType.INTRODUCER,
235 "_LATIN5": TokenType.INTRODUCER,
236 "_LATIN7": TokenType.INTRODUCER,
237 "_MACCE": TokenType.INTRODUCER,
238 "_MACROMAN": TokenType.INTRODUCER,
239 "_SJIS": TokenType.INTRODUCER,
240 "_SWE7": TokenType.INTRODUCER,
241 "_TIS620": TokenType.INTRODUCER,
242 "_UCS2": TokenType.INTRODUCER,
243 "_UJIS": TokenType.INTRODUCER,
244 # https://dev.mysql.com/doc/refman/8.0/en/string-literals.html
245 "_UTF8": TokenType.INTRODUCER,
246 "_UTF16": TokenType.INTRODUCER,
247 "_UTF16LE": TokenType.INTRODUCER,
248 "_UTF32": TokenType.INTRODUCER,
249 "_UTF8MB3": TokenType.INTRODUCER,
250 "_UTF8MB4": TokenType.INTRODUCER,
251 "@@": TokenType.SESSION_PARAMETER,
252 }
253
254 COMMANDS = {*tokens.Tokenizer.COMMANDS, TokenType.REPLACE} - {TokenType.SHOW}
255
256 class Parser(parser.Parser):
257 FUNC_TOKENS = {
258 *parser.Parser.FUNC_TOKENS,
259 TokenType.DATABASE,
260 TokenType.SCHEMA,
261 TokenType.VALUES,
262 }
263
264 CONJUNCTION = {
265 **parser.Parser.CONJUNCTION,
266 TokenType.DAMP: exp.And,
267 TokenType.XOR: exp.Xor,
268 TokenType.DPIPE: exp.Or,
269 }
270
271 TABLE_ALIAS_TOKENS = (
272 parser.Parser.TABLE_ALIAS_TOKENS - parser.Parser.TABLE_INDEX_HINT_TOKENS
273 )
274
275 RANGE_PARSERS = {
276 **parser.Parser.RANGE_PARSERS,
277 TokenType.MEMBER_OF: lambda self, this: self.expression(
278 exp.JSONArrayContains,
279 this=this,
280 expression=self._parse_wrapped(self._parse_expression),
281 ),
282 }
283
284 FUNCTIONS = {
285 **parser.Parser.FUNCTIONS,
286 "DATE": lambda args: exp.TsOrDsToDate(this=seq_get(args, 0)),
287 "DATE_ADD": build_date_delta_with_interval(exp.DateAdd),
288 "DATE_FORMAT": build_formatted_time(exp.TimeToStr, "mysql"),
289 "DATE_SUB": build_date_delta_with_interval(exp.DateSub),
290 "DAY": lambda args: exp.Day(this=exp.TsOrDsToDate(this=seq_get(args, 0))),
291 "DAYOFMONTH": lambda args: exp.DayOfMonth(this=exp.TsOrDsToDate(this=seq_get(args, 0))),
292 "DAYOFWEEK": lambda args: exp.DayOfWeek(this=exp.TsOrDsToDate(this=seq_get(args, 0))),
293 "DAYOFYEAR": lambda args: exp.DayOfYear(this=exp.TsOrDsToDate(this=seq_get(args, 0))),
294 "INSTR": lambda args: exp.StrPosition(substr=seq_get(args, 1), this=seq_get(args, 0)),
295 "FROM_UNIXTIME": build_formatted_time(exp.UnixToTime, "mysql"),
296 "ISNULL": isnull_to_is_null,
297 "LOCATE": locate_to_strposition,
298 "MAKETIME": exp.TimeFromParts.from_arg_list,
299 "MONTH": lambda args: exp.Month(this=exp.TsOrDsToDate(this=seq_get(args, 0))),
300 "MONTHNAME": lambda args: exp.TimeToStr(
301 this=exp.TsOrDsToDate(this=seq_get(args, 0)),
302 format=exp.Literal.string("%B"),
303 ),
304 "STR_TO_DATE": _str_to_date,
305 "TIMESTAMPDIFF": build_date_delta(exp.TimestampDiff),
306 "TO_DAYS": lambda args: exp.paren(
307 exp.DateDiff(
308 this=exp.TsOrDsToDate(this=seq_get(args, 0)),
309 expression=exp.TsOrDsToDate(this=exp.Literal.string("0000-01-01")),
310 unit=exp.var("DAY"),
311 )
312 + 1
313 ),
314 "WEEK": lambda args: exp.Week(
315 this=exp.TsOrDsToDate(this=seq_get(args, 0)), mode=seq_get(args, 1)
316 ),
317 "WEEKOFYEAR": lambda args: exp.WeekOfYear(this=exp.TsOrDsToDate(this=seq_get(args, 0))),
318 "YEAR": lambda args: exp.Year(this=exp.TsOrDsToDate(this=seq_get(args, 0))),
319 }
320
321 FUNCTION_PARSERS = {
322 **parser.Parser.FUNCTION_PARSERS,
323 "CHAR": lambda self: self._parse_chr(),
324 "GROUP_CONCAT": lambda self: self._parse_group_concat(),
325 # https://dev.mysql.com/doc/refman/5.7/en/miscellaneous-functions.html#function_values
326 "VALUES": lambda self: self.expression(
327 exp.Anonymous, this="VALUES", expressions=[self._parse_id_var()]
328 ),
329 }
330
331 STATEMENT_PARSERS = {
332 **parser.Parser.STATEMENT_PARSERS,
333 TokenType.SHOW: lambda self: self._parse_show(),
334 }
335
336 SHOW_PARSERS = {
337 "BINARY LOGS": _show_parser("BINARY LOGS"),
338 "MASTER LOGS": _show_parser("BINARY LOGS"),
339 "BINLOG EVENTS": _show_parser("BINLOG EVENTS"),
340 "CHARACTER SET": _show_parser("CHARACTER SET"),
341 "CHARSET": _show_parser("CHARACTER SET"),
342 "COLLATION": _show_parser("COLLATION"),
343 "FULL COLUMNS": _show_parser("COLUMNS", target="FROM", full=True),
344 "COLUMNS": _show_parser("COLUMNS", target="FROM"),
345 "CREATE DATABASE": _show_parser("CREATE DATABASE", target=True),
346 "CREATE EVENT": _show_parser("CREATE EVENT", target=True),
347 "CREATE FUNCTION": _show_parser("CREATE FUNCTION", target=True),
348 "CREATE PROCEDURE": _show_parser("CREATE PROCEDURE", target=True),
349 "CREATE TABLE": _show_parser("CREATE TABLE", target=True),
350 "CREATE TRIGGER": _show_parser("CREATE TRIGGER", target=True),
351 "CREATE VIEW": _show_parser("CREATE VIEW", target=True),
352 "DATABASES": _show_parser("DATABASES"),
353 "SCHEMAS": _show_parser("DATABASES"),
354 "ENGINE": _show_parser("ENGINE", target=True),
355 "STORAGE ENGINES": _show_parser("ENGINES"),
356 "ENGINES": _show_parser("ENGINES"),
357 "ERRORS": _show_parser("ERRORS"),
358 "EVENTS": _show_parser("EVENTS"),
359 "FUNCTION CODE": _show_parser("FUNCTION CODE", target=True),
360 "FUNCTION STATUS": _show_parser("FUNCTION STATUS"),
361 "GRANTS": _show_parser("GRANTS", target="FOR"),
362 "INDEX": _show_parser("INDEX", target="FROM"),
363 "MASTER STATUS": _show_parser("MASTER STATUS"),
364 "OPEN TABLES": _show_parser("OPEN TABLES"),
365 "PLUGINS": _show_parser("PLUGINS"),
366 "PROCEDURE CODE": _show_parser("PROCEDURE CODE", target=True),
367 "PROCEDURE STATUS": _show_parser("PROCEDURE STATUS"),
368 "PRIVILEGES": _show_parser("PRIVILEGES"),
369 "FULL PROCESSLIST": _show_parser("PROCESSLIST", full=True),
370 "PROCESSLIST": _show_parser("PROCESSLIST"),
371 "PROFILE": _show_parser("PROFILE"),
372 "PROFILES": _show_parser("PROFILES"),
373 "RELAYLOG EVENTS": _show_parser("RELAYLOG EVENTS"),
374 "REPLICAS": _show_parser("REPLICAS"),
375 "SLAVE HOSTS": _show_parser("REPLICAS"),
376 "REPLICA STATUS": _show_parser("REPLICA STATUS"),
377 "SLAVE STATUS": _show_parser("REPLICA STATUS"),
378 "GLOBAL STATUS": _show_parser("STATUS", global_=True),
379 "SESSION STATUS": _show_parser("STATUS"),
380 "STATUS": _show_parser("STATUS"),
381 "TABLE STATUS": _show_parser("TABLE STATUS"),
382 "FULL TABLES": _show_parser("TABLES", full=True),
383 "TABLES": _show_parser("TABLES"),
384 "TRIGGERS": _show_parser("TRIGGERS"),
385 "GLOBAL VARIABLES": _show_parser("VARIABLES", global_=True),
386 "SESSION VARIABLES": _show_parser("VARIABLES"),
387 "VARIABLES": _show_parser("VARIABLES"),
388 "WARNINGS": _show_parser("WARNINGS"),
389 }
390
391 PROPERTY_PARSERS = {
392 **parser.Parser.PROPERTY_PARSERS,
393 "LOCK": lambda self: self._parse_property_assignment(exp.LockProperty),
394 }
395
396 SET_PARSERS = {
397 **parser.Parser.SET_PARSERS,
398 "PERSIST": lambda self: self._parse_set_item_assignment("PERSIST"),
399 "PERSIST_ONLY": lambda self: self._parse_set_item_assignment("PERSIST_ONLY"),
400 "CHARACTER SET": lambda self: self._parse_set_item_charset("CHARACTER SET"),
401 "CHARSET": lambda self: self._parse_set_item_charset("CHARACTER SET"),
402 "NAMES": lambda self: self._parse_set_item_names(),
403 }
404
405 CONSTRAINT_PARSERS = {
406 **parser.Parser.CONSTRAINT_PARSERS,
407 "FULLTEXT": lambda self: self._parse_index_constraint(kind="FULLTEXT"),
408 "INDEX": lambda self: self._parse_index_constraint(),
409 "KEY": lambda self: self._parse_index_constraint(),
410 "SPATIAL": lambda self: self._parse_index_constraint(kind="SPATIAL"),
411 }
412
413 ALTER_PARSERS = {
414 **parser.Parser.ALTER_PARSERS,
415 "MODIFY": lambda self: self._parse_alter_table_alter(),
416 }
417
418 SCHEMA_UNNAMED_CONSTRAINTS = {
419 *parser.Parser.SCHEMA_UNNAMED_CONSTRAINTS,
420 "FULLTEXT",
421 "INDEX",
422 "KEY",
423 "SPATIAL",
424 }
425
426 PROFILE_TYPES: parser.OPTIONS_TYPE = {
427 **dict.fromkeys(("ALL", "CPU", "IPC", "MEMORY", "SOURCE", "SWAPS"), tuple()),
428 "BLOCK": ("IO",),
429 "CONTEXT": ("SWITCHES",),
430 "PAGE": ("FAULTS",),
431 }
432
433 TYPE_TOKENS = {
434 *parser.Parser.TYPE_TOKENS,
435 TokenType.SET,
436 }
437
438 ENUM_TYPE_TOKENS = {
439 *parser.Parser.ENUM_TYPE_TOKENS,
440 TokenType.SET,
441 }
442
443 LOG_DEFAULTS_TO_LN = True
444 STRING_ALIASES = True
445 VALUES_FOLLOWED_BY_PAREN = False
446 SUPPORTS_PARTITION_SELECTION = True
447
448 def _parse_primary_key_part(self) -> t.Optional[exp.Expression]:
449 this = self._parse_id_var()
450 if not self._match(TokenType.L_PAREN):
451 return this
452
453 expression = self._parse_number()
454 self._match_r_paren()
455 return self.expression(exp.ColumnPrefix, this=this, expression=expression)
456
457 def _parse_index_constraint(
458 self, kind: t.Optional[str] = None
459 ) -> exp.IndexColumnConstraint:
460 if kind:
461 self._match_texts(("INDEX", "KEY"))
462
463 this = self._parse_id_var(any_token=False)
464 index_type = self._match(TokenType.USING) and self._advance_any() and self._prev.text
465 expressions = self._parse_wrapped_csv(self._parse_ordered)
466
467 options = []
468 while True:
469 if self._match_text_seq("KEY_BLOCK_SIZE"):
470 self._match(TokenType.EQ)
471 opt = exp.IndexConstraintOption(key_block_size=self._parse_number())
472 elif self._match(TokenType.USING):
473 opt = exp.IndexConstraintOption(using=self._advance_any() and self._prev.text)
474 elif self._match_text_seq("WITH", "PARSER"):
475 opt = exp.IndexConstraintOption(parser=self._parse_var(any_token=True))
476 elif self._match(TokenType.COMMENT):
477 opt = exp.IndexConstraintOption(comment=self._parse_string())
478 elif self._match_text_seq("VISIBLE"):
479 opt = exp.IndexConstraintOption(visible=True)
480 elif self._match_text_seq("INVISIBLE"):
481 opt = exp.IndexConstraintOption(visible=False)
482 elif self._match_text_seq("ENGINE_ATTRIBUTE"):
483 self._match(TokenType.EQ)
484 opt = exp.IndexConstraintOption(engine_attr=self._parse_string())
485 elif self._match_text_seq("SECONDARY_ENGINE_ATTRIBUTE"):
486 self._match(TokenType.EQ)
487 opt = exp.IndexConstraintOption(secondary_engine_attr=self._parse_string())
488 else:
489 opt = None
490
491 if not opt:
492 break
493
494 options.append(opt)
495
496 return self.expression(
497 exp.IndexColumnConstraint,
498 this=this,
499 expressions=expressions,
500 kind=kind,
501 index_type=index_type,
502 options=options,
503 )
504
505 def _parse_show_mysql(
506 self,
507 this: str,
508 target: bool | str = False,
509 full: t.Optional[bool] = None,
510 global_: t.Optional[bool] = None,
511 ) -> exp.Show:
512 if target:
513 if isinstance(target, str):
514 self._match_text_seq(target)
515 target_id = self._parse_id_var()
516 else:
517 target_id = None
518
519 log = self._parse_string() if self._match_text_seq("IN") else None
520
521 if this in ("BINLOG EVENTS", "RELAYLOG EVENTS"):
522 position = self._parse_number() if self._match_text_seq("FROM") else None
523 db = None
524 else:
525 position = None
526 db = None
527
528 if self._match(TokenType.FROM):
529 db = self._parse_id_var()
530 elif self._match(TokenType.DOT):
531 db = target_id
532 target_id = self._parse_id_var()
533
534 channel = self._parse_id_var() if self._match_text_seq("FOR", "CHANNEL") else None
535
536 like = self._parse_string() if self._match_text_seq("LIKE") else None
537 where = self._parse_where()
538
539 if this == "PROFILE":
540 types = self._parse_csv(lambda: self._parse_var_from_options(self.PROFILE_TYPES))
541 query = self._parse_number() if self._match_text_seq("FOR", "QUERY") else None
542 offset = self._parse_number() if self._match_text_seq("OFFSET") else None
543 limit = self._parse_number() if self._match_text_seq("LIMIT") else None
544 else:
545 types, query = None, None
546 offset, limit = self._parse_oldstyle_limit()
547
548 mutex = True if self._match_text_seq("MUTEX") else None
549 mutex = False if self._match_text_seq("STATUS") else mutex
550
551 return self.expression(
552 exp.Show,
553 this=this,
554 target=target_id,
555 full=full,
556 log=log,
557 position=position,
558 db=db,
559 channel=channel,
560 like=like,
561 where=where,
562 types=types,
563 query=query,
564 offset=offset,
565 limit=limit,
566 mutex=mutex,
567 **{"global": global_}, # type: ignore
568 )
569
570 def _parse_oldstyle_limit(
571 self,
572 ) -> t.Tuple[t.Optional[exp.Expression], t.Optional[exp.Expression]]:
573 limit = None
574 offset = None
575 if self._match_text_seq("LIMIT"):
576 parts = self._parse_csv(self._parse_number)
577 if len(parts) == 1:
578 limit = parts[0]
579 elif len(parts) == 2:
580 limit = parts[1]
581 offset = parts[0]
582
583 return offset, limit
584
585 def _parse_set_item_charset(self, kind: str) -> exp.Expression:
586 this = self._parse_string() or self._parse_id_var()
587 return self.expression(exp.SetItem, this=this, kind=kind)
588
589 def _parse_set_item_names(self) -> exp.Expression:
590 charset = self._parse_string() or self._parse_id_var()
591 if self._match_text_seq("COLLATE"):
592 collate = self._parse_string() or self._parse_id_var()
593 else:
594 collate = None
595
596 return self.expression(exp.SetItem, this=charset, collate=collate, kind="NAMES")
597
598 def _parse_type(self, parse_interval: bool = True) -> t.Optional[exp.Expression]:
599 # mysql binary is special and can work anywhere, even in order by operations
600 # it operates like a no paren func
601 if self._match(TokenType.BINARY, advance=False):
602 data_type = self._parse_types(check_func=True, allow_identifiers=False)
603
604 if isinstance(data_type, exp.DataType):
605 return self.expression(exp.Cast, this=self._parse_column(), to=data_type)
606
607 return super()._parse_type(parse_interval=parse_interval)
608
609 def _parse_chr(self) -> t.Optional[exp.Expression]:
610 expressions = self._parse_csv(self._parse_conjunction)
611 kwargs: t.Dict[str, t.Any] = {"this": seq_get(expressions, 0)}
612
613 if len(expressions) > 1:
614 kwargs["expressions"] = expressions[1:]
615
616 if self._match(TokenType.USING):
617 kwargs["charset"] = self._parse_var()
618
619 return self.expression(exp.Chr, **kwargs)
620
621 def _parse_group_concat(self) -> t.Optional[exp.Expression]:
622 def concat_exprs(
623 node: t.Optional[exp.Expression], exprs: t.List[exp.Expression]
624 ) -> exp.Expression:
625 if isinstance(node, exp.Distinct) and len(node.expressions) > 1:
626 concat_exprs = [
627 self.expression(exp.Concat, expressions=node.expressions, safe=True)
628 ]
629 node.set("expressions", concat_exprs)
630 return node
631 if len(exprs) == 1:
632 return exprs[0]
633 return self.expression(exp.Concat, expressions=args, safe=True)
634
635 args = self._parse_csv(self._parse_lambda)
636
637 if args:
638 order = args[-1] if isinstance(args[-1], exp.Order) else None
639
640 if order:
641 # Order By is the last (or only) expression in the list and has consumed the 'expr' before it,
642 # remove 'expr' from exp.Order and add it back to args
643 args[-1] = order.this
644 order.set("this", concat_exprs(order.this, args))
645
646 this = order or concat_exprs(args[0], args)
647 else:
648 this = None
649
650 separator = self._parse_field() if self._match(TokenType.SEPARATOR) else None
651
652 return self.expression(exp.GroupConcat, this=this, separator=separator)
653
654 class Generator(generator.Generator):
655 LOCKING_READS_SUPPORTED = True
656 NULL_ORDERING_SUPPORTED = None
657 JOIN_HINTS = False
658 TABLE_HINTS = True
659 DUPLICATE_KEY_UPDATE_WITH_SET = False
660 QUERY_HINT_SEP = " "
661 VALUES_AS_TABLE = False
662 NVL2_SUPPORTED = False
663 LAST_DAY_SUPPORTS_DATE_PART = False
664 JSON_TYPE_REQUIRED_FOR_EXTRACTION = True
665 JSON_PATH_BRACKETED_KEY_SUPPORTED = False
666 JSON_KEY_VALUE_PAIR_SEP = ","
667 SUPPORTS_TO_NUMBER = False
668
669 TRANSFORMS = {
670 **generator.Generator.TRANSFORMS,
671 exp.ArrayAgg: rename_func("GROUP_CONCAT"),
672 exp.CurrentDate: no_paren_current_date_sql,
673 exp.DateDiff: _remove_ts_or_ds_to_date(
674 lambda self, e: self.func("DATEDIFF", e.this, e.expression), ("this", "expression")
675 ),
676 exp.DateAdd: _remove_ts_or_ds_to_date(date_add_sql("ADD")),
677 exp.DateStrToDate: datestrtodate_sql,
678 exp.DateSub: _remove_ts_or_ds_to_date(date_add_sql("SUB")),
679 exp.DateTrunc: _date_trunc_sql,
680 exp.Day: _remove_ts_or_ds_to_date(),
681 exp.DayOfMonth: _remove_ts_or_ds_to_date(rename_func("DAYOFMONTH")),
682 exp.DayOfWeek: _remove_ts_or_ds_to_date(rename_func("DAYOFWEEK")),
683 exp.DayOfYear: _remove_ts_or_ds_to_date(rename_func("DAYOFYEAR")),
684 exp.GroupConcat: lambda self,
685 e: f"""GROUP_CONCAT({self.sql(e, "this")} SEPARATOR {self.sql(e, "separator") or "','"})""",
686 exp.ILike: no_ilike_sql,
687 exp.JSONExtractScalar: arrow_json_extract_sql,
688 exp.Max: max_or_greatest,
689 exp.Min: min_or_least,
690 exp.Month: _remove_ts_or_ds_to_date(),
691 exp.NullSafeEQ: lambda self, e: self.binary(e, "<=>"),
692 exp.NullSafeNEQ: lambda self, e: f"NOT {self.binary(e, '<=>')}",
693 exp.ParseJSON: lambda self, e: self.sql(e, "this"),
694 exp.Pivot: no_pivot_sql,
695 exp.Select: transforms.preprocess(
696 [
697 transforms.eliminate_distinct_on,
698 transforms.eliminate_semi_and_anti_joins,
699 transforms.eliminate_qualify,
700 transforms.eliminate_full_outer_join,
701 ]
702 ),
703 exp.StrPosition: strposition_to_locate_sql,
704 exp.StrToDate: _str_to_date_sql,
705 exp.StrToTime: _str_to_date_sql,
706 exp.Stuff: rename_func("INSERT"),
707 exp.TableSample: no_tablesample_sql,
708 exp.TimeFromParts: rename_func("MAKETIME"),
709 exp.TimestampAdd: date_add_interval_sql("DATE", "ADD"),
710 exp.TimestampDiff: lambda self, e: self.func(
711 "TIMESTAMPDIFF", unit_to_var(e), e.expression, e.this
712 ),
713 exp.TimestampSub: date_add_interval_sql("DATE", "SUB"),
714 exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"),
715 exp.TimeStrToTime: lambda self, e: self.sql(
716 exp.cast(e.this, exp.DataType.Type.DATETIME, copy=True)
717 ),
718 exp.TimeToStr: _remove_ts_or_ds_to_date(
719 lambda self, e: self.func("DATE_FORMAT", e.this, self.format_time(e))
720 ),
721 exp.Trim: _trim_sql,
722 exp.TryCast: no_trycast_sql,
723 exp.TsOrDsAdd: date_add_sql("ADD"),
724 exp.TsOrDsDiff: lambda self, e: self.func("DATEDIFF", e.this, e.expression),
725 exp.TsOrDsToDate: _ts_or_ds_to_date_sql,
726 exp.UnixToTime: lambda self, e: self.func("FROM_UNIXTIME", e.this, self.format_time(e)),
727 exp.Week: _remove_ts_or_ds_to_date(),
728 exp.WeekOfYear: _remove_ts_or_ds_to_date(rename_func("WEEKOFYEAR")),
729 exp.Year: _remove_ts_or_ds_to_date(),
730 }
731
732 UNSIGNED_TYPE_MAPPING = {
733 exp.DataType.Type.UBIGINT: "BIGINT",
734 exp.DataType.Type.UINT: "INT",
735 exp.DataType.Type.UMEDIUMINT: "MEDIUMINT",
736 exp.DataType.Type.USMALLINT: "SMALLINT",
737 exp.DataType.Type.UTINYINT: "TINYINT",
738 exp.DataType.Type.UDECIMAL: "DECIMAL",
739 }
740
741 TIMESTAMP_TYPE_MAPPING = {
742 exp.DataType.Type.TIMESTAMP: "DATETIME",
743 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
744 exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
745 }
746
747 TYPE_MAPPING = {
748 **generator.Generator.TYPE_MAPPING,
749 **UNSIGNED_TYPE_MAPPING,
750 **TIMESTAMP_TYPE_MAPPING,
751 }
752
753 TYPE_MAPPING.pop(exp.DataType.Type.MEDIUMTEXT)
754 TYPE_MAPPING.pop(exp.DataType.Type.LONGTEXT)
755 TYPE_MAPPING.pop(exp.DataType.Type.TINYTEXT)
756 TYPE_MAPPING.pop(exp.DataType.Type.MEDIUMBLOB)
757 TYPE_MAPPING.pop(exp.DataType.Type.LONGBLOB)
758 TYPE_MAPPING.pop(exp.DataType.Type.TINYBLOB)
759
760 PROPERTIES_LOCATION = {
761 **generator.Generator.PROPERTIES_LOCATION,
762 exp.TransientProperty: exp.Properties.Location.UNSUPPORTED,
763 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
764 }
765
766 LIMIT_FETCH = "LIMIT"
767
768 LIMIT_ONLY_LITERALS = True
769
770 CHAR_CAST_MAPPING = dict.fromkeys(
771 (
772 exp.DataType.Type.LONGTEXT,
773 exp.DataType.Type.LONGBLOB,
774 exp.DataType.Type.MEDIUMBLOB,
775 exp.DataType.Type.MEDIUMTEXT,
776 exp.DataType.Type.TEXT,
777 exp.DataType.Type.TINYBLOB,
778 exp.DataType.Type.TINYTEXT,
779 exp.DataType.Type.VARCHAR,
780 ),
781 "CHAR",
782 )
783 SIGNED_CAST_MAPPING = dict.fromkeys(
784 (
785 exp.DataType.Type.BIGINT,
786 exp.DataType.Type.BOOLEAN,
787 exp.DataType.Type.INT,
788 exp.DataType.Type.SMALLINT,
789 exp.DataType.Type.TINYINT,
790 exp.DataType.Type.MEDIUMINT,
791 ),
792 "SIGNED",
793 )
794
795 # MySQL doesn't support many datatypes in cast.
796 # https://dev.mysql.com/doc/refman/8.0/en/cast-functions.html#function_cast
797 CAST_MAPPING = {
798 **CHAR_CAST_MAPPING,
799 **SIGNED_CAST_MAPPING,
800 exp.DataType.Type.UBIGINT: "UNSIGNED",
801 }
802
803 TIMESTAMP_FUNC_TYPES = {
804 exp.DataType.Type.TIMESTAMPTZ,
805 exp.DataType.Type.TIMESTAMPLTZ,
806 }
807
808 def extract_sql(self, expression: exp.Extract) -> str:
809 unit = expression.name
810 if unit and unit.lower() == "epoch":
811 return self.func("UNIX_TIMESTAMP", expression.expression)
812
813 return super().extract_sql(expression)
814
815 def datatype_sql(self, expression: exp.DataType) -> str:
816 # https://dev.mysql.com/doc/refman/8.0/en/numeric-type-syntax.html
817 result = super().datatype_sql(expression)
818 if expression.this in self.UNSIGNED_TYPE_MAPPING:
819 result = f"{result} UNSIGNED"
820 return result
821
822 def jsonarraycontains_sql(self, expression: exp.JSONArrayContains) -> str:
823 return f"{self.sql(expression, 'this')} MEMBER OF({self.sql(expression, 'expression')})"
824
825 def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
826 if expression.to.this in self.TIMESTAMP_FUNC_TYPES:
827 return self.func("TIMESTAMP", expression.this)
828
829 to = self.CAST_MAPPING.get(expression.to.this)
830
831 if to:
832 expression.to.set("this", to)
833 return super().cast_sql(expression)
834
835 def show_sql(self, expression: exp.Show) -> str:
836 this = f" {expression.name}"
837 full = " FULL" if expression.args.get("full") else ""
838 global_ = " GLOBAL" if expression.args.get("global") else ""
839
840 target = self.sql(expression, "target")
841 target = f" {target}" if target else ""
842 if expression.name in ("COLUMNS", "INDEX"):
843 target = f" FROM{target}"
844 elif expression.name == "GRANTS":
845 target = f" FOR{target}"
846
847 db = self._prefixed_sql("FROM", expression, "db")
848
849 like = self._prefixed_sql("LIKE", expression, "like")
850 where = self.sql(expression, "where")
851
852 types = self.expressions(expression, key="types")
853 types = f" {types}" if types else types
854 query = self._prefixed_sql("FOR QUERY", expression, "query")
855
856 if expression.name == "PROFILE":
857 offset = self._prefixed_sql("OFFSET", expression, "offset")
858 limit = self._prefixed_sql("LIMIT", expression, "limit")
859 else:
860 offset = ""
861 limit = self._oldstyle_limit_sql(expression)
862
863 log = self._prefixed_sql("IN", expression, "log")
864 position = self._prefixed_sql("FROM", expression, "position")
865
866 channel = self._prefixed_sql("FOR CHANNEL", expression, "channel")
867
868 if expression.name == "ENGINE":
869 mutex_or_status = " MUTEX" if expression.args.get("mutex") else " STATUS"
870 else:
871 mutex_or_status = ""
872
873 return f"SHOW{full}{global_}{this}{target}{types}{db}{query}{log}{position}{channel}{mutex_or_status}{like}{where}{offset}{limit}"
874
875 def altercolumn_sql(self, expression: exp.AlterColumn) -> str:
876 dtype = self.sql(expression, "dtype")
877 if not dtype:
878 return super().altercolumn_sql(expression)
879
880 this = self.sql(expression, "this")
881 return f"MODIFY COLUMN {this} {dtype}"
882
883 def _prefixed_sql(self, prefix: str, expression: exp.Expression, arg: str) -> str:
884 sql = self.sql(expression, arg)
885 return f" {prefix} {sql}" if sql else ""
886
887 def _oldstyle_limit_sql(self, expression: exp.Show) -> str:
888 limit = self.sql(expression, "limit")
889 offset = self.sql(expression, "offset")
890 if limit:
891 limit_offset = f"{offset}, {limit}" if offset else limit
892 return f" LIMIT {limit_offset}"
893 return ""
894
895 def chr_sql(self, expression: exp.Chr) -> str:
896 this = self.expressions(sqls=[expression.this] + expression.expressions)
897 charset = expression.args.get("charset")
898 using = f" USING {self.sql(charset)}" if charset else ""
899 return f"CHAR({this}{using})"
900
901 def timestamptrunc_sql(self, expression: exp.TimestampTrunc) -> str:
902 unit = expression.args.get("unit")
903
904 # Pick an old-enough date to avoid negative timestamp diffs
905 start_ts = "'0000-01-01 00:00:00'"
906
907 # Source: https://stackoverflow.com/a/32955740
908 timestamp_diff = build_date_delta(exp.TimestampDiff)([unit, start_ts, expression.this])
909 interval = exp.Interval(this=timestamp_diff, unit=unit)
910 dateadd = build_date_delta_with_interval(exp.DateAdd)([start_ts, interval])
911
912 return self.sql(dateadd)
913
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sqlglot/dialects/mysql.py b/sqlglot/dialects/mysql.py
--- a/sqlglot/dialects/mysql.py
+++ b/sqlglot/dialects/mysql.py
@@ -805,6 +805,9 @@
exp.DataType.Type.TIMESTAMPLTZ,
}
+ def dpipe_sql(self, expression: exp.DPipe) -> str:
+ return self.func("CONCAT", *expression.flatten())
+
def extract_sql(self, expression: exp.Extract) -> str:
unit = expression.name
if unit and unit.lower() == "epoch":
| {"golden_diff": "diff --git a/sqlglot/dialects/mysql.py b/sqlglot/dialects/mysql.py\n--- a/sqlglot/dialects/mysql.py\n+++ b/sqlglot/dialects/mysql.py\n@@ -805,6 +805,9 @@\n exp.DataType.Type.TIMESTAMPLTZ,\n }\n \n+ def dpipe_sql(self, expression: exp.DPipe) -> str:\n+ return self.func(\"CONCAT\", *expression.flatten())\n+\n def extract_sql(self, expression: exp.Extract) -> str:\n unit = expression.name\n if unit and unit.lower() == \"epoch\":\n", "issue": "[bug] pgsql to mysql special character || Semantic mismatch\n**Before you file an issue**\r\n- Make sure you specify the \"read\" dialect eg. `parse_one(sql, read=\"spark\")`\r\n- Make sure you specify the \"write\" dialect eg. `ast.sql(dialect=\"duckdb\")`\r\n- Check if the issue still exists on main\r\n\r\n**Fully reproducible code snippet**\r\nPlease include a fully reproducible code snippet or the input sql, dialect, and expected output.\r\n\r\n**Official Documentation**\r\nPlease include links to official SQL documentation related to your issue.\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport typing as t\n\nfrom sqlglot import exp, generator, parser, tokens, transforms\nfrom sqlglot.dialects.dialect import (\n Dialect,\n NormalizationStrategy,\n arrow_json_extract_sql,\n date_add_interval_sql,\n datestrtodate_sql,\n build_formatted_time,\n isnull_to_is_null,\n locate_to_strposition,\n max_or_greatest,\n min_or_least,\n no_ilike_sql,\n no_paren_current_date_sql,\n no_pivot_sql,\n no_tablesample_sql,\n no_trycast_sql,\n build_date_delta,\n build_date_delta_with_interval,\n rename_func,\n strposition_to_locate_sql,\n unit_to_var,\n)\nfrom sqlglot.helper import seq_get\nfrom sqlglot.tokens import TokenType\n\n\ndef _show_parser(*args: t.Any, **kwargs: t.Any) -> t.Callable[[MySQL.Parser], exp.Show]:\n def _parse(self: MySQL.Parser) -> exp.Show:\n return self._parse_show_mysql(*args, **kwargs)\n\n return _parse\n\n\ndef _date_trunc_sql(self: MySQL.Generator, expression: exp.DateTrunc) -> str:\n expr = self.sql(expression, \"this\")\n unit = expression.text(\"unit\").upper()\n\n if unit == \"WEEK\":\n concat = f\"CONCAT(YEAR({expr}), ' ', WEEK({expr}, 1), ' 1')\"\n date_format = \"%Y %u %w\"\n elif unit == \"MONTH\":\n concat = f\"CONCAT(YEAR({expr}), ' ', MONTH({expr}), ' 1')\"\n date_format = \"%Y %c %e\"\n elif unit == \"QUARTER\":\n concat = f\"CONCAT(YEAR({expr}), ' ', QUARTER({expr}) * 3 - 2, ' 1')\"\n date_format = \"%Y %c %e\"\n elif unit == \"YEAR\":\n concat = f\"CONCAT(YEAR({expr}), ' 1 1')\"\n date_format = \"%Y %c %e\"\n else:\n if unit != \"DAY\":\n self.unsupported(f\"Unexpected interval unit: {unit}\")\n return self.func(\"DATE\", expr)\n\n return self.func(\"STR_TO_DATE\", concat, f\"'{date_format}'\")\n\n\n# All specifiers for time parts (as opposed to date parts)\n# https://dev.mysql.com/doc/refman/8.0/en/date-and-time-functions.html#function_date-format\nTIME_SPECIFIERS = {\"f\", \"H\", \"h\", \"I\", \"i\", \"k\", \"l\", \"p\", \"r\", \"S\", \"s\", \"T\"}\n\n\ndef _has_time_specifier(date_format: str) -> bool:\n i = 0\n length = len(date_format)\n\n while i < length:\n if date_format[i] == \"%\":\n i += 1\n if i < length and date_format[i] in TIME_SPECIFIERS:\n return True\n i += 1\n return False\n\n\ndef _str_to_date(args: t.List) -> exp.StrToDate | exp.StrToTime:\n mysql_date_format = seq_get(args, 1)\n date_format = MySQL.format_time(mysql_date_format)\n this = seq_get(args, 0)\n\n if mysql_date_format and _has_time_specifier(mysql_date_format.name):\n return 
exp.StrToTime(this=this, format=date_format)\n\n return exp.StrToDate(this=this, format=date_format)\n\n\ndef _str_to_date_sql(\n self: MySQL.Generator, expression: exp.StrToDate | exp.StrToTime | exp.TsOrDsToDate\n) -> str:\n return self.func(\"STR_TO_DATE\", expression.this, self.format_time(expression))\n\n\ndef _trim_sql(self: MySQL.Generator, expression: exp.Trim) -> str:\n target = self.sql(expression, \"this\")\n trim_type = self.sql(expression, \"position\")\n remove_chars = self.sql(expression, \"expression\")\n\n # Use TRIM/LTRIM/RTRIM syntax if the expression isn't mysql-specific\n if not remove_chars:\n return self.trim_sql(expression)\n\n trim_type = f\"{trim_type} \" if trim_type else \"\"\n remove_chars = f\"{remove_chars} \" if remove_chars else \"\"\n from_part = \"FROM \" if trim_type or remove_chars else \"\"\n return f\"TRIM({trim_type}{remove_chars}{from_part}{target})\"\n\n\ndef date_add_sql(\n kind: str,\n) -> t.Callable[[generator.Generator, exp.Expression], str]:\n def func(self: generator.Generator, expression: exp.Expression) -> str:\n return self.func(\n f\"DATE_{kind}\",\n expression.this,\n exp.Interval(this=expression.expression, unit=unit_to_var(expression)),\n )\n\n return func\n\n\ndef _ts_or_ds_to_date_sql(self: MySQL.Generator, expression: exp.TsOrDsToDate) -> str:\n time_format = expression.args.get(\"format\")\n return _str_to_date_sql(self, expression) if time_format else self.func(\"DATE\", expression.this)\n\n\ndef _remove_ts_or_ds_to_date(\n to_sql: t.Optional[t.Callable[[MySQL.Generator, exp.Expression], str]] = None,\n args: t.Tuple[str, ...] = (\"this\",),\n) -> t.Callable[[MySQL.Generator, exp.Func], str]:\n def func(self: MySQL.Generator, expression: exp.Func) -> str:\n for arg_key in args:\n arg = expression.args.get(arg_key)\n if isinstance(arg, exp.TsOrDsToDate) and not arg.args.get(\"format\"):\n expression.set(arg_key, arg.this)\n\n return to_sql(self, expression) if to_sql else self.function_fallback_sql(expression)\n\n return func\n\n\nclass MySQL(Dialect):\n # https://dev.mysql.com/doc/refman/8.0/en/identifiers.html\n IDENTIFIERS_CAN_START_WITH_DIGIT = True\n\n # We default to treating all identifiers as case-sensitive, since it matches MySQL's\n # behavior on Linux systems. 
For MacOS and Windows systems, one can override this\n # setting by specifying `dialect=\"mysql, normalization_strategy = lowercase\"`.\n #\n # See also https://dev.mysql.com/doc/refman/8.2/en/identifier-case-sensitivity.html\n NORMALIZATION_STRATEGY = NormalizationStrategy.CASE_SENSITIVE\n\n TIME_FORMAT = \"'%Y-%m-%d %T'\"\n DPIPE_IS_STRING_CONCAT = False\n SUPPORTS_USER_DEFINED_TYPES = False\n SUPPORTS_SEMI_ANTI_JOIN = False\n SAFE_DIVISION = True\n\n # https://prestodb.io/docs/current/functions/datetime.html#mysql-date-functions\n TIME_MAPPING = {\n \"%M\": \"%B\",\n \"%c\": \"%-m\",\n \"%e\": \"%-d\",\n \"%h\": \"%I\",\n \"%i\": \"%M\",\n \"%s\": \"%S\",\n \"%u\": \"%W\",\n \"%k\": \"%-H\",\n \"%l\": \"%-I\",\n \"%T\": \"%H:%M:%S\",\n \"%W\": \"%a\",\n }\n\n class Tokenizer(tokens.Tokenizer):\n QUOTES = [\"'\", '\"']\n COMMENTS = [\"--\", \"#\", (\"/*\", \"*/\")]\n IDENTIFIERS = [\"`\"]\n STRING_ESCAPES = [\"'\", '\"', \"\\\\\"]\n BIT_STRINGS = [(\"b'\", \"'\"), (\"B'\", \"'\"), (\"0b\", \"\")]\n HEX_STRINGS = [(\"x'\", \"'\"), (\"X'\", \"'\"), (\"0x\", \"\")]\n\n KEYWORDS = {\n **tokens.Tokenizer.KEYWORDS,\n \"CHARSET\": TokenType.CHARACTER_SET,\n \"FORCE\": TokenType.FORCE,\n \"IGNORE\": TokenType.IGNORE,\n \"LOCK TABLES\": TokenType.COMMAND,\n \"LONGBLOB\": TokenType.LONGBLOB,\n \"LONGTEXT\": TokenType.LONGTEXT,\n \"MEDIUMBLOB\": TokenType.MEDIUMBLOB,\n \"TINYBLOB\": TokenType.TINYBLOB,\n \"TINYTEXT\": TokenType.TINYTEXT,\n \"MEDIUMTEXT\": TokenType.MEDIUMTEXT,\n \"MEDIUMINT\": TokenType.MEDIUMINT,\n \"MEMBER OF\": TokenType.MEMBER_OF,\n \"SEPARATOR\": TokenType.SEPARATOR,\n \"START\": TokenType.BEGIN,\n \"SIGNED\": TokenType.BIGINT,\n \"SIGNED INTEGER\": TokenType.BIGINT,\n \"UNLOCK TABLES\": TokenType.COMMAND,\n \"UNSIGNED\": TokenType.UBIGINT,\n \"UNSIGNED INTEGER\": TokenType.UBIGINT,\n \"YEAR\": TokenType.YEAR,\n \"_ARMSCII8\": TokenType.INTRODUCER,\n \"_ASCII\": TokenType.INTRODUCER,\n \"_BIG5\": TokenType.INTRODUCER,\n \"_BINARY\": TokenType.INTRODUCER,\n \"_CP1250\": TokenType.INTRODUCER,\n \"_CP1251\": TokenType.INTRODUCER,\n \"_CP1256\": TokenType.INTRODUCER,\n \"_CP1257\": TokenType.INTRODUCER,\n \"_CP850\": TokenType.INTRODUCER,\n \"_CP852\": TokenType.INTRODUCER,\n \"_CP866\": TokenType.INTRODUCER,\n \"_CP932\": TokenType.INTRODUCER,\n \"_DEC8\": TokenType.INTRODUCER,\n \"_EUCJPMS\": TokenType.INTRODUCER,\n \"_EUCKR\": TokenType.INTRODUCER,\n \"_GB18030\": TokenType.INTRODUCER,\n \"_GB2312\": TokenType.INTRODUCER,\n \"_GBK\": TokenType.INTRODUCER,\n \"_GEOSTD8\": TokenType.INTRODUCER,\n \"_GREEK\": TokenType.INTRODUCER,\n \"_HEBREW\": TokenType.INTRODUCER,\n \"_HP8\": TokenType.INTRODUCER,\n \"_KEYBCS2\": TokenType.INTRODUCER,\n \"_KOI8R\": TokenType.INTRODUCER,\n \"_KOI8U\": TokenType.INTRODUCER,\n \"_LATIN1\": TokenType.INTRODUCER,\n \"_LATIN2\": TokenType.INTRODUCER,\n \"_LATIN5\": TokenType.INTRODUCER,\n \"_LATIN7\": TokenType.INTRODUCER,\n \"_MACCE\": TokenType.INTRODUCER,\n \"_MACROMAN\": TokenType.INTRODUCER,\n \"_SJIS\": TokenType.INTRODUCER,\n \"_SWE7\": TokenType.INTRODUCER,\n \"_TIS620\": TokenType.INTRODUCER,\n \"_UCS2\": TokenType.INTRODUCER,\n \"_UJIS\": TokenType.INTRODUCER,\n # https://dev.mysql.com/doc/refman/8.0/en/string-literals.html\n \"_UTF8\": TokenType.INTRODUCER,\n \"_UTF16\": TokenType.INTRODUCER,\n \"_UTF16LE\": TokenType.INTRODUCER,\n \"_UTF32\": TokenType.INTRODUCER,\n \"_UTF8MB3\": TokenType.INTRODUCER,\n \"_UTF8MB4\": TokenType.INTRODUCER,\n \"@@\": TokenType.SESSION_PARAMETER,\n }\n\n COMMANDS = {*tokens.Tokenizer.COMMANDS, 
TokenType.REPLACE} - {TokenType.SHOW}\n\n class Parser(parser.Parser):\n FUNC_TOKENS = {\n *parser.Parser.FUNC_TOKENS,\n TokenType.DATABASE,\n TokenType.SCHEMA,\n TokenType.VALUES,\n }\n\n CONJUNCTION = {\n **parser.Parser.CONJUNCTION,\n TokenType.DAMP: exp.And,\n TokenType.XOR: exp.Xor,\n TokenType.DPIPE: exp.Or,\n }\n\n TABLE_ALIAS_TOKENS = (\n parser.Parser.TABLE_ALIAS_TOKENS - parser.Parser.TABLE_INDEX_HINT_TOKENS\n )\n\n RANGE_PARSERS = {\n **parser.Parser.RANGE_PARSERS,\n TokenType.MEMBER_OF: lambda self, this: self.expression(\n exp.JSONArrayContains,\n this=this,\n expression=self._parse_wrapped(self._parse_expression),\n ),\n }\n\n FUNCTIONS = {\n **parser.Parser.FUNCTIONS,\n \"DATE\": lambda args: exp.TsOrDsToDate(this=seq_get(args, 0)),\n \"DATE_ADD\": build_date_delta_with_interval(exp.DateAdd),\n \"DATE_FORMAT\": build_formatted_time(exp.TimeToStr, \"mysql\"),\n \"DATE_SUB\": build_date_delta_with_interval(exp.DateSub),\n \"DAY\": lambda args: exp.Day(this=exp.TsOrDsToDate(this=seq_get(args, 0))),\n \"DAYOFMONTH\": lambda args: exp.DayOfMonth(this=exp.TsOrDsToDate(this=seq_get(args, 0))),\n \"DAYOFWEEK\": lambda args: exp.DayOfWeek(this=exp.TsOrDsToDate(this=seq_get(args, 0))),\n \"DAYOFYEAR\": lambda args: exp.DayOfYear(this=exp.TsOrDsToDate(this=seq_get(args, 0))),\n \"INSTR\": lambda args: exp.StrPosition(substr=seq_get(args, 1), this=seq_get(args, 0)),\n \"FROM_UNIXTIME\": build_formatted_time(exp.UnixToTime, \"mysql\"),\n \"ISNULL\": isnull_to_is_null,\n \"LOCATE\": locate_to_strposition,\n \"MAKETIME\": exp.TimeFromParts.from_arg_list,\n \"MONTH\": lambda args: exp.Month(this=exp.TsOrDsToDate(this=seq_get(args, 0))),\n \"MONTHNAME\": lambda args: exp.TimeToStr(\n this=exp.TsOrDsToDate(this=seq_get(args, 0)),\n format=exp.Literal.string(\"%B\"),\n ),\n \"STR_TO_DATE\": _str_to_date,\n \"TIMESTAMPDIFF\": build_date_delta(exp.TimestampDiff),\n \"TO_DAYS\": lambda args: exp.paren(\n exp.DateDiff(\n this=exp.TsOrDsToDate(this=seq_get(args, 0)),\n expression=exp.TsOrDsToDate(this=exp.Literal.string(\"0000-01-01\")),\n unit=exp.var(\"DAY\"),\n )\n + 1\n ),\n \"WEEK\": lambda args: exp.Week(\n this=exp.TsOrDsToDate(this=seq_get(args, 0)), mode=seq_get(args, 1)\n ),\n \"WEEKOFYEAR\": lambda args: exp.WeekOfYear(this=exp.TsOrDsToDate(this=seq_get(args, 0))),\n \"YEAR\": lambda args: exp.Year(this=exp.TsOrDsToDate(this=seq_get(args, 0))),\n }\n\n FUNCTION_PARSERS = {\n **parser.Parser.FUNCTION_PARSERS,\n \"CHAR\": lambda self: self._parse_chr(),\n \"GROUP_CONCAT\": lambda self: self._parse_group_concat(),\n # https://dev.mysql.com/doc/refman/5.7/en/miscellaneous-functions.html#function_values\n \"VALUES\": lambda self: self.expression(\n exp.Anonymous, this=\"VALUES\", expressions=[self._parse_id_var()]\n ),\n }\n\n STATEMENT_PARSERS = {\n **parser.Parser.STATEMENT_PARSERS,\n TokenType.SHOW: lambda self: self._parse_show(),\n }\n\n SHOW_PARSERS = {\n \"BINARY LOGS\": _show_parser(\"BINARY LOGS\"),\n \"MASTER LOGS\": _show_parser(\"BINARY LOGS\"),\n \"BINLOG EVENTS\": _show_parser(\"BINLOG EVENTS\"),\n \"CHARACTER SET\": _show_parser(\"CHARACTER SET\"),\n \"CHARSET\": _show_parser(\"CHARACTER SET\"),\n \"COLLATION\": _show_parser(\"COLLATION\"),\n \"FULL COLUMNS\": _show_parser(\"COLUMNS\", target=\"FROM\", full=True),\n \"COLUMNS\": _show_parser(\"COLUMNS\", target=\"FROM\"),\n \"CREATE DATABASE\": _show_parser(\"CREATE DATABASE\", target=True),\n \"CREATE EVENT\": _show_parser(\"CREATE EVENT\", target=True),\n \"CREATE FUNCTION\": _show_parser(\"CREATE FUNCTION\", target=True),\n 
\"CREATE PROCEDURE\": _show_parser(\"CREATE PROCEDURE\", target=True),\n \"CREATE TABLE\": _show_parser(\"CREATE TABLE\", target=True),\n \"CREATE TRIGGER\": _show_parser(\"CREATE TRIGGER\", target=True),\n \"CREATE VIEW\": _show_parser(\"CREATE VIEW\", target=True),\n \"DATABASES\": _show_parser(\"DATABASES\"),\n \"SCHEMAS\": _show_parser(\"DATABASES\"),\n \"ENGINE\": _show_parser(\"ENGINE\", target=True),\n \"STORAGE ENGINES\": _show_parser(\"ENGINES\"),\n \"ENGINES\": _show_parser(\"ENGINES\"),\n \"ERRORS\": _show_parser(\"ERRORS\"),\n \"EVENTS\": _show_parser(\"EVENTS\"),\n \"FUNCTION CODE\": _show_parser(\"FUNCTION CODE\", target=True),\n \"FUNCTION STATUS\": _show_parser(\"FUNCTION STATUS\"),\n \"GRANTS\": _show_parser(\"GRANTS\", target=\"FOR\"),\n \"INDEX\": _show_parser(\"INDEX\", target=\"FROM\"),\n \"MASTER STATUS\": _show_parser(\"MASTER STATUS\"),\n \"OPEN TABLES\": _show_parser(\"OPEN TABLES\"),\n \"PLUGINS\": _show_parser(\"PLUGINS\"),\n \"PROCEDURE CODE\": _show_parser(\"PROCEDURE CODE\", target=True),\n \"PROCEDURE STATUS\": _show_parser(\"PROCEDURE STATUS\"),\n \"PRIVILEGES\": _show_parser(\"PRIVILEGES\"),\n \"FULL PROCESSLIST\": _show_parser(\"PROCESSLIST\", full=True),\n \"PROCESSLIST\": _show_parser(\"PROCESSLIST\"),\n \"PROFILE\": _show_parser(\"PROFILE\"),\n \"PROFILES\": _show_parser(\"PROFILES\"),\n \"RELAYLOG EVENTS\": _show_parser(\"RELAYLOG EVENTS\"),\n \"REPLICAS\": _show_parser(\"REPLICAS\"),\n \"SLAVE HOSTS\": _show_parser(\"REPLICAS\"),\n \"REPLICA STATUS\": _show_parser(\"REPLICA STATUS\"),\n \"SLAVE STATUS\": _show_parser(\"REPLICA STATUS\"),\n \"GLOBAL STATUS\": _show_parser(\"STATUS\", global_=True),\n \"SESSION STATUS\": _show_parser(\"STATUS\"),\n \"STATUS\": _show_parser(\"STATUS\"),\n \"TABLE STATUS\": _show_parser(\"TABLE STATUS\"),\n \"FULL TABLES\": _show_parser(\"TABLES\", full=True),\n \"TABLES\": _show_parser(\"TABLES\"),\n \"TRIGGERS\": _show_parser(\"TRIGGERS\"),\n \"GLOBAL VARIABLES\": _show_parser(\"VARIABLES\", global_=True),\n \"SESSION VARIABLES\": _show_parser(\"VARIABLES\"),\n \"VARIABLES\": _show_parser(\"VARIABLES\"),\n \"WARNINGS\": _show_parser(\"WARNINGS\"),\n }\n\n PROPERTY_PARSERS = {\n **parser.Parser.PROPERTY_PARSERS,\n \"LOCK\": lambda self: self._parse_property_assignment(exp.LockProperty),\n }\n\n SET_PARSERS = {\n **parser.Parser.SET_PARSERS,\n \"PERSIST\": lambda self: self._parse_set_item_assignment(\"PERSIST\"),\n \"PERSIST_ONLY\": lambda self: self._parse_set_item_assignment(\"PERSIST_ONLY\"),\n \"CHARACTER SET\": lambda self: self._parse_set_item_charset(\"CHARACTER SET\"),\n \"CHARSET\": lambda self: self._parse_set_item_charset(\"CHARACTER SET\"),\n \"NAMES\": lambda self: self._parse_set_item_names(),\n }\n\n CONSTRAINT_PARSERS = {\n **parser.Parser.CONSTRAINT_PARSERS,\n \"FULLTEXT\": lambda self: self._parse_index_constraint(kind=\"FULLTEXT\"),\n \"INDEX\": lambda self: self._parse_index_constraint(),\n \"KEY\": lambda self: self._parse_index_constraint(),\n \"SPATIAL\": lambda self: self._parse_index_constraint(kind=\"SPATIAL\"),\n }\n\n ALTER_PARSERS = {\n **parser.Parser.ALTER_PARSERS,\n \"MODIFY\": lambda self: self._parse_alter_table_alter(),\n }\n\n SCHEMA_UNNAMED_CONSTRAINTS = {\n *parser.Parser.SCHEMA_UNNAMED_CONSTRAINTS,\n \"FULLTEXT\",\n \"INDEX\",\n \"KEY\",\n \"SPATIAL\",\n }\n\n PROFILE_TYPES: parser.OPTIONS_TYPE = {\n **dict.fromkeys((\"ALL\", \"CPU\", \"IPC\", \"MEMORY\", \"SOURCE\", \"SWAPS\"), tuple()),\n \"BLOCK\": (\"IO\",),\n \"CONTEXT\": (\"SWITCHES\",),\n \"PAGE\": (\"FAULTS\",),\n }\n\n 
TYPE_TOKENS = {\n *parser.Parser.TYPE_TOKENS,\n TokenType.SET,\n }\n\n ENUM_TYPE_TOKENS = {\n *parser.Parser.ENUM_TYPE_TOKENS,\n TokenType.SET,\n }\n\n LOG_DEFAULTS_TO_LN = True\n STRING_ALIASES = True\n VALUES_FOLLOWED_BY_PAREN = False\n SUPPORTS_PARTITION_SELECTION = True\n\n def _parse_primary_key_part(self) -> t.Optional[exp.Expression]:\n this = self._parse_id_var()\n if not self._match(TokenType.L_PAREN):\n return this\n\n expression = self._parse_number()\n self._match_r_paren()\n return self.expression(exp.ColumnPrefix, this=this, expression=expression)\n\n def _parse_index_constraint(\n self, kind: t.Optional[str] = None\n ) -> exp.IndexColumnConstraint:\n if kind:\n self._match_texts((\"INDEX\", \"KEY\"))\n\n this = self._parse_id_var(any_token=False)\n index_type = self._match(TokenType.USING) and self._advance_any() and self._prev.text\n expressions = self._parse_wrapped_csv(self._parse_ordered)\n\n options = []\n while True:\n if self._match_text_seq(\"KEY_BLOCK_SIZE\"):\n self._match(TokenType.EQ)\n opt = exp.IndexConstraintOption(key_block_size=self._parse_number())\n elif self._match(TokenType.USING):\n opt = exp.IndexConstraintOption(using=self._advance_any() and self._prev.text)\n elif self._match_text_seq(\"WITH\", \"PARSER\"):\n opt = exp.IndexConstraintOption(parser=self._parse_var(any_token=True))\n elif self._match(TokenType.COMMENT):\n opt = exp.IndexConstraintOption(comment=self._parse_string())\n elif self._match_text_seq(\"VISIBLE\"):\n opt = exp.IndexConstraintOption(visible=True)\n elif self._match_text_seq(\"INVISIBLE\"):\n opt = exp.IndexConstraintOption(visible=False)\n elif self._match_text_seq(\"ENGINE_ATTRIBUTE\"):\n self._match(TokenType.EQ)\n opt = exp.IndexConstraintOption(engine_attr=self._parse_string())\n elif self._match_text_seq(\"SECONDARY_ENGINE_ATTRIBUTE\"):\n self._match(TokenType.EQ)\n opt = exp.IndexConstraintOption(secondary_engine_attr=self._parse_string())\n else:\n opt = None\n\n if not opt:\n break\n\n options.append(opt)\n\n return self.expression(\n exp.IndexColumnConstraint,\n this=this,\n expressions=expressions,\n kind=kind,\n index_type=index_type,\n options=options,\n )\n\n def _parse_show_mysql(\n self,\n this: str,\n target: bool | str = False,\n full: t.Optional[bool] = None,\n global_: t.Optional[bool] = None,\n ) -> exp.Show:\n if target:\n if isinstance(target, str):\n self._match_text_seq(target)\n target_id = self._parse_id_var()\n else:\n target_id = None\n\n log = self._parse_string() if self._match_text_seq(\"IN\") else None\n\n if this in (\"BINLOG EVENTS\", \"RELAYLOG EVENTS\"):\n position = self._parse_number() if self._match_text_seq(\"FROM\") else None\n db = None\n else:\n position = None\n db = None\n\n if self._match(TokenType.FROM):\n db = self._parse_id_var()\n elif self._match(TokenType.DOT):\n db = target_id\n target_id = self._parse_id_var()\n\n channel = self._parse_id_var() if self._match_text_seq(\"FOR\", \"CHANNEL\") else None\n\n like = self._parse_string() if self._match_text_seq(\"LIKE\") else None\n where = self._parse_where()\n\n if this == \"PROFILE\":\n types = self._parse_csv(lambda: self._parse_var_from_options(self.PROFILE_TYPES))\n query = self._parse_number() if self._match_text_seq(\"FOR\", \"QUERY\") else None\n offset = self._parse_number() if self._match_text_seq(\"OFFSET\") else None\n limit = self._parse_number() if self._match_text_seq(\"LIMIT\") else None\n else:\n types, query = None, None\n offset, limit = self._parse_oldstyle_limit()\n\n mutex = True if 
self._match_text_seq(\"MUTEX\") else None\n mutex = False if self._match_text_seq(\"STATUS\") else mutex\n\n return self.expression(\n exp.Show,\n this=this,\n target=target_id,\n full=full,\n log=log,\n position=position,\n db=db,\n channel=channel,\n like=like,\n where=where,\n types=types,\n query=query,\n offset=offset,\n limit=limit,\n mutex=mutex,\n **{\"global\": global_}, # type: ignore\n )\n\n def _parse_oldstyle_limit(\n self,\n ) -> t.Tuple[t.Optional[exp.Expression], t.Optional[exp.Expression]]:\n limit = None\n offset = None\n if self._match_text_seq(\"LIMIT\"):\n parts = self._parse_csv(self._parse_number)\n if len(parts) == 1:\n limit = parts[0]\n elif len(parts) == 2:\n limit = parts[1]\n offset = parts[0]\n\n return offset, limit\n\n def _parse_set_item_charset(self, kind: str) -> exp.Expression:\n this = self._parse_string() or self._parse_id_var()\n return self.expression(exp.SetItem, this=this, kind=kind)\n\n def _parse_set_item_names(self) -> exp.Expression:\n charset = self._parse_string() or self._parse_id_var()\n if self._match_text_seq(\"COLLATE\"):\n collate = self._parse_string() or self._parse_id_var()\n else:\n collate = None\n\n return self.expression(exp.SetItem, this=charset, collate=collate, kind=\"NAMES\")\n\n def _parse_type(self, parse_interval: bool = True) -> t.Optional[exp.Expression]:\n # mysql binary is special and can work anywhere, even in order by operations\n # it operates like a no paren func\n if self._match(TokenType.BINARY, advance=False):\n data_type = self._parse_types(check_func=True, allow_identifiers=False)\n\n if isinstance(data_type, exp.DataType):\n return self.expression(exp.Cast, this=self._parse_column(), to=data_type)\n\n return super()._parse_type(parse_interval=parse_interval)\n\n def _parse_chr(self) -> t.Optional[exp.Expression]:\n expressions = self._parse_csv(self._parse_conjunction)\n kwargs: t.Dict[str, t.Any] = {\"this\": seq_get(expressions, 0)}\n\n if len(expressions) > 1:\n kwargs[\"expressions\"] = expressions[1:]\n\n if self._match(TokenType.USING):\n kwargs[\"charset\"] = self._parse_var()\n\n return self.expression(exp.Chr, **kwargs)\n\n def _parse_group_concat(self) -> t.Optional[exp.Expression]:\n def concat_exprs(\n node: t.Optional[exp.Expression], exprs: t.List[exp.Expression]\n ) -> exp.Expression:\n if isinstance(node, exp.Distinct) and len(node.expressions) > 1:\n concat_exprs = [\n self.expression(exp.Concat, expressions=node.expressions, safe=True)\n ]\n node.set(\"expressions\", concat_exprs)\n return node\n if len(exprs) == 1:\n return exprs[0]\n return self.expression(exp.Concat, expressions=args, safe=True)\n\n args = self._parse_csv(self._parse_lambda)\n\n if args:\n order = args[-1] if isinstance(args[-1], exp.Order) else None\n\n if order:\n # Order By is the last (or only) expression in the list and has consumed the 'expr' before it,\n # remove 'expr' from exp.Order and add it back to args\n args[-1] = order.this\n order.set(\"this\", concat_exprs(order.this, args))\n\n this = order or concat_exprs(args[0], args)\n else:\n this = None\n\n separator = self._parse_field() if self._match(TokenType.SEPARATOR) else None\n\n return self.expression(exp.GroupConcat, this=this, separator=separator)\n\n class Generator(generator.Generator):\n LOCKING_READS_SUPPORTED = True\n NULL_ORDERING_SUPPORTED = None\n JOIN_HINTS = False\n TABLE_HINTS = True\n DUPLICATE_KEY_UPDATE_WITH_SET = False\n QUERY_HINT_SEP = \" \"\n VALUES_AS_TABLE = False\n NVL2_SUPPORTED = False\n LAST_DAY_SUPPORTS_DATE_PART = False\n 
JSON_TYPE_REQUIRED_FOR_EXTRACTION = True\n JSON_PATH_BRACKETED_KEY_SUPPORTED = False\n JSON_KEY_VALUE_PAIR_SEP = \",\"\n SUPPORTS_TO_NUMBER = False\n\n TRANSFORMS = {\n **generator.Generator.TRANSFORMS,\n exp.ArrayAgg: rename_func(\"GROUP_CONCAT\"),\n exp.CurrentDate: no_paren_current_date_sql,\n exp.DateDiff: _remove_ts_or_ds_to_date(\n lambda self, e: self.func(\"DATEDIFF\", e.this, e.expression), (\"this\", \"expression\")\n ),\n exp.DateAdd: _remove_ts_or_ds_to_date(date_add_sql(\"ADD\")),\n exp.DateStrToDate: datestrtodate_sql,\n exp.DateSub: _remove_ts_or_ds_to_date(date_add_sql(\"SUB\")),\n exp.DateTrunc: _date_trunc_sql,\n exp.Day: _remove_ts_or_ds_to_date(),\n exp.DayOfMonth: _remove_ts_or_ds_to_date(rename_func(\"DAYOFMONTH\")),\n exp.DayOfWeek: _remove_ts_or_ds_to_date(rename_func(\"DAYOFWEEK\")),\n exp.DayOfYear: _remove_ts_or_ds_to_date(rename_func(\"DAYOFYEAR\")),\n exp.GroupConcat: lambda self,\n e: f\"\"\"GROUP_CONCAT({self.sql(e, \"this\")} SEPARATOR {self.sql(e, \"separator\") or \"','\"})\"\"\",\n exp.ILike: no_ilike_sql,\n exp.JSONExtractScalar: arrow_json_extract_sql,\n exp.Max: max_or_greatest,\n exp.Min: min_or_least,\n exp.Month: _remove_ts_or_ds_to_date(),\n exp.NullSafeEQ: lambda self, e: self.binary(e, \"<=>\"),\n exp.NullSafeNEQ: lambda self, e: f\"NOT {self.binary(e, '<=>')}\",\n exp.ParseJSON: lambda self, e: self.sql(e, \"this\"),\n exp.Pivot: no_pivot_sql,\n exp.Select: transforms.preprocess(\n [\n transforms.eliminate_distinct_on,\n transforms.eliminate_semi_and_anti_joins,\n transforms.eliminate_qualify,\n transforms.eliminate_full_outer_join,\n ]\n ),\n exp.StrPosition: strposition_to_locate_sql,\n exp.StrToDate: _str_to_date_sql,\n exp.StrToTime: _str_to_date_sql,\n exp.Stuff: rename_func(\"INSERT\"),\n exp.TableSample: no_tablesample_sql,\n exp.TimeFromParts: rename_func(\"MAKETIME\"),\n exp.TimestampAdd: date_add_interval_sql(\"DATE\", \"ADD\"),\n exp.TimestampDiff: lambda self, e: self.func(\n \"TIMESTAMPDIFF\", unit_to_var(e), e.expression, e.this\n ),\n exp.TimestampSub: date_add_interval_sql(\"DATE\", \"SUB\"),\n exp.TimeStrToUnix: rename_func(\"UNIX_TIMESTAMP\"),\n exp.TimeStrToTime: lambda self, e: self.sql(\n exp.cast(e.this, exp.DataType.Type.DATETIME, copy=True)\n ),\n exp.TimeToStr: _remove_ts_or_ds_to_date(\n lambda self, e: self.func(\"DATE_FORMAT\", e.this, self.format_time(e))\n ),\n exp.Trim: _trim_sql,\n exp.TryCast: no_trycast_sql,\n exp.TsOrDsAdd: date_add_sql(\"ADD\"),\n exp.TsOrDsDiff: lambda self, e: self.func(\"DATEDIFF\", e.this, e.expression),\n exp.TsOrDsToDate: _ts_or_ds_to_date_sql,\n exp.UnixToTime: lambda self, e: self.func(\"FROM_UNIXTIME\", e.this, self.format_time(e)),\n exp.Week: _remove_ts_or_ds_to_date(),\n exp.WeekOfYear: _remove_ts_or_ds_to_date(rename_func(\"WEEKOFYEAR\")),\n exp.Year: _remove_ts_or_ds_to_date(),\n }\n\n UNSIGNED_TYPE_MAPPING = {\n exp.DataType.Type.UBIGINT: \"BIGINT\",\n exp.DataType.Type.UINT: \"INT\",\n exp.DataType.Type.UMEDIUMINT: \"MEDIUMINT\",\n exp.DataType.Type.USMALLINT: \"SMALLINT\",\n exp.DataType.Type.UTINYINT: \"TINYINT\",\n exp.DataType.Type.UDECIMAL: \"DECIMAL\",\n }\n\n TIMESTAMP_TYPE_MAPPING = {\n exp.DataType.Type.TIMESTAMP: \"DATETIME\",\n exp.DataType.Type.TIMESTAMPTZ: \"TIMESTAMP\",\n exp.DataType.Type.TIMESTAMPLTZ: \"TIMESTAMP\",\n }\n\n TYPE_MAPPING = {\n **generator.Generator.TYPE_MAPPING,\n **UNSIGNED_TYPE_MAPPING,\n **TIMESTAMP_TYPE_MAPPING,\n }\n\n TYPE_MAPPING.pop(exp.DataType.Type.MEDIUMTEXT)\n TYPE_MAPPING.pop(exp.DataType.Type.LONGTEXT)\n 
TYPE_MAPPING.pop(exp.DataType.Type.TINYTEXT)\n TYPE_MAPPING.pop(exp.DataType.Type.MEDIUMBLOB)\n TYPE_MAPPING.pop(exp.DataType.Type.LONGBLOB)\n TYPE_MAPPING.pop(exp.DataType.Type.TINYBLOB)\n\n PROPERTIES_LOCATION = {\n **generator.Generator.PROPERTIES_LOCATION,\n exp.TransientProperty: exp.Properties.Location.UNSUPPORTED,\n exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,\n }\n\n LIMIT_FETCH = \"LIMIT\"\n\n LIMIT_ONLY_LITERALS = True\n\n CHAR_CAST_MAPPING = dict.fromkeys(\n (\n exp.DataType.Type.LONGTEXT,\n exp.DataType.Type.LONGBLOB,\n exp.DataType.Type.MEDIUMBLOB,\n exp.DataType.Type.MEDIUMTEXT,\n exp.DataType.Type.TEXT,\n exp.DataType.Type.TINYBLOB,\n exp.DataType.Type.TINYTEXT,\n exp.DataType.Type.VARCHAR,\n ),\n \"CHAR\",\n )\n SIGNED_CAST_MAPPING = dict.fromkeys(\n (\n exp.DataType.Type.BIGINT,\n exp.DataType.Type.BOOLEAN,\n exp.DataType.Type.INT,\n exp.DataType.Type.SMALLINT,\n exp.DataType.Type.TINYINT,\n exp.DataType.Type.MEDIUMINT,\n ),\n \"SIGNED\",\n )\n\n # MySQL doesn't support many datatypes in cast.\n # https://dev.mysql.com/doc/refman/8.0/en/cast-functions.html#function_cast\n CAST_MAPPING = {\n **CHAR_CAST_MAPPING,\n **SIGNED_CAST_MAPPING,\n exp.DataType.Type.UBIGINT: \"UNSIGNED\",\n }\n\n TIMESTAMP_FUNC_TYPES = {\n exp.DataType.Type.TIMESTAMPTZ,\n exp.DataType.Type.TIMESTAMPLTZ,\n }\n\n def extract_sql(self, expression: exp.Extract) -> str:\n unit = expression.name\n if unit and unit.lower() == \"epoch\":\n return self.func(\"UNIX_TIMESTAMP\", expression.expression)\n\n return super().extract_sql(expression)\n\n def datatype_sql(self, expression: exp.DataType) -> str:\n # https://dev.mysql.com/doc/refman/8.0/en/numeric-type-syntax.html\n result = super().datatype_sql(expression)\n if expression.this in self.UNSIGNED_TYPE_MAPPING:\n result = f\"{result} UNSIGNED\"\n return result\n\n def jsonarraycontains_sql(self, expression: exp.JSONArrayContains) -> str:\n return f\"{self.sql(expression, 'this')} MEMBER OF({self.sql(expression, 'expression')})\"\n\n def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:\n if expression.to.this in self.TIMESTAMP_FUNC_TYPES:\n return self.func(\"TIMESTAMP\", expression.this)\n\n to = self.CAST_MAPPING.get(expression.to.this)\n\n if to:\n expression.to.set(\"this\", to)\n return super().cast_sql(expression)\n\n def show_sql(self, expression: exp.Show) -> str:\n this = f\" {expression.name}\"\n full = \" FULL\" if expression.args.get(\"full\") else \"\"\n global_ = \" GLOBAL\" if expression.args.get(\"global\") else \"\"\n\n target = self.sql(expression, \"target\")\n target = f\" {target}\" if target else \"\"\n if expression.name in (\"COLUMNS\", \"INDEX\"):\n target = f\" FROM{target}\"\n elif expression.name == \"GRANTS\":\n target = f\" FOR{target}\"\n\n db = self._prefixed_sql(\"FROM\", expression, \"db\")\n\n like = self._prefixed_sql(\"LIKE\", expression, \"like\")\n where = self.sql(expression, \"where\")\n\n types = self.expressions(expression, key=\"types\")\n types = f\" {types}\" if types else types\n query = self._prefixed_sql(\"FOR QUERY\", expression, \"query\")\n\n if expression.name == \"PROFILE\":\n offset = self._prefixed_sql(\"OFFSET\", expression, \"offset\")\n limit = self._prefixed_sql(\"LIMIT\", expression, \"limit\")\n else:\n offset = \"\"\n limit = self._oldstyle_limit_sql(expression)\n\n log = self._prefixed_sql(\"IN\", expression, \"log\")\n position = self._prefixed_sql(\"FROM\", expression, \"position\")\n\n channel = self._prefixed_sql(\"FOR CHANNEL\", expression, 
\"channel\")\n\n if expression.name == \"ENGINE\":\n mutex_or_status = \" MUTEX\" if expression.args.get(\"mutex\") else \" STATUS\"\n else:\n mutex_or_status = \"\"\n\n return f\"SHOW{full}{global_}{this}{target}{types}{db}{query}{log}{position}{channel}{mutex_or_status}{like}{where}{offset}{limit}\"\n\n def altercolumn_sql(self, expression: exp.AlterColumn) -> str:\n dtype = self.sql(expression, \"dtype\")\n if not dtype:\n return super().altercolumn_sql(expression)\n\n this = self.sql(expression, \"this\")\n return f\"MODIFY COLUMN {this} {dtype}\"\n\n def _prefixed_sql(self, prefix: str, expression: exp.Expression, arg: str) -> str:\n sql = self.sql(expression, arg)\n return f\" {prefix} {sql}\" if sql else \"\"\n\n def _oldstyle_limit_sql(self, expression: exp.Show) -> str:\n limit = self.sql(expression, \"limit\")\n offset = self.sql(expression, \"offset\")\n if limit:\n limit_offset = f\"{offset}, {limit}\" if offset else limit\n return f\" LIMIT {limit_offset}\"\n return \"\"\n\n def chr_sql(self, expression: exp.Chr) -> str:\n this = self.expressions(sqls=[expression.this] + expression.expressions)\n charset = expression.args.get(\"charset\")\n using = f\" USING {self.sql(charset)}\" if charset else \"\"\n return f\"CHAR({this}{using})\"\n\n def timestamptrunc_sql(self, expression: exp.TimestampTrunc) -> str:\n unit = expression.args.get(\"unit\")\n\n # Pick an old-enough date to avoid negative timestamp diffs\n start_ts = \"'0000-01-01 00:00:00'\"\n\n # Source: https://stackoverflow.com/a/32955740\n timestamp_diff = build_date_delta(exp.TimestampDiff)([unit, start_ts, expression.this])\n interval = exp.Interval(this=timestamp_diff, unit=unit)\n dateadd = build_date_delta_with_interval(exp.DateAdd)([start_ts, interval])\n\n return self.sql(dateadd)\n", "path": "sqlglot/dialects/mysql.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport typing as t\n\nfrom sqlglot import exp, generator, parser, tokens, transforms\nfrom sqlglot.dialects.dialect import (\n Dialect,\n NormalizationStrategy,\n arrow_json_extract_sql,\n date_add_interval_sql,\n datestrtodate_sql,\n build_formatted_time,\n isnull_to_is_null,\n locate_to_strposition,\n max_or_greatest,\n min_or_least,\n no_ilike_sql,\n no_paren_current_date_sql,\n no_pivot_sql,\n no_tablesample_sql,\n no_trycast_sql,\n build_date_delta,\n build_date_delta_with_interval,\n rename_func,\n strposition_to_locate_sql,\n unit_to_var,\n)\nfrom sqlglot.helper import seq_get\nfrom sqlglot.tokens import TokenType\n\n\ndef _show_parser(*args: t.Any, **kwargs: t.Any) -> t.Callable[[MySQL.Parser], exp.Show]:\n def _parse(self: MySQL.Parser) -> exp.Show:\n return self._parse_show_mysql(*args, **kwargs)\n\n return _parse\n\n\ndef _date_trunc_sql(self: MySQL.Generator, expression: exp.DateTrunc) -> str:\n expr = self.sql(expression, \"this\")\n unit = expression.text(\"unit\").upper()\n\n if unit == \"WEEK\":\n concat = f\"CONCAT(YEAR({expr}), ' ', WEEK({expr}, 1), ' 1')\"\n date_format = \"%Y %u %w\"\n elif unit == \"MONTH\":\n concat = f\"CONCAT(YEAR({expr}), ' ', MONTH({expr}), ' 1')\"\n date_format = \"%Y %c %e\"\n elif unit == \"QUARTER\":\n concat = f\"CONCAT(YEAR({expr}), ' ', QUARTER({expr}) * 3 - 2, ' 1')\"\n date_format = \"%Y %c %e\"\n elif unit == \"YEAR\":\n concat = f\"CONCAT(YEAR({expr}), ' 1 1')\"\n date_format = \"%Y %c %e\"\n else:\n if unit != \"DAY\":\n self.unsupported(f\"Unexpected interval unit: {unit}\")\n return self.func(\"DATE\", expr)\n\n return self.func(\"STR_TO_DATE\", concat, 
f\"'{date_format}'\")\n\n\n# All specifiers for time parts (as opposed to date parts)\n# https://dev.mysql.com/doc/refman/8.0/en/date-and-time-functions.html#function_date-format\nTIME_SPECIFIERS = {\"f\", \"H\", \"h\", \"I\", \"i\", \"k\", \"l\", \"p\", \"r\", \"S\", \"s\", \"T\"}\n\n\ndef _has_time_specifier(date_format: str) -> bool:\n i = 0\n length = len(date_format)\n\n while i < length:\n if date_format[i] == \"%\":\n i += 1\n if i < length and date_format[i] in TIME_SPECIFIERS:\n return True\n i += 1\n return False\n\n\ndef _str_to_date(args: t.List) -> exp.StrToDate | exp.StrToTime:\n mysql_date_format = seq_get(args, 1)\n date_format = MySQL.format_time(mysql_date_format)\n this = seq_get(args, 0)\n\n if mysql_date_format and _has_time_specifier(mysql_date_format.name):\n return exp.StrToTime(this=this, format=date_format)\n\n return exp.StrToDate(this=this, format=date_format)\n\n\ndef _str_to_date_sql(\n self: MySQL.Generator, expression: exp.StrToDate | exp.StrToTime | exp.TsOrDsToDate\n) -> str:\n return self.func(\"STR_TO_DATE\", expression.this, self.format_time(expression))\n\n\ndef _trim_sql(self: MySQL.Generator, expression: exp.Trim) -> str:\n target = self.sql(expression, \"this\")\n trim_type = self.sql(expression, \"position\")\n remove_chars = self.sql(expression, \"expression\")\n\n # Use TRIM/LTRIM/RTRIM syntax if the expression isn't mysql-specific\n if not remove_chars:\n return self.trim_sql(expression)\n\n trim_type = f\"{trim_type} \" if trim_type else \"\"\n remove_chars = f\"{remove_chars} \" if remove_chars else \"\"\n from_part = \"FROM \" if trim_type or remove_chars else \"\"\n return f\"TRIM({trim_type}{remove_chars}{from_part}{target})\"\n\n\ndef date_add_sql(\n kind: str,\n) -> t.Callable[[generator.Generator, exp.Expression], str]:\n def func(self: generator.Generator, expression: exp.Expression) -> str:\n return self.func(\n f\"DATE_{kind}\",\n expression.this,\n exp.Interval(this=expression.expression, unit=unit_to_var(expression)),\n )\n\n return func\n\n\ndef _ts_or_ds_to_date_sql(self: MySQL.Generator, expression: exp.TsOrDsToDate) -> str:\n time_format = expression.args.get(\"format\")\n return _str_to_date_sql(self, expression) if time_format else self.func(\"DATE\", expression.this)\n\n\ndef _remove_ts_or_ds_to_date(\n to_sql: t.Optional[t.Callable[[MySQL.Generator, exp.Expression], str]] = None,\n args: t.Tuple[str, ...] = (\"this\",),\n) -> t.Callable[[MySQL.Generator, exp.Func], str]:\n def func(self: MySQL.Generator, expression: exp.Func) -> str:\n for arg_key in args:\n arg = expression.args.get(arg_key)\n if isinstance(arg, exp.TsOrDsToDate) and not arg.args.get(\"format\"):\n expression.set(arg_key, arg.this)\n\n return to_sql(self, expression) if to_sql else self.function_fallback_sql(expression)\n\n return func\n\n\nclass MySQL(Dialect):\n # https://dev.mysql.com/doc/refman/8.0/en/identifiers.html\n IDENTIFIERS_CAN_START_WITH_DIGIT = True\n\n # We default to treating all identifiers as case-sensitive, since it matches MySQL's\n # behavior on Linux systems. 
For MacOS and Windows systems, one can override this\n # setting by specifying `dialect=\"mysql, normalization_strategy = lowercase\"`.\n #\n # See also https://dev.mysql.com/doc/refman/8.2/en/identifier-case-sensitivity.html\n NORMALIZATION_STRATEGY = NormalizationStrategy.CASE_SENSITIVE\n\n TIME_FORMAT = \"'%Y-%m-%d %T'\"\n DPIPE_IS_STRING_CONCAT = False\n SUPPORTS_USER_DEFINED_TYPES = False\n SUPPORTS_SEMI_ANTI_JOIN = False\n SAFE_DIVISION = True\n\n # https://prestodb.io/docs/current/functions/datetime.html#mysql-date-functions\n TIME_MAPPING = {\n \"%M\": \"%B\",\n \"%c\": \"%-m\",\n \"%e\": \"%-d\",\n \"%h\": \"%I\",\n \"%i\": \"%M\",\n \"%s\": \"%S\",\n \"%u\": \"%W\",\n \"%k\": \"%-H\",\n \"%l\": \"%-I\",\n \"%T\": \"%H:%M:%S\",\n \"%W\": \"%a\",\n }\n\n class Tokenizer(tokens.Tokenizer):\n QUOTES = [\"'\", '\"']\n COMMENTS = [\"--\", \"#\", (\"/*\", \"*/\")]\n IDENTIFIERS = [\"`\"]\n STRING_ESCAPES = [\"'\", '\"', \"\\\\\"]\n BIT_STRINGS = [(\"b'\", \"'\"), (\"B'\", \"'\"), (\"0b\", \"\")]\n HEX_STRINGS = [(\"x'\", \"'\"), (\"X'\", \"'\"), (\"0x\", \"\")]\n\n KEYWORDS = {\n **tokens.Tokenizer.KEYWORDS,\n \"CHARSET\": TokenType.CHARACTER_SET,\n \"FORCE\": TokenType.FORCE,\n \"IGNORE\": TokenType.IGNORE,\n \"LOCK TABLES\": TokenType.COMMAND,\n \"LONGBLOB\": TokenType.LONGBLOB,\n \"LONGTEXT\": TokenType.LONGTEXT,\n \"MEDIUMBLOB\": TokenType.MEDIUMBLOB,\n \"TINYBLOB\": TokenType.TINYBLOB,\n \"TINYTEXT\": TokenType.TINYTEXT,\n \"MEDIUMTEXT\": TokenType.MEDIUMTEXT,\n \"MEDIUMINT\": TokenType.MEDIUMINT,\n \"MEMBER OF\": TokenType.MEMBER_OF,\n \"SEPARATOR\": TokenType.SEPARATOR,\n \"START\": TokenType.BEGIN,\n \"SIGNED\": TokenType.BIGINT,\n \"SIGNED INTEGER\": TokenType.BIGINT,\n \"UNLOCK TABLES\": TokenType.COMMAND,\n \"UNSIGNED\": TokenType.UBIGINT,\n \"UNSIGNED INTEGER\": TokenType.UBIGINT,\n \"YEAR\": TokenType.YEAR,\n \"_ARMSCII8\": TokenType.INTRODUCER,\n \"_ASCII\": TokenType.INTRODUCER,\n \"_BIG5\": TokenType.INTRODUCER,\n \"_BINARY\": TokenType.INTRODUCER,\n \"_CP1250\": TokenType.INTRODUCER,\n \"_CP1251\": TokenType.INTRODUCER,\n \"_CP1256\": TokenType.INTRODUCER,\n \"_CP1257\": TokenType.INTRODUCER,\n \"_CP850\": TokenType.INTRODUCER,\n \"_CP852\": TokenType.INTRODUCER,\n \"_CP866\": TokenType.INTRODUCER,\n \"_CP932\": TokenType.INTRODUCER,\n \"_DEC8\": TokenType.INTRODUCER,\n \"_EUCJPMS\": TokenType.INTRODUCER,\n \"_EUCKR\": TokenType.INTRODUCER,\n \"_GB18030\": TokenType.INTRODUCER,\n \"_GB2312\": TokenType.INTRODUCER,\n \"_GBK\": TokenType.INTRODUCER,\n \"_GEOSTD8\": TokenType.INTRODUCER,\n \"_GREEK\": TokenType.INTRODUCER,\n \"_HEBREW\": TokenType.INTRODUCER,\n \"_HP8\": TokenType.INTRODUCER,\n \"_KEYBCS2\": TokenType.INTRODUCER,\n \"_KOI8R\": TokenType.INTRODUCER,\n \"_KOI8U\": TokenType.INTRODUCER,\n \"_LATIN1\": TokenType.INTRODUCER,\n \"_LATIN2\": TokenType.INTRODUCER,\n \"_LATIN5\": TokenType.INTRODUCER,\n \"_LATIN7\": TokenType.INTRODUCER,\n \"_MACCE\": TokenType.INTRODUCER,\n \"_MACROMAN\": TokenType.INTRODUCER,\n \"_SJIS\": TokenType.INTRODUCER,\n \"_SWE7\": TokenType.INTRODUCER,\n \"_TIS620\": TokenType.INTRODUCER,\n \"_UCS2\": TokenType.INTRODUCER,\n \"_UJIS\": TokenType.INTRODUCER,\n # https://dev.mysql.com/doc/refman/8.0/en/string-literals.html\n \"_UTF8\": TokenType.INTRODUCER,\n \"_UTF16\": TokenType.INTRODUCER,\n \"_UTF16LE\": TokenType.INTRODUCER,\n \"_UTF32\": TokenType.INTRODUCER,\n \"_UTF8MB3\": TokenType.INTRODUCER,\n \"_UTF8MB4\": TokenType.INTRODUCER,\n \"@@\": TokenType.SESSION_PARAMETER,\n }\n\n COMMANDS = {*tokens.Tokenizer.COMMANDS, 
TokenType.REPLACE} - {TokenType.SHOW}\n\n class Parser(parser.Parser):\n FUNC_TOKENS = {\n *parser.Parser.FUNC_TOKENS,\n TokenType.DATABASE,\n TokenType.SCHEMA,\n TokenType.VALUES,\n }\n\n CONJUNCTION = {\n **parser.Parser.CONJUNCTION,\n TokenType.DAMP: exp.And,\n TokenType.XOR: exp.Xor,\n TokenType.DPIPE: exp.Or,\n }\n\n TABLE_ALIAS_TOKENS = (\n parser.Parser.TABLE_ALIAS_TOKENS - parser.Parser.TABLE_INDEX_HINT_TOKENS\n )\n\n RANGE_PARSERS = {\n **parser.Parser.RANGE_PARSERS,\n TokenType.MEMBER_OF: lambda self, this: self.expression(\n exp.JSONArrayContains,\n this=this,\n expression=self._parse_wrapped(self._parse_expression),\n ),\n }\n\n FUNCTIONS = {\n **parser.Parser.FUNCTIONS,\n \"DATE\": lambda args: exp.TsOrDsToDate(this=seq_get(args, 0)),\n \"DATE_ADD\": build_date_delta_with_interval(exp.DateAdd),\n \"DATE_FORMAT\": build_formatted_time(exp.TimeToStr, \"mysql\"),\n \"DATE_SUB\": build_date_delta_with_interval(exp.DateSub),\n \"DAY\": lambda args: exp.Day(this=exp.TsOrDsToDate(this=seq_get(args, 0))),\n \"DAYOFMONTH\": lambda args: exp.DayOfMonth(this=exp.TsOrDsToDate(this=seq_get(args, 0))),\n \"DAYOFWEEK\": lambda args: exp.DayOfWeek(this=exp.TsOrDsToDate(this=seq_get(args, 0))),\n \"DAYOFYEAR\": lambda args: exp.DayOfYear(this=exp.TsOrDsToDate(this=seq_get(args, 0))),\n \"INSTR\": lambda args: exp.StrPosition(substr=seq_get(args, 1), this=seq_get(args, 0)),\n \"FROM_UNIXTIME\": build_formatted_time(exp.UnixToTime, \"mysql\"),\n \"ISNULL\": isnull_to_is_null,\n \"LOCATE\": locate_to_strposition,\n \"MAKETIME\": exp.TimeFromParts.from_arg_list,\n \"MONTH\": lambda args: exp.Month(this=exp.TsOrDsToDate(this=seq_get(args, 0))),\n \"MONTHNAME\": lambda args: exp.TimeToStr(\n this=exp.TsOrDsToDate(this=seq_get(args, 0)),\n format=exp.Literal.string(\"%B\"),\n ),\n \"STR_TO_DATE\": _str_to_date,\n \"TIMESTAMPDIFF\": build_date_delta(exp.TimestampDiff),\n \"TO_DAYS\": lambda args: exp.paren(\n exp.DateDiff(\n this=exp.TsOrDsToDate(this=seq_get(args, 0)),\n expression=exp.TsOrDsToDate(this=exp.Literal.string(\"0000-01-01\")),\n unit=exp.var(\"DAY\"),\n )\n + 1\n ),\n \"WEEK\": lambda args: exp.Week(\n this=exp.TsOrDsToDate(this=seq_get(args, 0)), mode=seq_get(args, 1)\n ),\n \"WEEKOFYEAR\": lambda args: exp.WeekOfYear(this=exp.TsOrDsToDate(this=seq_get(args, 0))),\n \"YEAR\": lambda args: exp.Year(this=exp.TsOrDsToDate(this=seq_get(args, 0))),\n }\n\n FUNCTION_PARSERS = {\n **parser.Parser.FUNCTION_PARSERS,\n \"CHAR\": lambda self: self._parse_chr(),\n \"GROUP_CONCAT\": lambda self: self._parse_group_concat(),\n # https://dev.mysql.com/doc/refman/5.7/en/miscellaneous-functions.html#function_values\n \"VALUES\": lambda self: self.expression(\n exp.Anonymous, this=\"VALUES\", expressions=[self._parse_id_var()]\n ),\n }\n\n STATEMENT_PARSERS = {\n **parser.Parser.STATEMENT_PARSERS,\n TokenType.SHOW: lambda self: self._parse_show(),\n }\n\n SHOW_PARSERS = {\n \"BINARY LOGS\": _show_parser(\"BINARY LOGS\"),\n \"MASTER LOGS\": _show_parser(\"BINARY LOGS\"),\n \"BINLOG EVENTS\": _show_parser(\"BINLOG EVENTS\"),\n \"CHARACTER SET\": _show_parser(\"CHARACTER SET\"),\n \"CHARSET\": _show_parser(\"CHARACTER SET\"),\n \"COLLATION\": _show_parser(\"COLLATION\"),\n \"FULL COLUMNS\": _show_parser(\"COLUMNS\", target=\"FROM\", full=True),\n \"COLUMNS\": _show_parser(\"COLUMNS\", target=\"FROM\"),\n \"CREATE DATABASE\": _show_parser(\"CREATE DATABASE\", target=True),\n \"CREATE EVENT\": _show_parser(\"CREATE EVENT\", target=True),\n \"CREATE FUNCTION\": _show_parser(\"CREATE FUNCTION\", target=True),\n 
\"CREATE PROCEDURE\": _show_parser(\"CREATE PROCEDURE\", target=True),\n \"CREATE TABLE\": _show_parser(\"CREATE TABLE\", target=True),\n \"CREATE TRIGGER\": _show_parser(\"CREATE TRIGGER\", target=True),\n \"CREATE VIEW\": _show_parser(\"CREATE VIEW\", target=True),\n \"DATABASES\": _show_parser(\"DATABASES\"),\n \"SCHEMAS\": _show_parser(\"DATABASES\"),\n \"ENGINE\": _show_parser(\"ENGINE\", target=True),\n \"STORAGE ENGINES\": _show_parser(\"ENGINES\"),\n \"ENGINES\": _show_parser(\"ENGINES\"),\n \"ERRORS\": _show_parser(\"ERRORS\"),\n \"EVENTS\": _show_parser(\"EVENTS\"),\n \"FUNCTION CODE\": _show_parser(\"FUNCTION CODE\", target=True),\n \"FUNCTION STATUS\": _show_parser(\"FUNCTION STATUS\"),\n \"GRANTS\": _show_parser(\"GRANTS\", target=\"FOR\"),\n \"INDEX\": _show_parser(\"INDEX\", target=\"FROM\"),\n \"MASTER STATUS\": _show_parser(\"MASTER STATUS\"),\n \"OPEN TABLES\": _show_parser(\"OPEN TABLES\"),\n \"PLUGINS\": _show_parser(\"PLUGINS\"),\n \"PROCEDURE CODE\": _show_parser(\"PROCEDURE CODE\", target=True),\n \"PROCEDURE STATUS\": _show_parser(\"PROCEDURE STATUS\"),\n \"PRIVILEGES\": _show_parser(\"PRIVILEGES\"),\n \"FULL PROCESSLIST\": _show_parser(\"PROCESSLIST\", full=True),\n \"PROCESSLIST\": _show_parser(\"PROCESSLIST\"),\n \"PROFILE\": _show_parser(\"PROFILE\"),\n \"PROFILES\": _show_parser(\"PROFILES\"),\n \"RELAYLOG EVENTS\": _show_parser(\"RELAYLOG EVENTS\"),\n \"REPLICAS\": _show_parser(\"REPLICAS\"),\n \"SLAVE HOSTS\": _show_parser(\"REPLICAS\"),\n \"REPLICA STATUS\": _show_parser(\"REPLICA STATUS\"),\n \"SLAVE STATUS\": _show_parser(\"REPLICA STATUS\"),\n \"GLOBAL STATUS\": _show_parser(\"STATUS\", global_=True),\n \"SESSION STATUS\": _show_parser(\"STATUS\"),\n \"STATUS\": _show_parser(\"STATUS\"),\n \"TABLE STATUS\": _show_parser(\"TABLE STATUS\"),\n \"FULL TABLES\": _show_parser(\"TABLES\", full=True),\n \"TABLES\": _show_parser(\"TABLES\"),\n \"TRIGGERS\": _show_parser(\"TRIGGERS\"),\n \"GLOBAL VARIABLES\": _show_parser(\"VARIABLES\", global_=True),\n \"SESSION VARIABLES\": _show_parser(\"VARIABLES\"),\n \"VARIABLES\": _show_parser(\"VARIABLES\"),\n \"WARNINGS\": _show_parser(\"WARNINGS\"),\n }\n\n PROPERTY_PARSERS = {\n **parser.Parser.PROPERTY_PARSERS,\n \"LOCK\": lambda self: self._parse_property_assignment(exp.LockProperty),\n }\n\n SET_PARSERS = {\n **parser.Parser.SET_PARSERS,\n \"PERSIST\": lambda self: self._parse_set_item_assignment(\"PERSIST\"),\n \"PERSIST_ONLY\": lambda self: self._parse_set_item_assignment(\"PERSIST_ONLY\"),\n \"CHARACTER SET\": lambda self: self._parse_set_item_charset(\"CHARACTER SET\"),\n \"CHARSET\": lambda self: self._parse_set_item_charset(\"CHARACTER SET\"),\n \"NAMES\": lambda self: self._parse_set_item_names(),\n }\n\n CONSTRAINT_PARSERS = {\n **parser.Parser.CONSTRAINT_PARSERS,\n \"FULLTEXT\": lambda self: self._parse_index_constraint(kind=\"FULLTEXT\"),\n \"INDEX\": lambda self: self._parse_index_constraint(),\n \"KEY\": lambda self: self._parse_index_constraint(),\n \"SPATIAL\": lambda self: self._parse_index_constraint(kind=\"SPATIAL\"),\n }\n\n ALTER_PARSERS = {\n **parser.Parser.ALTER_PARSERS,\n \"MODIFY\": lambda self: self._parse_alter_table_alter(),\n }\n\n SCHEMA_UNNAMED_CONSTRAINTS = {\n *parser.Parser.SCHEMA_UNNAMED_CONSTRAINTS,\n \"FULLTEXT\",\n \"INDEX\",\n \"KEY\",\n \"SPATIAL\",\n }\n\n PROFILE_TYPES: parser.OPTIONS_TYPE = {\n **dict.fromkeys((\"ALL\", \"CPU\", \"IPC\", \"MEMORY\", \"SOURCE\", \"SWAPS\"), tuple()),\n \"BLOCK\": (\"IO\",),\n \"CONTEXT\": (\"SWITCHES\",),\n \"PAGE\": (\"FAULTS\",),\n }\n\n 
TYPE_TOKENS = {\n *parser.Parser.TYPE_TOKENS,\n TokenType.SET,\n }\n\n ENUM_TYPE_TOKENS = {\n *parser.Parser.ENUM_TYPE_TOKENS,\n TokenType.SET,\n }\n\n LOG_DEFAULTS_TO_LN = True\n STRING_ALIASES = True\n VALUES_FOLLOWED_BY_PAREN = False\n SUPPORTS_PARTITION_SELECTION = True\n\n def _parse_primary_key_part(self) -> t.Optional[exp.Expression]:\n this = self._parse_id_var()\n if not self._match(TokenType.L_PAREN):\n return this\n\n expression = self._parse_number()\n self._match_r_paren()\n return self.expression(exp.ColumnPrefix, this=this, expression=expression)\n\n def _parse_index_constraint(\n self, kind: t.Optional[str] = None\n ) -> exp.IndexColumnConstraint:\n if kind:\n self._match_texts((\"INDEX\", \"KEY\"))\n\n this = self._parse_id_var(any_token=False)\n index_type = self._match(TokenType.USING) and self._advance_any() and self._prev.text\n expressions = self._parse_wrapped_csv(self._parse_ordered)\n\n options = []\n while True:\n if self._match_text_seq(\"KEY_BLOCK_SIZE\"):\n self._match(TokenType.EQ)\n opt = exp.IndexConstraintOption(key_block_size=self._parse_number())\n elif self._match(TokenType.USING):\n opt = exp.IndexConstraintOption(using=self._advance_any() and self._prev.text)\n elif self._match_text_seq(\"WITH\", \"PARSER\"):\n opt = exp.IndexConstraintOption(parser=self._parse_var(any_token=True))\n elif self._match(TokenType.COMMENT):\n opt = exp.IndexConstraintOption(comment=self._parse_string())\n elif self._match_text_seq(\"VISIBLE\"):\n opt = exp.IndexConstraintOption(visible=True)\n elif self._match_text_seq(\"INVISIBLE\"):\n opt = exp.IndexConstraintOption(visible=False)\n elif self._match_text_seq(\"ENGINE_ATTRIBUTE\"):\n self._match(TokenType.EQ)\n opt = exp.IndexConstraintOption(engine_attr=self._parse_string())\n elif self._match_text_seq(\"SECONDARY_ENGINE_ATTRIBUTE\"):\n self._match(TokenType.EQ)\n opt = exp.IndexConstraintOption(secondary_engine_attr=self._parse_string())\n else:\n opt = None\n\n if not opt:\n break\n\n options.append(opt)\n\n return self.expression(\n exp.IndexColumnConstraint,\n this=this,\n expressions=expressions,\n kind=kind,\n index_type=index_type,\n options=options,\n )\n\n def _parse_show_mysql(\n self,\n this: str,\n target: bool | str = False,\n full: t.Optional[bool] = None,\n global_: t.Optional[bool] = None,\n ) -> exp.Show:\n if target:\n if isinstance(target, str):\n self._match_text_seq(target)\n target_id = self._parse_id_var()\n else:\n target_id = None\n\n log = self._parse_string() if self._match_text_seq(\"IN\") else None\n\n if this in (\"BINLOG EVENTS\", \"RELAYLOG EVENTS\"):\n position = self._parse_number() if self._match_text_seq(\"FROM\") else None\n db = None\n else:\n position = None\n db = None\n\n if self._match(TokenType.FROM):\n db = self._parse_id_var()\n elif self._match(TokenType.DOT):\n db = target_id\n target_id = self._parse_id_var()\n\n channel = self._parse_id_var() if self._match_text_seq(\"FOR\", \"CHANNEL\") else None\n\n like = self._parse_string() if self._match_text_seq(\"LIKE\") else None\n where = self._parse_where()\n\n if this == \"PROFILE\":\n types = self._parse_csv(lambda: self._parse_var_from_options(self.PROFILE_TYPES))\n query = self._parse_number() if self._match_text_seq(\"FOR\", \"QUERY\") else None\n offset = self._parse_number() if self._match_text_seq(\"OFFSET\") else None\n limit = self._parse_number() if self._match_text_seq(\"LIMIT\") else None\n else:\n types, query = None, None\n offset, limit = self._parse_oldstyle_limit()\n\n mutex = True if 
self._match_text_seq(\"MUTEX\") else None\n mutex = False if self._match_text_seq(\"STATUS\") else mutex\n\n return self.expression(\n exp.Show,\n this=this,\n target=target_id,\n full=full,\n log=log,\n position=position,\n db=db,\n channel=channel,\n like=like,\n where=where,\n types=types,\n query=query,\n offset=offset,\n limit=limit,\n mutex=mutex,\n **{\"global\": global_}, # type: ignore\n )\n\n def _parse_oldstyle_limit(\n self,\n ) -> t.Tuple[t.Optional[exp.Expression], t.Optional[exp.Expression]]:\n limit = None\n offset = None\n if self._match_text_seq(\"LIMIT\"):\n parts = self._parse_csv(self._parse_number)\n if len(parts) == 1:\n limit = parts[0]\n elif len(parts) == 2:\n limit = parts[1]\n offset = parts[0]\n\n return offset, limit\n\n def _parse_set_item_charset(self, kind: str) -> exp.Expression:\n this = self._parse_string() or self._parse_id_var()\n return self.expression(exp.SetItem, this=this, kind=kind)\n\n def _parse_set_item_names(self) -> exp.Expression:\n charset = self._parse_string() or self._parse_id_var()\n if self._match_text_seq(\"COLLATE\"):\n collate = self._parse_string() or self._parse_id_var()\n else:\n collate = None\n\n return self.expression(exp.SetItem, this=charset, collate=collate, kind=\"NAMES\")\n\n def _parse_type(self, parse_interval: bool = True) -> t.Optional[exp.Expression]:\n # mysql binary is special and can work anywhere, even in order by operations\n # it operates like a no paren func\n if self._match(TokenType.BINARY, advance=False):\n data_type = self._parse_types(check_func=True, allow_identifiers=False)\n\n if isinstance(data_type, exp.DataType):\n return self.expression(exp.Cast, this=self._parse_column(), to=data_type)\n\n return super()._parse_type(parse_interval=parse_interval)\n\n def _parse_chr(self) -> t.Optional[exp.Expression]:\n expressions = self._parse_csv(self._parse_conjunction)\n kwargs: t.Dict[str, t.Any] = {\"this\": seq_get(expressions, 0)}\n\n if len(expressions) > 1:\n kwargs[\"expressions\"] = expressions[1:]\n\n if self._match(TokenType.USING):\n kwargs[\"charset\"] = self._parse_var()\n\n return self.expression(exp.Chr, **kwargs)\n\n def _parse_group_concat(self) -> t.Optional[exp.Expression]:\n def concat_exprs(\n node: t.Optional[exp.Expression], exprs: t.List[exp.Expression]\n ) -> exp.Expression:\n if isinstance(node, exp.Distinct) and len(node.expressions) > 1:\n concat_exprs = [\n self.expression(exp.Concat, expressions=node.expressions, safe=True)\n ]\n node.set(\"expressions\", concat_exprs)\n return node\n if len(exprs) == 1:\n return exprs[0]\n return self.expression(exp.Concat, expressions=args, safe=True)\n\n args = self._parse_csv(self._parse_lambda)\n\n if args:\n order = args[-1] if isinstance(args[-1], exp.Order) else None\n\n if order:\n # Order By is the last (or only) expression in the list and has consumed the 'expr' before it,\n # remove 'expr' from exp.Order and add it back to args\n args[-1] = order.this\n order.set(\"this\", concat_exprs(order.this, args))\n\n this = order or concat_exprs(args[0], args)\n else:\n this = None\n\n separator = self._parse_field() if self._match(TokenType.SEPARATOR) else None\n\n return self.expression(exp.GroupConcat, this=this, separator=separator)\n\n class Generator(generator.Generator):\n LOCKING_READS_SUPPORTED = True\n NULL_ORDERING_SUPPORTED = None\n JOIN_HINTS = False\n TABLE_HINTS = True\n DUPLICATE_KEY_UPDATE_WITH_SET = False\n QUERY_HINT_SEP = \" \"\n VALUES_AS_TABLE = False\n NVL2_SUPPORTED = False\n LAST_DAY_SUPPORTS_DATE_PART = False\n 
JSON_TYPE_REQUIRED_FOR_EXTRACTION = True\n JSON_PATH_BRACKETED_KEY_SUPPORTED = False\n JSON_KEY_VALUE_PAIR_SEP = \",\"\n SUPPORTS_TO_NUMBER = False\n\n TRANSFORMS = {\n **generator.Generator.TRANSFORMS,\n exp.ArrayAgg: rename_func(\"GROUP_CONCAT\"),\n exp.CurrentDate: no_paren_current_date_sql,\n exp.DateDiff: _remove_ts_or_ds_to_date(\n lambda self, e: self.func(\"DATEDIFF\", e.this, e.expression), (\"this\", \"expression\")\n ),\n exp.DateAdd: _remove_ts_or_ds_to_date(date_add_sql(\"ADD\")),\n exp.DateStrToDate: datestrtodate_sql,\n exp.DateSub: _remove_ts_or_ds_to_date(date_add_sql(\"SUB\")),\n exp.DateTrunc: _date_trunc_sql,\n exp.Day: _remove_ts_or_ds_to_date(),\n exp.DayOfMonth: _remove_ts_or_ds_to_date(rename_func(\"DAYOFMONTH\")),\n exp.DayOfWeek: _remove_ts_or_ds_to_date(rename_func(\"DAYOFWEEK\")),\n exp.DayOfYear: _remove_ts_or_ds_to_date(rename_func(\"DAYOFYEAR\")),\n exp.GroupConcat: lambda self,\n e: f\"\"\"GROUP_CONCAT({self.sql(e, \"this\")} SEPARATOR {self.sql(e, \"separator\") or \"','\"})\"\"\",\n exp.ILike: no_ilike_sql,\n exp.JSONExtractScalar: arrow_json_extract_sql,\n exp.Max: max_or_greatest,\n exp.Min: min_or_least,\n exp.Month: _remove_ts_or_ds_to_date(),\n exp.NullSafeEQ: lambda self, e: self.binary(e, \"<=>\"),\n exp.NullSafeNEQ: lambda self, e: f\"NOT {self.binary(e, '<=>')}\",\n exp.ParseJSON: lambda self, e: self.sql(e, \"this\"),\n exp.Pivot: no_pivot_sql,\n exp.Select: transforms.preprocess(\n [\n transforms.eliminate_distinct_on,\n transforms.eliminate_semi_and_anti_joins,\n transforms.eliminate_qualify,\n transforms.eliminate_full_outer_join,\n ]\n ),\n exp.StrPosition: strposition_to_locate_sql,\n exp.StrToDate: _str_to_date_sql,\n exp.StrToTime: _str_to_date_sql,\n exp.Stuff: rename_func(\"INSERT\"),\n exp.TableSample: no_tablesample_sql,\n exp.TimeFromParts: rename_func(\"MAKETIME\"),\n exp.TimestampAdd: date_add_interval_sql(\"DATE\", \"ADD\"),\n exp.TimestampDiff: lambda self, e: self.func(\n \"TIMESTAMPDIFF\", unit_to_var(e), e.expression, e.this\n ),\n exp.TimestampSub: date_add_interval_sql(\"DATE\", \"SUB\"),\n exp.TimeStrToUnix: rename_func(\"UNIX_TIMESTAMP\"),\n exp.TimeStrToTime: lambda self, e: self.sql(\n exp.cast(e.this, exp.DataType.Type.DATETIME, copy=True)\n ),\n exp.TimeToStr: _remove_ts_or_ds_to_date(\n lambda self, e: self.func(\"DATE_FORMAT\", e.this, self.format_time(e))\n ),\n exp.Trim: _trim_sql,\n exp.TryCast: no_trycast_sql,\n exp.TsOrDsAdd: date_add_sql(\"ADD\"),\n exp.TsOrDsDiff: lambda self, e: self.func(\"DATEDIFF\", e.this, e.expression),\n exp.TsOrDsToDate: _ts_or_ds_to_date_sql,\n exp.UnixToTime: lambda self, e: self.func(\"FROM_UNIXTIME\", e.this, self.format_time(e)),\n exp.Week: _remove_ts_or_ds_to_date(),\n exp.WeekOfYear: _remove_ts_or_ds_to_date(rename_func(\"WEEKOFYEAR\")),\n exp.Year: _remove_ts_or_ds_to_date(),\n }\n\n UNSIGNED_TYPE_MAPPING = {\n exp.DataType.Type.UBIGINT: \"BIGINT\",\n exp.DataType.Type.UINT: \"INT\",\n exp.DataType.Type.UMEDIUMINT: \"MEDIUMINT\",\n exp.DataType.Type.USMALLINT: \"SMALLINT\",\n exp.DataType.Type.UTINYINT: \"TINYINT\",\n exp.DataType.Type.UDECIMAL: \"DECIMAL\",\n }\n\n TIMESTAMP_TYPE_MAPPING = {\n exp.DataType.Type.TIMESTAMP: \"DATETIME\",\n exp.DataType.Type.TIMESTAMPTZ: \"TIMESTAMP\",\n exp.DataType.Type.TIMESTAMPLTZ: \"TIMESTAMP\",\n }\n\n TYPE_MAPPING = {\n **generator.Generator.TYPE_MAPPING,\n **UNSIGNED_TYPE_MAPPING,\n **TIMESTAMP_TYPE_MAPPING,\n }\n\n TYPE_MAPPING.pop(exp.DataType.Type.MEDIUMTEXT)\n TYPE_MAPPING.pop(exp.DataType.Type.LONGTEXT)\n 
TYPE_MAPPING.pop(exp.DataType.Type.TINYTEXT)\n TYPE_MAPPING.pop(exp.DataType.Type.MEDIUMBLOB)\n TYPE_MAPPING.pop(exp.DataType.Type.LONGBLOB)\n TYPE_MAPPING.pop(exp.DataType.Type.TINYBLOB)\n\n PROPERTIES_LOCATION = {\n **generator.Generator.PROPERTIES_LOCATION,\n exp.TransientProperty: exp.Properties.Location.UNSUPPORTED,\n exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,\n }\n\n LIMIT_FETCH = \"LIMIT\"\n\n LIMIT_ONLY_LITERALS = True\n\n CHAR_CAST_MAPPING = dict.fromkeys(\n (\n exp.DataType.Type.LONGTEXT,\n exp.DataType.Type.LONGBLOB,\n exp.DataType.Type.MEDIUMBLOB,\n exp.DataType.Type.MEDIUMTEXT,\n exp.DataType.Type.TEXT,\n exp.DataType.Type.TINYBLOB,\n exp.DataType.Type.TINYTEXT,\n exp.DataType.Type.VARCHAR,\n ),\n \"CHAR\",\n )\n SIGNED_CAST_MAPPING = dict.fromkeys(\n (\n exp.DataType.Type.BIGINT,\n exp.DataType.Type.BOOLEAN,\n exp.DataType.Type.INT,\n exp.DataType.Type.SMALLINT,\n exp.DataType.Type.TINYINT,\n exp.DataType.Type.MEDIUMINT,\n ),\n \"SIGNED\",\n )\n\n # MySQL doesn't support many datatypes in cast.\n # https://dev.mysql.com/doc/refman/8.0/en/cast-functions.html#function_cast\n CAST_MAPPING = {\n **CHAR_CAST_MAPPING,\n **SIGNED_CAST_MAPPING,\n exp.DataType.Type.UBIGINT: \"UNSIGNED\",\n }\n\n TIMESTAMP_FUNC_TYPES = {\n exp.DataType.Type.TIMESTAMPTZ,\n exp.DataType.Type.TIMESTAMPLTZ,\n }\n\n def dpipe_sql(self, expression: exp.DPipe) -> str:\n return self.func(\"CONCAT\", *expression.flatten())\n\n def extract_sql(self, expression: exp.Extract) -> str:\n unit = expression.name\n if unit and unit.lower() == \"epoch\":\n return self.func(\"UNIX_TIMESTAMP\", expression.expression)\n\n return super().extract_sql(expression)\n\n def datatype_sql(self, expression: exp.DataType) -> str:\n # https://dev.mysql.com/doc/refman/8.0/en/numeric-type-syntax.html\n result = super().datatype_sql(expression)\n if expression.this in self.UNSIGNED_TYPE_MAPPING:\n result = f\"{result} UNSIGNED\"\n return result\n\n def jsonarraycontains_sql(self, expression: exp.JSONArrayContains) -> str:\n return f\"{self.sql(expression, 'this')} MEMBER OF({self.sql(expression, 'expression')})\"\n\n def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:\n if expression.to.this in self.TIMESTAMP_FUNC_TYPES:\n return self.func(\"TIMESTAMP\", expression.this)\n\n to = self.CAST_MAPPING.get(expression.to.this)\n\n if to:\n expression.to.set(\"this\", to)\n return super().cast_sql(expression)\n\n def show_sql(self, expression: exp.Show) -> str:\n this = f\" {expression.name}\"\n full = \" FULL\" if expression.args.get(\"full\") else \"\"\n global_ = \" GLOBAL\" if expression.args.get(\"global\") else \"\"\n\n target = self.sql(expression, \"target\")\n target = f\" {target}\" if target else \"\"\n if expression.name in (\"COLUMNS\", \"INDEX\"):\n target = f\" FROM{target}\"\n elif expression.name == \"GRANTS\":\n target = f\" FOR{target}\"\n\n db = self._prefixed_sql(\"FROM\", expression, \"db\")\n\n like = self._prefixed_sql(\"LIKE\", expression, \"like\")\n where = self.sql(expression, \"where\")\n\n types = self.expressions(expression, key=\"types\")\n types = f\" {types}\" if types else types\n query = self._prefixed_sql(\"FOR QUERY\", expression, \"query\")\n\n if expression.name == \"PROFILE\":\n offset = self._prefixed_sql(\"OFFSET\", expression, \"offset\")\n limit = self._prefixed_sql(\"LIMIT\", expression, \"limit\")\n else:\n offset = \"\"\n limit = self._oldstyle_limit_sql(expression)\n\n log = self._prefixed_sql(\"IN\", expression, \"log\")\n position = 
self._prefixed_sql(\"FROM\", expression, \"position\")\n\n channel = self._prefixed_sql(\"FOR CHANNEL\", expression, \"channel\")\n\n if expression.name == \"ENGINE\":\n mutex_or_status = \" MUTEX\" if expression.args.get(\"mutex\") else \" STATUS\"\n else:\n mutex_or_status = \"\"\n\n return f\"SHOW{full}{global_}{this}{target}{types}{db}{query}{log}{position}{channel}{mutex_or_status}{like}{where}{offset}{limit}\"\n\n def altercolumn_sql(self, expression: exp.AlterColumn) -> str:\n dtype = self.sql(expression, \"dtype\")\n if not dtype:\n return super().altercolumn_sql(expression)\n\n this = self.sql(expression, \"this\")\n return f\"MODIFY COLUMN {this} {dtype}\"\n\n def _prefixed_sql(self, prefix: str, expression: exp.Expression, arg: str) -> str:\n sql = self.sql(expression, arg)\n return f\" {prefix} {sql}\" if sql else \"\"\n\n def _oldstyle_limit_sql(self, expression: exp.Show) -> str:\n limit = self.sql(expression, \"limit\")\n offset = self.sql(expression, \"offset\")\n if limit:\n limit_offset = f\"{offset}, {limit}\" if offset else limit\n return f\" LIMIT {limit_offset}\"\n return \"\"\n\n def chr_sql(self, expression: exp.Chr) -> str:\n this = self.expressions(sqls=[expression.this] + expression.expressions)\n charset = expression.args.get(\"charset\")\n using = f\" USING {self.sql(charset)}\" if charset else \"\"\n return f\"CHAR({this}{using})\"\n\n def timestamptrunc_sql(self, expression: exp.TimestampTrunc) -> str:\n unit = expression.args.get(\"unit\")\n\n # Pick an old-enough date to avoid negative timestamp diffs\n start_ts = \"'0000-01-01 00:00:00'\"\n\n # Source: https://stackoverflow.com/a/32955740\n timestamp_diff = build_date_delta(exp.TimestampDiff)([unit, start_ts, expression.this])\n interval = exp.Interval(this=timestamp_diff, unit=unit)\n dateadd = build_date_delta_with_interval(exp.DateAdd)([start_ts, interval])\n\n return self.sql(dateadd)\n", "path": "sqlglot/dialects/mysql.py"}]} |
gh_patches_debug_1288 | rasdani/github-patches | git_diff | open-mmlab__mmpose-267 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pylint: R1719
```bash
mmpose/models/backbones/shufflenet_v1.py:238:26: R1719: The if expression can be replaced with 'test' (simplifiable-if-expression)
```
--- END ISSUE ---
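For reference, pylint R1719 (`simplifiable-if-expression`) is raised when a conditional expression merely maps a boolean test onto the literals `True`/`False`, e.g. `x = True if cond else False`; the test can be used directly instead. The sketch below mirrors the flagged pattern for illustration — it is a hypothetical snippet, not an excerpt of the fix:

```python
# Flagged by pylint R1719: the conditional expression just restates the test.
first_block = True if i == 0 else False

# Simplified forms that satisfy the checker:
first_block = i == 0          # the comparison already evaluates to a bool
first_block = bool(i == 0)    # explicit bool() if you want to spell it out
```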
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mmpose/models/backbones/shufflenet_v1.py`
Content:
```
1 import logging
2
3 import torch
4 import torch.nn as nn
5 import torch.utils.checkpoint as cp
6 from mmcv.cnn import (ConvModule, build_activation_layer, constant_init,
7 normal_init)
8 from torch.nn.modules.batchnorm import _BatchNorm
9
10 from ..registry import BACKBONES
11 from .base_backbone import BaseBackbone
12 from .utils import channel_shuffle, load_checkpoint, make_divisible
13
14
15 class ShuffleUnit(nn.Module):
16 """ShuffleUnit block.
17
18 ShuffleNet unit with pointwise group convolution (GConv) and channel
19 shuffle.
20
21 Args:
22 in_channels (int): The input channels of the ShuffleUnit.
23 out_channels (int): The output channels of the ShuffleUnit.
24 groups (int, optional): The number of groups to be used in grouped 1x1
25 convolutions in each ShuffleUnit. Default: 3
26 first_block (bool, optional): Whether it is the first ShuffleUnit of a
27 sequential ShuffleUnits. Default: False, which means not using the
28 grouped 1x1 convolution.
29 combine (str, optional): The ways to combine the input and output
30 branches. Default: 'add'.
31 conv_cfg (dict): Config dict for convolution layer. Default: None,
32 which means using conv2d.
33 norm_cfg (dict): Config dict for normalization layer.
34 Default: dict(type='BN').
35 act_cfg (dict): Config dict for activation layer.
36 Default: dict(type='ReLU').
37 with_cp (bool, optional): Use checkpoint or not. Using checkpoint
38 will save some memory while slowing down the training speed.
39 Default: False.
40
41 Returns:
42 Tensor: The output tensor.
43 """
44
45 def __init__(self,
46 in_channels,
47 out_channels,
48 groups=3,
49 first_block=True,
50 combine='add',
51 conv_cfg=None,
52 norm_cfg=dict(type='BN'),
53 act_cfg=dict(type='ReLU'),
54 with_cp=False):
55 super().__init__()
56 self.in_channels = in_channels
57 self.out_channels = out_channels
58 self.first_block = first_block
59 self.combine = combine
60 self.groups = groups
61 self.bottleneck_channels = self.out_channels // 4
62 self.with_cp = with_cp
63
64 if self.combine == 'add':
65 self.depthwise_stride = 1
66 self._combine_func = self._add
67 assert in_channels == out_channels, (
68 'in_channels must be equal to out_channels when combine '
69 'is add')
70 elif self.combine == 'concat':
71 self.depthwise_stride = 2
72 self._combine_func = self._concat
73 self.out_channels -= self.in_channels
74 self.avgpool = nn.AvgPool2d(kernel_size=3, stride=2, padding=1)
75 else:
76 raise ValueError(f'Cannot combine tensors with {self.combine}. '
77 'Only "add" and "concat" are supported')
78
79 self.first_1x1_groups = 1 if first_block else self.groups
80 self.g_conv_1x1_compress = ConvModule(
81 in_channels=self.in_channels,
82 out_channels=self.bottleneck_channels,
83 kernel_size=1,
84 groups=self.first_1x1_groups,
85 conv_cfg=conv_cfg,
86 norm_cfg=norm_cfg,
87 act_cfg=act_cfg)
88
89 self.depthwise_conv3x3_bn = ConvModule(
90 in_channels=self.bottleneck_channels,
91 out_channels=self.bottleneck_channels,
92 kernel_size=3,
93 stride=self.depthwise_stride,
94 padding=1,
95 groups=self.bottleneck_channels,
96 conv_cfg=conv_cfg,
97 norm_cfg=norm_cfg,
98 act_cfg=None)
99
100 self.g_conv_1x1_expand = ConvModule(
101 in_channels=self.bottleneck_channels,
102 out_channels=self.out_channels,
103 kernel_size=1,
104 groups=self.groups,
105 conv_cfg=conv_cfg,
106 norm_cfg=norm_cfg,
107 act_cfg=None)
108
109 self.act = build_activation_layer(act_cfg)
110
111 @staticmethod
112 def _add(x, out):
113 # residual connection
114 return x + out
115
116 @staticmethod
117 def _concat(x, out):
118 # concatenate along channel axis
119 return torch.cat((x, out), 1)
120
121 def forward(self, x):
122
123 def _inner_forward(x):
124 residual = x
125
126 out = self.g_conv_1x1_compress(x)
127 out = self.depthwise_conv3x3_bn(out)
128
129 if self.groups > 1:
130 out = channel_shuffle(out, self.groups)
131
132 out = self.g_conv_1x1_expand(out)
133
134 if self.combine == 'concat':
135 residual = self.avgpool(residual)
136 out = self.act(out)
137 out = self._combine_func(residual, out)
138 else:
139 out = self._combine_func(residual, out)
140 out = self.act(out)
141 return out
142
143 if self.with_cp and x.requires_grad:
144 out = cp.checkpoint(_inner_forward, x)
145 else:
146 out = _inner_forward(x)
147
148 return out
149
150
151 @BACKBONES.register_module()
152 class ShuffleNetV1(BaseBackbone):
153 """ShuffleNetV1 backbone.
154
155 Args:
156 groups (int, optional): The number of groups to be used in grouped 1x1
157 convolutions in each ShuffleUnit. Default: 3.
158 widen_factor (float, optional): Width multiplier - adjusts the number
159 of channels in each layer by this amount. Default: 1.0.
160 out_indices (Sequence[int]): Output from which stages.
161 Default: (2, )
162 frozen_stages (int): Stages to be frozen (all param fixed).
163 Default: -1, which means not freezing any parameters.
164 conv_cfg (dict): Config dict for convolution layer. Default: None,
165 which means using conv2d.
166 norm_cfg (dict): Config dict for normalization layer.
167 Default: dict(type='BN').
168 act_cfg (dict): Config dict for activation layer.
169 Default: dict(type='ReLU').
170 norm_eval (bool): Whether to set norm layers to eval mode, namely,
171 freeze running stats (mean and var). Note: Effect on Batch Norm
172 and its variants only. Default: False.
173 with_cp (bool): Use checkpoint or not. Using checkpoint will save some
174 memory while slowing down the training speed. Default: False.
175 """
176
177 def __init__(self,
178 groups=3,
179 widen_factor=1.0,
180 out_indices=(2, ),
181 frozen_stages=-1,
182 conv_cfg=None,
183 norm_cfg=dict(type='BN'),
184 act_cfg=dict(type='ReLU'),
185 norm_eval=False,
186 with_cp=False):
187 super().__init__()
188 self.stage_blocks = [4, 8, 4]
189 self.groups = groups
190
191 for index in out_indices:
192 if index not in range(0, 3):
193 raise ValueError('the item in out_indices must in '
194 f'range(0, 3). But received {index}')
195
196 if frozen_stages not in range(-1, 3):
197 raise ValueError('frozen_stages must be in range(-1, 3). '
198 f'But received {frozen_stages}')
199 self.out_indices = out_indices
200 self.frozen_stages = frozen_stages
201 self.conv_cfg = conv_cfg
202 self.norm_cfg = norm_cfg
203 self.act_cfg = act_cfg
204 self.norm_eval = norm_eval
205 self.with_cp = with_cp
206
207 if groups == 1:
208 channels = (144, 288, 576)
209 elif groups == 2:
210 channels = (200, 400, 800)
211 elif groups == 3:
212 channels = (240, 480, 960)
213 elif groups == 4:
214 channels = (272, 544, 1088)
215 elif groups == 8:
216 channels = (384, 768, 1536)
217 else:
218 raise ValueError(f'{groups} groups is not supported for 1x1 '
219 'Grouped Convolutions')
220
221 channels = [make_divisible(ch * widen_factor, 8) for ch in channels]
222
223 self.in_channels = int(24 * widen_factor)
224
225 self.conv1 = ConvModule(
226 in_channels=3,
227 out_channels=self.in_channels,
228 kernel_size=3,
229 stride=2,
230 padding=1,
231 conv_cfg=conv_cfg,
232 norm_cfg=norm_cfg,
233 act_cfg=act_cfg)
234 self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
235
236 self.layers = nn.ModuleList()
237 for i, num_blocks in enumerate(self.stage_blocks):
238 first_block = True if i == 0 else False
239 layer = self.make_layer(channels[i], num_blocks, first_block)
240 self.layers.append(layer)
241
242 def _freeze_stages(self):
243 if self.frozen_stages >= 0:
244 for param in self.conv1.parameters():
245 param.requires_grad = False
246 for i in range(self.frozen_stages):
247 layer = self.layers[i]
248 layer.eval()
249 for param in layer.parameters():
250 param.requires_grad = False
251
252 def init_weights(self, pretrained=None):
253 if isinstance(pretrained, str):
254 logger = logging.getLogger()
255 load_checkpoint(self, pretrained, strict=False, logger=logger)
256 elif pretrained is None:
257 for name, m in self.named_modules():
258 if isinstance(m, nn.Conv2d):
259 if 'conv1' in name:
260 normal_init(m, mean=0, std=0.01)
261 else:
262 normal_init(m, mean=0, std=1.0 / m.weight.shape[1])
263 elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
264 constant_init(m.weight, val=1, bias=0.0001)
265 if isinstance(m, _BatchNorm):
266 if m.running_mean is not None:
267 nn.init.constant_(m.running_mean, 0)
268 else:
269 raise TypeError('pretrained must be a str or None. But received '
270 f'{type(pretrained)}')
271
272 def make_layer(self, out_channels, num_blocks, first_block=False):
273 """Stack ShuffleUnit blocks to make a layer.
274
275 Args:
276 out_channels (int): out_channels of the block.
277 num_blocks (int): Number of blocks.
278 first_block (bool, optional): Whether is the first ShuffleUnit of a
279 sequential ShuffleUnits. Default: False, which means not using
280 the grouped 1x1 convolution.
281 """
282 layers = []
283 for i in range(num_blocks):
284 first_block = first_block if i == 0 else False
285 combine_mode = 'concat' if i == 0 else 'add'
286 layers.append(
287 ShuffleUnit(
288 self.in_channels,
289 out_channels,
290 groups=self.groups,
291 first_block=first_block,
292 combine=combine_mode,
293 conv_cfg=self.conv_cfg,
294 norm_cfg=self.norm_cfg,
295 act_cfg=self.act_cfg,
296 with_cp=self.with_cp))
297 self.in_channels = out_channels
298
299 return nn.Sequential(*layers)
300
301 def forward(self, x):
302 x = self.conv1(x)
303 x = self.maxpool(x)
304
305 outs = []
306 for i, layer in enumerate(self.layers):
307 x = layer(x)
308 if i in self.out_indices:
309 outs.append(x)
310
311 if len(outs) == 1:
312 return outs[0]
313 else:
314 return tuple(outs)
315
316 def train(self, mode=True):
317 super().train(mode)
318 self._freeze_stages()
319 if mode and self.norm_eval:
320 for m in self.modules():
321 if isinstance(m, _BatchNorm):
322 m.eval()
323
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mmpose/models/backbones/shufflenet_v1.py b/mmpose/models/backbones/shufflenet_v1.py
--- a/mmpose/models/backbones/shufflenet_v1.py
+++ b/mmpose/models/backbones/shufflenet_v1.py
@@ -235,7 +235,7 @@
self.layers = nn.ModuleList()
for i, num_blocks in enumerate(self.stage_blocks):
- first_block = True if i == 0 else False
+ first_block = (i == 0)
layer = self.make_layer(channels[i], num_blocks, first_block)
self.layers.append(layer)
| {"golden_diff": "diff --git a/mmpose/models/backbones/shufflenet_v1.py b/mmpose/models/backbones/shufflenet_v1.py\n--- a/mmpose/models/backbones/shufflenet_v1.py\n+++ b/mmpose/models/backbones/shufflenet_v1.py\n@@ -235,7 +235,7 @@\n \n self.layers = nn.ModuleList()\n for i, num_blocks in enumerate(self.stage_blocks):\n- first_block = True if i == 0 else False\n+ first_block = (i == 0)\n layer = self.make_layer(channels[i], num_blocks, first_block)\n self.layers.append(layer)\n", "issue": "Pylint: R1719\n```bash\r\nmmpose/models/backbones/shufflenet_v1.py:238:26: R1719: The if expression can be replaced with 'test' (simplifiable-if-expression)\r\n```\n", "before_files": [{"content": "import logging\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.checkpoint as cp\nfrom mmcv.cnn import (ConvModule, build_activation_layer, constant_init,\n normal_init)\nfrom torch.nn.modules.batchnorm import _BatchNorm\n\nfrom ..registry import BACKBONES\nfrom .base_backbone import BaseBackbone\nfrom .utils import channel_shuffle, load_checkpoint, make_divisible\n\n\nclass ShuffleUnit(nn.Module):\n \"\"\"ShuffleUnit block.\n\n ShuffleNet unit with pointwise group convolution (GConv) and channel\n shuffle.\n\n Args:\n in_channels (int): The input channels of the ShuffleUnit.\n out_channels (int): The output channels of the ShuffleUnit.\n groups (int, optional): The number of groups to be used in grouped 1x1\n convolutions in each ShuffleUnit. Default: 3\n first_block (bool, optional): Whether it is the first ShuffleUnit of a\n sequential ShuffleUnits. Default: False, which means not using the\n grouped 1x1 convolution.\n combine (str, optional): The ways to combine the input and output\n branches. Default: 'add'.\n conv_cfg (dict): Config dict for convolution layer. Default: None,\n which means using conv2d.\n norm_cfg (dict): Config dict for normalization layer.\n Default: dict(type='BN').\n act_cfg (dict): Config dict for activation layer.\n Default: dict(type='ReLU').\n with_cp (bool, optional): Use checkpoint or not. Using checkpoint\n will save some memory while slowing down the training speed.\n Default: False.\n\n Returns:\n Tensor: The output tensor.\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n groups=3,\n first_block=True,\n combine='add',\n conv_cfg=None,\n norm_cfg=dict(type='BN'),\n act_cfg=dict(type='ReLU'),\n with_cp=False):\n super().__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.first_block = first_block\n self.combine = combine\n self.groups = groups\n self.bottleneck_channels = self.out_channels // 4\n self.with_cp = with_cp\n\n if self.combine == 'add':\n self.depthwise_stride = 1\n self._combine_func = self._add\n assert in_channels == out_channels, (\n 'in_channels must be equal to out_channels when combine '\n 'is add')\n elif self.combine == 'concat':\n self.depthwise_stride = 2\n self._combine_func = self._concat\n self.out_channels -= self.in_channels\n self.avgpool = nn.AvgPool2d(kernel_size=3, stride=2, padding=1)\n else:\n raise ValueError(f'Cannot combine tensors with {self.combine}. 
'\n 'Only \"add\" and \"concat\" are supported')\n\n self.first_1x1_groups = 1 if first_block else self.groups\n self.g_conv_1x1_compress = ConvModule(\n in_channels=self.in_channels,\n out_channels=self.bottleneck_channels,\n kernel_size=1,\n groups=self.first_1x1_groups,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=act_cfg)\n\n self.depthwise_conv3x3_bn = ConvModule(\n in_channels=self.bottleneck_channels,\n out_channels=self.bottleneck_channels,\n kernel_size=3,\n stride=self.depthwise_stride,\n padding=1,\n groups=self.bottleneck_channels,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=None)\n\n self.g_conv_1x1_expand = ConvModule(\n in_channels=self.bottleneck_channels,\n out_channels=self.out_channels,\n kernel_size=1,\n groups=self.groups,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=None)\n\n self.act = build_activation_layer(act_cfg)\n\n @staticmethod\n def _add(x, out):\n # residual connection\n return x + out\n\n @staticmethod\n def _concat(x, out):\n # concatenate along channel axis\n return torch.cat((x, out), 1)\n\n def forward(self, x):\n\n def _inner_forward(x):\n residual = x\n\n out = self.g_conv_1x1_compress(x)\n out = self.depthwise_conv3x3_bn(out)\n\n if self.groups > 1:\n out = channel_shuffle(out, self.groups)\n\n out = self.g_conv_1x1_expand(out)\n\n if self.combine == 'concat':\n residual = self.avgpool(residual)\n out = self.act(out)\n out = self._combine_func(residual, out)\n else:\n out = self._combine_func(residual, out)\n out = self.act(out)\n return out\n\n if self.with_cp and x.requires_grad:\n out = cp.checkpoint(_inner_forward, x)\n else:\n out = _inner_forward(x)\n\n return out\n\n\[email protected]_module()\nclass ShuffleNetV1(BaseBackbone):\n \"\"\"ShuffleNetV1 backbone.\n\n Args:\n groups (int, optional): The number of groups to be used in grouped 1x1\n convolutions in each ShuffleUnit. Default: 3.\n widen_factor (float, optional): Width multiplier - adjusts the number\n of channels in each layer by this amount. Default: 1.0.\n out_indices (Sequence[int]): Output from which stages.\n Default: (2, )\n frozen_stages (int): Stages to be frozen (all param fixed).\n Default: -1, which means not freezing any parameters.\n conv_cfg (dict): Config dict for convolution layer. Default: None,\n which means using conv2d.\n norm_cfg (dict): Config dict for normalization layer.\n Default: dict(type='BN').\n act_cfg (dict): Config dict for activation layer.\n Default: dict(type='ReLU').\n norm_eval (bool): Whether to set norm layers to eval mode, namely,\n freeze running stats (mean and var). Note: Effect on Batch Norm\n and its variants only. Default: False.\n with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n memory while slowing down the training speed. Default: False.\n \"\"\"\n\n def __init__(self,\n groups=3,\n widen_factor=1.0,\n out_indices=(2, ),\n frozen_stages=-1,\n conv_cfg=None,\n norm_cfg=dict(type='BN'),\n act_cfg=dict(type='ReLU'),\n norm_eval=False,\n with_cp=False):\n super().__init__()\n self.stage_blocks = [4, 8, 4]\n self.groups = groups\n\n for index in out_indices:\n if index not in range(0, 3):\n raise ValueError('the item in out_indices must in '\n f'range(0, 3). But received {index}')\n\n if frozen_stages not in range(-1, 3):\n raise ValueError('frozen_stages must be in range(-1, 3). 
'\n f'But received {frozen_stages}')\n self.out_indices = out_indices\n self.frozen_stages = frozen_stages\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n self.act_cfg = act_cfg\n self.norm_eval = norm_eval\n self.with_cp = with_cp\n\n if groups == 1:\n channels = (144, 288, 576)\n elif groups == 2:\n channels = (200, 400, 800)\n elif groups == 3:\n channels = (240, 480, 960)\n elif groups == 4:\n channels = (272, 544, 1088)\n elif groups == 8:\n channels = (384, 768, 1536)\n else:\n raise ValueError(f'{groups} groups is not supported for 1x1 '\n 'Grouped Convolutions')\n\n channels = [make_divisible(ch * widen_factor, 8) for ch in channels]\n\n self.in_channels = int(24 * widen_factor)\n\n self.conv1 = ConvModule(\n in_channels=3,\n out_channels=self.in_channels,\n kernel_size=3,\n stride=2,\n padding=1,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=act_cfg)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n self.layers = nn.ModuleList()\n for i, num_blocks in enumerate(self.stage_blocks):\n first_block = True if i == 0 else False\n layer = self.make_layer(channels[i], num_blocks, first_block)\n self.layers.append(layer)\n\n def _freeze_stages(self):\n if self.frozen_stages >= 0:\n for param in self.conv1.parameters():\n param.requires_grad = False\n for i in range(self.frozen_stages):\n layer = self.layers[i]\n layer.eval()\n for param in layer.parameters():\n param.requires_grad = False\n\n def init_weights(self, pretrained=None):\n if isinstance(pretrained, str):\n logger = logging.getLogger()\n load_checkpoint(self, pretrained, strict=False, logger=logger)\n elif pretrained is None:\n for name, m in self.named_modules():\n if isinstance(m, nn.Conv2d):\n if 'conv1' in name:\n normal_init(m, mean=0, std=0.01)\n else:\n normal_init(m, mean=0, std=1.0 / m.weight.shape[1])\n elif isinstance(m, (_BatchNorm, nn.GroupNorm)):\n constant_init(m.weight, val=1, bias=0.0001)\n if isinstance(m, _BatchNorm):\n if m.running_mean is not None:\n nn.init.constant_(m.running_mean, 0)\n else:\n raise TypeError('pretrained must be a str or None. But received '\n f'{type(pretrained)}')\n\n def make_layer(self, out_channels, num_blocks, first_block=False):\n \"\"\"Stack ShuffleUnit blocks to make a layer.\n\n Args:\n out_channels (int): out_channels of the block.\n num_blocks (int): Number of blocks.\n first_block (bool, optional): Whether is the first ShuffleUnit of a\n sequential ShuffleUnits. 
Default: False, which means not using\n the grouped 1x1 convolution.\n \"\"\"\n layers = []\n for i in range(num_blocks):\n first_block = first_block if i == 0 else False\n combine_mode = 'concat' if i == 0 else 'add'\n layers.append(\n ShuffleUnit(\n self.in_channels,\n out_channels,\n groups=self.groups,\n first_block=first_block,\n combine=combine_mode,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n act_cfg=self.act_cfg,\n with_cp=self.with_cp))\n self.in_channels = out_channels\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.maxpool(x)\n\n outs = []\n for i, layer in enumerate(self.layers):\n x = layer(x)\n if i in self.out_indices:\n outs.append(x)\n\n if len(outs) == 1:\n return outs[0]\n else:\n return tuple(outs)\n\n def train(self, mode=True):\n super().train(mode)\n self._freeze_stages()\n if mode and self.norm_eval:\n for m in self.modules():\n if isinstance(m, _BatchNorm):\n m.eval()\n", "path": "mmpose/models/backbones/shufflenet_v1.py"}], "after_files": [{"content": "import logging\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.checkpoint as cp\nfrom mmcv.cnn import (ConvModule, build_activation_layer, constant_init,\n normal_init)\nfrom torch.nn.modules.batchnorm import _BatchNorm\n\nfrom ..registry import BACKBONES\nfrom .base_backbone import BaseBackbone\nfrom .utils import channel_shuffle, load_checkpoint, make_divisible\n\n\nclass ShuffleUnit(nn.Module):\n \"\"\"ShuffleUnit block.\n\n ShuffleNet unit with pointwise group convolution (GConv) and channel\n shuffle.\n\n Args:\n in_channels (int): The input channels of the ShuffleUnit.\n out_channels (int): The output channels of the ShuffleUnit.\n groups (int, optional): The number of groups to be used in grouped 1x1\n convolutions in each ShuffleUnit. Default: 3\n first_block (bool, optional): Whether it is the first ShuffleUnit of a\n sequential ShuffleUnits. Default: False, which means not using the\n grouped 1x1 convolution.\n combine (str, optional): The ways to combine the input and output\n branches. Default: 'add'.\n conv_cfg (dict): Config dict for convolution layer. Default: None,\n which means using conv2d.\n norm_cfg (dict): Config dict for normalization layer.\n Default: dict(type='BN').\n act_cfg (dict): Config dict for activation layer.\n Default: dict(type='ReLU').\n with_cp (bool, optional): Use checkpoint or not. Using checkpoint\n will save some memory while slowing down the training speed.\n Default: False.\n\n Returns:\n Tensor: The output tensor.\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n groups=3,\n first_block=True,\n combine='add',\n conv_cfg=None,\n norm_cfg=dict(type='BN'),\n act_cfg=dict(type='ReLU'),\n with_cp=False):\n super().__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.first_block = first_block\n self.combine = combine\n self.groups = groups\n self.bottleneck_channels = self.out_channels // 4\n self.with_cp = with_cp\n\n if self.combine == 'add':\n self.depthwise_stride = 1\n self._combine_func = self._add\n assert in_channels == out_channels, (\n 'in_channels must be equal to out_channels when combine '\n 'is add')\n elif self.combine == 'concat':\n self.depthwise_stride = 2\n self._combine_func = self._concat\n self.out_channels -= self.in_channels\n self.avgpool = nn.AvgPool2d(kernel_size=3, stride=2, padding=1)\n else:\n raise ValueError(f'Cannot combine tensors with {self.combine}. 
'\n 'Only \"add\" and \"concat\" are supported')\n\n self.first_1x1_groups = 1 if first_block else self.groups\n self.g_conv_1x1_compress = ConvModule(\n in_channels=self.in_channels,\n out_channels=self.bottleneck_channels,\n kernel_size=1,\n groups=self.first_1x1_groups,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=act_cfg)\n\n self.depthwise_conv3x3_bn = ConvModule(\n in_channels=self.bottleneck_channels,\n out_channels=self.bottleneck_channels,\n kernel_size=3,\n stride=self.depthwise_stride,\n padding=1,\n groups=self.bottleneck_channels,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=None)\n\n self.g_conv_1x1_expand = ConvModule(\n in_channels=self.bottleneck_channels,\n out_channels=self.out_channels,\n kernel_size=1,\n groups=self.groups,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=None)\n\n self.act = build_activation_layer(act_cfg)\n\n @staticmethod\n def _add(x, out):\n # residual connection\n return x + out\n\n @staticmethod\n def _concat(x, out):\n # concatenate along channel axis\n return torch.cat((x, out), 1)\n\n def forward(self, x):\n\n def _inner_forward(x):\n residual = x\n\n out = self.g_conv_1x1_compress(x)\n out = self.depthwise_conv3x3_bn(out)\n\n if self.groups > 1:\n out = channel_shuffle(out, self.groups)\n\n out = self.g_conv_1x1_expand(out)\n\n if self.combine == 'concat':\n residual = self.avgpool(residual)\n out = self.act(out)\n out = self._combine_func(residual, out)\n else:\n out = self._combine_func(residual, out)\n out = self.act(out)\n return out\n\n if self.with_cp and x.requires_grad:\n out = cp.checkpoint(_inner_forward, x)\n else:\n out = _inner_forward(x)\n\n return out\n\n\[email protected]_module()\nclass ShuffleNetV1(BaseBackbone):\n \"\"\"ShuffleNetV1 backbone.\n\n Args:\n groups (int, optional): The number of groups to be used in grouped 1x1\n convolutions in each ShuffleUnit. Default: 3.\n widen_factor (float, optional): Width multiplier - adjusts the number\n of channels in each layer by this amount. Default: 1.0.\n out_indices (Sequence[int]): Output from which stages.\n Default: (2, )\n frozen_stages (int): Stages to be frozen (all param fixed).\n Default: -1, which means not freezing any parameters.\n conv_cfg (dict): Config dict for convolution layer. Default: None,\n which means using conv2d.\n norm_cfg (dict): Config dict for normalization layer.\n Default: dict(type='BN').\n act_cfg (dict): Config dict for activation layer.\n Default: dict(type='ReLU').\n norm_eval (bool): Whether to set norm layers to eval mode, namely,\n freeze running stats (mean and var). Note: Effect on Batch Norm\n and its variants only. Default: False.\n with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n memory while slowing down the training speed. Default: False.\n \"\"\"\n\n def __init__(self,\n groups=3,\n widen_factor=1.0,\n out_indices=(2, ),\n frozen_stages=-1,\n conv_cfg=None,\n norm_cfg=dict(type='BN'),\n act_cfg=dict(type='ReLU'),\n norm_eval=False,\n with_cp=False):\n super().__init__()\n self.stage_blocks = [4, 8, 4]\n self.groups = groups\n\n for index in out_indices:\n if index not in range(0, 3):\n raise ValueError('the item in out_indices must in '\n f'range(0, 3). But received {index}')\n\n if frozen_stages not in range(-1, 3):\n raise ValueError('frozen_stages must be in range(-1, 3). 
'\n f'But received {frozen_stages}')\n self.out_indices = out_indices\n self.frozen_stages = frozen_stages\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n self.act_cfg = act_cfg\n self.norm_eval = norm_eval\n self.with_cp = with_cp\n\n if groups == 1:\n channels = (144, 288, 576)\n elif groups == 2:\n channels = (200, 400, 800)\n elif groups == 3:\n channels = (240, 480, 960)\n elif groups == 4:\n channels = (272, 544, 1088)\n elif groups == 8:\n channels = (384, 768, 1536)\n else:\n raise ValueError(f'{groups} groups is not supported for 1x1 '\n 'Grouped Convolutions')\n\n channels = [make_divisible(ch * widen_factor, 8) for ch in channels]\n\n self.in_channels = int(24 * widen_factor)\n\n self.conv1 = ConvModule(\n in_channels=3,\n out_channels=self.in_channels,\n kernel_size=3,\n stride=2,\n padding=1,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=act_cfg)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n self.layers = nn.ModuleList()\n for i, num_blocks in enumerate(self.stage_blocks):\n first_block = (i == 0)\n layer = self.make_layer(channels[i], num_blocks, first_block)\n self.layers.append(layer)\n\n def _freeze_stages(self):\n if self.frozen_stages >= 0:\n for param in self.conv1.parameters():\n param.requires_grad = False\n for i in range(self.frozen_stages):\n layer = self.layers[i]\n layer.eval()\n for param in layer.parameters():\n param.requires_grad = False\n\n def init_weights(self, pretrained=None):\n if isinstance(pretrained, str):\n logger = logging.getLogger()\n load_checkpoint(self, pretrained, strict=False, logger=logger)\n elif pretrained is None:\n for name, m in self.named_modules():\n if isinstance(m, nn.Conv2d):\n if 'conv1' in name:\n normal_init(m, mean=0, std=0.01)\n else:\n normal_init(m, mean=0, std=1.0 / m.weight.shape[1])\n elif isinstance(m, (_BatchNorm, nn.GroupNorm)):\n constant_init(m.weight, val=1, bias=0.0001)\n if isinstance(m, _BatchNorm):\n if m.running_mean is not None:\n nn.init.constant_(m.running_mean, 0)\n else:\n raise TypeError('pretrained must be a str or None. But received '\n f'{type(pretrained)}')\n\n def make_layer(self, out_channels, num_blocks, first_block=False):\n \"\"\"Stack ShuffleUnit blocks to make a layer.\n\n Args:\n out_channels (int): out_channels of the block.\n num_blocks (int): Number of blocks.\n first_block (bool, optional): Whether is the first ShuffleUnit of a\n sequential ShuffleUnits. Default: False, which means not using\n the grouped 1x1 convolution.\n \"\"\"\n layers = []\n for i in range(num_blocks):\n first_block = first_block if i == 0 else False\n combine_mode = 'concat' if i == 0 else 'add'\n layers.append(\n ShuffleUnit(\n self.in_channels,\n out_channels,\n groups=self.groups,\n first_block=first_block,\n combine=combine_mode,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n act_cfg=self.act_cfg,\n with_cp=self.with_cp))\n self.in_channels = out_channels\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.maxpool(x)\n\n outs = []\n for i, layer in enumerate(self.layers):\n x = layer(x)\n if i in self.out_indices:\n outs.append(x)\n\n if len(outs) == 1:\n return outs[0]\n else:\n return tuple(outs)\n\n def train(self, mode=True):\n super().train(mode)\n self._freeze_stages()\n if mode and self.norm_eval:\n for m in self.modules():\n if isinstance(m, _BatchNorm):\n m.eval()\n", "path": "mmpose/models/backbones/shufflenet_v1.py"}]} |
gh_patches_debug_1289 | rasdani/github-patches | git_diff | lutris__lutris-998 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Prevent crash when saving wine game without appid
```py
Traceback (most recent call last):
  File "/usr/lib/python3.6/site-packages/lutris/gui/config_dialogs.py", line 369, in on_save
    self.game.steamid = self.lutris_config.game_config['appid']
KeyError: 'appid'
```
--- END ISSUE ---
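The traceback points at the save path in `lutris/gui/config_dialogs.py`: for Steam and Wine Steam games the dialog indexes `game_config['appid']` directly, and that key is simply absent when the user never filled in an AppID, so saving crashes instead of reporting a validation problem. A minimal sketch of the failure mode and of the defensive lookup, using an illustrative config dict rather than a real Lutris one:

```python
# Minimal reproduction of the crash: game_config is a plain dict, and the
# 'appid' key is missing when a Steam/Wine Steam game is saved without an AppID.
game_config = {"exe": "drive_c/Game/game.exe"}  # illustrative contents, no 'appid'

try:
    steamid = game_config["appid"]      # direct indexing raises KeyError: 'appid'
except KeyError:
    steamid = None

# Defensive lookup: dict.get() returns None instead of raising, so the dialog
# can surface an error message and abort the save.
if game_config.get("appid") is None:
    print("Steam AppId not provided")
```

The merged patch (shown in the diff further down) applies the same idea inside `is_valid()`, so a missing AppID is reported through an `ErrorDialog` during validation and `on_save()` never reaches the direct indexing.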
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lutris/gui/config_dialogs.py`
Content:
```
1 import os
2 from gi.repository import Gtk, Pango
3
4 from lutris import runners, settings
5 from lutris.config import LutrisConfig, TEMP_CONFIG, make_game_config_id
6 from lutris.game import Game
7 from lutris import gui
8 from lutris.gui.config_boxes import GameBox, RunnerBox, SystemBox
9 from lutris.gui.dialogs import ErrorDialog
10 from lutris.gui.widgets.common import VBox, SlugEntry, NumberEntry
11 from lutris.gui.widgets.dialogs import Dialog
12 from lutris.gui.widgets.utils import get_pixbuf_for_game, get_pixbuf, BANNER_SIZE, ICON_SIZE
13 from lutris.util.strings import slugify
14 from lutris.util import datapath, resources
15
16 DIALOG_WIDTH = 780
17 DIALOG_HEIGHT = 560
18
19
20 class GameDialogCommon(object):
21 no_runner_label = "Select a runner in the Game Info tab"
22
23 @staticmethod
24 def build_scrolled_window(widget):
25 scrolled_window = Gtk.ScrolledWindow()
26 scrolled_window.set_policy(Gtk.PolicyType.AUTOMATIC,
27 Gtk.PolicyType.AUTOMATIC)
28 scrolled_window.add(widget)
29 return scrolled_window
30
31 def build_notebook(self):
32 self.notebook = Gtk.Notebook()
33 self.vbox.pack_start(self.notebook, True, True, 10)
34
35 def build_tabs(self, config_level):
36 if config_level == 'game':
37 self._build_info_tab()
38 self._build_game_tab()
39 self._build_runner_tab(config_level)
40 self._build_system_tab(config_level)
41
42 def _build_info_tab(self):
43 info_box = VBox()
44
45 info_box.pack_start(self._get_name_box(), False, False, 5) # Game name
46
47 if self.game:
48 info_box.pack_start(self._get_slug_box(), False, False, 5) # Game id
49 info_box.pack_start(self._get_banner_box(), False, False, 5) # Banner
50
51 self.runner_box = self._get_runner_box()
52 info_box.pack_start(self.runner_box, False, False, 5) # Runner
53
54 info_box.pack_start(self._get_year_box(), False, False, 5) # Year
55
56 info_sw = self.build_scrolled_window(info_box)
57 self._add_notebook_tab(info_sw, "Game info")
58
59 def _get_name_box(self):
60 box = Gtk.HBox()
61
62 label = Gtk.Label(label="Name")
63 box.pack_start(label, False, False, 20)
64
65 self.name_entry = Gtk.Entry()
66 if self.game:
67 self.name_entry.set_text(self.game.name)
68 box.pack_start(self.name_entry, True, True, 20)
69
70 return box
71
72 def _get_slug_box(self):
73 box = Gtk.HBox()
74
75 label = Gtk.Label(label="Identifier")
76 box.pack_start(label, False, False, 20)
77
78 self.slug_entry = SlugEntry()
79 self.slug_entry.set_text(self.game.slug)
80 self.slug_entry.set_sensitive(False)
81 self.slug_entry.connect('activate', self.on_slug_entry_activate)
82 box.pack_start(self.slug_entry, True, True, 0)
83
84 slug_change_button = Gtk.Button("Change")
85 slug_change_button.connect('clicked', self.on_slug_change_clicked)
86 box.pack_start(slug_change_button, False, False, 20)
87
88 return box
89
90 def _get_runner_box(self):
91 runner_box = Gtk.HBox()
92 runner_label = Gtk.Label("Runner")
93 runner_label.set_alignment(0.5, 0.5)
94 self.runner_dropdown = self._get_runner_dropdown()
95 install_runners_btn = Gtk.Button(label="Install runners")
96 install_runners_btn.connect('clicked', self.on_install_runners_clicked)
97 install_runners_btn.set_margin_right(20)
98
99 runner_box.pack_start(runner_label, False, False, 20)
100 runner_box.pack_start(self.runner_dropdown, False, False, 20)
101 runner_box.pack_start(install_runners_btn, False, False, 0)
102 return runner_box
103
104 def _get_banner_box(self):
105 banner_box = Gtk.HBox()
106 banner_label = Gtk.Label("Banner")
107 banner_label.set_alignment(0.5, 0.5)
108 self.banner_button = Gtk.Button()
109 self._set_image('banner')
110 self.banner_button.connect('clicked', self.on_custom_image_select, 'banner')
111
112 reset_banner_button = Gtk.Button.new_from_icon_name('edit-clear',
113 Gtk.IconSize.MENU)
114 reset_banner_button.set_relief(Gtk.ReliefStyle.NONE)
115 reset_banner_button.set_tooltip_text("Remove custom banner")
116 reset_banner_button.connect('clicked',
117 self.on_custom_image_reset_clicked,
118 'banner')
119
120 self.icon_button = Gtk.Button()
121 self._set_image('icon')
122 self.icon_button.connect('clicked', self.on_custom_image_select, 'icon')
123
124 reset_icon_button = Gtk.Button.new_from_icon_name('edit-clear',
125 Gtk.IconSize.MENU)
126 reset_icon_button.set_relief(Gtk.ReliefStyle.NONE)
127 reset_icon_button.set_tooltip_text("Remove custom icon")
128 reset_icon_button.connect('clicked', self.on_custom_image_reset_clicked, 'icon')
129
130 banner_box.pack_start(banner_label, False, False, 20)
131 banner_box.pack_start(self.banner_button, False, False, 0)
132 banner_box.pack_start(reset_banner_button, False, False, 0)
133 banner_box.pack_start(self.icon_button, False, False, 0)
134 banner_box.pack_start(reset_icon_button, False, False, 0)
135 return banner_box
136
137 def _get_year_box(self):
138 box = Gtk.HBox()
139
140 label = Gtk.Label(label="Release year")
141 box.pack_start(label, False, False, 20)
142
143 self.year_entry = NumberEntry()
144 if self.game:
145 self.year_entry.set_text(str(self.game.year or ''))
146 box.pack_start(self.year_entry, True, True, 20)
147
148 return box
149
150 def _set_image(self, image_format):
151 assert image_format in ('banner', 'icon')
152 image = Gtk.Image()
153 game_slug = self.game.slug if self.game else ''
154 image.set_from_pixbuf(get_pixbuf_for_game(game_slug, image_format))
155 if image_format == 'banner':
156 self.banner_button.set_image(image)
157 else:
158 self.icon_button.set_image(image)
159
160 def _set_icon_image(self):
161 image = Gtk.Image()
162 game_slug = self.game.slug if self.game else ''
163 image.set_from_pixbuf(get_pixbuf_for_game(game_slug, 'banner'))
164 self.banner_button.set_image(image)
165
166 def _get_runner_dropdown(self):
167 runner_liststore = self._get_runner_liststore()
168 runner_dropdown = Gtk.ComboBox.new_with_model(runner_liststore)
169 runner_dropdown.set_id_column(1)
170 runner_index = 0
171 if self.runner_name:
172 for runner in runner_liststore:
173 if self.runner_name == str(runner[1]):
174 break
175 runner_index += 1
176 runner_dropdown.set_active(runner_index)
177 runner_dropdown.connect("changed", self.on_runner_changed)
178 cell = Gtk.CellRendererText()
179 cell.props.ellipsize = Pango.EllipsizeMode.END
180 runner_dropdown.pack_start(cell, True)
181 runner_dropdown.add_attribute(cell, 'text', 0)
182 return runner_dropdown
183
184 @staticmethod
185 def _get_runner_liststore():
186 """Build a ListStore with available runners."""
187 runner_liststore = Gtk.ListStore(str, str)
188 runner_liststore.append(("Select a runner from the list", ""))
189 for runner in runners.get_installed():
190 description = runner.description
191 runner_liststore.append(
192 ("%s (%s)" % (runner.human_name, description), runner.name)
193 )
194 return runner_liststore
195
196 def on_slug_change_clicked(self, widget):
197 if self.slug_entry.get_sensitive() is False:
198 self.slug_entry.set_sensitive(True)
199 else:
200 self.change_game_slug()
201
202 def on_slug_entry_activate(self, widget):
203 self.change_game_slug()
204
205 def change_game_slug(self):
206 self.slug = self.slug_entry.get_text()
207 self.slug_entry.set_sensitive(False)
208
209 def on_install_runners_clicked(self, _button):
210 runners_dialog = gui.runnersdialog.RunnersDialog()
211 runners_dialog.connect("runner-installed",
212 self._update_runner_dropdown)
213
214 def _update_runner_dropdown(self, _widget):
215 active_id = self.runner_dropdown.get_active_id()
216 self.runner_dropdown.set_model(self._get_runner_liststore())
217 self.runner_dropdown.set_active_id(active_id)
218
219 def _build_game_tab(self):
220 if self.game and self.runner_name:
221 self.game.runner_name = self.runner_name
222 try:
223 self.game.runner = runners.import_runner(self.runner_name)()
224 except runners.InvalidRunner:
225 pass
226 self.game_box = GameBox(self.lutris_config, self.game)
227 game_sw = self.build_scrolled_window(self.game_box)
228 elif self.runner_name:
229 game = Game(None)
230 game.runner_name = self.runner_name
231 self.game_box = GameBox(self.lutris_config, game)
232 game_sw = self.build_scrolled_window(self.game_box)
233 else:
234 game_sw = Gtk.Label(label=self.no_runner_label)
235 self._add_notebook_tab(game_sw, "Game options")
236
237 def _build_runner_tab(self, config_level):
238 if self.runner_name:
239 self.runner_box = RunnerBox(self.lutris_config)
240 runner_sw = self.build_scrolled_window(self.runner_box)
241 else:
242 runner_sw = Gtk.Label(label=self.no_runner_label)
243 self._add_notebook_tab(runner_sw, "Runner options")
244
245 def _build_system_tab(self, config_level):
246 self.system_box = SystemBox(self.lutris_config)
247 self.system_sw = self.build_scrolled_window(self.system_box)
248 self._add_notebook_tab(self.system_sw, "System options")
249
250 def _add_notebook_tab(self, widget, label):
251 self.notebook.append_page(widget, Gtk.Label(label=label))
252
253 def build_action_area(self, button_callback, callback2=None):
254 self.action_area.set_layout(Gtk.ButtonBoxStyle.EDGE)
255
256 # Advanced settings checkbox
257 checkbox = Gtk.CheckButton(label="Show advanced options")
258 value = settings.read_setting('show_advanced_options')
259 if value == 'True':
260 checkbox.set_active(value)
261 checkbox.connect("toggled", self.on_show_advanced_options_toggled)
262 self.action_area.pack_start(checkbox, False, False, 5)
263
264 # Buttons
265 hbox = Gtk.HBox()
266 cancel_button = Gtk.Button(label="Cancel")
267 cancel_button.connect("clicked", self.on_cancel_clicked)
268 hbox.pack_start(cancel_button, True, True, 10)
269
270 save_button = Gtk.Button(label="Save")
271 if callback2:
272 save_button.connect("clicked", button_callback, callback2)
273 else:
274 save_button.connect("clicked", button_callback)
275 hbox.pack_start(save_button, True, True, 0)
276 self.action_area.pack_start(hbox, True, True, 0)
277
278 def on_show_advanced_options_toggled(self, checkbox):
279 value = True if checkbox.get_active() else False
280 settings.write_setting('show_advanced_options', value)
281
282 self._set_advanced_options_visible(value)
283
284 def _set_advanced_options_visible(self, value):
285 """Change visibility of advanced options across all config tabs."""
286 widgets = self.system_box.get_children()
287 if self.runner_name:
288 widgets += self.runner_box.get_children()
289 if self.game:
290 widgets += self.game_box.get_children()
291
292 for widget in widgets:
293 if widget.get_style_context().has_class('advanced'):
294 widget.set_visible(value)
295 if value:
296 widget.set_no_show_all(not value)
297 widget.show_all()
298
299 def on_runner_changed(self, widget):
300 """Action called when runner drop down is changed."""
301 runner_index = widget.get_active()
302 current_page = self.notebook.get_current_page()
303
304 if runner_index == 0:
305 self.runner_name = None
306 self.lutris_config = LutrisConfig()
307 else:
308 self.runner_name = widget.get_model()[runner_index][1]
309 self.lutris_config = LutrisConfig(
310 runner_slug=self.runner_name,
311 game_config_id=self.game_config_id,
312 level='game'
313 )
314
315 self._rebuild_tabs()
316 self.notebook.set_current_page(current_page)
317
318 def _rebuild_tabs(self):
319 for i in range(self.notebook.get_n_pages(), 1, -1):
320 self.notebook.remove_page(i - 1)
321 self._build_game_tab()
322 self._build_runner_tab('game')
323 self._build_system_tab('game')
324 self.show_all()
325
326 def on_cancel_clicked(self, widget=None):
327 """Dialog destroy callback."""
328 self.destroy()
329
330 def is_valid(self):
331 name = self.name_entry.get_text()
332 if not self.runner_name:
333 ErrorDialog("Runner not provided")
334 return False
335 if not name:
336 ErrorDialog("Please fill in the name")
337 return False
338 return True
339
340 def on_save(self, _button, callback=None):
341 """Save game info and destroy widget. Return True if success."""
342 if not self.is_valid():
343 return False
344 name = self.name_entry.get_text()
345
346 if not self.slug:
347 self.slug = slugify(name)
348
349 if not self.game:
350 self.game = Game()
351
352 year = None
353 if self.year_entry.get_text():
354 year = int(self.year_entry.get_text())
355
356 if self.lutris_config.game_config_id == TEMP_CONFIG:
357 self.lutris_config.game_config_id = self.get_config_id()
358
359 runner_class = runners.import_runner(self.runner_name)
360 runner = runner_class(self.lutris_config)
361 self.game.name = name
362 self.game.slug = self.slug
363 self.game.year = year
364 self.game.runner_name = self.runner_name
365 self.game.config = self.lutris_config
366 self.game.directory = runner.game_path
367 self.game.is_installed = True
368 if self.runner_name in ('steam', 'winesteam'):
369 self.game.steamid = self.lutris_config.game_config['appid']
370 self.game.set_platform_from_runner()
371 self.game.save()
372 self.destroy()
373 self.saved = True
374 if callback:
375 callback()
376
377 def on_custom_image_select(self, widget, image_type):
378 dialog = Gtk.FileChooserDialog("Please choose a custom image", self,
379 Gtk.FileChooserAction.OPEN,
380 (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
381 Gtk.STOCK_OPEN, Gtk.ResponseType.OK))
382
383 image_filter = Gtk.FileFilter()
384 image_filter.set_name("Images")
385 image_filter.add_pixbuf_formats()
386 dialog.add_filter(image_filter)
387
388 response = dialog.run()
389 if response == Gtk.ResponseType.OK:
390 image_path = dialog.get_filename()
391 if image_type == 'banner':
392 self.game.has_custom_banner = True
393 dest_path = datapath.get_banner_path(self.game.slug)
394 size = BANNER_SIZE
395 file_format = 'jpeg'
396 else:
397 self.game.has_custom_icon = True
398 dest_path = datapath.get_icon_path(self.game.slug)
399 size = ICON_SIZE
400 file_format = 'png'
401 pixbuf = get_pixbuf(image_path, None, size)
402 pixbuf.savev(dest_path, file_format, [], [])
403 self._set_image(image_type)
404
405 if image_type == 'icon':
406 resources.udpate_desktop_icons()
407
408 dialog.destroy()
409
410 def on_custom_image_reset_clicked(self, widget, image_type):
411 if image_type == 'banner':
412 self.game.has_custom_banner = False
413 dest_path = datapath.get_banner_path(self.game.slug)
414 elif image_type == 'icon':
415 self.game.has_custom_icon = False
416 dest_path = datapath.get_icon_path(self.game.slug)
417 else:
418 raise ValueError('Unsupported image type %s', image_type)
419 os.remove(dest_path)
420 self._set_image(image_type)
421
422
423 class AddGameDialog(Dialog, GameDialogCommon):
424 """Add game dialog class."""
425 def __init__(self, parent, game=None, runner=None, callback=None):
426 super(AddGameDialog, self).__init__("Add a new game", parent=parent)
427 self.game = game
428 self.saved = False
429
430 self.set_default_size(DIALOG_WIDTH, DIALOG_HEIGHT)
431 if game:
432 self.runner_name = game.runner_name
433 self.slug = game.slug
434 else:
435 self.runner_name = runner
436 self.slug = None
437
438 self.game_config_id = self.get_config_id()
439 self.lutris_config = LutrisConfig(runner_slug=self.runner_name,
440 game_config_id=self.game_config_id,
441 level='game')
442 self.build_notebook()
443 self.build_tabs('game')
444 self.build_action_area(self.on_save, callback)
445 self.name_entry.grab_focus()
446 self.show_all()
447
448 def get_config_id(self):
449 """For new games, create a special config type that won't be read
450 from disk.
451 """
452 return make_game_config_id(self.slug) if self.slug else TEMP_CONFIG
453
454
455 class EditGameConfigDialog(Dialog, GameDialogCommon):
456 """Game config edit dialog."""
457 def __init__(self, parent, game, callback):
458 super(EditGameConfigDialog, self).__init__(
459 "Configure %s" % game.name,
460 parent=parent
461 )
462 self.game = game
463 self.lutris_config = game.config
464 self.game_config_id = game.config.game_config_id
465 self.slug = game.slug
466 self.runner_name = game.runner_name
467
468 self.set_default_size(DIALOG_WIDTH, DIALOG_HEIGHT)
469
470 self.build_notebook()
471 self.build_tabs('game')
472 self.build_action_area(self.on_save, callback)
473 self.show_all()
474
475
476 class RunnerConfigDialog(Dialog, GameDialogCommon):
477 """Runner config edit dialog."""
478 def __init__(self, runner, parent=None):
479 self.runner_name = runner.__class__.__name__
480 super(RunnerConfigDialog, self).__init__(
481 "Configure %s" % runner.human_name,
482 parent=parent
483 )
484
485 self.game = None
486 self.saved = False
487 self.lutris_config = LutrisConfig(runner_slug=self.runner_name)
488
489 self.set_default_size(DIALOG_WIDTH, DIALOG_HEIGHT)
490
491 self.build_notebook()
492 self.build_tabs('runner')
493 self.build_action_area(self.on_save)
494 self.show_all()
495
496 def on_save(self, wigdet, data=None):
497 self.lutris_config.save()
498 self.destroy()
499
500
501 class SystemConfigDialog(Dialog, GameDialogCommon):
502 def __init__(self, parent=None):
503 super(SystemConfigDialog, self).__init__("System preferences", parent=parent)
504
505 self.game = None
506 self.runner_name = None
507 self.lutris_config = LutrisConfig()
508
509 self.set_default_size(DIALOG_WIDTH, DIALOG_HEIGHT)
510
511 self.system_box = SystemBox(self.lutris_config)
512 self.system_sw = self.build_scrolled_window(self.system_box)
513 self.vbox.pack_start(self.system_sw, True, True, 0)
514 self.build_action_area(self.on_save)
515 self.show_all()
516
517 def on_save(self, widget):
518 self.lutris_config.save()
519 self.destroy()
520
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lutris/gui/config_dialogs.py b/lutris/gui/config_dialogs.py
--- a/lutris/gui/config_dialogs.py
+++ b/lutris/gui/config_dialogs.py
@@ -335,6 +335,9 @@
if not name:
ErrorDialog("Please fill in the name")
return False
+ if self.runner_name in ('steam', 'winesteam') and self.lutris_config.game_config.get('appid') is None:
+ ErrorDialog("Steam AppId not provided")
+ return False
return True
def on_save(self, _button, callback=None):
| {"golden_diff": "diff --git a/lutris/gui/config_dialogs.py b/lutris/gui/config_dialogs.py\n--- a/lutris/gui/config_dialogs.py\n+++ b/lutris/gui/config_dialogs.py\n@@ -335,6 +335,9 @@\n if not name:\n ErrorDialog(\"Please fill in the name\")\n return False\n+ if self.runner_name in ('steam', 'winesteam') and self.lutris_config.game_config.get('appid') is None:\n+ ErrorDialog(\"Steam AppId not provided\")\n+ return False\n return True\n \n def on_save(self, _button, callback=None):\n", "issue": "Prevent crash when saving wine game without appid\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.6/site-packages/lutris/gui/config_dialogs.py\", line 369, in on_save\r\n self.game.steamid = self.lutris_config.game_config['appid']\r\nKeyError: 'appid'\n", "before_files": [{"content": "import os\nfrom gi.repository import Gtk, Pango\n\nfrom lutris import runners, settings\nfrom lutris.config import LutrisConfig, TEMP_CONFIG, make_game_config_id\nfrom lutris.game import Game\nfrom lutris import gui\nfrom lutris.gui.config_boxes import GameBox, RunnerBox, SystemBox\nfrom lutris.gui.dialogs import ErrorDialog\nfrom lutris.gui.widgets.common import VBox, SlugEntry, NumberEntry\nfrom lutris.gui.widgets.dialogs import Dialog\nfrom lutris.gui.widgets.utils import get_pixbuf_for_game, get_pixbuf, BANNER_SIZE, ICON_SIZE\nfrom lutris.util.strings import slugify\nfrom lutris.util import datapath, resources\n\nDIALOG_WIDTH = 780\nDIALOG_HEIGHT = 560\n\n\nclass GameDialogCommon(object):\n no_runner_label = \"Select a runner in the Game Info tab\"\n\n @staticmethod\n def build_scrolled_window(widget):\n scrolled_window = Gtk.ScrolledWindow()\n scrolled_window.set_policy(Gtk.PolicyType.AUTOMATIC,\n Gtk.PolicyType.AUTOMATIC)\n scrolled_window.add(widget)\n return scrolled_window\n\n def build_notebook(self):\n self.notebook = Gtk.Notebook()\n self.vbox.pack_start(self.notebook, True, True, 10)\n\n def build_tabs(self, config_level):\n if config_level == 'game':\n self._build_info_tab()\n self._build_game_tab()\n self._build_runner_tab(config_level)\n self._build_system_tab(config_level)\n\n def _build_info_tab(self):\n info_box = VBox()\n\n info_box.pack_start(self._get_name_box(), False, False, 5) # Game name\n\n if self.game:\n info_box.pack_start(self._get_slug_box(), False, False, 5) # Game id\n info_box.pack_start(self._get_banner_box(), False, False, 5) # Banner\n\n self.runner_box = self._get_runner_box()\n info_box.pack_start(self.runner_box, False, False, 5) # Runner\n\n info_box.pack_start(self._get_year_box(), False, False, 5) # Year\n\n info_sw = self.build_scrolled_window(info_box)\n self._add_notebook_tab(info_sw, \"Game info\")\n\n def _get_name_box(self):\n box = Gtk.HBox()\n\n label = Gtk.Label(label=\"Name\")\n box.pack_start(label, False, False, 20)\n\n self.name_entry = Gtk.Entry()\n if self.game:\n self.name_entry.set_text(self.game.name)\n box.pack_start(self.name_entry, True, True, 20)\n\n return box\n\n def _get_slug_box(self):\n box = Gtk.HBox()\n\n label = Gtk.Label(label=\"Identifier\")\n box.pack_start(label, False, False, 20)\n\n self.slug_entry = SlugEntry()\n self.slug_entry.set_text(self.game.slug)\n self.slug_entry.set_sensitive(False)\n self.slug_entry.connect('activate', self.on_slug_entry_activate)\n box.pack_start(self.slug_entry, True, True, 0)\n\n slug_change_button = Gtk.Button(\"Change\")\n slug_change_button.connect('clicked', self.on_slug_change_clicked)\n box.pack_start(slug_change_button, False, False, 20)\n\n return box\n\n def _get_runner_box(self):\n 
runner_box = Gtk.HBox()\n runner_label = Gtk.Label(\"Runner\")\n runner_label.set_alignment(0.5, 0.5)\n self.runner_dropdown = self._get_runner_dropdown()\n install_runners_btn = Gtk.Button(label=\"Install runners\")\n install_runners_btn.connect('clicked', self.on_install_runners_clicked)\n install_runners_btn.set_margin_right(20)\n\n runner_box.pack_start(runner_label, False, False, 20)\n runner_box.pack_start(self.runner_dropdown, False, False, 20)\n runner_box.pack_start(install_runners_btn, False, False, 0)\n return runner_box\n\n def _get_banner_box(self):\n banner_box = Gtk.HBox()\n banner_label = Gtk.Label(\"Banner\")\n banner_label.set_alignment(0.5, 0.5)\n self.banner_button = Gtk.Button()\n self._set_image('banner')\n self.banner_button.connect('clicked', self.on_custom_image_select, 'banner')\n\n reset_banner_button = Gtk.Button.new_from_icon_name('edit-clear',\n Gtk.IconSize.MENU)\n reset_banner_button.set_relief(Gtk.ReliefStyle.NONE)\n reset_banner_button.set_tooltip_text(\"Remove custom banner\")\n reset_banner_button.connect('clicked',\n self.on_custom_image_reset_clicked,\n 'banner')\n\n self.icon_button = Gtk.Button()\n self._set_image('icon')\n self.icon_button.connect('clicked', self.on_custom_image_select, 'icon')\n\n reset_icon_button = Gtk.Button.new_from_icon_name('edit-clear',\n Gtk.IconSize.MENU)\n reset_icon_button.set_relief(Gtk.ReliefStyle.NONE)\n reset_icon_button.set_tooltip_text(\"Remove custom icon\")\n reset_icon_button.connect('clicked', self.on_custom_image_reset_clicked, 'icon')\n\n banner_box.pack_start(banner_label, False, False, 20)\n banner_box.pack_start(self.banner_button, False, False, 0)\n banner_box.pack_start(reset_banner_button, False, False, 0)\n banner_box.pack_start(self.icon_button, False, False, 0)\n banner_box.pack_start(reset_icon_button, False, False, 0)\n return banner_box\n\n def _get_year_box(self):\n box = Gtk.HBox()\n\n label = Gtk.Label(label=\"Release year\")\n box.pack_start(label, False, False, 20)\n\n self.year_entry = NumberEntry()\n if self.game:\n self.year_entry.set_text(str(self.game.year or ''))\n box.pack_start(self.year_entry, True, True, 20)\n\n return box\n\n def _set_image(self, image_format):\n assert image_format in ('banner', 'icon')\n image = Gtk.Image()\n game_slug = self.game.slug if self.game else ''\n image.set_from_pixbuf(get_pixbuf_for_game(game_slug, image_format))\n if image_format == 'banner':\n self.banner_button.set_image(image)\n else:\n self.icon_button.set_image(image)\n\n def _set_icon_image(self):\n image = Gtk.Image()\n game_slug = self.game.slug if self.game else ''\n image.set_from_pixbuf(get_pixbuf_for_game(game_slug, 'banner'))\n self.banner_button.set_image(image)\n\n def _get_runner_dropdown(self):\n runner_liststore = self._get_runner_liststore()\n runner_dropdown = Gtk.ComboBox.new_with_model(runner_liststore)\n runner_dropdown.set_id_column(1)\n runner_index = 0\n if self.runner_name:\n for runner in runner_liststore:\n if self.runner_name == str(runner[1]):\n break\n runner_index += 1\n runner_dropdown.set_active(runner_index)\n runner_dropdown.connect(\"changed\", self.on_runner_changed)\n cell = Gtk.CellRendererText()\n cell.props.ellipsize = Pango.EllipsizeMode.END\n runner_dropdown.pack_start(cell, True)\n runner_dropdown.add_attribute(cell, 'text', 0)\n return runner_dropdown\n\n @staticmethod\n def _get_runner_liststore():\n \"\"\"Build a ListStore with available runners.\"\"\"\n runner_liststore = Gtk.ListStore(str, str)\n runner_liststore.append((\"Select a runner from the 
list\", \"\"))\n for runner in runners.get_installed():\n description = runner.description\n runner_liststore.append(\n (\"%s (%s)\" % (runner.human_name, description), runner.name)\n )\n return runner_liststore\n\n def on_slug_change_clicked(self, widget):\n if self.slug_entry.get_sensitive() is False:\n self.slug_entry.set_sensitive(True)\n else:\n self.change_game_slug()\n\n def on_slug_entry_activate(self, widget):\n self.change_game_slug()\n\n def change_game_slug(self):\n self.slug = self.slug_entry.get_text()\n self.slug_entry.set_sensitive(False)\n\n def on_install_runners_clicked(self, _button):\n runners_dialog = gui.runnersdialog.RunnersDialog()\n runners_dialog.connect(\"runner-installed\",\n self._update_runner_dropdown)\n\n def _update_runner_dropdown(self, _widget):\n active_id = self.runner_dropdown.get_active_id()\n self.runner_dropdown.set_model(self._get_runner_liststore())\n self.runner_dropdown.set_active_id(active_id)\n\n def _build_game_tab(self):\n if self.game and self.runner_name:\n self.game.runner_name = self.runner_name\n try:\n self.game.runner = runners.import_runner(self.runner_name)()\n except runners.InvalidRunner:\n pass\n self.game_box = GameBox(self.lutris_config, self.game)\n game_sw = self.build_scrolled_window(self.game_box)\n elif self.runner_name:\n game = Game(None)\n game.runner_name = self.runner_name\n self.game_box = GameBox(self.lutris_config, game)\n game_sw = self.build_scrolled_window(self.game_box)\n else:\n game_sw = Gtk.Label(label=self.no_runner_label)\n self._add_notebook_tab(game_sw, \"Game options\")\n\n def _build_runner_tab(self, config_level):\n if self.runner_name:\n self.runner_box = RunnerBox(self.lutris_config)\n runner_sw = self.build_scrolled_window(self.runner_box)\n else:\n runner_sw = Gtk.Label(label=self.no_runner_label)\n self._add_notebook_tab(runner_sw, \"Runner options\")\n\n def _build_system_tab(self, config_level):\n self.system_box = SystemBox(self.lutris_config)\n self.system_sw = self.build_scrolled_window(self.system_box)\n self._add_notebook_tab(self.system_sw, \"System options\")\n\n def _add_notebook_tab(self, widget, label):\n self.notebook.append_page(widget, Gtk.Label(label=label))\n\n def build_action_area(self, button_callback, callback2=None):\n self.action_area.set_layout(Gtk.ButtonBoxStyle.EDGE)\n\n # Advanced settings checkbox\n checkbox = Gtk.CheckButton(label=\"Show advanced options\")\n value = settings.read_setting('show_advanced_options')\n if value == 'True':\n checkbox.set_active(value)\n checkbox.connect(\"toggled\", self.on_show_advanced_options_toggled)\n self.action_area.pack_start(checkbox, False, False, 5)\n\n # Buttons\n hbox = Gtk.HBox()\n cancel_button = Gtk.Button(label=\"Cancel\")\n cancel_button.connect(\"clicked\", self.on_cancel_clicked)\n hbox.pack_start(cancel_button, True, True, 10)\n\n save_button = Gtk.Button(label=\"Save\")\n if callback2:\n save_button.connect(\"clicked\", button_callback, callback2)\n else:\n save_button.connect(\"clicked\", button_callback)\n hbox.pack_start(save_button, True, True, 0)\n self.action_area.pack_start(hbox, True, True, 0)\n\n def on_show_advanced_options_toggled(self, checkbox):\n value = True if checkbox.get_active() else False\n settings.write_setting('show_advanced_options', value)\n\n self._set_advanced_options_visible(value)\n\n def _set_advanced_options_visible(self, value):\n \"\"\"Change visibility of advanced options across all config tabs.\"\"\"\n widgets = self.system_box.get_children()\n if self.runner_name:\n widgets += 
self.runner_box.get_children()\n if self.game:\n widgets += self.game_box.get_children()\n\n for widget in widgets:\n if widget.get_style_context().has_class('advanced'):\n widget.set_visible(value)\n if value:\n widget.set_no_show_all(not value)\n widget.show_all()\n\n def on_runner_changed(self, widget):\n \"\"\"Action called when runner drop down is changed.\"\"\"\n runner_index = widget.get_active()\n current_page = self.notebook.get_current_page()\n\n if runner_index == 0:\n self.runner_name = None\n self.lutris_config = LutrisConfig()\n else:\n self.runner_name = widget.get_model()[runner_index][1]\n self.lutris_config = LutrisConfig(\n runner_slug=self.runner_name,\n game_config_id=self.game_config_id,\n level='game'\n )\n\n self._rebuild_tabs()\n self.notebook.set_current_page(current_page)\n\n def _rebuild_tabs(self):\n for i in range(self.notebook.get_n_pages(), 1, -1):\n self.notebook.remove_page(i - 1)\n self._build_game_tab()\n self._build_runner_tab('game')\n self._build_system_tab('game')\n self.show_all()\n\n def on_cancel_clicked(self, widget=None):\n \"\"\"Dialog destroy callback.\"\"\"\n self.destroy()\n\n def is_valid(self):\n name = self.name_entry.get_text()\n if not self.runner_name:\n ErrorDialog(\"Runner not provided\")\n return False\n if not name:\n ErrorDialog(\"Please fill in the name\")\n return False\n return True\n\n def on_save(self, _button, callback=None):\n \"\"\"Save game info and destroy widget. Return True if success.\"\"\"\n if not self.is_valid():\n return False\n name = self.name_entry.get_text()\n\n if not self.slug:\n self.slug = slugify(name)\n\n if not self.game:\n self.game = Game()\n\n year = None\n if self.year_entry.get_text():\n year = int(self.year_entry.get_text())\n\n if self.lutris_config.game_config_id == TEMP_CONFIG:\n self.lutris_config.game_config_id = self.get_config_id()\n\n runner_class = runners.import_runner(self.runner_name)\n runner = runner_class(self.lutris_config)\n self.game.name = name\n self.game.slug = self.slug\n self.game.year = year\n self.game.runner_name = self.runner_name\n self.game.config = self.lutris_config\n self.game.directory = runner.game_path\n self.game.is_installed = True\n if self.runner_name in ('steam', 'winesteam'):\n self.game.steamid = self.lutris_config.game_config['appid']\n self.game.set_platform_from_runner()\n self.game.save()\n self.destroy()\n self.saved = True\n if callback:\n callback()\n\n def on_custom_image_select(self, widget, image_type):\n dialog = Gtk.FileChooserDialog(\"Please choose a custom image\", self,\n Gtk.FileChooserAction.OPEN,\n (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,\n Gtk.STOCK_OPEN, Gtk.ResponseType.OK))\n\n image_filter = Gtk.FileFilter()\n image_filter.set_name(\"Images\")\n image_filter.add_pixbuf_formats()\n dialog.add_filter(image_filter)\n\n response = dialog.run()\n if response == Gtk.ResponseType.OK:\n image_path = dialog.get_filename()\n if image_type == 'banner':\n self.game.has_custom_banner = True\n dest_path = datapath.get_banner_path(self.game.slug)\n size = BANNER_SIZE\n file_format = 'jpeg'\n else:\n self.game.has_custom_icon = True\n dest_path = datapath.get_icon_path(self.game.slug)\n size = ICON_SIZE\n file_format = 'png'\n pixbuf = get_pixbuf(image_path, None, size)\n pixbuf.savev(dest_path, file_format, [], [])\n self._set_image(image_type)\n\n if image_type == 'icon':\n resources.udpate_desktop_icons()\n\n dialog.destroy()\n\n def on_custom_image_reset_clicked(self, widget, image_type):\n if image_type == 'banner':\n 
self.game.has_custom_banner = False\n dest_path = datapath.get_banner_path(self.game.slug)\n elif image_type == 'icon':\n self.game.has_custom_icon = False\n dest_path = datapath.get_icon_path(self.game.slug)\n else:\n raise ValueError('Unsupported image type %s', image_type)\n os.remove(dest_path)\n self._set_image(image_type)\n\n\nclass AddGameDialog(Dialog, GameDialogCommon):\n \"\"\"Add game dialog class.\"\"\"\n def __init__(self, parent, game=None, runner=None, callback=None):\n super(AddGameDialog, self).__init__(\"Add a new game\", parent=parent)\n self.game = game\n self.saved = False\n\n self.set_default_size(DIALOG_WIDTH, DIALOG_HEIGHT)\n if game:\n self.runner_name = game.runner_name\n self.slug = game.slug\n else:\n self.runner_name = runner\n self.slug = None\n\n self.game_config_id = self.get_config_id()\n self.lutris_config = LutrisConfig(runner_slug=self.runner_name,\n game_config_id=self.game_config_id,\n level='game')\n self.build_notebook()\n self.build_tabs('game')\n self.build_action_area(self.on_save, callback)\n self.name_entry.grab_focus()\n self.show_all()\n\n def get_config_id(self):\n \"\"\"For new games, create a special config type that won't be read\n from disk.\n \"\"\"\n return make_game_config_id(self.slug) if self.slug else TEMP_CONFIG\n\n\nclass EditGameConfigDialog(Dialog, GameDialogCommon):\n \"\"\"Game config edit dialog.\"\"\"\n def __init__(self, parent, game, callback):\n super(EditGameConfigDialog, self).__init__(\n \"Configure %s\" % game.name,\n parent=parent\n )\n self.game = game\n self.lutris_config = game.config\n self.game_config_id = game.config.game_config_id\n self.slug = game.slug\n self.runner_name = game.runner_name\n\n self.set_default_size(DIALOG_WIDTH, DIALOG_HEIGHT)\n\n self.build_notebook()\n self.build_tabs('game')\n self.build_action_area(self.on_save, callback)\n self.show_all()\n\n\nclass RunnerConfigDialog(Dialog, GameDialogCommon):\n \"\"\"Runner config edit dialog.\"\"\"\n def __init__(self, runner, parent=None):\n self.runner_name = runner.__class__.__name__\n super(RunnerConfigDialog, self).__init__(\n \"Configure %s\" % runner.human_name,\n parent=parent\n )\n\n self.game = None\n self.saved = False\n self.lutris_config = LutrisConfig(runner_slug=self.runner_name)\n\n self.set_default_size(DIALOG_WIDTH, DIALOG_HEIGHT)\n\n self.build_notebook()\n self.build_tabs('runner')\n self.build_action_area(self.on_save)\n self.show_all()\n\n def on_save(self, wigdet, data=None):\n self.lutris_config.save()\n self.destroy()\n\n\nclass SystemConfigDialog(Dialog, GameDialogCommon):\n def __init__(self, parent=None):\n super(SystemConfigDialog, self).__init__(\"System preferences\", parent=parent)\n\n self.game = None\n self.runner_name = None\n self.lutris_config = LutrisConfig()\n\n self.set_default_size(DIALOG_WIDTH, DIALOG_HEIGHT)\n\n self.system_box = SystemBox(self.lutris_config)\n self.system_sw = self.build_scrolled_window(self.system_box)\n self.vbox.pack_start(self.system_sw, True, True, 0)\n self.build_action_area(self.on_save)\n self.show_all()\n\n def on_save(self, widget):\n self.lutris_config.save()\n self.destroy()\n", "path": "lutris/gui/config_dialogs.py"}], "after_files": [{"content": "import os\nfrom gi.repository import Gtk, Pango\n\nfrom lutris import runners, settings\nfrom lutris.config import LutrisConfig, TEMP_CONFIG, make_game_config_id\nfrom lutris.game import Game\nfrom lutris import gui\nfrom lutris.gui.config_boxes import GameBox, RunnerBox, SystemBox\nfrom lutris.gui.dialogs import ErrorDialog\nfrom 
lutris.gui.widgets.common import VBox, SlugEntry, NumberEntry\nfrom lutris.gui.widgets.dialogs import Dialog\nfrom lutris.gui.widgets.utils import get_pixbuf_for_game, get_pixbuf, BANNER_SIZE, ICON_SIZE\nfrom lutris.util.strings import slugify\nfrom lutris.util import datapath, resources\n\nDIALOG_WIDTH = 780\nDIALOG_HEIGHT = 560\n\n\nclass GameDialogCommon(object):\n no_runner_label = \"Select a runner in the Game Info tab\"\n\n @staticmethod\n def build_scrolled_window(widget):\n scrolled_window = Gtk.ScrolledWindow()\n scrolled_window.set_policy(Gtk.PolicyType.AUTOMATIC,\n Gtk.PolicyType.AUTOMATIC)\n scrolled_window.add(widget)\n return scrolled_window\n\n def build_notebook(self):\n self.notebook = Gtk.Notebook()\n self.vbox.pack_start(self.notebook, True, True, 10)\n\n def build_tabs(self, config_level):\n if config_level == 'game':\n self._build_info_tab()\n self._build_game_tab()\n self._build_runner_tab(config_level)\n self._build_system_tab(config_level)\n\n def _build_info_tab(self):\n info_box = VBox()\n\n info_box.pack_start(self._get_name_box(), False, False, 5) # Game name\n\n if self.game:\n info_box.pack_start(self._get_slug_box(), False, False, 5) # Game id\n info_box.pack_start(self._get_banner_box(), False, False, 5) # Banner\n\n self.runner_box = self._get_runner_box()\n info_box.pack_start(self.runner_box, False, False, 5) # Runner\n\n info_box.pack_start(self._get_year_box(), False, False, 5) # Year\n\n info_sw = self.build_scrolled_window(info_box)\n self._add_notebook_tab(info_sw, \"Game info\")\n\n def _get_name_box(self):\n box = Gtk.HBox()\n\n label = Gtk.Label(label=\"Name\")\n box.pack_start(label, False, False, 20)\n\n self.name_entry = Gtk.Entry()\n if self.game:\n self.name_entry.set_text(self.game.name)\n box.pack_start(self.name_entry, True, True, 20)\n\n return box\n\n def _get_slug_box(self):\n box = Gtk.HBox()\n\n label = Gtk.Label(label=\"Identifier\")\n box.pack_start(label, False, False, 20)\n\n self.slug_entry = SlugEntry()\n self.slug_entry.set_text(self.game.slug)\n self.slug_entry.set_sensitive(False)\n self.slug_entry.connect('activate', self.on_slug_entry_activate)\n box.pack_start(self.slug_entry, True, True, 0)\n\n slug_change_button = Gtk.Button(\"Change\")\n slug_change_button.connect('clicked', self.on_slug_change_clicked)\n box.pack_start(slug_change_button, False, False, 20)\n\n return box\n\n def _get_runner_box(self):\n runner_box = Gtk.HBox()\n runner_label = Gtk.Label(\"Runner\")\n runner_label.set_alignment(0.5, 0.5)\n self.runner_dropdown = self._get_runner_dropdown()\n install_runners_btn = Gtk.Button(label=\"Install runners\")\n install_runners_btn.connect('clicked', self.on_install_runners_clicked)\n install_runners_btn.set_margin_right(20)\n\n runner_box.pack_start(runner_label, False, False, 20)\n runner_box.pack_start(self.runner_dropdown, False, False, 20)\n runner_box.pack_start(install_runners_btn, False, False, 0)\n return runner_box\n\n def _get_banner_box(self):\n banner_box = Gtk.HBox()\n banner_label = Gtk.Label(\"Banner\")\n banner_label.set_alignment(0.5, 0.5)\n self.banner_button = Gtk.Button()\n self._set_image('banner')\n self.banner_button.connect('clicked', self.on_custom_image_select, 'banner')\n\n reset_banner_button = Gtk.Button.new_from_icon_name('edit-clear',\n Gtk.IconSize.MENU)\n reset_banner_button.set_relief(Gtk.ReliefStyle.NONE)\n reset_banner_button.set_tooltip_text(\"Remove custom banner\")\n reset_banner_button.connect('clicked',\n self.on_custom_image_reset_clicked,\n 'banner')\n\n 
self.icon_button = Gtk.Button()\n self._set_image('icon')\n self.icon_button.connect('clicked', self.on_custom_image_select, 'icon')\n\n reset_icon_button = Gtk.Button.new_from_icon_name('edit-clear',\n Gtk.IconSize.MENU)\n reset_icon_button.set_relief(Gtk.ReliefStyle.NONE)\n reset_icon_button.set_tooltip_text(\"Remove custom icon\")\n reset_icon_button.connect('clicked', self.on_custom_image_reset_clicked, 'icon')\n\n banner_box.pack_start(banner_label, False, False, 20)\n banner_box.pack_start(self.banner_button, False, False, 0)\n banner_box.pack_start(reset_banner_button, False, False, 0)\n banner_box.pack_start(self.icon_button, False, False, 0)\n banner_box.pack_start(reset_icon_button, False, False, 0)\n return banner_box\n\n def _get_year_box(self):\n box = Gtk.HBox()\n\n label = Gtk.Label(label=\"Release year\")\n box.pack_start(label, False, False, 20)\n\n self.year_entry = NumberEntry()\n if self.game:\n self.year_entry.set_text(str(self.game.year or ''))\n box.pack_start(self.year_entry, True, True, 20)\n\n return box\n\n def _set_image(self, image_format):\n assert image_format in ('banner', 'icon')\n image = Gtk.Image()\n game_slug = self.game.slug if self.game else ''\n image.set_from_pixbuf(get_pixbuf_for_game(game_slug, image_format))\n if image_format == 'banner':\n self.banner_button.set_image(image)\n else:\n self.icon_button.set_image(image)\n\n def _set_icon_image(self):\n image = Gtk.Image()\n game_slug = self.game.slug if self.game else ''\n image.set_from_pixbuf(get_pixbuf_for_game(game_slug, 'banner'))\n self.banner_button.set_image(image)\n\n def _get_runner_dropdown(self):\n runner_liststore = self._get_runner_liststore()\n runner_dropdown = Gtk.ComboBox.new_with_model(runner_liststore)\n runner_dropdown.set_id_column(1)\n runner_index = 0\n if self.runner_name:\n for runner in runner_liststore:\n if self.runner_name == str(runner[1]):\n break\n runner_index += 1\n runner_dropdown.set_active(runner_index)\n runner_dropdown.connect(\"changed\", self.on_runner_changed)\n cell = Gtk.CellRendererText()\n cell.props.ellipsize = Pango.EllipsizeMode.END\n runner_dropdown.pack_start(cell, True)\n runner_dropdown.add_attribute(cell, 'text', 0)\n return runner_dropdown\n\n @staticmethod\n def _get_runner_liststore():\n \"\"\"Build a ListStore with available runners.\"\"\"\n runner_liststore = Gtk.ListStore(str, str)\n runner_liststore.append((\"Select a runner from the list\", \"\"))\n for runner in runners.get_installed():\n description = runner.description\n runner_liststore.append(\n (\"%s (%s)\" % (runner.human_name, description), runner.name)\n )\n return runner_liststore\n\n def on_slug_change_clicked(self, widget):\n if self.slug_entry.get_sensitive() is False:\n self.slug_entry.set_sensitive(True)\n else:\n self.change_game_slug()\n\n def on_slug_entry_activate(self, widget):\n self.change_game_slug()\n\n def change_game_slug(self):\n self.slug = self.slug_entry.get_text()\n self.slug_entry.set_sensitive(False)\n\n def on_install_runners_clicked(self, _button):\n runners_dialog = gui.runnersdialog.RunnersDialog()\n runners_dialog.connect(\"runner-installed\",\n self._update_runner_dropdown)\n\n def _update_runner_dropdown(self, _widget):\n active_id = self.runner_dropdown.get_active_id()\n self.runner_dropdown.set_model(self._get_runner_liststore())\n self.runner_dropdown.set_active_id(active_id)\n\n def _build_game_tab(self):\n if self.game and self.runner_name:\n self.game.runner_name = self.runner_name\n try:\n self.game.runner = 
runners.import_runner(self.runner_name)()\n except runners.InvalidRunner:\n pass\n self.game_box = GameBox(self.lutris_config, self.game)\n game_sw = self.build_scrolled_window(self.game_box)\n elif self.runner_name:\n game = Game(None)\n game.runner_name = self.runner_name\n self.game_box = GameBox(self.lutris_config, game)\n game_sw = self.build_scrolled_window(self.game_box)\n else:\n game_sw = Gtk.Label(label=self.no_runner_label)\n self._add_notebook_tab(game_sw, \"Game options\")\n\n def _build_runner_tab(self, config_level):\n if self.runner_name:\n self.runner_box = RunnerBox(self.lutris_config)\n runner_sw = self.build_scrolled_window(self.runner_box)\n else:\n runner_sw = Gtk.Label(label=self.no_runner_label)\n self._add_notebook_tab(runner_sw, \"Runner options\")\n\n def _build_system_tab(self, config_level):\n self.system_box = SystemBox(self.lutris_config)\n self.system_sw = self.build_scrolled_window(self.system_box)\n self._add_notebook_tab(self.system_sw, \"System options\")\n\n def _add_notebook_tab(self, widget, label):\n self.notebook.append_page(widget, Gtk.Label(label=label))\n\n def build_action_area(self, button_callback, callback2=None):\n self.action_area.set_layout(Gtk.ButtonBoxStyle.EDGE)\n\n # Advanced settings checkbox\n checkbox = Gtk.CheckButton(label=\"Show advanced options\")\n value = settings.read_setting('show_advanced_options')\n if value == 'True':\n checkbox.set_active(value)\n checkbox.connect(\"toggled\", self.on_show_advanced_options_toggled)\n self.action_area.pack_start(checkbox, False, False, 5)\n\n # Buttons\n hbox = Gtk.HBox()\n cancel_button = Gtk.Button(label=\"Cancel\")\n cancel_button.connect(\"clicked\", self.on_cancel_clicked)\n hbox.pack_start(cancel_button, True, True, 10)\n\n save_button = Gtk.Button(label=\"Save\")\n if callback2:\n save_button.connect(\"clicked\", button_callback, callback2)\n else:\n save_button.connect(\"clicked\", button_callback)\n hbox.pack_start(save_button, True, True, 0)\n self.action_area.pack_start(hbox, True, True, 0)\n\n def on_show_advanced_options_toggled(self, checkbox):\n value = True if checkbox.get_active() else False\n settings.write_setting('show_advanced_options', value)\n\n self._set_advanced_options_visible(value)\n\n def _set_advanced_options_visible(self, value):\n \"\"\"Change visibility of advanced options across all config tabs.\"\"\"\n widgets = self.system_box.get_children()\n if self.runner_name:\n widgets += self.runner_box.get_children()\n if self.game:\n widgets += self.game_box.get_children()\n\n for widget in widgets:\n if widget.get_style_context().has_class('advanced'):\n widget.set_visible(value)\n if value:\n widget.set_no_show_all(not value)\n widget.show_all()\n\n def on_runner_changed(self, widget):\n \"\"\"Action called when runner drop down is changed.\"\"\"\n runner_index = widget.get_active()\n current_page = self.notebook.get_current_page()\n\n if runner_index == 0:\n self.runner_name = None\n self.lutris_config = LutrisConfig()\n else:\n self.runner_name = widget.get_model()[runner_index][1]\n self.lutris_config = LutrisConfig(\n runner_slug=self.runner_name,\n game_config_id=self.game_config_id,\n level='game'\n )\n\n self._rebuild_tabs()\n self.notebook.set_current_page(current_page)\n\n def _rebuild_tabs(self):\n for i in range(self.notebook.get_n_pages(), 1, -1):\n self.notebook.remove_page(i - 1)\n self._build_game_tab()\n self._build_runner_tab('game')\n self._build_system_tab('game')\n self.show_all()\n\n def on_cancel_clicked(self, widget=None):\n \"\"\"Dialog 
destroy callback.\"\"\"\n self.destroy()\n\n def is_valid(self):\n name = self.name_entry.get_text()\n if not self.runner_name:\n ErrorDialog(\"Runner not provided\")\n return False\n if not name:\n ErrorDialog(\"Please fill in the name\")\n return False\n if self.runner_name in ('steam', 'winesteam') and self.lutris_config.game_config.get('appid') is None:\n ErrorDialog(\"Steam AppId not provided\")\n return False\n return True\n\n def on_save(self, _button, callback=None):\n \"\"\"Save game info and destroy widget. Return True if success.\"\"\"\n if not self.is_valid():\n return False\n name = self.name_entry.get_text()\n\n if not self.slug:\n self.slug = slugify(name)\n\n if not self.game:\n self.game = Game()\n\n year = None\n if self.year_entry.get_text():\n year = int(self.year_entry.get_text())\n\n if self.lutris_config.game_config_id == TEMP_CONFIG:\n self.lutris_config.game_config_id = self.get_config_id()\n\n runner_class = runners.import_runner(self.runner_name)\n runner = runner_class(self.lutris_config)\n self.game.name = name\n self.game.slug = self.slug\n self.game.year = year\n self.game.runner_name = self.runner_name\n self.game.config = self.lutris_config\n self.game.directory = runner.game_path\n self.game.is_installed = True\n if self.runner_name in ('steam', 'winesteam'):\n self.game.steamid = self.lutris_config.game_config['appid']\n self.game.set_platform_from_runner()\n self.game.save()\n self.destroy()\n self.saved = True\n if callback:\n callback()\n\n def on_custom_image_select(self, widget, image_type):\n dialog = Gtk.FileChooserDialog(\"Please choose a custom image\", self,\n Gtk.FileChooserAction.OPEN,\n (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,\n Gtk.STOCK_OPEN, Gtk.ResponseType.OK))\n\n image_filter = Gtk.FileFilter()\n image_filter.set_name(\"Images\")\n image_filter.add_pixbuf_formats()\n dialog.add_filter(image_filter)\n\n response = dialog.run()\n if response == Gtk.ResponseType.OK:\n image_path = dialog.get_filename()\n if image_type == 'banner':\n self.game.has_custom_banner = True\n dest_path = datapath.get_banner_path(self.game.slug)\n size = BANNER_SIZE\n file_format = 'jpeg'\n else:\n self.game.has_custom_icon = True\n dest_path = datapath.get_icon_path(self.game.slug)\n size = ICON_SIZE\n file_format = 'png'\n pixbuf = get_pixbuf(image_path, None, size)\n pixbuf.savev(dest_path, file_format, [], [])\n self._set_image(image_type)\n\n if image_type == 'icon':\n resources.udpate_desktop_icons()\n\n dialog.destroy()\n\n def on_custom_image_reset_clicked(self, widget, image_type):\n if image_type == 'banner':\n self.game.has_custom_banner = False\n dest_path = datapath.get_banner_path(self.game.slug)\n elif image_type == 'icon':\n self.game.has_custom_icon = False\n dest_path = datapath.get_icon_path(self.game.slug)\n else:\n raise ValueError('Unsupported image type %s', image_type)\n os.remove(dest_path)\n self._set_image(image_type)\n\n\nclass AddGameDialog(Dialog, GameDialogCommon):\n \"\"\"Add game dialog class.\"\"\"\n def __init__(self, parent, game=None, runner=None, callback=None):\n super(AddGameDialog, self).__init__(\"Add a new game\", parent=parent)\n self.game = game\n self.saved = False\n\n self.set_default_size(DIALOG_WIDTH, DIALOG_HEIGHT)\n if game:\n self.runner_name = game.runner_name\n self.slug = game.slug\n else:\n self.runner_name = runner\n self.slug = None\n\n self.game_config_id = self.get_config_id()\n self.lutris_config = LutrisConfig(runner_slug=self.runner_name,\n game_config_id=self.game_config_id,\n level='game')\n 
self.build_notebook()\n self.build_tabs('game')\n self.build_action_area(self.on_save, callback)\n self.name_entry.grab_focus()\n self.show_all()\n\n def get_config_id(self):\n \"\"\"For new games, create a special config type that won't be read\n from disk.\n \"\"\"\n return make_game_config_id(self.slug) if self.slug else TEMP_CONFIG\n\n\nclass EditGameConfigDialog(Dialog, GameDialogCommon):\n \"\"\"Game config edit dialog.\"\"\"\n def __init__(self, parent, game, callback):\n super(EditGameConfigDialog, self).__init__(\n \"Configure %s\" % game.name,\n parent=parent\n )\n self.game = game\n self.lutris_config = game.config\n self.game_config_id = game.config.game_config_id\n self.slug = game.slug\n self.runner_name = game.runner_name\n\n self.set_default_size(DIALOG_WIDTH, DIALOG_HEIGHT)\n\n self.build_notebook()\n self.build_tabs('game')\n self.build_action_area(self.on_save, callback)\n self.show_all()\n\n\nclass RunnerConfigDialog(Dialog, GameDialogCommon):\n \"\"\"Runner config edit dialog.\"\"\"\n def __init__(self, runner, parent=None):\n self.runner_name = runner.__class__.__name__\n super(RunnerConfigDialog, self).__init__(\n \"Configure %s\" % runner.human_name,\n parent=parent\n )\n\n self.game = None\n self.saved = False\n self.lutris_config = LutrisConfig(runner_slug=self.runner_name)\n\n self.set_default_size(DIALOG_WIDTH, DIALOG_HEIGHT)\n\n self.build_notebook()\n self.build_tabs('runner')\n self.build_action_area(self.on_save)\n self.show_all()\n\n def on_save(self, wigdet, data=None):\n self.lutris_config.save()\n self.destroy()\n\n\nclass SystemConfigDialog(Dialog, GameDialogCommon):\n def __init__(self, parent=None):\n super(SystemConfigDialog, self).__init__(\"System preferences\", parent=parent)\n\n self.game = None\n self.runner_name = None\n self.lutris_config = LutrisConfig()\n\n self.set_default_size(DIALOG_WIDTH, DIALOG_HEIGHT)\n\n self.system_box = SystemBox(self.lutris_config)\n self.system_sw = self.build_scrolled_window(self.system_box)\n self.vbox.pack_start(self.system_sw, True, True, 0)\n self.build_action_area(self.on_save)\n self.show_all()\n\n def on_save(self, widget):\n self.lutris_config.save()\n self.destroy()\n", "path": "lutris/gui/config_dialogs.py"}]} |
gh_patches_debug_1290 | rasdani/github-patches | git_diff | beetbox__beets-3868 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
web: GET /album/<n> hides artpath even when INCLUDE_PATHS is set
### Problem
`beet web` provides GET operations to fetch track items and albums. By default this removes paths
from the returned data, but the config setting INCLUDE_PATHS should allow the paths to be returned.
This works correctly for GET /item/... but not GET /album/... In the album case, the artpath is unconditionally deleted from the results.
### Setup
Add to config file:
```
web:
    include_paths: true
```
Use `beet web` to make a webserver available and do a GET /album/N, where N is the album id of an album in the database which has cover art set. The JSON result should include the 'artpath' value, but it does not.
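As a quick check, a minimal sketch of confirming the missing key from the JSON response (it assumes the default `beet web` host/port of 127.0.0.1:8337, an album id of 1, and Python 3 for the stdlib `urllib.request` import):

```python
import json
from urllib.request import urlopen  # Python 3 stdlib

# Fetch album 1 from the running `beet web` server and inspect the returned fields.
with urlopen("http://127.0.0.1:8337/album/1") as resp:
    album = json.loads(resp.read().decode("utf-8"))

# Expected True when include_paths is set, but currently prints False.
print("artpath" in album)
```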
* OS: Linux (Debian Testing)
* Python version: 3.9.1-1
* beets version: 1.4.9-7
* Turning off plugins made problem go away (yes/no): bug in web plugin
Note this is a small issue, although I have hit it. I have a fix (and a regression test) which I will submit as a small PR once my first PR has been finished (so I can learn from the mistakes I made in that!).
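A minimal sketch of such a fix, mirroring the existing item-path handling in the `_rep` helper and assuming the same `INCLUDE_PATHS` app config flag applies to albums (this is only the album branch of `_rep`, not a complete function):

```python
elif isinstance(obj, beets.library.Album):
    if app.config.get('INCLUDE_PATHS', False):
        # Keep the cover art path, converted to a displayable string, when configured.
        out['artpath'] = util.displayable_path(out['artpath'])
    else:
        del out['artpath']
    if expand:
        out['items'] = [_rep(item) for item in obj.items()]
    return out
```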
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `beetsplug/web/__init__.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # This file is part of beets.
3 # Copyright 2016, Adrian Sampson.
4 #
5 # Permission is hereby granted, free of charge, to any person obtaining
6 # a copy of this software and associated documentation files (the
7 # "Software"), to deal in the Software without restriction, including
8 # without limitation the rights to use, copy, modify, merge, publish,
9 # distribute, sublicense, and/or sell copies of the Software, and to
10 # permit persons to whom the Software is furnished to do so, subject to
11 # the following conditions:
12 #
13 # The above copyright notice and this permission notice shall be
14 # included in all copies or substantial portions of the Software.
15
16 """A Web interface to beets."""
17 from __future__ import division, absolute_import, print_function
18
19 from beets.plugins import BeetsPlugin
20 from beets import ui
21 from beets import util
22 import beets.library
23 import flask
24 from flask import g, jsonify
25 from werkzeug.routing import BaseConverter, PathConverter
26 import os
27 from unidecode import unidecode
28 import json
29 import base64
30
31
32 # Utilities.
33
34 def _rep(obj, expand=False):
35 """Get a flat -- i.e., JSON-ish -- representation of a beets Item or
36 Album object. For Albums, `expand` dictates whether tracks are
37 included.
38 """
39 out = dict(obj)
40
41 if isinstance(obj, beets.library.Item):
42 if app.config.get('INCLUDE_PATHS', False):
43 out['path'] = util.displayable_path(out['path'])
44 else:
45 del out['path']
46
47 # Filter all bytes attributes and convert them to strings.
48 for key, value in out.items():
49 if isinstance(out[key], bytes):
50 out[key] = base64.b64encode(value).decode('ascii')
51
52 # Get the size (in bytes) of the backing file. This is useful
53 # for the Tomahawk resolver API.
54 try:
55 out['size'] = os.path.getsize(util.syspath(obj.path))
56 except OSError:
57 out['size'] = 0
58
59 return out
60
61 elif isinstance(obj, beets.library.Album):
62 del out['artpath']
63 if expand:
64 out['items'] = [_rep(item) for item in obj.items()]
65 return out
66
67
68 def json_generator(items, root, expand=False):
69 """Generator that dumps list of beets Items or Albums as JSON
70
71 :param root: root key for JSON
72 :param items: list of :class:`Item` or :class:`Album` to dump
73 :param expand: If true every :class:`Album` contains its items in the json
74 representation
75 :returns: generator that yields strings
76 """
77 yield '{"%s":[' % root
78 first = True
79 for item in items:
80 if first:
81 first = False
82 else:
83 yield ','
84 yield json.dumps(_rep(item, expand=expand))
85 yield ']}'
86
87
88 def is_expand():
89 """Returns whether the current request is for an expanded response."""
90
91 return flask.request.args.get('expand') is not None
92
93
94 def is_delete():
95 """Returns whether the current delete request should remove the selected
96 files.
97 """
98
99 return flask.request.args.get('delete') is not None
100
101
102 def get_method():
103 """Returns the HTTP method of the current request."""
104 return flask.request.method
105
106
107 def resource(name, patchable=False):
108 """Decorates a function to handle RESTful HTTP requests for a resource.
109 """
110 def make_responder(retriever):
111 def responder(ids):
112 entities = [retriever(id) for id in ids]
113 entities = [entity for entity in entities if entity]
114
115 if get_method() == "DELETE":
116 for entity in entities:
117 entity.remove(delete=is_delete())
118
119 return flask.make_response(jsonify({'deleted': True}), 200)
120
121 elif get_method() == "PATCH" and patchable:
122 for entity in entities:
123 entity.update(flask.request.get_json())
124 entity.try_sync(True, False) # write, don't move
125
126 if len(entities) == 1:
127 return flask.jsonify(_rep(entities[0], expand=is_expand()))
128 elif entities:
129 return app.response_class(
130 json_generator(entities, root=name),
131 mimetype='application/json'
132 )
133
134 elif get_method() == "GET":
135 if len(entities) == 1:
136 return flask.jsonify(_rep(entities[0], expand=is_expand()))
137 elif entities:
138 return app.response_class(
139 json_generator(entities, root=name),
140 mimetype='application/json'
141 )
142 else:
143 return flask.abort(404)
144
145 else:
146 return flask.abort(405)
147
148 responder.__name__ = 'get_{0}'.format(name)
149
150 return responder
151 return make_responder
152
153
154 def resource_query(name, patchable=False):
155 """Decorates a function to handle RESTful HTTP queries for resources.
156 """
157 def make_responder(query_func):
158 def responder(queries):
159 entities = query_func(queries)
160
161 if get_method() == "DELETE":
162 for entity in entities:
163 entity.remove(delete=is_delete())
164
165 return flask.make_response(jsonify({'deleted': True}), 200)
166
167 elif get_method() == "PATCH" and patchable:
168 for entity in entities:
169 entity.update(flask.request.get_json())
170 entity.try_sync(True, False) # write, don't move
171
172 return app.response_class(
173 json_generator(entities, root=name),
174 mimetype='application/json'
175 )
176
177 elif get_method() == "GET":
178 return app.response_class(
179 json_generator(
180 entities,
181 root='results', expand=is_expand()
182 ),
183 mimetype='application/json'
184 )
185
186 else:
187 return flask.abort(405)
188
189 responder.__name__ = 'query_{0}'.format(name)
190
191 return responder
192
193 return make_responder
194
195
196 def resource_list(name):
197 """Decorates a function to handle RESTful HTTP request for a list of
198 resources.
199 """
200 def make_responder(list_all):
201 def responder():
202 return app.response_class(
203 json_generator(list_all(), root=name, expand=is_expand()),
204 mimetype='application/json'
205 )
206 responder.__name__ = 'all_{0}'.format(name)
207 return responder
208 return make_responder
209
210
211 def _get_unique_table_field_values(model, field, sort_field):
212 """ retrieve all unique values belonging to a key from a model """
213 if field not in model.all_keys() or sort_field not in model.all_keys():
214 raise KeyError
215 with g.lib.transaction() as tx:
216 rows = tx.query('SELECT DISTINCT "{0}" FROM "{1}" ORDER BY "{2}"'
217 .format(field, model._table, sort_field))
218 return [row[0] for row in rows]
219
220
221 class IdListConverter(BaseConverter):
222 """Converts comma separated lists of ids in urls to integer lists.
223 """
224
225 def to_python(self, value):
226 ids = []
227 for id in value.split(','):
228 try:
229 ids.append(int(id))
230 except ValueError:
231 pass
232 return ids
233
234 def to_url(self, value):
235 return ','.join(str(v) for v in value)
236
237
238 class QueryConverter(PathConverter):
239 """Converts slash separated lists of queries in the url to string list.
240 """
241
242 def to_python(self, value):
243 queries = value.split('/')
244 return [query.replace('\\', os.sep) for query in queries]
245
246 def to_url(self, value):
247 return ','.join([v.replace(os.sep, '\\') for v in value])
248
249
250 class EverythingConverter(PathConverter):
251 regex = '.*?'
252
253
254 # Flask setup.
255
256 app = flask.Flask(__name__)
257 app.url_map.converters['idlist'] = IdListConverter
258 app.url_map.converters['query'] = QueryConverter
259 app.url_map.converters['everything'] = EverythingConverter
260
261
262 @app.before_request
263 def before_request():
264 g.lib = app.config['lib']
265
266
267 # Items.
268
269 @app.route('/item/<idlist:ids>', methods=["GET", "DELETE", "PATCH"])
270 @resource('items', patchable=True)
271 def get_item(id):
272 return g.lib.get_item(id)
273
274
275 @app.route('/item/')
276 @app.route('/item/query/')
277 @resource_list('items')
278 def all_items():
279 return g.lib.items()
280
281
282 @app.route('/item/<int:item_id>/file')
283 def item_file(item_id):
284 item = g.lib.get_item(item_id)
285
286 # On Windows under Python 2, Flask wants a Unicode path. On Python 3, it
287 # *always* wants a Unicode path.
288 if os.name == 'nt':
289 item_path = util.syspath(item.path)
290 else:
291 item_path = util.py3_path(item.path)
292
293 try:
294 unicode_item_path = util.text_string(item.path)
295 except (UnicodeDecodeError, UnicodeEncodeError):
296 unicode_item_path = util.displayable_path(item.path)
297
298 base_filename = os.path.basename(unicode_item_path)
299 try:
300 # Imitate http.server behaviour
301 base_filename.encode("latin-1", "strict")
302 except UnicodeEncodeError:
303 safe_filename = unidecode(base_filename)
304 else:
305 safe_filename = base_filename
306
307 response = flask.send_file(
308 item_path,
309 as_attachment=True,
310 attachment_filename=safe_filename
311 )
312 response.headers['Content-Length'] = os.path.getsize(item_path)
313 return response
314
315
316 @app.route('/item/query/<query:queries>', methods=["GET", "DELETE", "PATCH"])
317 @resource_query('items', patchable=True)
318 def item_query(queries):
319 return g.lib.items(queries)
320
321
322 @app.route('/item/path/<everything:path>')
323 def item_at_path(path):
324 query = beets.library.PathQuery('path', path.encode('utf-8'))
325 item = g.lib.items(query).get()
326 if item:
327 return flask.jsonify(_rep(item))
328 else:
329 return flask.abort(404)
330
331
332 @app.route('/item/values/<string:key>')
333 def item_unique_field_values(key):
334 sort_key = flask.request.args.get('sort_key', key)
335 try:
336 values = _get_unique_table_field_values(beets.library.Item, key,
337 sort_key)
338 except KeyError:
339 return flask.abort(404)
340 return flask.jsonify(values=values)
341
342
343 # Albums.
344
345 @app.route('/album/<idlist:ids>', methods=["GET", "DELETE"])
346 @resource('albums')
347 def get_album(id):
348 return g.lib.get_album(id)
349
350
351 @app.route('/album/')
352 @app.route('/album/query/')
353 @resource_list('albums')
354 def all_albums():
355 return g.lib.albums()
356
357
358 @app.route('/album/query/<query:queries>', methods=["GET", "DELETE"])
359 @resource_query('albums')
360 def album_query(queries):
361 return g.lib.albums(queries)
362
363
364 @app.route('/album/<int:album_id>/art')
365 def album_art(album_id):
366 album = g.lib.get_album(album_id)
367 if album and album.artpath:
368 return flask.send_file(album.artpath.decode())
369 else:
370 return flask.abort(404)
371
372
373 @app.route('/album/values/<string:key>')
374 def album_unique_field_values(key):
375 sort_key = flask.request.args.get('sort_key', key)
376 try:
377 values = _get_unique_table_field_values(beets.library.Album, key,
378 sort_key)
379 except KeyError:
380 return flask.abort(404)
381 return flask.jsonify(values=values)
382
383
384 # Artists.
385
386 @app.route('/artist/')
387 def all_artists():
388 with g.lib.transaction() as tx:
389 rows = tx.query("SELECT DISTINCT albumartist FROM albums")
390 all_artists = [row[0] for row in rows]
391 return flask.jsonify(artist_names=all_artists)
392
393
394 # Library information.
395
396 @app.route('/stats')
397 def stats():
398 with g.lib.transaction() as tx:
399 item_rows = tx.query("SELECT COUNT(*) FROM items")
400 album_rows = tx.query("SELECT COUNT(*) FROM albums")
401 return flask.jsonify({
402 'items': item_rows[0][0],
403 'albums': album_rows[0][0],
404 })
405
406
407 # UI.
408
409 @app.route('/')
410 def home():
411 return flask.render_template('index.html')
412
413
414 # Plugin hook.
415
416 class WebPlugin(BeetsPlugin):
417 def __init__(self):
418 super(WebPlugin, self).__init__()
419 self.config.add({
420 'host': u'127.0.0.1',
421 'port': 8337,
422 'cors': '',
423 'cors_supports_credentials': False,
424 'reverse_proxy': False,
425 'include_paths': False,
426 })
427
428 def commands(self):
429 cmd = ui.Subcommand('web', help=u'start a Web interface')
430 cmd.parser.add_option(u'-d', u'--debug', action='store_true',
431 default=False, help=u'debug mode')
432
433 def func(lib, opts, args):
434 args = ui.decargs(args)
435 if args:
436 self.config['host'] = args.pop(0)
437 if args:
438 self.config['port'] = int(args.pop(0))
439
440 app.config['lib'] = lib
441 # Normalizes json output
442 app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False
443
444 app.config['INCLUDE_PATHS'] = self.config['include_paths']
445
446 # Enable CORS if required.
447 if self.config['cors']:
448 self._log.info(u'Enabling CORS with origin: {0}',
449 self.config['cors'])
450 from flask_cors import CORS
451 app.config['CORS_ALLOW_HEADERS'] = "Content-Type"
452 app.config['CORS_RESOURCES'] = {
453 r"/*": {"origins": self.config['cors'].get(str)}
454 }
455 CORS(
456 app,
457 supports_credentials=self.config[
458 'cors_supports_credentials'
459 ].get(bool)
460 )
461
462 # Allow serving behind a reverse proxy
463 if self.config['reverse_proxy']:
464 app.wsgi_app = ReverseProxied(app.wsgi_app)
465
466 # Start the web application.
467 app.run(host=self.config['host'].as_str(),
468 port=self.config['port'].get(int),
469 debug=opts.debug, threaded=True)
470 cmd.func = func
471 return [cmd]
472
473
474 class ReverseProxied(object):
475 '''Wrap the application in this middleware and configure the
476 front-end server to add these headers, to let you quietly bind
477 this to a URL other than / and to an HTTP scheme that is
478 different than what is used locally.
479
480 In nginx:
481 location /myprefix {
482 proxy_pass http://192.168.0.1:5001;
483 proxy_set_header Host $host;
484 proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
485 proxy_set_header X-Scheme $scheme;
486 proxy_set_header X-Script-Name /myprefix;
487 }
488
489 From: http://flask.pocoo.org/snippets/35/
490
491 :param app: the WSGI application
492 '''
493 def __init__(self, app):
494 self.app = app
495
496 def __call__(self, environ, start_response):
497 script_name = environ.get('HTTP_X_SCRIPT_NAME', '')
498 if script_name:
499 environ['SCRIPT_NAME'] = script_name
500 path_info = environ['PATH_INFO']
501 if path_info.startswith(script_name):
502 environ['PATH_INFO'] = path_info[len(script_name):]
503
504 scheme = environ.get('HTTP_X_SCHEME', '')
505 if scheme:
506 environ['wsgi.url_scheme'] = scheme
507 return self.app(environ, start_response)
508
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/beetsplug/web/__init__.py b/beetsplug/web/__init__.py
--- a/beetsplug/web/__init__.py
+++ b/beetsplug/web/__init__.py
@@ -59,7 +59,10 @@
return out
elif isinstance(obj, beets.library.Album):
- del out['artpath']
+ if app.config.get('INCLUDE_PATHS', False):
+ out['artpath'] = util.displayable_path(out['artpath'])
+ else:
+ del out['artpath']
if expand:
out['items'] = [_rep(item) for item in obj.items()]
return out
| {"golden_diff": "diff --git a/beetsplug/web/__init__.py b/beetsplug/web/__init__.py\n--- a/beetsplug/web/__init__.py\n+++ b/beetsplug/web/__init__.py\n@@ -59,7 +59,10 @@\n return out\n \n elif isinstance(obj, beets.library.Album):\n- del out['artpath']\n+ if app.config.get('INCLUDE_PATHS', False):\n+ out['artpath'] = util.displayable_path(out['artpath'])\n+ else:\n+ del out['artpath']\n if expand:\n out['items'] = [_rep(item) for item in obj.items()]\n return out\n", "issue": "web: GET /album/<n> hides artpath even when INCLUDE_PATHS is set\n### Problem\r\n\r\n`beet web` provides GET operations to fetch track items and albums. By default this removes paths\r\nfrom the returned data, but the config setting INCLUDE_PATHS should allow the paths to be returned.\r\n\r\nThis works correctly for GET /item/... but not GET /album/... In the album case, the artpath is unconditionally deleted from the results.\r\n\r\n### Setup\r\n\r\nAdd to config file:\r\n\r\nweb:\r\n include_paths: true\r\n\r\nUse `beet web` to make a webserver available and do a GET /album/N, where N is the album id of an album in the database which has a cover art set. The JSON result should include the 'artpath' value but does not.\r\n\r\n* OS: Linux (Debian Testing)\r\n* Python version: 3.9.1-1\r\n* beets version: 1.4.9-7\r\n* Turning off plugins made problem go away (yes/no): bug in web plugin\r\n\r\nNote this is a small issue, although I have hit it. I have a fix (and a regression test) which I will submit as a small PR once my first PR has been finished (so I can learn from the mistakes I made in that!).\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This file is part of beets.\n# Copyright 2016, Adrian Sampson.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"A Web interface to beets.\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nfrom beets.plugins import BeetsPlugin\nfrom beets import ui\nfrom beets import util\nimport beets.library\nimport flask\nfrom flask import g, jsonify\nfrom werkzeug.routing import BaseConverter, PathConverter\nimport os\nfrom unidecode import unidecode\nimport json\nimport base64\n\n\n# Utilities.\n\ndef _rep(obj, expand=False):\n \"\"\"Get a flat -- i.e., JSON-ish -- representation of a beets Item or\n Album object. For Albums, `expand` dictates whether tracks are\n included.\n \"\"\"\n out = dict(obj)\n\n if isinstance(obj, beets.library.Item):\n if app.config.get('INCLUDE_PATHS', False):\n out['path'] = util.displayable_path(out['path'])\n else:\n del out['path']\n\n # Filter all bytes attributes and convert them to strings.\n for key, value in out.items():\n if isinstance(out[key], bytes):\n out[key] = base64.b64encode(value).decode('ascii')\n\n # Get the size (in bytes) of the backing file. 
This is useful\n # for the Tomahawk resolver API.\n try:\n out['size'] = os.path.getsize(util.syspath(obj.path))\n except OSError:\n out['size'] = 0\n\n return out\n\n elif isinstance(obj, beets.library.Album):\n del out['artpath']\n if expand:\n out['items'] = [_rep(item) for item in obj.items()]\n return out\n\n\ndef json_generator(items, root, expand=False):\n \"\"\"Generator that dumps list of beets Items or Albums as JSON\n\n :param root: root key for JSON\n :param items: list of :class:`Item` or :class:`Album` to dump\n :param expand: If true every :class:`Album` contains its items in the json\n representation\n :returns: generator that yields strings\n \"\"\"\n yield '{\"%s\":[' % root\n first = True\n for item in items:\n if first:\n first = False\n else:\n yield ','\n yield json.dumps(_rep(item, expand=expand))\n yield ']}'\n\n\ndef is_expand():\n \"\"\"Returns whether the current request is for an expanded response.\"\"\"\n\n return flask.request.args.get('expand') is not None\n\n\ndef is_delete():\n \"\"\"Returns whether the current delete request should remove the selected\n files.\n \"\"\"\n\n return flask.request.args.get('delete') is not None\n\n\ndef get_method():\n \"\"\"Returns the HTTP method of the current request.\"\"\"\n return flask.request.method\n\n\ndef resource(name, patchable=False):\n \"\"\"Decorates a function to handle RESTful HTTP requests for a resource.\n \"\"\"\n def make_responder(retriever):\n def responder(ids):\n entities = [retriever(id) for id in ids]\n entities = [entity for entity in entities if entity]\n\n if get_method() == \"DELETE\":\n for entity in entities:\n entity.remove(delete=is_delete())\n\n return flask.make_response(jsonify({'deleted': True}), 200)\n\n elif get_method() == \"PATCH\" and patchable:\n for entity in entities:\n entity.update(flask.request.get_json())\n entity.try_sync(True, False) # write, don't move\n\n if len(entities) == 1:\n return flask.jsonify(_rep(entities[0], expand=is_expand()))\n elif entities:\n return app.response_class(\n json_generator(entities, root=name),\n mimetype='application/json'\n )\n\n elif get_method() == \"GET\":\n if len(entities) == 1:\n return flask.jsonify(_rep(entities[0], expand=is_expand()))\n elif entities:\n return app.response_class(\n json_generator(entities, root=name),\n mimetype='application/json'\n )\n else:\n return flask.abort(404)\n\n else:\n return flask.abort(405)\n\n responder.__name__ = 'get_{0}'.format(name)\n\n return responder\n return make_responder\n\n\ndef resource_query(name, patchable=False):\n \"\"\"Decorates a function to handle RESTful HTTP queries for resources.\n \"\"\"\n def make_responder(query_func):\n def responder(queries):\n entities = query_func(queries)\n\n if get_method() == \"DELETE\":\n for entity in entities:\n entity.remove(delete=is_delete())\n\n return flask.make_response(jsonify({'deleted': True}), 200)\n\n elif get_method() == \"PATCH\" and patchable:\n for entity in entities:\n entity.update(flask.request.get_json())\n entity.try_sync(True, False) # write, don't move\n\n return app.response_class(\n json_generator(entities, root=name),\n mimetype='application/json'\n )\n\n elif get_method() == \"GET\":\n return app.response_class(\n json_generator(\n entities,\n root='results', expand=is_expand()\n ),\n mimetype='application/json'\n )\n\n else:\n return flask.abort(405)\n\n responder.__name__ = 'query_{0}'.format(name)\n\n return responder\n\n return make_responder\n\n\ndef resource_list(name):\n \"\"\"Decorates a function to handle RESTful 
HTTP request for a list of\n resources.\n \"\"\"\n def make_responder(list_all):\n def responder():\n return app.response_class(\n json_generator(list_all(), root=name, expand=is_expand()),\n mimetype='application/json'\n )\n responder.__name__ = 'all_{0}'.format(name)\n return responder\n return make_responder\n\n\ndef _get_unique_table_field_values(model, field, sort_field):\n \"\"\" retrieve all unique values belonging to a key from a model \"\"\"\n if field not in model.all_keys() or sort_field not in model.all_keys():\n raise KeyError\n with g.lib.transaction() as tx:\n rows = tx.query('SELECT DISTINCT \"{0}\" FROM \"{1}\" ORDER BY \"{2}\"'\n .format(field, model._table, sort_field))\n return [row[0] for row in rows]\n\n\nclass IdListConverter(BaseConverter):\n \"\"\"Converts comma separated lists of ids in urls to integer lists.\n \"\"\"\n\n def to_python(self, value):\n ids = []\n for id in value.split(','):\n try:\n ids.append(int(id))\n except ValueError:\n pass\n return ids\n\n def to_url(self, value):\n return ','.join(str(v) for v in value)\n\n\nclass QueryConverter(PathConverter):\n \"\"\"Converts slash separated lists of queries in the url to string list.\n \"\"\"\n\n def to_python(self, value):\n queries = value.split('/')\n return [query.replace('\\\\', os.sep) for query in queries]\n\n def to_url(self, value):\n return ','.join([v.replace(os.sep, '\\\\') for v in value])\n\n\nclass EverythingConverter(PathConverter):\n regex = '.*?'\n\n\n# Flask setup.\n\napp = flask.Flask(__name__)\napp.url_map.converters['idlist'] = IdListConverter\napp.url_map.converters['query'] = QueryConverter\napp.url_map.converters['everything'] = EverythingConverter\n\n\[email protected]_request\ndef before_request():\n g.lib = app.config['lib']\n\n\n# Items.\n\[email protected]('/item/<idlist:ids>', methods=[\"GET\", \"DELETE\", \"PATCH\"])\n@resource('items', patchable=True)\ndef get_item(id):\n return g.lib.get_item(id)\n\n\[email protected]('/item/')\[email protected]('/item/query/')\n@resource_list('items')\ndef all_items():\n return g.lib.items()\n\n\[email protected]('/item/<int:item_id>/file')\ndef item_file(item_id):\n item = g.lib.get_item(item_id)\n\n # On Windows under Python 2, Flask wants a Unicode path. 
On Python 3, it\n # *always* wants a Unicode path.\n if os.name == 'nt':\n item_path = util.syspath(item.path)\n else:\n item_path = util.py3_path(item.path)\n\n try:\n unicode_item_path = util.text_string(item.path)\n except (UnicodeDecodeError, UnicodeEncodeError):\n unicode_item_path = util.displayable_path(item.path)\n\n base_filename = os.path.basename(unicode_item_path)\n try:\n # Imitate http.server behaviour\n base_filename.encode(\"latin-1\", \"strict\")\n except UnicodeEncodeError:\n safe_filename = unidecode(base_filename)\n else:\n safe_filename = base_filename\n\n response = flask.send_file(\n item_path,\n as_attachment=True,\n attachment_filename=safe_filename\n )\n response.headers['Content-Length'] = os.path.getsize(item_path)\n return response\n\n\[email protected]('/item/query/<query:queries>', methods=[\"GET\", \"DELETE\", \"PATCH\"])\n@resource_query('items', patchable=True)\ndef item_query(queries):\n return g.lib.items(queries)\n\n\[email protected]('/item/path/<everything:path>')\ndef item_at_path(path):\n query = beets.library.PathQuery('path', path.encode('utf-8'))\n item = g.lib.items(query).get()\n if item:\n return flask.jsonify(_rep(item))\n else:\n return flask.abort(404)\n\n\[email protected]('/item/values/<string:key>')\ndef item_unique_field_values(key):\n sort_key = flask.request.args.get('sort_key', key)\n try:\n values = _get_unique_table_field_values(beets.library.Item, key,\n sort_key)\n except KeyError:\n return flask.abort(404)\n return flask.jsonify(values=values)\n\n\n# Albums.\n\[email protected]('/album/<idlist:ids>', methods=[\"GET\", \"DELETE\"])\n@resource('albums')\ndef get_album(id):\n return g.lib.get_album(id)\n\n\[email protected]('/album/')\[email protected]('/album/query/')\n@resource_list('albums')\ndef all_albums():\n return g.lib.albums()\n\n\[email protected]('/album/query/<query:queries>', methods=[\"GET\", \"DELETE\"])\n@resource_query('albums')\ndef album_query(queries):\n return g.lib.albums(queries)\n\n\[email protected]('/album/<int:album_id>/art')\ndef album_art(album_id):\n album = g.lib.get_album(album_id)\n if album and album.artpath:\n return flask.send_file(album.artpath.decode())\n else:\n return flask.abort(404)\n\n\[email protected]('/album/values/<string:key>')\ndef album_unique_field_values(key):\n sort_key = flask.request.args.get('sort_key', key)\n try:\n values = _get_unique_table_field_values(beets.library.Album, key,\n sort_key)\n except KeyError:\n return flask.abort(404)\n return flask.jsonify(values=values)\n\n\n# Artists.\n\[email protected]('/artist/')\ndef all_artists():\n with g.lib.transaction() as tx:\n rows = tx.query(\"SELECT DISTINCT albumartist FROM albums\")\n all_artists = [row[0] for row in rows]\n return flask.jsonify(artist_names=all_artists)\n\n\n# Library information.\n\[email protected]('/stats')\ndef stats():\n with g.lib.transaction() as tx:\n item_rows = tx.query(\"SELECT COUNT(*) FROM items\")\n album_rows = tx.query(\"SELECT COUNT(*) FROM albums\")\n return flask.jsonify({\n 'items': item_rows[0][0],\n 'albums': album_rows[0][0],\n })\n\n\n# UI.\n\[email protected]('/')\ndef home():\n return flask.render_template('index.html')\n\n\n# Plugin hook.\n\nclass WebPlugin(BeetsPlugin):\n def __init__(self):\n super(WebPlugin, self).__init__()\n self.config.add({\n 'host': u'127.0.0.1',\n 'port': 8337,\n 'cors': '',\n 'cors_supports_credentials': False,\n 'reverse_proxy': False,\n 'include_paths': False,\n })\n\n def commands(self):\n cmd = ui.Subcommand('web', help=u'start a Web interface')\n 
cmd.parser.add_option(u'-d', u'--debug', action='store_true',\n default=False, help=u'debug mode')\n\n def func(lib, opts, args):\n args = ui.decargs(args)\n if args:\n self.config['host'] = args.pop(0)\n if args:\n self.config['port'] = int(args.pop(0))\n\n app.config['lib'] = lib\n # Normalizes json output\n app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False\n\n app.config['INCLUDE_PATHS'] = self.config['include_paths']\n\n # Enable CORS if required.\n if self.config['cors']:\n self._log.info(u'Enabling CORS with origin: {0}',\n self.config['cors'])\n from flask_cors import CORS\n app.config['CORS_ALLOW_HEADERS'] = \"Content-Type\"\n app.config['CORS_RESOURCES'] = {\n r\"/*\": {\"origins\": self.config['cors'].get(str)}\n }\n CORS(\n app,\n supports_credentials=self.config[\n 'cors_supports_credentials'\n ].get(bool)\n )\n\n # Allow serving behind a reverse proxy\n if self.config['reverse_proxy']:\n app.wsgi_app = ReverseProxied(app.wsgi_app)\n\n # Start the web application.\n app.run(host=self.config['host'].as_str(),\n port=self.config['port'].get(int),\n debug=opts.debug, threaded=True)\n cmd.func = func\n return [cmd]\n\n\nclass ReverseProxied(object):\n '''Wrap the application in this middleware and configure the\n front-end server to add these headers, to let you quietly bind\n this to a URL other than / and to an HTTP scheme that is\n different than what is used locally.\n\n In nginx:\n location /myprefix {\n proxy_pass http://192.168.0.1:5001;\n proxy_set_header Host $host;\n proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n proxy_set_header X-Scheme $scheme;\n proxy_set_header X-Script-Name /myprefix;\n }\n\n From: http://flask.pocoo.org/snippets/35/\n\n :param app: the WSGI application\n '''\n def __init__(self, app):\n self.app = app\n\n def __call__(self, environ, start_response):\n script_name = environ.get('HTTP_X_SCRIPT_NAME', '')\n if script_name:\n environ['SCRIPT_NAME'] = script_name\n path_info = environ['PATH_INFO']\n if path_info.startswith(script_name):\n environ['PATH_INFO'] = path_info[len(script_name):]\n\n scheme = environ.get('HTTP_X_SCHEME', '')\n if scheme:\n environ['wsgi.url_scheme'] = scheme\n return self.app(environ, start_response)\n", "path": "beetsplug/web/__init__.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# This file is part of beets.\n# Copyright 2016, Adrian Sampson.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"A Web interface to beets.\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nfrom beets.plugins import BeetsPlugin\nfrom beets import ui\nfrom beets import util\nimport beets.library\nimport flask\nfrom flask import g, jsonify\nfrom werkzeug.routing import BaseConverter, PathConverter\nimport os\nfrom unidecode import unidecode\nimport json\nimport base64\n\n\n# Utilities.\n\ndef _rep(obj, expand=False):\n \"\"\"Get a flat -- i.e., JSON-ish -- representation of a beets Item or\n Album object. 
For Albums, `expand` dictates whether tracks are\n included.\n \"\"\"\n out = dict(obj)\n\n if isinstance(obj, beets.library.Item):\n if app.config.get('INCLUDE_PATHS', False):\n out['path'] = util.displayable_path(out['path'])\n else:\n del out['path']\n\n # Filter all bytes attributes and convert them to strings.\n for key, value in out.items():\n if isinstance(out[key], bytes):\n out[key] = base64.b64encode(value).decode('ascii')\n\n # Get the size (in bytes) of the backing file. This is useful\n # for the Tomahawk resolver API.\n try:\n out['size'] = os.path.getsize(util.syspath(obj.path))\n except OSError:\n out['size'] = 0\n\n return out\n\n elif isinstance(obj, beets.library.Album):\n if app.config.get('INCLUDE_PATHS', False):\n out['artpath'] = util.displayable_path(out['artpath'])\n else:\n del out['artpath']\n if expand:\n out['items'] = [_rep(item) for item in obj.items()]\n return out\n\n\ndef json_generator(items, root, expand=False):\n \"\"\"Generator that dumps list of beets Items or Albums as JSON\n\n :param root: root key for JSON\n :param items: list of :class:`Item` or :class:`Album` to dump\n :param expand: If true every :class:`Album` contains its items in the json\n representation\n :returns: generator that yields strings\n \"\"\"\n yield '{\"%s\":[' % root\n first = True\n for item in items:\n if first:\n first = False\n else:\n yield ','\n yield json.dumps(_rep(item, expand=expand))\n yield ']}'\n\n\ndef is_expand():\n \"\"\"Returns whether the current request is for an expanded response.\"\"\"\n\n return flask.request.args.get('expand') is not None\n\n\ndef is_delete():\n \"\"\"Returns whether the current delete request should remove the selected\n files.\n \"\"\"\n\n return flask.request.args.get('delete') is not None\n\n\ndef get_method():\n \"\"\"Returns the HTTP method of the current request.\"\"\"\n return flask.request.method\n\n\ndef resource(name, patchable=False):\n \"\"\"Decorates a function to handle RESTful HTTP requests for a resource.\n \"\"\"\n def make_responder(retriever):\n def responder(ids):\n entities = [retriever(id) for id in ids]\n entities = [entity for entity in entities if entity]\n\n if get_method() == \"DELETE\":\n for entity in entities:\n entity.remove(delete=is_delete())\n\n return flask.make_response(jsonify({'deleted': True}), 200)\n\n elif get_method() == \"PATCH\" and patchable:\n for entity in entities:\n entity.update(flask.request.get_json())\n entity.try_sync(True, False) # write, don't move\n\n if len(entities) == 1:\n return flask.jsonify(_rep(entities[0], expand=is_expand()))\n elif entities:\n return app.response_class(\n json_generator(entities, root=name),\n mimetype='application/json'\n )\n\n elif get_method() == \"GET\":\n if len(entities) == 1:\n return flask.jsonify(_rep(entities[0], expand=is_expand()))\n elif entities:\n return app.response_class(\n json_generator(entities, root=name),\n mimetype='application/json'\n )\n else:\n return flask.abort(404)\n\n else:\n return flask.abort(405)\n\n responder.__name__ = 'get_{0}'.format(name)\n\n return responder\n return make_responder\n\n\ndef resource_query(name, patchable=False):\n \"\"\"Decorates a function to handle RESTful HTTP queries for resources.\n \"\"\"\n def make_responder(query_func):\n def responder(queries):\n entities = query_func(queries)\n\n if get_method() == \"DELETE\":\n for entity in entities:\n entity.remove(delete=is_delete())\n\n return flask.make_response(jsonify({'deleted': True}), 200)\n\n elif get_method() == \"PATCH\" and patchable:\n for 
entity in entities:\n entity.update(flask.request.get_json())\n entity.try_sync(True, False) # write, don't move\n\n return app.response_class(\n json_generator(entities, root=name),\n mimetype='application/json'\n )\n\n elif get_method() == \"GET\":\n return app.response_class(\n json_generator(\n entities,\n root='results', expand=is_expand()\n ),\n mimetype='application/json'\n )\n\n else:\n return flask.abort(405)\n\n responder.__name__ = 'query_{0}'.format(name)\n\n return responder\n\n return make_responder\n\n\ndef resource_list(name):\n \"\"\"Decorates a function to handle RESTful HTTP request for a list of\n resources.\n \"\"\"\n def make_responder(list_all):\n def responder():\n return app.response_class(\n json_generator(list_all(), root=name, expand=is_expand()),\n mimetype='application/json'\n )\n responder.__name__ = 'all_{0}'.format(name)\n return responder\n return make_responder\n\n\ndef _get_unique_table_field_values(model, field, sort_field):\n \"\"\" retrieve all unique values belonging to a key from a model \"\"\"\n if field not in model.all_keys() or sort_field not in model.all_keys():\n raise KeyError\n with g.lib.transaction() as tx:\n rows = tx.query('SELECT DISTINCT \"{0}\" FROM \"{1}\" ORDER BY \"{2}\"'\n .format(field, model._table, sort_field))\n return [row[0] for row in rows]\n\n\nclass IdListConverter(BaseConverter):\n \"\"\"Converts comma separated lists of ids in urls to integer lists.\n \"\"\"\n\n def to_python(self, value):\n ids = []\n for id in value.split(','):\n try:\n ids.append(int(id))\n except ValueError:\n pass\n return ids\n\n def to_url(self, value):\n return ','.join(str(v) for v in value)\n\n\nclass QueryConverter(PathConverter):\n \"\"\"Converts slash separated lists of queries in the url to string list.\n \"\"\"\n\n def to_python(self, value):\n queries = value.split('/')\n return [query.replace('\\\\', os.sep) for query in queries]\n\n def to_url(self, value):\n return ','.join([v.replace(os.sep, '\\\\') for v in value])\n\n\nclass EverythingConverter(PathConverter):\n regex = '.*?'\n\n\n# Flask setup.\n\napp = flask.Flask(__name__)\napp.url_map.converters['idlist'] = IdListConverter\napp.url_map.converters['query'] = QueryConverter\napp.url_map.converters['everything'] = EverythingConverter\n\n\[email protected]_request\ndef before_request():\n g.lib = app.config['lib']\n\n\n# Items.\n\[email protected]('/item/<idlist:ids>', methods=[\"GET\", \"DELETE\", \"PATCH\"])\n@resource('items', patchable=True)\ndef get_item(id):\n return g.lib.get_item(id)\n\n\[email protected]('/item/')\[email protected]('/item/query/')\n@resource_list('items')\ndef all_items():\n return g.lib.items()\n\n\[email protected]('/item/<int:item_id>/file')\ndef item_file(item_id):\n item = g.lib.get_item(item_id)\n\n # On Windows under Python 2, Flask wants a Unicode path. 
On Python 3, it\n # *always* wants a Unicode path.\n if os.name == 'nt':\n item_path = util.syspath(item.path)\n else:\n item_path = util.py3_path(item.path)\n\n try:\n unicode_item_path = util.text_string(item.path)\n except (UnicodeDecodeError, UnicodeEncodeError):\n unicode_item_path = util.displayable_path(item.path)\n\n base_filename = os.path.basename(unicode_item_path)\n try:\n # Imitate http.server behaviour\n base_filename.encode(\"latin-1\", \"strict\")\n except UnicodeEncodeError:\n safe_filename = unidecode(base_filename)\n else:\n safe_filename = base_filename\n\n response = flask.send_file(\n item_path,\n as_attachment=True,\n attachment_filename=safe_filename\n )\n response.headers['Content-Length'] = os.path.getsize(item_path)\n return response\n\n\[email protected]('/item/query/<query:queries>', methods=[\"GET\", \"DELETE\", \"PATCH\"])\n@resource_query('items', patchable=True)\ndef item_query(queries):\n return g.lib.items(queries)\n\n\[email protected]('/item/path/<everything:path>')\ndef item_at_path(path):\n query = beets.library.PathQuery('path', path.encode('utf-8'))\n item = g.lib.items(query).get()\n if item:\n return flask.jsonify(_rep(item))\n else:\n return flask.abort(404)\n\n\[email protected]('/item/values/<string:key>')\ndef item_unique_field_values(key):\n sort_key = flask.request.args.get('sort_key', key)\n try:\n values = _get_unique_table_field_values(beets.library.Item, key,\n sort_key)\n except KeyError:\n return flask.abort(404)\n return flask.jsonify(values=values)\n\n\n# Albums.\n\[email protected]('/album/<idlist:ids>', methods=[\"GET\", \"DELETE\"])\n@resource('albums')\ndef get_album(id):\n return g.lib.get_album(id)\n\n\[email protected]('/album/')\[email protected]('/album/query/')\n@resource_list('albums')\ndef all_albums():\n return g.lib.albums()\n\n\[email protected]('/album/query/<query:queries>', methods=[\"GET\", \"DELETE\"])\n@resource_query('albums')\ndef album_query(queries):\n return g.lib.albums(queries)\n\n\[email protected]('/album/<int:album_id>/art')\ndef album_art(album_id):\n album = g.lib.get_album(album_id)\n if album and album.artpath:\n return flask.send_file(album.artpath.decode())\n else:\n return flask.abort(404)\n\n\[email protected]('/album/values/<string:key>')\ndef album_unique_field_values(key):\n sort_key = flask.request.args.get('sort_key', key)\n try:\n values = _get_unique_table_field_values(beets.library.Album, key,\n sort_key)\n except KeyError:\n return flask.abort(404)\n return flask.jsonify(values=values)\n\n\n# Artists.\n\[email protected]('/artist/')\ndef all_artists():\n with g.lib.transaction() as tx:\n rows = tx.query(\"SELECT DISTINCT albumartist FROM albums\")\n all_artists = [row[0] for row in rows]\n return flask.jsonify(artist_names=all_artists)\n\n\n# Library information.\n\[email protected]('/stats')\ndef stats():\n with g.lib.transaction() as tx:\n item_rows = tx.query(\"SELECT COUNT(*) FROM items\")\n album_rows = tx.query(\"SELECT COUNT(*) FROM albums\")\n return flask.jsonify({\n 'items': item_rows[0][0],\n 'albums': album_rows[0][0],\n })\n\n\n# UI.\n\[email protected]('/')\ndef home():\n return flask.render_template('index.html')\n\n\n# Plugin hook.\n\nclass WebPlugin(BeetsPlugin):\n def __init__(self):\n super(WebPlugin, self).__init__()\n self.config.add({\n 'host': u'127.0.0.1',\n 'port': 8337,\n 'cors': '',\n 'cors_supports_credentials': False,\n 'reverse_proxy': False,\n 'include_paths': False,\n })\n\n def commands(self):\n cmd = ui.Subcommand('web', help=u'start a Web interface')\n 
cmd.parser.add_option(u'-d', u'--debug', action='store_true',\n default=False, help=u'debug mode')\n\n def func(lib, opts, args):\n args = ui.decargs(args)\n if args:\n self.config['host'] = args.pop(0)\n if args:\n self.config['port'] = int(args.pop(0))\n\n app.config['lib'] = lib\n # Normalizes json output\n app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False\n\n app.config['INCLUDE_PATHS'] = self.config['include_paths']\n\n # Enable CORS if required.\n if self.config['cors']:\n self._log.info(u'Enabling CORS with origin: {0}',\n self.config['cors'])\n from flask_cors import CORS\n app.config['CORS_ALLOW_HEADERS'] = \"Content-Type\"\n app.config['CORS_RESOURCES'] = {\n r\"/*\": {\"origins\": self.config['cors'].get(str)}\n }\n CORS(\n app,\n supports_credentials=self.config[\n 'cors_supports_credentials'\n ].get(bool)\n )\n\n # Allow serving behind a reverse proxy\n if self.config['reverse_proxy']:\n app.wsgi_app = ReverseProxied(app.wsgi_app)\n\n # Start the web application.\n app.run(host=self.config['host'].as_str(),\n port=self.config['port'].get(int),\n debug=opts.debug, threaded=True)\n cmd.func = func\n return [cmd]\n\n\nclass ReverseProxied(object):\n '''Wrap the application in this middleware and configure the\n front-end server to add these headers, to let you quietly bind\n this to a URL other than / and to an HTTP scheme that is\n different than what is used locally.\n\n In nginx:\n location /myprefix {\n proxy_pass http://192.168.0.1:5001;\n proxy_set_header Host $host;\n proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n proxy_set_header X-Scheme $scheme;\n proxy_set_header X-Script-Name /myprefix;\n }\n\n From: http://flask.pocoo.org/snippets/35/\n\n :param app: the WSGI application\n '''\n def __init__(self, app):\n self.app = app\n\n def __call__(self, environ, start_response):\n script_name = environ.get('HTTP_X_SCRIPT_NAME', '')\n if script_name:\n environ['SCRIPT_NAME'] = script_name\n path_info = environ['PATH_INFO']\n if path_info.startswith(script_name):\n environ['PATH_INFO'] = path_info[len(script_name):]\n\n scheme = environ.get('HTTP_X_SCHEME', '')\n if scheme:\n environ['wsgi.url_scheme'] = scheme\n return self.app(environ, start_response)\n", "path": "beetsplug/web/__init__.py"}]} |
gh_patches_debug_1291 | rasdani/github-patches | git_diff | dj-stripe__dj-stripe-701 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
1.1.0 not compatible with python 2
First of all, thanks a lot for your hard work! We've been using dj-stripe for a long time and it has served us well. Now we're ready to upgrade! Let the fun begin ;).
We're using Python 2 and Django 1.11. Am I correct that 1.2 should support that? Anyway, we have to move to 1.1 first for the migrations. There is one problem, though, in the 1.1 release.
In commit https://github.com/dj-stripe/dj-stripe/commit/6a6f048a3a432a3ba40fba8bf90f8789139daec4, `StripeEnumField` was added with a `super()` call that is not Python 2 compatible:
```name, path, args, kwargs = super().deconstruct()```
What do you guys think? Can we backport a hotfix to 1.1.1 or something?
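For reference, a Python 2-compatible sketch of that method, written in the explicit `super(cls, self)` style already used elsewhere in `fields.py`:

```python
def deconstruct(self):
    # Passing the class and instance explicitly keeps this working on Python 2 as well as Python 3.
    name, path, args, kwargs = super(StripeEnumField, self).deconstruct()
    kwargs["enum"] = self.enum
    del kwargs["choices"]
    return name, path, args, kwargs
```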
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `djstripe/fields.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 .. module:: djstripe.fields.
4
5 :synopsis: dj-stripe Custom Field Definitions
6
7 .. moduleauthor:: Bill Huneke (@wahuneke)
8 """
9 from __future__ import absolute_import, division, print_function, unicode_literals
10
11 import decimal
12
13 from django.core.exceptions import FieldError, ImproperlyConfigured
14 from django.core.validators import MaxValueValidator, MinValueValidator
15 from django.db import models
16
17 from .settings import USE_NATIVE_JSONFIELD
18 from .utils import convert_tstamp, dict_nested_accessor
19
20
21 if USE_NATIVE_JSONFIELD:
22 from django.contrib.postgres.fields import JSONField
23 else:
24 from jsonfield import JSONField
25
26
27 class PaymentMethodForeignKey(models.ForeignKey):
28 def __init__(self, **kwargs):
29 kwargs.setdefault("to", "PaymentMethod")
30 super(PaymentMethodForeignKey, self).__init__(**kwargs)
31
32
33 class StripeFieldMixin(object):
34 """
35 Custom fields for all Stripe data.
36
37 This allows keeping track of which database fields are suitable for
38 sending to or receiving from Stripe. Also, allows a few handy extra parameters.
39 """
40
41 # Used if the name at stripe is different from the name in our database
42 # Include a . in name if value is nested in dict in Stripe's object
43 # (e.g. stripe_name = "data.id" --> obj["data"]["id"])
44 stripe_name = None
45
46 # If stripe_name is None, this can also be used to specify a nested value, but
47 # the final value is assumed to be the database field name
48 # (e.g. nested_name = "data" --> obj["data"][db_field_name]
49 nested_name = None
50
51 # This indicates that this field will always appear in a stripe object. It will be
52 # an Exception if we try to parse a stripe object that does not include this field
53 # in the data. If set to False then null=True attribute will be automatically set
54 stripe_required = True
55
56 # If a field was populated in previous API versions but we don't want to drop the old
57 # data for some reason, mark it as deprecated. This will make sure we never try to send
58 # it to Stripe or expect in Stripe data received
59 # This setting automatically implies Null=True
60 deprecated = False
61
62 def __init__(self, *args, **kwargs):
63 """
64 Assign class instance variables based on kwargs.
65
66 Assign extra class instance variables if stripe_required is defined or
67 if deprecated is defined.
68 """
69 self.stripe_name = kwargs.pop('stripe_name', self.stripe_name)
70 self.nested_name = kwargs.pop('nested_name', self.nested_name)
71 self.stripe_required = kwargs.pop('stripe_required', self.stripe_required)
72 self.deprecated = kwargs.pop('deprecated', self.deprecated)
73 if not self.stripe_required:
74 kwargs["null"] = True
75
76 if self.deprecated:
77 kwargs["null"] = True
78 kwargs["default"] = None
79 super(StripeFieldMixin, self).__init__(*args, **kwargs)
80
81 def stripe_to_db(self, data):
82 """Try converting stripe fields to defined database fields."""
83 if not self.deprecated:
84 try:
85 if self.stripe_name:
86 result = dict_nested_accessor(data, self.stripe_name)
87 elif self.nested_name:
88 result = dict_nested_accessor(data, self.nested_name + "." + self.name)
89 else:
90 result = data[self.name]
91 except (KeyError, TypeError):
92 if self.stripe_required:
93 model_name = self.model._meta.object_name if hasattr(self, "model") else ""
94 raise FieldError("Required stripe field '{field_name}' was not"
95 " provided in {model_name} data object.".format(field_name=self.name,
96 model_name=model_name))
97 else:
98 result = None
99
100 return result
101
102
103 class StripePercentField(StripeFieldMixin, models.DecimalField):
104 """A field used to define a percent according to djstripe logic."""
105
106 def __init__(self, *args, **kwargs):
107 """Assign default args to this field."""
108 defaults = {
109 'decimal_places': 2,
110 'max_digits': 5,
111 'validators': [MinValueValidator(1.00), MaxValueValidator(100.00)]
112 }
113 defaults.update(kwargs)
114 super(StripePercentField, self).__init__(*args, **defaults)
115
116
117 class StripeCurrencyField(StripeFieldMixin, models.DecimalField):
118 """
119 A field used to define currency according to djstripe logic.
120
121 Stripe is always in cents. djstripe stores everything in dollars.
122 """
123
124 def __init__(self, *args, **kwargs):
125 """Assign default args to this field."""
126 defaults = {
127 'decimal_places': 2,
128 'max_digits': 8,
129 }
130 defaults.update(kwargs)
131 super(StripeCurrencyField, self).__init__(*args, **defaults)
132
133 def stripe_to_db(self, data):
134 """Convert the raw value to decimal representation."""
135 val = super(StripeCurrencyField, self).stripe_to_db(data)
136
137 # Note: 0 is a possible return value, which is 'falseish'
138 if val is not None:
139 return val / decimal.Decimal("100")
140
141
142 class StripeBooleanField(StripeFieldMixin, models.BooleanField):
143 """A field used to define a boolean value according to djstripe logic."""
144
145 def __init__(self, *args, **kwargs):
146 """Throw an error when a user tries to deprecate."""
147 if kwargs.get("deprecated", False):
148 raise ImproperlyConfigured("Boolean field cannot be deprecated. Change field type to "
149 "StripeNullBooleanField")
150 super(StripeBooleanField, self).__init__(*args, **kwargs)
151
152
153 class StripeNullBooleanField(StripeFieldMixin, models.NullBooleanField):
154 """A field used to define a NullBooleanField value according to djstripe logic."""
155
156 pass
157
158
159 class StripeCharField(StripeFieldMixin, models.CharField):
160 """A field used to define a CharField value according to djstripe logic."""
161
162 pass
163
164
165 class StripeEnumField(StripeCharField):
166 def __init__(self, enum, *args, **kwargs):
167 self.enum = enum
168 choices = enum.choices
169 defaults = {
170 "choices": choices,
171 "max_length": max(len(k) for k, v in choices)
172 }
173 defaults.update(kwargs)
174 super(StripeEnumField, self).__init__(*args, **defaults)
175
176 def deconstruct(self):
177 name, path, args, kwargs = super().deconstruct()
178 kwargs["enum"] = self.enum
179 del kwargs["choices"]
180 return name, path, args, kwargs
181
182
183 class StripeIdField(StripeCharField):
184 """A field with enough space to hold any stripe ID."""
185
186 def __init__(self, *args, **kwargs):
187 """
188 Assign default args to this field.
189
190 As per: https://stripe.com/docs/upgrades
191 You can safely assume object IDs we generate will never exceed 255
192 characters, but you should be able to handle IDs of up to that
193 length.
194 """
195 defaults = {
196 'max_length': 255,
197 'blank': False,
198 'null': False,
199 }
200 defaults.update(kwargs)
201 super(StripeIdField, self).__init__(*args, **defaults)
202
203
204 class StripeTextField(StripeFieldMixin, models.TextField):
205 """A field used to define a TextField value according to djstripe logic."""
206
207 pass
208
209
210 class StripeDateTimeField(StripeFieldMixin, models.DateTimeField):
211 """A field used to define a DateTimeField value according to djstripe logic."""
212
213 def stripe_to_db(self, data):
214 """Convert the raw timestamp value to a DateTime representation."""
215 val = super(StripeDateTimeField, self).stripe_to_db(data)
216
217 # Note: 0 is a possible return value, which is 'falseish'
218 if val is not None:
219 return convert_tstamp(val)
220
221
222 class StripeIntegerField(StripeFieldMixin, models.IntegerField):
223 """A field used to define a IntegerField value according to djstripe logic."""
224
225 pass
226
227
228 class StripePositiveIntegerField(StripeFieldMixin, models.PositiveIntegerField):
229 """A field used to define a PositiveIntegerField value according to djstripe logic."""
230
231 pass
232
233
234 class StripeJSONField(StripeFieldMixin, JSONField):
235 """A field used to define a JSONField value according to djstripe logic."""
236
237 pass
238
```
--- END FILES ---
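The `stripe_name` / `nested_name` comments in the listing above describe a dotted-path lookup, e.g. `stripe_name = "data.id"` resolves to `obj["data"]["id"]`. A minimal sketch of that resolution, using an illustrative helper rather than djstripe's actual `dict_nested_accessor`:

```
def nested_get(data, dotted_path):
    # Walk a dict by a dotted path, e.g. "data.id" -> data["data"]["id"].
    value = data
    for key in dotted_path.split("."):
        value = value[key]
    return value


stripe_payload = {"data": {"id": "evt_123"}, "amount": 500}
print(nested_get(stripe_payload, "data.id"))  # evt_123
print(nested_get(stripe_payload, "amount"))   # 500
```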
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/djstripe/fields.py b/djstripe/fields.py
--- a/djstripe/fields.py
+++ b/djstripe/fields.py
@@ -174,7 +174,7 @@
super(StripeEnumField, self).__init__(*args, **defaults)
def deconstruct(self):
- name, path, args, kwargs = super().deconstruct()
+ name, path, args, kwargs = super(StripeEnumField, self).deconstruct()
kwargs["enum"] = self.enum
del kwargs["choices"]
return name, path, args, kwargs
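For context on why this one-line change matters: the zero-argument form of `super()` is a Python 3 feature that relies on the compiler-provided `__class__` cell, so under Python 2 it raises `TypeError: super() takes at least 1 argument (0 given)`; spelling out the class and instance works on both interpreters. A minimal sketch outside djstripe (the `Base`/`Child` names are made up for illustration):

```
class Base(object):
    def deconstruct(self):
        return ("base", [], {})


class Child(Base):
    def deconstruct(self):
        # Python 3 only form, fails on Python 2:
        #   return super().deconstruct()
        # Portable form, as used in the patch above:
        return super(Child, self).deconstruct()


print(Child().deconstruct())  # ('base', [], {})
```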
| {"golden_diff": "diff --git a/djstripe/fields.py b/djstripe/fields.py\n--- a/djstripe/fields.py\n+++ b/djstripe/fields.py\n@@ -174,7 +174,7 @@\n super(StripeEnumField, self).__init__(*args, **defaults)\n \n def deconstruct(self):\n- name, path, args, kwargs = super().deconstruct()\n+ name, path, args, kwargs = super(StripeEnumField, self).deconstruct()\n kwargs[\"enum\"] = self.enum\n del kwargs[\"choices\"]\n return name, path, args, kwargs\n", "issue": "1.1.0 not compatible with python 2\nFirst of all, thanks a lot for your hard work! We've been using dj-stripe for a long time and it has served us well. Now we're ready to upgrade! Let the fun begin ;).\r\n\r\nWe're using python 2 and Django 1.11. I'm correct that 1.2 should support that right? Anyway we have to move to 1.1 first for the migrations. There is one problem though in the 1.1 release.\r\n\r\nIn commit https://github.com/dj-stripe/dj-stripe/commit/6a6f048a3a432a3ba40fba8bf90f8789139daec4 `StripeEnumField` was added with the non python 2 compatible `super()` call:\r\n\r\n```name, path, args, kwargs = super().deconstruct()```\r\n\r\nWhat do you guys think? Can we backport a hotfix fix to 1.1.1 or something?\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\n.. module:: djstripe.fields.\n\n :synopsis: dj-stripe Custom Field Definitions\n\n.. moduleauthor:: Bill Huneke (@wahuneke)\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport decimal\n\nfrom django.core.exceptions import FieldError, ImproperlyConfigured\nfrom django.core.validators import MaxValueValidator, MinValueValidator\nfrom django.db import models\n\nfrom .settings import USE_NATIVE_JSONFIELD\nfrom .utils import convert_tstamp, dict_nested_accessor\n\n\nif USE_NATIVE_JSONFIELD:\n from django.contrib.postgres.fields import JSONField\nelse:\n from jsonfield import JSONField\n\n\nclass PaymentMethodForeignKey(models.ForeignKey):\n def __init__(self, **kwargs):\n kwargs.setdefault(\"to\", \"PaymentMethod\")\n super(PaymentMethodForeignKey, self).__init__(**kwargs)\n\n\nclass StripeFieldMixin(object):\n \"\"\"\n Custom fields for all Stripe data.\n\n This allows keeping track of which database fields are suitable for\n sending to or receiving from Stripe. Also, allows a few handy extra parameters.\n \"\"\"\n\n # Used if the name at stripe is different from the name in our database\n # Include a . in name if value is nested in dict in Stripe's object\n # (e.g. stripe_name = \"data.id\" --> obj[\"data\"][\"id\"])\n stripe_name = None\n\n # If stripe_name is None, this can also be used to specify a nested value, but\n # the final value is assumed to be the database field name\n # (e.g. nested_name = \"data\" --> obj[\"data\"][db_field_name]\n nested_name = None\n\n # This indicates that this field will always appear in a stripe object. It will be\n # an Exception if we try to parse a stripe object that does not include this field\n # in the data. If set to False then null=True attribute will be automatically set\n stripe_required = True\n\n # If a field was populated in previous API versions but we don't want to drop the old\n # data for some reason, mark it as deprecated. 
This will make sure we never try to send\n # it to Stripe or expect in Stripe data received\n # This setting automatically implies Null=True\n deprecated = False\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Assign class instance variables based on kwargs.\n\n Assign extra class instance variables if stripe_required is defined or\n if deprecated is defined.\n \"\"\"\n self.stripe_name = kwargs.pop('stripe_name', self.stripe_name)\n self.nested_name = kwargs.pop('nested_name', self.nested_name)\n self.stripe_required = kwargs.pop('stripe_required', self.stripe_required)\n self.deprecated = kwargs.pop('deprecated', self.deprecated)\n if not self.stripe_required:\n kwargs[\"null\"] = True\n\n if self.deprecated:\n kwargs[\"null\"] = True\n kwargs[\"default\"] = None\n super(StripeFieldMixin, self).__init__(*args, **kwargs)\n\n def stripe_to_db(self, data):\n \"\"\"Try converting stripe fields to defined database fields.\"\"\"\n if not self.deprecated:\n try:\n if self.stripe_name:\n result = dict_nested_accessor(data, self.stripe_name)\n elif self.nested_name:\n result = dict_nested_accessor(data, self.nested_name + \".\" + self.name)\n else:\n result = data[self.name]\n except (KeyError, TypeError):\n if self.stripe_required:\n model_name = self.model._meta.object_name if hasattr(self, \"model\") else \"\"\n raise FieldError(\"Required stripe field '{field_name}' was not\"\n \" provided in {model_name} data object.\".format(field_name=self.name,\n model_name=model_name))\n else:\n result = None\n\n return result\n\n\nclass StripePercentField(StripeFieldMixin, models.DecimalField):\n \"\"\"A field used to define a percent according to djstripe logic.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Assign default args to this field.\"\"\"\n defaults = {\n 'decimal_places': 2,\n 'max_digits': 5,\n 'validators': [MinValueValidator(1.00), MaxValueValidator(100.00)]\n }\n defaults.update(kwargs)\n super(StripePercentField, self).__init__(*args, **defaults)\n\n\nclass StripeCurrencyField(StripeFieldMixin, models.DecimalField):\n \"\"\"\n A field used to define currency according to djstripe logic.\n\n Stripe is always in cents. djstripe stores everything in dollars.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Assign default args to this field.\"\"\"\n defaults = {\n 'decimal_places': 2,\n 'max_digits': 8,\n }\n defaults.update(kwargs)\n super(StripeCurrencyField, self).__init__(*args, **defaults)\n\n def stripe_to_db(self, data):\n \"\"\"Convert the raw value to decimal representation.\"\"\"\n val = super(StripeCurrencyField, self).stripe_to_db(data)\n\n # Note: 0 is a possible return value, which is 'falseish'\n if val is not None:\n return val / decimal.Decimal(\"100\")\n\n\nclass StripeBooleanField(StripeFieldMixin, models.BooleanField):\n \"\"\"A field used to define a boolean value according to djstripe logic.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Throw an error when a user tries to deprecate.\"\"\"\n if kwargs.get(\"deprecated\", False):\n raise ImproperlyConfigured(\"Boolean field cannot be deprecated. 
Change field type to \"\n \"StripeNullBooleanField\")\n super(StripeBooleanField, self).__init__(*args, **kwargs)\n\n\nclass StripeNullBooleanField(StripeFieldMixin, models.NullBooleanField):\n \"\"\"A field used to define a NullBooleanField value according to djstripe logic.\"\"\"\n\n pass\n\n\nclass StripeCharField(StripeFieldMixin, models.CharField):\n \"\"\"A field used to define a CharField value according to djstripe logic.\"\"\"\n\n pass\n\n\nclass StripeEnumField(StripeCharField):\n def __init__(self, enum, *args, **kwargs):\n self.enum = enum\n choices = enum.choices\n defaults = {\n \"choices\": choices,\n \"max_length\": max(len(k) for k, v in choices)\n }\n defaults.update(kwargs)\n super(StripeEnumField, self).__init__(*args, **defaults)\n\n def deconstruct(self):\n name, path, args, kwargs = super().deconstruct()\n kwargs[\"enum\"] = self.enum\n del kwargs[\"choices\"]\n return name, path, args, kwargs\n\n\nclass StripeIdField(StripeCharField):\n \"\"\"A field with enough space to hold any stripe ID.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Assign default args to this field.\n\n As per: https://stripe.com/docs/upgrades\n You can safely assume object IDs we generate will never exceed 255\n characters, but you should be able to handle IDs of up to that\n length.\n \"\"\"\n defaults = {\n 'max_length': 255,\n 'blank': False,\n 'null': False,\n }\n defaults.update(kwargs)\n super(StripeIdField, self).__init__(*args, **defaults)\n\n\nclass StripeTextField(StripeFieldMixin, models.TextField):\n \"\"\"A field used to define a TextField value according to djstripe logic.\"\"\"\n\n pass\n\n\nclass StripeDateTimeField(StripeFieldMixin, models.DateTimeField):\n \"\"\"A field used to define a DateTimeField value according to djstripe logic.\"\"\"\n\n def stripe_to_db(self, data):\n \"\"\"Convert the raw timestamp value to a DateTime representation.\"\"\"\n val = super(StripeDateTimeField, self).stripe_to_db(data)\n\n # Note: 0 is a possible return value, which is 'falseish'\n if val is not None:\n return convert_tstamp(val)\n\n\nclass StripeIntegerField(StripeFieldMixin, models.IntegerField):\n \"\"\"A field used to define a IntegerField value according to djstripe logic.\"\"\"\n\n pass\n\n\nclass StripePositiveIntegerField(StripeFieldMixin, models.PositiveIntegerField):\n \"\"\"A field used to define a PositiveIntegerField value according to djstripe logic.\"\"\"\n\n pass\n\n\nclass StripeJSONField(StripeFieldMixin, JSONField):\n \"\"\"A field used to define a JSONField value according to djstripe logic.\"\"\"\n\n pass\n", "path": "djstripe/fields.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\n.. module:: djstripe.fields.\n\n :synopsis: dj-stripe Custom Field Definitions\n\n.. 
moduleauthor:: Bill Huneke (@wahuneke)\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport decimal\n\nfrom django.core.exceptions import FieldError, ImproperlyConfigured\nfrom django.core.validators import MaxValueValidator, MinValueValidator\nfrom django.db import models\n\nfrom .settings import USE_NATIVE_JSONFIELD\nfrom .utils import convert_tstamp, dict_nested_accessor\n\n\nif USE_NATIVE_JSONFIELD:\n from django.contrib.postgres.fields import JSONField\nelse:\n from jsonfield import JSONField\n\n\nclass PaymentMethodForeignKey(models.ForeignKey):\n def __init__(self, **kwargs):\n kwargs.setdefault(\"to\", \"PaymentMethod\")\n super(PaymentMethodForeignKey, self).__init__(**kwargs)\n\n\nclass StripeFieldMixin(object):\n \"\"\"\n Custom fields for all Stripe data.\n\n This allows keeping track of which database fields are suitable for\n sending to or receiving from Stripe. Also, allows a few handy extra parameters.\n \"\"\"\n\n # Used if the name at stripe is different from the name in our database\n # Include a . in name if value is nested in dict in Stripe's object\n # (e.g. stripe_name = \"data.id\" --> obj[\"data\"][\"id\"])\n stripe_name = None\n\n # If stripe_name is None, this can also be used to specify a nested value, but\n # the final value is assumed to be the database field name\n # (e.g. nested_name = \"data\" --> obj[\"data\"][db_field_name]\n nested_name = None\n\n # This indicates that this field will always appear in a stripe object. It will be\n # an Exception if we try to parse a stripe object that does not include this field\n # in the data. If set to False then null=True attribute will be automatically set\n stripe_required = True\n\n # If a field was populated in previous API versions but we don't want to drop the old\n # data for some reason, mark it as deprecated. 
This will make sure we never try to send\n # it to Stripe or expect in Stripe data received\n # This setting automatically implies Null=True\n deprecated = False\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Assign class instance variables based on kwargs.\n\n Assign extra class instance variables if stripe_required is defined or\n if deprecated is defined.\n \"\"\"\n self.stripe_name = kwargs.pop('stripe_name', self.stripe_name)\n self.nested_name = kwargs.pop('nested_name', self.nested_name)\n self.stripe_required = kwargs.pop('stripe_required', self.stripe_required)\n self.deprecated = kwargs.pop('deprecated', self.deprecated)\n if not self.stripe_required:\n kwargs[\"null\"] = True\n\n if self.deprecated:\n kwargs[\"null\"] = True\n kwargs[\"default\"] = None\n super(StripeFieldMixin, self).__init__(*args, **kwargs)\n\n def stripe_to_db(self, data):\n \"\"\"Try converting stripe fields to defined database fields.\"\"\"\n if not self.deprecated:\n try:\n if self.stripe_name:\n result = dict_nested_accessor(data, self.stripe_name)\n elif self.nested_name:\n result = dict_nested_accessor(data, self.nested_name + \".\" + self.name)\n else:\n result = data[self.name]\n except (KeyError, TypeError):\n if self.stripe_required:\n model_name = self.model._meta.object_name if hasattr(self, \"model\") else \"\"\n raise FieldError(\"Required stripe field '{field_name}' was not\"\n \" provided in {model_name} data object.\".format(field_name=self.name,\n model_name=model_name))\n else:\n result = None\n\n return result\n\n\nclass StripePercentField(StripeFieldMixin, models.DecimalField):\n \"\"\"A field used to define a percent according to djstripe logic.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Assign default args to this field.\"\"\"\n defaults = {\n 'decimal_places': 2,\n 'max_digits': 5,\n 'validators': [MinValueValidator(1.00), MaxValueValidator(100.00)]\n }\n defaults.update(kwargs)\n super(StripePercentField, self).__init__(*args, **defaults)\n\n\nclass StripeCurrencyField(StripeFieldMixin, models.DecimalField):\n \"\"\"\n A field used to define currency according to djstripe logic.\n\n Stripe is always in cents. djstripe stores everything in dollars.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Assign default args to this field.\"\"\"\n defaults = {\n 'decimal_places': 2,\n 'max_digits': 8,\n }\n defaults.update(kwargs)\n super(StripeCurrencyField, self).__init__(*args, **defaults)\n\n def stripe_to_db(self, data):\n \"\"\"Convert the raw value to decimal representation.\"\"\"\n val = super(StripeCurrencyField, self).stripe_to_db(data)\n\n # Note: 0 is a possible return value, which is 'falseish'\n if val is not None:\n return val / decimal.Decimal(\"100\")\n\n\nclass StripeBooleanField(StripeFieldMixin, models.BooleanField):\n \"\"\"A field used to define a boolean value according to djstripe logic.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Throw an error when a user tries to deprecate.\"\"\"\n if kwargs.get(\"deprecated\", False):\n raise ImproperlyConfigured(\"Boolean field cannot be deprecated. 
Change field type to \"\n \"StripeNullBooleanField\")\n super(StripeBooleanField, self).__init__(*args, **kwargs)\n\n\nclass StripeNullBooleanField(StripeFieldMixin, models.NullBooleanField):\n \"\"\"A field used to define a NullBooleanField value according to djstripe logic.\"\"\"\n\n pass\n\n\nclass StripeCharField(StripeFieldMixin, models.CharField):\n \"\"\"A field used to define a CharField value according to djstripe logic.\"\"\"\n\n pass\n\n\nclass StripeEnumField(StripeCharField):\n def __init__(self, enum, *args, **kwargs):\n self.enum = enum\n choices = enum.choices\n defaults = {\n \"choices\": choices,\n \"max_length\": max(len(k) for k, v in choices)\n }\n defaults.update(kwargs)\n super(StripeEnumField, self).__init__(*args, **defaults)\n\n def deconstruct(self):\n name, path, args, kwargs = super(StripeEnumField, self).deconstruct()\n kwargs[\"enum\"] = self.enum\n del kwargs[\"choices\"]\n return name, path, args, kwargs\n\n\nclass StripeIdField(StripeCharField):\n \"\"\"A field with enough space to hold any stripe ID.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Assign default args to this field.\n\n As per: https://stripe.com/docs/upgrades\n You can safely assume object IDs we generate will never exceed 255\n characters, but you should be able to handle IDs of up to that\n length.\n \"\"\"\n defaults = {\n 'max_length': 255,\n 'blank': False,\n 'null': False,\n }\n defaults.update(kwargs)\n super(StripeIdField, self).__init__(*args, **defaults)\n\n\nclass StripeTextField(StripeFieldMixin, models.TextField):\n \"\"\"A field used to define a TextField value according to djstripe logic.\"\"\"\n\n pass\n\n\nclass StripeDateTimeField(StripeFieldMixin, models.DateTimeField):\n \"\"\"A field used to define a DateTimeField value according to djstripe logic.\"\"\"\n\n def stripe_to_db(self, data):\n \"\"\"Convert the raw timestamp value to a DateTime representation.\"\"\"\n val = super(StripeDateTimeField, self).stripe_to_db(data)\n\n # Note: 0 is a possible return value, which is 'falseish'\n if val is not None:\n return convert_tstamp(val)\n\n\nclass StripeIntegerField(StripeFieldMixin, models.IntegerField):\n \"\"\"A field used to define a IntegerField value according to djstripe logic.\"\"\"\n\n pass\n\n\nclass StripePositiveIntegerField(StripeFieldMixin, models.PositiveIntegerField):\n \"\"\"A field used to define a PositiveIntegerField value according to djstripe logic.\"\"\"\n\n pass\n\n\nclass StripeJSONField(StripeFieldMixin, JSONField):\n \"\"\"A field used to define a JSONField value according to djstripe logic.\"\"\"\n\n pass\n", "path": "djstripe/fields.py"}]} |
gh_patches_debug_1292 | rasdani/github-patches | git_diff | pwr-Solaar__Solaar-1504 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Allow scrolling over tray icon to switch between icons when there are fewer than 4 devices
**Information**
<!-- Please update to Solaar from this repository before asking for a new feature. -->
- Solaar version (`solaar --version` and `git describe --tags`): solaar 1.1.1, 1.1.1-135-gd115ade
**Is your feature request related to a problem? Please describe.**
It would be useful for me to be able to switch between which device the tray icon shows, even though I have only two devices.
[This commit](https://github.com/pwr-Solaar/Solaar/commit/6a66370ffe54a6b08f6f4b0e6fc76b0da2a23d30) has introduced such an ability, but for some reason it doesn't enable it for people with fewer than 4 devices.
Specifically, this is the block that does the check and where I think `4` should be replaced with `2` (or `3` if the receiver itself is in that list as well, although ideally those should probably be filtered out before the check):
https://github.com/pwr-Solaar/Solaar/blob/1ace3ef4f4cc90520d5607fde073e6cd1b66d56a/lib/solaar/ui/tray.py#L87-L90
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/solaar/ui/tray.py`
Content:
```
1 # -*- python-mode -*-
2
3 ## Copyright (C) 2012-2013 Daniel Pavel
4 ##
5 ## This program is free software; you can redistribute it and/or modify
6 ## it under the terms of the GNU General Public License as published by
7 ## the Free Software Foundation; either version 2 of the License, or
8 ## (at your option) any later version.
9 ##
10 ## This program is distributed in the hope that it will be useful,
11 ## but WITHOUT ANY WARRANTY; without even the implied warranty of
12 ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 ## GNU General Public License for more details.
14 ##
15 ## You should have received a copy of the GNU General Public License along
16 ## with this program; if not, write to the Free Software Foundation, Inc.,
17 ## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18
19 import os
20
21 from logging import DEBUG as _DEBUG
22 from logging import getLogger
23 from time import time as _timestamp
24
25 import solaar.gtk as gtk
26
27 from gi.repository import GLib, Gtk
28 from gi.repository.Gdk import ScrollDirection
29 from logitech_receiver.status import KEYS as _K
30 from solaar import NAME
31 from solaar.i18n import _
32
33 from . import icons as _icons
34 from .about import show_window as _show_about_window
35 from .window import popup as _window_popup
36 from .window import toggle as _window_toggle
37
38 _log = getLogger(__name__)
39 del getLogger
40
41 #
42 # constants
43 #
44
45 _TRAY_ICON_SIZE = 48
46 _MENU_ICON_SIZE = Gtk.IconSize.LARGE_TOOLBAR
47
48 #
49 #
50 #
51
52
53 def _create_menu(quit_handler):
54 menu = Gtk.Menu()
55
56 # per-device menu entries will be generated as-needed
57
58 no_receiver = Gtk.MenuItem.new_with_label(_('No Logitech device found'))
59 no_receiver.set_sensitive(False)
60 menu.append(no_receiver)
61 menu.append(Gtk.SeparatorMenuItem.new())
62
63 from .action import make
64 menu.append(make('help-about', _('About %s') % NAME, _show_about_window, stock_id='help-about').create_menu_item())
65 menu.append(make('application-exit', _('Quit %s') % NAME, quit_handler, stock_id='application-exit').create_menu_item())
66 del make
67
68 menu.show_all()
69
70 return menu
71
72
73 _last_scroll = 0
74
75
76 def _scroll(tray_icon, event, direction=None):
77 if direction is None:
78 direction = event.direction
79 now = event.time / 1000.0
80 else:
81 now = None
82
83 if direction != ScrollDirection.UP and direction != ScrollDirection.DOWN:
84 # ignore all other directions
85 return
86
87 if len(_devices_info) < 4:
88 # don't bother with scrolling when there's only one receiver
89 # with only one or two devices
90 return
91
92 # scroll events come way too fast (at least 5-6 at once)
93 # so take a little break between them
94 global _last_scroll
95 now = now or _timestamp()
96 if now - _last_scroll < 0.33: # seconds
97 return
98 _last_scroll = now
99
100 # if _log.isEnabledFor(_DEBUG):
101 # _log.debug("scroll direction %s", direction)
102
103 global _picked_device
104 candidate = None
105
106 if _picked_device is None:
107 for info in _devices_info:
108 # pick first peripheral found
109 if info[1] is not None:
110 candidate = info
111 break
112 else:
113 found = False
114 for info in _devices_info:
115 if not info[1]:
116 # only consider peripherals
117 continue
118 # compare peripherals
119 if info[0:2] == _picked_device[0:2]:
120 if direction == ScrollDirection.UP and candidate:
121 # select previous device
122 break
123 found = True
124 else:
125 if found:
126 candidate = info
127 if direction == ScrollDirection.DOWN:
128 break
129 # if direction is up, but no candidate found before _picked,
130 # let it run through all candidates, will get stuck with the last one
131 else:
132 if direction == ScrollDirection.DOWN:
133 # only use the first one, in case no candidates are after _picked
134 if candidate is None:
135 candidate = info
136 else:
137 candidate = info
138
139 # if the last _picked_device is gone, clear it
140 # the candidate will be either the first or last one remaining,
141 # depending on the scroll direction
142 if not found:
143 _picked_device = None
144
145 _picked_device = candidate or _picked_device
146 if _log.isEnabledFor(_DEBUG):
147 _log.debug('scroll: picked %s', _picked_device)
148 _update_tray_icon()
149
150
151 try:
152 import gi
153 try:
154 gi.require_version('AyatanaAppIndicator3', '0.1')
155 ayatana_appindicator_found = True
156 except ValueError:
157 try:
158 gi.require_version('AppIndicator3', '0.1')
159 ayatana_appindicator_found = False
160 except ValueError:
161 # treat unavailable versions the same as unavailable packages
162 raise ImportError
163
164 if ayatana_appindicator_found:
165 from gi.repository import AyatanaAppIndicator3 as AppIndicator3
166 else:
167 from gi.repository import AppIndicator3
168
169 if _log.isEnabledFor(_DEBUG):
170 _log.debug('using %sAppIndicator3' % ('Ayatana ' if ayatana_appindicator_found else ''))
171
172 # Defense against AppIndicator3 bug that treats files in current directory as icon files
173 # https://bugs.launchpad.net/ubuntu/+source/libappindicator/+bug/1363277
174 # Defense against bug that shows up in XFCE 4.16 where icons are not upscaled
175 def _icon_file(icon_name):
176 if gtk.tray_icon_size is None and not os.path.isfile(icon_name):
177 return icon_name
178 icon_info = Gtk.IconTheme.get_default().lookup_icon(
179 icon_name, gtk.tray_icon_size or _TRAY_ICON_SIZE, Gtk.IconLookupFlags.FORCE_SVG
180 )
181 return icon_info.get_filename() if icon_info else icon_name
182
183 def _create(menu):
184 _icons._init_icon_paths()
185 theme_paths = Gtk.IconTheme.get_default().get_search_path()
186
187 ind = AppIndicator3.Indicator.new_with_path(
188 'indicator-solaar', _icon_file(_icons.TRAY_INIT), AppIndicator3.IndicatorCategory.HARDWARE, theme_paths[0]
189 )
190 ind.set_title(NAME)
191 ind.set_status(AppIndicator3.IndicatorStatus.ACTIVE)
192 # ind.set_attention_icon_full(_icon_file(_icons.TRAY_ATTENTION), '') # works poorly for XFCE 16
193 # ind.set_label(NAME, NAME)
194
195 ind.set_menu(menu)
196 ind.connect('scroll-event', _scroll)
197
198 return ind
199
200 def _hide(indicator):
201 indicator.set_status(AppIndicator3.IndicatorStatus.PASSIVE)
202
203 def _show(indicator):
204 indicator.set_status(AppIndicator3.IndicatorStatus.ACTIVE)
205
206 def _update_tray_icon():
207 if _picked_device and gtk.battery_icons_style != 'solaar':
208 _ignore, _ignore, name, device_status = _picked_device
209 battery_level = device_status.get(_K.BATTERY_LEVEL)
210 battery_charging = device_status.get(_K.BATTERY_CHARGING)
211 tray_icon_name = _icons.battery(battery_level, battery_charging)
212
213 description = '%s: %s' % (name, device_status.to_string())
214 else:
215 # there may be a receiver, but no peripherals
216 tray_icon_name = _icons.TRAY_OKAY if _devices_info else _icons.TRAY_INIT
217
218 description_lines = _generate_description_lines()
219 description = '\n'.join(description_lines).rstrip('\n')
220
221 # icon_file = _icons.icon_file(icon_name, _TRAY_ICON_SIZE)
222 _icon.set_icon_full(_icon_file(tray_icon_name), description)
223
224 def _update_menu_icon(image_widget, icon_name):
225 image_widget.set_from_icon_name(icon_name, _MENU_ICON_SIZE)
226 # icon_file = _icons.icon_file(icon_name, _MENU_ICON_SIZE)
227 # image_widget.set_from_file(icon_file)
228 # image_widget.set_pixel_size(_TRAY_ICON_SIZE)
229
230 def attention(reason=None):
231 if _icon.get_status() != AppIndicator3.IndicatorStatus.ATTENTION:
232 # _icon.set_attention_icon_full(_icon_file(_icons.TRAY_ATTENTION), reason or '') # works poorly for XFCe 16
233 _icon.set_status(AppIndicator3.IndicatorStatus.ATTENTION)
234 GLib.timeout_add(10 * 1000, _icon.set_status, AppIndicator3.IndicatorStatus.ACTIVE)
235
236 except ImportError:
237
238 if _log.isEnabledFor(_DEBUG):
239 _log.debug('using StatusIcon')
240
241 def _create(menu):
242 icon = Gtk.StatusIcon.new_from_icon_name(_icons.TRAY_INIT)
243 icon.set_name(NAME)
244 icon.set_title(NAME)
245 icon.set_tooltip_text(NAME)
246 icon.connect('activate', _window_toggle)
247 icon.connect('scroll-event', _scroll)
248 icon.connect('popup-menu', lambda icon, button, time: menu.popup(None, None, icon.position_menu, icon, button, time))
249
250 return icon
251
252 def _hide(icon):
253 icon.set_visible(False)
254
255 def _show(icon):
256 icon.set_visible(True)
257
258 def _update_tray_icon():
259 tooltip_lines = _generate_tooltip_lines()
260 tooltip = '\n'.join(tooltip_lines).rstrip('\n')
261 _icon.set_tooltip_markup(tooltip)
262
263 if _picked_device and gtk.battery_icons_style != 'solaar':
264 _ignore, _ignore, name, device_status = _picked_device
265 battery_level = device_status.get(_K.BATTERY_LEVEL)
266 battery_charging = device_status.get(_K.BATTERY_CHARGING)
267 tray_icon_name = _icons.battery(battery_level, battery_charging)
268 else:
269 # there may be a receiver, but no peripherals
270 tray_icon_name = _icons.TRAY_OKAY if _devices_info else _icons.TRAY_ATTENTION
271 _icon.set_from_icon_name(tray_icon_name)
272
273 def _update_menu_icon(image_widget, icon_name):
274 image_widget.set_from_icon_name(icon_name, _MENU_ICON_SIZE)
275
276 _icon_before_attention = None
277
278 def _blink(count):
279 global _icon_before_attention
280 if count % 2:
281 _icon.set_from_icon_name(_icons.TRAY_ATTENTION)
282 else:
283 _icon.set_from_icon_name(_icon_before_attention)
284
285 if count > 0:
286 GLib.timeout_add(1000, _blink, count - 1)
287 else:
288 _icon_before_attention = None
289
290 def attention(reason=None):
291 global _icon_before_attention
292 if _icon_before_attention is None:
293 _icon_before_attention = _icon.get_icon_name()
294 GLib.idle_add(_blink, 9)
295
296
297 #
298 #
299 #
300
301
302 def _generate_tooltip_lines():
303 if not _devices_info:
304 yield '<b>%s</b>: ' % NAME + _('no receiver')
305 return
306
307 yield from _generate_description_lines()
308
309
310 def _generate_description_lines():
311 if not _devices_info:
312 yield _('no receiver')
313 return
314
315 for _ignore, number, name, status in _devices_info:
316 if number is None: # receiver
317 continue
318
319 p = status.to_string()
320 if p: # does it have any properties to print?
321 yield '<b>%s</b>' % name
322 if status:
323 yield '\t%s' % p
324 else:
325 yield '\t%s <small>(' % p + _('offline') + ')</small>'
326 else:
327 if status:
328 yield '<b>%s</b> <small>(' % name + _('no status') + ')</small>'
329 else:
330 yield '<b>%s</b> <small>(' % name + _('offline') + ')</small>'
331 yield ''
332
333
334 def _pick_device_with_lowest_battery():
335 if not _devices_info:
336 return None
337
338 picked = None
339 picked_level = 1000
340
341 for info in _devices_info:
342 if info[1] is None: # is receiver
343 continue
344 level = info[-1].get(_K.BATTERY_LEVEL)
345 # print ("checking %s -> %s", info, level)
346 if level is not None and picked_level > level:
347 picked = info
348 picked_level = level or 0
349
350 if _log.isEnabledFor(_DEBUG):
351 _log.debug('picked device with lowest battery: %s', picked)
352
353 return picked
354
355
356 #
357 #
358 #
359
360
361 def _add_device(device):
362 assert device
363
364 index = 0
365 receiver_path = device.receiver.path if device.receiver is not None else device.path
366 if device.receiver is not None: # if receiver insert into devices for the receiver in device number order
367 for idx, (path, _ignore, _ignore, _ignore) in enumerate(_devices_info):
368 if path and path == receiver_path:
369 index = idx + 1 # the first entry matching the receiver serial should be for the receiver itself
370 break
371 while index < len(_devices_info):
372 path, number, _ignore, _ignore = _devices_info[index]
373 if not path == receiver_path:
374 break
375 assert number != device.number
376 if number > device.number:
377 break
378 index = index + 1
379
380 new_device_info = (receiver_path, device.number, device.name, device.status)
381 _devices_info.insert(index, new_device_info)
382
383 label_prefix = ' '
384 new_menu_item = Gtk.ImageMenuItem.new_with_label((label_prefix if device.number else '') + device.name)
385 new_menu_item.set_image(Gtk.Image())
386 new_menu_item.show_all()
387 new_menu_item.connect('activate', _window_popup, receiver_path, device.number)
388 _menu.insert(new_menu_item, index)
389
390 return index
391
392
393 def _remove_device(index):
394 assert index is not None
395
396 menu_items = _menu.get_children()
397 _menu.remove(menu_items[index])
398
399 removed_device = _devices_info.pop(index)
400 global _picked_device
401 if _picked_device and _picked_device[0:2] == removed_device[0:2]:
402 # the current pick was unpaired
403 _picked_device = None
404
405
406 def _add_receiver(receiver):
407 index = len(_devices_info)
408
409 new_receiver_info = (receiver.path, None, receiver.name, None)
410 _devices_info.insert(index, new_receiver_info)
411
412 new_menu_item = Gtk.ImageMenuItem.new_with_label(receiver.name)
413 icon_set = _icons.device_icon_set(receiver.name)
414 new_menu_item.set_image(Gtk.Image().new_from_icon_name(icon_set.names[0], _MENU_ICON_SIZE))
415 new_menu_item.show_all()
416 new_menu_item.connect('activate', _window_popup, receiver.path)
417 _menu.insert(new_menu_item, index)
418
419 return 0
420
421
422 def _remove_receiver(receiver):
423 index = 0
424 # remove all entries in devices_info that match this receiver
425 while index < len(_devices_info):
426 path, _ignore, _ignore, _ignore = _devices_info[index]
427 if path == receiver.path:
428 _remove_device(index)
429 else:
430 index += 1
431
432
433 def _update_menu_item(index, device):
434 if device is None or device.status is None:
435 _log.warn('updating an inactive device %s, assuming disconnected', device)
436 return None
437
438 menu_items = _menu.get_children()
439 menu_item = menu_items[index]
440
441 level = device.status.get(_K.BATTERY_LEVEL)
442 charging = device.status.get(_K.BATTERY_CHARGING)
443 icon_name = _icons.battery(level, charging)
444
445 image_widget = menu_item.get_image()
446 image_widget.set_sensitive(bool(device.online))
447 _update_menu_icon(image_widget, icon_name)
448
449
450 #
451 #
452 #
453
454 # for which device to show the battery info in systray, if more than one
455 # it's actually an entry in _devices_info
456 _picked_device = None
457
458 # cached list of devices and some of their properties
459 # contains tuples of (receiver path, device number, name, status)
460 _devices_info = []
461
462 _menu = None
463 _icon = None
464
465
466 def init(_quit_handler):
467 global _menu, _icon
468 assert _menu is None
469 _menu = _create_menu(_quit_handler)
470 assert _icon is None
471 _icon = _create(_menu)
472 update()
473
474
475 def destroy():
476 global _icon, _menu, _devices_info
477 if _icon is not None:
478 i, _icon = _icon, None
479 _hide(i)
480 i = None
481
482 _icon = None
483 _menu = None
484 _devices_info = None
485
486
487 def update(device=None):
488 if _icon is None:
489 return
490
491 if device is not None:
492 if device.kind is None:
493 # receiver
494 is_alive = bool(device)
495 receiver_path = device.path
496 if is_alive:
497 index = None
498 for idx, (path, _ignore, _ignore, _ignore) in enumerate(_devices_info):
499 if path == receiver_path:
500 index = idx
501 break
502
503 if index is None:
504 _add_receiver(device)
505 else:
506 _remove_receiver(device)
507
508 else:
509 # peripheral
510 is_paired = bool(device)
511 receiver_path = device.receiver.path if device.receiver is not None else device.path
512 index = None
513 for idx, (path, number, _ignore, _ignore) in enumerate(_devices_info):
514 if path == receiver_path and number == device.number:
515 index = idx
516
517 if is_paired:
518 if index is None:
519 index = _add_device(device)
520 _update_menu_item(index, device)
521 else: # was just unpaired or unplugged
522 if index is not None:
523 _remove_device(index)
524
525 menu_items = _menu.get_children()
526 no_receivers_index = len(_devices_info)
527 menu_items[no_receivers_index].set_visible(not _devices_info)
528
529 global _picked_device
530 if (not _picked_device or _last_scroll == 0) and device is not None and device.kind is not None:
531 # if it's just a receiver update, it's unlikely the picked device would change
532 _picked_device = _pick_device_with_lowest_battery()
533
534 _update_tray_icon()
535
536 if _icon:
537 if not _devices_info:
538 _hide(_icon)
539 else:
540 _show(_icon)
541
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/solaar/ui/tray.py b/lib/solaar/ui/tray.py
--- a/lib/solaar/ui/tray.py
+++ b/lib/solaar/ui/tray.py
@@ -84,9 +84,7 @@
# ignore all other directions
return
- if len(_devices_info) < 4:
- # don't bother with scrolling when there's only one receiver
- # with only one or two devices
+ if sum(map(lambda i: i[1] is not None, _devices_info)) < 2: # don't bother even trying to scroll if less than two devices
return
# scroll events come way too fast (at least 5-6 at once)
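For context on the replacement condition: `_devices_info` holds `(receiver_path, device_number, name, status)` tuples and receiver entries use `None` as the device number, so `sum(map(lambda i: i[1] is not None, _devices_info))` counts only paired peripherals and lets scrolling work as soon as two of them exist, regardless of how many receiver rows are present. A small sketch with made-up entries (paths and names are illustrative only):

```
# Hypothetical _devices_info contents: (receiver_path, device_number, name, status).
_devices_info = [
    ('/dev/hidraw1', None, 'Unifying Receiver', None),  # receiver entry, number is None
    ('/dev/hidraw1', 1, 'MX Keys', {}),                 # peripheral
    ('/dev/hidraw1', 2, 'MX Master 3', {}),             # peripheral
]

peripherals = sum(map(lambda i: i[1] is not None, _devices_info))
print(peripherals)       # 2
print(peripherals < 2)   # False, so the scroll handler no longer returns early
```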
| {"golden_diff": "diff --git a/lib/solaar/ui/tray.py b/lib/solaar/ui/tray.py\n--- a/lib/solaar/ui/tray.py\n+++ b/lib/solaar/ui/tray.py\n@@ -84,9 +84,7 @@\n # ignore all other directions\n return\n \n- if len(_devices_info) < 4:\n- # don't bother with scrolling when there's only one receiver\n- # with only one or two devices\n+ if sum(map(lambda i: i[1] is not None, _devices_info)) < 2: # don't bother even trying to scroll if less than two devices\n return\n \n # scroll events come way too fast (at least 5-6 at once)\n", "issue": "Allow scrolling over tray icon to switch between icons when there are fewer than 4 devices\n**Information**\r\n<!-- Please update to Solaar from this repository before asking for a new feature. -->\r\n- Solaar version (`solaar --version` and `git describe --tags`): solaar 1.1.1, 1.1.1-135-gd115ade\r\n\r\n\r\n\r\n**Is your feature request related to a problem? Please describe.**\r\nIt would be useful for me to be able to switch between which device the tray icon shows, even though I have only two devices.\r\n[This commit](https://github.com/pwr-Solaar/Solaar/commit/6a66370ffe54a6b08f6f4b0e6fc76b0da2a23d30) has introduced such an ability, but for some reason it doesn't enable it for people with fewer than 4 devices.\r\nSpecifically, this is the block that does the check and where I think `4` should be replaced with `2` (or `3` if the receiver itself is in that list as well, although ideally those should probably be filtered out before the check):\r\nhttps://github.com/pwr-Solaar/Solaar/blob/1ace3ef4f4cc90520d5607fde073e6cd1b66d56a/lib/solaar/ui/tray.py#L87-L90\r\n\n", "before_files": [{"content": "# -*- python-mode -*-\n\n## Copyright (C) 2012-2013 Daniel Pavel\n##\n## This program is free software; you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Software Foundation; either version 2 of the License, or\n## (at your option) any later version.\n##\n## This program is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n## GNU General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License along\n## with this program; if not, write to the Free Software Foundation, Inc.,\n## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nimport os\n\nfrom logging import DEBUG as _DEBUG\nfrom logging import getLogger\nfrom time import time as _timestamp\n\nimport solaar.gtk as gtk\n\nfrom gi.repository import GLib, Gtk\nfrom gi.repository.Gdk import ScrollDirection\nfrom logitech_receiver.status import KEYS as _K\nfrom solaar import NAME\nfrom solaar.i18n import _\n\nfrom . 
import icons as _icons\nfrom .about import show_window as _show_about_window\nfrom .window import popup as _window_popup\nfrom .window import toggle as _window_toggle\n\n_log = getLogger(__name__)\ndel getLogger\n\n#\n# constants\n#\n\n_TRAY_ICON_SIZE = 48\n_MENU_ICON_SIZE = Gtk.IconSize.LARGE_TOOLBAR\n\n#\n#\n#\n\n\ndef _create_menu(quit_handler):\n menu = Gtk.Menu()\n\n # per-device menu entries will be generated as-needed\n\n no_receiver = Gtk.MenuItem.new_with_label(_('No Logitech device found'))\n no_receiver.set_sensitive(False)\n menu.append(no_receiver)\n menu.append(Gtk.SeparatorMenuItem.new())\n\n from .action import make\n menu.append(make('help-about', _('About %s') % NAME, _show_about_window, stock_id='help-about').create_menu_item())\n menu.append(make('application-exit', _('Quit %s') % NAME, quit_handler, stock_id='application-exit').create_menu_item())\n del make\n\n menu.show_all()\n\n return menu\n\n\n_last_scroll = 0\n\n\ndef _scroll(tray_icon, event, direction=None):\n if direction is None:\n direction = event.direction\n now = event.time / 1000.0\n else:\n now = None\n\n if direction != ScrollDirection.UP and direction != ScrollDirection.DOWN:\n # ignore all other directions\n return\n\n if len(_devices_info) < 4:\n # don't bother with scrolling when there's only one receiver\n # with only one or two devices\n return\n\n # scroll events come way too fast (at least 5-6 at once)\n # so take a little break between them\n global _last_scroll\n now = now or _timestamp()\n if now - _last_scroll < 0.33: # seconds\n return\n _last_scroll = now\n\n # if _log.isEnabledFor(_DEBUG):\n # _log.debug(\"scroll direction %s\", direction)\n\n global _picked_device\n candidate = None\n\n if _picked_device is None:\n for info in _devices_info:\n # pick first peripheral found\n if info[1] is not None:\n candidate = info\n break\n else:\n found = False\n for info in _devices_info:\n if not info[1]:\n # only conside peripherals\n continue\n # compare peripherals\n if info[0:2] == _picked_device[0:2]:\n if direction == ScrollDirection.UP and candidate:\n # select previous device\n break\n found = True\n else:\n if found:\n candidate = info\n if direction == ScrollDirection.DOWN:\n break\n # if direction is up, but no candidate found before _picked,\n # let it run through all candidates, will get stuck with the last one\n else:\n if direction == ScrollDirection.DOWN:\n # only use the first one, in case no candidates are after _picked\n if candidate is None:\n candidate = info\n else:\n candidate = info\n\n # if the last _picked_device is gone, clear it\n # the candidate will be either the first or last one remaining,\n # depending on the scroll direction\n if not found:\n _picked_device = None\n\n _picked_device = candidate or _picked_device\n if _log.isEnabledFor(_DEBUG):\n _log.debug('scroll: picked %s', _picked_device)\n _update_tray_icon()\n\n\ntry:\n import gi\n try:\n gi.require_version('AyatanaAppIndicator3', '0.1')\n ayatana_appindicator_found = True\n except ValueError:\n try:\n gi.require_version('AppIndicator3', '0.1')\n ayatana_appindicator_found = False\n except ValueError:\n # treat unavailable versions the same as unavailable packages\n raise ImportError\n\n if ayatana_appindicator_found:\n from gi.repository import AyatanaAppIndicator3 as AppIndicator3\n else:\n from gi.repository import AppIndicator3\n\n if _log.isEnabledFor(_DEBUG):\n _log.debug('using %sAppIndicator3' % ('Ayatana ' if ayatana_appindicator_found else ''))\n\n # Defense against AppIndicator3 bug that treats 
files in current directory as icon files\n # https://bugs.launchpad.net/ubuntu/+source/libappindicator/+bug/1363277\n # Defense against bug that shows up in XFCE 4.16 where icons are not upscaled\n def _icon_file(icon_name):\n if gtk.tray_icon_size is None and not os.path.isfile(icon_name):\n return icon_name\n icon_info = Gtk.IconTheme.get_default().lookup_icon(\n icon_name, gtk.tray_icon_size or _TRAY_ICON_SIZE, Gtk.IconLookupFlags.FORCE_SVG\n )\n return icon_info.get_filename() if icon_info else icon_name\n\n def _create(menu):\n _icons._init_icon_paths()\n theme_paths = Gtk.IconTheme.get_default().get_search_path()\n\n ind = AppIndicator3.Indicator.new_with_path(\n 'indicator-solaar', _icon_file(_icons.TRAY_INIT), AppIndicator3.IndicatorCategory.HARDWARE, theme_paths[0]\n )\n ind.set_title(NAME)\n ind.set_status(AppIndicator3.IndicatorStatus.ACTIVE)\n # ind.set_attention_icon_full(_icon_file(_icons.TRAY_ATTENTION), '') # works poorly for XFCE 16\n # ind.set_label(NAME, NAME)\n\n ind.set_menu(menu)\n ind.connect('scroll-event', _scroll)\n\n return ind\n\n def _hide(indicator):\n indicator.set_status(AppIndicator3.IndicatorStatus.PASSIVE)\n\n def _show(indicator):\n indicator.set_status(AppIndicator3.IndicatorStatus.ACTIVE)\n\n def _update_tray_icon():\n if _picked_device and gtk.battery_icons_style != 'solaar':\n _ignore, _ignore, name, device_status = _picked_device\n battery_level = device_status.get(_K.BATTERY_LEVEL)\n battery_charging = device_status.get(_K.BATTERY_CHARGING)\n tray_icon_name = _icons.battery(battery_level, battery_charging)\n\n description = '%s: %s' % (name, device_status.to_string())\n else:\n # there may be a receiver, but no peripherals\n tray_icon_name = _icons.TRAY_OKAY if _devices_info else _icons.TRAY_INIT\n\n description_lines = _generate_description_lines()\n description = '\\n'.join(description_lines).rstrip('\\n')\n\n # icon_file = _icons.icon_file(icon_name, _TRAY_ICON_SIZE)\n _icon.set_icon_full(_icon_file(tray_icon_name), description)\n\n def _update_menu_icon(image_widget, icon_name):\n image_widget.set_from_icon_name(icon_name, _MENU_ICON_SIZE)\n # icon_file = _icons.icon_file(icon_name, _MENU_ICON_SIZE)\n # image_widget.set_from_file(icon_file)\n # image_widget.set_pixel_size(_TRAY_ICON_SIZE)\n\n def attention(reason=None):\n if _icon.get_status() != AppIndicator3.IndicatorStatus.ATTENTION:\n # _icon.set_attention_icon_full(_icon_file(_icons.TRAY_ATTENTION), reason or '') # works poorly for XFCe 16\n _icon.set_status(AppIndicator3.IndicatorStatus.ATTENTION)\n GLib.timeout_add(10 * 1000, _icon.set_status, AppIndicator3.IndicatorStatus.ACTIVE)\n\nexcept ImportError:\n\n if _log.isEnabledFor(_DEBUG):\n _log.debug('using StatusIcon')\n\n def _create(menu):\n icon = Gtk.StatusIcon.new_from_icon_name(_icons.TRAY_INIT)\n icon.set_name(NAME)\n icon.set_title(NAME)\n icon.set_tooltip_text(NAME)\n icon.connect('activate', _window_toggle)\n icon.connect('scroll-event', _scroll)\n icon.connect('popup-menu', lambda icon, button, time: menu.popup(None, None, icon.position_menu, icon, button, time))\n\n return icon\n\n def _hide(icon):\n icon.set_visible(False)\n\n def _show(icon):\n icon.set_visible(True)\n\n def _update_tray_icon():\n tooltip_lines = _generate_tooltip_lines()\n tooltip = '\\n'.join(tooltip_lines).rstrip('\\n')\n _icon.set_tooltip_markup(tooltip)\n\n if _picked_device and gtk.battery_icons_style != 'solaar':\n _ignore, _ignore, name, device_status = _picked_device\n battery_level = device_status.get(_K.BATTERY_LEVEL)\n battery_charging = 
device_status.get(_K.BATTERY_CHARGING)\n tray_icon_name = _icons.battery(battery_level, battery_charging)\n else:\n # there may be a receiver, but no peripherals\n tray_icon_name = _icons.TRAY_OKAY if _devices_info else _icons.TRAY_ATTENTION\n _icon.set_from_icon_name(tray_icon_name)\n\n def _update_menu_icon(image_widget, icon_name):\n image_widget.set_from_icon_name(icon_name, _MENU_ICON_SIZE)\n\n _icon_before_attention = None\n\n def _blink(count):\n global _icon_before_attention\n if count % 2:\n _icon.set_from_icon_name(_icons.TRAY_ATTENTION)\n else:\n _icon.set_from_icon_name(_icon_before_attention)\n\n if count > 0:\n GLib.timeout_add(1000, _blink, count - 1)\n else:\n _icon_before_attention = None\n\n def attention(reason=None):\n global _icon_before_attention\n if _icon_before_attention is None:\n _icon_before_attention = _icon.get_icon_name()\n GLib.idle_add(_blink, 9)\n\n\n#\n#\n#\n\n\ndef _generate_tooltip_lines():\n if not _devices_info:\n yield '<b>%s</b>: ' % NAME + _('no receiver')\n return\n\n yield from _generate_description_lines()\n\n\ndef _generate_description_lines():\n if not _devices_info:\n yield _('no receiver')\n return\n\n for _ignore, number, name, status in _devices_info:\n if number is None: # receiver\n continue\n\n p = status.to_string()\n if p: # does it have any properties to print?\n yield '<b>%s</b>' % name\n if status:\n yield '\\t%s' % p\n else:\n yield '\\t%s <small>(' % p + _('offline') + ')</small>'\n else:\n if status:\n yield '<b>%s</b> <small>(' % name + _('no status') + ')</small>'\n else:\n yield '<b>%s</b> <small>(' % name + _('offline') + ')</small>'\n yield ''\n\n\ndef _pick_device_with_lowest_battery():\n if not _devices_info:\n return None\n\n picked = None\n picked_level = 1000\n\n for info in _devices_info:\n if info[1] is None: # is receiver\n continue\n level = info[-1].get(_K.BATTERY_LEVEL)\n # print (\"checking %s -> %s\", info, level)\n if level is not None and picked_level > level:\n picked = info\n picked_level = level or 0\n\n if _log.isEnabledFor(_DEBUG):\n _log.debug('picked device with lowest battery: %s', picked)\n\n return picked\n\n\n#\n#\n#\n\n\ndef _add_device(device):\n assert device\n\n index = 0\n receiver_path = device.receiver.path if device.receiver is not None else device.path\n if device.receiver is not None: # if receiver insert into devices for the receiver in device number order\n for idx, (path, _ignore, _ignore, _ignore) in enumerate(_devices_info):\n if path and path == receiver_path:\n index = idx + 1 # the first entry matching the receiver serial should be for the receiver itself\n break\n while index < len(_devices_info):\n path, number, _ignore, _ignore = _devices_info[index]\n if not path == receiver_path:\n break\n assert number != device.number\n if number > device.number:\n break\n index = index + 1\n\n new_device_info = (receiver_path, device.number, device.name, device.status)\n _devices_info.insert(index, new_device_info)\n\n label_prefix = ' '\n new_menu_item = Gtk.ImageMenuItem.new_with_label((label_prefix if device.number else '') + device.name)\n new_menu_item.set_image(Gtk.Image())\n new_menu_item.show_all()\n new_menu_item.connect('activate', _window_popup, receiver_path, device.number)\n _menu.insert(new_menu_item, index)\n\n return index\n\n\ndef _remove_device(index):\n assert index is not None\n\n menu_items = _menu.get_children()\n _menu.remove(menu_items[index])\n\n removed_device = _devices_info.pop(index)\n global _picked_device\n if _picked_device and _picked_device[0:2] == 
removed_device[0:2]:\n # the current pick was unpaired\n _picked_device = None\n\n\ndef _add_receiver(receiver):\n index = len(_devices_info)\n\n new_receiver_info = (receiver.path, None, receiver.name, None)\n _devices_info.insert(index, new_receiver_info)\n\n new_menu_item = Gtk.ImageMenuItem.new_with_label(receiver.name)\n icon_set = _icons.device_icon_set(receiver.name)\n new_menu_item.set_image(Gtk.Image().new_from_icon_name(icon_set.names[0], _MENU_ICON_SIZE))\n new_menu_item.show_all()\n new_menu_item.connect('activate', _window_popup, receiver.path)\n _menu.insert(new_menu_item, index)\n\n return 0\n\n\ndef _remove_receiver(receiver):\n index = 0\n # remove all entries in devices_info that match this receiver\n while index < len(_devices_info):\n path, _ignore, _ignore, _ignore = _devices_info[index]\n if path == receiver.path:\n _remove_device(index)\n else:\n index += 1\n\n\ndef _update_menu_item(index, device):\n if device is None or device.status is None:\n _log.warn('updating an inactive device %s, assuming disconnected', device)\n return None\n\n menu_items = _menu.get_children()\n menu_item = menu_items[index]\n\n level = device.status.get(_K.BATTERY_LEVEL)\n charging = device.status.get(_K.BATTERY_CHARGING)\n icon_name = _icons.battery(level, charging)\n\n image_widget = menu_item.get_image()\n image_widget.set_sensitive(bool(device.online))\n _update_menu_icon(image_widget, icon_name)\n\n\n#\n#\n#\n\n# for which device to show the battery info in systray, if more than one\n# it's actually an entry in _devices_info\n_picked_device = None\n\n# cached list of devices and some of their properties\n# contains tuples of (receiver path, device number, name, status)\n_devices_info = []\n\n_menu = None\n_icon = None\n\n\ndef init(_quit_handler):\n global _menu, _icon\n assert _menu is None\n _menu = _create_menu(_quit_handler)\n assert _icon is None\n _icon = _create(_menu)\n update()\n\n\ndef destroy():\n global _icon, _menu, _devices_info\n if _icon is not None:\n i, _icon = _icon, None\n _hide(i)\n i = None\n\n _icon = None\n _menu = None\n _devices_info = None\n\n\ndef update(device=None):\n if _icon is None:\n return\n\n if device is not None:\n if device.kind is None:\n # receiver\n is_alive = bool(device)\n receiver_path = device.path\n if is_alive:\n index = None\n for idx, (path, _ignore, _ignore, _ignore) in enumerate(_devices_info):\n if path == receiver_path:\n index = idx\n break\n\n if index is None:\n _add_receiver(device)\n else:\n _remove_receiver(device)\n\n else:\n # peripheral\n is_paired = bool(device)\n receiver_path = device.receiver.path if device.receiver is not None else device.path\n index = None\n for idx, (path, number, _ignore, _ignore) in enumerate(_devices_info):\n if path == receiver_path and number == device.number:\n index = idx\n\n if is_paired:\n if index is None:\n index = _add_device(device)\n _update_menu_item(index, device)\n else: # was just unpaired or unplugged\n if index is not None:\n _remove_device(index)\n\n menu_items = _menu.get_children()\n no_receivers_index = len(_devices_info)\n menu_items[no_receivers_index].set_visible(not _devices_info)\n\n global _picked_device\n if (not _picked_device or _last_scroll == 0) and device is not None and device.kind is not None:\n # if it's just a receiver update, it's unlikely the picked device would change\n _picked_device = _pick_device_with_lowest_battery()\n\n _update_tray_icon()\n\n if _icon:\n if not _devices_info:\n _hide(_icon)\n else:\n _show(_icon)\n", "path": 
"lib/solaar/ui/tray.py"}], "after_files": [{"content": "# -*- python-mode -*-\n\n## Copyright (C) 2012-2013 Daniel Pavel\n##\n## This program is free software; you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Software Foundation; either version 2 of the License, or\n## (at your option) any later version.\n##\n## This program is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n## GNU General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License along\n## with this program; if not, write to the Free Software Foundation, Inc.,\n## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nimport os\n\nfrom logging import DEBUG as _DEBUG\nfrom logging import getLogger\nfrom time import time as _timestamp\n\nimport solaar.gtk as gtk\n\nfrom gi.repository import GLib, Gtk\nfrom gi.repository.Gdk import ScrollDirection\nfrom logitech_receiver.status import KEYS as _K\nfrom solaar import NAME\nfrom solaar.i18n import _\n\nfrom . import icons as _icons\nfrom .about import show_window as _show_about_window\nfrom .window import popup as _window_popup\nfrom .window import toggle as _window_toggle\n\n_log = getLogger(__name__)\ndel getLogger\n\n#\n# constants\n#\n\n_TRAY_ICON_SIZE = 48\n_MENU_ICON_SIZE = Gtk.IconSize.LARGE_TOOLBAR\n\n#\n#\n#\n\n\ndef _create_menu(quit_handler):\n menu = Gtk.Menu()\n\n # per-device menu entries will be generated as-needed\n\n no_receiver = Gtk.MenuItem.new_with_label(_('No Logitech device found'))\n no_receiver.set_sensitive(False)\n menu.append(no_receiver)\n menu.append(Gtk.SeparatorMenuItem.new())\n\n from .action import make\n menu.append(make('help-about', _('About %s') % NAME, _show_about_window, stock_id='help-about').create_menu_item())\n menu.append(make('application-exit', _('Quit %s') % NAME, quit_handler, stock_id='application-exit').create_menu_item())\n del make\n\n menu.show_all()\n\n return menu\n\n\n_last_scroll = 0\n\n\ndef _scroll(tray_icon, event, direction=None):\n if direction is None:\n direction = event.direction\n now = event.time / 1000.0\n else:\n now = None\n\n if direction != ScrollDirection.UP and direction != ScrollDirection.DOWN:\n # ignore all other directions\n return\n\n if sum(map(lambda i: i[1] is not None, _devices_info)) < 2: # don't bother even trying to scroll if less than two devices\n return\n\n # scroll events come way too fast (at least 5-6 at once)\n # so take a little break between them\n global _last_scroll\n now = now or _timestamp()\n if now - _last_scroll < 0.33: # seconds\n return\n _last_scroll = now\n\n # if _log.isEnabledFor(_DEBUG):\n # _log.debug(\"scroll direction %s\", direction)\n\n global _picked_device\n candidate = None\n\n if _picked_device is None:\n for info in _devices_info:\n # pick first peripheral found\n if info[1] is not None:\n candidate = info\n break\n else:\n found = False\n for info in _devices_info:\n if not info[1]:\n # only conside peripherals\n continue\n # compare peripherals\n if info[0:2] == _picked_device[0:2]:\n if direction == ScrollDirection.UP and candidate:\n # select previous device\n break\n found = True\n else:\n if found:\n candidate = info\n if direction == ScrollDirection.DOWN:\n break\n # if direction is up, but no candidate found before _picked,\n # let it run through all candidates, will get stuck with the last 
one\n else:\n if direction == ScrollDirection.DOWN:\n # only use the first one, in case no candidates are after _picked\n if candidate is None:\n candidate = info\n else:\n candidate = info\n\n # if the last _picked_device is gone, clear it\n # the candidate will be either the first or last one remaining,\n # depending on the scroll direction\n if not found:\n _picked_device = None\n\n _picked_device = candidate or _picked_device\n if _log.isEnabledFor(_DEBUG):\n _log.debug('scroll: picked %s', _picked_device)\n _update_tray_icon()\n\n\ntry:\n import gi\n try:\n gi.require_version('AyatanaAppIndicator3', '0.1')\n ayatana_appindicator_found = True\n except ValueError:\n try:\n gi.require_version('AppIndicator3', '0.1')\n ayatana_appindicator_found = False\n except ValueError:\n # treat unavailable versions the same as unavailable packages\n raise ImportError\n\n if ayatana_appindicator_found:\n from gi.repository import AyatanaAppIndicator3 as AppIndicator3\n else:\n from gi.repository import AppIndicator3\n\n if _log.isEnabledFor(_DEBUG):\n _log.debug('using %sAppIndicator3' % ('Ayatana ' if ayatana_appindicator_found else ''))\n\n # Defense against AppIndicator3 bug that treats files in current directory as icon files\n # https://bugs.launchpad.net/ubuntu/+source/libappindicator/+bug/1363277\n # Defense against bug that shows up in XFCE 4.16 where icons are not upscaled\n def _icon_file(icon_name):\n if gtk.tray_icon_size is None and not os.path.isfile(icon_name):\n return icon_name\n icon_info = Gtk.IconTheme.get_default().lookup_icon(\n icon_name, gtk.tray_icon_size or _TRAY_ICON_SIZE, Gtk.IconLookupFlags.FORCE_SVG\n )\n return icon_info.get_filename() if icon_info else icon_name\n\n def _create(menu):\n _icons._init_icon_paths()\n theme_paths = Gtk.IconTheme.get_default().get_search_path()\n\n ind = AppIndicator3.Indicator.new_with_path(\n 'indicator-solaar', _icon_file(_icons.TRAY_INIT), AppIndicator3.IndicatorCategory.HARDWARE, theme_paths[0]\n )\n ind.set_title(NAME)\n ind.set_status(AppIndicator3.IndicatorStatus.ACTIVE)\n # ind.set_attention_icon_full(_icon_file(_icons.TRAY_ATTENTION), '') # works poorly for XFCE 16\n # ind.set_label(NAME, NAME)\n\n ind.set_menu(menu)\n ind.connect('scroll-event', _scroll)\n\n return ind\n\n def _hide(indicator):\n indicator.set_status(AppIndicator3.IndicatorStatus.PASSIVE)\n\n def _show(indicator):\n indicator.set_status(AppIndicator3.IndicatorStatus.ACTIVE)\n\n def _update_tray_icon():\n if _picked_device and gtk.battery_icons_style != 'solaar':\n _ignore, _ignore, name, device_status = _picked_device\n battery_level = device_status.get(_K.BATTERY_LEVEL)\n battery_charging = device_status.get(_K.BATTERY_CHARGING)\n tray_icon_name = _icons.battery(battery_level, battery_charging)\n\n description = '%s: %s' % (name, device_status.to_string())\n else:\n # there may be a receiver, but no peripherals\n tray_icon_name = _icons.TRAY_OKAY if _devices_info else _icons.TRAY_INIT\n\n description_lines = _generate_description_lines()\n description = '\\n'.join(description_lines).rstrip('\\n')\n\n # icon_file = _icons.icon_file(icon_name, _TRAY_ICON_SIZE)\n _icon.set_icon_full(_icon_file(tray_icon_name), description)\n\n def _update_menu_icon(image_widget, icon_name):\n image_widget.set_from_icon_name(icon_name, _MENU_ICON_SIZE)\n # icon_file = _icons.icon_file(icon_name, _MENU_ICON_SIZE)\n # image_widget.set_from_file(icon_file)\n # image_widget.set_pixel_size(_TRAY_ICON_SIZE)\n\n def attention(reason=None):\n if _icon.get_status() != 
AppIndicator3.IndicatorStatus.ATTENTION:\n # _icon.set_attention_icon_full(_icon_file(_icons.TRAY_ATTENTION), reason or '') # works poorly for XFCe 16\n _icon.set_status(AppIndicator3.IndicatorStatus.ATTENTION)\n GLib.timeout_add(10 * 1000, _icon.set_status, AppIndicator3.IndicatorStatus.ACTIVE)\n\nexcept ImportError:\n\n if _log.isEnabledFor(_DEBUG):\n _log.debug('using StatusIcon')\n\n def _create(menu):\n icon = Gtk.StatusIcon.new_from_icon_name(_icons.TRAY_INIT)\n icon.set_name(NAME)\n icon.set_title(NAME)\n icon.set_tooltip_text(NAME)\n icon.connect('activate', _window_toggle)\n icon.connect('scroll-event', _scroll)\n icon.connect('popup-menu', lambda icon, button, time: menu.popup(None, None, icon.position_menu, icon, button, time))\n\n return icon\n\n def _hide(icon):\n icon.set_visible(False)\n\n def _show(icon):\n icon.set_visible(True)\n\n def _update_tray_icon():\n tooltip_lines = _generate_tooltip_lines()\n tooltip = '\\n'.join(tooltip_lines).rstrip('\\n')\n _icon.set_tooltip_markup(tooltip)\n\n if _picked_device and gtk.battery_icons_style != 'solaar':\n _ignore, _ignore, name, device_status = _picked_device\n battery_level = device_status.get(_K.BATTERY_LEVEL)\n battery_charging = device_status.get(_K.BATTERY_CHARGING)\n tray_icon_name = _icons.battery(battery_level, battery_charging)\n else:\n # there may be a receiver, but no peripherals\n tray_icon_name = _icons.TRAY_OKAY if _devices_info else _icons.TRAY_ATTENTION\n _icon.set_from_icon_name(tray_icon_name)\n\n def _update_menu_icon(image_widget, icon_name):\n image_widget.set_from_icon_name(icon_name, _MENU_ICON_SIZE)\n\n _icon_before_attention = None\n\n def _blink(count):\n global _icon_before_attention\n if count % 2:\n _icon.set_from_icon_name(_icons.TRAY_ATTENTION)\n else:\n _icon.set_from_icon_name(_icon_before_attention)\n\n if count > 0:\n GLib.timeout_add(1000, _blink, count - 1)\n else:\n _icon_before_attention = None\n\n def attention(reason=None):\n global _icon_before_attention\n if _icon_before_attention is None:\n _icon_before_attention = _icon.get_icon_name()\n GLib.idle_add(_blink, 9)\n\n\n#\n#\n#\n\n\ndef _generate_tooltip_lines():\n if not _devices_info:\n yield '<b>%s</b>: ' % NAME + _('no receiver')\n return\n\n yield from _generate_description_lines()\n\n\ndef _generate_description_lines():\n if not _devices_info:\n yield _('no receiver')\n return\n\n for _ignore, number, name, status in _devices_info:\n if number is None: # receiver\n continue\n\n p = status.to_string()\n if p: # does it have any properties to print?\n yield '<b>%s</b>' % name\n if status:\n yield '\\t%s' % p\n else:\n yield '\\t%s <small>(' % p + _('offline') + ')</small>'\n else:\n if status:\n yield '<b>%s</b> <small>(' % name + _('no status') + ')</small>'\n else:\n yield '<b>%s</b> <small>(' % name + _('offline') + ')</small>'\n yield ''\n\n\ndef _pick_device_with_lowest_battery():\n if not _devices_info:\n return None\n\n picked = None\n picked_level = 1000\n\n for info in _devices_info:\n if info[1] is None: # is receiver\n continue\n level = info[-1].get(_K.BATTERY_LEVEL)\n # print (\"checking %s -> %s\", info, level)\n if level is not None and picked_level > level:\n picked = info\n picked_level = level or 0\n\n if _log.isEnabledFor(_DEBUG):\n _log.debug('picked device with lowest battery: %s', picked)\n\n return picked\n\n\n#\n#\n#\n\n\ndef _add_device(device):\n assert device\n\n index = 0\n receiver_path = device.receiver.path if device.receiver is not None else device.path\n if device.receiver is not None: # if receiver 
insert into devices for the receiver in device number order\n for idx, (path, _ignore, _ignore, _ignore) in enumerate(_devices_info):\n if path and path == receiver_path:\n index = idx + 1 # the first entry matching the receiver serial should be for the receiver itself\n break\n while index < len(_devices_info):\n path, number, _ignore, _ignore = _devices_info[index]\n if not path == receiver_path:\n break\n assert number != device.number\n if number > device.number:\n break\n index = index + 1\n\n new_device_info = (receiver_path, device.number, device.name, device.status)\n _devices_info.insert(index, new_device_info)\n\n label_prefix = ' '\n new_menu_item = Gtk.ImageMenuItem.new_with_label((label_prefix if device.number else '') + device.name)\n new_menu_item.set_image(Gtk.Image())\n new_menu_item.show_all()\n new_menu_item.connect('activate', _window_popup, receiver_path, device.number)\n _menu.insert(new_menu_item, index)\n\n return index\n\n\ndef _remove_device(index):\n assert index is not None\n\n menu_items = _menu.get_children()\n _menu.remove(menu_items[index])\n\n removed_device = _devices_info.pop(index)\n global _picked_device\n if _picked_device and _picked_device[0:2] == removed_device[0:2]:\n # the current pick was unpaired\n _picked_device = None\n\n\ndef _add_receiver(receiver):\n index = len(_devices_info)\n\n new_receiver_info = (receiver.path, None, receiver.name, None)\n _devices_info.insert(index, new_receiver_info)\n\n new_menu_item = Gtk.ImageMenuItem.new_with_label(receiver.name)\n icon_set = _icons.device_icon_set(receiver.name)\n new_menu_item.set_image(Gtk.Image().new_from_icon_name(icon_set.names[0], _MENU_ICON_SIZE))\n new_menu_item.show_all()\n new_menu_item.connect('activate', _window_popup, receiver.path)\n _menu.insert(new_menu_item, index)\n\n return 0\n\n\ndef _remove_receiver(receiver):\n index = 0\n # remove all entries in devices_info that match this receiver\n while index < len(_devices_info):\n path, _ignore, _ignore, _ignore = _devices_info[index]\n if path == receiver.path:\n _remove_device(index)\n else:\n index += 1\n\n\ndef _update_menu_item(index, device):\n if device is None or device.status is None:\n _log.warn('updating an inactive device %s, assuming disconnected', device)\n return None\n\n menu_items = _menu.get_children()\n menu_item = menu_items[index]\n\n level = device.status.get(_K.BATTERY_LEVEL)\n charging = device.status.get(_K.BATTERY_CHARGING)\n icon_name = _icons.battery(level, charging)\n\n image_widget = menu_item.get_image()\n image_widget.set_sensitive(bool(device.online))\n _update_menu_icon(image_widget, icon_name)\n\n\n#\n#\n#\n\n# for which device to show the battery info in systray, if more than one\n# it's actually an entry in _devices_info\n_picked_device = None\n\n# cached list of devices and some of their properties\n# contains tuples of (receiver path, device number, name, status)\n_devices_info = []\n\n_menu = None\n_icon = None\n\n\ndef init(_quit_handler):\n global _menu, _icon\n assert _menu is None\n _menu = _create_menu(_quit_handler)\n assert _icon is None\n _icon = _create(_menu)\n update()\n\n\ndef destroy():\n global _icon, _menu, _devices_info\n if _icon is not None:\n i, _icon = _icon, None\n _hide(i)\n i = None\n\n _icon = None\n _menu = None\n _devices_info = None\n\n\ndef update(device=None):\n if _icon is None:\n return\n\n if device is not None:\n if device.kind is None:\n # receiver\n is_alive = bool(device)\n receiver_path = device.path\n if is_alive:\n index = None\n for idx, (path, _ignore, 
_ignore, _ignore) in enumerate(_devices_info):\n if path == receiver_path:\n index = idx\n break\n\n if index is None:\n _add_receiver(device)\n else:\n _remove_receiver(device)\n\n else:\n # peripheral\n is_paired = bool(device)\n receiver_path = device.receiver.path if device.receiver is not None else device.path\n index = None\n for idx, (path, number, _ignore, _ignore) in enumerate(_devices_info):\n if path == receiver_path and number == device.number:\n index = idx\n\n if is_paired:\n if index is None:\n index = _add_device(device)\n _update_menu_item(index, device)\n else: # was just unpaired or unplugged\n if index is not None:\n _remove_device(index)\n\n menu_items = _menu.get_children()\n no_receivers_index = len(_devices_info)\n menu_items[no_receivers_index].set_visible(not _devices_info)\n\n global _picked_device\n if (not _picked_device or _last_scroll == 0) and device is not None and device.kind is not None:\n # if it's just a receiver update, it's unlikely the picked device would change\n _picked_device = _pick_device_with_lowest_battery()\n\n _update_tray_icon()\n\n if _icon:\n if not _devices_info:\n _hide(_icon)\n else:\n _show(_icon)\n", "path": "lib/solaar/ui/tray.py"}]} |
gh_patches_debug_1293 | rasdani/github-patches | git_diff | aio-libs__aiohttp-1752 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Encoding is always UTF-8 in POST data
## Long story short
I'm doing a `POST` request via `client.post`:
```
data = aiohttp.FormData({
'FindText': name,
}, charset='windows-1251')
client.post(base_url, params={'RowFrom': offset}, data=data)
```
where `name` contains some non-Latin text (`'хан'`)
## Expected behaviour
POST data should contain: `FindText=%D5%E0%ED`
## Actual behaviour
`FindText=%D1%85%D0%B0%D0%BD`
## Steps to reproduce
Looking through the code of `formdata.py:99`
```
urlencode(data, doseq=True).encode(charset),
```
I noticed that `data` is urlencoded in UTF-8 first and then encoded to `windows-1251` (which has no effect on `%D1...`).
For now, I just manually do in my code:
```
data = urlencode({
'FindText': name,
}, encoding='windows-1251')
```
And I get the string that I need.
Is it a bug? Or am I doing it wrong?
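
For illustration only (not part of the original report), here is a minimal sketch of why the two-step approach loses the charset, assuming plain `urllib.parse`:

```python
from urllib.parse import urlencode

data = {"FindText": "хан"}

# urlencode() percent-escapes with UTF-8 by default; the later .encode(charset)
# only converts the already-ASCII result to bytes, so the charset is ignored.
broken = urlencode(data, doseq=True).encode("windows-1251")
print(broken)  # b'FindText=%D1%85%D0%B0%D0%BD'

# Passing the charset to urlencode() percent-escapes the cp1251 bytes instead.
fixed = urlencode(data, doseq=True, encoding="windows-1251").encode()
print(fixed)   # b'FindText=%F5%E0%ED' for lowercase 'хан'
               # (the report's example uses 'Хан', giving %D5%E0%ED)
```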
## Your environment
```
Python 3.6.0 (default, Jan 16 2017, 12:12:55)
[GCC 6.3.1 20170109] on linux
---
aiohttp==2.0.3
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `aiohttp/formdata.py`
Content:
```
1 import io
2 from urllib.parse import urlencode
3
4 from multidict import MultiDict, MultiDictProxy
5
6 from . import hdrs, multipart, payload
7 from .helpers import guess_filename
8
9 __all__ = ('FormData',)
10
11
12 class FormData:
13 """Helper class for multipart/form-data and
14 application/x-www-form-urlencoded body generation."""
15
16 def __init__(self, fields=(), quote_fields=True, charset=None):
17 self._writer = multipart.MultipartWriter('form-data')
18 self._fields = []
19 self._is_multipart = False
20 self._quote_fields = quote_fields
21 self._charset = charset
22
23 if isinstance(fields, dict):
24 fields = list(fields.items())
25 elif not isinstance(fields, (list, tuple)):
26 fields = (fields,)
27 self.add_fields(*fields)
28
29 @property
30 def is_multipart(self):
31 return self._is_multipart
32
33 def add_field(self, name, value, *, content_type=None, filename=None,
34 content_transfer_encoding=None):
35
36 if isinstance(value, io.IOBase):
37 self._is_multipart = True
38 elif isinstance(value, (bytes, bytearray, memoryview)):
39 if filename is None and content_transfer_encoding is None:
40 filename = name
41
42 type_options = MultiDict({'name': name})
43 if filename is not None and not isinstance(filename, str):
44 raise TypeError('filename must be an instance of str. '
45 'Got: %s' % filename)
46 if filename is None and isinstance(value, io.IOBase):
47 filename = guess_filename(value, name)
48 if filename is not None:
49 type_options['filename'] = filename
50 self._is_multipart = True
51
52 headers = {}
53 if content_type is not None:
54 if not isinstance(content_type, str):
55 raise TypeError('content_type must be an instance of str. '
56 'Got: %s' % content_type)
57 headers[hdrs.CONTENT_TYPE] = content_type
58 self._is_multipart = True
59 if content_transfer_encoding is not None:
60 if not isinstance(content_transfer_encoding, str):
61 raise TypeError('content_transfer_encoding must be an instance'
62 ' of str. Got: %s' % content_transfer_encoding)
63 headers[hdrs.CONTENT_TRANSFER_ENCODING] = content_transfer_encoding
64 self._is_multipart = True
65
66 self._fields.append((type_options, headers, value))
67
68 def add_fields(self, *fields):
69 to_add = list(fields)
70
71 while to_add:
72 rec = to_add.pop(0)
73
74 if isinstance(rec, io.IOBase):
75 k = guess_filename(rec, 'unknown')
76 self.add_field(k, rec)
77
78 elif isinstance(rec, (MultiDictProxy, MultiDict)):
79 to_add.extend(rec.items())
80
81 elif isinstance(rec, (list, tuple)) and len(rec) == 2:
82 k, fp = rec
83 self.add_field(k, fp)
84
85 else:
86 raise TypeError('Only io.IOBase, multidict and (name, file) '
87 'pairs allowed, use .add_field() for passing '
88 'more complex parameters, got {!r}'
89 .format(rec))
90
91 def _gen_form_urlencoded(self):
92 # form data (x-www-form-urlencoded)
93 data = []
94 for type_options, _, value in self._fields:
95 data.append((type_options['name'], value))
96
97 charset = self._charset if self._charset is not None else 'utf-8'
98 return payload.BytesPayload(
99 urlencode(data, doseq=True).encode(charset),
100 content_type='application/x-www-form-urlencoded')
101
102 def _gen_form_data(self):
103 """Encode a list of fields using the multipart/form-data MIME format"""
104 for dispparams, headers, value in self._fields:
105 try:
106 if hdrs.CONTENT_TYPE in headers:
107 part = payload.get_payload(
108 value, content_type=headers[hdrs.CONTENT_TYPE],
109 headers=headers, encoding=self._charset)
110 else:
111 part = payload.get_payload(
112 value, headers=headers, encoding=self._charset)
113 except Exception as exc:
114 raise TypeError(
115 'Can not serialize value type: %r\n '
116 'headers: %r\n value: %r' % (
117 type(value), headers, value)) from exc
118
119 if dispparams:
120 part.set_content_disposition(
121 'form-data', quote_fields=self._quote_fields, **dispparams
122 )
123 # FIXME cgi.FieldStorage doesn't likes body parts with
124 # Content-Length which were sent via chunked transfer encoding
125 part.headers.pop(hdrs.CONTENT_LENGTH, None)
126
127 self._writer.append_payload(part)
128
129 return self._writer
130
131 def __call__(self):
132 if self._is_multipart:
133 return self._gen_form_data()
134 else:
135 return self._gen_form_urlencoded()
136
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/aiohttp/formdata.py b/aiohttp/formdata.py
--- a/aiohttp/formdata.py
+++ b/aiohttp/formdata.py
@@ -96,7 +96,7 @@
charset = self._charset if self._charset is not None else 'utf-8'
return payload.BytesPayload(
- urlencode(data, doseq=True).encode(charset),
+ urlencode(data, doseq=True, encoding=charset).encode(),
content_type='application/x-www-form-urlencoded')
def _gen_form_data(self):
| {"golden_diff": "diff --git a/aiohttp/formdata.py b/aiohttp/formdata.py\n--- a/aiohttp/formdata.py\n+++ b/aiohttp/formdata.py\n@@ -96,7 +96,7 @@\n \n charset = self._charset if self._charset is not None else 'utf-8'\n return payload.BytesPayload(\n- urlencode(data, doseq=True).encode(charset),\n+ urlencode(data, doseq=True, encoding=charset).encode(),\n content_type='application/x-www-form-urlencoded')\n \n def _gen_form_data(self):\n", "issue": "Encoding is always UTF-8 in POST data\n## Long story short\r\n\r\nI'm doing a `POST` request via `client.post`:\r\n\r\n```\r\ndata = aiohttp.FormData({\r\n 'FindText': name,\r\n }, charset='windows-1251')\r\n\r\nclient.post(base_url, params={'RowFrom': offset}, data=data)\r\n```\r\n\r\nwhere `name` contains some none-latin text (`'\u0445\u0430\u043d'`)\r\n\r\n## Expected behaviour\r\n\r\nPOST data should contain: `FindText=%D5%E0%ED`\r\n\r\n## Actual behaviour\r\n\r\n`FindText=%D1%85%D0%B0%D0%BD'`\r\n\r\n## Steps to reproduce\r\n\r\nLooking through the code of `formdata.py:99`\r\n\r\n```\r\nurlencode(data, doseq=True).encode(charset),\r\n```\r\n\r\nI noticed, that `data` is urlencoded in UTF-8 first and then encoded to `windows-1251` (and that has no effect on `%D1...`).\r\n\r\nFor now, I just manually do in my code:\r\n\r\n```\r\ndata = urlencode({\r\n 'FindText': name,\r\n }, encoding='windows-1251')\r\n```\r\n\r\nAnd I get the string that I need.\r\n\r\nIs it a bug? Or am I doing it wrong?\r\n\r\n## Your environment\r\n\r\n```\r\nPython 3.6.0 (default, Jan 16 2017, 12:12:55) \r\n[GCC 6.3.1 20170109] on linux\r\n---\r\naiohttp==2.0.3\r\n```\r\n\n", "before_files": [{"content": "import io\nfrom urllib.parse import urlencode\n\nfrom multidict import MultiDict, MultiDictProxy\n\nfrom . import hdrs, multipart, payload\nfrom .helpers import guess_filename\n\n__all__ = ('FormData',)\n\n\nclass FormData:\n \"\"\"Helper class for multipart/form-data and\n application/x-www-form-urlencoded body generation.\"\"\"\n\n def __init__(self, fields=(), quote_fields=True, charset=None):\n self._writer = multipart.MultipartWriter('form-data')\n self._fields = []\n self._is_multipart = False\n self._quote_fields = quote_fields\n self._charset = charset\n\n if isinstance(fields, dict):\n fields = list(fields.items())\n elif not isinstance(fields, (list, tuple)):\n fields = (fields,)\n self.add_fields(*fields)\n\n @property\n def is_multipart(self):\n return self._is_multipart\n\n def add_field(self, name, value, *, content_type=None, filename=None,\n content_transfer_encoding=None):\n\n if isinstance(value, io.IOBase):\n self._is_multipart = True\n elif isinstance(value, (bytes, bytearray, memoryview)):\n if filename is None and content_transfer_encoding is None:\n filename = name\n\n type_options = MultiDict({'name': name})\n if filename is not None and not isinstance(filename, str):\n raise TypeError('filename must be an instance of str. '\n 'Got: %s' % filename)\n if filename is None and isinstance(value, io.IOBase):\n filename = guess_filename(value, name)\n if filename is not None:\n type_options['filename'] = filename\n self._is_multipart = True\n\n headers = {}\n if content_type is not None:\n if not isinstance(content_type, str):\n raise TypeError('content_type must be an instance of str. 
'\n 'Got: %s' % content_type)\n headers[hdrs.CONTENT_TYPE] = content_type\n self._is_multipart = True\n if content_transfer_encoding is not None:\n if not isinstance(content_transfer_encoding, str):\n raise TypeError('content_transfer_encoding must be an instance'\n ' of str. Got: %s' % content_transfer_encoding)\n headers[hdrs.CONTENT_TRANSFER_ENCODING] = content_transfer_encoding\n self._is_multipart = True\n\n self._fields.append((type_options, headers, value))\n\n def add_fields(self, *fields):\n to_add = list(fields)\n\n while to_add:\n rec = to_add.pop(0)\n\n if isinstance(rec, io.IOBase):\n k = guess_filename(rec, 'unknown')\n self.add_field(k, rec)\n\n elif isinstance(rec, (MultiDictProxy, MultiDict)):\n to_add.extend(rec.items())\n\n elif isinstance(rec, (list, tuple)) and len(rec) == 2:\n k, fp = rec\n self.add_field(k, fp)\n\n else:\n raise TypeError('Only io.IOBase, multidict and (name, file) '\n 'pairs allowed, use .add_field() for passing '\n 'more complex parameters, got {!r}'\n .format(rec))\n\n def _gen_form_urlencoded(self):\n # form data (x-www-form-urlencoded)\n data = []\n for type_options, _, value in self._fields:\n data.append((type_options['name'], value))\n\n charset = self._charset if self._charset is not None else 'utf-8'\n return payload.BytesPayload(\n urlencode(data, doseq=True).encode(charset),\n content_type='application/x-www-form-urlencoded')\n\n def _gen_form_data(self):\n \"\"\"Encode a list of fields using the multipart/form-data MIME format\"\"\"\n for dispparams, headers, value in self._fields:\n try:\n if hdrs.CONTENT_TYPE in headers:\n part = payload.get_payload(\n value, content_type=headers[hdrs.CONTENT_TYPE],\n headers=headers, encoding=self._charset)\n else:\n part = payload.get_payload(\n value, headers=headers, encoding=self._charset)\n except Exception as exc:\n raise TypeError(\n 'Can not serialize value type: %r\\n '\n 'headers: %r\\n value: %r' % (\n type(value), headers, value)) from exc\n\n if dispparams:\n part.set_content_disposition(\n 'form-data', quote_fields=self._quote_fields, **dispparams\n )\n # FIXME cgi.FieldStorage doesn't likes body parts with\n # Content-Length which were sent via chunked transfer encoding\n part.headers.pop(hdrs.CONTENT_LENGTH, None)\n\n self._writer.append_payload(part)\n\n return self._writer\n\n def __call__(self):\n if self._is_multipart:\n return self._gen_form_data()\n else:\n return self._gen_form_urlencoded()\n", "path": "aiohttp/formdata.py"}], "after_files": [{"content": "import io\nfrom urllib.parse import urlencode\n\nfrom multidict import MultiDict, MultiDictProxy\n\nfrom . 
import hdrs, multipart, payload\nfrom .helpers import guess_filename\n\n__all__ = ('FormData',)\n\n\nclass FormData:\n \"\"\"Helper class for multipart/form-data and\n application/x-www-form-urlencoded body generation.\"\"\"\n\n def __init__(self, fields=(), quote_fields=True, charset=None):\n self._writer = multipart.MultipartWriter('form-data')\n self._fields = []\n self._is_multipart = False\n self._quote_fields = quote_fields\n self._charset = charset\n\n if isinstance(fields, dict):\n fields = list(fields.items())\n elif not isinstance(fields, (list, tuple)):\n fields = (fields,)\n self.add_fields(*fields)\n\n @property\n def is_multipart(self):\n return self._is_multipart\n\n def add_field(self, name, value, *, content_type=None, filename=None,\n content_transfer_encoding=None):\n\n if isinstance(value, io.IOBase):\n self._is_multipart = True\n elif isinstance(value, (bytes, bytearray, memoryview)):\n if filename is None and content_transfer_encoding is None:\n filename = name\n\n type_options = MultiDict({'name': name})\n if filename is not None and not isinstance(filename, str):\n raise TypeError('filename must be an instance of str. '\n 'Got: %s' % filename)\n if filename is None and isinstance(value, io.IOBase):\n filename = guess_filename(value, name)\n if filename is not None:\n type_options['filename'] = filename\n self._is_multipart = True\n\n headers = {}\n if content_type is not None:\n if not isinstance(content_type, str):\n raise TypeError('content_type must be an instance of str. '\n 'Got: %s' % content_type)\n headers[hdrs.CONTENT_TYPE] = content_type\n self._is_multipart = True\n if content_transfer_encoding is not None:\n if not isinstance(content_transfer_encoding, str):\n raise TypeError('content_transfer_encoding must be an instance'\n ' of str. 
Got: %s' % content_transfer_encoding)\n headers[hdrs.CONTENT_TRANSFER_ENCODING] = content_transfer_encoding\n self._is_multipart = True\n\n self._fields.append((type_options, headers, value))\n\n def add_fields(self, *fields):\n to_add = list(fields)\n\n while to_add:\n rec = to_add.pop(0)\n\n if isinstance(rec, io.IOBase):\n k = guess_filename(rec, 'unknown')\n self.add_field(k, rec)\n\n elif isinstance(rec, (MultiDictProxy, MultiDict)):\n to_add.extend(rec.items())\n\n elif isinstance(rec, (list, tuple)) and len(rec) == 2:\n k, fp = rec\n self.add_field(k, fp)\n\n else:\n raise TypeError('Only io.IOBase, multidict and (name, file) '\n 'pairs allowed, use .add_field() for passing '\n 'more complex parameters, got {!r}'\n .format(rec))\n\n def _gen_form_urlencoded(self):\n # form data (x-www-form-urlencoded)\n data = []\n for type_options, _, value in self._fields:\n data.append((type_options['name'], value))\n\n charset = self._charset if self._charset is not None else 'utf-8'\n return payload.BytesPayload(\n urlencode(data, doseq=True, encoding=charset).encode(),\n content_type='application/x-www-form-urlencoded')\n\n def _gen_form_data(self):\n \"\"\"Encode a list of fields using the multipart/form-data MIME format\"\"\"\n for dispparams, headers, value in self._fields:\n try:\n if hdrs.CONTENT_TYPE in headers:\n part = payload.get_payload(\n value, content_type=headers[hdrs.CONTENT_TYPE],\n headers=headers, encoding=self._charset)\n else:\n part = payload.get_payload(\n value, headers=headers, encoding=self._charset)\n except Exception as exc:\n raise TypeError(\n 'Can not serialize value type: %r\\n '\n 'headers: %r\\n value: %r' % (\n type(value), headers, value)) from exc\n\n if dispparams:\n part.set_content_disposition(\n 'form-data', quote_fields=self._quote_fields, **dispparams\n )\n # FIXME cgi.FieldStorage doesn't likes body parts with\n # Content-Length which were sent via chunked transfer encoding\n part.headers.pop(hdrs.CONTENT_LENGTH, None)\n\n self._writer.append_payload(part)\n\n return self._writer\n\n def __call__(self):\n if self._is_multipart:\n return self._gen_form_data()\n else:\n return self._gen_form_urlencoded()\n", "path": "aiohttp/formdata.py"}]} |
gh_patches_debug_1294 | rasdani/github-patches | git_diff | psf__black-4028 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Conflict in blank lines after module docstring and before function
Using macOS:
```sh
$ black --version
black, 23.10.1 (compiled: yes)
Python (CPython) 3.10.12
```
Take this code:
```python
"""This is a test case, note blank lines next."""
def go():
print("Do stuff!")
go()
```
And run this:
```
$ black --diff blanks.py
--- blanks.py 2023-11-06 18:04:21.775563+00:00
+++ blanks.py 2023-11-06 18:04:37.405865+00:00
@@ -1,4 +1,8 @@
"""This is a test case, note blank lines next."""
+
+
def go():
print("Do stuff!")
+
+
go()
would reformat blanks.py
All done! ✨ 🍰 ✨
1 file would be reformatted.
```
Note that the current default behavior is to put two blank lines between the module docstring and the following function. This is as expected, quoting https://black.readthedocs.io/en/stable/the_black_code_style/current_style.html
> It will also insert proper spacing before and after function definitions. It’s one line before and after inner functions and two lines before and after module-level functions and classes.
Now in preview mode:
```
$ black --diff blanks.py --preview
--- blanks.py 2023-11-06 18:04:21.775563+00:00
+++ blanks.py 2023-11-06 18:04:42.146632+00:00
@@ -1,4 +1,7 @@
"""This is a test case, note blank lines next."""
+
def go():
print("Do stuff!")
+
+
go()
```
This now only has one blank line between the module docstring and the following function. This seems like an unintended consequence of #1872.
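
For comparison, the stable-style result implied by the documented rule (two blank lines around a module-level function) looks like this; it is a sketch of the expected output, not taken from the report:

```python
"""This is a test case, note blank lines next."""


def go():
    print("Do stuff!")


go()
```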
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/black/lines.py`
Content:
```
1 import itertools
2 import math
3 import sys
4 from dataclasses import dataclass, field
5 from typing import (
6 Callable,
7 Dict,
8 Iterator,
9 List,
10 Optional,
11 Sequence,
12 Tuple,
13 TypeVar,
14 Union,
15 cast,
16 )
17
18 from black.brackets import COMMA_PRIORITY, DOT_PRIORITY, BracketTracker
19 from black.mode import Mode, Preview
20 from black.nodes import (
21 BRACKETS,
22 CLOSING_BRACKETS,
23 OPENING_BRACKETS,
24 STANDALONE_COMMENT,
25 TEST_DESCENDANTS,
26 child_towards,
27 is_docstring,
28 is_funcdef,
29 is_import,
30 is_multiline_string,
31 is_one_sequence_between,
32 is_type_comment,
33 is_type_ignore_comment,
34 is_with_or_async_with_stmt,
35 replace_child,
36 syms,
37 whitespace,
38 )
39 from black.strings import str_width
40 from blib2to3.pgen2 import token
41 from blib2to3.pytree import Leaf, Node
42
43 # types
44 T = TypeVar("T")
45 Index = int
46 LeafID = int
47 LN = Union[Leaf, Node]
48
49
50 @dataclass
51 class Line:
52 """Holds leaves and comments. Can be printed with `str(line)`."""
53
54 mode: Mode = field(repr=False)
55 depth: int = 0
56 leaves: List[Leaf] = field(default_factory=list)
57 # keys ordered like `leaves`
58 comments: Dict[LeafID, List[Leaf]] = field(default_factory=dict)
59 bracket_tracker: BracketTracker = field(default_factory=BracketTracker)
60 inside_brackets: bool = False
61 should_split_rhs: bool = False
62 magic_trailing_comma: Optional[Leaf] = None
63
64 def append(
65 self, leaf: Leaf, preformatted: bool = False, track_bracket: bool = False
66 ) -> None:
67 """Add a new `leaf` to the end of the line.
68
69 Unless `preformatted` is True, the `leaf` will receive a new consistent
70 whitespace prefix and metadata applied by :class:`BracketTracker`.
71 Trailing commas are maybe removed, unpacked for loop variables are
72 demoted from being delimiters.
73
74 Inline comments are put aside.
75 """
76 has_value = leaf.type in BRACKETS or bool(leaf.value.strip())
77 if not has_value:
78 return
79
80 if token.COLON == leaf.type and self.is_class_paren_empty:
81 del self.leaves[-2:]
82 if self.leaves and not preformatted:
83 # Note: at this point leaf.prefix should be empty except for
84 # imports, for which we only preserve newlines.
85 leaf.prefix += whitespace(
86 leaf,
87 complex_subscript=self.is_complex_subscript(leaf),
88 mode=self.mode,
89 )
90 if self.inside_brackets or not preformatted or track_bracket:
91 self.bracket_tracker.mark(leaf)
92 if self.mode.magic_trailing_comma:
93 if self.has_magic_trailing_comma(leaf):
94 self.magic_trailing_comma = leaf
95 elif self.has_magic_trailing_comma(leaf, ensure_removable=True):
96 self.remove_trailing_comma()
97 if not self.append_comment(leaf):
98 self.leaves.append(leaf)
99
100 def append_safe(self, leaf: Leaf, preformatted: bool = False) -> None:
101 """Like :func:`append()` but disallow invalid standalone comment structure.
102
103 Raises ValueError when any `leaf` is appended after a standalone comment
104 or when a standalone comment is not the first leaf on the line.
105 """
106 if self.bracket_tracker.depth == 0:
107 if self.is_comment:
108 raise ValueError("cannot append to standalone comments")
109
110 if self.leaves and leaf.type == STANDALONE_COMMENT:
111 raise ValueError(
112 "cannot append standalone comments to a populated line"
113 )
114
115 self.append(leaf, preformatted=preformatted)
116
117 @property
118 def is_comment(self) -> bool:
119 """Is this line a standalone comment?"""
120 return len(self.leaves) == 1 and self.leaves[0].type == STANDALONE_COMMENT
121
122 @property
123 def is_decorator(self) -> bool:
124 """Is this line a decorator?"""
125 return bool(self) and self.leaves[0].type == token.AT
126
127 @property
128 def is_import(self) -> bool:
129 """Is this an import line?"""
130 return bool(self) and is_import(self.leaves[0])
131
132 @property
133 def is_with_or_async_with_stmt(self) -> bool:
134 """Is this a with_stmt line?"""
135 return bool(self) and is_with_or_async_with_stmt(self.leaves[0])
136
137 @property
138 def is_class(self) -> bool:
139 """Is this line a class definition?"""
140 return (
141 bool(self)
142 and self.leaves[0].type == token.NAME
143 and self.leaves[0].value == "class"
144 )
145
146 @property
147 def is_stub_class(self) -> bool:
148 """Is this line a class definition with a body consisting only of "..."?"""
149 return self.is_class and self.leaves[-3:] == [
150 Leaf(token.DOT, ".") for _ in range(3)
151 ]
152
153 @property
154 def is_def(self) -> bool:
155 """Is this a function definition? (Also returns True for async defs.)"""
156 try:
157 first_leaf = self.leaves[0]
158 except IndexError:
159 return False
160
161 try:
162 second_leaf: Optional[Leaf] = self.leaves[1]
163 except IndexError:
164 second_leaf = None
165 return (first_leaf.type == token.NAME and first_leaf.value == "def") or (
166 first_leaf.type == token.ASYNC
167 and second_leaf is not None
168 and second_leaf.type == token.NAME
169 and second_leaf.value == "def"
170 )
171
172 @property
173 def is_stub_def(self) -> bool:
174 """Is this line a function definition with a body consisting only of "..."?"""
175 return self.is_def and self.leaves[-4:] == [Leaf(token.COLON, ":")] + [
176 Leaf(token.DOT, ".") for _ in range(3)
177 ]
178
179 @property
180 def is_class_paren_empty(self) -> bool:
181 """Is this a class with no base classes but using parentheses?
182
183 Those are unnecessary and should be removed.
184 """
185 return (
186 bool(self)
187 and len(self.leaves) == 4
188 and self.is_class
189 and self.leaves[2].type == token.LPAR
190 and self.leaves[2].value == "("
191 and self.leaves[3].type == token.RPAR
192 and self.leaves[3].value == ")"
193 )
194
195 @property
196 def is_triple_quoted_string(self) -> bool:
197 """Is the line a triple quoted string?"""
198 if not self or self.leaves[0].type != token.STRING:
199 return False
200 value = self.leaves[0].value
201 if value.startswith(('"""', "'''")):
202 return True
203 if Preview.accept_raw_docstrings in self.mode and value.startswith(
204 ("r'''", 'r"""', "R'''", 'R"""')
205 ):
206 return True
207 return False
208
209 @property
210 def opens_block(self) -> bool:
211 """Does this line open a new level of indentation."""
212 if len(self.leaves) == 0:
213 return False
214 return self.leaves[-1].type == token.COLON
215
216 def is_fmt_pass_converted(
217 self, *, first_leaf_matches: Optional[Callable[[Leaf], bool]] = None
218 ) -> bool:
219 """Is this line converted from fmt off/skip code?
220
221 If first_leaf_matches is not None, it only returns True if the first
222 leaf of converted code matches.
223 """
224 if len(self.leaves) != 1:
225 return False
226 leaf = self.leaves[0]
227 if (
228 leaf.type != STANDALONE_COMMENT
229 or leaf.fmt_pass_converted_first_leaf is None
230 ):
231 return False
232 return first_leaf_matches is None or first_leaf_matches(
233 leaf.fmt_pass_converted_first_leaf
234 )
235
236 def contains_standalone_comments(self, depth_limit: int = sys.maxsize) -> bool:
237 """If so, needs to be split before emitting."""
238 for leaf in self.leaves:
239 if leaf.type == STANDALONE_COMMENT and leaf.bracket_depth <= depth_limit:
240 return True
241
242 return False
243
244 def contains_implicit_multiline_string_with_comments(self) -> bool:
245 """Chck if we have an implicit multiline string with comments on the line"""
246 for leaf_type, leaf_group_iterator in itertools.groupby(
247 self.leaves, lambda leaf: leaf.type
248 ):
249 if leaf_type != token.STRING:
250 continue
251 leaf_list = list(leaf_group_iterator)
252 if len(leaf_list) == 1:
253 continue
254 for leaf in leaf_list:
255 if self.comments_after(leaf):
256 return True
257 return False
258
259 def contains_uncollapsable_type_comments(self) -> bool:
260 ignored_ids = set()
261 try:
262 last_leaf = self.leaves[-1]
263 ignored_ids.add(id(last_leaf))
264 if last_leaf.type == token.COMMA or (
265 last_leaf.type == token.RPAR and not last_leaf.value
266 ):
267 # When trailing commas or optional parens are inserted by Black for
268 # consistency, comments after the previous last element are not moved
269 # (they don't have to, rendering will still be correct). So we ignore
270 # trailing commas and invisible.
271 last_leaf = self.leaves[-2]
272 ignored_ids.add(id(last_leaf))
273 except IndexError:
274 return False
275
276 # A type comment is uncollapsable if it is attached to a leaf
277 # that isn't at the end of the line (since that could cause it
278 # to get associated to a different argument) or if there are
279 # comments before it (since that could cause it to get hidden
280 # behind a comment.
281 comment_seen = False
282 for leaf_id, comments in self.comments.items():
283 for comment in comments:
284 if is_type_comment(comment):
285 if comment_seen or (
286 not is_type_ignore_comment(comment)
287 and leaf_id not in ignored_ids
288 ):
289 return True
290
291 comment_seen = True
292
293 return False
294
295 def contains_unsplittable_type_ignore(self) -> bool:
296 if not self.leaves:
297 return False
298
299 # If a 'type: ignore' is attached to the end of a line, we
300 # can't split the line, because we can't know which of the
301 # subexpressions the ignore was meant to apply to.
302 #
303 # We only want this to apply to actual physical lines from the
304 # original source, though: we don't want the presence of a
305 # 'type: ignore' at the end of a multiline expression to
306 # justify pushing it all onto one line. Thus we
307 # (unfortunately) need to check the actual source lines and
308 # only report an unsplittable 'type: ignore' if this line was
309 # one line in the original code.
310
311 # Grab the first and last line numbers, skipping generated leaves
312 first_line = next((leaf.lineno for leaf in self.leaves if leaf.lineno != 0), 0)
313 last_line = next(
314 (leaf.lineno for leaf in reversed(self.leaves) if leaf.lineno != 0), 0
315 )
316
317 if first_line == last_line:
318 # We look at the last two leaves since a comma or an
319 # invisible paren could have been added at the end of the
320 # line.
321 for node in self.leaves[-2:]:
322 for comment in self.comments.get(id(node), []):
323 if is_type_ignore_comment(comment):
324 return True
325
326 return False
327
328 def contains_multiline_strings(self) -> bool:
329 return any(is_multiline_string(leaf) for leaf in self.leaves)
330
331 def has_magic_trailing_comma(
332 self, closing: Leaf, ensure_removable: bool = False
333 ) -> bool:
334 """Return True if we have a magic trailing comma, that is when:
335 - there's a trailing comma here
336 - it's not a one-tuple
337 - it's not a single-element subscript
338 Additionally, if ensure_removable:
339 - it's not from square bracket indexing
340 (specifically, single-element square bracket indexing)
341 """
342 if not (
343 closing.type in CLOSING_BRACKETS
344 and self.leaves
345 and self.leaves[-1].type == token.COMMA
346 ):
347 return False
348
349 if closing.type == token.RBRACE:
350 return True
351
352 if closing.type == token.RSQB:
353 if (
354 closing.parent
355 and closing.parent.type == syms.trailer
356 and closing.opening_bracket
357 and is_one_sequence_between(
358 closing.opening_bracket,
359 closing,
360 self.leaves,
361 brackets=(token.LSQB, token.RSQB),
362 )
363 ):
364 return False
365
366 if not ensure_removable:
367 return True
368
369 comma = self.leaves[-1]
370 if comma.parent is None:
371 return False
372 return (
373 comma.parent.type != syms.subscriptlist
374 or closing.opening_bracket is None
375 or not is_one_sequence_between(
376 closing.opening_bracket,
377 closing,
378 self.leaves,
379 brackets=(token.LSQB, token.RSQB),
380 )
381 )
382
383 if self.is_import:
384 return True
385
386 if closing.opening_bracket is not None and not is_one_sequence_between(
387 closing.opening_bracket, closing, self.leaves
388 ):
389 return True
390
391 return False
392
393 def append_comment(self, comment: Leaf) -> bool:
394 """Add an inline or standalone comment to the line."""
395 if (
396 comment.type == STANDALONE_COMMENT
397 and self.bracket_tracker.any_open_brackets()
398 ):
399 comment.prefix = ""
400 return False
401
402 if comment.type != token.COMMENT:
403 return False
404
405 if not self.leaves:
406 comment.type = STANDALONE_COMMENT
407 comment.prefix = ""
408 return False
409
410 last_leaf = self.leaves[-1]
411 if (
412 last_leaf.type == token.RPAR
413 and not last_leaf.value
414 and last_leaf.parent
415 and len(list(last_leaf.parent.leaves())) <= 3
416 and not is_type_comment(comment)
417 ):
418 # Comments on an optional parens wrapping a single leaf should belong to
419 # the wrapped node except if it's a type comment. Pinning the comment like
420 # this avoids unstable formatting caused by comment migration.
421 if len(self.leaves) < 2:
422 comment.type = STANDALONE_COMMENT
423 comment.prefix = ""
424 return False
425
426 last_leaf = self.leaves[-2]
427 self.comments.setdefault(id(last_leaf), []).append(comment)
428 return True
429
430 def comments_after(self, leaf: Leaf) -> List[Leaf]:
431 """Generate comments that should appear directly after `leaf`."""
432 return self.comments.get(id(leaf), [])
433
434 def remove_trailing_comma(self) -> None:
435 """Remove the trailing comma and moves the comments attached to it."""
436 trailing_comma = self.leaves.pop()
437 trailing_comma_comments = self.comments.pop(id(trailing_comma), [])
438 self.comments.setdefault(id(self.leaves[-1]), []).extend(
439 trailing_comma_comments
440 )
441
442 def is_complex_subscript(self, leaf: Leaf) -> bool:
443 """Return True iff `leaf` is part of a slice with non-trivial exprs."""
444 open_lsqb = self.bracket_tracker.get_open_lsqb()
445 if open_lsqb is None:
446 return False
447
448 subscript_start = open_lsqb.next_sibling
449
450 if isinstance(subscript_start, Node):
451 if subscript_start.type == syms.listmaker:
452 return False
453
454 if subscript_start.type == syms.subscriptlist:
455 subscript_start = child_towards(subscript_start, leaf)
456 return subscript_start is not None and any(
457 n.type in TEST_DESCENDANTS for n in subscript_start.pre_order()
458 )
459
460 def enumerate_with_length(
461 self, reversed: bool = False
462 ) -> Iterator[Tuple[Index, Leaf, int]]:
463 """Return an enumeration of leaves with their length.
464
465 Stops prematurely on multiline strings and standalone comments.
466 """
467 op = cast(
468 Callable[[Sequence[Leaf]], Iterator[Tuple[Index, Leaf]]],
469 enumerate_reversed if reversed else enumerate,
470 )
471 for index, leaf in op(self.leaves):
472 length = len(leaf.prefix) + len(leaf.value)
473 if "\n" in leaf.value:
474 return # Multiline strings, we can't continue.
475
476 for comment in self.comments_after(leaf):
477 length += len(comment.value)
478
479 yield index, leaf, length
480
481 def clone(self) -> "Line":
482 return Line(
483 mode=self.mode,
484 depth=self.depth,
485 inside_brackets=self.inside_brackets,
486 should_split_rhs=self.should_split_rhs,
487 magic_trailing_comma=self.magic_trailing_comma,
488 )
489
490 def __str__(self) -> str:
491 """Render the line."""
492 if not self:
493 return "\n"
494
495 indent = " " * self.depth
496 leaves = iter(self.leaves)
497 first = next(leaves)
498 res = f"{first.prefix}{indent}{first.value}"
499 for leaf in leaves:
500 res += str(leaf)
501 for comment in itertools.chain.from_iterable(self.comments.values()):
502 res += str(comment)
503
504 return res + "\n"
505
506 def __bool__(self) -> bool:
507 """Return True if the line has leaves or comments."""
508 return bool(self.leaves or self.comments)
509
510
511 @dataclass
512 class RHSResult:
513 """Intermediate split result from a right hand split."""
514
515 head: Line
516 body: Line
517 tail: Line
518 opening_bracket: Leaf
519 closing_bracket: Leaf
520
521
522 @dataclass
523 class LinesBlock:
524 """Class that holds information about a block of formatted lines.
525
526 This is introduced so that the EmptyLineTracker can look behind the standalone
527 comments and adjust their empty lines for class or def lines.
528 """
529
530 mode: Mode
531 previous_block: Optional["LinesBlock"]
532 original_line: Line
533 before: int = 0
534 content_lines: List[str] = field(default_factory=list)
535 after: int = 0
536
537 def all_lines(self) -> List[str]:
538 empty_line = str(Line(mode=self.mode))
539 return (
540 [empty_line * self.before] + self.content_lines + [empty_line * self.after]
541 )
542
543
544 @dataclass
545 class EmptyLineTracker:
546 """Provides a stateful method that returns the number of potential extra
547 empty lines needed before and after the currently processed line.
548
549 Note: this tracker works on lines that haven't been split yet. It assumes
550 the prefix of the first leaf consists of optional newlines. Those newlines
551 are consumed by `maybe_empty_lines()` and included in the computation.
552 """
553
554 mode: Mode
555 previous_line: Optional[Line] = None
556 previous_block: Optional[LinesBlock] = None
557 previous_defs: List[Line] = field(default_factory=list)
558 semantic_leading_comment: Optional[LinesBlock] = None
559
560 def maybe_empty_lines(self, current_line: Line) -> LinesBlock:
561 """Return the number of extra empty lines before and after the `current_line`.
562
563 This is for separating `def`, `async def` and `class` with extra empty
564 lines (two on module-level).
565 """
566 before, after = self._maybe_empty_lines(current_line)
567 previous_after = self.previous_block.after if self.previous_block else 0
568 before = (
569 # Black should not insert empty lines at the beginning
570 # of the file
571 0
572 if self.previous_line is None
573 else before - previous_after
574 )
575 if (
576 Preview.module_docstring_newlines in current_line.mode
577 and self.previous_block
578 and self.previous_block.previous_block is None
579 and len(self.previous_block.original_line.leaves) == 1
580 and self.previous_block.original_line.is_triple_quoted_string
581 ):
582 before = 1
583
584 block = LinesBlock(
585 mode=self.mode,
586 previous_block=self.previous_block,
587 original_line=current_line,
588 before=before,
589 after=after,
590 )
591
592 # Maintain the semantic_leading_comment state.
593 if current_line.is_comment:
594 if self.previous_line is None or (
595 not self.previous_line.is_decorator
596 # `or before` means this comment already has an empty line before
597 and (not self.previous_line.is_comment or before)
598 and (self.semantic_leading_comment is None or before)
599 ):
600 self.semantic_leading_comment = block
601 # `or before` means this decorator already has an empty line before
602 elif not current_line.is_decorator or before:
603 self.semantic_leading_comment = None
604
605 self.previous_line = current_line
606 self.previous_block = block
607 return block
608
609 def _maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]:
610 max_allowed = 1
611 if current_line.depth == 0:
612 max_allowed = 1 if self.mode.is_pyi else 2
613 if current_line.leaves:
614 # Consume the first leaf's extra newlines.
615 first_leaf = current_line.leaves[0]
616 before = first_leaf.prefix.count("\n")
617 before = min(before, max_allowed)
618 first_leaf.prefix = ""
619 else:
620 before = 0
621
622 user_had_newline = bool(before)
623 depth = current_line.depth
624
625 previous_def = None
626 while self.previous_defs and self.previous_defs[-1].depth >= depth:
627 previous_def = self.previous_defs.pop()
628
629 if previous_def is not None:
630 assert self.previous_line is not None
631 if self.mode.is_pyi:
632 if depth and not current_line.is_def and self.previous_line.is_def:
633 # Empty lines between attributes and methods should be preserved.
634 before = 1 if user_had_newline else 0
635 elif (
636 Preview.blank_line_after_nested_stub_class in self.mode
637 and previous_def.is_class
638 and not previous_def.is_stub_class
639 ):
640 before = 1
641 elif depth:
642 before = 0
643 else:
644 before = 1
645 else:
646 if depth:
647 before = 1
648 elif (
649 not depth
650 and previous_def.depth
651 and current_line.leaves[-1].type == token.COLON
652 and (
653 current_line.leaves[0].value
654 not in ("with", "try", "for", "while", "if", "match")
655 )
656 ):
657 # We shouldn't add two newlines between an indented function and
658 # a dependent non-indented clause. This is to avoid issues with
659 # conditional function definitions that are technically top-level
660 # and therefore get two trailing newlines, but look weird and
661 # inconsistent when they're followed by elif, else, etc. This is
662 # worse because these functions only get *one* preceding newline
663 # already.
664 before = 1
665 else:
666 before = 2
667
668 if current_line.is_decorator or current_line.is_def or current_line.is_class:
669 return self._maybe_empty_lines_for_class_or_def(
670 current_line, before, user_had_newline
671 )
672
673 if (
674 self.previous_line
675 and self.previous_line.is_import
676 and not current_line.is_import
677 and not current_line.is_fmt_pass_converted(first_leaf_matches=is_import)
678 and depth == self.previous_line.depth
679 ):
680 return (before or 1), 0
681
682 if (
683 self.previous_line
684 and self.previous_line.is_class
685 and current_line.is_triple_quoted_string
686 ):
687 if Preview.no_blank_line_before_class_docstring in current_line.mode:
688 return 0, 1
689 return before, 1
690
691 is_empty_first_line_ok = (
692 Preview.allow_empty_first_line_before_new_block_or_comment
693 in current_line.mode
694 and (
695 # If it's a standalone comment
696 current_line.leaves[0].type == STANDALONE_COMMENT
697 # If it opens a new block
698 or current_line.opens_block
699 # If it's a triple quote comment (but not at the start of a funcdef)
700 or (
701 is_docstring(current_line.leaves[0])
702 and self.previous_line
703 and self.previous_line.leaves[0]
704 and self.previous_line.leaves[0].parent
705 and not is_funcdef(self.previous_line.leaves[0].parent)
706 )
707 )
708 )
709
710 if (
711 self.previous_line
712 and self.previous_line.opens_block
713 and not is_empty_first_line_ok
714 ):
715 return 0, 0
716 return before, 0
717
718 def _maybe_empty_lines_for_class_or_def( # noqa: C901
719 self, current_line: Line, before: int, user_had_newline: bool
720 ) -> Tuple[int, int]:
721 if not current_line.is_decorator:
722 self.previous_defs.append(current_line)
723 if self.previous_line is None:
724 # Don't insert empty lines before the first line in the file.
725 return 0, 0
726
727 if self.previous_line.is_decorator:
728 if self.mode.is_pyi and current_line.is_stub_class:
729 # Insert an empty line after a decorated stub class
730 return 0, 1
731
732 return 0, 0
733
734 if self.previous_line.depth < current_line.depth and (
735 self.previous_line.is_class or self.previous_line.is_def
736 ):
737 return 0, 0
738
739 comment_to_add_newlines: Optional[LinesBlock] = None
740 if (
741 self.previous_line.is_comment
742 and self.previous_line.depth == current_line.depth
743 and before == 0
744 ):
745 slc = self.semantic_leading_comment
746 if (
747 slc is not None
748 and slc.previous_block is not None
749 and not slc.previous_block.original_line.is_class
750 and not slc.previous_block.original_line.opens_block
751 and slc.before <= 1
752 ):
753 comment_to_add_newlines = slc
754 else:
755 return 0, 0
756
757 if self.mode.is_pyi:
758 if current_line.is_class or self.previous_line.is_class:
759 if self.previous_line.depth < current_line.depth:
760 newlines = 0
761 elif self.previous_line.depth > current_line.depth:
762 newlines = 1
763 elif current_line.is_stub_class and self.previous_line.is_stub_class:
764 # No blank line between classes with an empty body
765 newlines = 0
766 else:
767 newlines = 1
768 # Remove case `self.previous_line.depth > current_line.depth` below when
769 # this becomes stable.
770 #
771 # Don't inspect the previous line if it's part of the body of the previous
772 # statement in the same level, we always want a blank line if there's
773 # something with a body preceding.
774 elif (
775 Preview.blank_line_between_nested_and_def_stub_file in current_line.mode
776 and self.previous_line.depth > current_line.depth
777 ):
778 newlines = 1
779 elif (
780 current_line.is_def or current_line.is_decorator
781 ) and not self.previous_line.is_def:
782 if current_line.depth:
783 # In classes empty lines between attributes and methods should
784 # be preserved.
785 newlines = min(1, before)
786 else:
787 # Blank line between a block of functions (maybe with preceding
788 # decorators) and a block of non-functions
789 newlines = 1
790 elif self.previous_line.depth > current_line.depth:
791 newlines = 1
792 else:
793 newlines = 0
794 else:
795 newlines = 1 if current_line.depth else 2
796 # If a user has left no space after a dummy implementation, don't insert
797 # new lines. This is useful for instance for @overload or Protocols.
798 if (
799 Preview.dummy_implementations in self.mode
800 and self.previous_line.is_stub_def
801 and not user_had_newline
802 ):
803 newlines = 0
804 if comment_to_add_newlines is not None:
805 previous_block = comment_to_add_newlines.previous_block
806 if previous_block is not None:
807 comment_to_add_newlines.before = (
808 max(comment_to_add_newlines.before, newlines) - previous_block.after
809 )
810 newlines = 0
811 return newlines, 0
812
813
814 def enumerate_reversed(sequence: Sequence[T]) -> Iterator[Tuple[Index, T]]:
815 """Like `reversed(enumerate(sequence))` if that were possible."""
816 index = len(sequence) - 1
817 for element in reversed(sequence):
818 yield (index, element)
819 index -= 1
820
821
822 def append_leaves(
823 new_line: Line, old_line: Line, leaves: List[Leaf], preformatted: bool = False
824 ) -> None:
825 """
826 Append leaves (taken from @old_line) to @new_line, making sure to fix the
827 underlying Node structure where appropriate.
828
829 All of the leaves in @leaves are duplicated. The duplicates are then
830 appended to @new_line and used to replace their originals in the underlying
831 Node structure. Any comments attached to the old leaves are reattached to
832 the new leaves.
833
834 Pre-conditions:
835 set(@leaves) is a subset of set(@old_line.leaves).
836 """
837 for old_leaf in leaves:
838 new_leaf = Leaf(old_leaf.type, old_leaf.value)
839 replace_child(old_leaf, new_leaf)
840 new_line.append(new_leaf, preformatted=preformatted)
841
842 for comment_leaf in old_line.comments_after(old_leaf):
843 new_line.append(comment_leaf, preformatted=True)
844
845
846 def is_line_short_enough( # noqa: C901
847 line: Line, *, mode: Mode, line_str: str = ""
848 ) -> bool:
849 """For non-multiline strings, return True if `line` is no longer than `line_length`.
850 For multiline strings, looks at the context around `line` to determine
851 if it should be inlined or split up.
852 Uses the provided `line_str` rendering, if any, otherwise computes a new one.
853 """
854 if not line_str:
855 line_str = line_to_string(line)
856
857 width = str_width if mode.preview else len
858
859 if Preview.multiline_string_handling not in mode:
860 return (
861 width(line_str) <= mode.line_length
862 and "\n" not in line_str # multiline strings
863 and not line.contains_standalone_comments()
864 )
865
866 if line.contains_standalone_comments():
867 return False
868 if "\n" not in line_str:
869 # No multiline strings (MLS) present
870 return width(line_str) <= mode.line_length
871
872 first, *_, last = line_str.split("\n")
873 if width(first) > mode.line_length or width(last) > mode.line_length:
874 return False
875
876 # Traverse the AST to examine the context of the multiline string (MLS),
877 # tracking aspects such as depth and comma existence,
878 # to determine whether to split the MLS or keep it together.
879 # Depth (which is based on the existing bracket_depth concept)
880 # is needed to determine nesting level of the MLS.
881 # Includes special case for trailing commas.
882 commas: List[int] = [] # tracks number of commas per depth level
883 multiline_string: Optional[Leaf] = None
884 # store the leaves that contain parts of the MLS
885 multiline_string_contexts: List[LN] = []
886
887 max_level_to_update: Union[int, float] = math.inf # track the depth of the MLS
888 for i, leaf in enumerate(line.leaves):
889 if max_level_to_update == math.inf:
890 had_comma: Optional[int] = None
891 if leaf.bracket_depth + 1 > len(commas):
892 commas.append(0)
893 elif leaf.bracket_depth + 1 < len(commas):
894 had_comma = commas.pop()
895 if (
896 had_comma is not None
897 and multiline_string is not None
898 and multiline_string.bracket_depth == leaf.bracket_depth + 1
899 ):
900 # Have left the level with the MLS, stop tracking commas
901 max_level_to_update = leaf.bracket_depth
902 if had_comma > 0:
903 # MLS was in parens with at least one comma - force split
904 return False
905
906 if leaf.bracket_depth <= max_level_to_update and leaf.type == token.COMMA:
907 # Ignore non-nested trailing comma
908 # directly after MLS/MLS-containing expression
909 ignore_ctxs: List[Optional[LN]] = [None]
910 ignore_ctxs += multiline_string_contexts
911 if not (leaf.prev_sibling in ignore_ctxs and i == len(line.leaves) - 1):
912 commas[leaf.bracket_depth] += 1
913 if max_level_to_update != math.inf:
914 max_level_to_update = min(max_level_to_update, leaf.bracket_depth)
915
916 if is_multiline_string(leaf):
917 if len(multiline_string_contexts) > 0:
918 # >1 multiline string cannot fit on a single line - force split
919 return False
920 multiline_string = leaf
921 ctx: LN = leaf
922 # fetch the leaf components of the MLS in the AST
923 while str(ctx) in line_str:
924 multiline_string_contexts.append(ctx)
925 if ctx.parent is None:
926 break
927 ctx = ctx.parent
928
929 # May not have a triple-quoted multiline string at all,
930 # in case of a regular string with embedded newlines and line continuations
931 if len(multiline_string_contexts) == 0:
932 return True
933
934 return all(val == 0 for val in commas)
935
936
937 def can_be_split(line: Line) -> bool:
938 """Return False if the line cannot be split *for sure*.
939
940 This is not an exhaustive search but a cheap heuristic that we can use to
941 avoid some unfortunate formattings (mostly around wrapping unsplittable code
942 in unnecessary parentheses).
943 """
944 leaves = line.leaves
945 if len(leaves) < 2:
946 return False
947
948 if leaves[0].type == token.STRING and leaves[1].type == token.DOT:
949 call_count = 0
950 dot_count = 0
951 next = leaves[-1]
952 for leaf in leaves[-2::-1]:
953 if leaf.type in OPENING_BRACKETS:
954 if next.type not in CLOSING_BRACKETS:
955 return False
956
957 call_count += 1
958 elif leaf.type == token.DOT:
959 dot_count += 1
960 elif leaf.type == token.NAME:
961 if not (next.type == token.DOT or next.type in OPENING_BRACKETS):
962 return False
963
964 elif leaf.type not in CLOSING_BRACKETS:
965 return False
966
967 if dot_count > 1 and call_count > 1:
968 return False
969
970 return True
971
972
973 def can_omit_invisible_parens(
974 rhs: RHSResult,
975 line_length: int,
976 ) -> bool:
977 """Does `rhs.body` have a shape safe to reformat without optional parens around it?
978
979 Returns True for only a subset of potentially nice looking formattings but
980 the point is to not return false positives that end up producing lines that
981 are too long.
982 """
983 line = rhs.body
984 bt = line.bracket_tracker
985 if not bt.delimiters:
986 # Without delimiters the optional parentheses are useless.
987 return True
988
989 max_priority = bt.max_delimiter_priority()
990 delimiter_count = bt.delimiter_count_with_priority(max_priority)
991 if delimiter_count > 1:
992 # With more than one delimiter of a kind the optional parentheses read better.
993 return False
994
995 if delimiter_count == 1:
996 if (
997 Preview.wrap_multiple_context_managers_in_parens in line.mode
998 and max_priority == COMMA_PRIORITY
999 and rhs.head.is_with_or_async_with_stmt
1000 ):
1001 # For two context manager with statements, the optional parentheses read
1002 # better. In this case, `rhs.body` is the context managers part of
1003 # the with statement. `rhs.head` is the `with (` part on the previous
1004 # line.
1005 return False
1006 # Otherwise it may also read better, but we don't do it today and requires
1007 # careful considerations for all possible cases. See
1008 # https://github.com/psf/black/issues/2156.
1009
1010 if max_priority == DOT_PRIORITY:
1011 # A single stranded method call doesn't require optional parentheses.
1012 return True
1013
1014 assert len(line.leaves) >= 2, "Stranded delimiter"
1015
1016 # With a single delimiter, omit if the expression starts or ends with
1017 # a bracket.
1018 first = line.leaves[0]
1019 second = line.leaves[1]
1020 if first.type in OPENING_BRACKETS and second.type not in CLOSING_BRACKETS:
1021 if _can_omit_opening_paren(line, first=first, line_length=line_length):
1022 return True
1023
1024 # Note: we are not returning False here because a line might have *both*
1025 # a leading opening bracket and a trailing closing bracket. If the
1026 # opening bracket doesn't match our rule, maybe the closing will.
1027
1028 penultimate = line.leaves[-2]
1029 last = line.leaves[-1]
1030
1031 if (
1032 last.type == token.RPAR
1033 or last.type == token.RBRACE
1034 or (
1035 # don't use indexing for omitting optional parentheses;
1036 # it looks weird
1037 last.type == token.RSQB
1038 and last.parent
1039 and last.parent.type != syms.trailer
1040 )
1041 ):
1042 if penultimate.type in OPENING_BRACKETS:
1043 # Empty brackets don't help.
1044 return False
1045
1046 if is_multiline_string(first):
1047 # Additional wrapping of a multiline string in this situation is
1048 # unnecessary.
1049 return True
1050
1051 if _can_omit_closing_paren(line, last=last, line_length=line_length):
1052 return True
1053
1054 return False
1055
1056
1057 def _can_omit_opening_paren(line: Line, *, first: Leaf, line_length: int) -> bool:
1058 """See `can_omit_invisible_parens`."""
1059 remainder = False
1060 length = 4 * line.depth
1061 _index = -1
1062 for _index, leaf, leaf_length in line.enumerate_with_length():
1063 if leaf.type in CLOSING_BRACKETS and leaf.opening_bracket is first:
1064 remainder = True
1065 if remainder:
1066 length += leaf_length
1067 if length > line_length:
1068 break
1069
1070 if leaf.type in OPENING_BRACKETS:
1071 # There are brackets we can further split on.
1072 remainder = False
1073
1074 else:
1075 # checked the entire string and line length wasn't exceeded
1076 if len(line.leaves) == _index + 1:
1077 return True
1078
1079 return False
1080
1081
1082 def _can_omit_closing_paren(line: Line, *, last: Leaf, line_length: int) -> bool:
1083 """See `can_omit_invisible_parens`."""
1084 length = 4 * line.depth
1085 seen_other_brackets = False
1086 for _index, leaf, leaf_length in line.enumerate_with_length():
1087 length += leaf_length
1088 if leaf is last.opening_bracket:
1089 if seen_other_brackets or length <= line_length:
1090 return True
1091
1092 elif leaf.type in OPENING_BRACKETS:
1093 # There are brackets we can further split on.
1094 seen_other_brackets = True
1095
1096 return False
1097
1098
1099 def line_to_string(line: Line) -> str:
1100 """Returns the string representation of @line.
1101
1102 WARNING: This is known to be computationally expensive.
1103 """
1104 return str(line).strip("\n")
1105
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch in the `git diff` format, fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
diff --git a/src/black/lines.py b/src/black/lines.py
--- a/src/black/lines.py
+++ b/src/black/lines.py
@@ -578,6 +578,7 @@
and self.previous_block.previous_block is None
and len(self.previous_block.original_line.leaves) == 1
and self.previous_block.original_line.is_triple_quoted_string
+ and not (current_line.is_class or current_line.is_def)
):
before = 1
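
For reference, the hunk above narrows the preview-mode `module_docstring_newlines` override so that it no longer fires when the line following the module docstring is a top-level `def` or `class`. A minimal sketch of the intended result follows; the input mirrors the snippet quoted in the issue for this patch, and the exact spacing is my reading of that report and of the hunk, not output I have verified. Formatting it with `black --preview` should then keep the stable style's two blank lines after the docstring:

```python
# Sketch of the expected output once the patch is applied (preview style).
"""This is a test case, note blank lines next."""


def go():
    print("Do stuff!")


go()
```

Other top-level statements, such as the trailing `go()` call, are not matched by the added `is_class`/`is_def` check and keep their existing blank-line handling.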
| {"golden_diff": "diff --git a/src/black/lines.py b/src/black/lines.py\n--- a/src/black/lines.py\n+++ b/src/black/lines.py\n@@ -578,6 +578,7 @@\n and self.previous_block.previous_block is None\n and len(self.previous_block.original_line.leaves) == 1\n and self.previous_block.original_line.is_triple_quoted_string\n+ and not (current_line.is_class or current_line.is_def)\n ):\n before = 1\n", "issue": "Conflict in blank lines after module docstring and before function\nUsing macOS:\r\n\r\n```sh\r\n$ black --version\r\nblack, 23.10.1 (compiled: yes)\r\nPython (CPython) 3.10.12\r\n```\r\n\r\nTake this code:\r\n\r\n```python\r\n\"\"\"This is a test case, note blank lines next.\"\"\"\r\ndef go():\r\n print(\"Do stuff!\")\r\ngo()\r\n```\r\n\r\nAnd run this:\r\n\r\n```\r\n$ black --diff blanks.py \r\n--- blanks.py\t2023-11-06 18:04:21.775563+00:00\r\n+++ blanks.py\t2023-11-06 18:04:37.405865+00:00\r\n@@ -1,4 +1,8 @@\r\n \"\"\"This is a test case, note blank lines next.\"\"\"\r\n+\r\n+\r\n def go():\r\n print(\"Do stuff!\")\r\n+\r\n+\r\n go()\r\nwould reformat blanks.py\r\n\r\nAll done! \u2728 \ud83c\udf70 \u2728\r\n1 file would be reformatted.\r\n```\r\n\r\nNote current default behavior is to put two blank lines between the module docstring and following function. This is as expected, quoting https://black.readthedocs.io/en/stable/the_black_code_style/current_style.html\r\n\r\n> It will also insert proper spacing before and after function definitions. It\u2019s one line before and after inner functions and two lines before and after module-level functions and classes. \r\n\r\nNow in preview mode:\r\n\r\n```\r\n$ black --diff blanks.py --preview\r\n--- blanks.py\t2023-11-06 18:04:21.775563+00:00\r\n+++ blanks.py\t2023-11-06 18:04:42.146632+00:00\r\n@@ -1,4 +1,7 @@\r\n \"\"\"This is a test case, note blank lines next.\"\"\"\r\n+\r\n def go():\r\n print(\"Do stuff!\")\r\n+\r\n+\r\n go()\r\n```\r\n\r\nThis now only has one line between the module docstring and following function. This seems like an unintended consequence of #1872.\r\n\r\n\n", "before_files": [{"content": "import itertools\nimport math\nimport sys\nfrom dataclasses import dataclass, field\nfrom typing import (\n Callable,\n Dict,\n Iterator,\n List,\n Optional,\n Sequence,\n Tuple,\n TypeVar,\n Union,\n cast,\n)\n\nfrom black.brackets import COMMA_PRIORITY, DOT_PRIORITY, BracketTracker\nfrom black.mode import Mode, Preview\nfrom black.nodes import (\n BRACKETS,\n CLOSING_BRACKETS,\n OPENING_BRACKETS,\n STANDALONE_COMMENT,\n TEST_DESCENDANTS,\n child_towards,\n is_docstring,\n is_funcdef,\n is_import,\n is_multiline_string,\n is_one_sequence_between,\n is_type_comment,\n is_type_ignore_comment,\n is_with_or_async_with_stmt,\n replace_child,\n syms,\n whitespace,\n)\nfrom black.strings import str_width\nfrom blib2to3.pgen2 import token\nfrom blib2to3.pytree import Leaf, Node\n\n# types\nT = TypeVar(\"T\")\nIndex = int\nLeafID = int\nLN = Union[Leaf, Node]\n\n\n@dataclass\nclass Line:\n \"\"\"Holds leaves and comments. 
Can be printed with `str(line)`.\"\"\"\n\n mode: Mode = field(repr=False)\n depth: int = 0\n leaves: List[Leaf] = field(default_factory=list)\n # keys ordered like `leaves`\n comments: Dict[LeafID, List[Leaf]] = field(default_factory=dict)\n bracket_tracker: BracketTracker = field(default_factory=BracketTracker)\n inside_brackets: bool = False\n should_split_rhs: bool = False\n magic_trailing_comma: Optional[Leaf] = None\n\n def append(\n self, leaf: Leaf, preformatted: bool = False, track_bracket: bool = False\n ) -> None:\n \"\"\"Add a new `leaf` to the end of the line.\n\n Unless `preformatted` is True, the `leaf` will receive a new consistent\n whitespace prefix and metadata applied by :class:`BracketTracker`.\n Trailing commas are maybe removed, unpacked for loop variables are\n demoted from being delimiters.\n\n Inline comments are put aside.\n \"\"\"\n has_value = leaf.type in BRACKETS or bool(leaf.value.strip())\n if not has_value:\n return\n\n if token.COLON == leaf.type and self.is_class_paren_empty:\n del self.leaves[-2:]\n if self.leaves and not preformatted:\n # Note: at this point leaf.prefix should be empty except for\n # imports, for which we only preserve newlines.\n leaf.prefix += whitespace(\n leaf,\n complex_subscript=self.is_complex_subscript(leaf),\n mode=self.mode,\n )\n if self.inside_brackets or not preformatted or track_bracket:\n self.bracket_tracker.mark(leaf)\n if self.mode.magic_trailing_comma:\n if self.has_magic_trailing_comma(leaf):\n self.magic_trailing_comma = leaf\n elif self.has_magic_trailing_comma(leaf, ensure_removable=True):\n self.remove_trailing_comma()\n if not self.append_comment(leaf):\n self.leaves.append(leaf)\n\n def append_safe(self, leaf: Leaf, preformatted: bool = False) -> None:\n \"\"\"Like :func:`append()` but disallow invalid standalone comment structure.\n\n Raises ValueError when any `leaf` is appended after a standalone comment\n or when a standalone comment is not the first leaf on the line.\n \"\"\"\n if self.bracket_tracker.depth == 0:\n if self.is_comment:\n raise ValueError(\"cannot append to standalone comments\")\n\n if self.leaves and leaf.type == STANDALONE_COMMENT:\n raise ValueError(\n \"cannot append standalone comments to a populated line\"\n )\n\n self.append(leaf, preformatted=preformatted)\n\n @property\n def is_comment(self) -> bool:\n \"\"\"Is this line a standalone comment?\"\"\"\n return len(self.leaves) == 1 and self.leaves[0].type == STANDALONE_COMMENT\n\n @property\n def is_decorator(self) -> bool:\n \"\"\"Is this line a decorator?\"\"\"\n return bool(self) and self.leaves[0].type == token.AT\n\n @property\n def is_import(self) -> bool:\n \"\"\"Is this an import line?\"\"\"\n return bool(self) and is_import(self.leaves[0])\n\n @property\n def is_with_or_async_with_stmt(self) -> bool:\n \"\"\"Is this a with_stmt line?\"\"\"\n return bool(self) and is_with_or_async_with_stmt(self.leaves[0])\n\n @property\n def is_class(self) -> bool:\n \"\"\"Is this line a class definition?\"\"\"\n return (\n bool(self)\n and self.leaves[0].type == token.NAME\n and self.leaves[0].value == \"class\"\n )\n\n @property\n def is_stub_class(self) -> bool:\n \"\"\"Is this line a class definition with a body consisting only of \"...\"?\"\"\"\n return self.is_class and self.leaves[-3:] == [\n Leaf(token.DOT, \".\") for _ in range(3)\n ]\n\n @property\n def is_def(self) -> bool:\n \"\"\"Is this a function definition? 
(Also returns True for async defs.)\"\"\"\n try:\n first_leaf = self.leaves[0]\n except IndexError:\n return False\n\n try:\n second_leaf: Optional[Leaf] = self.leaves[1]\n except IndexError:\n second_leaf = None\n return (first_leaf.type == token.NAME and first_leaf.value == \"def\") or (\n first_leaf.type == token.ASYNC\n and second_leaf is not None\n and second_leaf.type == token.NAME\n and second_leaf.value == \"def\"\n )\n\n @property\n def is_stub_def(self) -> bool:\n \"\"\"Is this line a function definition with a body consisting only of \"...\"?\"\"\"\n return self.is_def and self.leaves[-4:] == [Leaf(token.COLON, \":\")] + [\n Leaf(token.DOT, \".\") for _ in range(3)\n ]\n\n @property\n def is_class_paren_empty(self) -> bool:\n \"\"\"Is this a class with no base classes but using parentheses?\n\n Those are unnecessary and should be removed.\n \"\"\"\n return (\n bool(self)\n and len(self.leaves) == 4\n and self.is_class\n and self.leaves[2].type == token.LPAR\n and self.leaves[2].value == \"(\"\n and self.leaves[3].type == token.RPAR\n and self.leaves[3].value == \")\"\n )\n\n @property\n def is_triple_quoted_string(self) -> bool:\n \"\"\"Is the line a triple quoted string?\"\"\"\n if not self or self.leaves[0].type != token.STRING:\n return False\n value = self.leaves[0].value\n if value.startswith(('\"\"\"', \"'''\")):\n return True\n if Preview.accept_raw_docstrings in self.mode and value.startswith(\n (\"r'''\", 'r\"\"\"', \"R'''\", 'R\"\"\"')\n ):\n return True\n return False\n\n @property\n def opens_block(self) -> bool:\n \"\"\"Does this line open a new level of indentation.\"\"\"\n if len(self.leaves) == 0:\n return False\n return self.leaves[-1].type == token.COLON\n\n def is_fmt_pass_converted(\n self, *, first_leaf_matches: Optional[Callable[[Leaf], bool]] = None\n ) -> bool:\n \"\"\"Is this line converted from fmt off/skip code?\n\n If first_leaf_matches is not None, it only returns True if the first\n leaf of converted code matches.\n \"\"\"\n if len(self.leaves) != 1:\n return False\n leaf = self.leaves[0]\n if (\n leaf.type != STANDALONE_COMMENT\n or leaf.fmt_pass_converted_first_leaf is None\n ):\n return False\n return first_leaf_matches is None or first_leaf_matches(\n leaf.fmt_pass_converted_first_leaf\n )\n\n def contains_standalone_comments(self, depth_limit: int = sys.maxsize) -> bool:\n \"\"\"If so, needs to be split before emitting.\"\"\"\n for leaf in self.leaves:\n if leaf.type == STANDALONE_COMMENT and leaf.bracket_depth <= depth_limit:\n return True\n\n return False\n\n def contains_implicit_multiline_string_with_comments(self) -> bool:\n \"\"\"Chck if we have an implicit multiline string with comments on the line\"\"\"\n for leaf_type, leaf_group_iterator in itertools.groupby(\n self.leaves, lambda leaf: leaf.type\n ):\n if leaf_type != token.STRING:\n continue\n leaf_list = list(leaf_group_iterator)\n if len(leaf_list) == 1:\n continue\n for leaf in leaf_list:\n if self.comments_after(leaf):\n return True\n return False\n\n def contains_uncollapsable_type_comments(self) -> bool:\n ignored_ids = set()\n try:\n last_leaf = self.leaves[-1]\n ignored_ids.add(id(last_leaf))\n if last_leaf.type == token.COMMA or (\n last_leaf.type == token.RPAR and not last_leaf.value\n ):\n # When trailing commas or optional parens are inserted by Black for\n # consistency, comments after the previous last element are not moved\n # (they don't have to, rendering will still be correct). 
So we ignore\n # trailing commas and invisible.\n last_leaf = self.leaves[-2]\n ignored_ids.add(id(last_leaf))\n except IndexError:\n return False\n\n # A type comment is uncollapsable if it is attached to a leaf\n # that isn't at the end of the line (since that could cause it\n # to get associated to a different argument) or if there are\n # comments before it (since that could cause it to get hidden\n # behind a comment.\n comment_seen = False\n for leaf_id, comments in self.comments.items():\n for comment in comments:\n if is_type_comment(comment):\n if comment_seen or (\n not is_type_ignore_comment(comment)\n and leaf_id not in ignored_ids\n ):\n return True\n\n comment_seen = True\n\n return False\n\n def contains_unsplittable_type_ignore(self) -> bool:\n if not self.leaves:\n return False\n\n # If a 'type: ignore' is attached to the end of a line, we\n # can't split the line, because we can't know which of the\n # subexpressions the ignore was meant to apply to.\n #\n # We only want this to apply to actual physical lines from the\n # original source, though: we don't want the presence of a\n # 'type: ignore' at the end of a multiline expression to\n # justify pushing it all onto one line. Thus we\n # (unfortunately) need to check the actual source lines and\n # only report an unsplittable 'type: ignore' if this line was\n # one line in the original code.\n\n # Grab the first and last line numbers, skipping generated leaves\n first_line = next((leaf.lineno for leaf in self.leaves if leaf.lineno != 0), 0)\n last_line = next(\n (leaf.lineno for leaf in reversed(self.leaves) if leaf.lineno != 0), 0\n )\n\n if first_line == last_line:\n # We look at the last two leaves since a comma or an\n # invisible paren could have been added at the end of the\n # line.\n for node in self.leaves[-2:]:\n for comment in self.comments.get(id(node), []):\n if is_type_ignore_comment(comment):\n return True\n\n return False\n\n def contains_multiline_strings(self) -> bool:\n return any(is_multiline_string(leaf) for leaf in self.leaves)\n\n def has_magic_trailing_comma(\n self, closing: Leaf, ensure_removable: bool = False\n ) -> bool:\n \"\"\"Return True if we have a magic trailing comma, that is when:\n - there's a trailing comma here\n - it's not a one-tuple\n - it's not a single-element subscript\n Additionally, if ensure_removable:\n - it's not from square bracket indexing\n (specifically, single-element square bracket indexing)\n \"\"\"\n if not (\n closing.type in CLOSING_BRACKETS\n and self.leaves\n and self.leaves[-1].type == token.COMMA\n ):\n return False\n\n if closing.type == token.RBRACE:\n return True\n\n if closing.type == token.RSQB:\n if (\n closing.parent\n and closing.parent.type == syms.trailer\n and closing.opening_bracket\n and is_one_sequence_between(\n closing.opening_bracket,\n closing,\n self.leaves,\n brackets=(token.LSQB, token.RSQB),\n )\n ):\n return False\n\n if not ensure_removable:\n return True\n\n comma = self.leaves[-1]\n if comma.parent is None:\n return False\n return (\n comma.parent.type != syms.subscriptlist\n or closing.opening_bracket is None\n or not is_one_sequence_between(\n closing.opening_bracket,\n closing,\n self.leaves,\n brackets=(token.LSQB, token.RSQB),\n )\n )\n\n if self.is_import:\n return True\n\n if closing.opening_bracket is not None and not is_one_sequence_between(\n closing.opening_bracket, closing, self.leaves\n ):\n return True\n\n return False\n\n def append_comment(self, comment: Leaf) -> bool:\n \"\"\"Add an inline or standalone comment to 
the line.\"\"\"\n if (\n comment.type == STANDALONE_COMMENT\n and self.bracket_tracker.any_open_brackets()\n ):\n comment.prefix = \"\"\n return False\n\n if comment.type != token.COMMENT:\n return False\n\n if not self.leaves:\n comment.type = STANDALONE_COMMENT\n comment.prefix = \"\"\n return False\n\n last_leaf = self.leaves[-1]\n if (\n last_leaf.type == token.RPAR\n and not last_leaf.value\n and last_leaf.parent\n and len(list(last_leaf.parent.leaves())) <= 3\n and not is_type_comment(comment)\n ):\n # Comments on an optional parens wrapping a single leaf should belong to\n # the wrapped node except if it's a type comment. Pinning the comment like\n # this avoids unstable formatting caused by comment migration.\n if len(self.leaves) < 2:\n comment.type = STANDALONE_COMMENT\n comment.prefix = \"\"\n return False\n\n last_leaf = self.leaves[-2]\n self.comments.setdefault(id(last_leaf), []).append(comment)\n return True\n\n def comments_after(self, leaf: Leaf) -> List[Leaf]:\n \"\"\"Generate comments that should appear directly after `leaf`.\"\"\"\n return self.comments.get(id(leaf), [])\n\n def remove_trailing_comma(self) -> None:\n \"\"\"Remove the trailing comma and moves the comments attached to it.\"\"\"\n trailing_comma = self.leaves.pop()\n trailing_comma_comments = self.comments.pop(id(trailing_comma), [])\n self.comments.setdefault(id(self.leaves[-1]), []).extend(\n trailing_comma_comments\n )\n\n def is_complex_subscript(self, leaf: Leaf) -> bool:\n \"\"\"Return True iff `leaf` is part of a slice with non-trivial exprs.\"\"\"\n open_lsqb = self.bracket_tracker.get_open_lsqb()\n if open_lsqb is None:\n return False\n\n subscript_start = open_lsqb.next_sibling\n\n if isinstance(subscript_start, Node):\n if subscript_start.type == syms.listmaker:\n return False\n\n if subscript_start.type == syms.subscriptlist:\n subscript_start = child_towards(subscript_start, leaf)\n return subscript_start is not None and any(\n n.type in TEST_DESCENDANTS for n in subscript_start.pre_order()\n )\n\n def enumerate_with_length(\n self, reversed: bool = False\n ) -> Iterator[Tuple[Index, Leaf, int]]:\n \"\"\"Return an enumeration of leaves with their length.\n\n Stops prematurely on multiline strings and standalone comments.\n \"\"\"\n op = cast(\n Callable[[Sequence[Leaf]], Iterator[Tuple[Index, Leaf]]],\n enumerate_reversed if reversed else enumerate,\n )\n for index, leaf in op(self.leaves):\n length = len(leaf.prefix) + len(leaf.value)\n if \"\\n\" in leaf.value:\n return # Multiline strings, we can't continue.\n\n for comment in self.comments_after(leaf):\n length += len(comment.value)\n\n yield index, leaf, length\n\n def clone(self) -> \"Line\":\n return Line(\n mode=self.mode,\n depth=self.depth,\n inside_brackets=self.inside_brackets,\n should_split_rhs=self.should_split_rhs,\n magic_trailing_comma=self.magic_trailing_comma,\n )\n\n def __str__(self) -> str:\n \"\"\"Render the line.\"\"\"\n if not self:\n return \"\\n\"\n\n indent = \" \" * self.depth\n leaves = iter(self.leaves)\n first = next(leaves)\n res = f\"{first.prefix}{indent}{first.value}\"\n for leaf in leaves:\n res += str(leaf)\n for comment in itertools.chain.from_iterable(self.comments.values()):\n res += str(comment)\n\n return res + \"\\n\"\n\n def __bool__(self) -> bool:\n \"\"\"Return True if the line has leaves or comments.\"\"\"\n return bool(self.leaves or self.comments)\n\n\n@dataclass\nclass RHSResult:\n \"\"\"Intermediate split result from a right hand split.\"\"\"\n\n head: Line\n body: Line\n tail: Line\n 
opening_bracket: Leaf\n closing_bracket: Leaf\n\n\n@dataclass\nclass LinesBlock:\n \"\"\"Class that holds information about a block of formatted lines.\n\n This is introduced so that the EmptyLineTracker can look behind the standalone\n comments and adjust their empty lines for class or def lines.\n \"\"\"\n\n mode: Mode\n previous_block: Optional[\"LinesBlock\"]\n original_line: Line\n before: int = 0\n content_lines: List[str] = field(default_factory=list)\n after: int = 0\n\n def all_lines(self) -> List[str]:\n empty_line = str(Line(mode=self.mode))\n return (\n [empty_line * self.before] + self.content_lines + [empty_line * self.after]\n )\n\n\n@dataclass\nclass EmptyLineTracker:\n \"\"\"Provides a stateful method that returns the number of potential extra\n empty lines needed before and after the currently processed line.\n\n Note: this tracker works on lines that haven't been split yet. It assumes\n the prefix of the first leaf consists of optional newlines. Those newlines\n are consumed by `maybe_empty_lines()` and included in the computation.\n \"\"\"\n\n mode: Mode\n previous_line: Optional[Line] = None\n previous_block: Optional[LinesBlock] = None\n previous_defs: List[Line] = field(default_factory=list)\n semantic_leading_comment: Optional[LinesBlock] = None\n\n def maybe_empty_lines(self, current_line: Line) -> LinesBlock:\n \"\"\"Return the number of extra empty lines before and after the `current_line`.\n\n This is for separating `def`, `async def` and `class` with extra empty\n lines (two on module-level).\n \"\"\"\n before, after = self._maybe_empty_lines(current_line)\n previous_after = self.previous_block.after if self.previous_block else 0\n before = (\n # Black should not insert empty lines at the beginning\n # of the file\n 0\n if self.previous_line is None\n else before - previous_after\n )\n if (\n Preview.module_docstring_newlines in current_line.mode\n and self.previous_block\n and self.previous_block.previous_block is None\n and len(self.previous_block.original_line.leaves) == 1\n and self.previous_block.original_line.is_triple_quoted_string\n ):\n before = 1\n\n block = LinesBlock(\n mode=self.mode,\n previous_block=self.previous_block,\n original_line=current_line,\n before=before,\n after=after,\n )\n\n # Maintain the semantic_leading_comment state.\n if current_line.is_comment:\n if self.previous_line is None or (\n not self.previous_line.is_decorator\n # `or before` means this comment already has an empty line before\n and (not self.previous_line.is_comment or before)\n and (self.semantic_leading_comment is None or before)\n ):\n self.semantic_leading_comment = block\n # `or before` means this decorator already has an empty line before\n elif not current_line.is_decorator or before:\n self.semantic_leading_comment = None\n\n self.previous_line = current_line\n self.previous_block = block\n return block\n\n def _maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]:\n max_allowed = 1\n if current_line.depth == 0:\n max_allowed = 1 if self.mode.is_pyi else 2\n if current_line.leaves:\n # Consume the first leaf's extra newlines.\n first_leaf = current_line.leaves[0]\n before = first_leaf.prefix.count(\"\\n\")\n before = min(before, max_allowed)\n first_leaf.prefix = \"\"\n else:\n before = 0\n\n user_had_newline = bool(before)\n depth = current_line.depth\n\n previous_def = None\n while self.previous_defs and self.previous_defs[-1].depth >= depth:\n previous_def = self.previous_defs.pop()\n\n if previous_def is not None:\n assert self.previous_line is 
not None\n if self.mode.is_pyi:\n if depth and not current_line.is_def and self.previous_line.is_def:\n # Empty lines between attributes and methods should be preserved.\n before = 1 if user_had_newline else 0\n elif (\n Preview.blank_line_after_nested_stub_class in self.mode\n and previous_def.is_class\n and not previous_def.is_stub_class\n ):\n before = 1\n elif depth:\n before = 0\n else:\n before = 1\n else:\n if depth:\n before = 1\n elif (\n not depth\n and previous_def.depth\n and current_line.leaves[-1].type == token.COLON\n and (\n current_line.leaves[0].value\n not in (\"with\", \"try\", \"for\", \"while\", \"if\", \"match\")\n )\n ):\n # We shouldn't add two newlines between an indented function and\n # a dependent non-indented clause. This is to avoid issues with\n # conditional function definitions that are technically top-level\n # and therefore get two trailing newlines, but look weird and\n # inconsistent when they're followed by elif, else, etc. This is\n # worse because these functions only get *one* preceding newline\n # already.\n before = 1\n else:\n before = 2\n\n if current_line.is_decorator or current_line.is_def or current_line.is_class:\n return self._maybe_empty_lines_for_class_or_def(\n current_line, before, user_had_newline\n )\n\n if (\n self.previous_line\n and self.previous_line.is_import\n and not current_line.is_import\n and not current_line.is_fmt_pass_converted(first_leaf_matches=is_import)\n and depth == self.previous_line.depth\n ):\n return (before or 1), 0\n\n if (\n self.previous_line\n and self.previous_line.is_class\n and current_line.is_triple_quoted_string\n ):\n if Preview.no_blank_line_before_class_docstring in current_line.mode:\n return 0, 1\n return before, 1\n\n is_empty_first_line_ok = (\n Preview.allow_empty_first_line_before_new_block_or_comment\n in current_line.mode\n and (\n # If it's a standalone comment\n current_line.leaves[0].type == STANDALONE_COMMENT\n # If it opens a new block\n or current_line.opens_block\n # If it's a triple quote comment (but not at the start of a funcdef)\n or (\n is_docstring(current_line.leaves[0])\n and self.previous_line\n and self.previous_line.leaves[0]\n and self.previous_line.leaves[0].parent\n and not is_funcdef(self.previous_line.leaves[0].parent)\n )\n )\n )\n\n if (\n self.previous_line\n and self.previous_line.opens_block\n and not is_empty_first_line_ok\n ):\n return 0, 0\n return before, 0\n\n def _maybe_empty_lines_for_class_or_def( # noqa: C901\n self, current_line: Line, before: int, user_had_newline: bool\n ) -> Tuple[int, int]:\n if not current_line.is_decorator:\n self.previous_defs.append(current_line)\n if self.previous_line is None:\n # Don't insert empty lines before the first line in the file.\n return 0, 0\n\n if self.previous_line.is_decorator:\n if self.mode.is_pyi and current_line.is_stub_class:\n # Insert an empty line after a decorated stub class\n return 0, 1\n\n return 0, 0\n\n if self.previous_line.depth < current_line.depth and (\n self.previous_line.is_class or self.previous_line.is_def\n ):\n return 0, 0\n\n comment_to_add_newlines: Optional[LinesBlock] = None\n if (\n self.previous_line.is_comment\n and self.previous_line.depth == current_line.depth\n and before == 0\n ):\n slc = self.semantic_leading_comment\n if (\n slc is not None\n and slc.previous_block is not None\n and not slc.previous_block.original_line.is_class\n and not slc.previous_block.original_line.opens_block\n and slc.before <= 1\n ):\n comment_to_add_newlines = slc\n else:\n return 0, 0\n\n if 
self.mode.is_pyi:\n if current_line.is_class or self.previous_line.is_class:\n if self.previous_line.depth < current_line.depth:\n newlines = 0\n elif self.previous_line.depth > current_line.depth:\n newlines = 1\n elif current_line.is_stub_class and self.previous_line.is_stub_class:\n # No blank line between classes with an empty body\n newlines = 0\n else:\n newlines = 1\n # Remove case `self.previous_line.depth > current_line.depth` below when\n # this becomes stable.\n #\n # Don't inspect the previous line if it's part of the body of the previous\n # statement in the same level, we always want a blank line if there's\n # something with a body preceding.\n elif (\n Preview.blank_line_between_nested_and_def_stub_file in current_line.mode\n and self.previous_line.depth > current_line.depth\n ):\n newlines = 1\n elif (\n current_line.is_def or current_line.is_decorator\n ) and not self.previous_line.is_def:\n if current_line.depth:\n # In classes empty lines between attributes and methods should\n # be preserved.\n newlines = min(1, before)\n else:\n # Blank line between a block of functions (maybe with preceding\n # decorators) and a block of non-functions\n newlines = 1\n elif self.previous_line.depth > current_line.depth:\n newlines = 1\n else:\n newlines = 0\n else:\n newlines = 1 if current_line.depth else 2\n # If a user has left no space after a dummy implementation, don't insert\n # new lines. This is useful for instance for @overload or Protocols.\n if (\n Preview.dummy_implementations in self.mode\n and self.previous_line.is_stub_def\n and not user_had_newline\n ):\n newlines = 0\n if comment_to_add_newlines is not None:\n previous_block = comment_to_add_newlines.previous_block\n if previous_block is not None:\n comment_to_add_newlines.before = (\n max(comment_to_add_newlines.before, newlines) - previous_block.after\n )\n newlines = 0\n return newlines, 0\n\n\ndef enumerate_reversed(sequence: Sequence[T]) -> Iterator[Tuple[Index, T]]:\n \"\"\"Like `reversed(enumerate(sequence))` if that were possible.\"\"\"\n index = len(sequence) - 1\n for element in reversed(sequence):\n yield (index, element)\n index -= 1\n\n\ndef append_leaves(\n new_line: Line, old_line: Line, leaves: List[Leaf], preformatted: bool = False\n) -> None:\n \"\"\"\n Append leaves (taken from @old_line) to @new_line, making sure to fix the\n underlying Node structure where appropriate.\n\n All of the leaves in @leaves are duplicated. The duplicates are then\n appended to @new_line and used to replace their originals in the underlying\n Node structure. 
Any comments attached to the old leaves are reattached to\n the new leaves.\n\n Pre-conditions:\n set(@leaves) is a subset of set(@old_line.leaves).\n \"\"\"\n for old_leaf in leaves:\n new_leaf = Leaf(old_leaf.type, old_leaf.value)\n replace_child(old_leaf, new_leaf)\n new_line.append(new_leaf, preformatted=preformatted)\n\n for comment_leaf in old_line.comments_after(old_leaf):\n new_line.append(comment_leaf, preformatted=True)\n\n\ndef is_line_short_enough( # noqa: C901\n line: Line, *, mode: Mode, line_str: str = \"\"\n) -> bool:\n \"\"\"For non-multiline strings, return True if `line` is no longer than `line_length`.\n For multiline strings, looks at the context around `line` to determine\n if it should be inlined or split up.\n Uses the provided `line_str` rendering, if any, otherwise computes a new one.\n \"\"\"\n if not line_str:\n line_str = line_to_string(line)\n\n width = str_width if mode.preview else len\n\n if Preview.multiline_string_handling not in mode:\n return (\n width(line_str) <= mode.line_length\n and \"\\n\" not in line_str # multiline strings\n and not line.contains_standalone_comments()\n )\n\n if line.contains_standalone_comments():\n return False\n if \"\\n\" not in line_str:\n # No multiline strings (MLS) present\n return width(line_str) <= mode.line_length\n\n first, *_, last = line_str.split(\"\\n\")\n if width(first) > mode.line_length or width(last) > mode.line_length:\n return False\n\n # Traverse the AST to examine the context of the multiline string (MLS),\n # tracking aspects such as depth and comma existence,\n # to determine whether to split the MLS or keep it together.\n # Depth (which is based on the existing bracket_depth concept)\n # is needed to determine nesting level of the MLS.\n # Includes special case for trailing commas.\n commas: List[int] = [] # tracks number of commas per depth level\n multiline_string: Optional[Leaf] = None\n # store the leaves that contain parts of the MLS\n multiline_string_contexts: List[LN] = []\n\n max_level_to_update: Union[int, float] = math.inf # track the depth of the MLS\n for i, leaf in enumerate(line.leaves):\n if max_level_to_update == math.inf:\n had_comma: Optional[int] = None\n if leaf.bracket_depth + 1 > len(commas):\n commas.append(0)\n elif leaf.bracket_depth + 1 < len(commas):\n had_comma = commas.pop()\n if (\n had_comma is not None\n and multiline_string is not None\n and multiline_string.bracket_depth == leaf.bracket_depth + 1\n ):\n # Have left the level with the MLS, stop tracking commas\n max_level_to_update = leaf.bracket_depth\n if had_comma > 0:\n # MLS was in parens with at least one comma - force split\n return False\n\n if leaf.bracket_depth <= max_level_to_update and leaf.type == token.COMMA:\n # Ignore non-nested trailing comma\n # directly after MLS/MLS-containing expression\n ignore_ctxs: List[Optional[LN]] = [None]\n ignore_ctxs += multiline_string_contexts\n if not (leaf.prev_sibling in ignore_ctxs and i == len(line.leaves) - 1):\n commas[leaf.bracket_depth] += 1\n if max_level_to_update != math.inf:\n max_level_to_update = min(max_level_to_update, leaf.bracket_depth)\n\n if is_multiline_string(leaf):\n if len(multiline_string_contexts) > 0:\n # >1 multiline string cannot fit on a single line - force split\n return False\n multiline_string = leaf\n ctx: LN = leaf\n # fetch the leaf components of the MLS in the AST\n while str(ctx) in line_str:\n multiline_string_contexts.append(ctx)\n if ctx.parent is None:\n break\n ctx = ctx.parent\n\n # May not have a triple-quoted multiline 
string at all,\n # in case of a regular string with embedded newlines and line continuations\n if len(multiline_string_contexts) == 0:\n return True\n\n return all(val == 0 for val in commas)\n\n\ndef can_be_split(line: Line) -> bool:\n \"\"\"Return False if the line cannot be split *for sure*.\n\n This is not an exhaustive search but a cheap heuristic that we can use to\n avoid some unfortunate formattings (mostly around wrapping unsplittable code\n in unnecessary parentheses).\n \"\"\"\n leaves = line.leaves\n if len(leaves) < 2:\n return False\n\n if leaves[0].type == token.STRING and leaves[1].type == token.DOT:\n call_count = 0\n dot_count = 0\n next = leaves[-1]\n for leaf in leaves[-2::-1]:\n if leaf.type in OPENING_BRACKETS:\n if next.type not in CLOSING_BRACKETS:\n return False\n\n call_count += 1\n elif leaf.type == token.DOT:\n dot_count += 1\n elif leaf.type == token.NAME:\n if not (next.type == token.DOT or next.type in OPENING_BRACKETS):\n return False\n\n elif leaf.type not in CLOSING_BRACKETS:\n return False\n\n if dot_count > 1 and call_count > 1:\n return False\n\n return True\n\n\ndef can_omit_invisible_parens(\n rhs: RHSResult,\n line_length: int,\n) -> bool:\n \"\"\"Does `rhs.body` have a shape safe to reformat without optional parens around it?\n\n Returns True for only a subset of potentially nice looking formattings but\n the point is to not return false positives that end up producing lines that\n are too long.\n \"\"\"\n line = rhs.body\n bt = line.bracket_tracker\n if not bt.delimiters:\n # Without delimiters the optional parentheses are useless.\n return True\n\n max_priority = bt.max_delimiter_priority()\n delimiter_count = bt.delimiter_count_with_priority(max_priority)\n if delimiter_count > 1:\n # With more than one delimiter of a kind the optional parentheses read better.\n return False\n\n if delimiter_count == 1:\n if (\n Preview.wrap_multiple_context_managers_in_parens in line.mode\n and max_priority == COMMA_PRIORITY\n and rhs.head.is_with_or_async_with_stmt\n ):\n # For two context manager with statements, the optional parentheses read\n # better. In this case, `rhs.body` is the context managers part of\n # the with statement. `rhs.head` is the `with (` part on the previous\n # line.\n return False\n # Otherwise it may also read better, but we don't do it today and requires\n # careful considerations for all possible cases. See\n # https://github.com/psf/black/issues/2156.\n\n if max_priority == DOT_PRIORITY:\n # A single stranded method call doesn't require optional parentheses.\n return True\n\n assert len(line.leaves) >= 2, \"Stranded delimiter\"\n\n # With a single delimiter, omit if the expression starts or ends with\n # a bracket.\n first = line.leaves[0]\n second = line.leaves[1]\n if first.type in OPENING_BRACKETS and second.type not in CLOSING_BRACKETS:\n if _can_omit_opening_paren(line, first=first, line_length=line_length):\n return True\n\n # Note: we are not returning False here because a line might have *both*\n # a leading opening bracket and a trailing closing bracket. 
If the\n # opening bracket doesn't match our rule, maybe the closing will.\n\n penultimate = line.leaves[-2]\n last = line.leaves[-1]\n\n if (\n last.type == token.RPAR\n or last.type == token.RBRACE\n or (\n # don't use indexing for omitting optional parentheses;\n # it looks weird\n last.type == token.RSQB\n and last.parent\n and last.parent.type != syms.trailer\n )\n ):\n if penultimate.type in OPENING_BRACKETS:\n # Empty brackets don't help.\n return False\n\n if is_multiline_string(first):\n # Additional wrapping of a multiline string in this situation is\n # unnecessary.\n return True\n\n if _can_omit_closing_paren(line, last=last, line_length=line_length):\n return True\n\n return False\n\n\ndef _can_omit_opening_paren(line: Line, *, first: Leaf, line_length: int) -> bool:\n \"\"\"See `can_omit_invisible_parens`.\"\"\"\n remainder = False\n length = 4 * line.depth\n _index = -1\n for _index, leaf, leaf_length in line.enumerate_with_length():\n if leaf.type in CLOSING_BRACKETS and leaf.opening_bracket is first:\n remainder = True\n if remainder:\n length += leaf_length\n if length > line_length:\n break\n\n if leaf.type in OPENING_BRACKETS:\n # There are brackets we can further split on.\n remainder = False\n\n else:\n # checked the entire string and line length wasn't exceeded\n if len(line.leaves) == _index + 1:\n return True\n\n return False\n\n\ndef _can_omit_closing_paren(line: Line, *, last: Leaf, line_length: int) -> bool:\n \"\"\"See `can_omit_invisible_parens`.\"\"\"\n length = 4 * line.depth\n seen_other_brackets = False\n for _index, leaf, leaf_length in line.enumerate_with_length():\n length += leaf_length\n if leaf is last.opening_bracket:\n if seen_other_brackets or length <= line_length:\n return True\n\n elif leaf.type in OPENING_BRACKETS:\n # There are brackets we can further split on.\n seen_other_brackets = True\n\n return False\n\n\ndef line_to_string(line: Line) -> str:\n \"\"\"Returns the string representation of @line.\n\n WARNING: This is known to be computationally expensive.\n \"\"\"\n return str(line).strip(\"\\n\")\n", "path": "src/black/lines.py"}], "after_files": [{"content": "import itertools\nimport math\nimport sys\nfrom dataclasses import dataclass, field\nfrom typing import (\n Callable,\n Dict,\n Iterator,\n List,\n Optional,\n Sequence,\n Tuple,\n TypeVar,\n Union,\n cast,\n)\n\nfrom black.brackets import COMMA_PRIORITY, DOT_PRIORITY, BracketTracker\nfrom black.mode import Mode, Preview\nfrom black.nodes import (\n BRACKETS,\n CLOSING_BRACKETS,\n OPENING_BRACKETS,\n STANDALONE_COMMENT,\n TEST_DESCENDANTS,\n child_towards,\n is_docstring,\n is_funcdef,\n is_import,\n is_multiline_string,\n is_one_sequence_between,\n is_type_comment,\n is_type_ignore_comment,\n is_with_or_async_with_stmt,\n replace_child,\n syms,\n whitespace,\n)\nfrom black.strings import str_width\nfrom blib2to3.pgen2 import token\nfrom blib2to3.pytree import Leaf, Node\n\n# types\nT = TypeVar(\"T\")\nIndex = int\nLeafID = int\nLN = Union[Leaf, Node]\n\n\n@dataclass\nclass Line:\n \"\"\"Holds leaves and comments. 
Can be printed with `str(line)`.\"\"\"\n\n mode: Mode = field(repr=False)\n depth: int = 0\n leaves: List[Leaf] = field(default_factory=list)\n # keys ordered like `leaves`\n comments: Dict[LeafID, List[Leaf]] = field(default_factory=dict)\n bracket_tracker: BracketTracker = field(default_factory=BracketTracker)\n inside_brackets: bool = False\n should_split_rhs: bool = False\n magic_trailing_comma: Optional[Leaf] = None\n\n def append(\n self, leaf: Leaf, preformatted: bool = False, track_bracket: bool = False\n ) -> None:\n \"\"\"Add a new `leaf` to the end of the line.\n\n Unless `preformatted` is True, the `leaf` will receive a new consistent\n whitespace prefix and metadata applied by :class:`BracketTracker`.\n Trailing commas are maybe removed, unpacked for loop variables are\n demoted from being delimiters.\n\n Inline comments are put aside.\n \"\"\"\n has_value = leaf.type in BRACKETS or bool(leaf.value.strip())\n if not has_value:\n return\n\n if token.COLON == leaf.type and self.is_class_paren_empty:\n del self.leaves[-2:]\n if self.leaves and not preformatted:\n # Note: at this point leaf.prefix should be empty except for\n # imports, for which we only preserve newlines.\n leaf.prefix += whitespace(\n leaf,\n complex_subscript=self.is_complex_subscript(leaf),\n mode=self.mode,\n )\n if self.inside_brackets or not preformatted or track_bracket:\n self.bracket_tracker.mark(leaf)\n if self.mode.magic_trailing_comma:\n if self.has_magic_trailing_comma(leaf):\n self.magic_trailing_comma = leaf\n elif self.has_magic_trailing_comma(leaf, ensure_removable=True):\n self.remove_trailing_comma()\n if not self.append_comment(leaf):\n self.leaves.append(leaf)\n\n def append_safe(self, leaf: Leaf, preformatted: bool = False) -> None:\n \"\"\"Like :func:`append()` but disallow invalid standalone comment structure.\n\n Raises ValueError when any `leaf` is appended after a standalone comment\n or when a standalone comment is not the first leaf on the line.\n \"\"\"\n if self.bracket_tracker.depth == 0:\n if self.is_comment:\n raise ValueError(\"cannot append to standalone comments\")\n\n if self.leaves and leaf.type == STANDALONE_COMMENT:\n raise ValueError(\n \"cannot append standalone comments to a populated line\"\n )\n\n self.append(leaf, preformatted=preformatted)\n\n @property\n def is_comment(self) -> bool:\n \"\"\"Is this line a standalone comment?\"\"\"\n return len(self.leaves) == 1 and self.leaves[0].type == STANDALONE_COMMENT\n\n @property\n def is_decorator(self) -> bool:\n \"\"\"Is this line a decorator?\"\"\"\n return bool(self) and self.leaves[0].type == token.AT\n\n @property\n def is_import(self) -> bool:\n \"\"\"Is this an import line?\"\"\"\n return bool(self) and is_import(self.leaves[0])\n\n @property\n def is_with_or_async_with_stmt(self) -> bool:\n \"\"\"Is this a with_stmt line?\"\"\"\n return bool(self) and is_with_or_async_with_stmt(self.leaves[0])\n\n @property\n def is_class(self) -> bool:\n \"\"\"Is this line a class definition?\"\"\"\n return (\n bool(self)\n and self.leaves[0].type == token.NAME\n and self.leaves[0].value == \"class\"\n )\n\n @property\n def is_stub_class(self) -> bool:\n \"\"\"Is this line a class definition with a body consisting only of \"...\"?\"\"\"\n return self.is_class and self.leaves[-3:] == [\n Leaf(token.DOT, \".\") for _ in range(3)\n ]\n\n @property\n def is_def(self) -> bool:\n \"\"\"Is this a function definition? 
(Also returns True for async defs.)\"\"\"\n try:\n first_leaf = self.leaves[0]\n except IndexError:\n return False\n\n try:\n second_leaf: Optional[Leaf] = self.leaves[1]\n except IndexError:\n second_leaf = None\n return (first_leaf.type == token.NAME and first_leaf.value == \"def\") or (\n first_leaf.type == token.ASYNC\n and second_leaf is not None\n and second_leaf.type == token.NAME\n and second_leaf.value == \"def\"\n )\n\n @property\n def is_stub_def(self) -> bool:\n \"\"\"Is this line a function definition with a body consisting only of \"...\"?\"\"\"\n return self.is_def and self.leaves[-4:] == [Leaf(token.COLON, \":\")] + [\n Leaf(token.DOT, \".\") for _ in range(3)\n ]\n\n @property\n def is_class_paren_empty(self) -> bool:\n \"\"\"Is this a class with no base classes but using parentheses?\n\n Those are unnecessary and should be removed.\n \"\"\"\n return (\n bool(self)\n and len(self.leaves) == 4\n and self.is_class\n and self.leaves[2].type == token.LPAR\n and self.leaves[2].value == \"(\"\n and self.leaves[3].type == token.RPAR\n and self.leaves[3].value == \")\"\n )\n\n @property\n def is_triple_quoted_string(self) -> bool:\n \"\"\"Is the line a triple quoted string?\"\"\"\n if not self or self.leaves[0].type != token.STRING:\n return False\n value = self.leaves[0].value\n if value.startswith(('\"\"\"', \"'''\")):\n return True\n if Preview.accept_raw_docstrings in self.mode and value.startswith(\n (\"r'''\", 'r\"\"\"', \"R'''\", 'R\"\"\"')\n ):\n return True\n return False\n\n @property\n def opens_block(self) -> bool:\n \"\"\"Does this line open a new level of indentation.\"\"\"\n if len(self.leaves) == 0:\n return False\n return self.leaves[-1].type == token.COLON\n\n def is_fmt_pass_converted(\n self, *, first_leaf_matches: Optional[Callable[[Leaf], bool]] = None\n ) -> bool:\n \"\"\"Is this line converted from fmt off/skip code?\n\n If first_leaf_matches is not None, it only returns True if the first\n leaf of converted code matches.\n \"\"\"\n if len(self.leaves) != 1:\n return False\n leaf = self.leaves[0]\n if (\n leaf.type != STANDALONE_COMMENT\n or leaf.fmt_pass_converted_first_leaf is None\n ):\n return False\n return first_leaf_matches is None or first_leaf_matches(\n leaf.fmt_pass_converted_first_leaf\n )\n\n def contains_standalone_comments(self, depth_limit: int = sys.maxsize) -> bool:\n \"\"\"If so, needs to be split before emitting.\"\"\"\n for leaf in self.leaves:\n if leaf.type == STANDALONE_COMMENT and leaf.bracket_depth <= depth_limit:\n return True\n\n return False\n\n def contains_implicit_multiline_string_with_comments(self) -> bool:\n \"\"\"Chck if we have an implicit multiline string with comments on the line\"\"\"\n for leaf_type, leaf_group_iterator in itertools.groupby(\n self.leaves, lambda leaf: leaf.type\n ):\n if leaf_type != token.STRING:\n continue\n leaf_list = list(leaf_group_iterator)\n if len(leaf_list) == 1:\n continue\n for leaf in leaf_list:\n if self.comments_after(leaf):\n return True\n return False\n\n def contains_uncollapsable_type_comments(self) -> bool:\n ignored_ids = set()\n try:\n last_leaf = self.leaves[-1]\n ignored_ids.add(id(last_leaf))\n if last_leaf.type == token.COMMA or (\n last_leaf.type == token.RPAR and not last_leaf.value\n ):\n # When trailing commas or optional parens are inserted by Black for\n # consistency, comments after the previous last element are not moved\n # (they don't have to, rendering will still be correct). 
So we ignore\n # trailing commas and invisible.\n last_leaf = self.leaves[-2]\n ignored_ids.add(id(last_leaf))\n except IndexError:\n return False\n\n # A type comment is uncollapsable if it is attached to a leaf\n # that isn't at the end of the line (since that could cause it\n # to get associated to a different argument) or if there are\n # comments before it (since that could cause it to get hidden\n # behind a comment.\n comment_seen = False\n for leaf_id, comments in self.comments.items():\n for comment in comments:\n if is_type_comment(comment):\n if comment_seen or (\n not is_type_ignore_comment(comment)\n and leaf_id not in ignored_ids\n ):\n return True\n\n comment_seen = True\n\n return False\n\n def contains_unsplittable_type_ignore(self) -> bool:\n if not self.leaves:\n return False\n\n # If a 'type: ignore' is attached to the end of a line, we\n # can't split the line, because we can't know which of the\n # subexpressions the ignore was meant to apply to.\n #\n # We only want this to apply to actual physical lines from the\n # original source, though: we don't want the presence of a\n # 'type: ignore' at the end of a multiline expression to\n # justify pushing it all onto one line. Thus we\n # (unfortunately) need to check the actual source lines and\n # only report an unsplittable 'type: ignore' if this line was\n # one line in the original code.\n\n # Grab the first and last line numbers, skipping generated leaves\n first_line = next((leaf.lineno for leaf in self.leaves if leaf.lineno != 0), 0)\n last_line = next(\n (leaf.lineno for leaf in reversed(self.leaves) if leaf.lineno != 0), 0\n )\n\n if first_line == last_line:\n # We look at the last two leaves since a comma or an\n # invisible paren could have been added at the end of the\n # line.\n for node in self.leaves[-2:]:\n for comment in self.comments.get(id(node), []):\n if is_type_ignore_comment(comment):\n return True\n\n return False\n\n def contains_multiline_strings(self) -> bool:\n return any(is_multiline_string(leaf) for leaf in self.leaves)\n\n def has_magic_trailing_comma(\n self, closing: Leaf, ensure_removable: bool = False\n ) -> bool:\n \"\"\"Return True if we have a magic trailing comma, that is when:\n - there's a trailing comma here\n - it's not a one-tuple\n - it's not a single-element subscript\n Additionally, if ensure_removable:\n - it's not from square bracket indexing\n (specifically, single-element square bracket indexing)\n \"\"\"\n if not (\n closing.type in CLOSING_BRACKETS\n and self.leaves\n and self.leaves[-1].type == token.COMMA\n ):\n return False\n\n if closing.type == token.RBRACE:\n return True\n\n if closing.type == token.RSQB:\n if (\n closing.parent\n and closing.parent.type == syms.trailer\n and closing.opening_bracket\n and is_one_sequence_between(\n closing.opening_bracket,\n closing,\n self.leaves,\n brackets=(token.LSQB, token.RSQB),\n )\n ):\n return False\n\n if not ensure_removable:\n return True\n\n comma = self.leaves[-1]\n if comma.parent is None:\n return False\n return (\n comma.parent.type != syms.subscriptlist\n or closing.opening_bracket is None\n or not is_one_sequence_between(\n closing.opening_bracket,\n closing,\n self.leaves,\n brackets=(token.LSQB, token.RSQB),\n )\n )\n\n if self.is_import:\n return True\n\n if closing.opening_bracket is not None and not is_one_sequence_between(\n closing.opening_bracket, closing, self.leaves\n ):\n return True\n\n return False\n\n def append_comment(self, comment: Leaf) -> bool:\n \"\"\"Add an inline or standalone comment to 
the line.\"\"\"\n if (\n comment.type == STANDALONE_COMMENT\n and self.bracket_tracker.any_open_brackets()\n ):\n comment.prefix = \"\"\n return False\n\n if comment.type != token.COMMENT:\n return False\n\n if not self.leaves:\n comment.type = STANDALONE_COMMENT\n comment.prefix = \"\"\n return False\n\n last_leaf = self.leaves[-1]\n if (\n last_leaf.type == token.RPAR\n and not last_leaf.value\n and last_leaf.parent\n and len(list(last_leaf.parent.leaves())) <= 3\n and not is_type_comment(comment)\n ):\n # Comments on an optional parens wrapping a single leaf should belong to\n # the wrapped node except if it's a type comment. Pinning the comment like\n # this avoids unstable formatting caused by comment migration.\n if len(self.leaves) < 2:\n comment.type = STANDALONE_COMMENT\n comment.prefix = \"\"\n return False\n\n last_leaf = self.leaves[-2]\n self.comments.setdefault(id(last_leaf), []).append(comment)\n return True\n\n def comments_after(self, leaf: Leaf) -> List[Leaf]:\n \"\"\"Generate comments that should appear directly after `leaf`.\"\"\"\n return self.comments.get(id(leaf), [])\n\n def remove_trailing_comma(self) -> None:\n \"\"\"Remove the trailing comma and moves the comments attached to it.\"\"\"\n trailing_comma = self.leaves.pop()\n trailing_comma_comments = self.comments.pop(id(trailing_comma), [])\n self.comments.setdefault(id(self.leaves[-1]), []).extend(\n trailing_comma_comments\n )\n\n def is_complex_subscript(self, leaf: Leaf) -> bool:\n \"\"\"Return True iff `leaf` is part of a slice with non-trivial exprs.\"\"\"\n open_lsqb = self.bracket_tracker.get_open_lsqb()\n if open_lsqb is None:\n return False\n\n subscript_start = open_lsqb.next_sibling\n\n if isinstance(subscript_start, Node):\n if subscript_start.type == syms.listmaker:\n return False\n\n if subscript_start.type == syms.subscriptlist:\n subscript_start = child_towards(subscript_start, leaf)\n return subscript_start is not None and any(\n n.type in TEST_DESCENDANTS for n in subscript_start.pre_order()\n )\n\n def enumerate_with_length(\n self, reversed: bool = False\n ) -> Iterator[Tuple[Index, Leaf, int]]:\n \"\"\"Return an enumeration of leaves with their length.\n\n Stops prematurely on multiline strings and standalone comments.\n \"\"\"\n op = cast(\n Callable[[Sequence[Leaf]], Iterator[Tuple[Index, Leaf]]],\n enumerate_reversed if reversed else enumerate,\n )\n for index, leaf in op(self.leaves):\n length = len(leaf.prefix) + len(leaf.value)\n if \"\\n\" in leaf.value:\n return # Multiline strings, we can't continue.\n\n for comment in self.comments_after(leaf):\n length += len(comment.value)\n\n yield index, leaf, length\n\n def clone(self) -> \"Line\":\n return Line(\n mode=self.mode,\n depth=self.depth,\n inside_brackets=self.inside_brackets,\n should_split_rhs=self.should_split_rhs,\n magic_trailing_comma=self.magic_trailing_comma,\n )\n\n def __str__(self) -> str:\n \"\"\"Render the line.\"\"\"\n if not self:\n return \"\\n\"\n\n indent = \" \" * self.depth\n leaves = iter(self.leaves)\n first = next(leaves)\n res = f\"{first.prefix}{indent}{first.value}\"\n for leaf in leaves:\n res += str(leaf)\n for comment in itertools.chain.from_iterable(self.comments.values()):\n res += str(comment)\n\n return res + \"\\n\"\n\n def __bool__(self) -> bool:\n \"\"\"Return True if the line has leaves or comments.\"\"\"\n return bool(self.leaves or self.comments)\n\n\n@dataclass\nclass RHSResult:\n \"\"\"Intermediate split result from a right hand split.\"\"\"\n\n head: Line\n body: Line\n tail: Line\n 
opening_bracket: Leaf\n closing_bracket: Leaf\n\n\n@dataclass\nclass LinesBlock:\n \"\"\"Class that holds information about a block of formatted lines.\n\n This is introduced so that the EmptyLineTracker can look behind the standalone\n comments and adjust their empty lines for class or def lines.\n \"\"\"\n\n mode: Mode\n previous_block: Optional[\"LinesBlock\"]\n original_line: Line\n before: int = 0\n content_lines: List[str] = field(default_factory=list)\n after: int = 0\n\n def all_lines(self) -> List[str]:\n empty_line = str(Line(mode=self.mode))\n return (\n [empty_line * self.before] + self.content_lines + [empty_line * self.after]\n )\n\n\n@dataclass\nclass EmptyLineTracker:\n \"\"\"Provides a stateful method that returns the number of potential extra\n empty lines needed before and after the currently processed line.\n\n Note: this tracker works on lines that haven't been split yet. It assumes\n the prefix of the first leaf consists of optional newlines. Those newlines\n are consumed by `maybe_empty_lines()` and included in the computation.\n \"\"\"\n\n mode: Mode\n previous_line: Optional[Line] = None\n previous_block: Optional[LinesBlock] = None\n previous_defs: List[Line] = field(default_factory=list)\n semantic_leading_comment: Optional[LinesBlock] = None\n\n def maybe_empty_lines(self, current_line: Line) -> LinesBlock:\n \"\"\"Return the number of extra empty lines before and after the `current_line`.\n\n This is for separating `def`, `async def` and `class` with extra empty\n lines (two on module-level).\n \"\"\"\n before, after = self._maybe_empty_lines(current_line)\n previous_after = self.previous_block.after if self.previous_block else 0\n before = (\n # Black should not insert empty lines at the beginning\n # of the file\n 0\n if self.previous_line is None\n else before - previous_after\n )\n if (\n Preview.module_docstring_newlines in current_line.mode\n and self.previous_block\n and self.previous_block.previous_block is None\n and len(self.previous_block.original_line.leaves) == 1\n and self.previous_block.original_line.is_triple_quoted_string\n and not (current_line.is_class or current_line.is_def)\n ):\n before = 1\n\n block = LinesBlock(\n mode=self.mode,\n previous_block=self.previous_block,\n original_line=current_line,\n before=before,\n after=after,\n )\n\n # Maintain the semantic_leading_comment state.\n if current_line.is_comment:\n if self.previous_line is None or (\n not self.previous_line.is_decorator\n # `or before` means this comment already has an empty line before\n and (not self.previous_line.is_comment or before)\n and (self.semantic_leading_comment is None or before)\n ):\n self.semantic_leading_comment = block\n # `or before` means this decorator already has an empty line before\n elif not current_line.is_decorator or before:\n self.semantic_leading_comment = None\n\n self.previous_line = current_line\n self.previous_block = block\n return block\n\n def _maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]:\n max_allowed = 1\n if current_line.depth == 0:\n max_allowed = 1 if self.mode.is_pyi else 2\n if current_line.leaves:\n # Consume the first leaf's extra newlines.\n first_leaf = current_line.leaves[0]\n before = first_leaf.prefix.count(\"\\n\")\n before = min(before, max_allowed)\n first_leaf.prefix = \"\"\n else:\n before = 0\n\n user_had_newline = bool(before)\n depth = current_line.depth\n\n previous_def = None\n while self.previous_defs and self.previous_defs[-1].depth >= depth:\n previous_def = self.previous_defs.pop()\n\n if 
previous_def is not None:\n assert self.previous_line is not None\n if self.mode.is_pyi:\n if depth and not current_line.is_def and self.previous_line.is_def:\n # Empty lines between attributes and methods should be preserved.\n before = 1 if user_had_newline else 0\n elif (\n Preview.blank_line_after_nested_stub_class in self.mode\n and previous_def.is_class\n and not previous_def.is_stub_class\n ):\n before = 1\n elif depth:\n before = 0\n else:\n before = 1\n else:\n if depth:\n before = 1\n elif (\n not depth\n and previous_def.depth\n and current_line.leaves[-1].type == token.COLON\n and (\n current_line.leaves[0].value\n not in (\"with\", \"try\", \"for\", \"while\", \"if\", \"match\")\n )\n ):\n # We shouldn't add two newlines between an indented function and\n # a dependent non-indented clause. This is to avoid issues with\n # conditional function definitions that are technically top-level\n # and therefore get two trailing newlines, but look weird and\n # inconsistent when they're followed by elif, else, etc. This is\n # worse because these functions only get *one* preceding newline\n # already.\n before = 1\n else:\n before = 2\n\n if current_line.is_decorator or current_line.is_def or current_line.is_class:\n return self._maybe_empty_lines_for_class_or_def(\n current_line, before, user_had_newline\n )\n\n if (\n self.previous_line\n and self.previous_line.is_import\n and not current_line.is_import\n and not current_line.is_fmt_pass_converted(first_leaf_matches=is_import)\n and depth == self.previous_line.depth\n ):\n return (before or 1), 0\n\n if (\n self.previous_line\n and self.previous_line.is_class\n and current_line.is_triple_quoted_string\n ):\n if Preview.no_blank_line_before_class_docstring in current_line.mode:\n return 0, 1\n return before, 1\n\n is_empty_first_line_ok = (\n Preview.allow_empty_first_line_before_new_block_or_comment\n in current_line.mode\n and (\n # If it's a standalone comment\n current_line.leaves[0].type == STANDALONE_COMMENT\n # If it opens a new block\n or current_line.opens_block\n # If it's a triple quote comment (but not at the start of a funcdef)\n or (\n is_docstring(current_line.leaves[0])\n and self.previous_line\n and self.previous_line.leaves[0]\n and self.previous_line.leaves[0].parent\n and not is_funcdef(self.previous_line.leaves[0].parent)\n )\n )\n )\n\n if (\n self.previous_line\n and self.previous_line.opens_block\n and not is_empty_first_line_ok\n ):\n return 0, 0\n return before, 0\n\n def _maybe_empty_lines_for_class_or_def( # noqa: C901\n self, current_line: Line, before: int, user_had_newline: bool\n ) -> Tuple[int, int]:\n if not current_line.is_decorator:\n self.previous_defs.append(current_line)\n if self.previous_line is None:\n # Don't insert empty lines before the first line in the file.\n return 0, 0\n\n if self.previous_line.is_decorator:\n if self.mode.is_pyi and current_line.is_stub_class:\n # Insert an empty line after a decorated stub class\n return 0, 1\n\n return 0, 0\n\n if self.previous_line.depth < current_line.depth and (\n self.previous_line.is_class or self.previous_line.is_def\n ):\n return 0, 0\n\n comment_to_add_newlines: Optional[LinesBlock] = None\n if (\n self.previous_line.is_comment\n and self.previous_line.depth == current_line.depth\n and before == 0\n ):\n slc = self.semantic_leading_comment\n if (\n slc is not None\n and slc.previous_block is not None\n and not slc.previous_block.original_line.is_class\n and not slc.previous_block.original_line.opens_block\n and slc.before <= 1\n ):\n 
comment_to_add_newlines = slc\n else:\n return 0, 0\n\n if self.mode.is_pyi:\n if current_line.is_class or self.previous_line.is_class:\n if self.previous_line.depth < current_line.depth:\n newlines = 0\n elif self.previous_line.depth > current_line.depth:\n newlines = 1\n elif current_line.is_stub_class and self.previous_line.is_stub_class:\n # No blank line between classes with an empty body\n newlines = 0\n else:\n newlines = 1\n # Remove case `self.previous_line.depth > current_line.depth` below when\n # this becomes stable.\n #\n # Don't inspect the previous line if it's part of the body of the previous\n # statement in the same level, we always want a blank line if there's\n # something with a body preceding.\n elif (\n Preview.blank_line_between_nested_and_def_stub_file in current_line.mode\n and self.previous_line.depth > current_line.depth\n ):\n newlines = 1\n elif (\n current_line.is_def or current_line.is_decorator\n ) and not self.previous_line.is_def:\n if current_line.depth:\n # In classes empty lines between attributes and methods should\n # be preserved.\n newlines = min(1, before)\n else:\n # Blank line between a block of functions (maybe with preceding\n # decorators) and a block of non-functions\n newlines = 1\n elif self.previous_line.depth > current_line.depth:\n newlines = 1\n else:\n newlines = 0\n else:\n newlines = 1 if current_line.depth else 2\n # If a user has left no space after a dummy implementation, don't insert\n # new lines. This is useful for instance for @overload or Protocols.\n if (\n Preview.dummy_implementations in self.mode\n and self.previous_line.is_stub_def\n and not user_had_newline\n ):\n newlines = 0\n if comment_to_add_newlines is not None:\n previous_block = comment_to_add_newlines.previous_block\n if previous_block is not None:\n comment_to_add_newlines.before = (\n max(comment_to_add_newlines.before, newlines) - previous_block.after\n )\n newlines = 0\n return newlines, 0\n\n\ndef enumerate_reversed(sequence: Sequence[T]) -> Iterator[Tuple[Index, T]]:\n \"\"\"Like `reversed(enumerate(sequence))` if that were possible.\"\"\"\n index = len(sequence) - 1\n for element in reversed(sequence):\n yield (index, element)\n index -= 1\n\n\ndef append_leaves(\n new_line: Line, old_line: Line, leaves: List[Leaf], preformatted: bool = False\n) -> None:\n \"\"\"\n Append leaves (taken from @old_line) to @new_line, making sure to fix the\n underlying Node structure where appropriate.\n\n All of the leaves in @leaves are duplicated. The duplicates are then\n appended to @new_line and used to replace their originals in the underlying\n Node structure. 
Any comments attached to the old leaves are reattached to\n the new leaves.\n\n Pre-conditions:\n set(@leaves) is a subset of set(@old_line.leaves).\n \"\"\"\n for old_leaf in leaves:\n new_leaf = Leaf(old_leaf.type, old_leaf.value)\n replace_child(old_leaf, new_leaf)\n new_line.append(new_leaf, preformatted=preformatted)\n\n for comment_leaf in old_line.comments_after(old_leaf):\n new_line.append(comment_leaf, preformatted=True)\n\n\ndef is_line_short_enough( # noqa: C901\n line: Line, *, mode: Mode, line_str: str = \"\"\n) -> bool:\n \"\"\"For non-multiline strings, return True if `line` is no longer than `line_length`.\n For multiline strings, looks at the context around `line` to determine\n if it should be inlined or split up.\n Uses the provided `line_str` rendering, if any, otherwise computes a new one.\n \"\"\"\n if not line_str:\n line_str = line_to_string(line)\n\n width = str_width if mode.preview else len\n\n if Preview.multiline_string_handling not in mode:\n return (\n width(line_str) <= mode.line_length\n and \"\\n\" not in line_str # multiline strings\n and not line.contains_standalone_comments()\n )\n\n if line.contains_standalone_comments():\n return False\n if \"\\n\" not in line_str:\n # No multiline strings (MLS) present\n return width(line_str) <= mode.line_length\n\n first, *_, last = line_str.split(\"\\n\")\n if width(first) > mode.line_length or width(last) > mode.line_length:\n return False\n\n # Traverse the AST to examine the context of the multiline string (MLS),\n # tracking aspects such as depth and comma existence,\n # to determine whether to split the MLS or keep it together.\n # Depth (which is based on the existing bracket_depth concept)\n # is needed to determine nesting level of the MLS.\n # Includes special case for trailing commas.\n commas: List[int] = [] # tracks number of commas per depth level\n multiline_string: Optional[Leaf] = None\n # store the leaves that contain parts of the MLS\n multiline_string_contexts: List[LN] = []\n\n max_level_to_update: Union[int, float] = math.inf # track the depth of the MLS\n for i, leaf in enumerate(line.leaves):\n if max_level_to_update == math.inf:\n had_comma: Optional[int] = None\n if leaf.bracket_depth + 1 > len(commas):\n commas.append(0)\n elif leaf.bracket_depth + 1 < len(commas):\n had_comma = commas.pop()\n if (\n had_comma is not None\n and multiline_string is not None\n and multiline_string.bracket_depth == leaf.bracket_depth + 1\n ):\n # Have left the level with the MLS, stop tracking commas\n max_level_to_update = leaf.bracket_depth\n if had_comma > 0:\n # MLS was in parens with at least one comma - force split\n return False\n\n if leaf.bracket_depth <= max_level_to_update and leaf.type == token.COMMA:\n # Ignore non-nested trailing comma\n # directly after MLS/MLS-containing expression\n ignore_ctxs: List[Optional[LN]] = [None]\n ignore_ctxs += multiline_string_contexts\n if not (leaf.prev_sibling in ignore_ctxs and i == len(line.leaves) - 1):\n commas[leaf.bracket_depth] += 1\n if max_level_to_update != math.inf:\n max_level_to_update = min(max_level_to_update, leaf.bracket_depth)\n\n if is_multiline_string(leaf):\n if len(multiline_string_contexts) > 0:\n # >1 multiline string cannot fit on a single line - force split\n return False\n multiline_string = leaf\n ctx: LN = leaf\n # fetch the leaf components of the MLS in the AST\n while str(ctx) in line_str:\n multiline_string_contexts.append(ctx)\n if ctx.parent is None:\n break\n ctx = ctx.parent\n\n # May not have a triple-quoted multiline 
string at all,\n # in case of a regular string with embedded newlines and line continuations\n if len(multiline_string_contexts) == 0:\n return True\n\n return all(val == 0 for val in commas)\n\n\ndef can_be_split(line: Line) -> bool:\n \"\"\"Return False if the line cannot be split *for sure*.\n\n This is not an exhaustive search but a cheap heuristic that we can use to\n avoid some unfortunate formattings (mostly around wrapping unsplittable code\n in unnecessary parentheses).\n \"\"\"\n leaves = line.leaves\n if len(leaves) < 2:\n return False\n\n if leaves[0].type == token.STRING and leaves[1].type == token.DOT:\n call_count = 0\n dot_count = 0\n next = leaves[-1]\n for leaf in leaves[-2::-1]:\n if leaf.type in OPENING_BRACKETS:\n if next.type not in CLOSING_BRACKETS:\n return False\n\n call_count += 1\n elif leaf.type == token.DOT:\n dot_count += 1\n elif leaf.type == token.NAME:\n if not (next.type == token.DOT or next.type in OPENING_BRACKETS):\n return False\n\n elif leaf.type not in CLOSING_BRACKETS:\n return False\n\n if dot_count > 1 and call_count > 1:\n return False\n\n return True\n\n\ndef can_omit_invisible_parens(\n rhs: RHSResult,\n line_length: int,\n) -> bool:\n \"\"\"Does `rhs.body` have a shape safe to reformat without optional parens around it?\n\n Returns True for only a subset of potentially nice looking formattings but\n the point is to not return false positives that end up producing lines that\n are too long.\n \"\"\"\n line = rhs.body\n bt = line.bracket_tracker\n if not bt.delimiters:\n # Without delimiters the optional parentheses are useless.\n return True\n\n max_priority = bt.max_delimiter_priority()\n delimiter_count = bt.delimiter_count_with_priority(max_priority)\n if delimiter_count > 1:\n # With more than one delimiter of a kind the optional parentheses read better.\n return False\n\n if delimiter_count == 1:\n if (\n Preview.wrap_multiple_context_managers_in_parens in line.mode\n and max_priority == COMMA_PRIORITY\n and rhs.head.is_with_or_async_with_stmt\n ):\n # For two context manager with statements, the optional parentheses read\n # better. In this case, `rhs.body` is the context managers part of\n # the with statement. `rhs.head` is the `with (` part on the previous\n # line.\n return False\n # Otherwise it may also read better, but we don't do it today and requires\n # careful considerations for all possible cases. See\n # https://github.com/psf/black/issues/2156.\n\n if max_priority == DOT_PRIORITY:\n # A single stranded method call doesn't require optional parentheses.\n return True\n\n assert len(line.leaves) >= 2, \"Stranded delimiter\"\n\n # With a single delimiter, omit if the expression starts or ends with\n # a bracket.\n first = line.leaves[0]\n second = line.leaves[1]\n if first.type in OPENING_BRACKETS and second.type not in CLOSING_BRACKETS:\n if _can_omit_opening_paren(line, first=first, line_length=line_length):\n return True\n\n # Note: we are not returning False here because a line might have *both*\n # a leading opening bracket and a trailing closing bracket. 
If the\n # opening bracket doesn't match our rule, maybe the closing will.\n\n penultimate = line.leaves[-2]\n last = line.leaves[-1]\n\n if (\n last.type == token.RPAR\n or last.type == token.RBRACE\n or (\n # don't use indexing for omitting optional parentheses;\n # it looks weird\n last.type == token.RSQB\n and last.parent\n and last.parent.type != syms.trailer\n )\n ):\n if penultimate.type in OPENING_BRACKETS:\n # Empty brackets don't help.\n return False\n\n if is_multiline_string(first):\n # Additional wrapping of a multiline string in this situation is\n # unnecessary.\n return True\n\n if _can_omit_closing_paren(line, last=last, line_length=line_length):\n return True\n\n return False\n\n\ndef _can_omit_opening_paren(line: Line, *, first: Leaf, line_length: int) -> bool:\n \"\"\"See `can_omit_invisible_parens`.\"\"\"\n remainder = False\n length = 4 * line.depth\n _index = -1\n for _index, leaf, leaf_length in line.enumerate_with_length():\n if leaf.type in CLOSING_BRACKETS and leaf.opening_bracket is first:\n remainder = True\n if remainder:\n length += leaf_length\n if length > line_length:\n break\n\n if leaf.type in OPENING_BRACKETS:\n # There are brackets we can further split on.\n remainder = False\n\n else:\n # checked the entire string and line length wasn't exceeded\n if len(line.leaves) == _index + 1:\n return True\n\n return False\n\n\ndef _can_omit_closing_paren(line: Line, *, last: Leaf, line_length: int) -> bool:\n \"\"\"See `can_omit_invisible_parens`.\"\"\"\n length = 4 * line.depth\n seen_other_brackets = False\n for _index, leaf, leaf_length in line.enumerate_with_length():\n length += leaf_length\n if leaf is last.opening_bracket:\n if seen_other_brackets or length <= line_length:\n return True\n\n elif leaf.type in OPENING_BRACKETS:\n # There are brackets we can further split on.\n seen_other_brackets = True\n\n return False\n\n\ndef line_to_string(line: Line) -> str:\n \"\"\"Returns the string representation of @line.\n\n WARNING: This is known to be computationally expensive.\n \"\"\"\n return str(line).strip(\"\\n\")\n", "path": "src/black/lines.py"}]} |
gh_patches_debug_1295 | rasdani/github-patches | git_diff | TheAlgorithms__Python-295 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ProjectEuler -- Problem 1 -- solv2.py -- Error
For the Input ```1000``` I get ```233366.4```. The correct answer should be ```233168```
See [file](https://github.com/TheAlgorithms/Python/blob/master/Project%20Euler/Problem%2001/sol2.py)
--- END ISSUE ---
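The reported figure is exactly what Python 3's true division produces in the arithmetic-progression formula; a minimal reproduction mirroring sol2.py's arithmetic for the reported input n = 1000:
```python
n = 1000
terms = (n - 1) / 3 # 333.0 -- "/" is true division on Python 3
s = terms * (6 + (terms - 1) * 3) / 2
terms = (n - 1) / 5 # 199.8 -- no longer a whole number of terms
s += terms * (10 + (terms - 1) * 5) / 2
terms = (n - 1) / 15 # 66.6
s -= terms * (30 + (terms - 1) * 15) / 2
print(s) # 233366.4 -- the reported (wrong) value
```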
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Project Euler/Problem 01/sol2.py`
Content:
```
1 '''
2 Problem Statement:
3 If we list all the natural numbers below 10 that are multiples of 3 or 5,
4 we get 3,5,6 and 9. The sum of these multiples is 23.
5 Find the sum of all the multiples of 3 or 5 below N.
6 '''
7 from __future__ import print_function
8 try:
9 raw_input # Python 2
10 except NameError:
11 raw_input = input # Python 3
12 n = int(raw_input().strip())
13 sum = 0
14 terms = (n-1)/3
15 sum+= ((terms)*(6+(terms-1)*3))/2 #sum of an A.P.
16 terms = (n-1)/5
17 sum+= ((terms)*(10+(terms-1)*5))/2
18 terms = (n-1)/15
19 sum-= ((terms)*(30+(terms-1)*15))/2
20 print(sum)
21
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/Project Euler/Problem 01/sol2.py b/Project Euler/Problem 01/sol2.py
--- a/Project Euler/Problem 01/sol2.py
+++ b/Project Euler/Problem 01/sol2.py
@@ -11,10 +11,10 @@
raw_input = input # Python 3
n = int(raw_input().strip())
sum = 0
-terms = (n-1)/3
-sum+= ((terms)*(6+(terms-1)*3))/2 #sum of an A.P.
-terms = (n-1)/5
-sum+= ((terms)*(10+(terms-1)*5))/2
-terms = (n-1)/15
-sum-= ((terms)*(30+(terms-1)*15))/2
+terms = (n-1)//3
+sum+= ((terms)*(6+(terms-1)*3))//2 #sum of an A.P.
+terms = (n-1)//5
+sum+= ((terms)*(10+(terms-1)*5))//2
+terms = (n-1)//15
+sum-= ((terms)*(30+(terms-1)*15))//2
print(sum)
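A quick sanity check of the patched arithmetic for n = 1000 (the input from the report):
```python
n = 1000
t3, t5, t15 = (n - 1) // 3, (n - 1) // 5, (n - 1) // 15 # 333, 199, 66
s = t3 * (6 + (t3 - 1) * 3) // 2 # 166833 -- multiples of 3
s += t5 * (10 + (t5 - 1) * 5) // 2 # 99500 -- multiples of 5
s -= t15 * (30 + (t15 - 1) * 15) // 2 # 33165 -- multiples of 15, counted twice above
print(s) # 233168 -- the expected answer
```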
| {"golden_diff": "diff --git a/Project Euler/Problem 01/sol2.py b/Project Euler/Problem 01/sol2.py\n--- a/Project Euler/Problem 01/sol2.py\t\n+++ b/Project Euler/Problem 01/sol2.py\t\n@@ -11,10 +11,10 @@\n raw_input = input # Python 3\n n = int(raw_input().strip())\n sum = 0\n-terms = (n-1)/3\n-sum+= ((terms)*(6+(terms-1)*3))/2 #sum of an A.P.\n-terms = (n-1)/5\n-sum+= ((terms)*(10+(terms-1)*5))/2\n-terms = (n-1)/15\n-sum-= ((terms)*(30+(terms-1)*15))/2\n+terms = (n-1)//3\n+sum+= ((terms)*(6+(terms-1)*3))//2 #sum of an A.P.\n+terms = (n-1)//5\n+sum+= ((terms)*(10+(terms-1)*5))//2\n+terms = (n-1)//15\n+sum-= ((terms)*(30+(terms-1)*15))//2\n print(sum)\n", "issue": "ProjectEuler -- Problem 1 -- solv2.py -- Error\nFor the Input ```1000``` I get ```233366.4```. The correct answer should be ```233168``` \r\nSee [file](https://github.com/TheAlgorithms/Python/blob/master/Project%20Euler/Problem%2001/sol2.py)\n", "before_files": [{"content": "'''\nProblem Statement:\nIf we list all the natural numbers below 10 that are multiples of 3 or 5,\nwe get 3,5,6 and 9. The sum of these multiples is 23.\nFind the sum of all the multiples of 3 or 5 below N.\n'''\nfrom __future__ import print_function\ntry:\n raw_input # Python 2\nexcept NameError:\n raw_input = input # Python 3\nn = int(raw_input().strip())\nsum = 0\nterms = (n-1)/3\nsum+= ((terms)*(6+(terms-1)*3))/2 #sum of an A.P.\nterms = (n-1)/5\nsum+= ((terms)*(10+(terms-1)*5))/2\nterms = (n-1)/15\nsum-= ((terms)*(30+(terms-1)*15))/2\nprint(sum)\n", "path": "Project Euler/Problem 01/sol2.py"}], "after_files": [{"content": "'''\nProblem Statement:\nIf we list all the natural numbers below 10 that are multiples of 3 or 5,\nwe get 3,5,6 and 9. The sum of these multiples is 23.\nFind the sum of all the multiples of 3 or 5 below N.\n'''\nfrom __future__ import print_function\ntry:\n raw_input # Python 2\nexcept NameError:\n raw_input = input # Python 3\nn = int(raw_input().strip())\nsum = 0\nterms = (n-1)//3\nsum+= ((terms)*(6+(terms-1)*3))//2 #sum of an A.P.\nterms = (n-1)//5\nsum+= ((terms)*(10+(terms-1)*5))//2\nterms = (n-1)//15\nsum-= ((terms)*(30+(terms-1)*15))//2\nprint(sum)\n", "path": "Project Euler/Problem 01/sol2.py"}]} |
gh_patches_debug_1296 | rasdani/github-patches | git_diff | mars-project__mars-284 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Fuse operand's sparse value is wrong
<!--
Thank you for your contribution!
Please review https://github.com/mars-project/mars/blob/master/CONTRIBUTING.rst before opening an issue.
-->
**Describe the bug**
A fuse operand's sparseness should be the same as tail node's, it is not set correctly now.
**To Reproduce**
``` Python
In [1]: import scipy.sparse as sps
In [2]: import mars.tensor as mt
In [3]: data = sps.rand(10, 10, density=0.05)
In [4]: a = mt.tensor(data, chunk_size=3)
In [5]: b = (a * 2) * 2
In [6]: g = b.build_graph(tiled=True, compose=True)
In [7]: list(g)[0].op.sparse
Out[7]: False
In [8]: list(g)[0].op
Out[8]: <mars.tensor.expressions.fuse.core.TensorFuseChunk at 0xa208b7048>
In [9]: list(g)[0].composed[-1].op.sparse
Out[9]: True
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mars/tensor/expressions/fuse/core.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 # Copyright 1999-2018 Alibaba Group Holding Ltd.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 from .... import operands
18 from ....tiles import NotSupportTile
19 from ..core import TensorOperandMixin
20
21
22 class TensorFuseChunk(operands.Fuse, TensorOperandMixin):
23 def __init__(self, dtype=None, **kw):
24 super(TensorFuseChunk, self).__init__(_dtype=dtype, **kw)
25
26 def calc_shape(self, *inputs_shape):
27 in_shapes = inputs_shape
28 out_shape = None
29
30 # TODO: the logic will be changed when fusion is not only straight line
31 for c in self.outputs[0].composed:
32 out_shape = c.op.calc_shape(*in_shapes)
33 in_shapes = [out_shape]
34 return out_shape
35
36 @classmethod
37 def tile(cls, op):
38 raise NotSupportTile('TensorFuseChunk is a chunk operand which does not support tile')
39
40
41 class TensorFuseChunkMixin(TensorOperandMixin):
42 __slots__ = ()
43
44 @classmethod
45 def tile(cls, op):
46 raise NotSupportTile('TensorFuseChunk is a chunk operand which does not support tile')
47
48 def __call__(self, fuse_chunks):
49 head_chunk = fuse_chunks[0]
50 tail_chunk = fuse_chunks[-1]
51 setattr(self, '_operands', [c.op for c in fuse_chunks])
52 return self.new_chunk(head_chunk.inputs, tail_chunk.shape,
53 _composed=fuse_chunks, _key=tail_chunk.key)
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mars/tensor/expressions/fuse/core.py b/mars/tensor/expressions/fuse/core.py
--- a/mars/tensor/expressions/fuse/core.py
+++ b/mars/tensor/expressions/fuse/core.py
@@ -20,8 +20,8 @@
class TensorFuseChunk(operands.Fuse, TensorOperandMixin):
- def __init__(self, dtype=None, **kw):
- super(TensorFuseChunk, self).__init__(_dtype=dtype, **kw)
+ def __init__(self, dtype=None, sparse=False, **kw):
+ super(TensorFuseChunk, self).__init__(_dtype=dtype, _sparse=sparse, **kw)
def calc_shape(self, *inputs_shape):
in_shapes = inputs_shape
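The patch only widens the constructor signature and forwards the flag to the parent operand. A minimal sketch of why that matters, using a stand-in `Fuse` base class (hypothetical, not the real mars operand):
```python
class Fuse: # stand-in for operands.Fuse, for illustration only
def __init__(self, _dtype=None, _sparse=False, **kw):
self.dtype, self.sparse = _dtype, _sparse
class OldFuseChunk(Fuse):
def __init__(self, dtype=None, **kw): # no way to forward sparseness
super().__init__(_dtype=dtype, **kw)
class NewFuseChunk(Fuse):
def __init__(self, dtype=None, sparse=False, **kw): # patched signature
super().__init__(_dtype=dtype, _sparse=sparse, **kw)
print(OldFuseChunk(dtype=float).sparse) # False, whatever the tail chunk was
print(NewFuseChunk(dtype=float, sparse=True).sparse) # True
```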
| {"golden_diff": "diff --git a/mars/tensor/expressions/fuse/core.py b/mars/tensor/expressions/fuse/core.py\n--- a/mars/tensor/expressions/fuse/core.py\n+++ b/mars/tensor/expressions/fuse/core.py\n@@ -20,8 +20,8 @@\n \n \n class TensorFuseChunk(operands.Fuse, TensorOperandMixin):\n- def __init__(self, dtype=None, **kw):\n- super(TensorFuseChunk, self).__init__(_dtype=dtype, **kw)\n+ def __init__(self, dtype=None, sparse=False, **kw):\n+ super(TensorFuseChunk, self).__init__(_dtype=dtype, _sparse=sparse, **kw)\n \n def calc_shape(self, *inputs_shape):\n in_shapes = inputs_shape\n", "issue": "[BUG] Fuse operand's sparse value is wrong\n<!--\r\nThank you for your contribution!\r\n\r\nPlease review https://github.com/mars-project/mars/blob/master/CONTRIBUTING.rst before opening an issue.\r\n-->\r\n\r\n**Describe the bug**\r\nA fuse operand's sparseness should be the same as tail node's, it is not set correctly now.\r\n\r\n**To Reproduce**\r\n``` Python\r\nIn [1]: import scipy.sparse as sps \r\n\r\nIn [2]: import mars.tensor as mt \r\n\r\nIn [3]: data = sps.rand(10, 10, density=0.05) \r\n\r\nIn [4]: a = mt.tensor(data, chunk_size=3) \r\n\r\nIn [5]: b = (a * 2) * 2 \r\n\r\nIn [6]: g = b.build_graph(tiled=True, compose=True) \r\n\r\nIn [7]: list(g)[0].op.sparse \r\nOut[7]: False\r\n\r\nIn [8]: list(g)[0].op \r\nOut[8]: <mars.tensor.expressions.fuse.core.TensorFuseChunk at 0xa208b7048>\r\n\r\nIn [9]: list(g)[0].composed[-1].op.sparse \r\nOut[9]: True\r\n```\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2018 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom .... 
import operands\nfrom ....tiles import NotSupportTile\nfrom ..core import TensorOperandMixin\n\n\nclass TensorFuseChunk(operands.Fuse, TensorOperandMixin):\n def __init__(self, dtype=None, **kw):\n super(TensorFuseChunk, self).__init__(_dtype=dtype, **kw)\n\n def calc_shape(self, *inputs_shape):\n in_shapes = inputs_shape\n out_shape = None\n\n # TODO: the logic will be changed when fusion is not only straight line\n for c in self.outputs[0].composed:\n out_shape = c.op.calc_shape(*in_shapes)\n in_shapes = [out_shape]\n return out_shape\n\n @classmethod\n def tile(cls, op):\n raise NotSupportTile('TensorFuseChunk is a chunk operand which does not support tile')\n\n\nclass TensorFuseChunkMixin(TensorOperandMixin):\n __slots__ = ()\n\n @classmethod\n def tile(cls, op):\n raise NotSupportTile('TensorFuseChunk is a chunk operand which does not support tile')\n\n def __call__(self, fuse_chunks):\n head_chunk = fuse_chunks[0]\n tail_chunk = fuse_chunks[-1]\n setattr(self, '_operands', [c.op for c in fuse_chunks])\n return self.new_chunk(head_chunk.inputs, tail_chunk.shape,\n _composed=fuse_chunks, _key=tail_chunk.key)\n", "path": "mars/tensor/expressions/fuse/core.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2018 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom .... import operands\nfrom ....tiles import NotSupportTile\nfrom ..core import TensorOperandMixin\n\n\nclass TensorFuseChunk(operands.Fuse, TensorOperandMixin):\n def __init__(self, dtype=None, sparse=False, **kw):\n super(TensorFuseChunk, self).__init__(_dtype=dtype, _sparse=sparse, **kw)\n\n def calc_shape(self, *inputs_shape):\n in_shapes = inputs_shape\n out_shape = None\n\n # TODO: the logic will be changed when fusion is not only straight line\n for c in self.outputs[0].composed:\n out_shape = c.op.calc_shape(*in_shapes)\n in_shapes = [out_shape]\n return out_shape\n\n @classmethod\n def tile(cls, op):\n raise NotSupportTile('TensorFuseChunk is a chunk operand which does not support tile')\n\n\nclass TensorFuseChunkMixin(TensorOperandMixin):\n __slots__ = ()\n\n @classmethod\n def tile(cls, op):\n raise NotSupportTile('TensorFuseChunk is a chunk operand which does not support tile')\n\n def __call__(self, fuse_chunks):\n head_chunk = fuse_chunks[0]\n tail_chunk = fuse_chunks[-1]\n setattr(self, '_operands', [c.op for c in fuse_chunks])\n return self.new_chunk(head_chunk.inputs, tail_chunk.shape,\n _composed=fuse_chunks, _key=tail_chunk.key)\n", "path": "mars/tensor/expressions/fuse/core.py"}]} |
gh_patches_debug_1297 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-505 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
urls in parens don't parse as links
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bookwyrm/outgoing.py`
Content:
```
1 ''' handles all the activity coming out of the server '''
2 import re
3
4 from django.db import IntegrityError, transaction
5 from django.http import JsonResponse
6 from django.shortcuts import get_object_or_404
7 from django.views.decorators.csrf import csrf_exempt
8 from django.views.decorators.http import require_GET
9 from markdown import markdown
10 from requests import HTTPError
11
12 from bookwyrm import activitypub
13 from bookwyrm import models
14 from bookwyrm.connectors import get_data, ConnectorException
15 from bookwyrm.broadcast import broadcast
16 from bookwyrm.sanitize_html import InputHtmlParser
17 from bookwyrm.status import create_notification
18 from bookwyrm.status import create_generated_note
19 from bookwyrm.status import delete_status
20 from bookwyrm.settings import DOMAIN
21 from bookwyrm.utils import regex
22
23
24 @csrf_exempt
25 @require_GET
26 def outbox(request, username):
27 ''' outbox for the requested user '''
28 user = get_object_or_404(models.User, localname=username)
29 filter_type = request.GET.get('type')
30 if filter_type not in models.status_models:
31 filter_type = None
32
33 return JsonResponse(
34 user.to_outbox(**request.GET, filter_type=filter_type),
35 encoder=activitypub.ActivityEncoder
36 )
37
38
39 def handle_remote_webfinger(query):
40 ''' webfingerin' other servers '''
41 user = None
42
43 # usernames could be @user@domain or user@domain
44 if not query:
45 return None
46
47 if query[0] == '@':
48 query = query[1:]
49
50 try:
51 domain = query.split('@')[1]
52 except IndexError:
53 return None
54
55 try:
56 user = models.User.objects.get(username=query)
57 except models.User.DoesNotExist:
58 url = 'https://%s/.well-known/webfinger?resource=acct:%s' % \
59 (domain, query)
60 try:
61 data = get_data(url)
62 except (ConnectorException, HTTPError):
63 return None
64
65 for link in data.get('links'):
66 if link.get('rel') == 'self':
67 try:
68 user = activitypub.resolve_remote_id(
69 models.User, link['href']
70 )
71 except KeyError:
72 return None
73 return user
74
75
76 def handle_follow(user, to_follow):
77 ''' someone local wants to follow someone '''
78 relationship, _ = models.UserFollowRequest.objects.get_or_create(
79 user_subject=user,
80 user_object=to_follow,
81 )
82 activity = relationship.to_activity()
83 broadcast(user, activity, privacy='direct', direct_recipients=[to_follow])
84
85
86 def handle_unfollow(user, to_unfollow):
87 ''' someone local wants to follow someone '''
88 relationship = models.UserFollows.objects.get(
89 user_subject=user,
90 user_object=to_unfollow
91 )
92 activity = relationship.to_undo_activity(user)
93 broadcast(user, activity, privacy='direct', direct_recipients=[to_unfollow])
94 to_unfollow.followers.remove(user)
95
96
97 def handle_accept(follow_request):
98 ''' send an acceptance message to a follow request '''
99 user = follow_request.user_subject
100 to_follow = follow_request.user_object
101 with transaction.atomic():
102 relationship = models.UserFollows.from_request(follow_request)
103 follow_request.delete()
104 relationship.save()
105
106 activity = relationship.to_accept_activity()
107 broadcast(to_follow, activity, privacy='direct', direct_recipients=[user])
108
109
110 def handle_reject(follow_request):
111 ''' a local user who managed follows rejects a follow request '''
112 user = follow_request.user_subject
113 to_follow = follow_request.user_object
114 activity = follow_request.to_reject_activity()
115 follow_request.delete()
116 broadcast(to_follow, activity, privacy='direct', direct_recipients=[user])
117
118
119 def handle_shelve(user, book, shelf):
120 ''' a local user is getting a book put on their shelf '''
121 # update the database
122 shelve = models.ShelfBook(book=book, shelf=shelf, added_by=user)
123 shelve.save()
124
125 broadcast(user, shelve.to_add_activity(user))
126
127
128 def handle_unshelve(user, book, shelf):
129 ''' a local user is getting a book put on their shelf '''
130 # update the database
131 row = models.ShelfBook.objects.get(book=book, shelf=shelf)
132 activity = row.to_remove_activity(user)
133 row.delete()
134
135 broadcast(user, activity)
136
137
138 def handle_reading_status(user, shelf, book, privacy):
139 ''' post about a user reading a book '''
140 # tell the world about this cool thing that happened
141 try:
142 message = {
143 'to-read': 'wants to read',
144 'reading': 'started reading',
145 'read': 'finished reading'
146 }[shelf.identifier]
147 except KeyError:
148 # it's a non-standard shelf, don't worry about it
149 return
150
151 status = create_generated_note(
152 user,
153 message,
154 mention_books=[book],
155 privacy=privacy
156 )
157 status.save()
158
159 broadcast(user, status.to_create_activity(user))
160
161
162 def handle_imported_book(user, item, include_reviews, privacy):
163 ''' process a goodreads csv and then post about it '''
164 if isinstance(item.book, models.Work):
165 item.book = item.book.default_edition
166 if not item.book:
167 return
168
169 existing_shelf = models.ShelfBook.objects.filter(
170 book=item.book, added_by=user).exists()
171
172 # shelve the book if it hasn't been shelved already
173 if item.shelf and not existing_shelf:
174 desired_shelf = models.Shelf.objects.get(
175 identifier=item.shelf,
176 user=user
177 )
178 shelf_book = models.ShelfBook.objects.create(
179 book=item.book, shelf=desired_shelf, added_by=user)
180 broadcast(user, shelf_book.to_add_activity(user), privacy=privacy)
181
182 for read in item.reads:
183 read.book = item.book
184 read.user = user
185 read.save()
186
187 if include_reviews and (item.rating or item.review):
188 review_title = 'Review of {!r} on Goodreads'.format(
189 item.book.title,
190 ) if item.review else ''
191
192 # we don't know the publication date of the review,
193 # but "now" is a bad guess
194 published_date_guess = item.date_read or item.date_added
195 review = models.Review.objects.create(
196 user=user,
197 book=item.book,
198 name=review_title,
199 content=item.review,
200 rating=item.rating,
201 published_date=published_date_guess,
202 privacy=privacy,
203 )
204 # we don't need to send out pure activities because non-bookwyrm
205 # instances don't need this data
206 broadcast(user, review.to_create_activity(user), privacy=privacy)
207
208
209 def handle_delete_status(user, status):
210 ''' delete a status and broadcast deletion to other servers '''
211 delete_status(status)
212 broadcast(user, status.to_delete_activity(user))
213
214
215 def handle_status(user, form):
216 ''' generic handler for statuses '''
217 status = form.save(commit=False)
218 if not status.sensitive and status.content_warning:
219 # the cw text field remains populated when you click "remove"
220 status.content_warning = None
221 status.save()
222
223 # inspect the text for user tags
224 content = status.content
225 for (mention_text, mention_user) in find_mentions(content):
226 # add them to status mentions fk
227 status.mention_users.add(mention_user)
228
229 # turn the mention into a link
230 content = re.sub(
231 r'%s([^@]|$)' % mention_text,
232 r'<a href="%s">%s</a>\g<1>' % \
233 (mention_user.remote_id, mention_text),
234 content)
235
236 # add reply parent to mentions and notify
237 if status.reply_parent:
238 status.mention_users.add(status.reply_parent.user)
239 for mention_user in status.reply_parent.mention_users.all():
240 status.mention_users.add(mention_user)
241
242 if status.reply_parent.user.local:
243 create_notification(
244 status.reply_parent.user,
245 'REPLY',
246 related_user=user,
247 related_status=status
248 )
249
250 # deduplicate mentions
251 status.mention_users.set(set(status.mention_users.all()))
252 # create mention notifications
253 for mention_user in status.mention_users.all():
254 if status.reply_parent and mention_user == status.reply_parent.user:
255 continue
256 if mention_user.local:
257 create_notification(
258 mention_user,
259 'MENTION',
260 related_user=user,
261 related_status=status
262 )
263
264 # don't apply formatting to generated notes
265 if not isinstance(status, models.GeneratedNote):
266 status.content = to_markdown(content)
267 # do apply formatting to quotes
268 if hasattr(status, 'quote'):
269 status.quote = to_markdown(status.quote)
270
271 status.save()
272
273 broadcast(user, status.to_create_activity(user), software='bookwyrm')
274
275 # re-format the activity for non-bookwyrm servers
276 remote_activity = status.to_create_activity(user, pure=True)
277 broadcast(user, remote_activity, software='other')
278
279
280 def find_mentions(content):
281 ''' detect @mentions in raw status content '''
282 for match in re.finditer(regex.strict_username, content):
283 username = match.group().strip().split('@')[1:]
284 if len(username) == 1:
285 # this looks like a local user (@user), fill in the domain
286 username.append(DOMAIN)
287 username = '@'.join(username)
288
289 mention_user = handle_remote_webfinger(username)
290 if not mention_user:
291 # we can ignore users we don't know about
292 continue
293 yield (match.group(), mention_user)
294
295
296 def format_links(content):
297 ''' detect and format links '''
298 return re.sub(
299 r'([^(href=")]|^)(https?:\/\/(%s([\w\.\-_\/+&\?=:;,])*))' % \
300 regex.domain,
301 r'\g<1><a href="\g<2>">\g<3></a>',
302 content)
303
304 def to_markdown(content):
305 ''' catch links and convert to markdown '''
306 content = format_links(content)
307 content = markdown(content)
308 # sanitize resulting html
309 sanitizer = InputHtmlParser()
310 sanitizer.feed(content)
311 return sanitizer.get_output()
312
313
314 def handle_favorite(user, status):
315 ''' a user likes a status '''
316 try:
317 favorite = models.Favorite.objects.create(
318 status=status,
319 user=user
320 )
321 except IntegrityError:
322 # you already fav'ed that
323 return
324
325 fav_activity = favorite.to_activity()
326 broadcast(
327 user, fav_activity, privacy='direct', direct_recipients=[status.user])
328 create_notification(
329 status.user,
330 'FAVORITE',
331 related_user=user,
332 related_status=status
333 )
334
335
336 def handle_unfavorite(user, status):
337 ''' a user likes a status '''
338 try:
339 favorite = models.Favorite.objects.get(
340 status=status,
341 user=user
342 )
343 except models.Favorite.DoesNotExist:
344 # can't find that status, idk
345 return
346
347 fav_activity = favorite.to_undo_activity(user)
348 favorite.delete()
349 broadcast(user, fav_activity, direct_recipients=[status.user])
350
351
352 def handle_boost(user, status):
353 ''' a user wishes to boost a status '''
354 # is it boostable?
355 if not status.boostable:
356 return
357
358 if models.Boost.objects.filter(
359 boosted_status=status, user=user).exists():
360 # you already boosted that.
361 return
362 boost = models.Boost.objects.create(
363 boosted_status=status,
364 privacy=status.privacy,
365 user=user,
366 )
367
368 boost_activity = boost.to_activity()
369 broadcast(user, boost_activity)
370
371 create_notification(
372 status.user,
373 'BOOST',
374 related_user=user,
375 related_status=status
376 )
377
378
379 def handle_unboost(user, status):
380 ''' a user regrets boosting a status '''
381 boost = models.Boost.objects.filter(
382 boosted_status=status, user=user
383 ).first()
384 activity = boost.to_undo_activity(user)
385
386 boost.delete()
387 broadcast(user, activity)
388
389
390 def handle_update_book_data(user, item):
391 ''' broadcast the news about our book '''
392 broadcast(user, item.to_update_activity(user))
393
394
395 def handle_update_user(user):
396 ''' broadcast editing a user's profile '''
397 broadcast(user, user.to_update_activity(user))
398
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bookwyrm/outgoing.py b/bookwyrm/outgoing.py
--- a/bookwyrm/outgoing.py
+++ b/bookwyrm/outgoing.py
@@ -296,7 +296,7 @@
def format_links(content):
''' detect and format links '''
return re.sub(
- r'([^(href=")]|^)(https?:\/\/(%s([\w\.\-_\/+&\?=:;,])*))' % \
+ r'([^(href=")]|^|\()(https?:\/\/(%s([\w\.\-_\/+&\?=:;,])*))' % \
regex.domain,
r'\g<1><a href="\g<2>">\g<3></a>',
content)
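To see the effect of the added `|\(` alternative, a standalone check with a simplified stand-in for `regex.domain` (an assumption for illustration; the project's real domain pattern is not shown here):
```python
import re
domain = r'[\w\-]+(?:\.[\w\-]+)+' # simplified stand-in for bookwyrm.utils.regex.domain
old = r'([^(href=")]|^)(https?:\/\/(%s([\w\.\-_\/+&\?=:;,])*))' % domain
new = r'([^(href=")]|^|\()(https?:\/\/(%s([\w\.\-_\/+&\?=:;,])*))' % domain
repl = r'\g<1><a href="\g<2>">\g<3></a>'
text = 'my review (https://example.com/book/1) is up'
print(re.sub(old, repl, text)) # unchanged -- the "(" before the URL blocks the match
print(re.sub(new, repl, text)) # the URL inside the parentheses is wrapped in an anchor;
# the closing ")" stays outside the link
```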
| {"golden_diff": "diff --git a/bookwyrm/outgoing.py b/bookwyrm/outgoing.py\n--- a/bookwyrm/outgoing.py\n+++ b/bookwyrm/outgoing.py\n@@ -296,7 +296,7 @@\n def format_links(content):\n ''' detect and format links '''\n return re.sub(\n- r'([^(href=\")]|^)(https?:\\/\\/(%s([\\w\\.\\-_\\/+&\\?=:;,])*))' % \\\n+ r'([^(href=\")]|^|\\()(https?:\\/\\/(%s([\\w\\.\\-_\\/+&\\?=:;,])*))' % \\\n regex.domain,\n r'\\g<1><a href=\"\\g<2>\">\\g<3></a>',\n content)\n", "issue": "urls in parens don't parse as links\n\n", "before_files": [{"content": "''' handles all the activity coming out of the server '''\nimport re\n\nfrom django.db import IntegrityError, transaction\nfrom django.http import JsonResponse\nfrom django.shortcuts import get_object_or_404\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.decorators.http import require_GET\nfrom markdown import markdown\nfrom requests import HTTPError\n\nfrom bookwyrm import activitypub\nfrom bookwyrm import models\nfrom bookwyrm.connectors import get_data, ConnectorException\nfrom bookwyrm.broadcast import broadcast\nfrom bookwyrm.sanitize_html import InputHtmlParser\nfrom bookwyrm.status import create_notification\nfrom bookwyrm.status import create_generated_note\nfrom bookwyrm.status import delete_status\nfrom bookwyrm.settings import DOMAIN\nfrom bookwyrm.utils import regex\n\n\n@csrf_exempt\n@require_GET\ndef outbox(request, username):\n ''' outbox for the requested user '''\n user = get_object_or_404(models.User, localname=username)\n filter_type = request.GET.get('type')\n if filter_type not in models.status_models:\n filter_type = None\n\n return JsonResponse(\n user.to_outbox(**request.GET, filter_type=filter_type),\n encoder=activitypub.ActivityEncoder\n )\n\n\ndef handle_remote_webfinger(query):\n ''' webfingerin' other servers '''\n user = None\n\n # usernames could be @user@domain or user@domain\n if not query:\n return None\n\n if query[0] == '@':\n query = query[1:]\n\n try:\n domain = query.split('@')[1]\n except IndexError:\n return None\n\n try:\n user = models.User.objects.get(username=query)\n except models.User.DoesNotExist:\n url = 'https://%s/.well-known/webfinger?resource=acct:%s' % \\\n (domain, query)\n try:\n data = get_data(url)\n except (ConnectorException, HTTPError):\n return None\n\n for link in data.get('links'):\n if link.get('rel') == 'self':\n try:\n user = activitypub.resolve_remote_id(\n models.User, link['href']\n )\n except KeyError:\n return None\n return user\n\n\ndef handle_follow(user, to_follow):\n ''' someone local wants to follow someone '''\n relationship, _ = models.UserFollowRequest.objects.get_or_create(\n user_subject=user,\n user_object=to_follow,\n )\n activity = relationship.to_activity()\n broadcast(user, activity, privacy='direct', direct_recipients=[to_follow])\n\n\ndef handle_unfollow(user, to_unfollow):\n ''' someone local wants to follow someone '''\n relationship = models.UserFollows.objects.get(\n user_subject=user,\n user_object=to_unfollow\n )\n activity = relationship.to_undo_activity(user)\n broadcast(user, activity, privacy='direct', direct_recipients=[to_unfollow])\n to_unfollow.followers.remove(user)\n\n\ndef handle_accept(follow_request):\n ''' send an acceptance message to a follow request '''\n user = follow_request.user_subject\n to_follow = follow_request.user_object\n with transaction.atomic():\n relationship = models.UserFollows.from_request(follow_request)\n follow_request.delete()\n relationship.save()\n\n activity = relationship.to_accept_activity()\n 
broadcast(to_follow, activity, privacy='direct', direct_recipients=[user])\n\n\ndef handle_reject(follow_request):\n ''' a local user who managed follows rejects a follow request '''\n user = follow_request.user_subject\n to_follow = follow_request.user_object\n activity = follow_request.to_reject_activity()\n follow_request.delete()\n broadcast(to_follow, activity, privacy='direct', direct_recipients=[user])\n\n\ndef handle_shelve(user, book, shelf):\n ''' a local user is getting a book put on their shelf '''\n # update the database\n shelve = models.ShelfBook(book=book, shelf=shelf, added_by=user)\n shelve.save()\n\n broadcast(user, shelve.to_add_activity(user))\n\n\ndef handle_unshelve(user, book, shelf):\n ''' a local user is getting a book put on their shelf '''\n # update the database\n row = models.ShelfBook.objects.get(book=book, shelf=shelf)\n activity = row.to_remove_activity(user)\n row.delete()\n\n broadcast(user, activity)\n\n\ndef handle_reading_status(user, shelf, book, privacy):\n ''' post about a user reading a book '''\n # tell the world about this cool thing that happened\n try:\n message = {\n 'to-read': 'wants to read',\n 'reading': 'started reading',\n 'read': 'finished reading'\n }[shelf.identifier]\n except KeyError:\n # it's a non-standard shelf, don't worry about it\n return\n\n status = create_generated_note(\n user,\n message,\n mention_books=[book],\n privacy=privacy\n )\n status.save()\n\n broadcast(user, status.to_create_activity(user))\n\n\ndef handle_imported_book(user, item, include_reviews, privacy):\n ''' process a goodreads csv and then post about it '''\n if isinstance(item.book, models.Work):\n item.book = item.book.default_edition\n if not item.book:\n return\n\n existing_shelf = models.ShelfBook.objects.filter(\n book=item.book, added_by=user).exists()\n\n # shelve the book if it hasn't been shelved already\n if item.shelf and not existing_shelf:\n desired_shelf = models.Shelf.objects.get(\n identifier=item.shelf,\n user=user\n )\n shelf_book = models.ShelfBook.objects.create(\n book=item.book, shelf=desired_shelf, added_by=user)\n broadcast(user, shelf_book.to_add_activity(user), privacy=privacy)\n\n for read in item.reads:\n read.book = item.book\n read.user = user\n read.save()\n\n if include_reviews and (item.rating or item.review):\n review_title = 'Review of {!r} on Goodreads'.format(\n item.book.title,\n ) if item.review else ''\n\n # we don't know the publication date of the review,\n # but \"now\" is a bad guess\n published_date_guess = item.date_read or item.date_added\n review = models.Review.objects.create(\n user=user,\n book=item.book,\n name=review_title,\n content=item.review,\n rating=item.rating,\n published_date=published_date_guess,\n privacy=privacy,\n )\n # we don't need to send out pure activities because non-bookwyrm\n # instances don't need this data\n broadcast(user, review.to_create_activity(user), privacy=privacy)\n\n\ndef handle_delete_status(user, status):\n ''' delete a status and broadcast deletion to other servers '''\n delete_status(status)\n broadcast(user, status.to_delete_activity(user))\n\n\ndef handle_status(user, form):\n ''' generic handler for statuses '''\n status = form.save(commit=False)\n if not status.sensitive and status.content_warning:\n # the cw text field remains populated when you click \"remove\"\n status.content_warning = None\n status.save()\n\n # inspect the text for user tags\n content = status.content\n for (mention_text, mention_user) in find_mentions(content):\n # add them to status 
mentions fk\n status.mention_users.add(mention_user)\n\n # turn the mention into a link\n content = re.sub(\n r'%s([^@]|$)' % mention_text,\n r'<a href=\"%s\">%s</a>\\g<1>' % \\\n (mention_user.remote_id, mention_text),\n content)\n\n # add reply parent to mentions and notify\n if status.reply_parent:\n status.mention_users.add(status.reply_parent.user)\n for mention_user in status.reply_parent.mention_users.all():\n status.mention_users.add(mention_user)\n\n if status.reply_parent.user.local:\n create_notification(\n status.reply_parent.user,\n 'REPLY',\n related_user=user,\n related_status=status\n )\n\n # deduplicate mentions\n status.mention_users.set(set(status.mention_users.all()))\n # create mention notifications\n for mention_user in status.mention_users.all():\n if status.reply_parent and mention_user == status.reply_parent.user:\n continue\n if mention_user.local:\n create_notification(\n mention_user,\n 'MENTION',\n related_user=user,\n related_status=status\n )\n\n # don't apply formatting to generated notes\n if not isinstance(status, models.GeneratedNote):\n status.content = to_markdown(content)\n # do apply formatting to quotes\n if hasattr(status, 'quote'):\n status.quote = to_markdown(status.quote)\n\n status.save()\n\n broadcast(user, status.to_create_activity(user), software='bookwyrm')\n\n # re-format the activity for non-bookwyrm servers\n remote_activity = status.to_create_activity(user, pure=True)\n broadcast(user, remote_activity, software='other')\n\n\ndef find_mentions(content):\n ''' detect @mentions in raw status content '''\n for match in re.finditer(regex.strict_username, content):\n username = match.group().strip().split('@')[1:]\n if len(username) == 1:\n # this looks like a local user (@user), fill in the domain\n username.append(DOMAIN)\n username = '@'.join(username)\n\n mention_user = handle_remote_webfinger(username)\n if not mention_user:\n # we can ignore users we don't know about\n continue\n yield (match.group(), mention_user)\n\n\ndef format_links(content):\n ''' detect and format links '''\n return re.sub(\n r'([^(href=\")]|^)(https?:\\/\\/(%s([\\w\\.\\-_\\/+&\\?=:;,])*))' % \\\n regex.domain,\n r'\\g<1><a href=\"\\g<2>\">\\g<3></a>',\n content)\n\ndef to_markdown(content):\n ''' catch links and convert to markdown '''\n content = format_links(content)\n content = markdown(content)\n # sanitize resulting html\n sanitizer = InputHtmlParser()\n sanitizer.feed(content)\n return sanitizer.get_output()\n\n\ndef handle_favorite(user, status):\n ''' a user likes a status '''\n try:\n favorite = models.Favorite.objects.create(\n status=status,\n user=user\n )\n except IntegrityError:\n # you already fav'ed that\n return\n\n fav_activity = favorite.to_activity()\n broadcast(\n user, fav_activity, privacy='direct', direct_recipients=[status.user])\n create_notification(\n status.user,\n 'FAVORITE',\n related_user=user,\n related_status=status\n )\n\n\ndef handle_unfavorite(user, status):\n ''' a user likes a status '''\n try:\n favorite = models.Favorite.objects.get(\n status=status,\n user=user\n )\n except models.Favorite.DoesNotExist:\n # can't find that status, idk\n return\n\n fav_activity = favorite.to_undo_activity(user)\n favorite.delete()\n broadcast(user, fav_activity, direct_recipients=[status.user])\n\n\ndef handle_boost(user, status):\n ''' a user wishes to boost a status '''\n # is it boostable?\n if not status.boostable:\n return\n\n if models.Boost.objects.filter(\n boosted_status=status, user=user).exists():\n # you already boosted that.\n 
return\n boost = models.Boost.objects.create(\n boosted_status=status,\n privacy=status.privacy,\n user=user,\n )\n\n boost_activity = boost.to_activity()\n broadcast(user, boost_activity)\n\n create_notification(\n status.user,\n 'BOOST',\n related_user=user,\n related_status=status\n )\n\n\ndef handle_unboost(user, status):\n ''' a user regrets boosting a status '''\n boost = models.Boost.objects.filter(\n boosted_status=status, user=user\n ).first()\n activity = boost.to_undo_activity(user)\n\n boost.delete()\n broadcast(user, activity)\n\n\ndef handle_update_book_data(user, item):\n ''' broadcast the news about our book '''\n broadcast(user, item.to_update_activity(user))\n\n\ndef handle_update_user(user):\n ''' broadcast editing a user's profile '''\n broadcast(user, user.to_update_activity(user))\n", "path": "bookwyrm/outgoing.py"}], "after_files": [{"content": "''' handles all the activity coming out of the server '''\nimport re\n\nfrom django.db import IntegrityError, transaction\nfrom django.http import JsonResponse\nfrom django.shortcuts import get_object_or_404\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.decorators.http import require_GET\nfrom markdown import markdown\nfrom requests import HTTPError\n\nfrom bookwyrm import activitypub\nfrom bookwyrm import models\nfrom bookwyrm.connectors import get_data, ConnectorException\nfrom bookwyrm.broadcast import broadcast\nfrom bookwyrm.sanitize_html import InputHtmlParser\nfrom bookwyrm.status import create_notification\nfrom bookwyrm.status import create_generated_note\nfrom bookwyrm.status import delete_status\nfrom bookwyrm.settings import DOMAIN\nfrom bookwyrm.utils import regex\n\n\n@csrf_exempt\n@require_GET\ndef outbox(request, username):\n ''' outbox for the requested user '''\n user = get_object_or_404(models.User, localname=username)\n filter_type = request.GET.get('type')\n if filter_type not in models.status_models:\n filter_type = None\n\n return JsonResponse(\n user.to_outbox(**request.GET, filter_type=filter_type),\n encoder=activitypub.ActivityEncoder\n )\n\n\ndef handle_remote_webfinger(query):\n ''' webfingerin' other servers '''\n user = None\n\n # usernames could be @user@domain or user@domain\n if not query:\n return None\n\n if query[0] == '@':\n query = query[1:]\n\n try:\n domain = query.split('@')[1]\n except IndexError:\n return None\n\n try:\n user = models.User.objects.get(username=query)\n except models.User.DoesNotExist:\n url = 'https://%s/.well-known/webfinger?resource=acct:%s' % \\\n (domain, query)\n try:\n data = get_data(url)\n except (ConnectorException, HTTPError):\n return None\n\n for link in data.get('links'):\n if link.get('rel') == 'self':\n try:\n user = activitypub.resolve_remote_id(\n models.User, link['href']\n )\n except KeyError:\n return None\n return user\n\n\ndef handle_follow(user, to_follow):\n ''' someone local wants to follow someone '''\n relationship, _ = models.UserFollowRequest.objects.get_or_create(\n user_subject=user,\n user_object=to_follow,\n )\n activity = relationship.to_activity()\n broadcast(user, activity, privacy='direct', direct_recipients=[to_follow])\n\n\ndef handle_unfollow(user, to_unfollow):\n ''' someone local wants to follow someone '''\n relationship = models.UserFollows.objects.get(\n user_subject=user,\n user_object=to_unfollow\n )\n activity = relationship.to_undo_activity(user)\n broadcast(user, activity, privacy='direct', direct_recipients=[to_unfollow])\n to_unfollow.followers.remove(user)\n\n\ndef 
handle_accept(follow_request):\n ''' send an acceptance message to a follow request '''\n user = follow_request.user_subject\n to_follow = follow_request.user_object\n with transaction.atomic():\n relationship = models.UserFollows.from_request(follow_request)\n follow_request.delete()\n relationship.save()\n\n activity = relationship.to_accept_activity()\n broadcast(to_follow, activity, privacy='direct', direct_recipients=[user])\n\n\ndef handle_reject(follow_request):\n ''' a local user who managed follows rejects a follow request '''\n user = follow_request.user_subject\n to_follow = follow_request.user_object\n activity = follow_request.to_reject_activity()\n follow_request.delete()\n broadcast(to_follow, activity, privacy='direct', direct_recipients=[user])\n\n\ndef handle_shelve(user, book, shelf):\n ''' a local user is getting a book put on their shelf '''\n # update the database\n shelve = models.ShelfBook(book=book, shelf=shelf, added_by=user)\n shelve.save()\n\n broadcast(user, shelve.to_add_activity(user))\n\n\ndef handle_unshelve(user, book, shelf):\n ''' a local user is getting a book put on their shelf '''\n # update the database\n row = models.ShelfBook.objects.get(book=book, shelf=shelf)\n activity = row.to_remove_activity(user)\n row.delete()\n\n broadcast(user, activity)\n\n\ndef handle_reading_status(user, shelf, book, privacy):\n ''' post about a user reading a book '''\n # tell the world about this cool thing that happened\n try:\n message = {\n 'to-read': 'wants to read',\n 'reading': 'started reading',\n 'read': 'finished reading'\n }[shelf.identifier]\n except KeyError:\n # it's a non-standard shelf, don't worry about it\n return\n\n status = create_generated_note(\n user,\n message,\n mention_books=[book],\n privacy=privacy\n )\n status.save()\n\n broadcast(user, status.to_create_activity(user))\n\n\ndef handle_imported_book(user, item, include_reviews, privacy):\n ''' process a goodreads csv and then post about it '''\n if isinstance(item.book, models.Work):\n item.book = item.book.default_edition\n if not item.book:\n return\n\n existing_shelf = models.ShelfBook.objects.filter(\n book=item.book, added_by=user).exists()\n\n # shelve the book if it hasn't been shelved already\n if item.shelf and not existing_shelf:\n desired_shelf = models.Shelf.objects.get(\n identifier=item.shelf,\n user=user\n )\n shelf_book = models.ShelfBook.objects.create(\n book=item.book, shelf=desired_shelf, added_by=user)\n broadcast(user, shelf_book.to_add_activity(user), privacy=privacy)\n\n for read in item.reads:\n read.book = item.book\n read.user = user\n read.save()\n\n if include_reviews and (item.rating or item.review):\n review_title = 'Review of {!r} on Goodreads'.format(\n item.book.title,\n ) if item.review else ''\n\n # we don't know the publication date of the review,\n # but \"now\" is a bad guess\n published_date_guess = item.date_read or item.date_added\n review = models.Review.objects.create(\n user=user,\n book=item.book,\n name=review_title,\n content=item.review,\n rating=item.rating,\n published_date=published_date_guess,\n privacy=privacy,\n )\n # we don't need to send out pure activities because non-bookwyrm\n # instances don't need this data\n broadcast(user, review.to_create_activity(user), privacy=privacy)\n\n\ndef handle_delete_status(user, status):\n ''' delete a status and broadcast deletion to other servers '''\n delete_status(status)\n broadcast(user, status.to_delete_activity(user))\n\n\ndef handle_status(user, form):\n ''' generic handler for statuses 
'''\n status = form.save(commit=False)\n if not status.sensitive and status.content_warning:\n # the cw text field remains populated when you click \"remove\"\n status.content_warning = None\n status.save()\n\n # inspect the text for user tags\n content = status.content\n for (mention_text, mention_user) in find_mentions(content):\n # add them to status mentions fk\n status.mention_users.add(mention_user)\n\n # turn the mention into a link\n content = re.sub(\n r'%s([^@]|$)' % mention_text,\n r'<a href=\"%s\">%s</a>\\g<1>' % \\\n (mention_user.remote_id, mention_text),\n content)\n\n # add reply parent to mentions and notify\n if status.reply_parent:\n status.mention_users.add(status.reply_parent.user)\n for mention_user in status.reply_parent.mention_users.all():\n status.mention_users.add(mention_user)\n\n if status.reply_parent.user.local:\n create_notification(\n status.reply_parent.user,\n 'REPLY',\n related_user=user,\n related_status=status\n )\n\n # deduplicate mentions\n status.mention_users.set(set(status.mention_users.all()))\n # create mention notifications\n for mention_user in status.mention_users.all():\n if status.reply_parent and mention_user == status.reply_parent.user:\n continue\n if mention_user.local:\n create_notification(\n mention_user,\n 'MENTION',\n related_user=user,\n related_status=status\n )\n\n # don't apply formatting to generated notes\n if not isinstance(status, models.GeneratedNote):\n status.content = to_markdown(content)\n # do apply formatting to quotes\n if hasattr(status, 'quote'):\n status.quote = to_markdown(status.quote)\n\n status.save()\n\n broadcast(user, status.to_create_activity(user), software='bookwyrm')\n\n # re-format the activity for non-bookwyrm servers\n remote_activity = status.to_create_activity(user, pure=True)\n broadcast(user, remote_activity, software='other')\n\n\ndef find_mentions(content):\n ''' detect @mentions in raw status content '''\n for match in re.finditer(regex.strict_username, content):\n username = match.group().strip().split('@')[1:]\n if len(username) == 1:\n # this looks like a local user (@user), fill in the domain\n username.append(DOMAIN)\n username = '@'.join(username)\n\n mention_user = handle_remote_webfinger(username)\n if not mention_user:\n # we can ignore users we don't know about\n continue\n yield (match.group(), mention_user)\n\n\ndef format_links(content):\n ''' detect and format links '''\n return re.sub(\n r'([^(href=\")]|^|\\()(https?:\\/\\/(%s([\\w\\.\\-_\\/+&\\?=:;,])*))' % \\\n regex.domain,\n r'\\g<1><a href=\"\\g<2>\">\\g<3></a>',\n content)\n\ndef to_markdown(content):\n ''' catch links and convert to markdown '''\n content = format_links(content)\n content = markdown(content)\n # sanitize resulting html\n sanitizer = InputHtmlParser()\n sanitizer.feed(content)\n return sanitizer.get_output()\n\n\ndef handle_favorite(user, status):\n ''' a user likes a status '''\n try:\n favorite = models.Favorite.objects.create(\n status=status,\n user=user\n )\n except IntegrityError:\n # you already fav'ed that\n return\n\n fav_activity = favorite.to_activity()\n broadcast(\n user, fav_activity, privacy='direct', direct_recipients=[status.user])\n create_notification(\n status.user,\n 'FAVORITE',\n related_user=user,\n related_status=status\n )\n\n\ndef handle_unfavorite(user, status):\n ''' a user likes a status '''\n try:\n favorite = models.Favorite.objects.get(\n status=status,\n user=user\n )\n except models.Favorite.DoesNotExist:\n # can't find that status, idk\n return\n\n fav_activity = 
favorite.to_undo_activity(user)\n favorite.delete()\n broadcast(user, fav_activity, direct_recipients=[status.user])\n\n\ndef handle_boost(user, status):\n ''' a user wishes to boost a status '''\n # is it boostable?\n if not status.boostable:\n return\n\n if models.Boost.objects.filter(\n boosted_status=status, user=user).exists():\n # you already boosted that.\n return\n boost = models.Boost.objects.create(\n boosted_status=status,\n privacy=status.privacy,\n user=user,\n )\n\n boost_activity = boost.to_activity()\n broadcast(user, boost_activity)\n\n create_notification(\n status.user,\n 'BOOST',\n related_user=user,\n related_status=status\n )\n\n\ndef handle_unboost(user, status):\n ''' a user regrets boosting a status '''\n boost = models.Boost.objects.filter(\n boosted_status=status, user=user\n ).first()\n activity = boost.to_undo_activity(user)\n\n boost.delete()\n broadcast(user, activity)\n\n\ndef handle_update_book_data(user, item):\n ''' broadcast the news about our book '''\n broadcast(user, item.to_update_activity(user))\n\n\ndef handle_update_user(user):\n ''' broadcast editing a user's profile '''\n broadcast(user, user.to_update_activity(user))\n", "path": "bookwyrm/outgoing.py"}]} |
gh_patches_debug_1298 | rasdani/github-patches | git_diff | modin-project__modin-2171 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[REFACTOR] Concat for a single frame shouldn't cause additional operations
Currently, in the OmniSci engine, concat execution for a single frame adds an additional projection (which is basically a NOP). In some corner cases this may cause inefficiency (especially when the API layer triggers frame execution). The easiest solution here is to simply do nothing for the single-frame concat case.
--- END ISSUE ---
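For context, a rough reproduction of the inefficiency might look like the sketch below. This is not part of the original report: it assumes the experimental OmniSci backend is active and that `pd.concat` with a single frame reaches `OmnisciOnRayFrame._concat` with an empty `other_modin_frames` list.

```python
# Hypothetical reproduction sketch (assumptions noted above).
import modin.pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [4.0, 5.0, 6.0]})

# Logically a no-op, but the lazy operation tree of the result ends up carrying
# an extra identity projection (a TransformNode) on top of the original
# FrameNode, so a later materialization runs a query that does no useful work.
single = pd.concat([df])
print(single)
```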
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `modin/experimental/engines/omnisci_on_ray/frame/data.py`
Content:
```
1 # Licensed to Modin Development Team under one or more contributor license agreements.
2 # See the NOTICE file distributed with this work for additional information regarding
3 # copyright ownership. The Modin Development Team licenses this file to you under the
4 # Apache License, Version 2.0 (the "License"); you may not use this file except in
5 # compliance with the License. You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software distributed under
10 # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific language
12 # governing permissions and limitations under the License.
13
14 from modin.engines.base.frame.data import BasePandasFrame
15 from modin.experimental.backends.omnisci.query_compiler import DFAlgQueryCompiler
16 from .partition_manager import OmnisciOnRayFrameManager
17
18 from pandas.core.index import ensure_index, Index, MultiIndex, RangeIndex
19 from pandas.core.dtypes.common import _get_dtype, is_list_like, is_bool_dtype
20 import pandas as pd
21
22 from .df_algebra import (
23 MaskNode,
24 FrameNode,
25 GroupbyAggNode,
26 TransformNode,
27 UnionNode,
28 JoinNode,
29 SortNode,
30 FilterNode,
31 translate_exprs_to_base,
32 replace_frame_in_exprs,
33 )
34 from .expr import (
35 AggregateExpr,
36 InputRefExpr,
37 LiteralExpr,
38 OpExpr,
39 build_if_then_else,
40 build_dt_expr,
41 _get_common_dtype,
42 is_cmp_op,
43 )
44 from collections import OrderedDict
45
46 import numpy as np
47 import pyarrow
48 import re
49
50
51 class OmnisciOnRayFrame(BasePandasFrame):
52
53 _query_compiler_cls = DFAlgQueryCompiler
54 _frame_mgr_cls = OmnisciOnRayFrameManager
55
56 _next_id = [1]
57
58 def __init__(
59 self,
60 partitions=None,
61 index=None,
62 columns=None,
63 row_lengths=None,
64 column_widths=None,
65 dtypes=None,
66 op=None,
67 index_cols=None,
68 uses_rowid=False,
69 force_execution_mode=None,
70 ):
71 assert dtypes is not None
72
73 self.id = str(type(self)._next_id[0])
74 type(self)._next_id[0] += 1
75
76 if index is not None:
77 index = ensure_index(index)
78 columns = ensure_index(columns)
79 self._op = op
80 self._index_cols = index_cols
81 self._partitions = partitions
82 self._index_cache = index
83 self._columns_cache = columns
84 self._row_lengths_cache = row_lengths
85 self._column_widths_cache = column_widths
86 if self._op is None:
87 self._op = FrameNode(self)
88
89 self._table_cols = columns.tolist()
90 if self._index_cols is not None:
91 self._table_cols = self._index_cols + self._table_cols
92
93 assert len(dtypes) == len(
94 self._table_cols
95 ), f"unaligned dtypes ({dtypes}) and table columns ({self._table_cols})"
96 if isinstance(dtypes, list):
97 if self._index_cols is not None:
98 # Table stores both index and data columns but those are accessed
99 # differently if we have a MultiIndex for columns. To unify access
100 # to dtype we extend index column names to tuples to have a MultiIndex
101 # of dtypes.
102 if isinstance(columns, MultiIndex):
103 tail = [""] * (columns.nlevels - 1)
104 index_tuples = [(col, *tail) for col in self._index_cols]
105 dtype_index = MultiIndex.from_tuples(index_tuples).append(columns)
106 self._dtypes = pd.Series(dtypes, index=dtype_index)
107 else:
108 self._dtypes = pd.Series(dtypes, index=self._table_cols)
109 else:
110 self._dtypes = pd.Series(dtypes, index=columns)
111 else:
112 self._dtypes = dtypes
113
114 if partitions is not None:
115 self._filter_empties()
116
117 # This frame uses encoding for column names to support exotic
118 # (e.g. non-string and reserved words) column names. Encoded
119 # names are used in OmniSci tables and corresponding Arrow tables.
120 # If we import Arrow table, we have to rename its columns for
121 # proper processing.
122 if self._has_arrow_table() and self._partitions.size > 0:
123 assert self._partitions.size == 1
124 table = self._partitions[0][0].get()
125 if table.column_names[0] != f"F_{self._table_cols[0]}":
126 new_names = [f"F_{col}" for col in table.column_names]
127 new_table = table.rename_columns(new_names)
128 self._partitions[0][0] = self._frame_mgr_cls._partition_class.put_arrow(
129 new_table
130 )
131
132 self._uses_rowid = uses_rowid
133 # Tests use forced execution mode to take control over frame
134 # execution process. Supported values:
135 # "lazy" - RuntimeError is raised if execution is triggered for the frame
136 # "arrow" - RuntimeError is raised if execution is triggered, but we cannot
137 # execute it using Arrow API (have to use OmniSci for execution)
138 self._force_execution_mode = force_execution_mode
139
140 def id_str(self):
141 return f"frame${self.id}"
142
143 def _get_dtype(self, col):
144 # If we search for an index column type in a MultiIndex then we need to
145 # extend index column names to tuples.
146 if isinstance(self._dtypes, MultiIndex) and not isinstance(col, tuple):
147 return self._dtypes[(col, *([""] * (self._dtypes.nlevels - 1)))]
148 return self._dtypes[col]
149
150 def ref(self, col):
151 if col == "__rowid__":
152 return InputRefExpr(self, col, _get_dtype(int))
153 return InputRefExpr(self, col, self._get_dtype(col))
154
155 def mask(
156 self,
157 row_indices=None,
158 row_numeric_idx=None,
159 col_indices=None,
160 col_numeric_idx=None,
161 ):
162 base = self
163
164 if col_indices is not None or col_numeric_idx is not None:
165 if col_indices is not None:
166 new_columns = col_indices
167 elif col_numeric_idx is not None:
168 new_columns = base.columns[col_numeric_idx]
169 exprs = self._index_exprs()
170 for col in new_columns:
171 exprs[col] = base.ref(col)
172 dtypes = self._dtypes_for_exprs(exprs)
173 base = self.__constructor__(
174 columns=new_columns,
175 dtypes=dtypes,
176 op=TransformNode(base, exprs),
177 index_cols=self._index_cols,
178 force_execution_mode=self._force_execution_mode,
179 )
180
181 if row_indices is not None or row_numeric_idx is not None:
182 op = MaskNode(
183 base,
184 row_indices=row_indices,
185 row_numeric_idx=row_numeric_idx,
186 )
187 return self.__constructor__(
188 columns=base.columns,
189 dtypes=base._dtypes,
190 op=op,
191 index_cols=self._index_cols,
192 force_execution_mode=self._force_execution_mode,
193 )
194
195 return base
196
197 def _has_arrow_table(self):
198 if not isinstance(self._op, FrameNode):
199 return False
200 return all(p.arrow_table for p in self._partitions.flatten())
201
202 def _dtypes_for_cols(self, new_index, new_columns):
203 if new_index is not None:
204 if isinstance(self._dtypes, MultiIndex):
205 new_index = [
206 (col, *([""] * (self._dtypes.nlevels - 1))) for col in new_index
207 ]
208 res = self._dtypes[
209 new_index
210 + (
211 new_columns
212 if isinstance(new_columns, list)
213 else new_columns.to_list()
214 )
215 ]
216 else:
217 res = self._dtypes[new_columns]
218 return res
219
220 def _dtypes_for_exprs(self, exprs):
221 return [expr._dtype for expr in exprs.values()]
222
223 def groupby_agg(self, by, axis, agg, groupby_args, **kwargs):
224 # Currently we only expect 'by' to be a projection of the same frame.
225 # If 'by' holds a list of columns/series, then we create such projection
226 # to re-use code.
227 if not isinstance(by, DFAlgQueryCompiler):
228 if is_list_like(by):
229 by_cols = []
230 by_frames = []
231 for obj in by:
232 if isinstance(obj, str):
233 by_cols.append(obj)
234 elif hasattr(obj, "_query_compiler"):
235 by_frames.append(obj._query_compiler._modin_frame)
236 else:
237 raise NotImplementedError("unsupported groupby args")
238 by_cols = Index.__new__(Index, data=by_cols, dtype=self.columns.dtype)
239 by_frame = self.mask(col_indices=by_cols)
240 if by_frames:
241 by_frame = by_frame._concat(
242 axis=1, other_modin_frames=by_frames, ignore_index=True
243 )
244 else:
245 raise NotImplementedError("unsupported groupby args")
246 else:
247 by_frame = by._modin_frame
248
249 if axis != 0:
250 raise NotImplementedError("groupby is supported for axis = 0 only")
251
252 base = by_frame._find_common_projections_base(self)
253 if base is None:
254 raise NotImplementedError("unsupported groupby args")
255
256 if groupby_args["level"] is not None:
257 raise NotImplementedError("levels are not supported for groupby")
258
259 groupby_cols = by_frame.columns.tolist()
260 agg_cols = [col for col in self.columns if col not in by_frame.columns]
261
262 # Create new base where all required columns are computed. We don't allow
263 # complex expressions to be a group key or an aggeregate operand.
264 assert isinstance(by_frame._op, TransformNode), "unexpected by_frame"
265 exprs = OrderedDict(((col, by_frame.ref(col)) for col in groupby_cols))
266 exprs.update(((col, self.ref(col)) for col in agg_cols))
267 exprs = translate_exprs_to_base(exprs, base)
268 base_cols = Index.__new__(
269 Index, data=list(exprs.keys()), dtype=self.columns.dtype
270 )
271 base = self.__constructor__(
272 columns=base_cols,
273 dtypes=self._dtypes_for_exprs(exprs),
274 op=TransformNode(base, exprs, fold=True),
275 index_cols=None,
276 force_execution_mode=self._force_execution_mode,
277 )
278
279 new_columns = []
280 index_cols = None
281
282 if groupby_args["as_index"]:
283 index_cols = groupby_cols.copy()
284 else:
285 new_columns = groupby_cols.copy()
286
287 new_dtypes = by_frame._dtypes[groupby_cols].tolist()
288
289 agg_exprs = OrderedDict()
290 if isinstance(agg, str):
291 for col in agg_cols:
292 agg_exprs[col] = AggregateExpr(agg, base.ref(col))
293 else:
294 assert isinstance(agg, dict), "unsupported aggregate type"
295 multiindex = any(isinstance(v, list) for v in agg.values())
296 for k, v in agg.items():
297 if isinstance(v, list):
298 for item in v:
299 agg_exprs[(k, item)] = AggregateExpr(item, base.ref(k))
300 else:
301 col_name = (k, v) if multiindex else k
302 agg_exprs[col_name] = AggregateExpr(v, base.ref(k))
303 new_columns.extend(agg_exprs.keys())
304 new_dtypes.extend((x._dtype for x in agg_exprs.values()))
305 new_columns = Index.__new__(Index, data=new_columns, dtype=self.columns.dtype)
306
307 new_op = GroupbyAggNode(base, groupby_cols, agg_exprs, groupby_args)
308 new_frame = self.__constructor__(
309 columns=new_columns,
310 dtypes=new_dtypes,
311 op=new_op,
312 index_cols=index_cols,
313 force_execution_mode=self._force_execution_mode,
314 )
315
316 return new_frame
317
318 def fillna(
319 self,
320 value=None,
321 method=None,
322 axis=None,
323 limit=None,
324 downcast=None,
325 ):
326 if axis != 0:
327 raise NotImplementedError("fillna is supported for axis = 0 only")
328
329 if limit is not None:
330 raise NotImplementedError("fillna doesn't support limit yet")
331
332 if downcast is not None:
333 raise NotImplementedError("fillna doesn't support downcast yet")
334
335 if method is not None:
336 raise NotImplementedError("fillna doesn't support method yet")
337
338 exprs = self._index_exprs()
339 if isinstance(value, dict):
340 for col in self.columns:
341 col_expr = self.ref(col)
342 if col in value:
343 value_expr = LiteralExpr(value[col])
344 res_type = _get_common_dtype(value_expr._dtype, col_expr._dtype)
345 exprs[col] = build_if_then_else(
346 col_expr.is_null(), value_expr, col_expr, res_type
347 )
348 else:
349 exprs[col] = col_expr
350 elif np.isscalar(value):
351 value_expr = LiteralExpr(value)
352 for col in self.columns:
353 col_expr = self.ref(col)
354 res_type = _get_common_dtype(value_expr._dtype, col_expr._dtype)
355 exprs[col] = build_if_then_else(
356 col_expr.is_null(), value_expr, col_expr, res_type
357 )
358 else:
359 raise NotImplementedError("unsupported value for fillna")
360
361 new_op = TransformNode(self, exprs)
362 dtypes = self._dtypes_for_exprs(exprs)
363 new_frame = self.__constructor__(
364 columns=self.columns,
365 dtypes=dtypes,
366 op=new_op,
367 index_cols=self._index_cols,
368 force_execution_mode=self._force_execution_mode,
369 )
370
371 return new_frame
372
373 def dt_extract(self, obj):
374 exprs = self._index_exprs()
375 for col in self.columns:
376 exprs[col] = build_dt_expr(obj, self.ref(col))
377 new_op = TransformNode(self, exprs)
378 dtypes = self._dtypes_for_exprs(exprs)
379 return self.__constructor__(
380 columns=self.columns,
381 dtypes=dtypes,
382 op=new_op,
383 index_cols=self._index_cols,
384 force_execution_mode=self._force_execution_mode,
385 )
386
387 def astype(self, col_dtypes, **kwargs):
388 columns = col_dtypes.keys()
389 new_dtypes = self.dtypes.copy()
390 for column in columns:
391 dtype = col_dtypes[column]
392 if (
393 not isinstance(dtype, type(self.dtypes[column]))
394 or dtype != self.dtypes[column]
395 ):
396 # Update the new dtype series to the proper pandas dtype
397 try:
398 new_dtype = np.dtype(dtype)
399 except TypeError:
400 new_dtype = dtype
401
402 if dtype != np.int32 and new_dtype == np.int32:
403 new_dtypes[column] = np.dtype("int64")
404 elif dtype != np.float32 and new_dtype == np.float32:
405 new_dtypes[column] = np.dtype("float64")
406 # We cannot infer without computing the dtype if
407 elif isinstance(new_dtype, str) and new_dtype == "category":
408 raise NotImplementedError("unsupported type conversion")
409 else:
410 new_dtypes[column] = new_dtype
411 exprs = self._index_exprs()
412 for col in self.columns:
413 col_expr = self.ref(col)
414 if col in columns:
415 exprs[col] = col_expr.cast(new_dtypes[col])
416 else:
417 exprs[col] = col_expr
418
419 new_op = TransformNode(self, exprs)
420 return self.__constructor__(
421 columns=self.columns,
422 dtypes=new_dtypes,
423 op=new_op,
424 index_cols=self._index_cols,
425 force_execution_mode=self._force_execution_mode,
426 )
427
428 def join(self, other, how="inner", on=None, sort=False, suffixes=("_x", "_y")):
429 assert (
430 on is not None
431 ), "Merge with unspecified 'on' parameter is not supported in the engine"
432
433 for col in on:
434 assert (
435 col in self.columns and col in other.columns
436 ), "Only cases when both frames contain key column are supported"
437
438 new_columns = []
439 new_dtypes = []
440
441 conflicting_cols = set(self.columns) & set(other.columns) - set(on)
442 for c in self.columns:
443 suffix = suffixes[0] if c in conflicting_cols else ""
444 new_columns.append(c + suffix)
445 new_dtypes.append(self._dtypes[c])
446 for c in other.columns:
447 if c not in on:
448 suffix = suffixes[1] if c in conflicting_cols else ""
449 new_columns.append(c + suffix)
450 new_dtypes.append(other._dtypes[c])
451
452 op = JoinNode(
453 self,
454 other,
455 how=how,
456 on=on,
457 sort=sort,
458 suffixes=suffixes,
459 )
460
461 new_columns = Index.__new__(Index, data=new_columns, dtype=self.columns.dtype)
462 return self.__constructor__(
463 dtypes=new_dtypes,
464 columns=new_columns,
465 op=op,
466 force_execution_mode=self._force_execution_mode,
467 )
468
469 def _index_width(self):
470 if self._index_cols is None:
471 return 1
472 return len(self._index_cols)
473
474 def _union_all(
475 self, axis, other_modin_frames, join="outer", sort=False, ignore_index=False
476 ):
477 # determine output columns
478 new_cols_map = OrderedDict()
479 for col in self.columns:
480 new_cols_map[col] = self._dtypes[col]
481 for frame in other_modin_frames:
482 if join == "inner":
483 for col in list(new_cols_map):
484 if col not in frame.columns:
485 del new_cols_map[col]
486 else:
487 for col in frame.columns:
488 if col not in new_cols_map:
489 new_cols_map[col] = frame._dtypes[col]
490 new_columns = list(new_cols_map.keys())
491
492 if sort:
493 new_columns = sorted(new_columns)
494
495 # determine how many index components are going into
496 # the resulting table
497 if not ignore_index:
498 index_width = self._index_width()
499 for frame in other_modin_frames:
500 index_width = min(index_width, frame._index_width())
501
502 # compute resulting dtypes
503 if sort:
504 new_dtypes = [new_cols_map[col] for col in new_columns]
505 else:
506 new_dtypes = list(new_cols_map.values())
507
508 # build projections to align all frames
509 aligned_frames = []
510 for frame in [self] + other_modin_frames:
511 aligned_index = None
512 exprs = OrderedDict()
513 uses_rowid = False
514
515 if not ignore_index:
516 if frame._index_cols:
517 aligned_index = frame._index_cols[0 : index_width + 1]
518 aligned_index_dtypes = frame._dtypes[aligned_index].tolist()
519 for i in range(0, index_width):
520 col = frame._index_cols[i]
521 exprs[col] = frame.ref(col)
522 else:
523 assert index_width == 1, "unexpected index width"
524 aligned_index = ["__index__"]
525 exprs["__index__"] = frame.ref("__rowid__")
526 aligned_index_dtypes = [_get_dtype(int)]
527 uses_rowid = True
528 aligned_dtypes = aligned_index_dtypes + new_dtypes
529 else:
530 aligned_dtypes = new_dtypes
531
532 for col in new_columns:
533 if col in frame._table_cols:
534 exprs[col] = frame.ref(col)
535 else:
536 exprs[col] = LiteralExpr(None)
537
538 aligned_frame_op = TransformNode(frame, exprs)
539 aligned_frames.append(
540 self.__constructor__(
541 columns=new_columns,
542 dtypes=aligned_dtypes,
543 op=aligned_frame_op,
544 index_cols=aligned_index,
545 uses_rowid=uses_rowid,
546 force_execution_mode=self._force_execution_mode,
547 )
548 )
549
550 new_frame = aligned_frames[0]
551 for frame in aligned_frames[1:]:
552 new_frame = self.__constructor__(
553 columns=new_columns,
554 dtypes=new_frame._dtypes,
555 op=UnionNode([new_frame, frame]),
556 index_cols=new_frame._index_cols,
557 force_execution_mode=self._force_execution_mode,
558 )
559
560 return new_frame
561
562 def _concat(
563 self, axis, other_modin_frames, join="outer", sort=False, ignore_index=False
564 ):
565 if axis == 0:
566 return self._union_all(axis, other_modin_frames, join, sort, ignore_index)
567
568 base = self
569 for frame in other_modin_frames:
570 base = base._find_common_projections_base(frame)
571 if base is None:
572 raise NotImplementedError("concat requiring join is not supported yet")
573
574 exprs = self._index_exprs()
575 new_columns = self.columns.tolist()
576 for col in self.columns:
577 exprs[col] = self.ref(col)
578 for frame in other_modin_frames:
579 for col in frame.columns:
580 if col == "" or col in exprs:
581 new_col = f"__col{len(exprs)}__"
582 else:
583 new_col = col
584 exprs[new_col] = frame.ref(col)
585 new_columns.append(new_col)
586
587 exprs = translate_exprs_to_base(exprs, base)
588 new_columns = Index.__new__(Index, data=new_columns, dtype=self.columns.dtype)
589 new_frame = self.__constructor__(
590 columns=new_columns,
591 dtypes=self._dtypes_for_exprs(exprs),
592 op=TransformNode(base, exprs),
593 index_cols=self._index_cols,
594 force_execution_mode=self._force_execution_mode,
595 )
596 return new_frame
597
598 def bin_op(self, other, op_name, **kwargs):
599 if isinstance(other, (int, float, str)):
600 value_expr = LiteralExpr(other)
601 exprs = self._index_exprs()
602 for col in self.columns:
603 exprs[col] = self.ref(col).bin_op(value_expr, op_name)
604 return self.__constructor__(
605 columns=self.columns,
606 dtypes=self._dtypes_for_exprs(exprs),
607 op=TransformNode(self, exprs),
608 index_cols=self._index_cols,
609 force_execution_mode=self._force_execution_mode,
610 )
611 elif isinstance(other, list):
612 if len(other) != len(self.columns):
613 raise ValueError(
614 f"length must be {len(self.columns)}: given {len(other)}"
615 )
616 exprs = self._index_exprs()
617 for col, val in zip(self.columns, other):
618 exprs[col] = self.ref(col).bin_op(LiteralExpr(val), op_name)
619 return self.__constructor__(
620 columns=self.columns,
621 dtypes=self._dtypes_for_exprs(exprs),
622 op=TransformNode(self, exprs),
623 index_cols=self._index_cols,
624 force_execution_mode=self._force_execution_mode,
625 )
626 elif isinstance(other, type(self)):
627 # For now we only support binary operations on
628 # projections of the same frame, because we have
629 # no support for outer join.
630 base = self._find_common_projections_base(other)
631 if base is None:
632 raise NotImplementedError(
633 "unsupported binary op args (outer join is not supported)"
634 )
635
636 new_columns = self.columns.tolist()
637 for col in other.columns:
638 if col not in self.columns:
639 new_columns.append(col)
640 new_columns = sorted(new_columns)
641
642 fill_value = kwargs.get("fill_value", None)
643 if fill_value is not None:
644 fill_value = LiteralExpr(fill_value)
645 if is_cmp_op(op_name):
646 null_value = LiteralExpr(op_name == "ne")
647 else:
648 null_value = LiteralExpr(None)
649
650 exprs = self._index_exprs()
651 for col in new_columns:
652 lhs = self.ref(col) if col in self.columns else fill_value
653 rhs = other.ref(col) if col in other.columns else fill_value
654 if lhs is None or rhs is None:
655 exprs[col] = null_value
656 else:
657 exprs[col] = lhs.bin_op(rhs, op_name)
658
659 exprs = translate_exprs_to_base(exprs, base)
660 return self.__constructor__(
661 columns=new_columns,
662 dtypes=self._dtypes_for_exprs(exprs),
663 op=TransformNode(base, exprs),
664 index_cols=self._index_cols,
665 force_execution_mode=self._force_execution_mode,
666 )
667 else:
668 raise NotImplementedError(f"unsupported operand type: {type(other)}")
669
670 def insert(self, loc, column, value):
671 assert column not in self._table_cols
672 assert 0 <= loc <= len(self.columns)
673
674 exprs = self._index_exprs()
675 for i in range(0, loc):
676 col = self.columns[i]
677 exprs[col] = self.ref(col)
678 exprs[column] = LiteralExpr(value)
679 for i in range(loc, len(self.columns)):
680 col = self.columns[i]
681 exprs[col] = self.ref(col)
682
683 new_columns = self.columns.insert(loc, column)
684
685 return self.__constructor__(
686 columns=new_columns,
687 dtypes=self._dtypes_for_exprs(exprs),
688 op=TransformNode(self, exprs),
689 index_cols=self._index_cols,
690 force_execution_mode=self._force_execution_mode,
691 )
692
693 def cat_codes(self):
694 assert len(self.columns) == 1
695 assert self._dtypes[-1] == "category"
696
697 col = self.columns[-1]
698 exprs = self._index_exprs()
699 col_expr = self.ref(col)
700 code_expr = OpExpr("KEY_FOR_STRING", [col_expr], _get_dtype("int32"))
701 null_val = LiteralExpr(np.int32(-1))
702 exprs[col] = build_if_then_else(
703 col_expr.is_null(), null_val, code_expr, _get_dtype("int32")
704 )
705
706 return self.__constructor__(
707 columns=self.columns,
708 dtypes=self._dtypes,
709 op=TransformNode(self, exprs),
710 index_cols=self._index_cols,
711 force_execution_mode=self._force_execution_mode,
712 )
713
714 def sort_rows(self, columns, ascending, ignore_index, na_position):
715 if na_position != "first" and na_position != "last":
716 raise ValueError(f"Unsupported na_position value '{na_position}'")
717
718 if not isinstance(columns, list):
719 columns = [columns]
720 columns = [self._find_index_or_col(col) for col in columns]
721
722 if isinstance(ascending, list):
723 if len(ascending) != len(columns):
724 raise ValueError("ascending list length doesn't match columns list")
725 else:
726 if not isinstance(ascending, bool):
727 raise ValueError("unsupported ascending value")
728 ascending = [ascending] * len(columns)
729
730 if ignore_index:
731 # If index is ignored then we might need to drop some columns.
732 # At the same time some of dropped index columns can be used
733 # for sorting and should be droped after sorting is done.
734 if self._index_cols is not None:
735 base = self
736
737 drop_index_cols_before = [
738 col for col in self._index_cols if col not in columns
739 ]
740 drop_index_cols_after = [
741 col for col in self._index_cols if col in columns
742 ]
743 if not drop_index_cols_after:
744 drop_index_cols_after = None
745
746 if drop_index_cols_before:
747 exprs = OrderedDict()
748 index_cols = (
749 drop_index_cols_after if drop_index_cols_after else None
750 )
751 for col in drop_index_cols_after:
752 exprs[col] = base.ref(col)
753 for col in base.columns:
754 exprs[col] = base.ref(col)
755 base = self.__constructor__(
756 columns=base.columns,
757 dtypes=self._dtypes_for_exprs(exprs),
758 op=TransformNode(base, exprs),
759 index_cols=index_cols,
760 force_execution_mode=self._force_execution_mode,
761 )
762
763 base = self.__constructor__(
764 columns=base.columns,
765 dtypes=base._dtypes,
766 op=SortNode(base, columns, ascending, na_position),
767 index_cols=base._index_cols,
768 force_execution_mode=self._force_execution_mode,
769 )
770
771 if drop_index_cols_after:
772 exprs = OrderedDict()
773 for col in base.columns:
774 exprs[col] = base.ref(col)
775 base = self.__constructor__(
776 columns=base.columns,
777 dtypes=self._dtypes_for_exprs(exprs),
778 op=TransformNode(base, exprs),
779 index_cols=None,
780 force_execution_mode=self._force_execution_mode,
781 )
782
783 return base
784 else:
785 return self.__constructor__(
786 columns=self.columns,
787 dtypes=self._dtypes,
788 op=SortNode(self, columns, ascending, na_position),
789 index_cols=None,
790 force_execution_mode=self._force_execution_mode,
791 )
792 else:
793 base = self
794
795 # If index is preserved and we have no index columns then we
796 # need to create one using __rowid__ virtual column.
797 if self._index_cols is None:
798 base = base._materialize_rowid()
799
800 return self.__constructor__(
801 columns=base.columns,
802 dtypes=base._dtypes,
803 op=SortNode(base, columns, ascending, na_position),
804 index_cols=base._index_cols,
805 force_execution_mode=self._force_execution_mode,
806 )
807
808 def filter(self, key):
809 if not isinstance(key, type(self)):
810 raise NotImplementedError("Unsupported key type in filter")
811
812 if not isinstance(key._op, TransformNode) or len(key.columns) != 1:
813 raise NotImplementedError("Unsupported key in filter")
814
815 key_col = key.columns[0]
816 if not is_bool_dtype(key._dtypes[key_col]):
817 raise NotImplementedError("Unsupported key in filter")
818
819 base = self._find_common_projections_base(key)
820 if base is None:
821 raise NotImplementedError("Unsupported key in filter")
822
823 # We build the resulting frame by applying the filter to the
824 # base frame and then using the filtered result as a new base.
825 # If base frame has no index columns, then we need to create
826 # one.
827 key_exprs = translate_exprs_to_base(key._op.exprs, base)
828 if base._index_cols is None:
829 filter_base = base._materialize_rowid()
830 key_exprs = replace_frame_in_exprs(key_exprs, base, filter_base)
831 else:
832 filter_base = base
833 condition = key_exprs[key_col]
834 filtered_base = self.__constructor__(
835 columns=filter_base.columns,
836 dtypes=filter_base._dtypes,
837 op=FilterNode(filter_base, condition),
838 index_cols=filter_base._index_cols,
839 force_execution_mode=self._force_execution_mode,
840 )
841
842 if self is base:
843 exprs = OrderedDict()
844 for col in filtered_base._table_cols:
845 exprs[col] = filtered_base.ref(col)
846 else:
847 assert isinstance(
848 self._op, TransformNode
849 ), f"unexpected op: {self._op.dumps()}"
850 exprs = translate_exprs_to_base(self._op.exprs, base)
851 exprs = replace_frame_in_exprs(exprs, base, filtered_base)
852 if base._index_cols is None:
853 exprs["__index__"] = filtered_base.ref("__index__")
854 exprs.move_to_end("__index__", last=False)
855
856 return self.__constructor__(
857 columns=self.columns,
858 dtypes=self._dtypes_for_exprs(exprs),
859 op=TransformNode(filtered_base, exprs),
860 index_cols=filtered_base._index_cols,
861 force_execution_mode=self._force_execution_mode,
862 )
863
864 def _materialize_rowid(self):
865 exprs = OrderedDict()
866 exprs["__index__"] = self.ref("__rowid__")
867 for col in self._table_cols:
868 exprs[col] = self.ref(col)
869 return self.__constructor__(
870 columns=self.columns,
871 dtypes=self._dtypes_for_exprs(exprs),
872 op=TransformNode(self, exprs),
873 index_cols=["__index__"],
874 uses_rowid=True,
875 force_execution_mode=self._force_execution_mode,
876 )
877
878 def _index_exprs(self):
879 exprs = OrderedDict()
880 if self._index_cols:
881 for col in self._index_cols:
882 exprs[col] = self.ref(col)
883 return exprs
884
885 def _find_common_projections_base(self, rhs):
886 bases = {self}
887 while self._is_projection():
888 self = self._op.input[0]
889 bases.add(self)
890
891 while rhs not in bases and rhs._is_projection():
892 rhs = rhs._op.input[0]
893
894 if rhs in bases:
895 return rhs
896
897 return None
898
899 def _is_projection(self):
900 return isinstance(self._op, TransformNode)
901
902 def _execute(self):
903 if isinstance(self._op, FrameNode):
904 return
905
906 if self._force_execution_mode == "lazy":
907 raise RuntimeError("unexpected execution triggered on lazy frame")
908
909 # Some frames require rowid which is available for executed frames only.
910 # Also there is a common pattern when MaskNode is executed to print
911 # frame. If we run the whole tree then any following frame usage will
912 # require re-compute. So we just execute MaskNode's operands.
913 self._run_sub_queries()
914
915 if self._can_execute_arrow():
916 new_table = self._execute_arrow()
917 new_partitions = np.empty((1, 1), dtype=np.dtype(object))
918 new_partitions[0][0] = self._frame_mgr_cls._partition_class.put_arrow(
919 new_table
920 )
921 else:
922 if self._force_execution_mode == "arrow":
923 raise RuntimeError("forced arrow execution failed")
924
925 new_partitions = self._frame_mgr_cls.run_exec_plan(
926 self._op, self._index_cols, self._dtypes, self._table_cols
927 )
928 self._partitions = new_partitions
929 self._op = FrameNode(self)
930
931 def _require_executed_base(self):
932 if isinstance(self._op, MaskNode):
933 return True
934 return self._uses_rowid
935
936 def _run_sub_queries(self):
937 if isinstance(self._op, FrameNode):
938 return
939
940 if self._require_executed_base():
941 for op in self._op.input:
942 op._execute()
943 else:
944 for frame in self._op.input:
945 frame._run_sub_queries()
946
947 def _can_execute_arrow(self):
948 if isinstance(self._op, FrameNode):
949 return self._has_arrow_table()
950 elif isinstance(self._op, MaskNode):
951 return (
952 self._op.row_indices is None and self._op.input[0]._can_execute_arrow()
953 )
954 elif isinstance(self._op, TransformNode):
955 return self._op.is_drop() and self._op.input[0]._can_execute_arrow()
956 elif isinstance(self._op, UnionNode):
957 return all(frame._can_execute_arrow() for frame in self._op.input)
958 else:
959 return False
960
961 def _execute_arrow(self):
962 if isinstance(self._op, FrameNode):
963 if self._partitions.size == 0:
964 return pyarrow.Table()
965 else:
966 assert self._partitions.size == 1
967 return self._partitions[0][0].get()
968 elif isinstance(self._op, MaskNode):
969 return self._op.input[0]._arrow_row_slice(self._op.row_numeric_idx)
970 elif isinstance(self._op, TransformNode):
971 return self._op.input[0]._arrow_col_slice(set(self._op.exprs.keys()))
972 elif isinstance(self._op, UnionNode):
973 return self._arrow_concat(self._op.input)
974 else:
975 raise RuntimeError(f"Unexpected op ({type(self._op)}) in _execute_arrow")
976
977 def _arrow_col_slice(self, new_columns):
978 table = self._execute_arrow()
979 return table.drop(
980 [f"F_{col}" for col in self._table_cols if col not in new_columns]
981 )
982
983 def _arrow_row_slice(self, row_numeric_idx):
984 table = self._execute_arrow()
985 if isinstance(row_numeric_idx, slice):
986 start = 0 if row_numeric_idx.start is None else row_numeric_idx.start
987 if start < 0:
988 start = table.num_rows - start
989 end = (
990 table.num_rows if row_numeric_idx.stop is None else row_numeric_idx.stop
991 )
992 if end < 0:
993 end = table.num_rows - end
994 if row_numeric_idx.step is None or row_numeric_idx.step == 1:
995 length = 0 if start >= end else end - start
996 return table.slice(start, length)
997 else:
998 parts = []
999 for i in range(start, end, row_numeric_idx.step):
1000 parts.append(table.slice(i, 1))
1001 return pyarrow.concat_tables(parts)
1002
1003 start = None
1004 end = None
1005 parts = []
1006 for idx in row_numeric_idx:
1007 if start is None:
1008 start = idx
1009 end = idx
1010 elif idx == end + 1:
1011 end = idx
1012 else:
1013 if start:
1014 parts.append(table.slice(start, end - start + 1))
1015 start = idx
1016 end = idx
1017 parts.append(table.slice(start, end - start + 1))
1018
1019 return pyarrow.concat_tables(parts)
1020
1021 @classmethod
1022 def _arrow_concat(cls, frames):
1023 return pyarrow.concat_tables(frame._execute_arrow() for frame in frames)
1024
1025 def _build_index_cache(self):
1026 assert isinstance(self._op, FrameNode)
1027
1028 if self._partitions.size == 0:
1029 self._index_cache = Index.__new__(Index)
1030 else:
1031 assert self._partitions.size == 1
1032 obj = self._partitions[0][0].get()
1033 if isinstance(obj, (pd.DataFrame, pd.Series)):
1034 self._index_cache = obj.index
1035 else:
1036 assert isinstance(obj, pyarrow.Table)
1037 if self._index_cols is None:
1038 self._index_cache = Index.__new__(
1039 RangeIndex, data=range(obj.num_rows)
1040 )
1041 else:
1042 index_at = obj.drop([f"F_{col}" for col in self.columns])
1043 index_df = index_at.to_pandas()
1044 index_df.set_index(
1045 [f"F_{col}" for col in self._index_cols], inplace=True
1046 )
1047 index_df.index.rename(
1048 self._index_names(self._index_cols), inplace=True
1049 )
1050 self._index_cache = index_df.index
1051
1052 def _get_index(self):
1053 self._execute()
1054 if self._index_cache is None:
1055 self._build_index_cache()
1056 return self._index_cache
1057
1058 def _set_index(self, new_index):
1059 raise NotImplementedError("OmnisciOnRayFrame._set_index is not yet suported")
1060
1061 def reset_index(self, drop):
1062 if drop:
1063 exprs = OrderedDict()
1064 for c in self.columns:
1065 exprs[c] = self.ref(c)
1066 return self.__constructor__(
1067 columns=self.columns,
1068 dtypes=self._dtypes_for_exprs(exprs),
1069 op=TransformNode(self, exprs),
1070 index_cols=None,
1071 force_execution_mode=self._force_execution_mode,
1072 )
1073 else:
1074 if self._index_cols is None:
1075 raise NotImplementedError(
1076 "default index reset with no drop is not supported"
1077 )
1078 # Need to demangle index names.
1079 exprs = OrderedDict()
1080 for i, c in enumerate(self._index_cols):
1081 name = self._index_name(c)
1082 if name is None:
1083 name = f"level_{i}"
1084 if name in exprs:
1085 raise ValueError(f"cannot insert {name}, already exists")
1086 exprs[name] = self.ref(c)
1087 for c in self.columns:
1088 if c in exprs:
1089 raise ValueError(f"cannot insert {c}, already exists")
1090 exprs[c] = self.ref(c)
1091 new_columns = Index.__new__(Index, data=exprs.keys(), dtype="O")
1092 return self.__constructor__(
1093 columns=new_columns,
1094 dtypes=self._dtypes_for_exprs(exprs),
1095 op=TransformNode(self, exprs),
1096 index_cols=None,
1097 force_execution_mode=self._force_execution_mode,
1098 )
1099
1100 def _set_columns(self, new_columns):
1101 exprs = self._index_exprs()
1102 for old, new in zip(self.columns, new_columns):
1103 exprs[new] = self.ref(old)
1104 return self.__constructor__(
1105 columns=new_columns,
1106 dtypes=self._dtypes.tolist(),
1107 op=TransformNode(self, exprs),
1108 index_cols=self._index_cols,
1109 force_execution_mode=self._force_execution_mode,
1110 )
1111
1112 def _get_columns(self):
1113 return super(OmnisciOnRayFrame, self)._get_columns()
1114
1115 columns = property(_get_columns)
1116 index = property(_get_index, _set_index)
1117
1118 def has_multiindex(self):
1119 if self._index_cache is not None:
1120 return isinstance(self._index_cache, MultiIndex)
1121 return self._index_cols is not None and len(self._index_cols) > 1
1122
1123 def to_pandas(self):
1124 self._execute()
1125
1126 if self._force_execution_mode == "lazy":
1127 raise RuntimeError("unexpected to_pandas triggered on lazy frame")
1128
1129 df = self._frame_mgr_cls.to_pandas(self._partitions)
1130
1131 # If we make dataframe from Arrow table then we might need to set
1132 # index columns.
1133 if len(df.columns) != len(self.columns):
1134 assert self._index_cols
1135 df.set_index([f"F_{col}" for col in self._index_cols], inplace=True)
1136 df.index.rename(self._index_names(self._index_cols), inplace=True)
1137 assert len(df.columns) == len(self.columns)
1138 else:
1139 assert self._index_cols is None
1140 assert df.index.name is None, f"index name '{df.index.name}' is not None"
1141
1142 # Restore original column labels encoded in OmniSci to meet its
1143 # restirctions on column names.
1144 df.columns = self.columns
1145
1146 return df
1147
1148 def _index_names(self, cols):
1149 if len(cols) == 1:
1150 return self._index_name(cols[0])
1151 return [self._index_name(n) for n in cols]
1152
1153 def _index_name(self, col):
1154 if col == "__index__":
1155 return None
1156
1157 match = re.search("__index__\\d+_(.*)", col)
1158 if match:
1159 name = match.group(1)
1160 if name == "__None__":
1161 return None
1162 return name
1163
1164 return col
1165
1166 def _find_index_or_col(self, col):
1167 """For given column or index name return a column name"""
1168 if col in self.columns:
1169 return col
1170
1171 if self._index_cols is not None:
1172 for idx_col in self._index_cols:
1173 if re.match(f"__index__\\d+_{col}", idx_col):
1174 return idx_col
1175
1176 raise ValueError(f"Unknown column '{col}'")
1177
1178 @classmethod
1179 def from_pandas(cls, df):
1180 new_index = df.index
1181 new_columns = df.columns
1182 # If there is non-trivial index, we put it into columns.
1183 # That's what we usually have for arrow tables and execution
1184 # result. Unnamed index is renamed to __index__. Also all
1185 # columns get 'F_' prefix to handle names unsupported in
1186 # OmniSci.
1187 if cls._is_trivial_index(df.index):
1188 index_cols = None
1189 else:
1190 orig_index_names = df.index.names
1191 orig_df = df
1192
1193 index_cols = [
1194 f"__index__{i}_{'__None__' if n is None else n}"
1195 for i, n in enumerate(df.index.names)
1196 ]
1197 df.index.names = index_cols
1198 df = df.reset_index()
1199
1200 orig_df.index.names = orig_index_names
1201 new_dtypes = df.dtypes
1202 df = df.add_prefix("F_")
1203 new_parts, new_lengths, new_widths = cls._frame_mgr_cls.from_pandas(df, True)
1204 return cls(
1205 new_parts,
1206 new_index,
1207 new_columns,
1208 new_lengths,
1209 new_widths,
1210 dtypes=new_dtypes,
1211 index_cols=index_cols,
1212 )
1213
1214 @classmethod
1215 def _is_trivial_index(cls, index):
1216 """Return true if index is a range [0..N]"""
1217 if isinstance(index, pd.RangeIndex):
1218 return index.start == 0 and index.step == 1
1219 if not isinstance(index, pd.Int64Index):
1220 return False
1221 return (
1222 index.is_monotonic_increasing
1223 and index.unique
1224 and index.min == 0
1225 and index.max == len(index) - 1
1226 )
1227
```
--- END FILES ---
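Note that in the file above both concat paths build a `TransformNode` projection over the input even when `other_modin_frames` is empty: the axis=0 path (`_union_all`) wraps the frame while aligning columns, and the axis=1 path of `_concat` re-references every column of `self`. That identity projection is the NOP the issue describes. A hedged sketch of how the desired single-frame behavior could be checked (assuming the OmniSci backend is configured, and relying only on internal attributes already used in the file above) is:

```python
# Hypothetical regression check (a sketch under the assumptions above, not an
# existing test in the repository).
import modin.pandas as pd
from modin.experimental.engines.omnisci_on_ray.frame.df_algebra import FrameNode


def test_single_frame_concat_adds_no_projection():
    df = pd.DataFrame({"a": [1, 2, 3]})
    base_frame = df._query_compiler._modin_frame

    result = pd.concat([df])
    result_frame = result._query_compiler._modin_frame

    # With the single-frame shortcut in place, the frame is expected to come
    # back unchanged; without it, an extra TransformNode is layered on top.
    assert result_frame is base_frame or isinstance(result_frame._op, FrameNode)
```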
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/modin/experimental/engines/omnisci_on_ray/frame/data.py b/modin/experimental/engines/omnisci_on_ray/frame/data.py
--- a/modin/experimental/engines/omnisci_on_ray/frame/data.py
+++ b/modin/experimental/engines/omnisci_on_ray/frame/data.py
@@ -562,6 +562,9 @@
def _concat(
self, axis, other_modin_frames, join="outer", sort=False, ignore_index=False
):
+ if not other_modin_frames:
+ return self
+
if axis == 0:
return self._union_all(axis, other_modin_frames, join, sort, ignore_index)
| {"golden_diff": "diff --git a/modin/experimental/engines/omnisci_on_ray/frame/data.py b/modin/experimental/engines/omnisci_on_ray/frame/data.py\n--- a/modin/experimental/engines/omnisci_on_ray/frame/data.py\n+++ b/modin/experimental/engines/omnisci_on_ray/frame/data.py\n@@ -562,6 +562,9 @@\n def _concat(\n self, axis, other_modin_frames, join=\"outer\", sort=False, ignore_index=False\n ):\n+ if not other_modin_frames:\n+ return self\n+\n if axis == 0:\n return self._union_all(axis, other_modin_frames, join, sort, ignore_index)\n", "issue": "[REFACTOR] Concat for a single frame shouldn't cause additional operations\nCurrently, in OmniSci engine concat execution for a single frame adds an additional projection (which ais basically NOP). In some corner cases it may cause inefficiency (especially when API layer triggers frame execution). The esiest solution here is to just do nothing for a single frame concat case.\n", "before_files": [{"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\nfrom modin.engines.base.frame.data import BasePandasFrame\nfrom modin.experimental.backends.omnisci.query_compiler import DFAlgQueryCompiler\nfrom .partition_manager import OmnisciOnRayFrameManager\n\nfrom pandas.core.index import ensure_index, Index, MultiIndex, RangeIndex\nfrom pandas.core.dtypes.common import _get_dtype, is_list_like, is_bool_dtype\nimport pandas as pd\n\nfrom .df_algebra import (\n MaskNode,\n FrameNode,\n GroupbyAggNode,\n TransformNode,\n UnionNode,\n JoinNode,\n SortNode,\n FilterNode,\n translate_exprs_to_base,\n replace_frame_in_exprs,\n)\nfrom .expr import (\n AggregateExpr,\n InputRefExpr,\n LiteralExpr,\n OpExpr,\n build_if_then_else,\n build_dt_expr,\n _get_common_dtype,\n is_cmp_op,\n)\nfrom collections import OrderedDict\n\nimport numpy as np\nimport pyarrow\nimport re\n\n\nclass OmnisciOnRayFrame(BasePandasFrame):\n\n _query_compiler_cls = DFAlgQueryCompiler\n _frame_mgr_cls = OmnisciOnRayFrameManager\n\n _next_id = [1]\n\n def __init__(\n self,\n partitions=None,\n index=None,\n columns=None,\n row_lengths=None,\n column_widths=None,\n dtypes=None,\n op=None,\n index_cols=None,\n uses_rowid=False,\n force_execution_mode=None,\n ):\n assert dtypes is not None\n\n self.id = str(type(self)._next_id[0])\n type(self)._next_id[0] += 1\n\n if index is not None:\n index = ensure_index(index)\n columns = ensure_index(columns)\n self._op = op\n self._index_cols = index_cols\n self._partitions = partitions\n self._index_cache = index\n self._columns_cache = columns\n self._row_lengths_cache = row_lengths\n self._column_widths_cache = column_widths\n if self._op is None:\n self._op = FrameNode(self)\n\n self._table_cols = columns.tolist()\n if self._index_cols is not None:\n self._table_cols = self._index_cols + self._table_cols\n\n assert 
len(dtypes) == len(\n self._table_cols\n ), f\"unaligned dtypes ({dtypes}) and table columns ({self._table_cols})\"\n if isinstance(dtypes, list):\n if self._index_cols is not None:\n # Table stores both index and data columns but those are accessed\n # differently if we have a MultiIndex for columns. To unify access\n # to dtype we extend index column names to tuples to have a MultiIndex\n # of dtypes.\n if isinstance(columns, MultiIndex):\n tail = [\"\"] * (columns.nlevels - 1)\n index_tuples = [(col, *tail) for col in self._index_cols]\n dtype_index = MultiIndex.from_tuples(index_tuples).append(columns)\n self._dtypes = pd.Series(dtypes, index=dtype_index)\n else:\n self._dtypes = pd.Series(dtypes, index=self._table_cols)\n else:\n self._dtypes = pd.Series(dtypes, index=columns)\n else:\n self._dtypes = dtypes\n\n if partitions is not None:\n self._filter_empties()\n\n # This frame uses encoding for column names to support exotic\n # (e.g. non-string and reserved words) column names. Encoded\n # names are used in OmniSci tables and corresponding Arrow tables.\n # If we import Arrow table, we have to rename its columns for\n # proper processing.\n if self._has_arrow_table() and self._partitions.size > 0:\n assert self._partitions.size == 1\n table = self._partitions[0][0].get()\n if table.column_names[0] != f\"F_{self._table_cols[0]}\":\n new_names = [f\"F_{col}\" for col in table.column_names]\n new_table = table.rename_columns(new_names)\n self._partitions[0][0] = self._frame_mgr_cls._partition_class.put_arrow(\n new_table\n )\n\n self._uses_rowid = uses_rowid\n # Tests use forced execution mode to take control over frame\n # execution process. Supported values:\n # \"lazy\" - RuntimeError is raised if execution is triggered for the frame\n # \"arrow\" - RuntimeError is raised if execution is triggered, but we cannot\n # execute it using Arrow API (have to use OmniSci for execution)\n self._force_execution_mode = force_execution_mode\n\n def id_str(self):\n return f\"frame${self.id}\"\n\n def _get_dtype(self, col):\n # If we search for an index column type in a MultiIndex then we need to\n # extend index column names to tuples.\n if isinstance(self._dtypes, MultiIndex) and not isinstance(col, tuple):\n return self._dtypes[(col, *([\"\"] * (self._dtypes.nlevels - 1)))]\n return self._dtypes[col]\n\n def ref(self, col):\n if col == \"__rowid__\":\n return InputRefExpr(self, col, _get_dtype(int))\n return InputRefExpr(self, col, self._get_dtype(col))\n\n def mask(\n self,\n row_indices=None,\n row_numeric_idx=None,\n col_indices=None,\n col_numeric_idx=None,\n ):\n base = self\n\n if col_indices is not None or col_numeric_idx is not None:\n if col_indices is not None:\n new_columns = col_indices\n elif col_numeric_idx is not None:\n new_columns = base.columns[col_numeric_idx]\n exprs = self._index_exprs()\n for col in new_columns:\n exprs[col] = base.ref(col)\n dtypes = self._dtypes_for_exprs(exprs)\n base = self.__constructor__(\n columns=new_columns,\n dtypes=dtypes,\n op=TransformNode(base, exprs),\n index_cols=self._index_cols,\n force_execution_mode=self._force_execution_mode,\n )\n\n if row_indices is not None or row_numeric_idx is not None:\n op = MaskNode(\n base,\n row_indices=row_indices,\n row_numeric_idx=row_numeric_idx,\n )\n return self.__constructor__(\n columns=base.columns,\n dtypes=base._dtypes,\n op=op,\n index_cols=self._index_cols,\n force_execution_mode=self._force_execution_mode,\n )\n\n return base\n\n def _has_arrow_table(self):\n if not isinstance(self._op, 
FrameNode):\n return False\n return all(p.arrow_table for p in self._partitions.flatten())\n\n def _dtypes_for_cols(self, new_index, new_columns):\n if new_index is not None:\n if isinstance(self._dtypes, MultiIndex):\n new_index = [\n (col, *([\"\"] * (self._dtypes.nlevels - 1))) for col in new_index\n ]\n res = self._dtypes[\n new_index\n + (\n new_columns\n if isinstance(new_columns, list)\n else new_columns.to_list()\n )\n ]\n else:\n res = self._dtypes[new_columns]\n return res\n\n def _dtypes_for_exprs(self, exprs):\n return [expr._dtype for expr in exprs.values()]\n\n def groupby_agg(self, by, axis, agg, groupby_args, **kwargs):\n # Currently we only expect 'by' to be a projection of the same frame.\n # If 'by' holds a list of columns/series, then we create such projection\n # to re-use code.\n if not isinstance(by, DFAlgQueryCompiler):\n if is_list_like(by):\n by_cols = []\n by_frames = []\n for obj in by:\n if isinstance(obj, str):\n by_cols.append(obj)\n elif hasattr(obj, \"_query_compiler\"):\n by_frames.append(obj._query_compiler._modin_frame)\n else:\n raise NotImplementedError(\"unsupported groupby args\")\n by_cols = Index.__new__(Index, data=by_cols, dtype=self.columns.dtype)\n by_frame = self.mask(col_indices=by_cols)\n if by_frames:\n by_frame = by_frame._concat(\n axis=1, other_modin_frames=by_frames, ignore_index=True\n )\n else:\n raise NotImplementedError(\"unsupported groupby args\")\n else:\n by_frame = by._modin_frame\n\n if axis != 0:\n raise NotImplementedError(\"groupby is supported for axis = 0 only\")\n\n base = by_frame._find_common_projections_base(self)\n if base is None:\n raise NotImplementedError(\"unsupported groupby args\")\n\n if groupby_args[\"level\"] is not None:\n raise NotImplementedError(\"levels are not supported for groupby\")\n\n groupby_cols = by_frame.columns.tolist()\n agg_cols = [col for col in self.columns if col not in by_frame.columns]\n\n # Create new base where all required columns are computed. 
We don't allow\n # complex expressions to be a group key or an aggeregate operand.\n assert isinstance(by_frame._op, TransformNode), \"unexpected by_frame\"\n exprs = OrderedDict(((col, by_frame.ref(col)) for col in groupby_cols))\n exprs.update(((col, self.ref(col)) for col in agg_cols))\n exprs = translate_exprs_to_base(exprs, base)\n base_cols = Index.__new__(\n Index, data=list(exprs.keys()), dtype=self.columns.dtype\n )\n base = self.__constructor__(\n columns=base_cols,\n dtypes=self._dtypes_for_exprs(exprs),\n op=TransformNode(base, exprs, fold=True),\n index_cols=None,\n force_execution_mode=self._force_execution_mode,\n )\n\n new_columns = []\n index_cols = None\n\n if groupby_args[\"as_index\"]:\n index_cols = groupby_cols.copy()\n else:\n new_columns = groupby_cols.copy()\n\n new_dtypes = by_frame._dtypes[groupby_cols].tolist()\n\n agg_exprs = OrderedDict()\n if isinstance(agg, str):\n for col in agg_cols:\n agg_exprs[col] = AggregateExpr(agg, base.ref(col))\n else:\n assert isinstance(agg, dict), \"unsupported aggregate type\"\n multiindex = any(isinstance(v, list) for v in agg.values())\n for k, v in agg.items():\n if isinstance(v, list):\n for item in v:\n agg_exprs[(k, item)] = AggregateExpr(item, base.ref(k))\n else:\n col_name = (k, v) if multiindex else k\n agg_exprs[col_name] = AggregateExpr(v, base.ref(k))\n new_columns.extend(agg_exprs.keys())\n new_dtypes.extend((x._dtype for x in agg_exprs.values()))\n new_columns = Index.__new__(Index, data=new_columns, dtype=self.columns.dtype)\n\n new_op = GroupbyAggNode(base, groupby_cols, agg_exprs, groupby_args)\n new_frame = self.__constructor__(\n columns=new_columns,\n dtypes=new_dtypes,\n op=new_op,\n index_cols=index_cols,\n force_execution_mode=self._force_execution_mode,\n )\n\n return new_frame\n\n def fillna(\n self,\n value=None,\n method=None,\n axis=None,\n limit=None,\n downcast=None,\n ):\n if axis != 0:\n raise NotImplementedError(\"fillna is supported for axis = 0 only\")\n\n if limit is not None:\n raise NotImplementedError(\"fillna doesn't support limit yet\")\n\n if downcast is not None:\n raise NotImplementedError(\"fillna doesn't support downcast yet\")\n\n if method is not None:\n raise NotImplementedError(\"fillna doesn't support method yet\")\n\n exprs = self._index_exprs()\n if isinstance(value, dict):\n for col in self.columns:\n col_expr = self.ref(col)\n if col in value:\n value_expr = LiteralExpr(value[col])\n res_type = _get_common_dtype(value_expr._dtype, col_expr._dtype)\n exprs[col] = build_if_then_else(\n col_expr.is_null(), value_expr, col_expr, res_type\n )\n else:\n exprs[col] = col_expr\n elif np.isscalar(value):\n value_expr = LiteralExpr(value)\n for col in self.columns:\n col_expr = self.ref(col)\n res_type = _get_common_dtype(value_expr._dtype, col_expr._dtype)\n exprs[col] = build_if_then_else(\n col_expr.is_null(), value_expr, col_expr, res_type\n )\n else:\n raise NotImplementedError(\"unsupported value for fillna\")\n\n new_op = TransformNode(self, exprs)\n dtypes = self._dtypes_for_exprs(exprs)\n new_frame = self.__constructor__(\n columns=self.columns,\n dtypes=dtypes,\n op=new_op,\n index_cols=self._index_cols,\n force_execution_mode=self._force_execution_mode,\n )\n\n return new_frame\n\n def dt_extract(self, obj):\n exprs = self._index_exprs()\n for col in self.columns:\n exprs[col] = build_dt_expr(obj, self.ref(col))\n new_op = TransformNode(self, exprs)\n dtypes = self._dtypes_for_exprs(exprs)\n return self.__constructor__(\n columns=self.columns,\n dtypes=dtypes,\n 
op=new_op,\n index_cols=self._index_cols,\n force_execution_mode=self._force_execution_mode,\n )\n\n def astype(self, col_dtypes, **kwargs):\n columns = col_dtypes.keys()\n new_dtypes = self.dtypes.copy()\n for column in columns:\n dtype = col_dtypes[column]\n if (\n not isinstance(dtype, type(self.dtypes[column]))\n or dtype != self.dtypes[column]\n ):\n # Update the new dtype series to the proper pandas dtype\n try:\n new_dtype = np.dtype(dtype)\n except TypeError:\n new_dtype = dtype\n\n if dtype != np.int32 and new_dtype == np.int32:\n new_dtypes[column] = np.dtype(\"int64\")\n elif dtype != np.float32 and new_dtype == np.float32:\n new_dtypes[column] = np.dtype(\"float64\")\n # We cannot infer without computing the dtype if\n elif isinstance(new_dtype, str) and new_dtype == \"category\":\n raise NotImplementedError(\"unsupported type conversion\")\n else:\n new_dtypes[column] = new_dtype\n exprs = self._index_exprs()\n for col in self.columns:\n col_expr = self.ref(col)\n if col in columns:\n exprs[col] = col_expr.cast(new_dtypes[col])\n else:\n exprs[col] = col_expr\n\n new_op = TransformNode(self, exprs)\n return self.__constructor__(\n columns=self.columns,\n dtypes=new_dtypes,\n op=new_op,\n index_cols=self._index_cols,\n force_execution_mode=self._force_execution_mode,\n )\n\n def join(self, other, how=\"inner\", on=None, sort=False, suffixes=(\"_x\", \"_y\")):\n assert (\n on is not None\n ), \"Merge with unspecified 'on' parameter is not supported in the engine\"\n\n for col in on:\n assert (\n col in self.columns and col in other.columns\n ), \"Only cases when both frames contain key column are supported\"\n\n new_columns = []\n new_dtypes = []\n\n conflicting_cols = set(self.columns) & set(other.columns) - set(on)\n for c in self.columns:\n suffix = suffixes[0] if c in conflicting_cols else \"\"\n new_columns.append(c + suffix)\n new_dtypes.append(self._dtypes[c])\n for c in other.columns:\n if c not in on:\n suffix = suffixes[1] if c in conflicting_cols else \"\"\n new_columns.append(c + suffix)\n new_dtypes.append(other._dtypes[c])\n\n op = JoinNode(\n self,\n other,\n how=how,\n on=on,\n sort=sort,\n suffixes=suffixes,\n )\n\n new_columns = Index.__new__(Index, data=new_columns, dtype=self.columns.dtype)\n return self.__constructor__(\n dtypes=new_dtypes,\n columns=new_columns,\n op=op,\n force_execution_mode=self._force_execution_mode,\n )\n\n def _index_width(self):\n if self._index_cols is None:\n return 1\n return len(self._index_cols)\n\n def _union_all(\n self, axis, other_modin_frames, join=\"outer\", sort=False, ignore_index=False\n ):\n # determine output columns\n new_cols_map = OrderedDict()\n for col in self.columns:\n new_cols_map[col] = self._dtypes[col]\n for frame in other_modin_frames:\n if join == \"inner\":\n for col in list(new_cols_map):\n if col not in frame.columns:\n del new_cols_map[col]\n else:\n for col in frame.columns:\n if col not in new_cols_map:\n new_cols_map[col] = frame._dtypes[col]\n new_columns = list(new_cols_map.keys())\n\n if sort:\n new_columns = sorted(new_columns)\n\n # determine how many index components are going into\n # the resulting table\n if not ignore_index:\n index_width = self._index_width()\n for frame in other_modin_frames:\n index_width = min(index_width, frame._index_width())\n\n # compute resulting dtypes\n if sort:\n new_dtypes = [new_cols_map[col] for col in new_columns]\n else:\n new_dtypes = list(new_cols_map.values())\n\n # build projections to align all frames\n aligned_frames = []\n for frame in [self] + 
other_modin_frames:\n aligned_index = None\n exprs = OrderedDict()\n uses_rowid = False\n\n if not ignore_index:\n if frame._index_cols:\n aligned_index = frame._index_cols[0 : index_width + 1]\n aligned_index_dtypes = frame._dtypes[aligned_index].tolist()\n for i in range(0, index_width):\n col = frame._index_cols[i]\n exprs[col] = frame.ref(col)\n else:\n assert index_width == 1, \"unexpected index width\"\n aligned_index = [\"__index__\"]\n exprs[\"__index__\"] = frame.ref(\"__rowid__\")\n aligned_index_dtypes = [_get_dtype(int)]\n uses_rowid = True\n aligned_dtypes = aligned_index_dtypes + new_dtypes\n else:\n aligned_dtypes = new_dtypes\n\n for col in new_columns:\n if col in frame._table_cols:\n exprs[col] = frame.ref(col)\n else:\n exprs[col] = LiteralExpr(None)\n\n aligned_frame_op = TransformNode(frame, exprs)\n aligned_frames.append(\n self.__constructor__(\n columns=new_columns,\n dtypes=aligned_dtypes,\n op=aligned_frame_op,\n index_cols=aligned_index,\n uses_rowid=uses_rowid,\n force_execution_mode=self._force_execution_mode,\n )\n )\n\n new_frame = aligned_frames[0]\n for frame in aligned_frames[1:]:\n new_frame = self.__constructor__(\n columns=new_columns,\n dtypes=new_frame._dtypes,\n op=UnionNode([new_frame, frame]),\n index_cols=new_frame._index_cols,\n force_execution_mode=self._force_execution_mode,\n )\n\n return new_frame\n\n def _concat(\n self, axis, other_modin_frames, join=\"outer\", sort=False, ignore_index=False\n ):\n if axis == 0:\n return self._union_all(axis, other_modin_frames, join, sort, ignore_index)\n\n base = self\n for frame in other_modin_frames:\n base = base._find_common_projections_base(frame)\n if base is None:\n raise NotImplementedError(\"concat requiring join is not supported yet\")\n\n exprs = self._index_exprs()\n new_columns = self.columns.tolist()\n for col in self.columns:\n exprs[col] = self.ref(col)\n for frame in other_modin_frames:\n for col in frame.columns:\n if col == \"\" or col in exprs:\n new_col = f\"__col{len(exprs)}__\"\n else:\n new_col = col\n exprs[new_col] = frame.ref(col)\n new_columns.append(new_col)\n\n exprs = translate_exprs_to_base(exprs, base)\n new_columns = Index.__new__(Index, data=new_columns, dtype=self.columns.dtype)\n new_frame = self.__constructor__(\n columns=new_columns,\n dtypes=self._dtypes_for_exprs(exprs),\n op=TransformNode(base, exprs),\n index_cols=self._index_cols,\n force_execution_mode=self._force_execution_mode,\n )\n return new_frame\n\n def bin_op(self, other, op_name, **kwargs):\n if isinstance(other, (int, float, str)):\n value_expr = LiteralExpr(other)\n exprs = self._index_exprs()\n for col in self.columns:\n exprs[col] = self.ref(col).bin_op(value_expr, op_name)\n return self.__constructor__(\n columns=self.columns,\n dtypes=self._dtypes_for_exprs(exprs),\n op=TransformNode(self, exprs),\n index_cols=self._index_cols,\n force_execution_mode=self._force_execution_mode,\n )\n elif isinstance(other, list):\n if len(other) != len(self.columns):\n raise ValueError(\n f\"length must be {len(self.columns)}: given {len(other)}\"\n )\n exprs = self._index_exprs()\n for col, val in zip(self.columns, other):\n exprs[col] = self.ref(col).bin_op(LiteralExpr(val), op_name)\n return self.__constructor__(\n columns=self.columns,\n dtypes=self._dtypes_for_exprs(exprs),\n op=TransformNode(self, exprs),\n index_cols=self._index_cols,\n force_execution_mode=self._force_execution_mode,\n )\n elif isinstance(other, type(self)):\n # For now we only support binary operations on\n # projections of the same 
frame, because we have\n # no support for outer join.\n base = self._find_common_projections_base(other)\n if base is None:\n raise NotImplementedError(\n \"unsupported binary op args (outer join is not supported)\"\n )\n\n new_columns = self.columns.tolist()\n for col in other.columns:\n if col not in self.columns:\n new_columns.append(col)\n new_columns = sorted(new_columns)\n\n fill_value = kwargs.get(\"fill_value\", None)\n if fill_value is not None:\n fill_value = LiteralExpr(fill_value)\n if is_cmp_op(op_name):\n null_value = LiteralExpr(op_name == \"ne\")\n else:\n null_value = LiteralExpr(None)\n\n exprs = self._index_exprs()\n for col in new_columns:\n lhs = self.ref(col) if col in self.columns else fill_value\n rhs = other.ref(col) if col in other.columns else fill_value\n if lhs is None or rhs is None:\n exprs[col] = null_value\n else:\n exprs[col] = lhs.bin_op(rhs, op_name)\n\n exprs = translate_exprs_to_base(exprs, base)\n return self.__constructor__(\n columns=new_columns,\n dtypes=self._dtypes_for_exprs(exprs),\n op=TransformNode(base, exprs),\n index_cols=self._index_cols,\n force_execution_mode=self._force_execution_mode,\n )\n else:\n raise NotImplementedError(f\"unsupported operand type: {type(other)}\")\n\n def insert(self, loc, column, value):\n assert column not in self._table_cols\n assert 0 <= loc <= len(self.columns)\n\n exprs = self._index_exprs()\n for i in range(0, loc):\n col = self.columns[i]\n exprs[col] = self.ref(col)\n exprs[column] = LiteralExpr(value)\n for i in range(loc, len(self.columns)):\n col = self.columns[i]\n exprs[col] = self.ref(col)\n\n new_columns = self.columns.insert(loc, column)\n\n return self.__constructor__(\n columns=new_columns,\n dtypes=self._dtypes_for_exprs(exprs),\n op=TransformNode(self, exprs),\n index_cols=self._index_cols,\n force_execution_mode=self._force_execution_mode,\n )\n\n def cat_codes(self):\n assert len(self.columns) == 1\n assert self._dtypes[-1] == \"category\"\n\n col = self.columns[-1]\n exprs = self._index_exprs()\n col_expr = self.ref(col)\n code_expr = OpExpr(\"KEY_FOR_STRING\", [col_expr], _get_dtype(\"int32\"))\n null_val = LiteralExpr(np.int32(-1))\n exprs[col] = build_if_then_else(\n col_expr.is_null(), null_val, code_expr, _get_dtype(\"int32\")\n )\n\n return self.__constructor__(\n columns=self.columns,\n dtypes=self._dtypes,\n op=TransformNode(self, exprs),\n index_cols=self._index_cols,\n force_execution_mode=self._force_execution_mode,\n )\n\n def sort_rows(self, columns, ascending, ignore_index, na_position):\n if na_position != \"first\" and na_position != \"last\":\n raise ValueError(f\"Unsupported na_position value '{na_position}'\")\n\n if not isinstance(columns, list):\n columns = [columns]\n columns = [self._find_index_or_col(col) for col in columns]\n\n if isinstance(ascending, list):\n if len(ascending) != len(columns):\n raise ValueError(\"ascending list length doesn't match columns list\")\n else:\n if not isinstance(ascending, bool):\n raise ValueError(\"unsupported ascending value\")\n ascending = [ascending] * len(columns)\n\n if ignore_index:\n # If index is ignored then we might need to drop some columns.\n # At the same time some of dropped index columns can be used\n # for sorting and should be droped after sorting is done.\n if self._index_cols is not None:\n base = self\n\n drop_index_cols_before = [\n col for col in self._index_cols if col not in columns\n ]\n drop_index_cols_after = [\n col for col in self._index_cols if col in columns\n ]\n if not drop_index_cols_after:\n 
drop_index_cols_after = None\n\n if drop_index_cols_before:\n exprs = OrderedDict()\n index_cols = (\n drop_index_cols_after if drop_index_cols_after else None\n )\n for col in drop_index_cols_after:\n exprs[col] = base.ref(col)\n for col in base.columns:\n exprs[col] = base.ref(col)\n base = self.__constructor__(\n columns=base.columns,\n dtypes=self._dtypes_for_exprs(exprs),\n op=TransformNode(base, exprs),\n index_cols=index_cols,\n force_execution_mode=self._force_execution_mode,\n )\n\n base = self.__constructor__(\n columns=base.columns,\n dtypes=base._dtypes,\n op=SortNode(base, columns, ascending, na_position),\n index_cols=base._index_cols,\n force_execution_mode=self._force_execution_mode,\n )\n\n if drop_index_cols_after:\n exprs = OrderedDict()\n for col in base.columns:\n exprs[col] = base.ref(col)\n base = self.__constructor__(\n columns=base.columns,\n dtypes=self._dtypes_for_exprs(exprs),\n op=TransformNode(base, exprs),\n index_cols=None,\n force_execution_mode=self._force_execution_mode,\n )\n\n return base\n else:\n return self.__constructor__(\n columns=self.columns,\n dtypes=self._dtypes,\n op=SortNode(self, columns, ascending, na_position),\n index_cols=None,\n force_execution_mode=self._force_execution_mode,\n )\n else:\n base = self\n\n # If index is preserved and we have no index columns then we\n # need to create one using __rowid__ virtual column.\n if self._index_cols is None:\n base = base._materialize_rowid()\n\n return self.__constructor__(\n columns=base.columns,\n dtypes=base._dtypes,\n op=SortNode(base, columns, ascending, na_position),\n index_cols=base._index_cols,\n force_execution_mode=self._force_execution_mode,\n )\n\n def filter(self, key):\n if not isinstance(key, type(self)):\n raise NotImplementedError(\"Unsupported key type in filter\")\n\n if not isinstance(key._op, TransformNode) or len(key.columns) != 1:\n raise NotImplementedError(\"Unsupported key in filter\")\n\n key_col = key.columns[0]\n if not is_bool_dtype(key._dtypes[key_col]):\n raise NotImplementedError(\"Unsupported key in filter\")\n\n base = self._find_common_projections_base(key)\n if base is None:\n raise NotImplementedError(\"Unsupported key in filter\")\n\n # We build the resulting frame by applying the filter to the\n # base frame and then using the filtered result as a new base.\n # If base frame has no index columns, then we need to create\n # one.\n key_exprs = translate_exprs_to_base(key._op.exprs, base)\n if base._index_cols is None:\n filter_base = base._materialize_rowid()\n key_exprs = replace_frame_in_exprs(key_exprs, base, filter_base)\n else:\n filter_base = base\n condition = key_exprs[key_col]\n filtered_base = self.__constructor__(\n columns=filter_base.columns,\n dtypes=filter_base._dtypes,\n op=FilterNode(filter_base, condition),\n index_cols=filter_base._index_cols,\n force_execution_mode=self._force_execution_mode,\n )\n\n if self is base:\n exprs = OrderedDict()\n for col in filtered_base._table_cols:\n exprs[col] = filtered_base.ref(col)\n else:\n assert isinstance(\n self._op, TransformNode\n ), f\"unexpected op: {self._op.dumps()}\"\n exprs = translate_exprs_to_base(self._op.exprs, base)\n exprs = replace_frame_in_exprs(exprs, base, filtered_base)\n if base._index_cols is None:\n exprs[\"__index__\"] = filtered_base.ref(\"__index__\")\n exprs.move_to_end(\"__index__\", last=False)\n\n return self.__constructor__(\n columns=self.columns,\n dtypes=self._dtypes_for_exprs(exprs),\n op=TransformNode(filtered_base, exprs),\n 
index_cols=filtered_base._index_cols,\n force_execution_mode=self._force_execution_mode,\n )\n\n def _materialize_rowid(self):\n exprs = OrderedDict()\n exprs[\"__index__\"] = self.ref(\"__rowid__\")\n for col in self._table_cols:\n exprs[col] = self.ref(col)\n return self.__constructor__(\n columns=self.columns,\n dtypes=self._dtypes_for_exprs(exprs),\n op=TransformNode(self, exprs),\n index_cols=[\"__index__\"],\n uses_rowid=True,\n force_execution_mode=self._force_execution_mode,\n )\n\n def _index_exprs(self):\n exprs = OrderedDict()\n if self._index_cols:\n for col in self._index_cols:\n exprs[col] = self.ref(col)\n return exprs\n\n def _find_common_projections_base(self, rhs):\n bases = {self}\n while self._is_projection():\n self = self._op.input[0]\n bases.add(self)\n\n while rhs not in bases and rhs._is_projection():\n rhs = rhs._op.input[0]\n\n if rhs in bases:\n return rhs\n\n return None\n\n def _is_projection(self):\n return isinstance(self._op, TransformNode)\n\n def _execute(self):\n if isinstance(self._op, FrameNode):\n return\n\n if self._force_execution_mode == \"lazy\":\n raise RuntimeError(\"unexpected execution triggered on lazy frame\")\n\n # Some frames require rowid which is available for executed frames only.\n # Also there is a common pattern when MaskNode is executed to print\n # frame. If we run the whole tree then any following frame usage will\n # require re-compute. So we just execute MaskNode's operands.\n self._run_sub_queries()\n\n if self._can_execute_arrow():\n new_table = self._execute_arrow()\n new_partitions = np.empty((1, 1), dtype=np.dtype(object))\n new_partitions[0][0] = self._frame_mgr_cls._partition_class.put_arrow(\n new_table\n )\n else:\n if self._force_execution_mode == \"arrow\":\n raise RuntimeError(\"forced arrow execution failed\")\n\n new_partitions = self._frame_mgr_cls.run_exec_plan(\n self._op, self._index_cols, self._dtypes, self._table_cols\n )\n self._partitions = new_partitions\n self._op = FrameNode(self)\n\n def _require_executed_base(self):\n if isinstance(self._op, MaskNode):\n return True\n return self._uses_rowid\n\n def _run_sub_queries(self):\n if isinstance(self._op, FrameNode):\n return\n\n if self._require_executed_base():\n for op in self._op.input:\n op._execute()\n else:\n for frame in self._op.input:\n frame._run_sub_queries()\n\n def _can_execute_arrow(self):\n if isinstance(self._op, FrameNode):\n return self._has_arrow_table()\n elif isinstance(self._op, MaskNode):\n return (\n self._op.row_indices is None and self._op.input[0]._can_execute_arrow()\n )\n elif isinstance(self._op, TransformNode):\n return self._op.is_drop() and self._op.input[0]._can_execute_arrow()\n elif isinstance(self._op, UnionNode):\n return all(frame._can_execute_arrow() for frame in self._op.input)\n else:\n return False\n\n def _execute_arrow(self):\n if isinstance(self._op, FrameNode):\n if self._partitions.size == 0:\n return pyarrow.Table()\n else:\n assert self._partitions.size == 1\n return self._partitions[0][0].get()\n elif isinstance(self._op, MaskNode):\n return self._op.input[0]._arrow_row_slice(self._op.row_numeric_idx)\n elif isinstance(self._op, TransformNode):\n return self._op.input[0]._arrow_col_slice(set(self._op.exprs.keys()))\n elif isinstance(self._op, UnionNode):\n return self._arrow_concat(self._op.input)\n else:\n raise RuntimeError(f\"Unexpected op ({type(self._op)}) in _execute_arrow\")\n\n def _arrow_col_slice(self, new_columns):\n table = self._execute_arrow()\n return table.drop(\n [f\"F_{col}\" for col in 
self._table_cols if col not in new_columns]\n )\n\n def _arrow_row_slice(self, row_numeric_idx):\n table = self._execute_arrow()\n if isinstance(row_numeric_idx, slice):\n start = 0 if row_numeric_idx.start is None else row_numeric_idx.start\n if start < 0:\n start = table.num_rows - start\n end = (\n table.num_rows if row_numeric_idx.stop is None else row_numeric_idx.stop\n )\n if end < 0:\n end = table.num_rows - end\n if row_numeric_idx.step is None or row_numeric_idx.step == 1:\n length = 0 if start >= end else end - start\n return table.slice(start, length)\n else:\n parts = []\n for i in range(start, end, row_numeric_idx.step):\n parts.append(table.slice(i, 1))\n return pyarrow.concat_tables(parts)\n\n start = None\n end = None\n parts = []\n for idx in row_numeric_idx:\n if start is None:\n start = idx\n end = idx\n elif idx == end + 1:\n end = idx\n else:\n if start:\n parts.append(table.slice(start, end - start + 1))\n start = idx\n end = idx\n parts.append(table.slice(start, end - start + 1))\n\n return pyarrow.concat_tables(parts)\n\n @classmethod\n def _arrow_concat(cls, frames):\n return pyarrow.concat_tables(frame._execute_arrow() for frame in frames)\n\n def _build_index_cache(self):\n assert isinstance(self._op, FrameNode)\n\n if self._partitions.size == 0:\n self._index_cache = Index.__new__(Index)\n else:\n assert self._partitions.size == 1\n obj = self._partitions[0][0].get()\n if isinstance(obj, (pd.DataFrame, pd.Series)):\n self._index_cache = obj.index\n else:\n assert isinstance(obj, pyarrow.Table)\n if self._index_cols is None:\n self._index_cache = Index.__new__(\n RangeIndex, data=range(obj.num_rows)\n )\n else:\n index_at = obj.drop([f\"F_{col}\" for col in self.columns])\n index_df = index_at.to_pandas()\n index_df.set_index(\n [f\"F_{col}\" for col in self._index_cols], inplace=True\n )\n index_df.index.rename(\n self._index_names(self._index_cols), inplace=True\n )\n self._index_cache = index_df.index\n\n def _get_index(self):\n self._execute()\n if self._index_cache is None:\n self._build_index_cache()\n return self._index_cache\n\n def _set_index(self, new_index):\n raise NotImplementedError(\"OmnisciOnRayFrame._set_index is not yet suported\")\n\n def reset_index(self, drop):\n if drop:\n exprs = OrderedDict()\n for c in self.columns:\n exprs[c] = self.ref(c)\n return self.__constructor__(\n columns=self.columns,\n dtypes=self._dtypes_for_exprs(exprs),\n op=TransformNode(self, exprs),\n index_cols=None,\n force_execution_mode=self._force_execution_mode,\n )\n else:\n if self._index_cols is None:\n raise NotImplementedError(\n \"default index reset with no drop is not supported\"\n )\n # Need to demangle index names.\n exprs = OrderedDict()\n for i, c in enumerate(self._index_cols):\n name = self._index_name(c)\n if name is None:\n name = f\"level_{i}\"\n if name in exprs:\n raise ValueError(f\"cannot insert {name}, already exists\")\n exprs[name] = self.ref(c)\n for c in self.columns:\n if c in exprs:\n raise ValueError(f\"cannot insert {c}, already exists\")\n exprs[c] = self.ref(c)\n new_columns = Index.__new__(Index, data=exprs.keys(), dtype=\"O\")\n return self.__constructor__(\n columns=new_columns,\n dtypes=self._dtypes_for_exprs(exprs),\n op=TransformNode(self, exprs),\n index_cols=None,\n force_execution_mode=self._force_execution_mode,\n )\n\n def _set_columns(self, new_columns):\n exprs = self._index_exprs()\n for old, new in zip(self.columns, new_columns):\n exprs[new] = self.ref(old)\n return self.__constructor__(\n columns=new_columns,\n 
dtypes=self._dtypes.tolist(),\n op=TransformNode(self, exprs),\n index_cols=self._index_cols,\n force_execution_mode=self._force_execution_mode,\n )\n\n def _get_columns(self):\n return super(OmnisciOnRayFrame, self)._get_columns()\n\n columns = property(_get_columns)\n index = property(_get_index, _set_index)\n\n def has_multiindex(self):\n if self._index_cache is not None:\n return isinstance(self._index_cache, MultiIndex)\n return self._index_cols is not None and len(self._index_cols) > 1\n\n def to_pandas(self):\n self._execute()\n\n if self._force_execution_mode == \"lazy\":\n raise RuntimeError(\"unexpected to_pandas triggered on lazy frame\")\n\n df = self._frame_mgr_cls.to_pandas(self._partitions)\n\n # If we make dataframe from Arrow table then we might need to set\n # index columns.\n if len(df.columns) != len(self.columns):\n assert self._index_cols\n df.set_index([f\"F_{col}\" for col in self._index_cols], inplace=True)\n df.index.rename(self._index_names(self._index_cols), inplace=True)\n assert len(df.columns) == len(self.columns)\n else:\n assert self._index_cols is None\n assert df.index.name is None, f\"index name '{df.index.name}' is not None\"\n\n # Restore original column labels encoded in OmniSci to meet its\n # restirctions on column names.\n df.columns = self.columns\n\n return df\n\n def _index_names(self, cols):\n if len(cols) == 1:\n return self._index_name(cols[0])\n return [self._index_name(n) for n in cols]\n\n def _index_name(self, col):\n if col == \"__index__\":\n return None\n\n match = re.search(\"__index__\\\\d+_(.*)\", col)\n if match:\n name = match.group(1)\n if name == \"__None__\":\n return None\n return name\n\n return col\n\n def _find_index_or_col(self, col):\n \"\"\"For given column or index name return a column name\"\"\"\n if col in self.columns:\n return col\n\n if self._index_cols is not None:\n for idx_col in self._index_cols:\n if re.match(f\"__index__\\\\d+_{col}\", idx_col):\n return idx_col\n\n raise ValueError(f\"Unknown column '{col}'\")\n\n @classmethod\n def from_pandas(cls, df):\n new_index = df.index\n new_columns = df.columns\n # If there is non-trivial index, we put it into columns.\n # That's what we usually have for arrow tables and execution\n # result. Unnamed index is renamed to __index__. 
Also all\n # columns get 'F_' prefix to handle names unsupported in\n # OmniSci.\n if cls._is_trivial_index(df.index):\n index_cols = None\n else:\n orig_index_names = df.index.names\n orig_df = df\n\n index_cols = [\n f\"__index__{i}_{'__None__' if n is None else n}\"\n for i, n in enumerate(df.index.names)\n ]\n df.index.names = index_cols\n df = df.reset_index()\n\n orig_df.index.names = orig_index_names\n new_dtypes = df.dtypes\n df = df.add_prefix(\"F_\")\n new_parts, new_lengths, new_widths = cls._frame_mgr_cls.from_pandas(df, True)\n return cls(\n new_parts,\n new_index,\n new_columns,\n new_lengths,\n new_widths,\n dtypes=new_dtypes,\n index_cols=index_cols,\n )\n\n @classmethod\n def _is_trivial_index(cls, index):\n \"\"\"Return true if index is a range [0..N]\"\"\"\n if isinstance(index, pd.RangeIndex):\n return index.start == 0 and index.step == 1\n if not isinstance(index, pd.Int64Index):\n return False\n return (\n index.is_monotonic_increasing\n and index.unique\n and index.min == 0\n and index.max == len(index) - 1\n )\n", "path": "modin/experimental/engines/omnisci_on_ray/frame/data.py"}], "after_files": [{"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific language\n# governing permissions and limitations under the License.\n\nfrom modin.engines.base.frame.data import BasePandasFrame\nfrom modin.experimental.backends.omnisci.query_compiler import DFAlgQueryCompiler\nfrom .partition_manager import OmnisciOnRayFrameManager\n\nfrom pandas.core.index import ensure_index, Index, MultiIndex, RangeIndex\nfrom pandas.core.dtypes.common import _get_dtype, is_list_like, is_bool_dtype\nimport pandas as pd\n\nfrom .df_algebra import (\n MaskNode,\n FrameNode,\n GroupbyAggNode,\n TransformNode,\n UnionNode,\n JoinNode,\n SortNode,\n FilterNode,\n translate_exprs_to_base,\n replace_frame_in_exprs,\n)\nfrom .expr import (\n AggregateExpr,\n InputRefExpr,\n LiteralExpr,\n OpExpr,\n build_if_then_else,\n build_dt_expr,\n _get_common_dtype,\n is_cmp_op,\n)\nfrom collections import OrderedDict\n\nimport numpy as np\nimport pyarrow\nimport re\n\n\nclass OmnisciOnRayFrame(BasePandasFrame):\n\n _query_compiler_cls = DFAlgQueryCompiler\n _frame_mgr_cls = OmnisciOnRayFrameManager\n\n _next_id = [1]\n\n def __init__(\n self,\n partitions=None,\n index=None,\n columns=None,\n row_lengths=None,\n column_widths=None,\n dtypes=None,\n op=None,\n index_cols=None,\n uses_rowid=False,\n force_execution_mode=None,\n ):\n assert dtypes is not None\n\n self.id = str(type(self)._next_id[0])\n type(self)._next_id[0] += 1\n\n if index is not None:\n index = ensure_index(index)\n columns = ensure_index(columns)\n self._op = op\n self._index_cols = index_cols\n self._partitions = partitions\n self._index_cache = index\n self._columns_cache = columns\n self._row_lengths_cache = row_lengths\n self._column_widths_cache = column_widths\n if self._op is None:\n self._op = FrameNode(self)\n\n self._table_cols = columns.tolist()\n if self._index_cols is not None:\n self._table_cols = self._index_cols + self._table_cols\n\n assert len(dtypes) == len(\n self._table_cols\n ), f\"unaligned dtypes ({dtypes}) and table columns ({self._table_cols})\"\n if isinstance(dtypes, list):\n if self._index_cols is not None:\n # Table stores both index and data columns but those are accessed\n # differently if we have a MultiIndex for columns. To unify access\n # to dtype we extend index column names to tuples to have a MultiIndex\n # of dtypes.\n if isinstance(columns, MultiIndex):\n tail = [\"\"] * (columns.nlevels - 1)\n index_tuples = [(col, *tail) for col in self._index_cols]\n dtype_index = MultiIndex.from_tuples(index_tuples).append(columns)\n self._dtypes = pd.Series(dtypes, index=dtype_index)\n else:\n self._dtypes = pd.Series(dtypes, index=self._table_cols)\n else:\n self._dtypes = pd.Series(dtypes, index=columns)\n else:\n self._dtypes = dtypes\n\n if partitions is not None:\n self._filter_empties()\n\n # This frame uses encoding for column names to support exotic\n # (e.g. non-string and reserved words) column names. 
Encoded\n # names are used in OmniSci tables and corresponding Arrow tables.\n # If we import Arrow table, we have to rename its columns for\n # proper processing.\n if self._has_arrow_table() and self._partitions.size > 0:\n assert self._partitions.size == 1\n table = self._partitions[0][0].get()\n if table.column_names[0] != f\"F_{self._table_cols[0]}\":\n new_names = [f\"F_{col}\" for col in table.column_names]\n new_table = table.rename_columns(new_names)\n self._partitions[0][0] = self._frame_mgr_cls._partition_class.put_arrow(\n new_table\n )\n\n self._uses_rowid = uses_rowid\n # Tests use forced execution mode to take control over frame\n # execution process. Supported values:\n # \"lazy\" - RuntimeError is raised if execution is triggered for the frame\n # \"arrow\" - RuntimeError is raised if execution is triggered, but we cannot\n # execute it using Arrow API (have to use OmniSci for execution)\n self._force_execution_mode = force_execution_mode\n\n def id_str(self):\n return f\"frame${self.id}\"\n\n def _get_dtype(self, col):\n # If we search for an index column type in a MultiIndex then we need to\n # extend index column names to tuples.\n if isinstance(self._dtypes, MultiIndex) and not isinstance(col, tuple):\n return self._dtypes[(col, *([\"\"] * (self._dtypes.nlevels - 1)))]\n return self._dtypes[col]\n\n def ref(self, col):\n if col == \"__rowid__\":\n return InputRefExpr(self, col, _get_dtype(int))\n return InputRefExpr(self, col, self._get_dtype(col))\n\n def mask(\n self,\n row_indices=None,\n row_numeric_idx=None,\n col_indices=None,\n col_numeric_idx=None,\n ):\n base = self\n\n if col_indices is not None or col_numeric_idx is not None:\n if col_indices is not None:\n new_columns = col_indices\n elif col_numeric_idx is not None:\n new_columns = base.columns[col_numeric_idx]\n exprs = self._index_exprs()\n for col in new_columns:\n exprs[col] = base.ref(col)\n dtypes = self._dtypes_for_exprs(exprs)\n base = self.__constructor__(\n columns=new_columns,\n dtypes=dtypes,\n op=TransformNode(base, exprs),\n index_cols=self._index_cols,\n force_execution_mode=self._force_execution_mode,\n )\n\n if row_indices is not None or row_numeric_idx is not None:\n op = MaskNode(\n base,\n row_indices=row_indices,\n row_numeric_idx=row_numeric_idx,\n )\n return self.__constructor__(\n columns=base.columns,\n dtypes=base._dtypes,\n op=op,\n index_cols=self._index_cols,\n force_execution_mode=self._force_execution_mode,\n )\n\n return base\n\n def _has_arrow_table(self):\n if not isinstance(self._op, FrameNode):\n return False\n return all(p.arrow_table for p in self._partitions.flatten())\n\n def _dtypes_for_cols(self, new_index, new_columns):\n if new_index is not None:\n if isinstance(self._dtypes, MultiIndex):\n new_index = [\n (col, *([\"\"] * (self._dtypes.nlevels - 1))) for col in new_index\n ]\n res = self._dtypes[\n new_index\n + (\n new_columns\n if isinstance(new_columns, list)\n else new_columns.to_list()\n )\n ]\n else:\n res = self._dtypes[new_columns]\n return res\n\n def _dtypes_for_exprs(self, exprs):\n return [expr._dtype for expr in exprs.values()]\n\n def groupby_agg(self, by, axis, agg, groupby_args, **kwargs):\n # Currently we only expect 'by' to be a projection of the same frame.\n # If 'by' holds a list of columns/series, then we create such projection\n # to re-use code.\n if not isinstance(by, DFAlgQueryCompiler):\n if is_list_like(by):\n by_cols = []\n by_frames = []\n for obj in by:\n if isinstance(obj, str):\n by_cols.append(obj)\n elif hasattr(obj, 
\"_query_compiler\"):\n by_frames.append(obj._query_compiler._modin_frame)\n else:\n raise NotImplementedError(\"unsupported groupby args\")\n by_cols = Index.__new__(Index, data=by_cols, dtype=self.columns.dtype)\n by_frame = self.mask(col_indices=by_cols)\n if by_frames:\n by_frame = by_frame._concat(\n axis=1, other_modin_frames=by_frames, ignore_index=True\n )\n else:\n raise NotImplementedError(\"unsupported groupby args\")\n else:\n by_frame = by._modin_frame\n\n if axis != 0:\n raise NotImplementedError(\"groupby is supported for axis = 0 only\")\n\n base = by_frame._find_common_projections_base(self)\n if base is None:\n raise NotImplementedError(\"unsupported groupby args\")\n\n if groupby_args[\"level\"] is not None:\n raise NotImplementedError(\"levels are not supported for groupby\")\n\n groupby_cols = by_frame.columns.tolist()\n agg_cols = [col for col in self.columns if col not in by_frame.columns]\n\n # Create new base where all required columns are computed. We don't allow\n # complex expressions to be a group key or an aggeregate operand.\n assert isinstance(by_frame._op, TransformNode), \"unexpected by_frame\"\n exprs = OrderedDict(((col, by_frame.ref(col)) for col in groupby_cols))\n exprs.update(((col, self.ref(col)) for col in agg_cols))\n exprs = translate_exprs_to_base(exprs, base)\n base_cols = Index.__new__(\n Index, data=list(exprs.keys()), dtype=self.columns.dtype\n )\n base = self.__constructor__(\n columns=base_cols,\n dtypes=self._dtypes_for_exprs(exprs),\n op=TransformNode(base, exprs, fold=True),\n index_cols=None,\n force_execution_mode=self._force_execution_mode,\n )\n\n new_columns = []\n index_cols = None\n\n if groupby_args[\"as_index\"]:\n index_cols = groupby_cols.copy()\n else:\n new_columns = groupby_cols.copy()\n\n new_dtypes = by_frame._dtypes[groupby_cols].tolist()\n\n agg_exprs = OrderedDict()\n if isinstance(agg, str):\n for col in agg_cols:\n agg_exprs[col] = AggregateExpr(agg, base.ref(col))\n else:\n assert isinstance(agg, dict), \"unsupported aggregate type\"\n multiindex = any(isinstance(v, list) for v in agg.values())\n for k, v in agg.items():\n if isinstance(v, list):\n for item in v:\n agg_exprs[(k, item)] = AggregateExpr(item, base.ref(k))\n else:\n col_name = (k, v) if multiindex else k\n agg_exprs[col_name] = AggregateExpr(v, base.ref(k))\n new_columns.extend(agg_exprs.keys())\n new_dtypes.extend((x._dtype for x in agg_exprs.values()))\n new_columns = Index.__new__(Index, data=new_columns, dtype=self.columns.dtype)\n\n new_op = GroupbyAggNode(base, groupby_cols, agg_exprs, groupby_args)\n new_frame = self.__constructor__(\n columns=new_columns,\n dtypes=new_dtypes,\n op=new_op,\n index_cols=index_cols,\n force_execution_mode=self._force_execution_mode,\n )\n\n return new_frame\n\n def fillna(\n self,\n value=None,\n method=None,\n axis=None,\n limit=None,\n downcast=None,\n ):\n if axis != 0:\n raise NotImplementedError(\"fillna is supported for axis = 0 only\")\n\n if limit is not None:\n raise NotImplementedError(\"fillna doesn't support limit yet\")\n\n if downcast is not None:\n raise NotImplementedError(\"fillna doesn't support downcast yet\")\n\n if method is not None:\n raise NotImplementedError(\"fillna doesn't support method yet\")\n\n exprs = self._index_exprs()\n if isinstance(value, dict):\n for col in self.columns:\n col_expr = self.ref(col)\n if col in value:\n value_expr = LiteralExpr(value[col])\n res_type = _get_common_dtype(value_expr._dtype, col_expr._dtype)\n exprs[col] = build_if_then_else(\n col_expr.is_null(), 
value_expr, col_expr, res_type\n )\n else:\n exprs[col] = col_expr\n elif np.isscalar(value):\n value_expr = LiteralExpr(value)\n for col in self.columns:\n col_expr = self.ref(col)\n res_type = _get_common_dtype(value_expr._dtype, col_expr._dtype)\n exprs[col] = build_if_then_else(\n col_expr.is_null(), value_expr, col_expr, res_type\n )\n else:\n raise NotImplementedError(\"unsupported value for fillna\")\n\n new_op = TransformNode(self, exprs)\n dtypes = self._dtypes_for_exprs(exprs)\n new_frame = self.__constructor__(\n columns=self.columns,\n dtypes=dtypes,\n op=new_op,\n index_cols=self._index_cols,\n force_execution_mode=self._force_execution_mode,\n )\n\n return new_frame\n\n def dt_extract(self, obj):\n exprs = self._index_exprs()\n for col in self.columns:\n exprs[col] = build_dt_expr(obj, self.ref(col))\n new_op = TransformNode(self, exprs)\n dtypes = self._dtypes_for_exprs(exprs)\n return self.__constructor__(\n columns=self.columns,\n dtypes=dtypes,\n op=new_op,\n index_cols=self._index_cols,\n force_execution_mode=self._force_execution_mode,\n )\n\n def astype(self, col_dtypes, **kwargs):\n columns = col_dtypes.keys()\n new_dtypes = self.dtypes.copy()\n for column in columns:\n dtype = col_dtypes[column]\n if (\n not isinstance(dtype, type(self.dtypes[column]))\n or dtype != self.dtypes[column]\n ):\n # Update the new dtype series to the proper pandas dtype\n try:\n new_dtype = np.dtype(dtype)\n except TypeError:\n new_dtype = dtype\n\n if dtype != np.int32 and new_dtype == np.int32:\n new_dtypes[column] = np.dtype(\"int64\")\n elif dtype != np.float32 and new_dtype == np.float32:\n new_dtypes[column] = np.dtype(\"float64\")\n # We cannot infer without computing the dtype if\n elif isinstance(new_dtype, str) and new_dtype == \"category\":\n raise NotImplementedError(\"unsupported type conversion\")\n else:\n new_dtypes[column] = new_dtype\n exprs = self._index_exprs()\n for col in self.columns:\n col_expr = self.ref(col)\n if col in columns:\n exprs[col] = col_expr.cast(new_dtypes[col])\n else:\n exprs[col] = col_expr\n\n new_op = TransformNode(self, exprs)\n return self.__constructor__(\n columns=self.columns,\n dtypes=new_dtypes,\n op=new_op,\n index_cols=self._index_cols,\n force_execution_mode=self._force_execution_mode,\n )\n\n def join(self, other, how=\"inner\", on=None, sort=False, suffixes=(\"_x\", \"_y\")):\n assert (\n on is not None\n ), \"Merge with unspecified 'on' parameter is not supported in the engine\"\n\n for col in on:\n assert (\n col in self.columns and col in other.columns\n ), \"Only cases when both frames contain key column are supported\"\n\n new_columns = []\n new_dtypes = []\n\n conflicting_cols = set(self.columns) & set(other.columns) - set(on)\n for c in self.columns:\n suffix = suffixes[0] if c in conflicting_cols else \"\"\n new_columns.append(c + suffix)\n new_dtypes.append(self._dtypes[c])\n for c in other.columns:\n if c not in on:\n suffix = suffixes[1] if c in conflicting_cols else \"\"\n new_columns.append(c + suffix)\n new_dtypes.append(other._dtypes[c])\n\n op = JoinNode(\n self,\n other,\n how=how,\n on=on,\n sort=sort,\n suffixes=suffixes,\n )\n\n new_columns = Index.__new__(Index, data=new_columns, dtype=self.columns.dtype)\n return self.__constructor__(\n dtypes=new_dtypes,\n columns=new_columns,\n op=op,\n force_execution_mode=self._force_execution_mode,\n )\n\n def _index_width(self):\n if self._index_cols is None:\n return 1\n return len(self._index_cols)\n\n def _union_all(\n self, axis, other_modin_frames, join=\"outer\", 
sort=False, ignore_index=False\n ):\n # determine output columns\n new_cols_map = OrderedDict()\n for col in self.columns:\n new_cols_map[col] = self._dtypes[col]\n for frame in other_modin_frames:\n if join == \"inner\":\n for col in list(new_cols_map):\n if col not in frame.columns:\n del new_cols_map[col]\n else:\n for col in frame.columns:\n if col not in new_cols_map:\n new_cols_map[col] = frame._dtypes[col]\n new_columns = list(new_cols_map.keys())\n\n if sort:\n new_columns = sorted(new_columns)\n\n # determine how many index components are going into\n # the resulting table\n if not ignore_index:\n index_width = self._index_width()\n for frame in other_modin_frames:\n index_width = min(index_width, frame._index_width())\n\n # compute resulting dtypes\n if sort:\n new_dtypes = [new_cols_map[col] for col in new_columns]\n else:\n new_dtypes = list(new_cols_map.values())\n\n # build projections to align all frames\n aligned_frames = []\n for frame in [self] + other_modin_frames:\n aligned_index = None\n exprs = OrderedDict()\n uses_rowid = False\n\n if not ignore_index:\n if frame._index_cols:\n aligned_index = frame._index_cols[0 : index_width + 1]\n aligned_index_dtypes = frame._dtypes[aligned_index].tolist()\n for i in range(0, index_width):\n col = frame._index_cols[i]\n exprs[col] = frame.ref(col)\n else:\n assert index_width == 1, \"unexpected index width\"\n aligned_index = [\"__index__\"]\n exprs[\"__index__\"] = frame.ref(\"__rowid__\")\n aligned_index_dtypes = [_get_dtype(int)]\n uses_rowid = True\n aligned_dtypes = aligned_index_dtypes + new_dtypes\n else:\n aligned_dtypes = new_dtypes\n\n for col in new_columns:\n if col in frame._table_cols:\n exprs[col] = frame.ref(col)\n else:\n exprs[col] = LiteralExpr(None)\n\n aligned_frame_op = TransformNode(frame, exprs)\n aligned_frames.append(\n self.__constructor__(\n columns=new_columns,\n dtypes=aligned_dtypes,\n op=aligned_frame_op,\n index_cols=aligned_index,\n uses_rowid=uses_rowid,\n force_execution_mode=self._force_execution_mode,\n )\n )\n\n new_frame = aligned_frames[0]\n for frame in aligned_frames[1:]:\n new_frame = self.__constructor__(\n columns=new_columns,\n dtypes=new_frame._dtypes,\n op=UnionNode([new_frame, frame]),\n index_cols=new_frame._index_cols,\n force_execution_mode=self._force_execution_mode,\n )\n\n return new_frame\n\n def _concat(\n self, axis, other_modin_frames, join=\"outer\", sort=False, ignore_index=False\n ):\n if not other_modin_frames:\n return self\n\n if axis == 0:\n return self._union_all(axis, other_modin_frames, join, sort, ignore_index)\n\n base = self\n for frame in other_modin_frames:\n base = base._find_common_projections_base(frame)\n if base is None:\n raise NotImplementedError(\"concat requiring join is not supported yet\")\n\n exprs = self._index_exprs()\n new_columns = self.columns.tolist()\n for col in self.columns:\n exprs[col] = self.ref(col)\n for frame in other_modin_frames:\n for col in frame.columns:\n if col == \"\" or col in exprs:\n new_col = f\"__col{len(exprs)}__\"\n else:\n new_col = col\n exprs[new_col] = frame.ref(col)\n new_columns.append(new_col)\n\n exprs = translate_exprs_to_base(exprs, base)\n new_columns = Index.__new__(Index, data=new_columns, dtype=self.columns.dtype)\n new_frame = self.__constructor__(\n columns=new_columns,\n dtypes=self._dtypes_for_exprs(exprs),\n op=TransformNode(base, exprs),\n index_cols=self._index_cols,\n force_execution_mode=self._force_execution_mode,\n )\n return new_frame\n\n def bin_op(self, other, op_name, **kwargs):\n if 
isinstance(other, (int, float, str)):\n value_expr = LiteralExpr(other)\n exprs = self._index_exprs()\n for col in self.columns:\n exprs[col] = self.ref(col).bin_op(value_expr, op_name)\n return self.__constructor__(\n columns=self.columns,\n dtypes=self._dtypes_for_exprs(exprs),\n op=TransformNode(self, exprs),\n index_cols=self._index_cols,\n force_execution_mode=self._force_execution_mode,\n )\n elif isinstance(other, list):\n if len(other) != len(self.columns):\n raise ValueError(\n f\"length must be {len(self.columns)}: given {len(other)}\"\n )\n exprs = self._index_exprs()\n for col, val in zip(self.columns, other):\n exprs[col] = self.ref(col).bin_op(LiteralExpr(val), op_name)\n return self.__constructor__(\n columns=self.columns,\n dtypes=self._dtypes_for_exprs(exprs),\n op=TransformNode(self, exprs),\n index_cols=self._index_cols,\n force_execution_mode=self._force_execution_mode,\n )\n elif isinstance(other, type(self)):\n # For now we only support binary operations on\n # projections of the same frame, because we have\n # no support for outer join.\n base = self._find_common_projections_base(other)\n if base is None:\n raise NotImplementedError(\n \"unsupported binary op args (outer join is not supported)\"\n )\n\n new_columns = self.columns.tolist()\n for col in other.columns:\n if col not in self.columns:\n new_columns.append(col)\n new_columns = sorted(new_columns)\n\n fill_value = kwargs.get(\"fill_value\", None)\n if fill_value is not None:\n fill_value = LiteralExpr(fill_value)\n if is_cmp_op(op_name):\n null_value = LiteralExpr(op_name == \"ne\")\n else:\n null_value = LiteralExpr(None)\n\n exprs = self._index_exprs()\n for col in new_columns:\n lhs = self.ref(col) if col in self.columns else fill_value\n rhs = other.ref(col) if col in other.columns else fill_value\n if lhs is None or rhs is None:\n exprs[col] = null_value\n else:\n exprs[col] = lhs.bin_op(rhs, op_name)\n\n exprs = translate_exprs_to_base(exprs, base)\n return self.__constructor__(\n columns=new_columns,\n dtypes=self._dtypes_for_exprs(exprs),\n op=TransformNode(base, exprs),\n index_cols=self._index_cols,\n force_execution_mode=self._force_execution_mode,\n )\n else:\n raise NotImplementedError(f\"unsupported operand type: {type(other)}\")\n\n def insert(self, loc, column, value):\n assert column not in self._table_cols\n assert 0 <= loc <= len(self.columns)\n\n exprs = self._index_exprs()\n for i in range(0, loc):\n col = self.columns[i]\n exprs[col] = self.ref(col)\n exprs[column] = LiteralExpr(value)\n for i in range(loc, len(self.columns)):\n col = self.columns[i]\n exprs[col] = self.ref(col)\n\n new_columns = self.columns.insert(loc, column)\n\n return self.__constructor__(\n columns=new_columns,\n dtypes=self._dtypes_for_exprs(exprs),\n op=TransformNode(self, exprs),\n index_cols=self._index_cols,\n force_execution_mode=self._force_execution_mode,\n )\n\n def cat_codes(self):\n assert len(self.columns) == 1\n assert self._dtypes[-1] == \"category\"\n\n col = self.columns[-1]\n exprs = self._index_exprs()\n col_expr = self.ref(col)\n code_expr = OpExpr(\"KEY_FOR_STRING\", [col_expr], _get_dtype(\"int32\"))\n null_val = LiteralExpr(np.int32(-1))\n exprs[col] = build_if_then_else(\n col_expr.is_null(), null_val, code_expr, _get_dtype(\"int32\")\n )\n\n return self.__constructor__(\n columns=self.columns,\n dtypes=self._dtypes,\n op=TransformNode(self, exprs),\n index_cols=self._index_cols,\n force_execution_mode=self._force_execution_mode,\n )\n\n def sort_rows(self, columns, ascending, ignore_index, 
na_position):\n if na_position != \"first\" and na_position != \"last\":\n raise ValueError(f\"Unsupported na_position value '{na_position}'\")\n\n if not isinstance(columns, list):\n columns = [columns]\n columns = [self._find_index_or_col(col) for col in columns]\n\n if isinstance(ascending, list):\n if len(ascending) != len(columns):\n raise ValueError(\"ascending list length doesn't match columns list\")\n else:\n if not isinstance(ascending, bool):\n raise ValueError(\"unsupported ascending value\")\n ascending = [ascending] * len(columns)\n\n if ignore_index:\n # If index is ignored then we might need to drop some columns.\n # At the same time some of dropped index columns can be used\n # for sorting and should be droped after sorting is done.\n if self._index_cols is not None:\n base = self\n\n drop_index_cols_before = [\n col for col in self._index_cols if col not in columns\n ]\n drop_index_cols_after = [\n col for col in self._index_cols if col in columns\n ]\n if not drop_index_cols_after:\n drop_index_cols_after = None\n\n if drop_index_cols_before:\n exprs = OrderedDict()\n index_cols = (\n drop_index_cols_after if drop_index_cols_after else None\n )\n for col in drop_index_cols_after:\n exprs[col] = base.ref(col)\n for col in base.columns:\n exprs[col] = base.ref(col)\n base = self.__constructor__(\n columns=base.columns,\n dtypes=self._dtypes_for_exprs(exprs),\n op=TransformNode(base, exprs),\n index_cols=index_cols,\n force_execution_mode=self._force_execution_mode,\n )\n\n base = self.__constructor__(\n columns=base.columns,\n dtypes=base._dtypes,\n op=SortNode(base, columns, ascending, na_position),\n index_cols=base._index_cols,\n force_execution_mode=self._force_execution_mode,\n )\n\n if drop_index_cols_after:\n exprs = OrderedDict()\n for col in base.columns:\n exprs[col] = base.ref(col)\n base = self.__constructor__(\n columns=base.columns,\n dtypes=self._dtypes_for_exprs(exprs),\n op=TransformNode(base, exprs),\n index_cols=None,\n force_execution_mode=self._force_execution_mode,\n )\n\n return base\n else:\n return self.__constructor__(\n columns=self.columns,\n dtypes=self._dtypes,\n op=SortNode(self, columns, ascending, na_position),\n index_cols=None,\n force_execution_mode=self._force_execution_mode,\n )\n else:\n base = self\n\n # If index is preserved and we have no index columns then we\n # need to create one using __rowid__ virtual column.\n if self._index_cols is None:\n base = base._materialize_rowid()\n\n return self.__constructor__(\n columns=base.columns,\n dtypes=base._dtypes,\n op=SortNode(base, columns, ascending, na_position),\n index_cols=base._index_cols,\n force_execution_mode=self._force_execution_mode,\n )\n\n def filter(self, key):\n if not isinstance(key, type(self)):\n raise NotImplementedError(\"Unsupported key type in filter\")\n\n if not isinstance(key._op, TransformNode) or len(key.columns) != 1:\n raise NotImplementedError(\"Unsupported key in filter\")\n\n key_col = key.columns[0]\n if not is_bool_dtype(key._dtypes[key_col]):\n raise NotImplementedError(\"Unsupported key in filter\")\n\n base = self._find_common_projections_base(key)\n if base is None:\n raise NotImplementedError(\"Unsupported key in filter\")\n\n # We build the resulting frame by applying the filter to the\n # base frame and then using the filtered result as a new base.\n # If base frame has no index columns, then we need to create\n # one.\n key_exprs = translate_exprs_to_base(key._op.exprs, base)\n if base._index_cols is None:\n filter_base = 
base._materialize_rowid()\n key_exprs = replace_frame_in_exprs(key_exprs, base, filter_base)\n else:\n filter_base = base\n condition = key_exprs[key_col]\n filtered_base = self.__constructor__(\n columns=filter_base.columns,\n dtypes=filter_base._dtypes,\n op=FilterNode(filter_base, condition),\n index_cols=filter_base._index_cols,\n force_execution_mode=self._force_execution_mode,\n )\n\n if self is base:\n exprs = OrderedDict()\n for col in filtered_base._table_cols:\n exprs[col] = filtered_base.ref(col)\n else:\n assert isinstance(\n self._op, TransformNode\n ), f\"unexpected op: {self._op.dumps()}\"\n exprs = translate_exprs_to_base(self._op.exprs, base)\n exprs = replace_frame_in_exprs(exprs, base, filtered_base)\n if base._index_cols is None:\n exprs[\"__index__\"] = filtered_base.ref(\"__index__\")\n exprs.move_to_end(\"__index__\", last=False)\n\n return self.__constructor__(\n columns=self.columns,\n dtypes=self._dtypes_for_exprs(exprs),\n op=TransformNode(filtered_base, exprs),\n index_cols=filtered_base._index_cols,\n force_execution_mode=self._force_execution_mode,\n )\n\n def _materialize_rowid(self):\n exprs = OrderedDict()\n exprs[\"__index__\"] = self.ref(\"__rowid__\")\n for col in self._table_cols:\n exprs[col] = self.ref(col)\n return self.__constructor__(\n columns=self.columns,\n dtypes=self._dtypes_for_exprs(exprs),\n op=TransformNode(self, exprs),\n index_cols=[\"__index__\"],\n uses_rowid=True,\n force_execution_mode=self._force_execution_mode,\n )\n\n def _index_exprs(self):\n exprs = OrderedDict()\n if self._index_cols:\n for col in self._index_cols:\n exprs[col] = self.ref(col)\n return exprs\n\n def _find_common_projections_base(self, rhs):\n bases = {self}\n while self._is_projection():\n self = self._op.input[0]\n bases.add(self)\n\n while rhs not in bases and rhs._is_projection():\n rhs = rhs._op.input[0]\n\n if rhs in bases:\n return rhs\n\n return None\n\n def _is_projection(self):\n return isinstance(self._op, TransformNode)\n\n def _execute(self):\n if isinstance(self._op, FrameNode):\n return\n\n if self._force_execution_mode == \"lazy\":\n raise RuntimeError(\"unexpected execution triggered on lazy frame\")\n\n # Some frames require rowid which is available for executed frames only.\n # Also there is a common pattern when MaskNode is executed to print\n # frame. If we run the whole tree then any following frame usage will\n # require re-compute. 
So we just execute MaskNode's operands.\n self._run_sub_queries()\n\n if self._can_execute_arrow():\n new_table = self._execute_arrow()\n new_partitions = np.empty((1, 1), dtype=np.dtype(object))\n new_partitions[0][0] = self._frame_mgr_cls._partition_class.put_arrow(\n new_table\n )\n else:\n if self._force_execution_mode == \"arrow\":\n raise RuntimeError(\"forced arrow execution failed\")\n\n new_partitions = self._frame_mgr_cls.run_exec_plan(\n self._op, self._index_cols, self._dtypes, self._table_cols\n )\n self._partitions = new_partitions\n self._op = FrameNode(self)\n\n def _require_executed_base(self):\n if isinstance(self._op, MaskNode):\n return True\n return self._uses_rowid\n\n def _run_sub_queries(self):\n if isinstance(self._op, FrameNode):\n return\n\n if self._require_executed_base():\n for op in self._op.input:\n op._execute()\n else:\n for frame in self._op.input:\n frame._run_sub_queries()\n\n def _can_execute_arrow(self):\n if isinstance(self._op, FrameNode):\n return self._has_arrow_table()\n elif isinstance(self._op, MaskNode):\n return (\n self._op.row_indices is None and self._op.input[0]._can_execute_arrow()\n )\n elif isinstance(self._op, TransformNode):\n return self._op.is_drop() and self._op.input[0]._can_execute_arrow()\n elif isinstance(self._op, UnionNode):\n return all(frame._can_execute_arrow() for frame in self._op.input)\n else:\n return False\n\n def _execute_arrow(self):\n if isinstance(self._op, FrameNode):\n if self._partitions.size == 0:\n return pyarrow.Table()\n else:\n assert self._partitions.size == 1\n return self._partitions[0][0].get()\n elif isinstance(self._op, MaskNode):\n return self._op.input[0]._arrow_row_slice(self._op.row_numeric_idx)\n elif isinstance(self._op, TransformNode):\n return self._op.input[0]._arrow_col_slice(set(self._op.exprs.keys()))\n elif isinstance(self._op, UnionNode):\n return self._arrow_concat(self._op.input)\n else:\n raise RuntimeError(f\"Unexpected op ({type(self._op)}) in _execute_arrow\")\n\n def _arrow_col_slice(self, new_columns):\n table = self._execute_arrow()\n return table.drop(\n [f\"F_{col}\" for col in self._table_cols if col not in new_columns]\n )\n\n def _arrow_row_slice(self, row_numeric_idx):\n table = self._execute_arrow()\n if isinstance(row_numeric_idx, slice):\n start = 0 if row_numeric_idx.start is None else row_numeric_idx.start\n if start < 0:\n start = table.num_rows - start\n end = (\n table.num_rows if row_numeric_idx.stop is None else row_numeric_idx.stop\n )\n if end < 0:\n end = table.num_rows - end\n if row_numeric_idx.step is None or row_numeric_idx.step == 1:\n length = 0 if start >= end else end - start\n return table.slice(start, length)\n else:\n parts = []\n for i in range(start, end, row_numeric_idx.step):\n parts.append(table.slice(i, 1))\n return pyarrow.concat_tables(parts)\n\n start = None\n end = None\n parts = []\n for idx in row_numeric_idx:\n if start is None:\n start = idx\n end = idx\n elif idx == end + 1:\n end = idx\n else:\n if start:\n parts.append(table.slice(start, end - start + 1))\n start = idx\n end = idx\n parts.append(table.slice(start, end - start + 1))\n\n return pyarrow.concat_tables(parts)\n\n @classmethod\n def _arrow_concat(cls, frames):\n return pyarrow.concat_tables(frame._execute_arrow() for frame in frames)\n\n def _build_index_cache(self):\n assert isinstance(self._op, FrameNode)\n\n if self._partitions.size == 0:\n self._index_cache = Index.__new__(Index)\n else:\n assert self._partitions.size == 1\n obj = self._partitions[0][0].get()\n if 
isinstance(obj, (pd.DataFrame, pd.Series)):\n self._index_cache = obj.index\n else:\n assert isinstance(obj, pyarrow.Table)\n if self._index_cols is None:\n self._index_cache = Index.__new__(\n RangeIndex, data=range(obj.num_rows)\n )\n else:\n index_at = obj.drop([f\"F_{col}\" for col in self.columns])\n index_df = index_at.to_pandas()\n index_df.set_index(\n [f\"F_{col}\" for col in self._index_cols], inplace=True\n )\n index_df.index.rename(\n self._index_names(self._index_cols), inplace=True\n )\n self._index_cache = index_df.index\n\n def _get_index(self):\n self._execute()\n if self._index_cache is None:\n self._build_index_cache()\n return self._index_cache\n\n def _set_index(self, new_index):\n raise NotImplementedError(\"OmnisciOnRayFrame._set_index is not yet suported\")\n\n def reset_index(self, drop):\n if drop:\n exprs = OrderedDict()\n for c in self.columns:\n exprs[c] = self.ref(c)\n return self.__constructor__(\n columns=self.columns,\n dtypes=self._dtypes_for_exprs(exprs),\n op=TransformNode(self, exprs),\n index_cols=None,\n force_execution_mode=self._force_execution_mode,\n )\n else:\n if self._index_cols is None:\n raise NotImplementedError(\n \"default index reset with no drop is not supported\"\n )\n # Need to demangle index names.\n exprs = OrderedDict()\n for i, c in enumerate(self._index_cols):\n name = self._index_name(c)\n if name is None:\n name = f\"level_{i}\"\n if name in exprs:\n raise ValueError(f\"cannot insert {name}, already exists\")\n exprs[name] = self.ref(c)\n for c in self.columns:\n if c in exprs:\n raise ValueError(f\"cannot insert {c}, already exists\")\n exprs[c] = self.ref(c)\n new_columns = Index.__new__(Index, data=exprs.keys(), dtype=\"O\")\n return self.__constructor__(\n columns=new_columns,\n dtypes=self._dtypes_for_exprs(exprs),\n op=TransformNode(self, exprs),\n index_cols=None,\n force_execution_mode=self._force_execution_mode,\n )\n\n def _set_columns(self, new_columns):\n exprs = self._index_exprs()\n for old, new in zip(self.columns, new_columns):\n exprs[new] = self.ref(old)\n return self.__constructor__(\n columns=new_columns,\n dtypes=self._dtypes.tolist(),\n op=TransformNode(self, exprs),\n index_cols=self._index_cols,\n force_execution_mode=self._force_execution_mode,\n )\n\n def _get_columns(self):\n return super(OmnisciOnRayFrame, self)._get_columns()\n\n columns = property(_get_columns)\n index = property(_get_index, _set_index)\n\n def has_multiindex(self):\n if self._index_cache is not None:\n return isinstance(self._index_cache, MultiIndex)\n return self._index_cols is not None and len(self._index_cols) > 1\n\n def to_pandas(self):\n self._execute()\n\n if self._force_execution_mode == \"lazy\":\n raise RuntimeError(\"unexpected to_pandas triggered on lazy frame\")\n\n df = self._frame_mgr_cls.to_pandas(self._partitions)\n\n # If we make dataframe from Arrow table then we might need to set\n # index columns.\n if len(df.columns) != len(self.columns):\n assert self._index_cols\n df.set_index([f\"F_{col}\" for col in self._index_cols], inplace=True)\n df.index.rename(self._index_names(self._index_cols), inplace=True)\n assert len(df.columns) == len(self.columns)\n else:\n assert self._index_cols is None\n assert df.index.name is None, f\"index name '{df.index.name}' is not None\"\n\n # Restore original column labels encoded in OmniSci to meet its\n # restirctions on column names.\n df.columns = self.columns\n\n return df\n\n def _index_names(self, cols):\n if len(cols) == 1:\n return self._index_name(cols[0])\n return 
[self._index_name(n) for n in cols]\n\n def _index_name(self, col):\n if col == \"__index__\":\n return None\n\n match = re.search(\"__index__\\\\d+_(.*)\", col)\n if match:\n name = match.group(1)\n if name == \"__None__\":\n return None\n return name\n\n return col\n\n def _find_index_or_col(self, col):\n \"\"\"For given column or index name return a column name\"\"\"\n if col in self.columns:\n return col\n\n if self._index_cols is not None:\n for idx_col in self._index_cols:\n if re.match(f\"__index__\\\\d+_{col}\", idx_col):\n return idx_col\n\n raise ValueError(f\"Unknown column '{col}'\")\n\n @classmethod\n def from_pandas(cls, df):\n new_index = df.index\n new_columns = df.columns\n # If there is non-trivial index, we put it into columns.\n # That's what we usually have for arrow tables and execution\n # result. Unnamed index is renamed to __index__. Also all\n # columns get 'F_' prefix to handle names unsupported in\n # OmniSci.\n if cls._is_trivial_index(df.index):\n index_cols = None\n else:\n orig_index_names = df.index.names\n orig_df = df\n\n index_cols = [\n f\"__index__{i}_{'__None__' if n is None else n}\"\n for i, n in enumerate(df.index.names)\n ]\n df.index.names = index_cols\n df = df.reset_index()\n\n orig_df.index.names = orig_index_names\n new_dtypes = df.dtypes\n df = df.add_prefix(\"F_\")\n new_parts, new_lengths, new_widths = cls._frame_mgr_cls.from_pandas(df, True)\n return cls(\n new_parts,\n new_index,\n new_columns,\n new_lengths,\n new_widths,\n dtypes=new_dtypes,\n index_cols=index_cols,\n )\n\n @classmethod\n def _is_trivial_index(cls, index):\n \"\"\"Return true if index is a range [0..N]\"\"\"\n if isinstance(index, pd.RangeIndex):\n return index.start == 0 and index.step == 1\n if not isinstance(index, pd.Int64Index):\n return False\n return (\n index.is_monotonic_increasing\n and index.unique\n and index.min == 0\n and index.max == len(index) - 1\n )\n", "path": "modin/experimental/engines/omnisci_on_ray/frame/data.py"}]} |
gh_patches_debug_1299 | rasdani/github-patches | git_diff | ckan__ckan-7353 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
No default for WTF_CSRF_SECRET_KEY config option
## CKAN version
2.10
## Describe the bug
The `WTF_CSRF_SECRET_KEY` config option, used to generate the CSRF tokens, is defined as follows in the declaration:
https://github.com/ckan/ckan/blob/9e60350f27c9266b5afbd741484265c5e06a2d38/ckan/config/config_declaration.yaml#L645-L647
This means that when a new ini file is generated, a secret value is added to it by default, but when that config option is not defined in an ini file (i.e. you are using an ini file generated with a previous version of CKAN when upgrading your instance), the default value is `None`.
This causes the app to fail on all requests with:
```
File "/home/adria/dev/pyenvs/ckan-py3/src/ckan/ckan/templates/base.html", line 27, in block 'meta'
<meta name="{{ g.csrf_field_name }}" content="{{ csrf_token() }}" />
File "/home/adria/dev/pyenvs/ckan-py3/lib/python3.8/site-packages/flask_wtf/csrf.py", line 36, in generate_csrf
secret_key = _get_config(
File "/home/adria/dev/pyenvs/ckan-py3/lib/python3.8/site-packages/flask_wtf/csrf.py", line 136, in _get_config
raise RuntimeError(message)
RuntimeError: A secret key is required to use CSRF.
```
That's because wtforms falls back to Flask's `SECRET_KEY` only if `WTF_CSRF_SECRET_KEY` is not present in the config, but we add it to the config, just empty.
This setting should either be checked at startup to see if it's set, or fall back to a default like we do with the rest of the secret keys (I prefer the latter).
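
For the fallback option, something along these lines in the Flask app setup should be enough (untested sketch; it reuses Flask's `SECRET_KEY`, which CKAN already validates at startup):

```py
# Untested sketch: if no dedicated CSRF secret is configured, reuse the
# already-validated Flask SECRET_KEY so Flask-WTF always gets a value.
wtf_key = "WTF_CSRF_SECRET_KEY"
if not app.config.get(wtf_key):
    app.config[wtf_key] = app.config["SECRET_KEY"]
```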
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ckan/config/middleware/flask_app.py`
Content:
```
1 # encoding: utf-8
2 from __future__ import annotations
3
4 import os
5 import sys
6 import time
7 import inspect
8 import itertools
9 import pkgutil
10 import logging
11
12 from logging.handlers import SMTPHandler
13 from typing import Any, Iterable, Optional, Union, cast
14
15 from flask import Blueprint, send_from_directory, current_app
16 from flask.ctx import _AppCtxGlobals
17 from flask.sessions import SessionInterface
18
19 from werkzeug.exceptions import (
20 default_exceptions,
21 HTTPException,
22 Unauthorized,
23 Forbidden
24 )
25 from werkzeug.routing import Rule
26 from werkzeug.local import LocalProxy
27
28 from flask_babel import Babel
29
30 from beaker.middleware import SessionMiddleware
31 from flask_login import LoginManager
32 from flask_wtf.csrf import CSRFProtect
33 from ckan.common import CKANConfig, asbool, session, current_user
34
35 import ckan.model as model
36 from ckan.lib import base
37 from ckan.lib import helpers as h
38 from ckan.lib import jinja_extensions
39 from ckan.lib import uploader
40 from ckan.lib import i18n
41 from ckan.lib.flask_multistatic import MultiStaticFlask
42 from ckan.common import config, g, request, ungettext
43 from ckan.config.middleware.common_middleware import (TrackingMiddleware,
44 HostHeaderMiddleware,
45 RootPathMiddleware)
46 import ckan.lib.app_globals as app_globals
47 import ckan.lib.plugins as lib_plugins
48 from ckan.lib.webassets_tools import get_webassets_path
49
50 from ckan.plugins import PluginImplementations
51 from ckan.plugins.interfaces import IBlueprint, IMiddleware, ITranslation
52 from ckan.views import (identify_user,
53 set_cors_headers_for_response,
54 set_controller_and_action,
55 set_cache_control_headers_for_response,
56 handle_i18n,
57 set_ckan_current_url,
58 _get_user_for_apitoken,
59 )
60 from ckan.types import CKANApp, Config, Response
61
62 log = logging.getLogger(__name__)
63
64 csrf = CSRFProtect()
65
66 csrf_warn_extensions = (
67 "Extensions are excluded from CSRF protection! "
68 "We allow extensions to run without CSRF protection "
69 "but it will be forced future releases. "
70 "Read the documentation for more information on how to add "
71 "CSRF protection to your extension."
72 )
73
74
75 class I18nMiddleware(object):
76 def __init__(self, app: CKANApp):
77 self.app = app
78
79 def __call__(self, environ: Any, start_response: Any):
80
81 handle_i18n(environ)
82 return self.app(environ, start_response)
83
84
85 class CKANBabel(Babel):
86 app: CKANApp
87
88 def __init__(self, *pargs: Any, **kwargs: Any):
89 super(CKANBabel, self).__init__(*pargs, **kwargs)
90 self._i18n_path_idx = 0
91
92 @property
93 def domain(self) -> str:
94 default = super(CKANBabel, self).domain
95 multiple = self.app.config.get('BABEL_MULTIPLE_DOMAINS')
96 if not multiple:
97 return default
98 domains = multiple.split(';')
99 try:
100 return domains[self._i18n_path_idx]
101 except IndexError:
102 return default
103
104 @property
105 def translation_directories(self) -> Iterable[str]:
106 self._i18n_path_idx = 0
107 for path in super(CKANBabel, self).translation_directories:
108 yield path
109 self._i18n_path_idx += 1
110
111
112 def _ungettext_alias():
113 u'''
114 Provide `ungettext` as an alias of `ngettext` for backwards
115 compatibility
116 '''
117 return dict(ungettext=ungettext)
118
119
120 class BeakerSessionInterface(SessionInterface):
121 def open_session(self, app: Any, request: Any):
122 if 'beaker.session' in request.environ:
123 return request.environ['beaker.session']
124
125 def save_session(self, app: Any, session: Any, response: Any):
126 session.save()
127
128
129 def make_flask_stack(conf: Union[Config, CKANConfig]) -> CKANApp:
130 """ This has to pass the flask app through all the same middleware that
131 Pylons used """
132
133 root = os.path.dirname(
134 os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
135
136 debug = asbool(conf.get('debug', conf.get('DEBUG', False)))
137 testing = asbool(conf.get('testing', conf.get('TESTING', False)))
138 app = flask_app = CKANFlask(__name__, static_url_path='')
139
140 # Register storage for accessing group images, site logo, etc.
141 storage_folder = []
142 storage = uploader.get_storage_path()
143 if storage:
144 storage_folder = [os.path.join(storage, 'storage')]
145
146 # Static files folders (core and extensions)
147 public_folder = config.get(u'ckan.base_public_folder')
148 app.static_folder = config.get(
149 'extra_public_paths'
150 ).split(',') + config.get('plugin_public_paths', []) + [
151 os.path.join(root, public_folder)
152 ] + storage_folder
153
154 app.jinja_options = jinja_extensions.get_jinja_env_options()
155 app.jinja_env.policies['ext.i18n.trimmed'] = True
156
157 app.debug = debug
158 app.testing = testing
159 app.template_folder = os.path.join(root, 'templates')
160 app.app_ctx_globals_class = CKAN_AppCtxGlobals
161 app.url_rule_class = CKAN_Rule
162
163 # Update Flask config with the CKAN values. We use the common config
164 # object as values might have been modified on `load_environment`
165 if config:
166 app.config.update(config)
167 else:
168 app.config.update(conf)
169
170 # Do all the Flask-specific stuff before adding other middlewares
171
172 # Secret key needed for flask-debug-toolbar and sessions
173 if not app.config.get('SECRET_KEY'):
174 app.config['SECRET_KEY'] = config.get('beaker.session.secret')
175 if not app.config.get('SECRET_KEY'):
176 raise RuntimeError(u'You must provide a value for the secret key'
177 ' with the SECRET_KEY config option')
178
179 root_path = config.get('ckan.root_path')
180 if debug:
181 from flask_debugtoolbar import DebugToolbarExtension
182 app.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False
183 debug_ext = DebugToolbarExtension()
184
185 # register path that includes `ckan.site_root` before
186 # initializing debug app. In such a way, our route receives
187 # higher precedence.
188
189 # TODO: After removal of Pylons code, switch to
190 # `APPLICATION_ROOT` config value for flask application. Right
191 # now it's a bad option because we are handling both pylons
192 # and flask urls inside helpers and splitting this logic will
193 # bring us tons of headache.
194 if root_path:
195 app.add_url_rule(
196 root_path.replace('{{LANG}}', '').rstrip('/') +
197 '/_debug_toolbar/static/<path:filename>',
198 '_debug_toolbar.static', debug_ext.send_static_file
199 )
200 debug_ext.init_app(app)
201
202 from werkzeug.debug import DebuggedApplication
203 app.wsgi_app = DebuggedApplication(app.wsgi_app, True)
204
205 namespace = 'beaker.session.'
206 session_opts = {k.replace('beaker.', ''): v
207 for k, v in config.items()
208 if k.startswith(namespace)}
209 if (not session_opts.get('session.data_dir') and
210 session_opts.get('session.type', 'file') == 'file'):
211 cache_dir = conf.get('cache_dir') or conf.get('cache.dir')
212 session_opts['session.data_dir'] = '{data_dir}/sessions'.format(
213 data_dir=cache_dir)
214
215 app.wsgi_app = RootPathMiddleware(app.wsgi_app)
216 app.wsgi_app = SessionMiddleware(app.wsgi_app, session_opts)
217 app.session_interface = BeakerSessionInterface()
218
219 # Add Jinja2 extensions and filters
220 app.jinja_env.filters['empty_and_escape'] = \
221 jinja_extensions.empty_and_escape
222
223 # Common handlers for all requests
224 #
225 # flask types do not mention that it's possible to return a response from
226 # the `before_request` callback
227 app.before_request(ckan_before_request)
228 app.after_request(ckan_after_request)
229
230 # Template context processors
231 app.context_processor(helper_functions)
232 app.context_processor(c_object)
233
234 app.context_processor(_ungettext_alias)
235
236 # Babel
237 _ckan_i18n_dir = i18n.get_ckan_i18n_dir()
238
239 pairs = [
240 cast("tuple[str, str]", (_ckan_i18n_dir, u'ckan'))
241 ] + [
242 (p.i18n_directory(), p.i18n_domain())
243 for p in reversed(list(PluginImplementations(ITranslation)))
244 ]
245
246 i18n_dirs, i18n_domains = zip(*pairs)
247
248 app.config[u'BABEL_TRANSLATION_DIRECTORIES'] = ';'.join(i18n_dirs)
249 app.config[u'BABEL_DOMAIN'] = 'ckan'
250 app.config[u'BABEL_MULTIPLE_DOMAINS'] = ';'.join(i18n_domains)
251 app.config[u'BABEL_DEFAULT_TIMEZONE'] = str(h.get_display_timezone())
252
253 babel = CKANBabel(app)
254
255 babel.localeselector(get_locale)
256
257 # WebAssets
258 _setup_webassets(app)
259
260 # Auto-register all blueprints defined in the `views` folder
261 _register_core_blueprints(app)
262 _register_error_handler(app)
263
264 # CSRF
265 app.config['WTF_CSRF_FIELD_NAME'] = "_csrf_token"
266 csrf.init_app(app)
267
268 # Set up each IBlueprint extension as a Flask Blueprint
269 _register_plugins_blueprints(app)
270
271 if config.get("ckan.csrf_protection.ignore_extensions"):
272 log.warn(csrf_warn_extensions)
273 _exempt_plugins_blueprints_from_csrf(csrf)
274
275 lib_plugins.register_package_blueprints(app)
276 lib_plugins.register_group_blueprints(app)
277
278 # Start other middleware
279 for plugin in PluginImplementations(IMiddleware):
280 app = plugin.make_middleware(app, config)
281
282 for plugin in PluginImplementations(IMiddleware):
283 try:
284 app = plugin.make_error_log_middleware(app, config)
285 except AttributeError:
286 log.critical('Middleware class {0} is missing the method'
287 'make_error_log_middleware.'
288 .format(plugin.__class__.__name__))
289
290 # Initialize flask-login
291 login_manager = LoginManager()
292 login_manager.init_app(app)
293 # make anonymous_user an instance of CKAN custom class
294 login_manager.anonymous_user = model.AnonymousUser
295 # The name of the view to redirect to when the user needs to log in.
296 login_manager.login_view = config.get("ckan.auth.login_view")
297
298 @login_manager.user_loader
299 def load_user(user_id: str) -> Optional["model.User"]: # type: ignore
300 """
301 This callback function is called whenever we need to reload from
302 the database the logged in user in the session (ie the cookie).
303
304 Site maintainers can choose to completely ignore cookie based
305 authentication for API calls, but that will break existing JS widgets
306 that rely on API calls so it should be used with caution.
307 """
308 endpoint = request.endpoint or ""
309 is_api = endpoint.split(".")[0] == "api"
310 if (
311 not config.get("ckan.auth.enable_cookie_auth_in_api")
312 and is_api):
313 return
314
315 return model.User.get(user_id)
316
317 @login_manager.request_loader
318 def load_user_from_request(request): # type: ignore
319 """
320 This callback function is called whenever a user could not be
321 authenticated via the session cookie, so we fall back to the API token.
322 """
323 g.login_via_auth_header = True
324
325 user = _get_user_for_apitoken()
326
327 return user
328
329 # Update the main CKAN config object with the Flask specific keys
330 # that were set here or autogenerated
331 flask_config_keys = set(flask_app.config.keys()) - set(config.keys())
332 for key in flask_config_keys:
333 config[key] = flask_app.config[key]
334
335 # Prevent the host from request to be added to the new header location.
336 app = HostHeaderMiddleware(app)
337
338 app = I18nMiddleware(app)
339
340 if config.get('ckan.tracking_enabled'):
341 app = TrackingMiddleware(app, config)
342
343 # Add a reference to the actual Flask app so it's easier to access
344 # type_ignore_reason: custom attribute
345 app._wsgi_app = flask_app # type: ignore
346
347 return app
348
349
350 def get_locale() -> str:
351 u'''
352 Return the value of the `CKAN_LANG` key of the WSGI environ,
353 set by the I18nMiddleware based on the URL.
354 If no value is defined, it defaults to `ckan.locale_default` or `en`.
355 '''
356 return request.environ.get(
357 u'CKAN_LANG',
358 config.get(u'ckan.locale_default'))
359
360
361 def set_remote_user_as_current_user_for_tests():
362 '''This function exists to maintain backward compatibility
363 for the `TESTS` of the `CKAN` extensions
364
365 If `REMOTE_USER` is in the request environ we will try to get
366 the user_obj from the DB, if there is an user_obj, we will set the
367 `session['_user_id']` with that user_obj.id
368
369 This way, `Flask-Login` will load the user from
370 `session['_user_id']` and will set the `current_user`
371 proxy for us behind the scene.
372 '''
373 if "REMOTE_USER" in request.environ:
374 username = request.environ["REMOTE_USER"]
375 if isinstance(username, bytes):
376 username = username.decode()
377
378 userobj = model.User.get(username)
379 if userobj:
380 session["_user_id"] = userobj.id
381
382
383 def ckan_before_request() -> Optional[Response]:
384 u'''
385 Common handler executed before all Flask requests
386
387 If a response is returned by any of the functions called (
388 currently ``identify_user()` only) any further processing of the
389 request will be stopped and that response will be returned.
390
391 '''
392 response = None
393
394 g.__timer = time.time()
395
396 # Update app_globals
397 app_globals.app_globals._check_uptodate()
398
399 # This is needed for the TESTS of the CKAN extensions only!
400 # we should remove it as soon as the maintainers of the
401 # CKAN extensions change their tests according to the new changes.
402 if config.get("testing"):
403 set_remote_user_as_current_user_for_tests()
404
405 # Identify the user from the flask-login cookie or the API header
406 # Sets g.user and g.userobj for extensions
407 response = identify_user()
408
409 # Disable CSRF protection if user was logged in via the Authorization
410 # header
411 if g.get("login_via_auth_header"):
412 # Get the actual view function, as it might not match the endpoint,
413 # eg "organization.edit" -> "group.edit", or custom dataset types
414 endpoint = request.endpoint or ""
415 view = current_app.view_functions.get(endpoint)
416 dest = f"{view.__module__}.{view.__name__}" # type: ignore
417 csrf.exempt(dest)
418
419 # Set the csrf_field_name so we can use it in our templates
420 g.csrf_field_name = config.get("WTF_CSRF_FIELD_NAME")
421
422 # Provide g.controller and g.action for backward compatibility
423 # with extensions
424 set_controller_and_action()
425
426 set_ckan_current_url(request.environ)
427
428 return response
429
430
431 def ckan_after_request(response: Response) -> Response:
432 u'''Common handler executed after all Flask requests'''
433
434 # Dispose of the SQLALchemy session
435 model.Session.remove()
436
437 # Set CORS headers if necessary
438 response = set_cors_headers_for_response(response)
439
440 # Set Cache Control headers
441 response = set_cache_control_headers_for_response(response)
442
443 r_time = time.time() - g.__timer
444 url = request.environ['PATH_INFO']
445 status_code = response.status_code
446
447 log.info(' %s %s render time %.3f seconds' % (status_code, url, r_time))
448
449 return response
450
451
452 def helper_functions() -> dict[str, h.HelperAttributeDict]:
453 u'''Make helper functions (`h`) available to Flask templates'''
454 if not h.helper_functions:
455 h.load_plugin_helpers()
456 return dict(h=h.helper_functions)
457
458
459 def c_object() -> dict[str, LocalProxy]:
460 u'''
461 Expose `c` as an alias of `g` in templates for backwards compatibility
462 '''
463 return dict(c=g)
464
465
466 class CKAN_Rule(Rule): # noqa
467
468 u'''Custom Flask url_rule_class.
469
470 We use it to be able to flag routes defined in extensions as such
471 '''
472
473 def __init__(self, *args: Any, **kwargs: Any):
474 self.ckan_core = True
475 super(CKAN_Rule, self).__init__(*args, **kwargs)
476
477
478 class CKAN_AppCtxGlobals(_AppCtxGlobals): # noqa
479
480 '''Custom Flask AppCtxGlobal class (flask.g).'''
481
482 def __getattr__(self, name: str):
483 '''
484 If flask.g doesn't have attribute `name`, fall back to CKAN's
485 app_globals object.
486 If the key is also not found in there, an AttributeError will be raised
487 '''
488 return getattr(app_globals.app_globals, name)
489
490
491 class CKANFlask(MultiStaticFlask):
492
493 '''Extend the Flask class with a special method called on incoming
494 requests by AskAppDispatcherMiddleware.
495 '''
496
497 app_name: str = 'flask_app'
498 static_folder: list[str]
499 session_interface: SessionInterface
500
501 def can_handle_request(
502 self,
503 environ: Any) -> Union[tuple[bool, str], tuple[bool, str, str]]:
504 '''
505 Decides whether it can handle a request with the Flask app by
506 matching the request environ against the route mapper
507
508 Returns (True, 'flask_app', origin) if this is the case.
509
510 `origin` can be either 'core' or 'extension' depending on where
511 the route was defined.
512 '''
513 urls = self.url_map.bind_to_environ(environ)
514
515 try:
516 rule, args = urls.match(return_rule=True)
517 origin = 'core'
518 if not getattr(rule, 'ckan_core', True):
519 origin = 'extension'
520 log.debug('Flask route match, endpoint: {0}, args: {1}, '
521 'origin: {2}'.format(rule.endpoint, args, origin))
522
523 # Disable built-in flask's ability to prepend site root to
524 # generated url, as we are going to use locale and existing
525 # logic is not flexible enough for this purpose
526 environ['SCRIPT_NAME'] = ''
527
528 return (True, self.app_name, origin)
529 except HTTPException:
530 return (False, self.app_name)
531
532 def register_extension_blueprint(self, blueprint: Blueprint,
533 **kwargs: dict[str, Any]):
534 '''
535 This method should be used to register blueprints that come from
536 extensions, so there's an opportunity to add extension-specific
537 options.
538
539 Sets the rule property `ckan_core` to False, to indicate that the rule
540 applies to an extension route.
541 '''
542 self.register_blueprint(blueprint, **kwargs)
543
544 # Get the new blueprint rules
545 bp_rules = itertools.chain.from_iterable(
546 v for k, v in self.url_map._rules_by_endpoint.items()
547 if k.startswith(u'{0}.'.format(blueprint.name))
548 )
549
550 # This compare key will ensure the rule will be near the top.
551 top_compare_key = False, -100, [(-2, 0)]
552 for r in bp_rules:
553 setattr(r, "ckan_core", False)
554 setattr(r, "match_compare_key", lambda: top_compare_key)
555
556
557 def _register_plugins_blueprints(app: CKANApp):
558 """ Resgister all blueprints defined in plugins by IBlueprint
559 """
560 for plugin in PluginImplementations(IBlueprint):
561 plugin_blueprints = plugin.get_blueprint()
562 if isinstance(plugin_blueprints, list):
563 for blueprint in plugin_blueprints:
564 app.register_extension_blueprint(blueprint)
565 else:
566 app.register_extension_blueprint(plugin_blueprints)
567
568
569 def _exempt_plugins_blueprints_from_csrf(csrf: CSRFProtect):
570 """Exempt plugins blueprints from CSRF protection.
571
572 This feature will be deprecated in future versions.
573 """
574 for plugin in PluginImplementations(IBlueprint):
575 plugin_blueprints = plugin.get_blueprint()
576 if isinstance(plugin_blueprints, list):
577 for blueprint in plugin_blueprints:
578 csrf.exempt(blueprint)
579 else:
580 csrf.exempt(plugin_blueprints)
581
582
583 def _register_core_blueprints(app: CKANApp):
584 u'''Register all blueprints defined in the `views` folder
585 '''
586 def is_blueprint(mm: Any):
587 return isinstance(mm, Blueprint) and getattr(mm, 'auto_register', True)
588
589 path = os.path.join(os.path.dirname(__file__), '..', '..', 'views')
590
591 for loader, name, __ in pkgutil.iter_modules([path], 'ckan.views.'):
592 # type_ignore_reason: incorrect external type declarations
593 module = loader.find_module(name).load_module(name) # type: ignore
594 for blueprint in inspect.getmembers(module, is_blueprint):
595 app.register_blueprint(blueprint[1])
596 log.debug(u'Registered core blueprint: {0!r}'.format(blueprint[0]))
597
598
599 def _register_error_handler(app: CKANApp):
600 u'''Register error handler'''
601
602 def error_handler(e: Exception) -> Union[
603 tuple[str, Optional[int]], Optional[Response]
604 ]:
605 debug = config.get('debug')
606 if isinstance(e, HTTPException):
607 if debug:
608 log.debug(e, exc_info=sys.exc_info) # type: ignore
609 else:
610 log.info(e)
611
612 show_login_redirect_link = current_user.is_anonymous and type(
613 e
614 ) in (Unauthorized, Forbidden)
615 extra_vars = {
616 u'code': e.code,
617 u'content': e.description,
618 u'name': e.name,
619 u'show_login_redirect_link': show_login_redirect_link
620 }
621 return base.render(
622 u'error_document_template.html', extra_vars), e.code
623
624 log.error(e, exc_info=sys.exc_info) # type: ignore
625 extra_vars = {u'code': [500], u'content': u'Internal server error'}
626 return base.render(u'error_document_template.html', extra_vars), 500
627
628 for code in default_exceptions:
629 app.register_error_handler(code, error_handler)
630 if not app.debug and not app.testing:
631 app.register_error_handler(Exception, error_handler)
632 if config.get('email_to'):
633 _setup_error_mail_handler(app)
634
635
636 def _setup_error_mail_handler(app: CKANApp):
637
638 class ContextualFilter(logging.Filter):
639 def filter(self, log_record: Any) -> bool:
640 log_record.url = request.path
641 log_record.method = request.method
642 log_record.ip = request.environ.get("REMOTE_ADDR")
643 log_record.headers = request.headers
644 return True
645
646 smtp_server = config.get('smtp.server')
647 mailhost = cast("tuple[str, int]", tuple(smtp_server.split(':'))) \
648 if ':' in smtp_server else smtp_server
649 credentials = None
650 if config.get('smtp.user'):
651 credentials = (
652 config.get('smtp.user'),
653 config.get('smtp.password')
654 )
655 secure = () if config.get('smtp.starttls') else None
656 mail_handler = SMTPHandler(
657 mailhost=mailhost,
658 fromaddr=config.get('error_email_from'),
659 toaddrs=[config.get('email_to')],
660 subject='Application Error',
661 credentials=credentials,
662 secure=secure
663 )
664
665 mail_handler.setLevel(logging.ERROR)
666 mail_handler.setFormatter(logging.Formatter('''
667 Time: %(asctime)s
668 URL: %(url)s
669 Method: %(method)s
670 IP: %(ip)s
671 Headers: %(headers)s
672
673 '''))
674
675 context_provider = ContextualFilter()
676 app.logger.addFilter(context_provider)
677 app.logger.addHandler(mail_handler)
678
679
680 def _setup_webassets(app: CKANApp):
681 app.use_x_sendfile = config.get('ckan.webassets.use_x_sendfile')
682
683 webassets_folder = get_webassets_path()
684
685 def webassets(path: str):
686 return send_from_directory(webassets_folder, path)
687
688 app.add_url_rule('/webassets/<path:path>', 'webassets.index', webassets)
689
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ckan/config/middleware/flask_app.py b/ckan/config/middleware/flask_app.py
--- a/ckan/config/middleware/flask_app.py
+++ b/ckan/config/middleware/flask_app.py
@@ -262,7 +262,10 @@
_register_error_handler(app)
# CSRF
- app.config['WTF_CSRF_FIELD_NAME'] = "_csrf_token"
+ wtf_key = "WTF_CSRF_SECRET_KEY"
+ if not app.config.get(wtf_key):
+ config[wtf_key] = app.config[wtf_key] = app.config["SECRET_KEY"]
+ app.config["WTF_CSRF_FIELD_NAME"] = "_csrf_token"
csrf.init_app(app)
# Set up each IBlueprint extension as a Flask Blueprint
| {"golden_diff": "diff --git a/ckan/config/middleware/flask_app.py b/ckan/config/middleware/flask_app.py\n--- a/ckan/config/middleware/flask_app.py\n+++ b/ckan/config/middleware/flask_app.py\n@@ -262,7 +262,10 @@\n _register_error_handler(app)\n \n # CSRF\n- app.config['WTF_CSRF_FIELD_NAME'] = \"_csrf_token\"\n+ wtf_key = \"WTF_CSRF_SECRET_KEY\"\n+ if not app.config.get(wtf_key):\n+ config[wtf_key] = app.config[wtf_key] = app.config[\"SECRET_KEY\"]\n+ app.config[\"WTF_CSRF_FIELD_NAME\"] = \"_csrf_token\"\n csrf.init_app(app)\n \n # Set up each IBlueprint extension as a Flask Blueprint\n", "issue": "No default for WTF_CSRF_SECRET_KEY config option\n## CKAN version\r\n2.10 \r\n\r\n\r\n## Describe the bug\r\nThe `WTF_CSRF_SECRET_KEY` config option, used to generate the CSRF tokens is defined as such in the declaration:\r\n\r\nhttps://github.com/ckan/ckan/blob/9e60350f27c9266b5afbd741484265c5e06a2d38/ckan/config/config_declaration.yaml#L645-L647\r\n\r\nThis means that when a new ini file is generated, a secret value is added to the ini file by default, but when that config option is not defined in an ini file (ie you are using an ini file generated with a previous version of CKAN when upgrading your instance) the default value is `None`.\r\n\r\nThis causes the app to fail on all requests with:\r\n\r\n```\r\n File \"/home/adria/dev/pyenvs/ckan-py3/src/ckan/ckan/templates/base.html\", line 27, in block 'meta'\r\n <meta name=\"{{ g.csrf_field_name }}\" content=\"{{ csrf_token() }}\" />\r\n File \"/home/adria/dev/pyenvs/ckan-py3/lib/python3.8/site-packages/flask_wtf/csrf.py\", line 36, in generate_csrf\r\n secret_key = _get_config(\r\n File \"/home/adria/dev/pyenvs/ckan-py3/lib/python3.8/site-packages/flask_wtf/csrf.py\", line 136, in _get_config\r\n raise RuntimeError(message)\r\nRuntimeError: A secret key is required to use CSRF.\r\n```\r\nThat's because wtforms falls back to Flask's `SECRET_KEY` only if `WTF_CSRF_SECRET_KEY` is not present in the config, but we add it to the config, just empty.\r\n\r\nThis setting should have either be checked at startup to see if it's set or fallback to a default like we do with the rest of the secret keys (I prefer the latter)\r\n\n", "before_files": [{"content": "# encoding: utf-8\nfrom __future__ import annotations\n\nimport os\nimport sys\nimport time\nimport inspect\nimport itertools\nimport pkgutil\nimport logging\n\nfrom logging.handlers import SMTPHandler\nfrom typing import Any, Iterable, Optional, Union, cast\n\nfrom flask import Blueprint, send_from_directory, current_app\nfrom flask.ctx import _AppCtxGlobals\nfrom flask.sessions import SessionInterface\n\nfrom werkzeug.exceptions import (\n default_exceptions,\n HTTPException,\n Unauthorized,\n Forbidden\n)\nfrom werkzeug.routing import Rule\nfrom werkzeug.local import LocalProxy\n\nfrom flask_babel import Babel\n\nfrom beaker.middleware import SessionMiddleware\nfrom flask_login import LoginManager\nfrom flask_wtf.csrf import CSRFProtect\nfrom ckan.common import CKANConfig, asbool, session, current_user\n\nimport ckan.model as model\nfrom ckan.lib import base\nfrom ckan.lib import helpers as h\nfrom ckan.lib import jinja_extensions\nfrom ckan.lib import uploader\nfrom ckan.lib import i18n\nfrom ckan.lib.flask_multistatic import MultiStaticFlask\nfrom ckan.common import config, g, request, ungettext\nfrom ckan.config.middleware.common_middleware import (TrackingMiddleware,\n HostHeaderMiddleware,\n RootPathMiddleware)\nimport ckan.lib.app_globals as app_globals\nimport ckan.lib.plugins as 
lib_plugins\nfrom ckan.lib.webassets_tools import get_webassets_path\n\nfrom ckan.plugins import PluginImplementations\nfrom ckan.plugins.interfaces import IBlueprint, IMiddleware, ITranslation\nfrom ckan.views import (identify_user,\n set_cors_headers_for_response,\n set_controller_and_action,\n set_cache_control_headers_for_response,\n handle_i18n,\n set_ckan_current_url,\n _get_user_for_apitoken,\n )\nfrom ckan.types import CKANApp, Config, Response\n\nlog = logging.getLogger(__name__)\n\ncsrf = CSRFProtect()\n\ncsrf_warn_extensions = (\n \"Extensions are excluded from CSRF protection! \"\n \"We allow extensions to run without CSRF protection \"\n \"but it will be forced future releases. \"\n \"Read the documentation for more information on how to add \"\n \"CSRF protection to your extension.\"\n )\n\n\nclass I18nMiddleware(object):\n def __init__(self, app: CKANApp):\n self.app = app\n\n def __call__(self, environ: Any, start_response: Any):\n\n handle_i18n(environ)\n return self.app(environ, start_response)\n\n\nclass CKANBabel(Babel):\n app: CKANApp\n\n def __init__(self, *pargs: Any, **kwargs: Any):\n super(CKANBabel, self).__init__(*pargs, **kwargs)\n self._i18n_path_idx = 0\n\n @property\n def domain(self) -> str:\n default = super(CKANBabel, self).domain\n multiple = self.app.config.get('BABEL_MULTIPLE_DOMAINS')\n if not multiple:\n return default\n domains = multiple.split(';')\n try:\n return domains[self._i18n_path_idx]\n except IndexError:\n return default\n\n @property\n def translation_directories(self) -> Iterable[str]:\n self._i18n_path_idx = 0\n for path in super(CKANBabel, self).translation_directories:\n yield path\n self._i18n_path_idx += 1\n\n\ndef _ungettext_alias():\n u'''\n Provide `ungettext` as an alias of `ngettext` for backwards\n compatibility\n '''\n return dict(ungettext=ungettext)\n\n\nclass BeakerSessionInterface(SessionInterface):\n def open_session(self, app: Any, request: Any):\n if 'beaker.session' in request.environ:\n return request.environ['beaker.session']\n\n def save_session(self, app: Any, session: Any, response: Any):\n session.save()\n\n\ndef make_flask_stack(conf: Union[Config, CKANConfig]) -> CKANApp:\n \"\"\" This has to pass the flask app through all the same middleware that\n Pylons used \"\"\"\n\n root = os.path.dirname(\n os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\n debug = asbool(conf.get('debug', conf.get('DEBUG', False)))\n testing = asbool(conf.get('testing', conf.get('TESTING', False)))\n app = flask_app = CKANFlask(__name__, static_url_path='')\n\n # Register storage for accessing group images, site logo, etc.\n storage_folder = []\n storage = uploader.get_storage_path()\n if storage:\n storage_folder = [os.path.join(storage, 'storage')]\n\n # Static files folders (core and extensions)\n public_folder = config.get(u'ckan.base_public_folder')\n app.static_folder = config.get(\n 'extra_public_paths'\n ).split(',') + config.get('plugin_public_paths', []) + [\n os.path.join(root, public_folder)\n ] + storage_folder\n\n app.jinja_options = jinja_extensions.get_jinja_env_options()\n app.jinja_env.policies['ext.i18n.trimmed'] = True\n\n app.debug = debug\n app.testing = testing\n app.template_folder = os.path.join(root, 'templates')\n app.app_ctx_globals_class = CKAN_AppCtxGlobals\n app.url_rule_class = CKAN_Rule\n\n # Update Flask config with the CKAN values. 
We use the common config\n # object as values might have been modified on `load_environment`\n if config:\n app.config.update(config)\n else:\n app.config.update(conf)\n\n # Do all the Flask-specific stuff before adding other middlewares\n\n # Secret key needed for flask-debug-toolbar and sessions\n if not app.config.get('SECRET_KEY'):\n app.config['SECRET_KEY'] = config.get('beaker.session.secret')\n if not app.config.get('SECRET_KEY'):\n raise RuntimeError(u'You must provide a value for the secret key'\n ' with the SECRET_KEY config option')\n\n root_path = config.get('ckan.root_path')\n if debug:\n from flask_debugtoolbar import DebugToolbarExtension\n app.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False\n debug_ext = DebugToolbarExtension()\n\n # register path that includes `ckan.site_root` before\n # initializing debug app. In such a way, our route receives\n # higher precedence.\n\n # TODO: After removal of Pylons code, switch to\n # `APPLICATION_ROOT` config value for flask application. Right\n # now it's a bad option because we are handling both pylons\n # and flask urls inside helpers and splitting this logic will\n # bring us tons of headache.\n if root_path:\n app.add_url_rule(\n root_path.replace('{{LANG}}', '').rstrip('/') +\n '/_debug_toolbar/static/<path:filename>',\n '_debug_toolbar.static', debug_ext.send_static_file\n )\n debug_ext.init_app(app)\n\n from werkzeug.debug import DebuggedApplication\n app.wsgi_app = DebuggedApplication(app.wsgi_app, True)\n\n namespace = 'beaker.session.'\n session_opts = {k.replace('beaker.', ''): v\n for k, v in config.items()\n if k.startswith(namespace)}\n if (not session_opts.get('session.data_dir') and\n session_opts.get('session.type', 'file') == 'file'):\n cache_dir = conf.get('cache_dir') or conf.get('cache.dir')\n session_opts['session.data_dir'] = '{data_dir}/sessions'.format(\n data_dir=cache_dir)\n\n app.wsgi_app = RootPathMiddleware(app.wsgi_app)\n app.wsgi_app = SessionMiddleware(app.wsgi_app, session_opts)\n app.session_interface = BeakerSessionInterface()\n\n # Add Jinja2 extensions and filters\n app.jinja_env.filters['empty_and_escape'] = \\\n jinja_extensions.empty_and_escape\n\n # Common handlers for all requests\n #\n # flask types do not mention that it's possible to return a response from\n # the `before_request` callback\n app.before_request(ckan_before_request)\n app.after_request(ckan_after_request)\n\n # Template context processors\n app.context_processor(helper_functions)\n app.context_processor(c_object)\n\n app.context_processor(_ungettext_alias)\n\n # Babel\n _ckan_i18n_dir = i18n.get_ckan_i18n_dir()\n\n pairs = [\n cast(\"tuple[str, str]\", (_ckan_i18n_dir, u'ckan'))\n ] + [\n (p.i18n_directory(), p.i18n_domain())\n for p in reversed(list(PluginImplementations(ITranslation)))\n ]\n\n i18n_dirs, i18n_domains = zip(*pairs)\n\n app.config[u'BABEL_TRANSLATION_DIRECTORIES'] = ';'.join(i18n_dirs)\n app.config[u'BABEL_DOMAIN'] = 'ckan'\n app.config[u'BABEL_MULTIPLE_DOMAINS'] = ';'.join(i18n_domains)\n app.config[u'BABEL_DEFAULT_TIMEZONE'] = str(h.get_display_timezone())\n\n babel = CKANBabel(app)\n\n babel.localeselector(get_locale)\n\n # WebAssets\n _setup_webassets(app)\n\n # Auto-register all blueprints defined in the `views` folder\n _register_core_blueprints(app)\n _register_error_handler(app)\n\n # CSRF\n app.config['WTF_CSRF_FIELD_NAME'] = \"_csrf_token\"\n csrf.init_app(app)\n\n # Set up each IBlueprint extension as a Flask Blueprint\n _register_plugins_blueprints(app)\n\n if 
config.get(\"ckan.csrf_protection.ignore_extensions\"):\n log.warn(csrf_warn_extensions)\n _exempt_plugins_blueprints_from_csrf(csrf)\n\n lib_plugins.register_package_blueprints(app)\n lib_plugins.register_group_blueprints(app)\n\n # Start other middleware\n for plugin in PluginImplementations(IMiddleware):\n app = plugin.make_middleware(app, config)\n\n for plugin in PluginImplementations(IMiddleware):\n try:\n app = plugin.make_error_log_middleware(app, config)\n except AttributeError:\n log.critical('Middleware class {0} is missing the method'\n 'make_error_log_middleware.'\n .format(plugin.__class__.__name__))\n\n # Initialize flask-login\n login_manager = LoginManager()\n login_manager.init_app(app)\n # make anonymous_user an instance of CKAN custom class\n login_manager.anonymous_user = model.AnonymousUser\n # The name of the view to redirect to when the user needs to log in.\n login_manager.login_view = config.get(\"ckan.auth.login_view\")\n\n @login_manager.user_loader\n def load_user(user_id: str) -> Optional[\"model.User\"]: # type: ignore\n \"\"\"\n This callback function is called whenever we need to reload from\n the database the logged in user in the session (ie the cookie).\n\n Site maintainers can choose to completely ignore cookie based\n authentication for API calls, but that will break existing JS widgets\n that rely on API calls so it should be used with caution.\n \"\"\"\n endpoint = request.endpoint or \"\"\n is_api = endpoint.split(\".\")[0] == \"api\"\n if (\n not config.get(\"ckan.auth.enable_cookie_auth_in_api\")\n and is_api):\n return\n\n return model.User.get(user_id)\n\n @login_manager.request_loader\n def load_user_from_request(request): # type: ignore\n \"\"\"\n This callback function is called whenever a user could not be\n authenticated via the session cookie, so we fall back to the API token.\n \"\"\"\n g.login_via_auth_header = True\n\n user = _get_user_for_apitoken()\n\n return user\n\n # Update the main CKAN config object with the Flask specific keys\n # that were set here or autogenerated\n flask_config_keys = set(flask_app.config.keys()) - set(config.keys())\n for key in flask_config_keys:\n config[key] = flask_app.config[key]\n\n # Prevent the host from request to be added to the new header location.\n app = HostHeaderMiddleware(app)\n\n app = I18nMiddleware(app)\n\n if config.get('ckan.tracking_enabled'):\n app = TrackingMiddleware(app, config)\n\n # Add a reference to the actual Flask app so it's easier to access\n # type_ignore_reason: custom attribute\n app._wsgi_app = flask_app # type: ignore\n\n return app\n\n\ndef get_locale() -> str:\n u'''\n Return the value of the `CKAN_LANG` key of the WSGI environ,\n set by the I18nMiddleware based on the URL.\n If no value is defined, it defaults to `ckan.locale_default` or `en`.\n '''\n return request.environ.get(\n u'CKAN_LANG',\n config.get(u'ckan.locale_default'))\n\n\ndef set_remote_user_as_current_user_for_tests():\n '''This function exists to maintain backward compatibility\n for the `TESTS` of the `CKAN` extensions\n\n If `REMOTE_USER` is in the request environ we will try to get\n the user_obj from the DB, if there is an user_obj, we will set the\n `session['_user_id']` with that user_obj.id\n\n This way, `Flask-Login` will load the user from\n `session['_user_id']` and will set the `current_user`\n proxy for us behind the scene.\n '''\n if \"REMOTE_USER\" in request.environ:\n username = request.environ[\"REMOTE_USER\"]\n if isinstance(username, bytes):\n username = username.decode()\n\n 
userobj = model.User.get(username)\n if userobj:\n session[\"_user_id\"] = userobj.id\n\n\ndef ckan_before_request() -> Optional[Response]:\n u'''\n Common handler executed before all Flask requests\n\n If a response is returned by any of the functions called (\n currently ``identify_user()` only) any further processing of the\n request will be stopped and that response will be returned.\n\n '''\n response = None\n\n g.__timer = time.time()\n\n # Update app_globals\n app_globals.app_globals._check_uptodate()\n\n # This is needed for the TESTS of the CKAN extensions only!\n # we should remove it as soon as the maintainers of the\n # CKAN extensions change their tests according to the new changes.\n if config.get(\"testing\"):\n set_remote_user_as_current_user_for_tests()\n\n # Identify the user from the flask-login cookie or the API header\n # Sets g.user and g.userobj for extensions\n response = identify_user()\n\n # Disable CSRF protection if user was logged in via the Authorization\n # header\n if g.get(\"login_via_auth_header\"):\n # Get the actual view function, as it might not match the endpoint,\n # eg \"organization.edit\" -> \"group.edit\", or custom dataset types\n endpoint = request.endpoint or \"\"\n view = current_app.view_functions.get(endpoint)\n dest = f\"{view.__module__}.{view.__name__}\" # type: ignore\n csrf.exempt(dest)\n\n # Set the csrf_field_name so we can use it in our templates\n g.csrf_field_name = config.get(\"WTF_CSRF_FIELD_NAME\")\n\n # Provide g.controller and g.action for backward compatibility\n # with extensions\n set_controller_and_action()\n\n set_ckan_current_url(request.environ)\n\n return response\n\n\ndef ckan_after_request(response: Response) -> Response:\n u'''Common handler executed after all Flask requests'''\n\n # Dispose of the SQLALchemy session\n model.Session.remove()\n\n # Set CORS headers if necessary\n response = set_cors_headers_for_response(response)\n\n # Set Cache Control headers\n response = set_cache_control_headers_for_response(response)\n\n r_time = time.time() - g.__timer\n url = request.environ['PATH_INFO']\n status_code = response.status_code\n\n log.info(' %s %s render time %.3f seconds' % (status_code, url, r_time))\n\n return response\n\n\ndef helper_functions() -> dict[str, h.HelperAttributeDict]:\n u'''Make helper functions (`h`) available to Flask templates'''\n if not h.helper_functions:\n h.load_plugin_helpers()\n return dict(h=h.helper_functions)\n\n\ndef c_object() -> dict[str, LocalProxy]:\n u'''\n Expose `c` as an alias of `g` in templates for backwards compatibility\n '''\n return dict(c=g)\n\n\nclass CKAN_Rule(Rule): # noqa\n\n u'''Custom Flask url_rule_class.\n\n We use it to be able to flag routes defined in extensions as such\n '''\n\n def __init__(self, *args: Any, **kwargs: Any):\n self.ckan_core = True\n super(CKAN_Rule, self).__init__(*args, **kwargs)\n\n\nclass CKAN_AppCtxGlobals(_AppCtxGlobals): # noqa\n\n '''Custom Flask AppCtxGlobal class (flask.g).'''\n\n def __getattr__(self, name: str):\n '''\n If flask.g doesn't have attribute `name`, fall back to CKAN's\n app_globals object.\n If the key is also not found in there, an AttributeError will be raised\n '''\n return getattr(app_globals.app_globals, name)\n\n\nclass CKANFlask(MultiStaticFlask):\n\n '''Extend the Flask class with a special method called on incoming\n requests by AskAppDispatcherMiddleware.\n '''\n\n app_name: str = 'flask_app'\n static_folder: list[str]\n session_interface: SessionInterface\n\n def can_handle_request(\n self,\n environ: 
Any) -> Union[tuple[bool, str], tuple[bool, str, str]]:\n '''\n Decides whether it can handle a request with the Flask app by\n matching the request environ against the route mapper\n\n Returns (True, 'flask_app', origin) if this is the case.\n\n `origin` can be either 'core' or 'extension' depending on where\n the route was defined.\n '''\n urls = self.url_map.bind_to_environ(environ)\n\n try:\n rule, args = urls.match(return_rule=True)\n origin = 'core'\n if not getattr(rule, 'ckan_core', True):\n origin = 'extension'\n log.debug('Flask route match, endpoint: {0}, args: {1}, '\n 'origin: {2}'.format(rule.endpoint, args, origin))\n\n # Disable built-in flask's ability to prepend site root to\n # generated url, as we are going to use locale and existing\n # logic is not flexible enough for this purpose\n environ['SCRIPT_NAME'] = ''\n\n return (True, self.app_name, origin)\n except HTTPException:\n return (False, self.app_name)\n\n def register_extension_blueprint(self, blueprint: Blueprint,\n **kwargs: dict[str, Any]):\n '''\n This method should be used to register blueprints that come from\n extensions, so there's an opportunity to add extension-specific\n options.\n\n Sets the rule property `ckan_core` to False, to indicate that the rule\n applies to an extension route.\n '''\n self.register_blueprint(blueprint, **kwargs)\n\n # Get the new blueprint rules\n bp_rules = itertools.chain.from_iterable(\n v for k, v in self.url_map._rules_by_endpoint.items()\n if k.startswith(u'{0}.'.format(blueprint.name))\n )\n\n # This compare key will ensure the rule will be near the top.\n top_compare_key = False, -100, [(-2, 0)]\n for r in bp_rules:\n setattr(r, \"ckan_core\", False)\n setattr(r, \"match_compare_key\", lambda: top_compare_key)\n\n\ndef _register_plugins_blueprints(app: CKANApp):\n \"\"\" Resgister all blueprints defined in plugins by IBlueprint\n \"\"\"\n for plugin in PluginImplementations(IBlueprint):\n plugin_blueprints = plugin.get_blueprint()\n if isinstance(plugin_blueprints, list):\n for blueprint in plugin_blueprints:\n app.register_extension_blueprint(blueprint)\n else:\n app.register_extension_blueprint(plugin_blueprints)\n\n\ndef _exempt_plugins_blueprints_from_csrf(csrf: CSRFProtect):\n \"\"\"Exempt plugins blueprints from CSRF protection.\n\n This feature will be deprecated in future versions.\n \"\"\"\n for plugin in PluginImplementations(IBlueprint):\n plugin_blueprints = plugin.get_blueprint()\n if isinstance(plugin_blueprints, list):\n for blueprint in plugin_blueprints:\n csrf.exempt(blueprint)\n else:\n csrf.exempt(plugin_blueprints)\n\n\ndef _register_core_blueprints(app: CKANApp):\n u'''Register all blueprints defined in the `views` folder\n '''\n def is_blueprint(mm: Any):\n return isinstance(mm, Blueprint) and getattr(mm, 'auto_register', True)\n\n path = os.path.join(os.path.dirname(__file__), '..', '..', 'views')\n\n for loader, name, __ in pkgutil.iter_modules([path], 'ckan.views.'):\n # type_ignore_reason: incorrect external type declarations\n module = loader.find_module(name).load_module(name) # type: ignore\n for blueprint in inspect.getmembers(module, is_blueprint):\n app.register_blueprint(blueprint[1])\n log.debug(u'Registered core blueprint: {0!r}'.format(blueprint[0]))\n\n\ndef _register_error_handler(app: CKANApp):\n u'''Register error handler'''\n\n def error_handler(e: Exception) -> Union[\n tuple[str, Optional[int]], Optional[Response]\n ]:\n debug = config.get('debug')\n if isinstance(e, HTTPException):\n if debug:\n log.debug(e, 
exc_info=sys.exc_info) # type: ignore\n else:\n log.info(e)\n\n show_login_redirect_link = current_user.is_anonymous and type(\n e\n ) in (Unauthorized, Forbidden)\n extra_vars = {\n u'code': e.code,\n u'content': e.description,\n u'name': e.name,\n u'show_login_redirect_link': show_login_redirect_link\n }\n return base.render(\n u'error_document_template.html', extra_vars), e.code\n\n log.error(e, exc_info=sys.exc_info) # type: ignore\n extra_vars = {u'code': [500], u'content': u'Internal server error'}\n return base.render(u'error_document_template.html', extra_vars), 500\n\n for code in default_exceptions:\n app.register_error_handler(code, error_handler)\n if not app.debug and not app.testing:\n app.register_error_handler(Exception, error_handler)\n if config.get('email_to'):\n _setup_error_mail_handler(app)\n\n\ndef _setup_error_mail_handler(app: CKANApp):\n\n class ContextualFilter(logging.Filter):\n def filter(self, log_record: Any) -> bool:\n log_record.url = request.path\n log_record.method = request.method\n log_record.ip = request.environ.get(\"REMOTE_ADDR\")\n log_record.headers = request.headers\n return True\n\n smtp_server = config.get('smtp.server')\n mailhost = cast(\"tuple[str, int]\", tuple(smtp_server.split(':'))) \\\n if ':' in smtp_server else smtp_server\n credentials = None\n if config.get('smtp.user'):\n credentials = (\n config.get('smtp.user'),\n config.get('smtp.password')\n )\n secure = () if config.get('smtp.starttls') else None\n mail_handler = SMTPHandler(\n mailhost=mailhost,\n fromaddr=config.get('error_email_from'),\n toaddrs=[config.get('email_to')],\n subject='Application Error',\n credentials=credentials,\n secure=secure\n )\n\n mail_handler.setLevel(logging.ERROR)\n mail_handler.setFormatter(logging.Formatter('''\nTime: %(asctime)s\nURL: %(url)s\nMethod: %(method)s\nIP: %(ip)s\nHeaders: %(headers)s\n\n'''))\n\n context_provider = ContextualFilter()\n app.logger.addFilter(context_provider)\n app.logger.addHandler(mail_handler)\n\n\ndef _setup_webassets(app: CKANApp):\n app.use_x_sendfile = config.get('ckan.webassets.use_x_sendfile')\n\n webassets_folder = get_webassets_path()\n\n def webassets(path: str):\n return send_from_directory(webassets_folder, path)\n\n app.add_url_rule('/webassets/<path:path>', 'webassets.index', webassets)\n", "path": "ckan/config/middleware/flask_app.py"}], "after_files": [{"content": "# encoding: utf-8\nfrom __future__ import annotations\n\nimport os\nimport sys\nimport time\nimport inspect\nimport itertools\nimport pkgutil\nimport logging\n\nfrom logging.handlers import SMTPHandler\nfrom typing import Any, Iterable, Optional, Union, cast\n\nfrom flask import Blueprint, send_from_directory, current_app\nfrom flask.ctx import _AppCtxGlobals\nfrom flask.sessions import SessionInterface\n\nfrom werkzeug.exceptions import (\n default_exceptions,\n HTTPException,\n Unauthorized,\n Forbidden\n)\nfrom werkzeug.routing import Rule\nfrom werkzeug.local import LocalProxy\n\nfrom flask_babel import Babel\n\nfrom beaker.middleware import SessionMiddleware\nfrom flask_login import LoginManager\nfrom flask_wtf.csrf import CSRFProtect\nfrom ckan.common import CKANConfig, asbool, session, current_user\n\nimport ckan.model as model\nfrom ckan.lib import base\nfrom ckan.lib import helpers as h\nfrom ckan.lib import jinja_extensions\nfrom ckan.lib import uploader\nfrom ckan.lib import i18n\nfrom ckan.lib.flask_multistatic import MultiStaticFlask\nfrom ckan.common import config, g, request, ungettext\nfrom 
ckan.config.middleware.common_middleware import (TrackingMiddleware,\n HostHeaderMiddleware,\n RootPathMiddleware)\nimport ckan.lib.app_globals as app_globals\nimport ckan.lib.plugins as lib_plugins\nfrom ckan.lib.webassets_tools import get_webassets_path\n\nfrom ckan.plugins import PluginImplementations\nfrom ckan.plugins.interfaces import IBlueprint, IMiddleware, ITranslation\nfrom ckan.views import (identify_user,\n set_cors_headers_for_response,\n set_controller_and_action,\n set_cache_control_headers_for_response,\n handle_i18n,\n set_ckan_current_url,\n _get_user_for_apitoken,\n )\nfrom ckan.types import CKANApp, Config, Response\n\nlog = logging.getLogger(__name__)\n\ncsrf = CSRFProtect()\n\ncsrf_warn_extensions = (\n \"Extensions are excluded from CSRF protection! \"\n \"We allow extensions to run without CSRF protection \"\n \"but it will be forced future releases. \"\n \"Read the documentation for more information on how to add \"\n \"CSRF protection to your extension.\"\n )\n\n\nclass I18nMiddleware(object):\n def __init__(self, app: CKANApp):\n self.app = app\n\n def __call__(self, environ: Any, start_response: Any):\n\n handle_i18n(environ)\n return self.app(environ, start_response)\n\n\nclass CKANBabel(Babel):\n app: CKANApp\n\n def __init__(self, *pargs: Any, **kwargs: Any):\n super(CKANBabel, self).__init__(*pargs, **kwargs)\n self._i18n_path_idx = 0\n\n @property\n def domain(self) -> str:\n default = super(CKANBabel, self).domain\n multiple = self.app.config.get('BABEL_MULTIPLE_DOMAINS')\n if not multiple:\n return default\n domains = multiple.split(';')\n try:\n return domains[self._i18n_path_idx]\n except IndexError:\n return default\n\n @property\n def translation_directories(self) -> Iterable[str]:\n self._i18n_path_idx = 0\n for path in super(CKANBabel, self).translation_directories:\n yield path\n self._i18n_path_idx += 1\n\n\ndef _ungettext_alias():\n u'''\n Provide `ungettext` as an alias of `ngettext` for backwards\n compatibility\n '''\n return dict(ungettext=ungettext)\n\n\nclass BeakerSessionInterface(SessionInterface):\n def open_session(self, app: Any, request: Any):\n if 'beaker.session' in request.environ:\n return request.environ['beaker.session']\n\n def save_session(self, app: Any, session: Any, response: Any):\n session.save()\n\n\ndef make_flask_stack(conf: Union[Config, CKANConfig]) -> CKANApp:\n \"\"\" This has to pass the flask app through all the same middleware that\n Pylons used \"\"\"\n\n root = os.path.dirname(\n os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\n debug = asbool(conf.get('debug', conf.get('DEBUG', False)))\n testing = asbool(conf.get('testing', conf.get('TESTING', False)))\n app = flask_app = CKANFlask(__name__, static_url_path='')\n\n # Register storage for accessing group images, site logo, etc.\n storage_folder = []\n storage = uploader.get_storage_path()\n if storage:\n storage_folder = [os.path.join(storage, 'storage')]\n\n # Static files folders (core and extensions)\n public_folder = config.get(u'ckan.base_public_folder')\n app.static_folder = config.get(\n 'extra_public_paths'\n ).split(',') + config.get('plugin_public_paths', []) + [\n os.path.join(root, public_folder)\n ] + storage_folder\n\n app.jinja_options = jinja_extensions.get_jinja_env_options()\n app.jinja_env.policies['ext.i18n.trimmed'] = True\n\n app.debug = debug\n app.testing = testing\n app.template_folder = os.path.join(root, 'templates')\n app.app_ctx_globals_class = CKAN_AppCtxGlobals\n app.url_rule_class = CKAN_Rule\n\n # Update Flask 
config with the CKAN values. We use the common config\n # object as values might have been modified on `load_environment`\n if config:\n app.config.update(config)\n else:\n app.config.update(conf)\n\n # Do all the Flask-specific stuff before adding other middlewares\n\n # Secret key needed for flask-debug-toolbar and sessions\n if not app.config.get('SECRET_KEY'):\n app.config['SECRET_KEY'] = config.get('beaker.session.secret')\n if not app.config.get('SECRET_KEY'):\n raise RuntimeError(u'You must provide a value for the secret key'\n ' with the SECRET_KEY config option')\n\n root_path = config.get('ckan.root_path')\n if debug:\n from flask_debugtoolbar import DebugToolbarExtension\n app.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False\n debug_ext = DebugToolbarExtension()\n\n # register path that includes `ckan.site_root` before\n # initializing debug app. In such a way, our route receives\n # higher precedence.\n\n # TODO: After removal of Pylons code, switch to\n # `APPLICATION_ROOT` config value for flask application. Right\n # now it's a bad option because we are handling both pylons\n # and flask urls inside helpers and splitting this logic will\n # bring us tons of headache.\n if root_path:\n app.add_url_rule(\n root_path.replace('{{LANG}}', '').rstrip('/') +\n '/_debug_toolbar/static/<path:filename>',\n '_debug_toolbar.static', debug_ext.send_static_file\n )\n debug_ext.init_app(app)\n\n from werkzeug.debug import DebuggedApplication\n app.wsgi_app = DebuggedApplication(app.wsgi_app, True)\n\n namespace = 'beaker.session.'\n session_opts = {k.replace('beaker.', ''): v\n for k, v in config.items()\n if k.startswith(namespace)}\n if (not session_opts.get('session.data_dir') and\n session_opts.get('session.type', 'file') == 'file'):\n cache_dir = conf.get('cache_dir') or conf.get('cache.dir')\n session_opts['session.data_dir'] = '{data_dir}/sessions'.format(\n data_dir=cache_dir)\n\n app.wsgi_app = RootPathMiddleware(app.wsgi_app)\n app.wsgi_app = SessionMiddleware(app.wsgi_app, session_opts)\n app.session_interface = BeakerSessionInterface()\n\n # Add Jinja2 extensions and filters\n app.jinja_env.filters['empty_and_escape'] = \\\n jinja_extensions.empty_and_escape\n\n # Common handlers for all requests\n #\n # flask types do not mention that it's possible to return a response from\n # the `before_request` callback\n app.before_request(ckan_before_request)\n app.after_request(ckan_after_request)\n\n # Template context processors\n app.context_processor(helper_functions)\n app.context_processor(c_object)\n\n app.context_processor(_ungettext_alias)\n\n # Babel\n _ckan_i18n_dir = i18n.get_ckan_i18n_dir()\n\n pairs = [\n cast(\"tuple[str, str]\", (_ckan_i18n_dir, u'ckan'))\n ] + [\n (p.i18n_directory(), p.i18n_domain())\n for p in reversed(list(PluginImplementations(ITranslation)))\n ]\n\n i18n_dirs, i18n_domains = zip(*pairs)\n\n app.config[u'BABEL_TRANSLATION_DIRECTORIES'] = ';'.join(i18n_dirs)\n app.config[u'BABEL_DOMAIN'] = 'ckan'\n app.config[u'BABEL_MULTIPLE_DOMAINS'] = ';'.join(i18n_domains)\n app.config[u'BABEL_DEFAULT_TIMEZONE'] = str(h.get_display_timezone())\n\n babel = CKANBabel(app)\n\n babel.localeselector(get_locale)\n\n # WebAssets\n _setup_webassets(app)\n\n # Auto-register all blueprints defined in the `views` folder\n _register_core_blueprints(app)\n _register_error_handler(app)\n\n # CSRF\n wtf_key = \"WTF_CSRF_SECRET_KEY\"\n if not app.config.get(wtf_key):\n config[wtf_key] = app.config[wtf_key] = app.config[\"SECRET_KEY\"]\n app.config[\"WTF_CSRF_FIELD_NAME\"] = 
\"_csrf_token\"\n csrf.init_app(app)\n\n # Set up each IBlueprint extension as a Flask Blueprint\n _register_plugins_blueprints(app)\n\n if config.get(\"ckan.csrf_protection.ignore_extensions\"):\n log.warn(csrf_warn_extensions)\n _exempt_plugins_blueprints_from_csrf(csrf)\n\n lib_plugins.register_package_blueprints(app)\n lib_plugins.register_group_blueprints(app)\n\n # Start other middleware\n for plugin in PluginImplementations(IMiddleware):\n app = plugin.make_middleware(app, config)\n\n for plugin in PluginImplementations(IMiddleware):\n try:\n app = plugin.make_error_log_middleware(app, config)\n except AttributeError:\n log.critical('Middleware class {0} is missing the method'\n 'make_error_log_middleware.'\n .format(plugin.__class__.__name__))\n\n # Initialize flask-login\n login_manager = LoginManager()\n login_manager.init_app(app)\n # make anonymous_user an instance of CKAN custom class\n login_manager.anonymous_user = model.AnonymousUser\n # The name of the view to redirect to when the user needs to log in.\n login_manager.login_view = config.get(\"ckan.auth.login_view\")\n\n @login_manager.user_loader\n def load_user(user_id: str) -> Optional[\"model.User\"]: # type: ignore\n \"\"\"\n This callback function is called whenever we need to reload from\n the database the logged in user in the session (ie the cookie).\n\n Site maintainers can choose to completely ignore cookie based\n authentication for API calls, but that will break existing JS widgets\n that rely on API calls so it should be used with caution.\n \"\"\"\n endpoint = request.endpoint or \"\"\n is_api = endpoint.split(\".\")[0] == \"api\"\n if (\n not config.get(\"ckan.auth.enable_cookie_auth_in_api\")\n and is_api):\n return\n\n return model.User.get(user_id)\n\n @login_manager.request_loader\n def load_user_from_request(request): # type: ignore\n \"\"\"\n This callback function is called whenever a user could not be\n authenticated via the session cookie, so we fall back to the API token.\n \"\"\"\n g.login_via_auth_header = True\n\n user = _get_user_for_apitoken()\n\n return user\n\n # Update the main CKAN config object with the Flask specific keys\n # that were set here or autogenerated\n flask_config_keys = set(flask_app.config.keys()) - set(config.keys())\n for key in flask_config_keys:\n config[key] = flask_app.config[key]\n\n # Prevent the host from request to be added to the new header location.\n app = HostHeaderMiddleware(app)\n\n app = I18nMiddleware(app)\n\n if config.get('ckan.tracking_enabled'):\n app = TrackingMiddleware(app, config)\n\n # Add a reference to the actual Flask app so it's easier to access\n # type_ignore_reason: custom attribute\n app._wsgi_app = flask_app # type: ignore\n\n return app\n\n\ndef get_locale() -> str:\n u'''\n Return the value of the `CKAN_LANG` key of the WSGI environ,\n set by the I18nMiddleware based on the URL.\n If no value is defined, it defaults to `ckan.locale_default` or `en`.\n '''\n return request.environ.get(\n u'CKAN_LANG',\n config.get(u'ckan.locale_default'))\n\n\ndef set_remote_user_as_current_user_for_tests():\n '''This function exists to maintain backward compatibility\n for the `TESTS` of the `CKAN` extensions\n\n If `REMOTE_USER` is in the request environ we will try to get\n the user_obj from the DB, if there is an user_obj, we will set the\n `session['_user_id']` with that user_obj.id\n\n This way, `Flask-Login` will load the user from\n `session['_user_id']` and will set the `current_user`\n proxy for us behind the scene.\n '''\n if \"REMOTE_USER\" 
in request.environ:\n username = request.environ[\"REMOTE_USER\"]\n if isinstance(username, bytes):\n username = username.decode()\n\n userobj = model.User.get(username)\n if userobj:\n session[\"_user_id\"] = userobj.id\n\n\ndef ckan_before_request() -> Optional[Response]:\n u'''\n Common handler executed before all Flask requests\n\n If a response is returned by any of the functions called (\n currently ``identify_user()` only) any further processing of the\n request will be stopped and that response will be returned.\n\n '''\n response = None\n\n g.__timer = time.time()\n\n # Update app_globals\n app_globals.app_globals._check_uptodate()\n\n # This is needed for the TESTS of the CKAN extensions only!\n # we should remove it as soon as the maintainers of the\n # CKAN extensions change their tests according to the new changes.\n if config.get(\"testing\"):\n set_remote_user_as_current_user_for_tests()\n\n # Identify the user from the flask-login cookie or the API header\n # Sets g.user and g.userobj for extensions\n response = identify_user()\n\n # Disable CSRF protection if user was logged in via the Authorization\n # header\n if g.get(\"login_via_auth_header\"):\n # Get the actual view function, as it might not match the endpoint,\n # eg \"organization.edit\" -> \"group.edit\", or custom dataset types\n endpoint = request.endpoint or \"\"\n view = current_app.view_functions.get(endpoint)\n dest = f\"{view.__module__}.{view.__name__}\" # type: ignore\n csrf.exempt(dest)\n\n # Set the csrf_field_name so we can use it in our templates\n g.csrf_field_name = config.get(\"WTF_CSRF_FIELD_NAME\")\n\n # Provide g.controller and g.action for backward compatibility\n # with extensions\n set_controller_and_action()\n\n set_ckan_current_url(request.environ)\n\n return response\n\n\ndef ckan_after_request(response: Response) -> Response:\n u'''Common handler executed after all Flask requests'''\n\n # Dispose of the SQLALchemy session\n model.Session.remove()\n\n # Set CORS headers if necessary\n response = set_cors_headers_for_response(response)\n\n # Set Cache Control headers\n response = set_cache_control_headers_for_response(response)\n\n r_time = time.time() - g.__timer\n url = request.environ['PATH_INFO']\n status_code = response.status_code\n\n log.info(' %s %s render time %.3f seconds' % (status_code, url, r_time))\n\n return response\n\n\ndef helper_functions() -> dict[str, h.HelperAttributeDict]:\n u'''Make helper functions (`h`) available to Flask templates'''\n if not h.helper_functions:\n h.load_plugin_helpers()\n return dict(h=h.helper_functions)\n\n\ndef c_object() -> dict[str, LocalProxy]:\n u'''\n Expose `c` as an alias of `g` in templates for backwards compatibility\n '''\n return dict(c=g)\n\n\nclass CKAN_Rule(Rule): # noqa\n\n u'''Custom Flask url_rule_class.\n\n We use it to be able to flag routes defined in extensions as such\n '''\n\n def __init__(self, *args: Any, **kwargs: Any):\n self.ckan_core = True\n super(CKAN_Rule, self).__init__(*args, **kwargs)\n\n\nclass CKAN_AppCtxGlobals(_AppCtxGlobals): # noqa\n\n '''Custom Flask AppCtxGlobal class (flask.g).'''\n\n def __getattr__(self, name: str):\n '''\n If flask.g doesn't have attribute `name`, fall back to CKAN's\n app_globals object.\n If the key is also not found in there, an AttributeError will be raised\n '''\n return getattr(app_globals.app_globals, name)\n\n\nclass CKANFlask(MultiStaticFlask):\n\n '''Extend the Flask class with a special method called on incoming\n requests by AskAppDispatcherMiddleware.\n '''\n\n 
app_name: str = 'flask_app'\n static_folder: list[str]\n session_interface: SessionInterface\n\n def can_handle_request(\n self,\n environ: Any) -> Union[tuple[bool, str], tuple[bool, str, str]]:\n '''\n Decides whether it can handle a request with the Flask app by\n matching the request environ against the route mapper\n\n Returns (True, 'flask_app', origin) if this is the case.\n\n `origin` can be either 'core' or 'extension' depending on where\n the route was defined.\n '''\n urls = self.url_map.bind_to_environ(environ)\n\n try:\n rule, args = urls.match(return_rule=True)\n origin = 'core'\n if not getattr(rule, 'ckan_core', True):\n origin = 'extension'\n log.debug('Flask route match, endpoint: {0}, args: {1}, '\n 'origin: {2}'.format(rule.endpoint, args, origin))\n\n # Disable built-in flask's ability to prepend site root to\n # generated url, as we are going to use locale and existing\n # logic is not flexible enough for this purpose\n environ['SCRIPT_NAME'] = ''\n\n return (True, self.app_name, origin)\n except HTTPException:\n return (False, self.app_name)\n\n def register_extension_blueprint(self, blueprint: Blueprint,\n **kwargs: dict[str, Any]):\n '''\n This method should be used to register blueprints that come from\n extensions, so there's an opportunity to add extension-specific\n options.\n\n Sets the rule property `ckan_core` to False, to indicate that the rule\n applies to an extension route.\n '''\n self.register_blueprint(blueprint, **kwargs)\n\n # Get the new blueprint rules\n bp_rules = itertools.chain.from_iterable(\n v for k, v in self.url_map._rules_by_endpoint.items()\n if k.startswith(u'{0}.'.format(blueprint.name))\n )\n\n # This compare key will ensure the rule will be near the top.\n top_compare_key = False, -100, [(-2, 0)]\n for r in bp_rules:\n setattr(r, \"ckan_core\", False)\n setattr(r, \"match_compare_key\", lambda: top_compare_key)\n\n\ndef _register_plugins_blueprints(app: CKANApp):\n \"\"\" Resgister all blueprints defined in plugins by IBlueprint\n \"\"\"\n for plugin in PluginImplementations(IBlueprint):\n plugin_blueprints = plugin.get_blueprint()\n if isinstance(plugin_blueprints, list):\n for blueprint in plugin_blueprints:\n app.register_extension_blueprint(blueprint)\n else:\n app.register_extension_blueprint(plugin_blueprints)\n\n\ndef _exempt_plugins_blueprints_from_csrf(csrf: CSRFProtect):\n \"\"\"Exempt plugins blueprints from CSRF protection.\n\n This feature will be deprecated in future versions.\n \"\"\"\n for plugin in PluginImplementations(IBlueprint):\n plugin_blueprints = plugin.get_blueprint()\n if isinstance(plugin_blueprints, list):\n for blueprint in plugin_blueprints:\n csrf.exempt(blueprint)\n else:\n csrf.exempt(plugin_blueprints)\n\n\ndef _register_core_blueprints(app: CKANApp):\n u'''Register all blueprints defined in the `views` folder\n '''\n def is_blueprint(mm: Any):\n return isinstance(mm, Blueprint) and getattr(mm, 'auto_register', True)\n\n path = os.path.join(os.path.dirname(__file__), '..', '..', 'views')\n\n for loader, name, __ in pkgutil.iter_modules([path], 'ckan.views.'):\n # type_ignore_reason: incorrect external type declarations\n module = loader.find_module(name).load_module(name) # type: ignore\n for blueprint in inspect.getmembers(module, is_blueprint):\n app.register_blueprint(blueprint[1])\n log.debug(u'Registered core blueprint: {0!r}'.format(blueprint[0]))\n\n\ndef _register_error_handler(app: CKANApp):\n u'''Register error handler'''\n\n def error_handler(e: Exception) -> Union[\n tuple[str, 
Optional[int]], Optional[Response]\n ]:\n debug = config.get('debug')\n if isinstance(e, HTTPException):\n if debug:\n log.debug(e, exc_info=sys.exc_info) # type: ignore\n else:\n log.info(e)\n\n show_login_redirect_link = current_user.is_anonymous and type(\n e\n ) in (Unauthorized, Forbidden)\n extra_vars = {\n u'code': e.code,\n u'content': e.description,\n u'name': e.name,\n u'show_login_redirect_link': show_login_redirect_link\n }\n return base.render(\n u'error_document_template.html', extra_vars), e.code\n\n log.error(e, exc_info=sys.exc_info) # type: ignore\n extra_vars = {u'code': [500], u'content': u'Internal server error'}\n return base.render(u'error_document_template.html', extra_vars), 500\n\n for code in default_exceptions:\n app.register_error_handler(code, error_handler)\n if not app.debug and not app.testing:\n app.register_error_handler(Exception, error_handler)\n if config.get('email_to'):\n _setup_error_mail_handler(app)\n\n\ndef _setup_error_mail_handler(app: CKANApp):\n\n class ContextualFilter(logging.Filter):\n def filter(self, log_record: Any) -> bool:\n log_record.url = request.path\n log_record.method = request.method\n log_record.ip = request.environ.get(\"REMOTE_ADDR\")\n log_record.headers = request.headers\n return True\n\n smtp_server = config.get('smtp.server')\n mailhost = cast(\"tuple[str, int]\", tuple(smtp_server.split(':'))) \\\n if ':' in smtp_server else smtp_server\n credentials = None\n if config.get('smtp.user'):\n credentials = (\n config.get('smtp.user'),\n config.get('smtp.password')\n )\n secure = () if config.get('smtp.starttls') else None\n mail_handler = SMTPHandler(\n mailhost=mailhost,\n fromaddr=config.get('error_email_from'),\n toaddrs=[config.get('email_to')],\n subject='Application Error',\n credentials=credentials,\n secure=secure\n )\n\n mail_handler.setLevel(logging.ERROR)\n mail_handler.setFormatter(logging.Formatter('''\nTime: %(asctime)s\nURL: %(url)s\nMethod: %(method)s\nIP: %(ip)s\nHeaders: %(headers)s\n\n'''))\n\n context_provider = ContextualFilter()\n app.logger.addFilter(context_provider)\n app.logger.addHandler(mail_handler)\n\n\ndef _setup_webassets(app: CKANApp):\n app.use_x_sendfile = config.get('ckan.webassets.use_x_sendfile')\n\n webassets_folder = get_webassets_path()\n\n def webassets(path: str):\n return send_from_directory(webassets_folder, path)\n\n app.add_url_rule('/webassets/<path:path>', 'webassets.index', webassets)\n", "path": "ckan/config/middleware/flask_app.py"}]} |