Remove extra please Please -= 1
@@ -374,5 +374,5 @@ class AdventOfCode(commands.Cog): async def cog_command_error(self, ctx: commands.Context, error: Exception) -> None: """Custom error handler if an advent of code command was posted in the wrong channel.""" if isinstance(error, InChannelCheckFailure): - await ctx.send(f":x: Please use <#{Channels.advent_of_code_commands}> for aoc commands instead, please.") + await ctx.send(f":x: Please use <#{Channels.advent_of_code_commands}> for aoc commands instead.") error.handled = True
Fix port command for SDK >0.9.10 The port functional test cannot pass in my local environment. When 'dns_assignment' is None, the port create and show commands fail because the parameter for 'utils.format_list_of_dicts' cannot be None.
@@ -35,6 +35,10 @@ def _format_admin_state(state): return 'UP' if state else 'DOWN' +def _format_dns_assignment(dns_assignment): + return utils.format_list_of_dicts(dns_assignment) \ + if dns_assignment else None + _formatters = { 'admin_state_up': _format_admin_state, 'is_admin_state_up': _format_admin_state, @@ -43,7 +47,7 @@ _formatters = { 'binding_vif_details': utils.format_dict, 'binding:profile': utils.format_dict, 'binding:vif_details': utils.format_dict, - 'dns_assignment': utils.format_list_of_dicts, + 'dns_assignment': _format_dns_assignment, 'extra_dhcp_opts': utils.format_list_of_dicts, 'fixed_ips': utils.format_list_of_dicts, 'security_group_ids': utils.format_list,
test_manifest_config_properties: use assertEqual The method assertEquals is a deprecated alias for assertEqual. See: Tested-by: Daniel Kutik
@@ -480,13 +480,13 @@ class ManifestPropertiesFetchedCorrectly(unittest.TestCase): self.assertFalse(fakeproj.partial_clone) fakeproj.config.SetString('repo.depth', '48') - self.assertEquals(fakeproj.depth, '48') + self.assertEqual(fakeproj.depth, '48') fakeproj.config.SetString('repo.clonefilter', 'blob:limit=10M') - self.assertEquals(fakeproj.clone_filter, 'blob:limit=10M') + self.assertEqual(fakeproj.clone_filter, 'blob:limit=10M') fakeproj.config.SetString('repo.partialcloneexclude', 'third_party/big_repo') - self.assertEquals(fakeproj.partial_clone_exclude, 'third_party/big_repo') + self.assertEqual(fakeproj.partial_clone_exclude, 'third_party/big_repo') fakeproj.config.SetString('manifest.platform', 'auto') - self.assertEquals(fakeproj.manifest_platform, 'auto') + self.assertEqual(fakeproj.manifest_platform, 'auto')
Fix seq2reward test Summary: Diff (https://github.com/facebookresearch/ReAgent/commit/9b25610ec10bb092a0b65726c6edcc91fe668238) swapped out uses of FullyConnected (which takes a tensor as input) with FloatFeatureFullyConnected (which takes FeatureData as input). This broke an assumption made in the predictor wrapper.
@@ -859,6 +859,5 @@ class CompressModelWithPreprocessor(DiscreteDqnWithPreprocessor): state_feature_data = serving_to_feature_data( state, self.state_preprocessor, self.sparse_preprocessor ) - # TODO: model is a fully connected network which only takes in Tensor now. - q_values = self.model(state_feature_data.float_features) + q_values = self.model(state_feature_data) return q_values
Use `None` instead of mutable `[]` default argument See
@@ -76,7 +76,7 @@ class Environment: def __init__( self, *, - user_classes=[], + user_classes=None, shape_class=None, tags=None, exclude_tags=None, @@ -92,7 +92,7 @@ class Environment: else: self.events = Events() - self.user_classes = user_classes + self.user_classes = user_classes or [] self.shape_class = shape_class self.tags = tags self.exclude_tags = exclude_tags
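Editor's note: the pitfall fixed above is generic Python behavior, not Locust-specific. Default argument values are evaluated once, at function definition time, so a mutable default like `[]` is shared across every call. A minimal illustration with hypothetical names:

```python
def append_bad(item, bucket=[]):
    # The single default list is created once and reused, so
    # items leak across unrelated calls.
    bucket.append(item)
    return bucket

def append_good(item, bucket=None):
    # None as a sentinel: a fresh list is created per call.
    bucket = bucket if bucket is not None else []
    bucket.append(item)
    return bucket

print(append_bad(1))   # [1]
print(append_bad(2))   # [1, 2]  <- shared state from the first call
print(append_good(1))  # [1]
print(append_good(2))  # [2]
```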
Replicas: remove whitespaces from geoip cache key Memcached doesn't work with spaces in keys.
@@ -138,7 +138,7 @@ def __get_distance(se1, client_location, ignore_error): # does not cache ignore_error, str.lower on hostnames/ips is fine canonical_parties = list(map(lambda x: str(x).lower(), [se1, client_location['ip'], client_location.get('latitude', ''), client_location.get('longitude', '')])) canonical_parties.sort() - cache_key = f'replica_sorter:__get_distance|site_distance|{"".join(canonical_parties)}' + cache_key = f'replica_sorter:__get_distance|site_distance|{"".join(canonical_parties)}'.replace(' ', '.') cache_val = REGION.get(cache_key) if cache_val is NO_VALUE: try:
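The constraint behind this fix is that memcached rejects keys containing spaces or control characters and caps key length at 250 bytes. A hedged sketch of a generic key sanitizer along those lines (not the Rucio implementation; the hash fallback for oversized keys is an extra safeguard):

```python
import hashlib

def safe_cache_key(raw_key: str, max_len: int = 250) -> str:
    # Memcached keys may not contain whitespace and are capped at
    # 250 bytes; replace spaces and hash oversized keys.
    key = raw_key.replace(' ', '.')
    if len(key.encode('utf-8')) > max_len:
        key = hashlib.sha256(key.encode('utf-8')).hexdigest()
    return key

print(safe_cache_key('replica_sorter:site distance|CERN PROD'))
# replica_sorter:site.distance|CERN.PROD
```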
Lexical env: don't allocate Env_Rebindings_Type for empty rebindings TN:
@@ -151,6 +151,12 @@ package body Langkit_Support.Lexical_Env is ------------ function Create (Bindings : Env_Rebindings_Array) return Env_Rebindings is + begin + if Bindings'Length = 0 then + return null; + end if; + + declare Result : constant Env_Rebindings := new Env_Rebindings_Type' (Size => Bindings'Length, Rebindings => Bindings, @@ -160,6 +166,7 @@ package body Langkit_Support.Lexical_Env is Inc_Ref (R); end loop; return Result; + end; end Create; -------------
adding unmatch/match methods from video:Movie and video:Show classes to base:PlexPartialObject. Minor improvements to the matches method, thanks to . Matching can be done for artists, albums, shows, and movies; all other media types safely return an empty list [].
@@ -429,6 +429,43 @@ class PlexPartialObject(PlexObject): """ return self._server.history(maxresults=maxresults, mindate=mindate, ratingKey=self.ratingKey) + def unmatch(self): + """ Unmatches show object. """ + key = '/library/metadata/%s/unmatch' % self.ratingKey + self._server.query(key, method=self._server._session.put) + + def matches(self, auto=True, agent=None, title=None, year=None, language=None): + """ Return list of show metadata matches from library agent. """ + key = '/library/metadata/%s/matches' % self.ratingKey + if not auto: + params = {'manual': 1, + 'title': title or self.title, + 'year': year or self.year if self.section().type != 'artist' else '', + 'language': language or self.section().language} + if agent: + agents = self._server.agents() + match_agent = next((ag for ag in agents if ag.shortIdentifier == agent), None) + if match_agent: + params['agent'] = match_agent.identifier + else: + raise NotFound('Couldnt find "%s" in agents list (%s)' % + (agent, ','.join(agents.keys()))) + else: + params['agent'] = self.section().agent + + key = key + '?' + urlencode(params) + data = self._server.query(key, method=self._server._session.get) + return self.findItems(data) + + def fixMatch(self, searchResult): + """ Use match result to update show metadata. """ + key = '/library/metadata/%s/match' % self.ratingKey + params = {'guid': searchResult.guid, + 'name': searchResult.name} + + data = key + '?' + urlencode(params) + self._server.query(data, method=self._server._session.put) + # The photo tag cant be built atm. TODO # def arts(self): # part = '%s/arts' % self.key
Explicitly install bazel version 0.15.0 Until [this][1] issue is resolved, [@drigz's suggestion][2] works for now. [1]: [2]:
@@ -47,6 +47,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ rm -rf /var/lib/apt/lists/* # Install bazel +ENV BAZEL_VERSION=0.15.0 RUN apt-get update && apt-get install -y python-software-properties zip && \ echo "deb http://ppa.launchpad.net/webupd8team/java/ubuntu precise main" | tee -a /etc/apt/sources.list && \ echo "deb-src http://ppa.launchpad.net/webupd8team/java/ubuntu precise main" | tee -a /etc/apt/sources.list && \ @@ -55,10 +56,12 @@ RUN apt-get update && apt-get install -y python-software-properties zip && \ echo debconf shared/accepted-oracle-license-v1-1 select true | debconf-set-selections && \ echo debconf shared/accepted-oracle-license-v1-1 seen true | debconf-set-selections && \ apt-get install -y oracle-java8-installer && \ - echo "deb [arch=amd64] http://storage.googleapis.com/bazel-apt stable jdk1.8" | tee /etc/apt/sources.list.d/bazel.list && \ - curl https://bazel.build/bazel-release.pub.gpg | apt-key add - && \ - apt-get update && apt-get install -y bazel && \ - apt-get upgrade -y bazel + apt-get install -y --no-install-recommends \ + bash-completion \ + zlib1g-dev && \ + curl -LO "https://github.com/bazelbuild/bazel/releases/download/${BAZEL_VERSION}/bazel_${BAZEL_VERSION}-linux-x86_64.deb" && \ + dpkg -i bazel_*.deb && \ + rm bazel_*.deb # Tensorflow doesn't support python 3.7 yet. See https://github.com/tensorflow/tensorflow/issues/20517 RUN conda install -y python=3.6.6 && \
langkit.caching: minor reformatting TN:
@@ -5,8 +5,8 @@ import json class Cache(object): - - """General purpose content cache. + """ + General purpose content cache. Generating and building libraries can be quite long. This cache class is an attempt to reduce the time to do this.
fix: Website URL parsing function parses absolute telephone/phone tel: URLs as relative

* Update utils.py: tel: URLs should be parsed as absolute paths
* fix: Parsing telephone/phone tel: URLs as absolute
@@ -166,6 +166,8 @@ def abs_url(path): return if path.startswith('http://') or path.startswith('https://'): return path + if path.startswith('tel:'): + return path if path.startswith('data:'): return path if not path.startswith("/"):
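The fix above adds one more scheme to a chain of prefix checks. A more general alternative, sketched here under the assumption that any scheme-qualified path should be returned untouched, is to let `urllib.parse` detect the scheme (an illustration, not the project's actual helper):

```python
from urllib.parse import urlparse

def is_absolute_url(path: str) -> bool:
    # http:, https:, tel:, data:, mailto:, ... all carry a scheme;
    # relative paths like '/about' or 'contact' do not.
    return bool(urlparse(path).scheme)

for p in ('tel:+1-555-0100', 'data:text/plain,hi', '/about', 'contact'):
    print(p, '->', is_absolute_url(p))
```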
Update routing.py Attempt to fix incompatibility with Windows
@@ -307,11 +307,11 @@ class StaticRouter(SinkRouter): api = self.route.get('api', hug.api.from_object(api_function)) for base_url in self.route.get('urls', ("/{0}".format(api_function.__name__), )): def read_file(request=None, path=""): - filename = os.path.normpath(path.lstrip("/")) - if filename.startswith('../'): - hug.redirect.not_found() + filename = path.lstrip("/") for directory in directories: path = os.path.join(directory, filename) + if not path.startswith(directory): + hug.redirect.not_found() if os.path.isdir(path): new_path = os.path.join(path, "index.html") if os.path.exists(new_path) and os.path.isfile(new_path):
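The rewrite above checks `path.startswith(directory)` after joining, to block traversal out of the served directories. As a general pattern, normalizing and then testing containment with `os.path.commonpath` is stricter, since a bare `startswith` can be fooled by sibling names (`/srv/static-secret` vs `/srv/static`). A hedged sketch of that alternative, not hug's code:

```python
import os
from typing import Optional

def resolve_safely(directory: str, requested: str) -> Optional[str]:
    base = os.path.abspath(directory)
    candidate = os.path.abspath(os.path.join(base, requested.lstrip('/')))
    # abspath collapses '..' segments; anything that lands outside
    # `base` after normalization is rejected.
    if os.path.commonpath([base, candidate]) != base:
        return None
    return candidate

print(resolve_safely('/srv/static', 'css/app.css'))    # /srv/static/css/app.css
print(resolve_safely('/srv/static', '../etc/passwd'))  # None
```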
[hail/fs] fix use of Semaphore in router fs I do not think this ever worked, my bad!
@@ -183,8 +183,7 @@ class RouterFS(FS): async def _copy(): sema = asyncio.Semaphore(max_simultaneous_transfers) - async with sema: - await Copier.copy(self.afs, asyncio.Semaphore, transfer) + await Copier.copy(self.afs, sema, transfer) return async_to_blocking(_copy()) def exists(self, path: str) -> bool:
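The bug was passing the `asyncio.Semaphore` class itself where an instance was expected, so the semaphore never actually bounded concurrency. The intended pattern, as a self-contained sketch (hypothetical worker, not Hail's Copier):

```python
import asyncio

async def transfer(sema: asyncio.Semaphore, i: int) -> None:
    async with sema:  # at most N workers hold the semaphore at once
        await asyncio.sleep(0.1)
        print(f'transfer {i} done')

async def main() -> None:
    sema = asyncio.Semaphore(3)  # max_simultaneous_transfers
    # Pass the instance, not the asyncio.Semaphore class.
    await asyncio.gather(*(transfer(sema, i) for i in range(10)))

asyncio.run(main())
```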
cephadm-adopt: use custom dashboard images cephadm uses default values for the dashboard container images, which need to be customized by ansible for upstream or downstream purposes. This feature wasn't present when cephadm-adopt.yml was designed. Also set the container_image_base variable for upgrade purposes.
run_once: true delegate_to: '{{ groups[mon_group_name][0] }}' + - name: set container image base in ceph configuration + command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_base {{ ceph_docker_registry }}/{{ ceph_docker_image }}" + changed_when: false + run_once: true + delegate_to: '{{ groups[mon_group_name][0] }}' + + - name: set dashboard container image in ceph mgr configuration + when: dashboard_enabled | bool + run_once: true + block: + - name: set alertmanager container image in ceph configuration + command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_alertmanager {{ alertmanager_container_image }}" + changed_when: false + delegate_to: '{{ groups[mon_group_name][0] }}' + + - name: set grafana container image in ceph configuration + command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_grafana {{ grafana_container_image }}" + changed_when: false + delegate_to: '{{ groups[mon_group_name][0] }}' + + - name: set node-exporter container image in ceph configuration + command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_node_exporter {{ node_exporter_container_image }}" + changed_when: false + delegate_to: '{{ groups[mon_group_name][0] }}' + + - name: set prometheus container image in ceph configuration + command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_prometheus {{ prometheus_container_image }}" + changed_when: false + delegate_to: '{{ groups[mon_group_name][0] }}' + - name: manage nodes with cephadm command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} orch host add {{ hostvars[item]['ansible_hostname'] }} {{ hostvars[item]['ansible_default_ipv4']['address'] }} {{ hostvars[item]['group_names'] | join(' ') }}" changed_when: false
$.Rewriting_Implementation: stop depending on $.Introspection This dependency from implementation code on public API code is unnecessary and prevents us from disabling Ada API generation. TN:
@@ -7,7 +7,6 @@ with System; with ${ada_lib_name}.Common; use ${ada_lib_name}.Common; use ${ada_lib_name}.Common.Token_Data_Handlers; with ${ada_lib_name}.Implementation; -with ${ada_lib_name}.Introspection; use ${ada_lib_name}.Introspection; with ${ada_lib_name}.Lexer_Implementation; use ${ada_lib_name}.Lexer_Implementation; @@ -926,8 +925,8 @@ package body ${ada_lib_name}.Rewriting_Implementation is return Create_Token_Node (Handle, Kind, ""); else declare - Refs : constant Field_Reference_Array := Fields (Kind); - Children : constant Node_Rewriting_Handle_Array (Refs'Range) := + Count : constant Integer := Kind_To_Node_Children_Count (Kind); + Children : constant Node_Rewriting_Handle_Array (1 .. Count) := (others => No_Node_Rewriting_Handle); begin return Create_Regular_Node (Handle, Kind, Children);
Fix bug in 2.3 CPE string construction logic. Use the cpe part as-is. Fixes
@@ -2168,7 +2168,7 @@ class ImageCpe(Base): "-", "-", ] - final_cpe[2] = self.cpetype[1] + final_cpe[2] = self.cpetype final_cpe[3] = self.vendor final_cpe[4] = self.name final_cpe[5] = self.version
add release notes for 0.7.16 Test Plan: inspection Reviewers: sashank, nate
- `dagster_spark.create_spark_solid` now accepts a `required_resource_keys` argument, which enables setting up a step launcher for Spark solids, like the `emr_pyspark_step_launcher`. -## 0.7.15 (Latest) +## 0.7.16 (Latest) + +**Bugfix** + +- Enabled `NoOpComputeLogManager` to be configured as the `compute_logs` implementation in `dagster.yaml` +- Suppressed noisy error messages in logs from skipped steps + +## 0.7.15 **New** create PipelineRun entries simultaneously. - Fixed an issue with schedules that had invalid config not logging the appropriate error. - ## 0.7.13 **Breaking Changes**
Allow filtering ES forms by case_id Usage: FormsES().updating_cases(['case_id_1', 'case_id_2'])
@@ -29,6 +29,7 @@ class FormES(HQESQuery): user_type, user_ids_handle_unknown, j2me_submissions, + updating_cases, ] + super(FormES, self).builtin_filters def user_aggregation(self): @@ -100,3 +101,9 @@ def j2me_submissions(gt=None, gte=None, lt=None, lte=None): filters.regexp("form.meta.appVersion", "v2+.[0-9]+.*"), submitted(gt, gte, lt, lte) ) + + +def updating_cases(case_ids): + """return only those forms that have case blocks that touch the cases listed in `case_ids` + """ + return filters.term("__retrieved_case_ids", case_ids)
add a fallback for people using clang 5.0 to use cindex40.py Refers to . Should be done properly as soon as clang 5.0 is officially out.
@@ -32,7 +32,8 @@ cindex_dict = { '3.7': PKG_NAME + ".plugin.clang.cindex37", '3.8': PKG_NAME + ".plugin.clang.cindex38", '3.9': PKG_NAME + ".plugin.clang.cindex39", - '4.0': PKG_NAME + ".plugin.clang.cindex40" + '4.0': PKG_NAME + ".plugin.clang.cindex40", + '5.0': PKG_NAME + ".plugin.clang.cindex40" }
Fix Alcatel.AOS.7302 pattern prompt HG-- branch : feature/microservices
@@ -14,6 +14,6 @@ from noc.core.profile.base import BaseProfile class Profile(BaseProfile): name = "Alcatel.7302" - pattern_prompt = r"^leg:.+#" + pattern_prompt = r"^(typ:|leg:|)\S+(>|#)" command_save_config = "admin software-mngt shub database save" command_exit = "logout"
removing Buscador [Buscador VM](https://inteltechniques.com/buscador) is no longer supported by its creator (Michael Bazzell)
@@ -4,7 +4,6 @@ Open-source intelligence (OSINT) is data collected from open source and publicly ## Passive Recon Tools: - [AMass](https://github.com/OWASP/Amass) -- [Buscador VM](https://inteltechniques.com/buscador) - [Exiftool](https://www.sno.phy.queensu.ca/~phil/exiftool/) - [ExtractMetadata](http://www.extractmetadata.com) - [Findsubdomains](https://findsubdomains.com/)
disable display setup script This script appears to be a problem for lightdm on Raspbian Buster
@@ -52,6 +52,12 @@ if [ "$1" = "1" ] || [ "$1" = "on" ]; then # set user pi user for autostart sudo sed -i 's/^autologin-user=.*/autologin-user=pi/g' /etc/lightdm/lightdm.conf + + # disable display-setup script + if grep -Eq "^display-setup-script=" /etc/lightdm/lightdm.conf; then + sed -i -E 's/^(display-setup-script=.*)/#\1/' /etc/lightdm/lightdm.conf + fi + sudo sed -i 's/--autologin root/--autologin pi/' /etc/systemd/system/[email protected]/autologin.conf sudo sed -i 's/--autologin admin/--autologin pi/' /etc/systemd/system/[email protected]/autologin.conf
Temporarily disable macOS stock Python Travis build It's failing with InterpreterNotFound
@@ -8,32 +8,32 @@ env: - NEWEST_PYTHON=3.7 python: # <https://docs.travis-ci.com/user/languages/python/> - - 2.7 - # Python 3.4 fails installing packages # <https://travis-ci.org/jakubroztocil/httpie/jobs/403263566#L636> # - 3.4 - - 3.5 - 3.6 # - 3.7 # is done in the matrix below as described in travis-ci/travis-ci#9069 - pypy - # pypy3 currently fails because of a Flask issue # - pypy3 cache: pip matrix: include: - # Add manually defined OS X builds + + # Manually defined macOS builds # <https://docs.travis-ci.com/user/multi-os/#Python-example-(unsupported-languages)> - - os: osx - language: generic - env: - # Stock OSX Python - - TOXENV=py27-osx-builtin - - BREW_PYTHON_PACKAGE= + + # FIXME: stock macOS python fails with InterpreterNotFound + # <https://travis-ci.org/jakubroztocil/httpie/jobs/578189209> + # - os: osx + # language: generic + # env: + # # Stock macOS Python + # - TOXENV=py27-osx-builtin + # - BREW_PYTHON_PACKAGE= - os: osx language: generic env: @@ -46,16 +46,22 @@ matrix: # Latest Python 3.x from Homebrew - TOXENV=py37 # <= needs to be kept up-to-date to reflect latest minor version - BREW_PYTHON_PACKAGE=python@3 + + # Travis Python 3.7 must run sudo on - os: linux python: 3.7 env: TOXENV=py37 sudo: true # Required for Python 3.7 dist: xenial # Required for Python 3.7 + + # Add a codestyle-only build - os: linux python: 3.6 env: CODESTYLE_ONLY=true + + install: - | if [[ $TRAVIS_OS_NAME == 'osx' ]]; then
[cffLib.specializer] Fix bug introduced in . Test case (which apparently is not covered by our current tests!): ./fonttools cffLib.specializer 1 2 3 4 5 0 rrcurveto
@@ -461,7 +461,7 @@ def specializeCommands(commands, # Swap last two args order args = args[:-2]+args[-1:]+args[-2:-1] else: # hhcurveto / vvcurveto - if op[0] == 'h': # hhcurveto + if op0 == 'h': # hhcurveto # Swap first two args order args = args[1:2]+args[:1]+args[2:]
GDB helpers: materialize explicit arguments and Self as bindings TN:
@@ -26,10 +26,15 @@ ${"overriding" if property.overriding else ""} function ${property.name} is use type AST_Envs.Lexical_Env; + % for arg in property.explicit_arguments: + ${gdb_helper('bind', arg.name.lower, arg.name.camel_with_underscores)} + % endfor + ## We declare a variable Self, that has the named class wide access type ## that we can use to dispatch on other properties and all. Self : ${Self.type.name()} := ${Self.type.name()} (${property.self_arg_name}); + ${gdb_helper('bind', 'self', 'Self')} % if property.has_implicit_env: ## Properties are evaluated in the context of a lexical environment. If
Preliminary Game -> Activity changes NOTE - this requires an update to discord.py!! If you get change_presence() errors *after* this commit, make sure you are running the newest discord.py rewrite.
@@ -184,16 +184,16 @@ class Bot: server_embed.add_field(name="Owners", value=owners, inline=True) server_embed.add_field(name="Prefixes", value=prefix, inline=True) server_embed.add_field(name="Status", value=status_text, inline=True) - if bot_member.game and bot_member.game.name: + if bot_member.activity and bot_member.activity.name: play_list = [ "Playing", "Streaming", "Listening to", "Watching" ] try: - play_string = play_list[bot_member.game.type] + play_string = play_list[bot_member.activity.type] except: play_string = "Playing" - server_embed.add_field(name=play_string, value=str(bot_member.game.name), inline=True) - if bot_member.game.type == 1: + server_embed.add_field(name=play_string, value=str(bot_member.activity.name), inline=True) + if bot_member.activity.type == 1: # Add the URL too - server_embed.add_field(name="Stream URL", value="[Watch Now]({})".format(bot_member.game.url), inline=True) + server_embed.add_field(name="Stream URL", value="[Watch Now]({})".format(bot_member.activity.url), inline=True) server_embed.set_thumbnail(url=avatar) # Send the embed await ctx.channel.send(embed=server_embed) @@ -794,8 +794,8 @@ class Bot: else: # Online when in doubt s = discord.Status.online - dgame = discord.Game(name=game, url=url, type=t) if game else None - await self.bot.change_presence(status=s, game=dgame) + dgame = discord.Activity(name=game, url=url, type=t) if game else None + await self.bot.change_presence(status=s, activity=dgame) @commands.command(pass_context=True)
Update baldr.txt New trails + generalization + some cleanups.
# Reference: https://twitter.com/fletchsec/status/1108144401530978304 -86818.prohoster.biz/gate.php +86818.prohoster.biz # Reference: https://twitter.com/PRODAFT/status/1105581121595719681 @@ -73,6 +73,7 @@ gangbulk.icu # Reference: https://twitter.com/x42x5a/status/1123250026883497985 +http://66.154.103.144/auth.php http://66.154.103.144/gate.php # Reference: https://twitter.com/x42x5a/status/1123914216665174016 @@ -90,3 +91,9 @@ makemoneywithus.club # Reference: https://twitter.com/JAMESWT_MHT/status/1128974517144031232 kolibri.icu + +# Reference: https://twitter.com/P3pperP0tts/status/1133716120043687936 +# Reference: https://app.any.run/tasks/25a119f3-5dc2-4b9e-a426-92b9c17e0a15/ + +http://185.250.204.118/auth.php +http://185.250.204.118/gate.php
rendered_markdown: Improve headings. * Switch from underline to a smaller range of font sizes to indicate h5/h6 headings. * Provide margin-top for headings while avoiding problematic behavior for messages that start with a heading.
h6 { font-weight: 600; line-height: 1.4; - /* No margin-top is important to make messages that start with a heading - avoid a weird blank area at the top of a message. */ - margin-top: 0; + margin-top: 15px; margin-bottom: 5px; } + /* Headings: Ensure that messages that start with a heading don't have + a weirdly blank area at the very start of the message. */ + h1:first-child, + h2:first-child, + h3:first-child, + h4:first-child, + h5:first-child, + h6:first-child { + margin-top: 0; + } + /* We use a modest progression of heading sizes to make them stand out from normal next but avoid taking up too much space. */ h1 { } h5 { - font-size: 1em; - text-decoration: underline; + font-size: 1.05em; } h6 { font-size: 1em; - font-weight: normal; - text-decoration: underline; } /* Formatting for blockquotes */
If there's a mixed-type attribute on the node, this will fail; just skip it with a warning.
@@ -549,6 +549,11 @@ class CollectLook(pyblish.api.InstancePlugin): if not cmds.attributeQuery(attr, node=node, exists=True): continue attribute = "{}.{}".format(node, attr) + # We don't support mixed-type attributes yet. + if cmds.attributeQuery(attr, node=node, multi=True): + self.log.warning("Attribute '{}' is mixed-type and is " + "not supported yet.".format(attribute)) + continue if cmds.getAttr(attribute, type=True) == "message": continue node_attributes[attr] = cmds.getAttr(attribute)
Update welcome-to-mattermost.rst Made a few grammatical / typo changes
Welcome to Mattermost! ========== -This article will go over the basics of Mattermost and a general overview of the appliation so that you can start using it right way. +This article will cover the basics of Mattermost and give a general overview of the application so that you can start using it right way. Ready? Let's get started! @@ -9,11 +9,11 @@ Ready? Let's get started! **What is Mattermost?** ----------------------------------- -Mattermost is a modern, digital workspace that will help you be more productive in your day to day work life. +Mattermost is a modern, digital workspace that will help you be more productive in your day-to-day work life. Some of the major benefits of using Mattermost are: -- Direct 1:1 and group messaging with your colleages +- Direct 1:1 and group messaging with your colleagues - Channels for topic-based, group-based, or meeting-based chat - Streamlined collaboration on projects - Reduced email clutter @@ -44,9 +44,9 @@ These are the main features of Mattermost, we will be going over each one to und **Teams** --------------------- -A team is a digital workspace where you and your teammates can collaborate on Mattermost. Depending on how Mattermost is set up in your organization, you can belong to one team or multiple teams. +A team is a digital workspace where you and your teammates can collaborate in Mattermost. Depending on how Mattermost is set up in your organization, you can belong to one team or multiple teams. -You have the ability to switch back and forth between teams on the Team sidebar, located on the upper left side of the app. +You have the ability to switch back and forth between teams on the Team Sidebar, located on the upper left side of the app. .. image:: ../../images/ui_teams.png :alt: ui_teams @@ -122,7 +122,7 @@ Notifications in Mattermost alert you to unread messages and mentions. .. image:: ../../images/ui_notifications.png :alt: ui_notifications -You can configure your Mattermost account on how you want to be notified `here <https://docs.mattermost.com/help/getting-started/configuring-notifications.html>`__ +You can configure your Mattermost account for how you want to be notified `here <https://docs.mattermost.com/help/getting-started/configuring-notifications.html>`__ **What's Next?**
Added a filter for bogus containers now being created by the ONOS build. The build leaves containers tagged <none> which are a byproduct of the build process and they can't be pushed into the registry or otherwise manipulated. This would cause the installer being built in test mode to fail.
@@ -288,7 +288,7 @@ if [ "$testMode" == "yes" ]; then echo -e "${lBlue}Extracting the docker image list from the voltha VM${NC}" volIpAddr=`virsh domifaddr $vVmName${uId} | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~'` ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ../.vagrant/machines/voltha${uId}/libvirt/private_key vagrant@$volIpAddr "docker image ls" > images.tmp - cat images.tmp | grep -v 5000 | tail -n +2 | awk '{printf(" - %s:%s\n", $1, $2)}' > image-list.cfg + cat images.tmp | grep -v 5000 | tail -n +2 | awk '{printf(" - %s:%s\n", $1, $2)}' | grep -v "<none>" > image-list.cfg rm -f images.tmp sed -i -e '/voltha_containers:/,$d' ansible/group_vars/all echo "voltha_containers:" >> ansible/group_vars/all
Add test_long_description_content_type Test that specifying a `long_description_content_type` keyword arg to the `setup` function results in writing a `Description-Content-Type` line to the `PKG-INFO` file in the `<distribution>.egg-info` directory. `Description-Content-Type` is described at
@@ -398,6 +398,31 @@ class TestEggInfo(object): self._run_install_command(tmpdir_cwd, env) assert glob.glob(os.path.join(env.paths['lib'], 'barbazquux*')) == [] + def test_long_description_content_type(self, tmpdir_cwd, env): + # Test that specifying a `long_description_content_type` keyword arg to + # the `setup` function results in writing a `Description-Content-Type` + # line to the `PKG-INFO` file in the `<distribution>.egg-info` + # directory. + # `Description-Content-Type` is described at + # https://github.com/pypa/python-packaging-user-guide/pull/258 + + self._setup_script_with_requires( + """long_description_content_type='text/markdown',""") + environ = os.environ.copy().update( + HOME=env.paths['home'], + ) + code, data = environment.run_setup_py( + cmd=['egg_info'], + pypath=os.pathsep.join([env.paths['lib'], str(tmpdir_cwd)]), + data_stream=1, + env=environ, + ) + egg_info_dir = os.path.join('.', 'foo.egg-info') + with open(os.path.join(egg_info_dir, 'PKG-INFO')) as pkginfo_file: + pkg_info_lines = pkginfo_file.read().split('\n') + expected_line = 'Description-Content-Type: text/markdown' + assert expected_line in pkg_info_lines + def test_python_requires_egg_info(self, tmpdir_cwd, env): self._setup_script_with_requires( """python_requires='>=2.7.12',""")
Remove extra "Unsubscribe" on left-sidebar stream popover. An extra "Unsubscribe" was left over from when it was a CSS :pseudo content rather than text in the templates.
max-width: 200px; } -.streams_popover .popover_sub_unsub_button::after { - content: " Unsubscribe"; -} - .streams_popover .sp-container { background: white; cursor: pointer;
Adds drop_zero_counts and process_times DataSet methods. These can be particularly useful for time-dependent data analysis.
@@ -2249,7 +2249,56 @@ class DataSet(object): ds.done_adding_data() return _OrderedDict([(t, dsDict[t]) for t in sorted(dsDict.keys())]) - def process_circuits(self, processor_fn, aggregate=False): + def drop_zero_counts(self): + """ + Creates a copy of this data set that doesn't include any zero counts. + + Returns + ------- + DataSet + """ + self_sparse = DataSet(outcome_label_indices = self.olIndex) + for circuit, datarow in self.items(): + self_sparse.add_raw_series_data(circuit, datarow.outcomes, datarow.time, datarow.reps, + record_zero_counts = False) + self_sparse.done_adding_data() + return self_sparse + + def process_times(self, process_times_array_fn): + """ + Manipulate this DataSet's timestamps according to `processor_fn`. + + For example, using, the folloing `process_times_array_fn` would change + the timestamps for each circuit to sequential integers. + + ``` + def process_times_array_fn(times): + return list(range(len(times))) + ``` + + Parameters + ---------- + process_times_array_fn : function + A function which takes a single array-of-timestamps argument + and returns another similarly-sized array. This function is + called, once per circuit, with the circuit's array of timestamps. + + Returns + ------- + DataSet + A new data set with altered timestamps. + """ + processed_ds = DataSet(outcome_label_indices = self.olIndex) + + for circuit, datarow in self.items(): + processed_time = _np.array(process_times_array_fn(datarow.time)) + assert(processed_time.shape == datarow.time.shape), "process_times_array_fn returned the wrong shape!" + processed_ds.add_raw_series_data(circuit, datarow.outcomes, processed_time, datarow.reps, + record_zero_counts = True) + processed_ds.done_adding_data() + return processed_ds + + def process_circuits(self, processor_fn, aggregate=False): # INPLACE """ Manipulate this DataSet's circuits (keys) according to `processor_fn`.
Add function to automatically install trove-ui. This will install trove-ui when Trove is enabled. Per RDO request redhat-openstack/easyfix#14
@@ -52,6 +52,10 @@ class packstack::horizon () ensure_packages(['openstack-ironic-ui'], {'ensure' => 'present'}) } + if hiera('CONFIG_TROVE_INSTALL') == 'y' { + ensure_packages(['openstack-trove-ui'], {'ensure' => 'present'}) + } + include '::packstack::memcached' $firewall_port = hiera('CONFIG_HORIZON_PORT')
Close by showing stderr and exceptions. Also check if input redirection is actually enabled.
@@ -141,7 +141,8 @@ public class LiveCodingAnalyst implements DocumentListener { else { pythonRunConfiguration = null; } - String inputFilePath = pythonRunConfiguration == null + String inputFilePath = + (pythonRunConfiguration == null || ! pythonRunConfiguration.isRedirectInput()) ? null : pythonRunConfiguration.getInputFile(); DefaultRunExecutor executor = new DefaultRunExecutor(); @@ -360,9 +361,10 @@ public class LiveCodingAnalyst implements DocumentListener { String stderr = processOutput.getStderr(); isPassing = processOutput.getExitCode() == 0; if (stderr.length() > 0) { - log.warn(stderr); + display += "\nLive coding plugin error:\n" + stderr; } } catch (ExecutionException | IOException ex) { + display += "\nLive coding plugin exception:\n" + ex.toString(); log.error("Report failed.", ex); } return display;
[batch][azure] increase timeout for Azure * [batch][azure] increase timeout for Azure Azure seems to have pervasively higher latency than GCP. This should reduce the amount of warning logs we receive. * add import
from typing import Mapping, Optional, List, Union +import aiohttp + from ..common import Session, AnonymousCloudCredentials from .credentials import AzureCredentials @@ -14,4 +16,6 @@ class AzureSession(Session): credentials = AzureCredentials.from_file(credentials_file, scopes=scopes) else: credentials = AzureCredentials.default_credentials(scopes=scopes) + if 'timeout' not in kwargs: + kwargs['timeout'] = aiohttp.ClientTimeout(total=30) super().__init__(credentials=credentials, params=params, **kwargs)
Fix illegal memory accesses when NITEMS > 1, and nrows % NITEMS != 0.

This is based on . All of the credit for detecting the bug, and part of the credit for fixing it, goes to .

Authors:
- Andy Adinets (@canonizer)

Approvers:
- John Zedlewski (@JohnZed)

URL:
@@ -67,34 +67,55 @@ struct ArgMax { } }; +/** tree_leaf_output returns the leaf outputs from the tree with leaf indices + given by leaves for n_rows items. FULL_ITEMS indicates whether n_rows == + NITEMS, to allow the compiler to skip the conditional when unrolling the + loop. */ +template <typename output_type, bool FULL_NITEMS, int NITEMS, + typename tree_type> +__device__ __forceinline__ vec<NITEMS, output_type> tree_leaf_output( + tree_type tree, int n_rows, int (&leaves)[NITEMS]) { + vec<NITEMS, output_type> out(0); +#pragma unroll + for (int j = 0; j < NITEMS; ++j) { + if (FULL_NITEMS || j < n_rows) { + /** dependent names are not considered templates by default, unless it's a + member of a current [template] instantiation. As output<>() is a + member function inherited from the base class, template + output<output_type>() is required. */ + out[j] = tree[leaves[j]].template output<output_type>(); + } + } + return out; +} + template <int NITEMS, typename output_type, typename tree_type> __device__ __forceinline__ vec<NITEMS, output_type> infer_one_tree( tree_type tree, const float* input, int cols, int n_rows) { + // find the leaf nodes for each row int curr[NITEMS]; // the first n_rows are active - int mask = ((1 << n_rows) - 1) << (NITEMS - n_rows); + int mask = (1 << n_rows) - 1; for (int j = 0; j < NITEMS; ++j) curr[j] = 0; do { #pragma unroll for (int j = 0; j < NITEMS; ++j) { auto n = tree[curr[j]]; mask &= ~(n.is_leaf() << j); - if (!n.is_leaf()) { + if ((mask & (1 << j)) != 0) { float val = input[j * cols + n.fid()]; bool cond = isnan(val) ? !n.def_left() : val >= n.thresh(); curr[j] = n.left(curr[j]) + cond; } } } while (mask != 0); - vec<NITEMS, output_type> out; -#pragma unroll - for (int j = 0; j < NITEMS; ++j) { - /** dependent names are not considered templates by default, - unless it's a member of a current [template] instantiation. - alternatively, could have used .base_node::output<... */ - out[j] = tree[curr[j]].template output<output_type>(); + + // get the output from the leaves + if (n_rows == NITEMS) { + return tree_leaf_output<output_type, true>(tree, n_rows, curr); + } else { + return tree_leaf_output<output_type, false>(tree, n_rows, curr); } - return out; } template <typename output_type, typename tree_type>
Update README re: experimental support for Windows As mentioned in , Windows support is "experimental" and does not currently support multiple workers.
@@ -83,6 +83,9 @@ Installation If you are running on a clean install of Fedora 28 or above, please make sure you have the ``redhat-rpm-config`` package installed in case if you want to use ``sanic`` with ``ujson`` dependency. +.. note:: + + Windows support is currently "experimental" and on a best-effort basis. Multiple workers are also not currently supported on Windows (see `Issue #1517 <https://github.com/huge-success/sanic/issues/1517>`_), but setting ``workers=1`` should launch the server successfully. Hello World Example -------------------
Bias predicted times a little earlier. Allow one more minute in the past, and round predictions down to the minute.
@@ -11,7 +11,7 @@ from helpers.match_manipulator import MatchManipulator class MatchTimePredictionHelper(object): EPOCH = datetime.datetime.fromtimestamp(0) - MAX_IN_PAST = datetime.timedelta(minutes=-3) # One match length, ish + MAX_IN_PAST = datetime.timedelta(minutes=-4) # One match length, ish @classmethod def as_local(cls, time, timezone): @@ -140,6 +140,7 @@ class MatchTimePredictionHelper(object): last_predicted = cls.as_local(last_match.actual_time if i == 0 else last.predicted_time, timezone) if last_predicted and average_cycle_time: predicted = last_predicted + datetime.timedelta(seconds=average_cycle_time) + predicted = predicted.replace(second=0) # Round down to the nearest minute else: predicted = match.time
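One detail worth noting: `replace(second=0)` keeps any microsecond component. Flooring a timestamp fully to the minute clears both fields, as in this small sketch:

```python
import datetime

def floor_to_minute(dt: datetime.datetime) -> datetime.datetime:
    # Round down to the start of the current minute.
    return dt.replace(second=0, microsecond=0)

t = datetime.datetime(2024, 5, 1, 14, 37, 42, 123456)
print(floor_to_minute(t))  # 2024-05-01 14:37:00
```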
Cleanup include files in jit/passes/common_subexpression_elimination.h. Summary: Pull Request resolved:
-#include <torch/csrc/jit/ir.h> - -#include <algorithm> -#include <unordered_map> +#include <torch/csrc/jit/passes/common_subexpression_elimination.h> -#include <ATen/core/functional.h> -#include <ATen/core/interned_strings.h> -#include <c10/util/Exception.h> +#include <torch/csrc/jit/ir.h> #include <torch/csrc/jit/node_hashing.h> #include <torch/csrc/jit/passes/alias_analysis.h> -#include <torch/csrc/jit/passes/common_subexpression_elimination.h> -#include <torch/csrc/utils/hash.h> + +#include <unordered_map> namespace torch { namespace jit {
Remove ReportFormESView It was used in pact, as shown in this rename commit. When we removed the pact custom module, we removed the last remaining reference to this.
@@ -25,10 +25,9 @@ from corehq.elastic import ( get_es_new, report_and_fail_on_shard_failures, ) -from corehq.pillows.base import VALUE_TAG, restore_property_dict +from corehq.pillows.base import VALUE_TAG from corehq.pillows.mappings.case_mapping import CASE_ES_ALIAS from corehq.pillows.mappings.reportcase_mapping import REPORT_CASE_ES_ALIAS -from corehq.pillows.mappings.reportxform_mapping import REPORT_XFORM_ALIAS from corehq.pillows.mappings.xform_mapping import XFORM_ALIAS from no_exceptions.exceptions import Http400 @@ -266,46 +265,6 @@ def report_term_filter(terms, mapping): return ret_terms -class ReportFormESView(FormESView): - es_alias = REPORT_XFORM_ALIAS - doc_type = "XFormInstance" - model = ESXFormInstance - - def run_query(self, es_query): - es_results = super(FormESView, self).run_query(es_query) - #hack, walk the results again, and if we have xmlns, populate human readable names - # Note that `get_unknown_form_name` does not require the request, which is also - # not necessarily available here. So `None` is passed here. - form_filter = FormsByApplicationFilter(None, domain=self.domain) - - for res in es_results.get('hits', {}).get('hits', []): - if '_source' in res: - res_source = restore_property_dict(res['_source']) - res['_source'] = res_source - xmlns = res['_source'].get('xmlns', None) - name = None - if xmlns: - name = form_filter.get_unknown_form_name(xmlns, - app_id=res['_source'].get('app_id', - None), - none_if_not_found=True) - if not name: - name = 'unknown' # try to fix it below but this will be the default - # fall back - try: - if res['_source']['form'].get('@name', None): - name = res['_source']['form']['@name'] - else: - backup = res['_source']['form'].get('#type', 'data') - if backup != 'data': - name = backup - except (TypeError, KeyError): - pass - - res['_source']['es_readable_name'] = name - return es_results - - class ElasticAPIQuerySet(object): """ An abstract representation of an elastic search query,
fix udocker/singularity example URLs Fixes
@@ -180,10 +180,8 @@ Using uDocker ------------- Some shared computing environments don't support Docker software containers for technical or policy reasons. -As a workaround, the CWL reference runner supports using alternative ``docker`` implementations on Linux -with the ``--user-space-docker-cmd`` option. - -One such "user space" friendly docker replacement is ``udocker`` https://github.com/indigo-dc/udocker. +As a workaround, the CWL reference runner supports using the ``udocker`` program on Linux using ``--udocker`` +<https://github.com/indigo-dc/udocker>. udocker installation: https://indigo-dc.github.io/udocker/installation_manual.html @@ -191,7 +189,10 @@ Run `cwltool` just as you usually would, but with the new option, e.g., from the .. code:: bash - cwltool --user-space-docker-cmd=udocker https://raw.githubusercontent.com/common-workflow-language/common-workflow-language/main/v1.0/v1.0/test-cwl-out2.cwl https://github.com/common-workflow-language/common-workflow-language/raw/main/v1.0/v1.0/empty.json + cwltool --udocker https://github.com/common-workflow-language/common-workflow-language/raw/main/v1.0/v1.0/test-cwl-out2.cwl https://github.com/common-workflow-language/common-workflow-language/raw/main/v1.0/v1.0/empty.json + +Using Singularity +----------------- ``cwltool`` can also use `Singularity <https://github.com/hpcng/singularity/releases/>`_ version 2.6.1 or later as a Docker container runtime. @@ -206,7 +207,7 @@ Example .. code:: bash - cwltool --singularity https://raw.githubusercontent.com/common-workflow-language/common-workflow-language/main/v1.0/v1.0/v1.0/cat3-tool-mediumcut.cwl https://github.com/common-workflow-language/common-workflow-language/blob/main/v1.0/v1.0/cat-job.json + cwltool --singularity https://github.com/common-workflow-language/common-workflow-language/raw/main/v1.0/v1.0/cat3-tool-mediumcut.cwl https://github.com/common-workflow-language/common-workflow-language/raw/main/v1.0/v1.0/cat-job.json Running a tool or workflow from remote or local locations ---------------------------------------------------------
Styler example in docs: add a Styler example to the st.dataframe docstring
@@ -476,8 +476,8 @@ class DeltaGenerator(object): pandas styling features, like bar charts, hovering, and captions.) Styler support is experimental! - Example - ------- + Examples + -------- >>> df = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) @@ -488,6 +488,19 @@ class DeltaGenerator(object): https://share.streamlit.io/0.25.0-2JkNY/index.html?id=165mJbzWdAC8Duf8a4tjyQ height: 330px + You can also pass a Pandas Styler object to change the style of + the rendered DataFrame: + + >>> df = pd.DataFrame( + ... np.random.randn(10, 20), + ... columns=('col %d' % i for i in range(20))) + ... + >>> st.dataframe(df.style.highlight_max(axis=0)) + + .. output:: + https://share.streamlit.io/0.29.0-dV1Y/index.html?id=Hb6UymSNuZDzojUNybzPby + height: 285px + """ from streamlit import data_frame_proto
Add simple example of ClientCredentialsAuthorizer Resolves
@@ -94,4 +94,34 @@ When your tokens are expired, you should just request new ones by making another Client Credentials request. Depending on your needs, you may need to track the expiration times along with your tokens. -The SDK does not offer any special facilities for doing this. + +Using ClientCredentialsAuthorizer +--------------------------------- + +The SDK also provides a specialized Authorizer which can be used to +automatically handle token expiration. + +Use it like so: + +.. code-block:: python + + import globus_sdk + + # you must have a client ID + CLIENT_ID = '...' + # the secret, loaded from wherever you store it + CLIENT_SECRET = '...' + + confidential_client = globus_sdk.ConfidentialAppAuthClient( + client_id=CLIENT_ID, client_secret=CLIENT_SECRET) + scopes = "urn:globus:auth:scopes:transfer.api.globus.org:all" + cc_authorizer = globus_sdk.ClientCredentialsAuthorizer( + confidential_client, scopes) + # create a new client + transfer_client = globus_sdk.TransferClient(authorizer=cc_authorizer) + + # usage is still the same + print("Endpoints Belonging to {}@clients.auth.globus.org:" + .format(CLIENT_ID)) + for ep in tc.endpoint_search(filter_scope="my-endpoints"): + print("[{}] {}".format(ep["id"], ep["display_name"]))
Creating the class test.base.TestBaseNonAtomic to properly test when an atomic transaction has been set. This new class inherits from TransactionTestCase, which does not wrap tests in transaction.atomic. TestCase, on the other hand, does, so get_connection().in_atomic_block is always True, which is not great when testing that we didn't use transaction.atomic.
@@ -6,7 +6,7 @@ try: from django.urls import clear_url_caches except ImportError: # Django < 1.10 pragma: no cover from django.core.urlresolvers import clear_url_caches -from django.test import TestCase +from django.test import TestCase, TransactionTestCase from django.test.utils import override_settings from django.utils import timezone from django.utils.six import StringIO, assertRegex @@ -22,7 +22,7 @@ except ImportError: # Python 2.7 # Test helpers. -class TestBase(TestCase): +class TestBaseMixin(object): multi_db = True @@ -31,7 +31,7 @@ class TestBase(TestCase): clear_url_caches() def tearDown(self): - super(TestBase, self).tearDown() + super(TestBaseMixin, self).tearDown() for model in list(reversion.get_registered_models()): reversion.unregister(model) @@ -68,6 +68,14 @@ class TestBase(TestCase): self.assertEqual(Revision.objects.using(using).all().count(), 0) +class TestBase(TestBaseMixin, TestCase): + pass + + +class TestBaseNonAtomic(TestBaseMixin, TransactionTestCase): + pass + + class TestModelMixin(object): def setUp(self):
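The distinction being exploited above: Django's `TestCase` wraps each test in `transaction.atomic()`, while `TransactionTestCase` runs against real transactions (flushing tables between tests). A minimal sketch of how the difference is observable, assuming a configured Django test project:

```python
from django.db import connection
from django.test import TestCase, TransactionTestCase

class AtomicWrapped(TestCase):
    def test_in_atomic_block(self):
        # TestCase wraps the test in transaction.atomic(), so this
        # flag is always True here.
        self.assertTrue(connection.in_atomic_block)

class NotWrapped(TransactionTestCase):
    def test_not_in_atomic_block(self):
        # No implicit atomic block, so code under test that opens
        # its own transaction.atomic() can actually be detected.
        self.assertFalse(connection.in_atomic_block)
```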
Then.Expr: switch to ComputingExpr TN:
@@ -10,8 +10,8 @@ from langkit.diagnostics import check_source_language from langkit.expressions.analysis_units import AnalysisUnitType from langkit.expressions.base import ( AbstractExpression, AbstractVariable, BasicExpr, BindingScope, CallExpr, - ComputingExpr, LiteralExpr, No, NullExpr, PropertyDef, ResolvedExpression, - attr_call, construct, render + ComputingExpr, LiteralExpr, No, NullExpr, PropertyDef, attr_call, + construct, render ) from langkit.expressions.envs import EmptyEnv from langkit.utils import assert_type @@ -377,7 +377,7 @@ class Then(AbstractExpression): otherwise. """ - class Expr(ResolvedExpression): + class Expr(ComputingExpr): pretty_name = 'Then' def __init__(self, expr, var_expr, then_expr, default_expr, @@ -395,9 +395,6 @@ class Then(AbstractExpression): def _render_pre(self): return render('properties/then_ada', then=self) - def _render_expr(self): - return self.result_var.name.camel_with_underscores - @property def subexprs(self): return {'0-prefix': self.expr,
Remove numerical-warning comment. Answers . Just sets in stone the warning filter for "Numerical issues". Authors: - Victor Lafargue (https://github.com/viclafargue) Approvers: - Dante Gama Dessavre (https://github.com/dantegd) URL:
@@ -160,8 +160,9 @@ def test_standard_scaler_sparse(failure_logger, @pytest.mark.parametrize("axis", [0, 1]) @pytest.mark.parametrize("with_mean", [True, False]) @pytest.mark.parametrize("with_std", [True, False]) -# FIXME: ignore warnings from cuml and sklearn about scaling issues -# issue: https://github.com/rapidsai/cuml/issues/4203 +# The numerical warning is triggered when centering or scaling +# cannot be done as single steps. Its display can be safely disabled. +# For more information see : https://github.com/rapidsai/cuml/issues/4203 @pytest.mark.filterwarnings("ignore:Numerical issues::") def test_scale(failure_logger, clf_dataset, axis, # noqa: F811 with_mean, with_std):
hagrid: parallelize launch cmds only if multiple cmds are present; print logs for each running cmd
@@ -283,7 +283,7 @@ def launch(args: TypeTuple[str], **kwargs: TypeDict[str, Any]) -> None: return -def execute_commands(cmds: list, dry_run: bool) -> None: +def execute_commands(cmds: list, dry_run: bool = False) -> None: process_list: list = [] for cmd in cmds: if dry_run: @@ -294,23 +294,45 @@ def execute_commands(cmds: list, dry_run: bool) -> None: cmd = ["powershell.exe", "-Command", cmd] try: + if len(cmds) > 1: process = subprocess.Popen( - cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=GRID_SRC_PATH + cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + cwd=GRID_SRC_PATH, + shell=True, ) process_list.append(process) + else: + subprocess.check_call( + cmd, + stdout=sys.stdout, + stderr=subprocess.STDOUT, + shell=True, + cwd=GRID_SRC_PATH, + ) except Exception as e: print(f"Failed to run cmd: {cmd}. {e}") if dry_run is False: # TODO: Display the VM Status with its Ip in here. # Refresh the status whenever you check for the status - while True: + while True and len(process_list) > 0: # Check process status + + # For each process display hagrid ssh status with flush, once all machines are up. + # display the password and jupyter tokens and exit. process_status = [False if p.poll() is None else True for p in process_list] if all(process_status): print("All processes completed") break + process_status = [False if p.poll() is None else True for p in process_list] + for p in process_list: + output = p.stdout.readline().decode("utf-8") + print(f"PID: {p.pid} -> {output}") + time.sleep(1) + def display_vm_status(cmds: list) -> None: for cmd in cmds:
Added validator method for ChatClientConfig which will validate domain format. Added test case for the same.
@@ -29,7 +29,7 @@ from kairon.shared.data.signals import push_notification from kairon.exceptions import AppException from kairon.shared.utils import Utility from kairon.shared.models import TemplateType - +from validators import domain class Entity(EmbeddedDocument): start = LongField(required=True) @@ -642,6 +642,12 @@ class ChatClientConfig(Document): status = BooleanField(default=True) white_listed_domain = ListField(StringField(), default=None) + def validate(self, clean=True): + if isinstance(self.white_listed_domain, list): + for val in self.white_listed_domain: + if val is not "*" and isinstance(domain(val), ValidationFailure): + raise ValidationError("One of the domain is invalid") + @push_notification.apply class ConversationsHistoryDeleteLogs(Document):
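Two small points about the added validator, shown in a hedged standalone sketch (not the kairon code). First, `val is not "*"` compares object identity and only works when CPython happens to intern the string; the conventional spelling is `!=`. Second, `validators.domain()` returns `True` for a valid domain and a falsy failure object otherwise, so a truthiness test suffices:

```python
import validators

def check_white_listed(domains):
    for val in domains:
        # Compare strings with !=, not `is not`; rely on the falsy
        # failure object returned by validators.domain().
        if val != '*' and not validators.domain(val):
            raise ValueError(f'invalid domain: {val}')

check_white_listed(['*', 'example.com'])  # passes
# check_white_listed(['not a domain'])    # raises ValueError
```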
MAINT: Remove redundant test. The Python 3.8 32 bits fast test on windows was being run twice.
@@ -67,19 +67,6 @@ stages: displayName: 'Run Lint Checks' failOnStderr: true - - job: WindowsFast - pool: - vmImage: 'VS2017-Win2016' - strategy: - matrix: - Python37-32bit-fast: - PYTHON_VERSION: '3.8' - PYTHON_ARCH: 'x86' - TEST_MODE: fast - BITS: 32 - steps: - - template: azure-steps-windows.yml - - job: Linux_Python_38_32bit_full_with_asserts pool: vmImage: 'ubuntu-20.04'
Made searching even stricter by searching from the start of each word. Added regex back to sub and split by non-alphabet characters. Now uses two pointers to move from word to word.
import logging +import re import time from typing import Dict, List, Optional @@ -19,6 +20,8 @@ TEST_CHANNELS = ( Channels.helpers ) +REGEX_NON_ALPHABET = re.compile(r"[^a-z]", re.MULTILINE & re.IGNORECASE) + class Tags(Cog): """Save new tags and fetch existing tags.""" @@ -42,20 +45,19 @@ class Tags(Cog): @staticmethod def _fuzzy_search(search: str, target: str) -> int: """A simple scoring algorithm based on how many letters are found / total, with order in mind.""" - found, index = 0, 0 - _search = search.lower().replace(' ', '') - _targets = iter(target.lower()) + current, index = 0, 0 + _search = REGEX_NON_ALPHABET.sub('', search.lower()) + _targets = iter(REGEX_NON_ALPHABET.split(target.lower())) _target = next(_targets) try: - for letter in _search: - index = _target.find(letter, index) - while index == -1: - _target = next(_targets) - index = _target.find(letter) - found += 1 - except StopIteration: + while True: + while index < len(_target) and _search[current] == _target[index]: + current += 1 + index += 1 + index, _target = 0, next(_targets) + except (StopIteration, IndexError): pass - return found / len(_search) * 100 + return current / len(_search) * 100 def _get_suggestions(self, tag_name: str, thresholds: Optional[List[int]] = None) -> List[str]: """Return a list of suggested tags."""
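A self-contained sketch of the word-anchored two-pointer scoring that the diff implements (simplified and renamed; edge-case behavior may differ from the bot's exact code):

```python
import re

NON_ALPHA = re.compile(r'[^a-z]')

def fuzzy_score(search: str, target: str) -> float:
    # Match the search letters in order, but only against the
    # start of each word in the target; the unmatched tail of a
    # word is skipped when the next word begins.
    pattern = NON_ALPHA.sub('', search.lower())
    if not pattern:
        return 0.0
    matched = 0
    for word in NON_ALPHA.split(target.lower()):
        i = 0
        while (matched < len(pattern) and i < len(word)
               and pattern[matched] == word[i]):
            matched += 1
            i += 1
        if matched == len(pattern):
            break
    return matched / len(pattern) * 100

print(fuzzy_score('pep8', 'pep 8 style guide'))  # 100.0
print(fuzzy_score('zen', 'the zen of python'))   # 100.0
```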
Update version 0.9.9 -> 0.9.10

Fixes:
* Fix the behavior of the file-like returned by `DQM.to_file`
# # ================================================================================================ -__version__ = '0.9.9' +__version__ = '0.9.10' __author__ = 'D-Wave Systems Inc.' __authoremail__ = '[email protected]' __description__ = 'A shared API for binary quadratic model samplers.'
[main] tweak pending_link check Allow bypassing the auth module and setting the auth token on the client directly. This is useful for CI and tests.
@@ -564,9 +564,11 @@ class Maestral: """Indicates if Maestral is linked to a Dropbox account (read only). This will block until the user's keyring is unlocked to load the saved auth token.""" - if self._auth.linked: # this triggers keyring access on first call + if self.client.linked: + return False + + elif self._auth.linked: # this will trigger keyring access on first call - if not self.client.linked: if self._auth.token_access_type == 'legacy': self.client.set_token(access_token=self._auth.access_token) else:
Use new importlib.metadata.entry_points interface where available. With Python 3.10, the entry_points() method returning a SelectableGroups dict interface was deprecated. The preferred way is to now filter by group through a keyword argument. Fixes GH6514.
import functools import inspect import itertools +import sys import warnings from importlib.metadata import entry_points @@ -95,6 +96,10 @@ def build_engines(entrypoints): @functools.lru_cache(maxsize=1) def list_engines(): + # New selection mechanism introduced with Python 3.10. See GH6514. + if sys.version_info >= (3, 10): + entrypoints = entry_points(group="xarray.backends") + else: entrypoints = entry_points().get("xarray.backends", ()) return build_engines(entrypoints)
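The version split above generalizes to any plugin system built on entry points. A hedged sketch of the pattern in isolation:

```python
import sys
from importlib.metadata import entry_points

def plugin_entrypoints(group: str = 'xarray.backends'):
    if sys.version_info >= (3, 10):
        # Selectable interface: filter by group via keyword argument.
        return entry_points(group=group)
    # Legacy interface: dict-like mapping of group -> entries.
    return entry_points().get(group, ())

for ep in plugin_entrypoints():
    print(ep.name, '->', ep.value)
```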
database version now v6 Added unique constraint to journal name
@@ -1399,11 +1399,12 @@ class MainWindow(QtWidgets.QMainWindow): cur.execute( "CREATE TABLE code_name (cid integer primary key, name text, memo text, catid integer, owner text," "date text, color text, unique(name))") + # Database version v6 - unique name for journal cur.execute("CREATE TABLE journal (jid integer primary key, name text, jentry text, date text, owner text), " "unique(name)") cur.execute("CREATE TABLE stored_sql (title text, description text, grouper text, ssql text, unique(title))") cur.execute("INSERT INTO project VALUES(?,?,?,?,?,?,?)", - ('v5', datetime.datetime.now().astimezone().strftime("%Y-%m-%d %H:%M:%S"), '', qualcoder_version, 0, + ('v6', datetime.datetime.now().astimezone().strftime("%Y-%m-%d %H:%M:%S"), '', qualcoder_version, 0, 0, self.app.settings['codername'])) self.app.conn.commit() try:
Update dev-setup-centos-7.rst missing sudo at line 32
@@ -29,7 +29,7 @@ Set up your development environment for building, running, and testing Mattermos a. ``sudo yum group install "Development Tools"`` - b. ``yum install -y libpng12`` + b. ``sudo yum install -y libpng12`` 3. Download and install Go 1.8 for Linux:
closes reverting feature
return template_height - template_height / 3 + 'px'; } + /* + // not working on Chrome sometimes angular.element($window).bind("keyup", function ($event) { if ($event.keyCode === $scope.ctrlKey || $event.keyCode === $scope.cmdKey || $event.keyCode === 91 || $event.keyCode === 93) } $scope.$apply(); }); + */ } })();
Bug: Send Pin Comment notifications

Notify:
- authors of existing translations across all locales
- reviewers of existing translations across all locales

Send a separate notification for each locale the user has contributed to. Each notification is then linked to the corresponding translate view.
import logging import re + +from collections import defaultdict from datetime import datetime from urllib.parse import urlparse @@ -557,6 +559,40 @@ def _send_add_comment_notifications(user, comment, entity, locale, translation): ) +def _send_pin_comment_notifications(user, comment): + # When pinning a comment, notify: + # - authors of existing translations across all locales + # - reviewers of existing translations across all locales + recipient_data = defaultdict(list) + entity = comment.entity + translations = Translation.objects.filter(entity=entity) + + for t in translations: + for u in ( + t.user, + t.approved_user, + t.unapproved_user, + t.rejected_user, + t.unrejected_user, + ): + if u: + recipient_data[u.pk].append(t.locale.pk) + + for recipient in User.objects.filter(pk__in=recipient_data.keys()).exclude( + pk=user.pk + ): + # Send separate notification for each locale (which results in links to corresponding translate views) + for locale in Locale.objects.filter(pk__in=recipient_data[recipient.pk]): + notify.send( + user, + recipient=recipient, + verb="has pinned a comment in", + action_object=locale, + target=entity, + description=comment.content, + ) + + @require_POST @utils.require_AJAX @login_required(redirect_field_name="", login_url="/403") @@ -617,6 +653,8 @@ def pin_comment(request): comment.pinned = True comment.save() + _send_pin_comment_notifications(request.user, comment) + return JsonResponse({"status": True})
Add notes for tabulate in docstring of to_markdown, following pandas.
@@ -2876,6 +2876,10 @@ class Frame(object, metaclass=ABCMeta): str Series or DataFrame in Markdown-friendly format. + Notes + ----- + Requires the `tabulate <https://pypi.org/project/tabulate>`_ package. + Examples -------- >>> kser = ks.Series(["elk", "pig", "dog", "quetzal"], name="animal")
Update zones.json. Capacity info for Belarus has been added. BY: biogas moved to gas from biomass.
"https://github.com/systemcatch" ] }, - "BY": {}, + "BY": { + "_comment": "gas includes 24.4 MW of biogas. gas includes 1273 CHP", + "capacity": { + "biomass": 6.6, + "coal": 0, + "gas": 9492, + "hydro": 73, + "hydro storage": 0, + "nuclear": 0, + "oil": 447, + "solar": 50.9, + "wind": 70.4 + }, + "contributors": [ + "https://github.com/admetny" + ] + }, "CA-AB": { "flag_file_name": "ca.png", "parsers": {
Fixes: use absolute path when executing the excluder, as it's used when checking for the excluder.
register: docker_excluder_stat - name: Enable docker excluder - command: "{{ r_openshift_excluder_service_type }}-docker-excluder exclude" + command: "/sbin/{{ r_openshift_excluder_service_type }}-docker-excluder exclude" when: - r_openshift_excluder_enable_docker_excluder | bool - docker_excluder_stat.stat.exists register: openshift_excluder_stat - name: Enable openshift excluder - command: "{{ r_openshift_excluder_service_type }}-excluder exclude" + command: "/sbin/{{ r_openshift_excluder_service_type }}-excluder exclude" when: - r_openshift_excluder_enable_openshift_excluder | bool - openshift_excluder_stat.stat.exists
Make `align_to` method-only. Summary: Pull Request resolved: The ellipsis version of `align_to` only works if it is called as a method. To prevent any confusion, this PR disables `torch.align_to` (but keeps `Tensor.align_to`). Test Plan: - [namedtensor ci]
supports_named_tensor: True - func: align_to(Tensor(a) self, DimnameList names) -> Tensor(a) - variants: function, method + variants: method supports_named_tensor: True - func: align_as(Tensor self, Tensor other) -> Tensor
ResolvedExpression.destructure_entity: remove expression tree sharing TN:
@@ -1086,17 +1086,26 @@ class ResolvedExpression(object): def destructure_entity(self): """ Must be called only on expressions that evaluate to entities. Return - expressions to access separately 1) the node and 2) the entity info for - an entity. + 3 expressions: - :rtype: (ResolvedExpression, ResolvedExpression). + 1. A SavedExpr wrapper for self, so its result can be used multiple + times. + 2. An expression that evaluates the entity node. + 3. An expression that evaluates the entity info. + + The SavedExpr (1) must be evaluated before any of (2) and (3) are + evaluated themselves. + + :rtype: (ResolvedExpression, ResolvedExpression, ResolvedExpression). """ from langkit.expressions.structs import FieldAccess assert self.type.is_entity_type fields = self.type.get_abstract_fields_dict() + saved = SavedExpr('Saved', self) return ( - FieldAccess.Expr(self, fields['el'], []), - FieldAccess.Expr(self, fields['info'], []), + saved, + FieldAccess.Expr(saved.result_var_expr, fields['el'], []), + FieldAccess.Expr(saved.result_var_expr, fields['info'], []), )
Update test for new metadata. The geocoder test uses test geo data but real phone number data, and the fixed-line patterns for TW have become more specific.
@@ -212,8 +212,8 @@ class PhoneNumberGeocoderTest(unittest.TestCase): TEST_GEOCODE_DATA['1650960'] = {'en': u("Mountain View, CA")} # Test the locale mapping - TEST_GEOCODE_DATA['8868'] = {'zh': u("Chinese"), 'zh_Hant': u("Hant-specific")} - tw_number = FrozenPhoneNumber(country_code=886, national_number=810080123) + TEST_GEOCODE_DATA['8862'] = {'zh': u("Chinese"), 'zh_Hant': u("Hant-specific")} + tw_number = FrozenPhoneNumber(country_code=886, national_number=221234567) self.assertEqual("Hant-specific", description_for_number(tw_number, "zh", region="TW")) - del TEST_GEOCODE_DATA['8868'] + del TEST_GEOCODE_DATA['8862']
[gae.py] Require cloudbuildhelper v1.1.13. To pick up
@@ -904,7 +904,7 @@ def _check_go(min_version='1.16.0'): 'Could not find `go` in PATH. Is it needed to deploy Go code.') -def _check_cloudbuildhelper(min_version='1.1.9'): +def _check_cloudbuildhelper(min_version='1.1.13'): """Checks `cloudbuildhelper` is in PATH and it is fresh enough.""" explainer = ( 'It is needed to deploy Go GAE apps now (https://crbug.com/1057067).\n' @@ -912,7 +912,7 @@ def _check_cloudbuildhelper(min_version='1.1.9'): ' $ eval `.../infra/go/env.py`.' ) try: - # 'cloudbuildhelper v1.1.9\nCIPD package: ...' + # 'cloudbuildhelper v1.x.y\nCIPD package: ...' ver = subprocess.check_output(['cloudbuildhelper', 'version']) ver = ver.splitlines()[0].strip() if not ver.startswith('cloudbuildhelper v'):
Fix validation_step in DQNTrainer. Summary: AutoDataModule yields a dictionary of tensors, so we need to manually type the input.
@@ -304,6 +304,8 @@ class DQNTrainer(DQNTrainerBaseLightning): return retval def validation_step(self, batch, batch_idx): + if isinstance(batch, dict): + batch = rlt.DiscreteDqnInput.from_dict(batch) rewards = self.boost_rewards(batch.reward, batch.action) discount_tensor = self.compute_discount_tensor(batch, rewards) td_loss = self.compute_td_loss(batch, rewards, discount_tensor)
[PY3] Fix test that is flaky in Python 3. We can't rely on lists having the same order in Python 3 the same way we rely on them in Python 2. If we sort them first and then compare them, this test will be more reliable.
@@ -486,7 +486,7 @@ class TestCustomExtensions(TestCase): env = Environment(extensions=[SerializerExtension]) if six.PY3: rendered = env.from_string('{{ dataset|unique }}').render(dataset=dataset).strip("'{}").split("', '") - self.assertEqual(rendered, list(unique)) + self.assertEqual(sorted(rendered), sorted(list(unique))) else: rendered = env.from_string('{{ dataset|unique }}').render(dataset=dataset) self.assertEqual(rendered, u"{0}".format(unique))
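The fix above relies on order-insensitive comparison rather than on any ordering guarantee for set elements. A tiny illustration with made-up data:

# A set such as {'a', 'b', 'c'} may render its elements in any order,
# so an element-wise list comparison is flaky. Sorting both sides
# first makes the assertion deterministic.
rendered = ["c", "a", "b"]   # one possible rendering order
expected = ["a", "b", "c"]
assert sorted(rendered) == sorted(expected)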
doc: Update docs for tags in cfncluster create. The tags JSON has to be encapsulated in single quotes.
@@ -28,12 +28,17 @@ optional arguments: specify a specific cluster template to use --extra-parameters EXTRA_PARAMETERS, -p EXTRA_PARAMETERS add extra parameters to stack create - --tags TAGS, -g TAGS tags to be added to the stack + --tags TAGS, -g TAGS tags to be added to the stack, TAGS is a JSON formatted string encapsulated by single quotes :: $ cfncluster create mycluster +create cluster with tags: + +:: + + $ cfncluster create mycluster --tags '{ "Key1" : "Value1" , "Key2" : "Value2" }' update ======
Update README.md Thanks, Matti!
@@ -48,7 +48,11 @@ NumPy requires `pytest` and `hypothesis`. Tests can then be run after installat Code of Conduct ---------------------- -NumPy is a community-driven open source project developed by a very diverse group of [contributors](/gallery/team.html). The NumPy leadership has made a strong commitment to creating an open, inclusive, and positive community. Please read the [NumPy Code of Conduct](/code-of-conduct) for guidance on how to interact with others in a way that makes our community thrive. +NumPy is a community-driven open source project developed by a very diverse group of +[contributors](https://numpy.org/gallery/team.html). The NumPy leadership has made a strong +commitment to creating an open, inclusive, and positive community. Please read the +[NumPy Code of Conduct](https://numpy.org/code-of-conduct/) for guidance on how to interact +with others in a way that makes our community thrive. Call for Contributions ----------------------
Decorators: pass bound arguments to callable Bound arguments are more convenient to work with than the raw args and kwargs.
@@ -18,9 +18,10 @@ log = logging.getLogger(__name__) __lock_dicts = defaultdict(WeakValueDictionary) Argument = t.Union[int, str] -_IdCallable = t.Callable[..., t.Hashable] -_IdAwaitable = t.Callable[..., t.Awaitable[t.Hashable]] -ResourceId = t.Union[t.Hashable, _IdCallable, _IdAwaitable] +BoundArgs = t.OrderedDict[str, t.Any] +_IdCallableReturn = t.Union[t.Hashable, t.Awaitable[t.Hashable]] +_IdCallable = t.Callable[[BoundArgs], _IdCallableReturn] +ResourceId = t.Union[t.Hashable, _IdCallable] def in_whitelist( @@ -108,8 +109,8 @@ def mutually_exclusive(namespace: t.Hashable, resource_id: ResourceId) -> t.Call `namespace` is an identifier used to prevent collisions among resource IDs. `resource_id` identifies a resource on which to perform a mutually exclusive operation. - It may also be a callable or awaitable which will return the resource ID given the decorated - function's args and kwargs. + It may also be a callable or awaitable which will return the resource ID given an ordered + mapping of the parameters' names to arguments' values. If decorating a command, this decorator must go before (below) the `command` decorator. """ @@ -121,8 +122,13 @@ def mutually_exclusive(namespace: t.Hashable, resource_id: ResourceId) -> t.Call log.trace(f"{name}: mutually exclusive decorator called") if callable(resource_id): + log.trace(f"{name}: binding args to signature") + sig = inspect.signature(func) + bound_args = sig.bind(*args, **kwargs) + bound_args.apply_defaults() + log.trace(f"{name}: calling the given callable to get the resource ID") - id_ = resource_id(*args, **kwargs) + id_ = resource_id(bound_args.arguments) if inspect.isawaitable(id_): log.trace(f"{name}: awaiting to get resource ID")
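A minimal sketch of the inspect.signature binding step the decorator above performs; the decorated function and its arguments here are illustrative:

import inspect

def resize(image, width, height=100):
    pass

sig = inspect.signature(resize)
bound = sig.bind("img.png", 640)  # the raw *args a wrapper would receive
bound.apply_defaults()            # fills in height=100
# bound.arguments is an ordered name -> value mapping, which is easier
# for a resource_id callable to inspect than raw args and kwargs.
print(bound.arguments)  # e.g. {'image': 'img.png', 'width': 640, 'height': 100}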
Disable gpg check in fedora:rawhide image. The heat-container-agent is currently failing to build due to a misconfigured upstream fedora:rawhide image. We can revert this change later. Story: Task: 36184
@@ -11,7 +11,7 @@ LABEL name="heat-container-agent" \ atomic.type="system" \ distribution-scope="public" -RUN dnf -y --setopt=tsflags=nodocs install \ +RUN dnf -y --setopt=tsflags=nodocs --nogpgcheck install \ bash \ findutils \ gcc \
Uses abspath() for test data access in dependency_utils_test. `dependency_utils.py` changes the working directory while building an ephemeral package, so we need to access test data using abspath.
@@ -51,7 +51,8 @@ class DependencyUtilsTest(tf.test.TestCase): @mock.patch('tempfile.mkdtemp') @mock.patch('subprocess.call') def testEphemeralPackageMocked(self, mock_subprocess_call, mock_mkdtemp): - source_data_dir = os.path.join(os.path.dirname(__file__), 'testdata') + source_data_dir = os.path.join( + os.path.dirname(os.path.abspath(__file__)), 'testdata') test_file = os.path.join(source_data_dir, 'test.csv') expected_package = 'mypackage.tar.gz'
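The reasoning behind the fix above, sketched with illustrative paths: a relative path derived from __file__ stops resolving correctly once the process changes its working directory, while a path made absolute beforehand stays anchored:

import os

rel = os.path.join(os.path.dirname("pkg/dependency_utils_test.py"), "testdata")
ab = os.path.join(
    os.path.dirname(os.path.abspath("pkg/dependency_utils_test.py")), "testdata")

os.chdir("/tmp")  # what building an ephemeral package effectively does
print(rel)  # pkg/testdata -- now resolves against /tmp, the wrong base
print(ab)   # /original/cwd/pkg/testdata -- anchored before the chdir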
Modified sed script to not use -i and to remove the necessary lines. sed -i is not supported on OS X. The removal of lines 1-6 and 10 from mod.rs had unintentionally been dropped; it is now reinstated.
@@ -30,9 +30,10 @@ define crate_template $(1)/src/%/mod.rs: svd/%.svd.patched mkdir -p $$(@D) cd $$(@D); svd2rust -i ../../../$$< + rustfmt $$(@D)/lib.rs export DEVICE=$$$$(basename $$< .svd.patched); \ - sed -i "s/crate :: Interrupt/crate :: $$$${DEVICE} :: Interrupt/" $$(@D)/lib.rs - form -i $$(@D)/lib.rs -o $$(@D)/ + sed "1,6d;10d;s/crate::Interrupt/crate::$$$${DEVICE}::Interrupt/" $$(@D)/lib.rs > $$(@D)/mod.rs + form -i $$(@D)/mod.rs -o $$(@D)/ mv $$(@D)/lib.rs $$(@D)/mod.rs rm $$(@D)/build.rs find $$(@D) -name *.rs -exec rustfmt {} +
Fix typo in querying docs Replaces
@@ -1055,7 +1055,7 @@ MySQL uses *Rand*: .. code-block:: python # Pick 5 lucky winners: - LotterNumber.select().order_by(fn.Rand()).limit(5) + LotteryNumber.select().order_by(fn.Rand()).limit(5) Paginating records ------------------
Add inference mock Add monitor mock
@@ -66,8 +66,10 @@ class ReducerControl: self.__state = ReducerState.monitoring - def monitor(self, config): + def monitor(self, config=None): self.__state = ReducerState.monitoring + # todo connect to combiners and listen for globalmodelupdate request. + # use the globalmodel received to start the reducer combiner method on received models to construct its own model. def add(self, combiner): if self.__state != ReducerState.idle: @@ -94,11 +96,32 @@ class ReducerControl: return self.__state +class ReducerInference: + def __init__(self): + self.model_wrapper = None + + def set(self, model): + self.model_wrapper = model + + def infer(self, params): + + results = None + if self.model_wrapper: + results = self.model_wrapper.infer(params) + + return results + + +class ModelError(BaseException): + pass + + class Reducer: def __init__(self, config): self.name = config['name'] self.token = config['token'] self.control = ReducerControl() + self.inference = ReducerInference() # from fedn.algo.fedavg import FEDAVGCombiner # self.reducer = FEDAVGCombiner(self.name, self.repository, self) @@ -151,6 +174,16 @@ class Reducer: self.control.instruct(config) return "started" + @app.route('/infer') + def infer(): + result = "" + try: + result = self.inference.infer(request.args) + except ModelError: + print("no model") + + return result + # import os, sys # self._original_stdout = sys.stdout # sys.stdout = open(os.devnull, 'w') @@ -167,5 +200,6 @@ class Reducer: while True: time.sleep(1) print("Reducer in {} state".format(ReducerStateToString(self.control.state())), flush=True) + self.control.monitor() except (KeyboardInterrupt, SystemExit): print("Exiting..", flush=True)
[jax2tf] Add a new jax2tf GDA test. Now it covers using a GDA as a JAX function input.
@@ -1336,11 +1336,12 @@ class XlaCallModuleTest(tf_test_util.JaxToTfTestCase): global_shape, global_mesh, mesh_axes, lambda idx: global_data[idx]), global_data - # Create GDA global_mesh = jtu.create_global_mesh((4, 2), ("x", "y")) mesh_axes = P(("x", "y")) params, _ = create_gda((8, 2), global_mesh, mesh_axes) + input_data = np.arange(16).reshape(2, 8) + # Test 1: use GDA as constants def jax_func(input_data): handle = pjit( jnp.matmul, @@ -1353,12 +1354,29 @@ class XlaCallModuleTest(tf_test_util.JaxToTfTestCase): jax2tf.convert(jax_func, enable_xla=True), jit_compile=True, ) - input_data = np.arange(16).reshape(2, 8) jax_out = jax_func(input_data=input_data) tf_out = tf_func(input_data=input_data) # TODO(b/243146552) We can switch to ConvertAndCompare after this bug fix. np.array_equal(jax_out._value, np.array(tf_out)) + # Test 2: use GDA as JAX function input + def jax_func_2(input_data, params): + handle = pjit( + jnp.matmul, + in_axis_resources=(P("y", "x"), P(("x", "y"),)), + out_axis_resources=None) + return handle(input_data, params) + + with global_mesh: + tf_func_2 = tf.function( + jax2tf.convert(jax_func_2, enable_xla=True), + jit_compile=True, + ) + jax_out_2 = jax_func_2(input_data=input_data, params=params) + tf_out_2 = tf_func_2(input_data=input_data, params=params) + # TODO(b/243146552) We can switch to ConvertAndCompare after this bug fix. + np.array_equal(jax_out_2._value, np.array(tf_out_2)) + if __name__ == "__main__": # TODO: Remove once tensorflow is 2.10.0 everywhere.
Make the link to the separate section for FITS file compression clearer; it's kind of hidden in the text. Issue
@@ -131,11 +131,16 @@ for more details). Working with compressed files """"""""""""""""""""""""""""" +.. note:: + + Files that use compressed HDUs within the FITS file are discussed + in :ref:`Compressed Image Data <astropy-io-fits-compressedImageData>`. + + The :func:`open` function will seamlessly open FITS files that have been compressed with gzip, bzip2 or pkzip. Note that in this context we're talking about a fits file that has been compressed with one of these utilities - e.g. a -.fits.gz file. Files that use compressed HDUs within the FITS file are discussed -in :ref:`Compressed Image Data <astropy-io-fits-compressedImageData>`. +.fits.gz file. There are some limitations with working with compressed files. For example with Zip files that contain multiple compressed files, only the first file will be accessible.
Fix wrongly parsed VP9 codec string. The VP9 profile number was wrong.
@@ -189,7 +189,7 @@ def _determine_video_codec(content_profile): return 'dvhe' return 'hevc' if content_profile.startswith('vp9'): - return 'vp9.0.' + content_profile[14:16] + return 'vp9.' + content_profile[11:12] return 'h264'
Increase TimerTest tolerance to 20% on Windows Summary: Pull Request resolved: Test Plan: CI
@@ -22,10 +22,16 @@ TEST(TimerTest, Test) { float us = timer.MicroSeconds(); float ms = timer.MilliSeconds(); - // Time should be at least accurate +- 10%. + // Time should be at least accurate +- 10%. (20% on Windows) +#ifndef _WIN32 EXPECT_NEAR(ns, 100000000, 10000000); EXPECT_NEAR(us, 100000, 10000); EXPECT_NEAR(ms, 100, 10); +#else + EXPECT_NEAR(ns, 100000000, 20000000); + EXPECT_NEAR(us, 100000, 20000); + EXPECT_NEAR(ms, 100, 20); +#endif // Test restarting the clock. timer.Start();
Adding ATEN_NO_TEST option to root level cmake for propagation to aten Summary: Pull Request resolved:
@@ -56,6 +56,7 @@ include(CMakeDependentOption) option(BUILD_TORCH "Build Torch" OFF) option(BUILD_CAFFE2 "Build Caffe2" ON) option(BUILD_ATEN "Build ATen" OFF) +option(ATEN_NO_TEST "Do not build ATen test binaries" OFF) option(BUILD_BINARY "Build C++ binaries" ON) option(BUILD_DOCS "Build Caffe2 documentation" OFF) option(BUILD_CUSTOM_PROTOBUF "Build and use Caffe2's own protobuf under third_party" ON)
Cleanup bench script a bit. [skip ci]
@@ -29,13 +29,13 @@ def timed(fn): N = 10 for i in range(N): start = time.time() - fn(*args, **kwargs) + fn(i, *args, **kwargs) times.append(time.time() - start) - print(fn.__name__, round(sum(times) / N, 3)) + print('%0.2f ... %s' % (round(sum(times) / N, 2), fn.__name__)) return inner -def populate_register(n): - for i in range(n): +def populate_register(s, n): + for i in range(s, n): Register.create(value=i) def populate_collections(n, n_i): @@ -45,56 +45,62 @@ def populate_collections(n, n_i): Item.create(collection=c, name=str(j)) @timed -def insert(): +def insert(i): with db.atomic(): - populate_register(1000) + populate_register((i * 1000), (i + 1) * 1000) @timed -def batch_insert(): +def batch_insert(i): with db.atomic(): - it = range(1000) + it = range(i * 1000, (i + 1) * 1000) for i in db.batch_commit(it, 100): - Register.insert(value=i + 100000).execute() + Register.insert(value=i).execute() @timed -def bulk_insert(): +def bulk_insert(i): with db.atomic(): - for i in range(0, 1000, 100): + for i in range(i * 1000, (i + 1) * 1000, 100): data = [(j,) for j in range(i, i + 100)] Register.insert_many(data, fields=[Register.value]).execute() @timed -def select(): +def bulk_create(i): + with db.atomic(): + data = [Register(value=i) for i in range(i * 1000, (i + 1) * 1000)] + Register.bulk_create(data, batch_size=100) + +@timed +def select(i): query = Register.select() for row in query: pass @timed -def select_related_dbapi_raw(): +def select_related_dbapi_raw(i): query = Item.select(Item, Collection).join(Collection) cursor = db.execute(query) for row in cursor: pass @timed -def insert_related(): +def insert_related(i): with db.atomic(): populate_collections(30, 35) @timed -def select_related(): +def select_related(i): query = Item.select(Item, Collection).join(Collection) for item in query: pass @timed -def select_related_left(): +def select_related_left(i): query = Collection.select(Collection, Item).join(Item, JOIN.LEFT_OUTER) for collection in query: pass @timed -def select_related_dicts(): +def select_related_dicts(i): query = Item.select(Item, Collection).join(Collection).dicts() for row in query: pass @@ -106,8 +112,13 @@ if __name__ == '__main__': insert_related() Register.delete().execute() batch_insert() + assert Register.select().count() == 10000 Register.delete().execute() bulk_insert() + assert Register.select().count() == 10000 + Register.delete().execute() + bulk_create() + assert Register.select().count() == 10000 select() select_related() select_related_left()
Implement seeding Fix
@@ -190,13 +190,13 @@ class AbstractEnv(gym.Env): """ Reset the environment to it's initial configuration - :param seed: not implemented + :param seed: The seed that is used to initialize the environment's PRNG :param options: Allows the environment configuration to specified through `options["config"]` :return: the observation of the reset state """ + super().reset(seed=seed, options=options) if options and "config" in options: self.configure(options["config"]) - self.update_metadata() self.define_spaces() # First, to set the controlled vehicle class depending on action space self.time = self.steps = 0
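A minimal sketch of the seeding contract the change above adopts, assuming Gymnasium is installed (recent gym versions expose the same pattern): delegating to the base reset seeds self.np_random, so any subclass randomness becomes reproducible for a given seed.

import gymnasium as gym

class TinyEnv(gym.Env):
    def reset(self, *, seed=None, options=None):
        # The base class seeds self.np_random when a seed is given.
        super().reset(seed=seed, options=options)
        start = int(self.np_random.integers(0, 10))
        return start, {}

env = TinyEnv()
a, _ = env.reset(seed=42)
b, _ = env.reset(seed=42)
assert a == b  # same seed, same initial state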
Updated DE capacity. Updated installed net capacity in Germany. Source: Fraunhofer ISE (last update: 2.05.19)
] ], "capacity": { - "biomass": 7720, - "coal": 45380, - "gas": 29630, + "biomass": 7740, + "coal": 44910, + "gas": 29390, "geothermal": 38, - "hydro": 5500, - "hydro storage": 9440, + "hydro": 4800, + "hydro storage": 9600, "nuclear": 9516, "oil": 4300, - "solar": 45550, + "solar": 47200, "unknown": 3137, - "wind": 59240 + "wind": 59830 }, "contributors": [ "https://github.com/corradio"
Remove HAVE_ROBOHAT option, which is no longer used. It was redundant with the DRIVE_TRAIN_TYPE and CONTROLLER_TYPE options.
@@ -198,7 +198,6 @@ IMU_DLP_CONFIG = 0 # Digital Lowpass Filter setting (0:250Hz, 1:184 HAVE_SOMBRERO = False #set to true when using the sombrero hat from the Donkeycar store. This will enable pwm on the hat. #ROBOHAT MM1 -HAVE_ROBOHAT = False # set to true when using the Robo HAT MM1 from Robotics Masters. This will change to RC Control. MM1_STEERING_MID = 1500 # Adjust this value if your car cannot run in a straight line MM1_MAX_FORWARD = 2000 # Max throttle to go fowrward. The bigger the faster MM1_STOPPED_PWM = 1500
Fixed a test so that it passes on Chrome and Firefox. It was failing on Firefox because the keyword returns a FirefoxWebElement object on Firefox and a WebElement on Chrome.
@@ -97,7 +97,11 @@ Get Webelement (singlular) [Setup] Go to setup home ${element}= Get webelement A:breadcrumb:Home - Should be true $element.__class__.__name__=="WebElement" + # Different browsers return different classes of objects so we + # can't easily do a check for the returned object type that works + # for all browsers. We'll just have to assume that if the element + # isn't None then it's a web element + Should be true $element is not None Get Webelements (plural) - no matching elements
Update plot_brainstorm_phantom_elekta.py closes
@@ -7,16 +7,13 @@ Brainstorm Elekta phantom dataset tutorial ========================================== Here we compute the evoked from raw for the Brainstorm Elekta phantom -tutorial dataset. For comparison, see [1]_ and: +tutorial dataset. For comparison, see :footcite:`TadelEtAl2011` and: https://neuroimage.usc.edu/brainstorm/Tutorials/PhantomElekta References ---------- -.. [1] Tadel F, Baillet S, Mosher JC, Pantazis D, Leahy RM. - Brainstorm: A User-Friendly Application for MEG/EEG Analysis. - Computational Intelligence and Neuroscience, vol. 2011, Article ID - 879716, 13 pages, 2011. doi:10.1155/2011/879716 +.. footbibliography:: """ # sphinx_gallery_thumbnail_number = 9
misc/file_reader: minor reformatting TN:
@@ -30,6 +30,10 @@ procedure Main is New_Line; end Put_Title; + ----------- + -- Parse -- + ----------- + procedure Parse (Filename, Charset : String) is begin U := Ctx.Get_From_File (Filename, Charset); @@ -47,6 +51,7 @@ begin Put_Line ("main.adb: Starting..."); -- Create a context with our file reader + declare FR : constant File_Reader_Reference := Create_File_Reader_Reference (My_File_Reader'(null record)); @@ -55,6 +60,7 @@ begin end; -- Check the file reader is used appropriately when reading files + Put_Title ("Parsing foo.txt"); Parse ("foo.txt", ""); New_Line;
Add support for using loopback devices as OSDs. This is particularly useful in CI environments where you don't have the option of adding extra devices or volumes to the host. It is also a simple change to support loopback devices.
# partition. - name: activate osd(s) when device is a disk - command: ceph-disk activate {{ item | regex_replace('^(\/dev\/cciss\/c[0-9]{1}d[0-9]{1})$', '\\1p') }}1 + command: ceph-disk activate "{{ item | regex_replace('^(\/dev\/cciss\/c[0-9]{1}d[0-9]{1})$', '\1p') | regex_replace('^(\/dev\/loop[0-9]{1})$', '\1p') }}1" with_items: - "{{ devices|unique }}" changed_when: false - not dmcrypt - name: activate osd(s) when device is a disk (dmcrypt) - command: ceph-disk activate --dmcrypt {{ item | regex_replace('^(\/dev\/cciss\/c[0-9]{1}d[0-9]{1})$', '\\1p') }}1 + command: ceph-disk activate --dmcrypt "{{ item | regex_replace('^(\/dev\/cciss\/c[0-9]{1}d[0-9]{1})$', '\1p') | regex_replace('^(\/dev\/loop[0-9]{1})$', '\1p') }}1" with_items: - "{{ devices|unique }}" changed_when: false
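A note on the backreference tweak above: Ansible's regex_replace filter is backed by Python's re module, where a single-backslash group reference suffices inside the template string. A small sketch of the loopback case using re.sub directly; the device name is illustrative:

import re

device = "/dev/loop0"
# '\1p' re-emits the matched device name with the partition prefix letter;
# the task then appends the partition number (the trailing "1").
partition_base = re.sub(r"^(/dev/loop[0-9]{1})$", r"\1p", device)
print(partition_base + "1")  # /dev/loop0p1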
Then expressions: create a local scope for the "then variable" TN:
@@ -9,9 +9,9 @@ from langkit.compiled_types import ( from langkit.diagnostics import check_source_language from langkit.expressions.analysis_units import AnalysisUnitType from langkit.expressions.base import ( - AbstractExpression, AbstractVariable, LiteralExpr, No, PropertyDef, - ResolvedExpression, render, construct, BuiltinCallExpr, BasicExpr, - attr_call + AbstractExpression, AbstractVariable, BasicExpr, BindingScope, + BuiltinCallExpr, LiteralExpr, No, PropertyDef, ResolvedExpression, + attr_call, construct, render ) from langkit.expressions.envs import EmptyEnv from langkit.utils import assert_type @@ -361,11 +361,12 @@ class Then(AbstractExpression): pretty_name = 'Then' def __init__(self, expr, var_expr, then_expr, default_expr, - abstract_expr=None): + then_scope, abstract_expr=None): self.expr = expr self.var_expr = var_expr self.then_expr = then_expr self.default_expr = default_expr + self.then_scope = then_scope self.static_type = self.then_expr.type super(Then.Expr, self).__init__('Result_Var', @@ -438,9 +439,6 @@ class Then(AbstractExpression): self.then_expr = self.then_fn(self.var_expr) def construct(self): - # Add var_expr to the scope for this Then expression - PropertyDef.get_scope().add(self.var_expr.local_var) - # Accept as a prefix: # * any pointer, since it can be checked against "null"; # * any Struct, since its "Is_Null" field can be checked. @@ -448,7 +446,13 @@ class Then(AbstractExpression): lambda cls: cls.is_ptr or issubclass(cls, Struct)) self.var_expr.set_type(expr.type) + # Create a then-expr specific scope to restrict the span of the "then" + # variable in the debugger. + with PropertyDef.get_scope().new_child() as then_scope: + then_scope.add(self.var_expr.local_var) then_expr = construct(self.then_expr) + var_expr = construct(self.var_expr) + then_expr = BindingScope(then_expr, [var_expr], scope=then_scope) # Affect default value to the fallback expression. For the moment, # only booleans and structs are handled. @@ -483,7 +487,7 @@ class Then(AbstractExpression): default_expr = construct(self.default_val, then_expr.type) return Then.Expr(expr, construct(self.var_expr), then_expr, - default_expr) + default_expr, then_scope) def __repr__(self): return "<Then {}: {} {}>".format(self.expr, self.var_expr,
Remove pydocstyle version restriction Ignore the newly added error D999 for __all__ in pywikibot/__init__.py.[1] This patch also fixes the current flake8 InvocationError. [1]:
@@ -60,7 +60,7 @@ commands = basepython = python2.7 deps = flake8 pyflakes >= 1.1 - pydocstyle == 2.0.0 + pydocstyle hacking flake8-docstrings>=1.1.0 flake8-per-file-ignores @@ -200,6 +200,8 @@ per-file-ignores = scripts/makecat.py : D103 scripts/interwiki.py : P102 pywikibot/__init__.py : P103 + # pydocstyle cannot handle multiple __all__ variables + pywikibot/__init__.py : D999 # valid N805 naming convention exceptions pywikibot/userinterfaces/terminal_interface.py : N814 # invalidly detected as {} format string:
Some scripts in GtBurst need to be made executable. This will hopefully be fixed in fermitools, but for now it is fixed here at run time.
@@ -582,8 +582,8 @@ class TransientLATDataBuilder(object):
         """
         This builds the cmd string for the script
         """
-
-        cmd_str = '%s %s' % (os.path.join('fermitools', 'GtBurst', 'scripts', 'doTimeResolvedLike.py'),
+        executable = os.path.join('fermitools', 'GtBurst', 'scripts', 'doTimeResolvedLike.py')
+        cmd_str = '%s %s' % (executable,
                              self._triggername)

         for k, v in self._parameters.items():
@@ -618,7 +618,21 @@ class TransientLATDataBuilder(object):
         # located. This should be the first entry... might break in the future!

         site_pkg = site.getsitepackages()[0]
+
         cmd = os.path.join(site_pkg, cmd)
+        executable = cmd.split()[0]
+        gtapp_mp_dir = os.path.join(site_pkg,'fermitools', 'GtBurst', 'gtapps_mp')
+        executables = [
+            executable,
+            os.path.join(gtapp_mp_dir, 'gtdiffrsp_mp.py'),
+            os.path.join(gtapp_mp_dir, 'gtexpmap_mp.py'),
+            os.path.join(gtapp_mp_dir, 'gtltcube_mp.py'),
+            os.path.join(gtapp_mp_dir, 'gttsmap_mp.py'),
+        ]
+        for _e in executables:
+            print ("Changing permission to %s" % _e)
+            os.chmod(_e, 0o755)

         log.info('About to run the following command:\n%s' % cmd)

         # see what we already have
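A standalone sketch of the permission change performed above; the path is hypothetical, and the stat-flag form is equivalent to the literal 0o755 when starting from a 0o644 file:

import os
import stat

script = "GtBurst/scripts/doTimeResolvedLike.py"  # hypothetical path
# 0o755 is rwxr-xr-x; alternatively, OR the execute bits onto the mode.
current = os.stat(script).st_mode
os.chmod(script, current | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)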
[util/popup] add generic "close" on root menu add a "close" entry for the root menu of all popup menus (if they are not automatically destroyed when leaving the menu). fixes
@@ -22,6 +22,9 @@ class menu(object): self._root.withdraw() self._menu = tk.Menu(self._root, tearoff=0) self._menu.bind("<FocusOut>", self.__on_focus_out) + + self.add_menuitem("close", self.__on_focus_out) + self.add_separator() else: self._root = parent.root() self._root.withdraw()
Fix error when trying to delete notification secret Closes ansible/galaxy-issues#287
@@ -829,10 +829,11 @@ class NotificationSecretDetail(RetrieveUpdateDestroyAPIView): serializer = self.get_serializer(instance=instance) return Response(serializer.data, status=status.HTTP_202_ACCEPTED) - def destroy(self): + def destroy(self, request, *args, **kwargs): obj = super(NotificationSecretDetail, self).get_object() obj.delete() - return Response(dict(detail="Requested secret deleted."), status=status.HTTP_202_ACCEPTED) + return Response(dict(detail="Requested secret deleted."), + status=status.HTTP_202_ACCEPTED) class NotificationList(ListCreateAPIView):
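The failure fixed above is a dispatch-signature mismatch: DRF's DestroyModelMixin calls destroy(request, *args, **kwargs), so an override taking only self raises TypeError on DELETE. A framework-free sketch of that mechanism; the class names are illustrative:

class Base:
    def delete(self, request, *args, **kwargs):
        # DRF's DestroyModelMixin dispatches like this:
        return self.destroy(request, *args, **kwargs)

class Broken(Base):
    def destroy(self):                      # wrong signature
        return "deleted"

class Fixed(Base):
    def destroy(self, request, *args, **kwargs):
        return "deleted"

print(Fixed().delete("request"))            # deleted
try:
    Broken().delete("request")
except TypeError as exc:
    print(exc)  # destroy() takes 1 positional argument but 2 were given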
skip ctc_loss test on Windows Summary: Pull Request resolved: It is flaky on Windows only, so disable for now: Test Plan: Imported from OSS
@@ -5445,6 +5445,8 @@ class TestAutogradDeviceType(TestCase): gradgradcheck(where, [cond, x, y], [torch.randn(5, 5, 5, device=device)]) @skipCUDAIfRocm + @unittest.skipIf(IS_WINDOWS, """Test is flaky on Windows: + https://github.com/pytorch/pytorch/issues/34870""") def test_ctc_loss(self, device): batch_size = 64 num_labels = 101
ebuild.profiles: parent_paths: toss unnecessary RuntimeError. Now that the load/invoker doesn't catch nearly all exception types, this should raise and produce a traceback by default if there are real issues.
@@ -209,8 +209,6 @@ class ProfileNode(object, metaclass=caching.WeakInstMeta): f'unknown repo {repo_id!r}' ) continue - except (TypeError, AttributeError): - raise RuntimeError("repo mapping is unset") l.append((abspath(pjoin(location, 'profiles', path)), line, lineno)) else: l.append((abspath(pjoin(self.path, repo_id)), line, lineno))
Fixed pixmap scale error in Qt6: needed int, not float.
@@ -600,7 +600,7 @@ class DialogCodeInAV(QtWidgets.QDialog): class DialogCodeInImage(QtWidgets.QDialog): """ View coded section in original image. - Called by: reports.DialogReportCodes qhn results are produced + Called by: reports.DialogReportCodes, when results are produced """ app = None @@ -698,7 +698,7 @@ class DialogCodeInImage(QtWidgets.QDialog): if self.pixmap is None: return self.scale = (self.ui.horizontalSlider.value() + 1) / 100 - height = self.scale * self.pixmap.height() + height = int(self.scale * self.pixmap.height()) pixmap = self.pixmap.scaledToHeight(height, QtCore.Qt.TransformationMode.FastTransformation) pixmap_item = QtWidgets.QGraphicsPixmapItem(pixmap) pixmap_item.setPos(0, 0)
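A minimal sketch of the Qt 6 strictness the fix above addresses, assuming PyQt6: the Qt 5 bindings silently truncated floats, while Qt 6 bindings raise TypeError for int parameters, hence the explicit int() coercion. The pixmap size and scale factor are illustrative:

from PyQt6.QtGui import QGuiApplication, QPixmap

app = QGuiApplication([])  # QPixmap requires a GUI application instance
pixmap = QPixmap(200, 100)
scale = 0.35
# Passing the float 35.0 to scaledToHeight raises TypeError under PyQt6,
# so coerce to int exactly as the fix does.
scaled = pixmap.scaledToHeight(int(scale * pixmap.height()))
print(scaled.height())  # 35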
Can move port again Fixes
@@ -78,7 +78,8 @@ class ProxyPortItem(Presentation[sysml.ProxyPort], HandlePositionUpdate, Named): return cinfo.connected.port_side(cinfo.port) if cinfo else None def dimensions(self): - return Rectangle(-8, -8, 16, 16) + x, y = self._handles[0].pos + return Rectangle(x - 8, y - 8, 16, 16) def point(self, x, y): return distance_rectangle_point(self.dimensions(), (x, y))
Add filterable option to IronicInspectorLog Resolves:
@@ -195,7 +195,7 @@ class Specs(SpecSet): iptables = RegistryPoint() ipv4_neigh = RegistryPoint() ipv6_neigh = RegistryPoint() - ironic_inspector_log = RegistryPoint() + ironic_inspector_log = RegistryPoint(filterable=True) iscsiadm_m_session = RegistryPoint() jboss_domain_server_log = RegistryPoint(multi_output=True, filterable=True) jboss_standalone_server_log = RegistryPoint(multi_output=True, filterable=True)
Corrected date to be Jun 1st. Both the Inquirer and WHYY say "Monday", which was Jun 1st; I was mistaken in my earlier commit.
@@ -80,7 +80,7 @@ At least 6 officers surround a handcuffed man who says "I can't breathe". Office * https://twitter.com/greg_doucette/status/1268200800649707526 -### Police shove protestors and strike man across the face with a baton | May 31st +### Police shove protestors and strike man across the face with a baton | Jun 1st Police shove a group of protestors. A particularly aggressive police officer (fat, white shirt) runs and shoves someone, then grabs another man. He is then approached by a young man with long hair whom he strikes with his baton, at full strength, in the head before jumping on him and cuffing him. He has assistance from another officer who presses the man's face to the pavement using his knee on his neck. Twitter accounts misreport the individual as female; later accounts in media report the individual is a male Temple University engineering student.