message | diff
---|---|
Fix ONNX Interpolate
Summary: Pull Request resolved: | @@ -48,7 +48,8 @@ def _interpolate(name, dim, interpolate_mode):
align_corners = sym_help._maybe_get_scalar(align_corners)
output_size = sym_help._maybe_get_const(output_size, 'is')
if sym_help._is_value(output_size):
- offsets = g.op("Constant", value_t=torch.ones(offset, dtype=torch.int64))
+ offsets = g.op("Constant", value_t=torch.ones(2, dtype=torch.int64))
+ output_size = g.op("Cast", output_size, to_i=sym_help.cast_pytorch_to_onnx["Long"])
output_size = g.op("Concat", offsets, output_size, axis_i=0)
else:
output_size = [1 if i < 2 else output_size[-(dim - i)] for i in range(0, dim)]
|
Fix loop examples after Accelerator API removals
Summary:
### New commit log messages
Fix loop examples after Accelerator API removals | @@ -26,7 +26,7 @@ install_requires =
petastorm>=0.9.0
parameterized>=0.7.4
pyspark==3.1.1
- pytorch-lightning @ git+https://github.com/PyTorchLightning/pytorch-lightning@fa0ed17f8
+ pytorch-lightning @ git+https://github.com/PyTorchLightning/pytorch-lightning@98de69b14
ruamel.yaml>=0.15.99
scipy>=1.3.1
tensorboard>=1.14
|
add releaseNotes to TC
add releaseNotes to TC | @@ -296,3 +296,4 @@ script:
description: confidence in indicator, in scale of 1-100
description: Add a new indicator to ThreatConnect
dockerimage: demisto/threatconnect-sdk
+releaseNotes: "Fix proxy condition in TC"
|
Rewrite function comparison paragraph
It isn't about modules. | @@ -53,9 +53,10 @@ Parsl determines app equivalence by storing the a hash
of the app function. Thus, any changes to the app code (e.g.,
its signature, its body, or even the docstring within the body)
will invalidate cached values.
-Further, Parsl does not traverse imported modules, and thus
-changes to modules used by an app will not invalidate cached
-values.
+
+However, Parsl does not traverse the call graph of the app function,
+so changes inside functions called by an app will not invalidate
+cached values.
Invocation equivalence
|
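To illustrate the Parsl caching behaviour described in the entry above, here is a minimal sketch. It assumes Parsl's `python_app` decorator with `cache=True`; the helper and app names are made up, and actually running the app would additionally require loading a Parsl config.

```python
from parsl import python_app

def scale(x):
    # Editing this helper does NOT invalidate cached results of double(),
    # because Parsl only hashes the app function itself, not its call graph.
    return 2 * x

@python_app(cache=True)
def double(x):
    # Any change to double() itself (signature, body, or docstring)
    # changes its hash and therefore invalidates cached values.
    return scale(x)
```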
[cleanup] remove dontTouchRegexes
dontTouchRegexes just points to result. Keeping this double access to
result makes the code uglier than changing a single line that has been
unchanged since 2008. | @@ -310,8 +310,6 @@ def _get_regexes(keys, site):
_create_default_regexes()
result = []
- # 'dontTouchRegexes' exist to reduce git blame only.
- dontTouchRegexes = result
for exc in keys:
if isinstance(exc, UnicodeType):
@@ -342,10 +340,10 @@ def _get_regexes(keys, site):
result.append(_regex_cache[exc])
# handle alias
if exc == 'source':
- dontTouchRegexes.append(_tag_regex('syntaxhighlight'))
+ result.append(_tag_regex('syntaxhighlight'))
else:
# assume it's a regular expression
- dontTouchRegexes.append(exc)
+ result.append(exc)
return result
|
list_types_ada.mako: use unchecked conversions so as not to rely on tagged types
TN: | Or_Null : Boolean := False) return ${element_type.name}
is
function Absolute_Get
- (L : ${type_name}; Index : Integer)
- return ${element_type.name}
- is
- (${element_type.name} (L.Nodes (Index + 1)));
+ (L : ${type_name}; Index : Integer) return ${element_type.name};
-- L.Nodes is 1-based but Index is 0-based
- function Length (Node : ${type_name}) return Natural is (Node.Count);
+ ------------------
+ -- Absolute_Get --
+ ------------------
+
+ function Absolute_Get
+ (L : ${type_name}; Index : Integer) return ${element_type.name}
+ is
+ GL : constant ${ctx.generic_list_type.name} :=
+ ${ctx.generic_list_type.internal_conversion(list_type, 'L')};
+ begin
+ return ${element_type.internal_conversion(T.root_node,
+ 'GL.Nodes (Index + 1)')};
+ end Absolute_Get;
+
+ function Length (Node : ${type_name}) return Natural is
+ (${ctx.generic_list_type.internal_conversion(list_type, 'Node')}.Count);
-- Wrapper around the Length primitive to get the compiler happy for the
-- the package instantiation below.
Result : ${element_type.name};
begin
- if Relative_Get (${type_name} (Node), Index, Result) then
+ if Relative_Get (Node, Index, Result) then
return Result;
elsif Or_Null then
return null;
function Item
(Node : access ${value_type}'Class; Index : Positive)
return ${element_type.name}
- is (${element_type.name} (Node.Child (Index)));
+ is
+ Result : constant ${root_node_type_name} :=
+ Child (${T.root_node.internal_conversion(list_type, 'Node')}, Index);
+ begin
+ return ${element_type.internal_conversion(T.root_node, 'Result')};
+ end Item;
</%def>
|
Convert the teardown functions to async
This supports the existing synchronous usage, whilst also supporting
async teardown functions. It follows the same pattern used elsewhere in
Quart and was originally missed in an oversight. | @@ -1159,7 +1159,8 @@ class Quart(PackageStatic):
func: The teardown request function itself.
name: Optional blueprint key name.
"""
- self.teardown_request_funcs[name].append(func)
+ handler = ensure_coroutine(func)
+ self.teardown_request_funcs[name].append(handler)
return func
def teardown_websocket(self, func: Callable, name: AppOrBlueprintKey=None) -> Callable:
@@ -1177,7 +1178,8 @@ class Quart(PackageStatic):
func: The teardown websocket function itself.
name: Optional blueprint key name.
"""
- self.teardown_websocket_funcs[name].append(func)
+ handler = ensure_coroutine(func)
+ self.teardown_websocket_funcs[name].append(handler)
return func
def teardown_appcontext(self, func: Callable) -> Callable:
@@ -1195,7 +1197,8 @@ class Quart(PackageStatic):
func: The teardown function itself.
name: Optional blueprint key name.
"""
- self.teardown_appcontext_funcs.append(func)
+ handler = ensure_coroutine(func)
+ self.teardown_appcontext_funcs.append(handler)
return func
def register_blueprint(self, blueprint: Blueprint, url_prefix: Optional[str]=None) -> None:
@@ -1256,7 +1259,7 @@ class Quart(PackageStatic):
functions = chain(functions, self.teardown_request_funcs[blueprint]) # type: ignore
for function in functions:
- function(exc=exc)
+ await function(exc=exc)
await request_tearing_down.send(self, exc=exc)
async def do_teardown_websocket(
@@ -1279,13 +1282,13 @@ class Quart(PackageStatic):
functions = chain(functions, self.teardown_websocket_funcs[blueprint]) # type: ignore
for function in functions:
- function(exc=exc)
+ await function(exc=exc)
await websocket_tearing_down.send(self, exc=exc)
async def do_teardown_appcontext(self, exc: Optional[BaseException]) -> None:
"""Teardown the app (context), calling the teardown functions."""
for function in self.teardown_appcontext_funcs:
- function(exc)
+ await function(exc)
await appcontext_tearing_down.send(self, exc=exc)
def app_context(self) -> AppContext:
|
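The diff above relies on Quart's `ensure_coroutine` helper, whose body is not shown in this entry. A minimal sketch of such a wrapper (an assumption, not necessarily Quart's actual implementation) could look like this:

```python
import asyncio
from functools import wraps

def ensure_coroutine(func):
    # Coroutine functions are returned unchanged; plain callables are wrapped
    # so that the caller can always `await` the registered teardown handler.
    if asyncio.iscoroutinefunction(func):
        return func

    @wraps(func)
    async def wrapper(*args, **kwargs):
        return func(*args, **kwargs)

    return wrapper
```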
Repair Nuke-integration
Regression from | @@ -243,8 +243,8 @@ def _install_menu():
creator,
# publish,
workfiles,
- cbloader,
- cbsceneinventory,
+ loader,
+ sceneinventory,
contextmanager
)
# for now we are using `lite` version
@@ -263,9 +263,9 @@ def _install_menu():
menu.addSeparator()
menu.addCommand("Create...", creator.show)
- menu.addCommand("Load...", cbloader.show)
+ menu.addCommand("Load...", loader.show)
menu.addCommand("Publish...", publish.show)
- menu.addCommand("Manage...", cbsceneinventory.show)
+ menu.addCommand("Manage...", sceneinventory.show)
menu.addSeparator()
menu.addCommand("Work Files...",
|
Update avemaria.txt
Nanocore instead. | @@ -215,13 +215,6 @@ craftedfollowing.duckdns.org
ventm.warzonedns.com
-# Reference: https://www.virustotal.com/gui/file/883562a2a36809c07e2368a7d0fd47e8e8fc23a839837f1ebe64b86dcc3209d5/detection
-
-79.134.225.74:2404
-79.134.225.89:2404
-behco.duckdns.org
-paris4real111.ddnsfree.com
-
# Reference: https://www.virustotal.com/gui/file/dbfe4a369975251fd14e5d160f2edde33942723a9bb3b4e6b5f445dd5b9dc549/detection
75.127.5.164:4741
|
fix: Direct3D includes are not exposed
The include paths for the D3D11 and D3D12 backends were not exposed. As a result, consumers of the recipes could not use the D3D11/12 backends.
The fix is to simply add the missing include paths to `self.cpp_info` in `package_info()`.
Fixes | @@ -184,6 +184,8 @@ class DiligentCoreConan(ConanFile):
self.cpp_info.includedirs.append(os.path.join("include", "DiligentCore", "Platforms", "Basic", "interface"))
self.cpp_info.includedirs.append(os.path.join("include", "DiligentCore", "Platforms", "Linux", "interface"))
self.cpp_info.includedirs.append(os.path.join("include", "DiligentCore", "Graphics", "GraphicsEngine", "interface"))
+ self.cpp_info.includedirs.append(os.path.join("include", "DiligentCore", "Graphics", "GraphicsEngineD3D11", "interface"))
+ self.cpp_info.includedirs.append(os.path.join("include", "DiligentCore", "Graphics", "GraphicsEngineD3D12", "interface"))
self.cpp_info.includedirs.append(os.path.join("include", "DiligentCore", "Graphics", "GraphicsEngineVulkan", "interface"))
self.cpp_info.includedirs.append(os.path.join("include", "DiligentCore", "Graphics", "GraphicsEngineOpenGL", "interface"))
self.cpp_info.includedirs.append(os.path.join("include", "DiligentCore", "Graphics", "GraphicsAccessories", "interface"))
|
[easy] fix windows scheduler tests
Summary: Cron is not a thing on windows
Test Plan: BK + Azure
Reviewers: johann, alangenfeld, prha | pipeline,
repository,
schedule,
+ seven,
solid,
)
from dagster.core.definitions.job import RunRequest
@@ -1301,6 +1302,7 @@ def test_multi_runs_missing_run_key(external_repo_context, capfd):
)
+@pytest.mark.skipif(seven.IS_WINDOWS, reason="Cron doesn't work on windows")
def test_run_with_hanging_cron_schedules():
# Verify that the system will prompt you to wipe your schedules with the SystemCronScheduler
# before you can switch to DagsterDaemonScheduler
|
quote NUMPY_INCLUDE_DIR
Summary:
When NUMPY_INCLUDE_DIR contains a space character (e.g. "C:\Program Files (x86)\Microsoft Visual Studio\..."), cmake cannot receive the correct path name.
Pull Request resolved: | @@ -227,7 +227,7 @@ goto:eof
-DUSE_DISTRIBUTED=%USE_DISTRIBUTED% ^
-DUSE_FBGEMM=%USE_FBGEMM% ^
-DUSE_NUMPY=%USE_NUMPY% ^
- -DNUMPY_INCLUDE_DIR=%NUMPY_INCLUDE_DIR% ^
+ -DNUMPY_INCLUDE_DIR="%NUMPY_INCLUDE_DIR%" ^
-DUSE_NNPACK=%USE_NNPACK% ^
-DUSE_LEVELDB=%USE_LEVELDB% ^
-DUSE_LMDB=%USE_LMDB% ^
|
Add try/catch and Promise resolve/reject to js sdk
This handles cases when the apply method doesn't return a
promise, or apply throws an exception. | @@ -103,8 +103,13 @@ class TransactionProcessor {
candidate.versions.includes(txnHeader.familyVersion))
if (handler) {
- handler
- .apply(request, context)
+ let applyPromise
+ try {
+ applyPromise = Promise.resolve(handler.apply(request, context))
+ } catch(err) {
+ applyPromise = Promise.reject(err)
+ }
+ applyPromise
.then(() =>
TpProcessResponse.create({
status: TpProcessResponse.Status.OK
|
Texas: added video evidence
Only articles were submitted so I have added the video itself. | @@ -6,6 +6,7 @@ A 20-year-old black man is hospitalized in critical condition after police shot
**Links**
+* https://www.reddit.com/r/Bad_Cop_No_Donut/comments/gwd37n/a_black_20yearold_student_justin_howell_is_in/
* https://www.texastribune.org/2020/06/01/austin-police-george-floyd-mike-ramos/
* https://www.kvue.com/article/news/local/austin-protester-police-struck-by-less-lethal-bean-bag-round/269-430f90a2-b6c1-4ee3-9d9c-639faf132100
|
Support (and prefer) per-controller macaroon files
Juju 2.2-beta4 moved to per-controller macaroon files, which breaks auth.
Fixes | @@ -11,6 +11,7 @@ import subprocess
import websockets
from concurrent.futures import CancelledError
from http.client import HTTPSConnection
+from pathlib import Path
import asyncio
import yaml
@@ -436,7 +437,7 @@ class Connection:
accounts = jujudata.accounts()[controller_name]
username = accounts['user']
password = accounts.get('password')
- macaroons = get_macaroons() if not password else None
+ macaroons = get_macaroons(controller_name) if not password else None
return await cls.connect(
endpoint, None, username, password, cacert, macaroons, loop)
@@ -470,7 +471,7 @@ class Connection:
password = accounts.get('password')
models = jujudata.models()[controller_name]
model_uuid = models['models'][model_name]['uuid']
- macaroons = get_macaroons() if not password else None
+ macaroons = get_macaroons(controller_name) if not password else None
return await cls.connect(
endpoint, model_uuid, username, password, cacert, macaroons, loop)
@@ -545,17 +546,27 @@ class JujuData:
return yaml.safe_load(f)[key]
-def get_macaroons():
+def get_macaroons(controller_name=None):
"""Decode and return macaroons from default ~/.go-cookies
"""
+ cookie_files = []
+ if controller_name:
+ cookie_files.append('~/.local/share/juju/cookies/{}.json'.format(
+ controller_name))
+ cookie_files.append('~/.go-cookies')
+ for cookie_file in cookie_files:
+ cookie_file = Path(cookie_file).expanduser()
+ if cookie_file.exists():
try:
- cookie_file = os.path.expanduser('~/.go-cookies')
- with open(cookie_file, 'r') as f:
- cookies = json.load(f)
+ cookies = json.loads(cookie_file.read_text())
+ break
except (OSError, ValueError):
log.warn("Couldn't load macaroons from %s", cookie_file)
return []
+ else:
+ log.warn("Couldn't load macaroons from %s", ' or '.join(cookie_files))
+ return []
base64_macaroons = [
c['Value'] for c in cookies
|
Fix link to CommCare Cloud docs
(Also, "setup" is a noun. The verb is "set up" / "setting up".) | @@ -22,8 +22,8 @@ bundled [web application platform](https://github.com/dimagi/formplayer).
### More Information
+ To try CommCare you can use [this production instance of hosted CommCare](https://www.commcarehq.org/).
-+ To setup a local CommCare HQ developer environment, see [Setting up CommCare HQ for Developers](https://github.com/dimagi/commcare-hq/blob/master/DEV_SETUP.md).
-+ To setup a production CommCare HQ environment, check out [CommCare Cloud](https://dimagi.github.io/commcare-cloud/), our toolkit for deploying and maintaining CommCare servers.
++ For setting up a local CommCare HQ developer environment, see [Setting up CommCare HQ for Developers](https://github.com/dimagi/commcare-hq/blob/master/DEV_SETUP.md).
++ For setting up a production CommCare HQ environment, check out [CommCare Cloud](https://commcare-cloud.readthedocs.io/), our toolkit for deploying and maintaining CommCare servers.
+ Additional documentation is available on [ReadTheDocs](https://commcare-hq.readthedocs.io/).
+ We welcome contributions, see [Contributing](CONTRIBUTING.rst) for more.
+ Questions? Contact the CommCare community at our [forum](https://forum.dimagi.com/).
|
Update amazon.py
Fix a typo for the amazon instant video dataset | @@ -176,7 +176,7 @@ class AmazonInstantVideo(AmazonDataset):
def __init__(self, min_u_c=0, min_i_c=3, root_dir=None):
r"""Init AmazonInstantVideo Class."""
super().__init__(
- dataset_name="amazon-amazon-instant-video",
+ dataset_name="amazon-instant-video",
min_u_c=min_u_c,
min_i_c=min_i_c,
root_dir=root_dir,
|
Update settings.py
Some additional ```.%{suffix}%``` types that were seen in the wild (ITW). | @@ -100,7 +100,7 @@ SUSPICIOUS_HTTP_PATH_REGEXES = (
("potential web scan", r"inexistent_file_name\.inexistent|test-for-some-inexistent-file|long_inexistent_path|some-inexistent-website\.acu")
)
SUSPICIOUS_HTTP_REQUEST_PRE_CONDITION = ("?", "..", ".ht", "=", " ", "'")
-SUSPICIOUS_DIRECT_IP_URL_REGEX = r"\A[\w./-]*/[\w.]*\b(aarch|arm(\b|v?\d)|m68k|mips|mpsl|m1psel|powerpc|powerppc|ppc|x86|x32|x64|i586|i686|sparc|sh\b|wtf|yarn|zte)\Z"
+SUSPICIOUS_DIRECT_IP_URL_REGEX = r"\A[\w./-]*/[\w.]*\b(aarch|arm(\b|v?\d)|exploit|m68(\b|\w?)|mips|mpsl|m1psel|pcc|powerpc|powerppc|ppc|root|x86|x32|x64|i\d{1,2}\b|i386|i486|i586|i686|sparc|sh\b|wtf|yarn|zte)\Z"
SUSPICIOUS_PROXY_PROBE_PRE_CONDITION = ("probe", "proxy", "echo", "check")
SUSPICIOUS_HTTP_REQUEST_FORCE_ENCODE_CHARS = dict((_, _urllib.parse.quote(_)) for _ in "( )\r\n")
SUSPICIOUS_UA_REGEX = ""
|
Add set_issue_status_by_id
It is better to close the task by ID, since the status name can be changed :( | @@ -841,6 +841,15 @@ class Jira(AtlassianRestAPI):
transition_id = self.get_transition_id_to_status_name(issue_key, status_name)
return self.post(url, data={'transition': {'id': transition_id}})
+ def set_issue_status_by_id(self, issue_key, transition_id):
+ """
+ Setting status by transition_id
+ :param issue_key: str
+ :param transition_id: int
+ """
+ url = 'rest/api/2/issue/{issue_key}/transitions'.format(issue_key=issue_key)
+ return self.post(url, data={'transition': {'id': transition_id}})
+
def get_issue_status(self, issue_key):
url = 'rest/api/2/issue/{issue_key}?fields=status'.format(issue_key=issue_key)
return (self.get(url) or {}).get('fields').get('status').get('name')
|
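A hypothetical usage sketch of the new method — the URL, credentials, issue key and transition id below are made up for illustration:

```python
from atlassian import Jira

jira = Jira(url="https://jira.example.com", username="user", password="secret")

# Apply transition 31 to the issue, regardless of what the target status is named.
jira.set_issue_status_by_id("PROJ-123", 31)
```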
Add logging showing BotInfo before and after update in bot_management.bot_event
Review-Url: | import datetime
import hashlib
+import logging
from google.appengine.ext import ndb
@@ -354,7 +355,12 @@ def bot_event(
# Retrieve the previous BotInfo and update it.
info_key = get_info_key(bot_id)
- bot_info = info_key.get() or BotInfo(key=info_key)
+ bot_info = info_key.get()
+ if bot_info:
+ logging.info('Updating BotInfo: %s', bot_info)
+ else:
+ bot_info = BotInfo(key=info_key)
+ logging.info('Creating BotInfo: %s', bot_info)
bot_info.last_seen_ts = utils.utcnow()
bot_info.external_ip = external_ip
bot_info.authenticated_as = authenticated_as
@@ -376,6 +382,7 @@ def bot_event(
bot_info.lease_expiration_ts = kwargs['lease_expiration_ts']
if kwargs.get('machine_type') is not None:
bot_info.machine_type = kwargs['machine_type']
+ logging.info('BotInfo: %s', bot_info)
if event_type in ('request_sleep', 'task_update'):
# Handle this specifically. It's not much of an even worth saving a BotEvent
|
Visual Code: Handle pylint warning only given when running per file.
* That is how they do it, which makes pylint give this warning. Still we
want to be clean, so let's try to avoid it. | @@ -401,7 +401,7 @@ class TraceCollectionBase(CollectionTracingMixin):
@staticmethod
def signalChange(tags, source_ref, message):
- # This is monkey patched from another module.
+ # This is monkey patched from another module. pylint: disable=I0021,not-callable
signalChange(tags, source_ref, message)
def onUsedModule(self, module_name, module_relpath):
|
ec2: Move key file path and mode validation into separate function
Move the key file existence and mode validation from create() into a
separate function, so we can write unit tests for it and make create()
simpler. | @@ -2481,6 +2481,31 @@ def wait_for_instance(
return vm_
+def _validate_key_path_and_mode(key_filename):
+ if key_filename is None:
+ raise SaltCloudSystemExit(
+ 'The required \'private_key\' configuration setting is missing from the '
+ '\'ec2\' driver.'
+ )
+
+ if not os.path.exists(key_filename):
+ raise SaltCloudSystemExit(
+ 'The EC2 key file \'{0}\' does not exist.\n'.format(
+ key_filename
+ )
+ )
+
+ key_mode = str(
+ oct(stat.S_IMODE(os.stat(key_filename).st_mode))
+ )
+ if key_mode not in ('0400', '0600'):
+ raise SaltCloudSystemExit(
+ 'The EC2 key file \'{0}\' needs to be set to mode 0400 or 0600.\n'.format(
+ key_filename
+ )
+ )
+
+ return True
def create(vm_=None, call=None):
'''
@@ -2514,28 +2539,7 @@ def create(vm_=None, call=None):
if deploy:
# The private_key and keyname settings are only needed for bootstrapping
# new instances when deploy is True
- if key_filename is None:
- raise SaltCloudSystemExit(
- 'The required \'private_key\' configuration setting is missing from the '
- '\'ec2\' driver.'
- )
-
- if not os.path.exists(key_filename):
- raise SaltCloudSystemExit(
- 'The EC2 key file \'{0}\' does not exist.\n'.format(
- key_filename
- )
- )
-
- key_mode = str(
- oct(stat.S_IMODE(os.stat(key_filename).st_mode))
- )
- if key_mode not in ('0400', '0600'):
- raise SaltCloudSystemExit(
- 'The EC2 key file \'{0}\' needs to be set to mode 0400 or 0600.\n'.format(
- key_filename
- )
- )
+ _validate_key_path_and_mode(key_filename)
__utils__['cloud.fire_event'](
'event',
|
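A sketch of the kind of unit test this refactor enables, as mentioned in the commit message. The module path and pytest usage are assumptions, not part of the original change:

```python
import pytest
from salt.exceptions import SaltCloudSystemExit
from salt.cloud.clouds import ec2

def test_validate_key_path_and_mode_requires_key():
    # A missing 'private_key' setting should abort with SaltCloudSystemExit.
    with pytest.raises(SaltCloudSystemExit):
        ec2._validate_key_path_and_mode(None)

def test_validate_key_path_and_mode_missing_file(tmp_path):
    # A key file path that does not exist should also abort.
    with pytest.raises(SaltCloudSystemExit):
        ec2._validate_key_path_and_mode(str(tmp_path / "no_such_key.pem"))
```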
GDB helpers: fix generation when library short name is missing
TN: | @@ -1179,7 +1179,8 @@ class CompileCtx(object):
lib_name=lib_name,
astnode_names={node.name().lower
for node in self.astnode_types},
- prefix=self.short_name.lower or self.lib_name,
+ prefix=(self.short_name.lower
+ if self.short_name else lib_name),
))
# Add any sources in $lang_path/extensions/support if it exists
|
Fix errors property in Writer
It needs to return a serializable object. | import abc
import itertools
from collections import defaultdict
-from typing import List
+from typing import Any, Dict, List
from django.conf import settings
@@ -17,6 +17,10 @@ class Writer(abc.ABC):
"""Save the read contents to DB."""
raise NotImplementedError('Please implement this method in the subclass.')
+ def errors(self) -> List[Dict[Any, Any]]:
+ """Return errors."""
+ raise NotImplementedError('Please implement this method in the subclass.')
+
def group_by_class(instances):
groups = defaultdict(list)
@@ -97,9 +101,9 @@ class BulkWriter(Writer):
self._errors.extend(reader.errors)
@property
- def errors(self) -> List[FileParseException]:
+ def errors(self) -> List[Dict[Any, Any]]:
self._errors.sort(key=lambda e: e.line_num)
- return self._errors
+ return [error.dict() for error in self._errors]
def create(self, project: Project, user):
self.examples.save_label(project)
|
Specify the package name in setup.py
This should fix GitHub's issue with finding dependents for this project. | @@ -28,7 +28,6 @@ extras_require = {
}
metadata = {
- "name": "uplink",
"author": "P. Raj Kumar",
"author_email": "[email protected]",
"url": "https://uplink.readthedocs.io/",
@@ -58,4 +57,4 @@ metadata = {
metadata = dict(metadata, **about)
if __name__ == "__main__":
- setup(**metadata)
+ setup(name="uplink", **metadata)
|
Trivial change to IdleTomographyObservedRatesTable.
Shifts of "X below threshold" message left to the figure column
since that one is typically wider (make the table look a little
nicer). | @@ -135,7 +135,7 @@ class IdleTomographyObservedRatesTable(_ws.WorkspaceTable):
table.addrow(row_data, row_formatters)
if nBelowThreshold > 0:
- table.addrow( ["", "%d observed rates below %g" % (nBelowThreshold,rate_threshold)],
+ table.addrow( ["%d observed rates below %g" % (nBelowThreshold,rate_threshold), ""],
[None, None])
table.finish()
|
Do not use len to assert existence of attempts
It forces unnecessary evaluation of the length of attempts. | @@ -62,7 +62,7 @@ def log_user_login_failed(sender, credentials, request, **kwargs):
get_axes_cache().set(cache_hash_key, failures, cache_timeout)
# has already attempted, update the info
- if len(attempts):
+ if attempts:
for attempt in attempts:
attempt.get_data = '%s\n---------\n%s' % (
attempt.get_data,
|
Fix a failing test for row selection in `_get_selected_cells()`.
Rename auxiliary `find()` params:
`row` -> `in_row`
`col` -> `in_column`
Add `in_row` and `in_column` params to `findall()`.
Add param specs. | @@ -1703,7 +1703,7 @@ class Worksheet(object):
absolute_range_name(self.title)
)
- def _finder(self, func, query, col, row):
+ def _finder(self, func, query, in_row=None, in_column=None):
data = self.spreadsheet.values_get(absolute_range_name(self.title))
try:
@@ -1711,7 +1711,7 @@ class Worksheet(object):
except KeyError:
values = []
- cells = self._get_selected_cells(values, col, row)
+ cells = self._get_selected_cells(values, in_row, in_column)
if isinstance(query, basestring):
match = lambda x: x.value == query
@@ -1720,23 +1720,24 @@ class Worksheet(object):
return func(match, cells)
- def _get_selected_cells(self, values, col, row):
- """ Returns an array of cell objects.
- :param values: Array with row, colums and values
- :param col: Number of colum to find
- :param row: Number of row to find
+ def _get_selected_cells(self, values, in_row=None, in_column=None):
+ """Returns a list of ``Cell`` instances scoped by optional
+ ``in_row``` or ``in_column`` values (both one-based).
"""
- if col and row: raise TypeError("Either 'rows' or 'cols' should be specified.")
+ if in_row and in_column:
+ raise TypeError(
+ "Either 'in_row' or 'in_column' should be specified."
+ )
- if col:
+ if in_column:
return [
- Cell(row=i + 1, col=col, value=row[col])
+ Cell(row=i + 1, col=in_column, value=row[in_column - 1])
for i, row in enumerate(values)
]
- elif row:
+ elif in_row:
return [
- Cell(row=row, col=j + 1, value=value)
- for j, value in enumerate(row)
+ Cell(row=in_row, col=j + 1, value=value)
+ for j, value in enumerate(values[in_row - 1])
]
else:
return [
@@ -1745,26 +1746,32 @@ class Worksheet(object):
for j, value in enumerate(row)
]
- def find(self, query, col=None, row=None):
+ def find(self, query, in_row=None, in_column=None):
"""Finds the first cell matching the query.
:param query: A literal string to match or compiled regular expression.
:type query: str, :py:class:`re.RegexObject`
-
+ :param int in_row: (optional) One-based number of row to scope the
+ search.
+ :param int in_column: (optional) One-based number of column to scope
+ the search.
"""
try:
- return self._finder(finditem, query, col, row)
+ return self._finder(finditem, query, in_row, in_column)
except StopIteration:
raise CellNotFound(query)
- def findall(self, query):
+ def findall(self, query, in_row=None, in_column=None):
"""Finds all cells matching the query.
:param query: A literal string to match or compiled regular expression.
:type query: str, :py:class:`re.RegexObject`
-
+ :param int in_row: (optional) One-based number of row to scope the
+ search.
+ :param int in_column: (optional) One-based number of column to scope
+ the search.
"""
- return list(self._finder(filter, query))
+ return list(self._finder(filter, query, in_row, in_column))
def freeze(self, rows=None, cols=None):
"""Freeze rows and/or columns on the worksheet.
|
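A hypothetical usage sketch of the new scoping parameters, assuming `worksheet` is an already-opened gspread Worksheet and that the query values are made up:

```python
# First cell matching "Total" anywhere in column B (columns are one-based).
cell = worksheet.find("Total", in_column=2)

# Every cell matching "2016" in the first (header) row.
cells = worksheet.findall("2016", in_row=1)
```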
catch exception when parsing score breakdown json
I see some cases where CachedQueryResult returns `'None'` as the value.
The old app just wraps the `json.loads` in a try/catch, so I'll do the
same thing here.
Fixes | @@ -197,7 +197,10 @@ class Match(CachedModel):
Lazy load score_breakdown_json
"""
if self._score_breakdown is None and self.score_breakdown_json is not None:
+ try:
score_breakdown = json.loads(none_throws(self.score_breakdown_json))
+ except json.decoder.JSONDecodeError:
+ return None
if self.has_been_played:
# Add in RP calculations
|
Update ryuk.txt
Root form of domains only. Subs will be detected automatically. | @@ -186,3 +186,19 @@ zsplace.com
climinus.com
hayridumanli.com
mysocialsoftware.com
+
+# Reference: https://community.riskiq.com/article/0bcefe76
+
+balanarr.com
+bukaguka.com
+daemon-update.com
+hotlable.com
+hunbabe.com
+myobtain.com
+nasmasterservice.com
+primeviref.com
+raingamess.com
+servicemusthave.com
+starcyclone.com
+toyotacamryy.com
+webxyz.net
|
Update Pytorch to version 1.12.0 and TorchVision to 0.13.0
update Pytorch to version 1.12.0 and TorchVision to 0.13.0 | @@ -36,6 +36,6 @@ pip3 install \
pip3 install future
pip3 install \
- torch==1.11.0 \
- torchvision==0.12.0 \
+ torch==1.12.0 \
+ torchvision==0.13.0 \
--extra-index-url https://download.pytorch.org/whl/cpu
|
Refactor tests for monthly usage API
These are now consistent with the yearly usage API tests. | @@ -150,11 +150,12 @@ def test_get_yearly_usage_by_monthly_from_ft_billing_populates_deltas(admin_requ
assert fact_billing[0].notification_type == 'sms'
-def test_get_yearly_usage_by_monthly_from_ft_billing(admin_request, notify_db_session):
+def set_up_monthly_data():
service = create_service()
sms_template = create_template(service=service, template_type="sms")
email_template = create_template(service=service, template_type="email")
letter_template = create_template(service=service, template_type="letter")
+
for month in range(1, 13):
mon = str(month).zfill(2)
for day in range(1, monthrange(2016, month)[1] + 1):
@@ -171,34 +172,37 @@ def test_get_yearly_usage_by_monthly_from_ft_billing(admin_request, notify_db_se
billable_unit=1,
rate=0.33,
postage='second')
+ return service
+
+
+def test_get_yearly_usage_by_monthly_from_ft_billing(admin_request, notify_db_session):
+ service = set_up_monthly_data()
- json_resp = admin_request.get(
+ json_response = admin_request.get(
'billing.get_yearly_usage_by_monthly_from_ft_billing',
service_id=service.id,
year=2016
)
- ft_letters = [x for x in json_resp if x['notification_type'] == 'letter']
- ft_sms = [x for x in json_resp if x['notification_type'] == 'sms']
- ft_email = [x for x in json_resp if x['notification_type'] == 'email']
- keys = [x.keys() for x in ft_sms][0]
- expected_sms_april = {"month": "April",
- "notification_type": "sms",
- "billing_units": 30,
- "rate": 0.162,
- "postage": "none"
- }
- expected_letter_april = {"month": "April",
- "notification_type": "letter",
- "billing_units": 30,
- "rate": 0.33,
- "postage": "second"
- }
+ assert len(json_response) == 18
+
+ email_rows = [row for row in json_response if row['notification_type'] == 'email']
+ assert len(email_rows) == 0
+
+ letter_row = next(x for x in json_response if x['notification_type'] == 'letter')
+ sms_row = next(x for x in json_response if x['notification_type'] == 'sms')
+
+ assert letter_row["month"] == "April"
+ assert letter_row["notification_type"] == "letter"
+ assert letter_row["billing_units"] == 30
+ assert letter_row["rate"] == 0.33
+ assert letter_row["postage"] == "second"
- for k in keys:
- assert ft_sms[0][k] == expected_sms_april[k]
- assert ft_letters[0][k] == expected_letter_april[k]
- assert len(ft_email) == 0
+ assert sms_row["month"] == "April"
+ assert sms_row["notification_type"] == "sms"
+ assert sms_row["billing_units"] == 30
+ assert sms_row["rate"] == 0.162
+ assert sms_row["postage"] == "none"
def set_up_yearly_data():
|
Calculate sha256sum for release assets
And include the .sha256 files in the assets as well | @@ -22,6 +22,9 @@ assets_dir=$TEMPDIR/assets
nix-build -A release -o "$TEMPDIR"/"$project" --arg timestamp "$(date +\"%Y%m%d%H%M\")" \
--arg docker-binaries ./binaries/docker --arg docker-arm-binaries ./arm-binaries/docker
mkdir -p "$assets_dir"
+for asset in "$assets_dir"/*; do
+ sha256sum "$asset" > "$asset.sha256"
+done
# Move archive with binaries and tezos license to assets
shopt -s extglob
cp -L "$TEMPDIR"/"$project"/!(*.md) "$assets_dir"
|
[recipes] Fix compilation for regex recipe
The error was: build/other_builds/hostpython3/desktop/hostpython3/Include/Python.h:39:19: fatal error: crypt.h: No such file or directory | @@ -7,6 +7,7 @@ class RegexRecipe(CompiledComponentsPythonRecipe):
url = 'https://pypi.python.org/packages/d1/23/5fa829706ee1d4452552eb32e0bfc1039553e01f50a8754c6f7152e85c1b/regex-{version}.tar.gz'
depends = ['setuptools']
+ call_hostpython_via_targetpython = False
recipe = RegexRecipe()
|
Quick fix for inline JS at Django template level.
Ideal case would be to include this in a bundle. | var fastclick = require('fastclick');
+window.$ = $;
// side effect: binds handlebars helpers to our handlebars instance
require('../handlebars/helpers.js');
|
Update command-line-tools.rst
Updated code block to be more specific that the entry is a username not a name of a user. | @@ -2081,8 +2081,8 @@ mattermost user migrate_auth
.. code-block:: json
{
- "[email protected]": "user.one",
- "[email protected]": "user.two"
+ "[email protected]": "username.one",
+ "[email protected]": "username.two"
}
Users file generation
|
[modules/brightness] Fix return format
What: Fixes the return format in the `brightness` module
Why: To remove the leading zero in the brightness indicator when the value is below one hundred. | @@ -29,7 +29,7 @@ class Module(bumblebee.engine.Module):
def brightness(self, widget):
if isinstance(self._brightness, float):
- return "{:03.0f}%".format(self._brightness)
+ return "{:3.0f}%".format(self._brightness).strip()
else:
return "n/a"
|
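A quick illustration of the difference between the two format specs (the brightness values below are hypothetical):

```python
"{:03.0f}%".format(75.0)           # -> '075%'  (zero-padded to width 3)
"{:3.0f}%".format(75.0).strip()    # -> '75%'   (space-padded, then stripped)
"{:3.0f}%".format(100.0).strip()   # -> '100%'
```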
Why do you hate functional programming, Python?
The string module's function versions of str methods (like `strip`) are gone in Python 3 - you're supposed to use methods now :( | @@ -10,7 +10,6 @@ from __future__ import unicode_literals
import copy
from collections import Counter, defaultdict
from decimal import Decimal, InvalidOperation
-from string import strip
from attr import attrs, attrib
from django.core.exceptions import ValidationError
@@ -52,6 +51,10 @@ def to_boolean(val):
return False if val == '' else string_to_boolean(val)
+def strip(val):
+ return val.strip()
+
+
@attrs(frozen=True)
class LocationTypeData(object):
"""read-only representation of location type attributes specified in an upload"""
|
Fix f string error
I apologize | @@ -11332,7 +11332,7 @@ and follow recommend steps to authorize GAM for Drive access.''')
else:
mimeType = MIMETYPE_GA_SPREADSHEET
body = {'description': QuotedArgumentList(sys.argv),
- f'name': '{GC_Values[GC_DOMAIN]} - {list_type}',
+ 'name': f'{GC_Values[GC_DOMAIN]} - {list_type}',
'mimeType': mimeType}
result = gapi.call(drive.files(), 'create', fields='webViewLink',
body=body,
|
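For clarity, a small illustration of the bug being fixed: the f-prefix was on the dictionary key instead of the value, so the value was never interpolated. The sample values below are made up:

```python
domain, list_type = "example.com", "Users"    # hypothetical values

wrong = {f'name': '{domain} - {list_type}'}   # value stays a literal string with braces
right = {'name': f'{domain} - {list_type}'}   # -> {'name': 'example.com - Users'}
```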
upgrade lexical-core to fix a bug in recent rustc version
Upgrade the `lexical-core` crate to version v0.7.6 (from v0.7.4) so we get the v0.7.5 fix for the bug that is blocking the upgrade to Rust v1.53.0. See for the particular error.
[ci skip-build-wheels] | @@ -1452,13 +1452,13 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55"
[[package]]
name = "lexical-core"
-version = "0.7.4"
+version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "db65c6da02e61f55dae90a0ae427b2a5f6b3e8db09f58d10efab23af92592616"
+checksum = "6607c62aa161d23d17a9072cc5da0be67cdfc89d3afb1e8d9c842bebc2525ffe"
dependencies = [
"arrayvec",
"bitflags",
- "cfg-if 0.1.10",
+ "cfg-if 1.0.0",
"ryu",
"static_assertions",
]
|
upload: Use URL manipulation for get_public_upload_url logic.
This is much faster than calling generate_presigned_url each time.
```
In [3]: t = time.time()
...: for i in range(250):
...: x = u.get_public_upload_url("foo")
...: print(time.time()-t)
0.0010945796966552734
``` | @@ -387,12 +387,10 @@ class S3UploadBackend(ZulipUploadBackend):
self.uploads_bucket = get_bucket(settings.S3_AUTH_UPLOADS_BUCKET, self.session)
self._boto_client = None
+ self.public_upload_url_pattern = self.construct_public_upload_url_pattern()
- def get_public_upload_url(
- self,
- key: str,
- ) -> str:
- # Return the public URL for a key in the S3 Avatar bucket.
+ def construct_public_upload_url_pattern(self) -> str:
+ # Return the pattern for public URL for a key in the S3 Avatar bucket.
# For Amazon S3 itself, this will return the following:
# f"https://{self.avatar_bucket.name}.{network_location}/{key}"
#
@@ -401,14 +399,35 @@ class S3UploadBackend(ZulipUploadBackend):
# different URL format. Configuring no signature and providing
# no access key makes `generate_presigned_url` just return the
# normal public URL for a key.
- return self.get_boto_client().generate_presigned_url(
+ #
+ # It unfortunately takes 2ms per query to call
+ # generate_presigned_url, even with our cached boto
+ # client. Since we need to potentially compute hundreds of
+ # avatar URLs in single `GET /messages` request, we instead
+ # back-compute the URL pattern here.
+
+ DUMMY_KEY = "dummy_key_ignored"
+ foo_url = self.get_boto_client().generate_presigned_url(
ClientMethod="get_object",
Params={
"Bucket": self.avatar_bucket.name,
- "Key": key,
+ "Key": DUMMY_KEY,
},
ExpiresIn=0,
)
+ parsed = urllib.parse.urlparse(foo_url)
+ base_path = os.path.dirname(parsed.path)
+
+ url_pattern = urllib.parse.urlunparse(
+ parsed._replace(path=os.path.join(base_path, "{key}"))
+ )
+ return url_pattern
+
+ def get_public_upload_url(
+ self,
+ key: str,
+ ) -> str:
+ return self.public_upload_url_pattern.format(key=key)
def get_boto_client(self) -> botocore.client.BaseClient:
"""
|
langkit.compile_context: re-order imports
TN: | @@ -24,9 +24,8 @@ from langkit.ada_api import AdaAPISettings
from langkit.c_api import CAPISettings
from langkit.diagnostics import (Context, Severity, WarningSet,
check_source_language)
-from langkit.utils import (
- TopologicalSortError, topological_sort, memoized, memoized_with_default
-)
+from langkit.utils import (TopologicalSortError, memoized,
+ memoized_with_default, topological_sort)
compile_ctx = None
|
chore: add new spec.json file in accordance with the changes to the docstrings.
chore: correctly add the spec json file | "module": "vaex.ml.lightgbm",
"snake_name": "lightgbm_model",
"traits": [
- {
- "default": false,
- "has_default": false,
- "help": "Copy data or use the modified xgboost library for efficient transfer.",
- "name": "copy",
- "type": "Bool"
- },
{
"default": null,
"has_default": true,
|
Added a simple install command
To install locally, call: python ABCD.py install
You will then be able to use it in scripts from any directory | @@ -726,11 +726,13 @@ class OpticalPath(object):
return (x,y)
import os
+import subprocess
def installModule():
+ directory = subprocess.check_output('python -m site --user-site', shell=True)
os.system('mkdir -p "`python -m site --user-site`"')
os.system('cp ABCD.py "`python -m site --user-site`/"')
os.system('cp Axicon.py "`python -m site --user-site`/"')
-
+ print('Module ABCD.py and Axicon.py copied to ', directory)
# This is an example for the module.
# Don't modify this: create a new script that imports ABCD
@@ -739,6 +741,7 @@ if __name__ == "__main__":
if len(sys.argv) >= 2:
if sys.argv[1] == 'install':
installModule()
+ exit()
path = OpticalPath()
path.name = "Simple demo: one infinite lens f = 5cm"
|
Update `.conf` locations
Configuration files for pgsql 9.4 on CentOS7.3 are now located in `/var/lib/pgsql/9.4/data/` | @@ -62,7 +62,7 @@ Installing PostgreSQL Database
15. Allow Postgres to listen on all assigned IP Addresses.
- a. Open ``/etc/postgresql/9.4/main/postgresql.conf`` as root in a text editor.
+ a. Open ``/var/lib/pgsql/9.4/data/postgresql.conf`` as root in a text editor.
b. Find the following line:
@@ -76,7 +76,7 @@ Installing PostgreSQL Database
If the Mattermost server and the database are on the same machine, then you can skip this step.
- a. Open ``/etc/postgresql/9.4/main/pg_hba.conf`` in a text editor.
+ a. Open ``/var/lib/pgsql/9.4/data/pg_hba.conf`` in a text editor.
b. Add the following line to the end of the file, where *<mm-server-IP>* is the IP address of the machine that contains the Mattermost server.
|
[bugfix] Fix category tidy error when not using custom summary
edit_summary has to be assigned before the later `if edit_summary` check
in the script, as it throws an error otherwise. | @@ -987,9 +987,8 @@ class CategoryTidyRobot(Bot, CategoryPreprocess):
"""Initializer."""
self.cat_title = cat_title
self.cat_db = cat_db
- if comment:
self.edit_summary = comment
- else:
+ if not comment:
self.template_vars = {'oldcat': cat_title}
site = pywikibot.Site()
|
DOC: add 2 projects using sphinx gallery
These are:
*
* | @@ -25,4 +25,6 @@ Here is a list of projects using `sphinx-gallery`.
* `Fury <http://fury.gl/latest/auto_examples/index.html>`_
* `NetworkX <https://networkx.github.io/documentation/stable/auto_examples/index.html>`_
* `Optuna <https://optuna.readthedocs.io/en/stable/tutorial/index.html>`_
+* `Auto-sklearn <https://automl.github.io/auto-sklearn/master/examples/index.html>`_
+* `OpenML-Python <https://openml.github.io/openml-python/master/examples/index.html>`_
|
Clarify that there is only one polling agent
Mention in install/get_started.rst that the compute polling agent
and the central polling agent are actually the same program,
running in different polling namespaces.
Closes-Bug: | @@ -16,12 +16,15 @@ The Telemetry service consists of the following components:
A compute agent (``ceilometer-agent-compute``)
Runs on each compute node and polls for resource utilization
- statistics.
+ statistics. This is actually the polling agent ``ceilometer-polling``
+ running with parameter ``--polling-namespace compute``.
A central agent (``ceilometer-agent-central``)
Runs on a central management server to poll for resource utilization
statistics for resources not tied to instances or compute nodes.
- Multiple agents can be started to scale service horizontally.
+ Multiple agents can be started to scale service horizontally. This is
+ actually the polling agent ``ceilometer-polling`` running with
+ parameter ``--polling-namespace central``.
A notification agent (``ceilometer-agent-notification``)
Runs on a central management server(s) and consumes messages from
|
Update required Keras version to 2.3.0
In accordance with | @@ -251,7 +251,7 @@ Example output images using `keras-retinanet` are shown below.
If you have a project based on `keras-retinanet` and would like to have it published here, shoot me a message on Slack.
### Notes
-* This repository requires Keras 2.2.4 or higher.
+* This repository requires Keras 2.3.0 or higher.
* This repository is [tested](https://github.com/fizyr/keras-retinanet/blob/master/.travis.yml) using OpenCV 3.4.
* This repository is [tested](https://github.com/fizyr/keras-retinanet/blob/master/.travis.yml) using Python 2.7 and 3.6.
|
[IMPR] Skip PageSaveRelatedError and ServerError in checkimages
skip PageSaveRelatedError when putting talk page
if CheckImagesBot.ignore_save_related_errors is True (default: True)
skip ServerError when putting talk page
if CheckImagesBot.ignore_server_errors is True (default: False) | @@ -99,6 +99,8 @@ from pywikibot.exceptions import (
NoPageError,
NotEmailableError,
PageRelatedError,
+ PageSaveRelatedError,
+ ServerError,
TranslationError,
)
from pywikibot.family import Family
@@ -501,6 +503,9 @@ class CheckImagesBot:
"""A robot to check recently uploaded files."""
+ ignore_save_related_errors = True
+ ignore_server_errors = False
+
def __init__(self, site, log_full_number=25000, sendemail_active=False,
duplicates_report=False, log_full_error=True,
max_user_notify=None) -> None:
@@ -726,11 +731,22 @@ class CheckImagesBot:
try:
self.talk_page.put(new_text, summary=commentox, minor=False)
- except LockedPageError:
- pywikibot.output('Talk page blocked, skip.')
+ except PageSaveRelatedError as e:
+ if not self.ignore_save_related_errors:
+ raise
+ err = e
+ except ServerError as e:
+ if not self.ignore_server_errors:
+ raise
+ err = e
else:
if self.num_notify is not None:
self.num_notify[self.talk_page.title()] -= 1
+ err = None
+ if err:
+ pywikibot.exception(err)
+ pywikibot.output('Skipping saving talk page {}'
+ .format(self.talk_page))
if email_page_name and email_subj:
email_page = pywikibot.Page(self.site, email_page_name)
|
Documentation to check synapse version
I've added some documentation on how to get the running version of a
Synapse homeserver. This should help homeserver owners check whether the
upgrade was successful. | @@ -29,6 +29,15 @@ running:
# Update the versions of synapse's python dependencies.
python synapse/python_dependencies.py | xargs -n1 pip install --upgrade
+To check whether your update was successful, run:
+
+.. code:: bash
+
+ # replace your.server.domain with the domain of your synapse homeserver
+ curl https://<your.server.domain>/_matrix/federation/v1/version
+
+So for the Matrix.org HS server the URL would be: https://matrix.org/_matrix/federation/v1/version.
+
Upgrading to v0.15.0
====================
|
Fix test_ir_type.
* The void return type is not None/nullptr, it's VoidType or
TupleType([]). | @@ -72,7 +72,7 @@ def test_func_type():
def test_tuple_type():
tp = tvm.ir.TypeVar('tp', tvm.ir.TypeKind.Type)
- tf = tvm.ir.FuncType([], None, [], [])
+ tf = tvm.ir.FuncType([], tvm.ir.TupleType([]), [], [])
tt = tvm.ir.TensorType(tvm.runtime.convert([1, 2, 3]), 'float32')
fields = tvm.runtime.convert([tp, tf, tt])
|
Fix networks in IP setter
Match new networks format. | @@ -40,9 +40,9 @@ def update_provider_context(args):
raise ValueError('Cannot change network {0} address'
.format(network_name))
else:
- agent_dict['networks'][network_name] = address
+ agent_dict['networks'][network_name]['manager'] = address
agent_dict['broker_ip'] = args.manager_ip
- agent_dict['networks']['default'] = args.manager_ip
+ agent_dict['networks']['default']['manager'] = args.manager_ip
flag_modified(ctx, 'context')
sm.update(ctx)
|
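A sketch of the data-shape change implied by the diff (the addresses are made up): the networks mapping now nests a role key under each network name instead of storing a bare address.

```python
agent_dict = {}

# Old flat format: network name -> address
agent_dict['networks'] = {'default': '10.0.0.1'}

# New nested format: network name -> {'manager': address, ...}
agent_dict['networks'] = {'default': {'manager': '10.0.0.1'}}
```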
MAINT: made doc fixes
Fixed issues with wording and formatting. | @@ -15,7 +15,7 @@ Sample Period Mean Function
The code below creates a function called ``periodic_mean`` that takes either
a pysat Instrument or Orbits object connected to an Instrument and calculates
-the mean every daily or every orbit over the period of time supplied by
+the mean every day or every orbit over the period of time supplied by
`bounds`.
.. code:: python
@@ -70,7 +70,7 @@ You may apply this function as demonstrated below.
tag='utd', inst_id='f15', orbit_info=orbit_info,
clean_level='none', update_files=True)
- # Ensure the data is dowloaded
+ # Ensure the data is downloaded
if len(f15.files[stime:etime + dt.timedelta(days=1)]) < 3:
f15.download(start=stime, stop=etime, user='name', password='email')
|
Fix XLA fallback to avoid checking the mesh conditions
The warning about not using the full mesh manually is mainly to improve error messages
(otherwise an XLA error is generated). But the MLIR lowering fallback uses axis_env
unconditionally, so we have to work around that check. | @@ -364,6 +364,10 @@ class SPMDAxisContext:
"Collectives in manually partitioned computations are only supported "
"when all mesh axes are partitioned manually (no partial automatic sharding). "
"Make sure that you mention all mesh axes in axis_resources!")
+ return self.unsafe_axis_env
+
+ @property
+ def unsafe_axis_env(self):
return xla.AxisEnv(
nreps=self.mesh.size,
names=self.mesh.axis_names,
@@ -1340,8 +1344,13 @@ def xla_fallback_lowering(prim: core.Primitive):
@cache_lowering
def fallback(ctx: LoweringRuleContext, *args, **params):
module_ctx = ctx.module_context
+ axis_ctx = module_ctx.axis_context
+ if isinstance(axis_ctx, SPMDAxisContext):
+ axis_env = axis_ctx.unsafe_axis_env
+ else:
+ axis_env = module_ctx.axis_env
xla_computation = xla.primitive_subcomputation(
- module_ctx.platform, module_ctx.axis_env, prim, ctx.avals_in,
+ module_ctx.platform, axis_env, prim, ctx.avals_in,
ctx.avals_out, **params)
xla_module = xla_computation_to_mhlo_module(xla_computation)
callee_name = merge_mhlo_modules(
|
Add additional logging for termination tasks
TBR=maruel
Review-Url: | @@ -559,6 +559,12 @@ def associate_termination_task(key, hostname, task_id):
if machine_lease.termination_task:
return
+ logging.info(
+ 'Associating termination task\nKey: %s\nHostname: %s\nTask ID: %s',
+ key,
+ machine_lease.hostname,
+ machine_lease.termination_task,
+ )
machine_lease.termination_task = task_id
machine_lease.put()
|
ArrayType.array_type_name: add missing rtype in docstring
TN: | @@ -2234,6 +2234,8 @@ class ArrayType(CompiledType):
def array_type_name(self):
"""
Name of the Ada array type.
+
+ :rtype: names.Name
"""
return self.element_type.name + names.Name('Array')
|
Adjust find_token_in_message tests for the recent cog changes
The tests now cover the changes that switched to finditer, added match
groups, and added the Token NamedTuple. They also account for the
is_maybe_token function being removed.
For the sake of simplicity, assertions on calls to is_valid_user_id and
is_valid_timestamp were not made. | import unittest
+from re import Match
from unittest import mock
from unittest.mock import MagicMock
@@ -130,9 +131,8 @@ class TokenRemoverTests(unittest.IsolatedAsyncioTestCase):
self.assertIsNone(return_value)
token_re.finditer.assert_not_called()
- @autospec(TokenRemover, "is_maybe_token")
@autospec("bot.cogs.token_remover", "TOKEN_RE")
- def test_find_token_no_matches_returns_none(self, token_re, is_maybe_token):
+ def test_find_token_no_matches(self, token_re):
"""None should be returned if the regex matches no tokens in a message."""
token_re.finditer.return_value = ()
@@ -140,30 +140,31 @@ class TokenRemoverTests(unittest.IsolatedAsyncioTestCase):
self.assertIsNone(return_value)
token_re.finditer.assert_called_once_with(self.msg.content)
- is_maybe_token.assert_not_called()
- @autospec(TokenRemover, "is_maybe_token")
+ @autospec(TokenRemover, "is_valid_user_id", "is_valid_timestamp")
+ @autospec("bot.cogs.token_remover", "Token")
@autospec("bot.cogs.token_remover", "TOKEN_RE")
- def test_find_token_returns_found_token(self, token_re, is_maybe_token):
- """The found token should be returned."""
- true_index = 1
- matches = ("foo", "bar", "baz")
- side_effects = [False] * len(matches)
- side_effects[true_index] = True
-
- token_re.findall.return_value = matches
- is_maybe_token.side_effect = side_effects
+ def test_find_token_valid_match(self, token_re, token_cls, is_valid_id, is_valid_timestamp):
+ """The first match with a valid user ID and timestamp should be returned as a `Token`."""
+ matches = [
+ mock.create_autospec(Match, spec_set=True, instance=True),
+ mock.create_autospec(Match, spec_set=True, instance=True),
+ ]
+ tokens = [
+ mock.create_autospec(Token, spec_set=True, instance=True),
+ mock.create_autospec(Token, spec_set=True, instance=True),
+ ]
+
+ token_re.finditer.return_value = matches
+ token_cls.side_effect = tokens
+ is_valid_id.side_effect = (False, True) # The 1st match will be invalid, 2nd one valid.
+ is_valid_timestamp.return_value = True
return_value = TokenRemover.find_token_in_message(self.msg)
- self.assertEqual(return_value, matches[true_index])
+ self.assertEqual(tokens[1], return_value)
token_re.finditer.assert_called_once_with(self.msg.content)
- # assert_has_calls isn't used cause it'd allow for extra calls before or after.
- # The function should short-circuit, so nothing past true_index should have been used.
- calls = [mock.call(match) for match in matches[:true_index + 1]]
- self.assertEqual(is_maybe_token.mock_calls, calls)
-
def test_regex_invalid_tokens(self):
"""Messages without anything looking like a token are not matched."""
tokens = (
|
(more-config-work-4) Rename Dict Api object to DagsterDictApi
Summary: Trivial. Just renaming this as it's own PR
Test Plan: BK
Reviewers: max, alangenfeld | @@ -100,7 +100,7 @@ def __getitem__(self, inner_type):
return WrappingSetType(inner_type)
-class DictTypeApi(object):
+class DagsterDictApi(object):
def __call__(self, fields):
from dagster.core.types.config.field_utils import build_config_dict
@@ -121,4 +121,4 @@ def __getitem__(self, *args):
Tuple = DagsterTupleApi()
-Dict = DictTypeApi()
+Dict = DagsterDictApi()
|
Show permission failure message based on passed user
only if the passed user is equal to the session user, or if no user is passed | @@ -24,8 +24,10 @@ def print_has_permission_check_logs(func):
def inner(*args, **kwargs):
frappe.flags['has_permission_check_logs'] = []
result = func(*args, **kwargs)
+ self_perm_check = True if not kwargs['user'] else kwargs['user'] == frappe.session.user
# print only if access denied
- if not result:
+ # and if user is checking his own permission
+ if not result and self_perm_check:
msgprint(('<br>').join(frappe.flags['has_permission_check_logs']))
frappe.flags.pop('has_permission_check_logs', None)
return result
|
Move circleci cache save until after tests
That way the minorminer cache is also saved. | @@ -12,7 +12,7 @@ jobs:
- restore_cache: &restore-cache-template
keys:
- - v1-dependencies-{{ checksum "requirements.txt" }}-{{ checksum "tests/requirements.txt" }}-{{ .Environment.CIRCLE_JOB }}
+ - v2-dependencies-{{ checksum "requirements.txt" }}-{{ checksum "tests/requirements.txt" }}-{{ .Environment.CIRCLE_JOB }}
- run:
name: create virtualenv
@@ -26,11 +26,6 @@ jobs:
python --version
pip install -r requirements.txt -r tests/requirements.txt
- - save_cache: &save-cache-template
- paths:
- - ./env
- key: v1-dependencies-{{ checksum "requirements.txt" }}-{{ checksum "tests/requirements.txt" }}-{{ .Environment.CIRCLE_JOB }}
-
- run: &run-tests-template
name: run unittests
command: |
@@ -38,6 +33,11 @@ jobs:
python --version
coverage run -m unittest discover
+ - save_cache: &save-cache-template
+ paths:
+ - ./env
+ key: v2-dependencies-{{ checksum "requirements.txt" }}-{{ checksum "tests/requirements.txt" }}-{{ .Environment.CIRCLE_JOB }}
+
- run:
name: codecov
command: |
@@ -70,7 +70,7 @@ jobs:
- restore_cache:
keys:
- - v1-dependencies-{{ checksum "requirements.txt" }}-{{ checksum "docs/requirements.txt" }}-{{ .Environment.CIRCLE_JOB }}
+ - v2-dependencies-{{ checksum "requirements.txt" }}-{{ checksum "docs/requirements.txt" }}-{{ .Environment.CIRCLE_JOB }}
- run:
name: create virtualenv
@@ -83,17 +83,17 @@ jobs:
. env/bin/activate
pip install -r requirements.txt -r docs/requirements.txt
- - save_cache:
- paths:
- - ./env
- key: v1-dependencies-{{ checksum "requirements.txt" }}-{{ checksum "docs/requirements.txt" }}-{{ .Environment.CIRCLE_JOB }}
-
- run:
name: doctest
command: |
. env/bin/activate
make -C docs/ html doctest
+ - save_cache:
+ paths:
+ - ./env
+ key: v2-dependencies-{{ checksum "requirements.txt" }}-{{ checksum "docs/requirements.txt" }}-{{ .Environment.CIRCLE_JOB }}
+
test-osx-3.8: &osx-tests-template
macos:
xcode: "11.2.1"
@@ -137,10 +137,10 @@ jobs:
- run: *install-dependencies-template
- - save_cache: *save-cache-template
-
- run: *run-tests-template
+ - save_cache: *save-cache-template
+
test-osx-3.7:
<<: *osx-tests-template
environment:
@@ -165,12 +165,6 @@ jobs:
PYTHON: 3.4.8
HOMEBREW_NO_AUTO_UPDATE: 1
- test-osx-2.7:
- <<: *osx-tests-template
- environment:
- PYTHON: 2.7.15
- HOMEBREW_NO_AUTO_UPDATE: 1
-
deploy:
docker:
- image: circleci/python:3.6-jessie
|
pywinusb backend: raise DeviceError on timeout while opening device.
DeviceError is raised instead of a forced assertion failure. | @@ -83,13 +83,14 @@ class PyWinUSB(Interface):
# If the device could not be opened in read only mode
# Then it either has been disconnected or is in use
# by another thread/process
- raise six.raise_from(DAPAccessIntf.DeviceError("Unable to open device"), exc)
+ raise six.raise_from(DAPAccessIntf.DeviceError("Unable to open device %s"
+ % self.serial_number), exc)
else:
# If this timeout has elapsed then another process
# has locked this device in shared mode. This should
# not happen.
- assert False
+ raise DAPAccessIntf.DeviceError("timed out attempting to open device %s" % self.serial_number)
@staticmethod
def get_all_connected_interfaces():
|
Update CHANGES.rst
Fixed triple backticks to double backticks | @@ -26,7 +26,7 @@ New Features
- ``combine`` now accepts ``numpy.ndarray`` as the input ``img_list``. [#493, #503]
-- Added ```sum``` option in method for ```combime```. [#500, #508]
+- Added ``sum`` option in method for ``combime``. [#500, #508]
Other Changes and Additions
|
SConstruct : Remove FaceAreaOp stub
This op has been removed in Cortex 10. | @@ -844,7 +844,6 @@ libraries = {
# meshes
( "TriangulateOp", "ops/mesh/triangulate" ),
- ( "FaceAreaOp", "ops/mesh/faceArea" ),
( "MeshMergeOp", "ops/mesh/merge" ),
( "MeshNormalsOp", "ops/mesh/normals" ),
|
Apply suggestions from code review
changes error ts arg/var name
explicitly uses runtime.cwd instead of tmpdir | @@ -135,7 +135,7 @@ def get_parser():
g_other.add_argument("--estimator", action="store", type=str,
help="estimator to use to fit the model",
default="nistats", choices=["nistats", "afni"])
- g_other.add_argument("--errorts", action='store_true', default=False,
+ g_other.add_argument("--error-ts", action='store_true', default=False,
help='save error time series for first level models.'
' Currently only implemented for afni estimator.')
@@ -201,7 +201,7 @@ def run_fitlins(argv=None):
# TODO - fix neuroscout
derivatives = derivatives[0].split(" ")
- if opts.errorts and opts.estimator != 'afni':
+ if opts.error_ts and opts.estimator != 'afni':
raise NotImplementedError("Saving the error time series is only implmented for"
" the afni estimator. If this is a feature you want"
f" for {opts.estimator} please let us know on github.")
@@ -247,7 +247,7 @@ def run_fitlins(argv=None):
space=opts.space, desc=opts.desc_label,
participants=subject_list, base_dir=work_dir,
smoothing=opts.smoothing, drop_missing=opts.drop_missing,
- estimator=opts.estimator, errorts=opts.errorts
+ estimator=opts.estimator, errorts=opts.error_ts
)
fitlins_wf.config = deepcopy(config.get_fitlins_config()._sections)
|
ci: print deployment command
This commit prints the
command that will deploy the CI Job. | @@ -406,7 +406,9 @@ def run_e2e_job(distro, driver, masters, workers,
str(hypervisors),
str(job_type),
str(launch_from))
+ print("'launch_e2e.py' ==> The deployment command is:")
print(deployment_command)
+
launch_output = subprocess.run(deployment_command, shell=True, check=True)
print("'launch_e2e.py' ==> ./ci/launch_e2e.sh output")
print(launch_output)
|
don't rely on SIP to check if Qt object exists
sip import may fail on some installations, see | @@ -18,7 +18,7 @@ from subprocess import Popen
import click
import keyring
from keyring.errors import KeyringLocked
-from PyQt5 import QtCore, QtWidgets, sip
+from PyQt5 import QtCore, QtWidgets
# maestral modules
from maestral.config.main import CONF
@@ -629,11 +629,14 @@ def _is_linked():
def _is_pyqt_obj(obj):
"""Checks if ``obj`` wraps an underlying C/C++ object."""
+ if isinstance(obj, QtCore.QObject):
try:
- sip.unwrapinstance(obj)
- except (RuntimeError, TypeError):
- return False
+ obj.parent()
return True
+ except RuntimeError:
+ return False
+ else:
+ return False
def run():
|
chore: escape regex correctly
why codacy? | @@ -264,7 +264,7 @@ frappe.utils.sanitise_redirect = (url) => {
const is_external = (() => {
return (url) => {
function domain(url) {
- let base_domain = /^(?:https?://)?(?:[^@\n]+@)?(?:www\.)?([^:\/\n?]+)/img.exec(url);
+ let base_domain = /^(?:https?:\/\/)?(?:[^@\n]+@)?(?:www\.)?([^:\/\n?]+)/img.exec(url);
return base_domain == null ? "" : base_domain[1];
}
|
update read bitalino to select channels correctly
channels information in the metadata is not the column index but rather the port index (e.g. A1, A2, A6 => 1, 2, 6) | @@ -47,7 +47,7 @@ def read_bitalino(filename):
list(metadata.keys())[0]
] # convert json header to dict (only select first device / MAC address)
sampling_rate = metadata["sampling rate"]
- channels = np.array(metadata["channels"]) + 5 # analog channels start from column 5
+ channels = np.arange(len(metadata["channels"])) + 5 # analog channels start from column 5
# Read data
data = pd.read_csv(filename, sep="\t", usecols=channels, header=None, comment="#")
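
A minimal sketch of why the fix matters (toy header values, not a real BITalino recording): the analog data always occupies consecutive columns starting at column 5, so the port numbers from the header cannot be used directly as column offsets.

    import numpy as np

    metadata = {"channels": [1, 2, 6]}   # ports A1, A2, A6 from the JSON header

    # Old behaviour: port numbers treated as column offsets -> columns 6, 7, 11
    wrong_cols = np.array(metadata["channels"]) + 5
    # Fixed behaviour: one consecutive column per recorded channel -> columns 5, 6, 7
    right_cols = np.arange(len(metadata["channels"])) + 5

    print(wrong_cols)   # [ 6  7 11]
    print(right_cols)   # [5 6 7]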
|
Update example run command in tutorial
It is not recommended to run `mlflow ui` from the root directory of
MLFlow. As a result, the tutorial is updated to use `examples` directory
as the working directory. The two example commands are however not
updated accordingly. | @@ -78,13 +78,15 @@ First, train a linear regression model that takes two hyperparameters: ``alpha``
.. code-block:: py
- python examples/sklearn_elasticnet_wine/train.py
+ # Make sure the current working directory is 'examples'
+ python sklearn_elasticnet_wine/train.py
Try out some other values for ``alpha`` and ``l1_ratio`` by passing them as arguments to ``train.py``:
.. code-block:: py
- python examples/sklearn_elasticnet_wine/train.py <alpha> <l1_ratio>
+ # Make sure the current working directory is 'examples'
+ python sklearn_elasticnet_wine/train.py <alpha> <l1_ratio>
Each time you run the example, MLflow logs information about your experiment runs in the directory ``mlruns``.
@@ -107,13 +109,15 @@ First, train a linear regression model that takes two hyperparameters: ``alpha``
.. code-block:: R
- mlflow_run(uri = "examples/r_wine", entry_point = "train.R")
+ # Make sure the current working directory is 'examples'
+ mlflow_run(uri = "r_wine", entry_point = "train.R")
Try out some other values for ``alpha`` and ``lambda`` by passing them as arguments to ``train.R``:
.. code-block:: R
- mlflow_run(uri = "examples/r_wine", entry_point = "train.R", parameters = list(alpha = 0.1, lambda = 0.5))
+ # Make sure the current working directory is 'examples'
+ mlflow_run(uri = "r_wine", entry_point = "train.R", parameters = list(alpha = 0.1, lambda = 0.5))
Each time you run the example, MLflow logs information about your experiment runs in the directory ``mlruns``.
@@ -174,7 +178,7 @@ Now that you have your training code, you can package it so that other data scie
.. literalinclude:: ../../../examples/sklearn_elasticnet_wine/conda.yaml
- To run this project, invoke ``mlflow run examples/sklearn_elasticnet_wine -P alpha=0.42``. After running
+ To run this project, invoke ``mlflow run sklearn_elasticnet_wine -P alpha=0.42``. After running
this command, MLflow runs your training code in a new Conda environment with the dependencies
specified in ``conda.yaml``.
@@ -213,7 +217,8 @@ Now that you have your training code, you can package it so that other data scie
.. code-block:: r
- mlflow_run("examples/r_wine", entry_point = "train.R", parameters = list(alpha = 0.2))
+ # Make sure the current working directory is 'examples'
+ mlflow_run("r_wine", entry_point = "train.R", parameters = list(alpha = 0.2))
After running this command, MLflow runs your training code in a new R session.
@@ -222,7 +227,8 @@ Now that you have your training code, you can package it so that other data scie
.. code-block:: r
mlflow_restore_snapshot()
- mlflow_run("examples/r_wine", entry_point = "train.R", parameters = list(alpha = 0.2))
+ # Make sure the current working directory is 'examples'
+ mlflow_run("r_wine", entry_point = "train.R", parameters = list(alpha = 0.2))
You can also run a project directly from GitHub. This tutorial is duplicated in the https://github.com/rstudio/mlflow-example repository which you can run with:
|
Fixup api doc.
Replaces | @@ -1570,9 +1570,10 @@ Query-builder
# Get user and a list of their tweet IDs. The tweet IDs are
# returned as a comma-separated string by the db, so we'll split
# the result string and convert the values to python ints.
+ convert_ids = lambda s: [int(i) for i in (s or '').split(',')]
tweet_ids = (fn
.GROUP_CONCAT(Tweet.id)
- .python_value(lambda idlist: [int(i) for i in idlist]))
+ .python_value(convert_ids))
query = (User
.select(User.username, tweet_ids.alias('tweet_ids'))
|
Add instructions for Azure test cases
Azure test case development steps | @@ -113,6 +113,20 @@ This XML file defines the regions per Category. It may require specific region o
Per Category, each XML file has VM name, Resource Group name, etc. We do not recommend to make change of the file.
+## Add test case in Azure
+
+ 1. Design test case and its configuration.
+ 2. Create a new test case xml file under ./XML/TestCases folder. Or, update with new tags in the existing xml file.
+ 3. Define testName, PowershellScript, setupType, Platform, Category, Area and Tags as required. Add optional tag if needed.
+ 4. Test design may have two ways;
+ a. A single PowerShell script execution: A single PowerShell script imports builtin library modules and posts the result.
+ For example, 'BVT-VERIFY-DEPLOYMET-PROVISION.ps1' shows this script calls 'DeployVMS' function for its testing and
+ collect the result by 'CheckKernelLogs'. 'DeplyVM' and 'CheckKernelLogs' are builtin modules
+ in ./Libraries/CommonFunctions.psm1 module. You can add new module or update existing ones for further development.
+ b. PowerShell script wraps multiple non-PowerShell script like Bash or Python scripts: Like 'VERIFY-TEST-SCRIPT-IN-LINUX-GUEST.ps1',
+ the PowerShell script wraps the multiple Bash or Python script as a parameter of 'RunLinuxCmd' PS module.
+ 5. Before PR review, we recommend you run script testing in cmdline/API mode. See above instruction.
+
## Support Contact
Contact [email protected] (Linux Integration Service Support), if you have technical issues.
|
remove superfluous flake8 rule ignores
because some linting violations have been fixed in the meantime | @@ -47,15 +47,11 @@ extend-ignore =
# Allow certain violations in certain files:
per-file-ignores =
- # FIXME: D100 Missing docstring in public module
- # FIXME: D101 Missing docstring in public class
# FIXME: D102 Missing docstring in public method
- # FIXME: drop these once they're made simpler
- # Ref: https://github.com/ansible-community/ansible-lint/issues/744
- src/ansiblelint/cli.py: D101 D102
- src/ansiblelint/formatters/__init__.py: D101 D102
- src/ansiblelint/rules/*.py: D100 D101 D102
- src/ansiblelint/rules/__init__.py: D100 D101 D102
+ src/ansiblelint/cli.py: D102
+ src/ansiblelint/formatters/__init__.py: D102
+ src/ansiblelint/rules/*.py: D102
+ src/ansiblelint/rules/__init__.py: D102
# FIXME: C901 Function is too complex
# FIXME: refactor _defaults_from_yamllint_config using match case
@@ -65,7 +61,7 @@ per-file-ignores =
# FIXME: drop these once they're fixed
# Ref: https://github.com/ansible-community/ansible-lint/issues/725
- test/*: D100 D101 D102
+ test/*: D102
# flake8-pytest-style
# PT001:
|
Fix variable declaration
In plain old C, variables shall be declared above
Fix | @@ -1120,7 +1120,7 @@ psutil_net_connections(PyObject *self, PyObject *args) {
mib2_udp6Entry_t ude6;
#endif
char buf[512];
- int i, flags, getcode, num_ent, state;
+ int i, flags, getcode, num_ent, state, ret;
char lip[INET6_ADDRSTRLEN], rip[INET6_ADDRSTRLEN];
int lport, rport;
int processed_pid;
@@ -1147,7 +1147,7 @@ psutil_net_connections(PyObject *self, PyObject *args) {
goto error;
}
- int ret = ioctl(sd, I_PUSH, "tcp");
+ ret = ioctl(sd, I_PUSH, "tcp");
if (ret == -1) {
PyErr_SetFromErrno(PyExc_OSError);
goto error;
|
Update new endpoint tutorial
Remove remaining outdated materials | @@ -12,7 +12,7 @@ In this tutorial we'll create a brand new endpoint for the Epidata API:
`fluview_meta`. At a high level, we'll do the following steps:
1. understand the data that we want to surface
-2. add the new endpoint to `api.php`
+2. add the new endpoint to the API server
3. add the new endpoint to the various client libraries
4. write an integration test for the new endpoint
5. update API documentation for the new endpoint
@@ -328,8 +328,7 @@ actual
[pull request for the `fluview_meta` endpoint](https://github.com/cmu-delphi/delphi-epidata/pull/93)
created in this tutorial.
-Once it's approved, commit the code. Within a short amount of time (usually ~30
-seconds), the API will begin serving your new endpoint. Go ahead and give it a
+Once it's approved, merge the PR, and contact an admin to schedule a release. Once released, the API will begin serving your new endpoint. Go ahead and give it a
try: https://delphi.cmu.edu/epidata/fluview_meta/
```
|
Update core-base.vue
whitespace | },
navOpenStyle() {
if (this.navShown) {
- return {
- marginLeft: `${this.paddingForNav}px`,
- };
+ return { marginLeft: `${this.paddingForNav}px` };
}
return '';
},
|
Update 01_reachability_map.rst
small typo in reuleaux link | @@ -14,7 +14,7 @@ available robots.
Links
=====
-* `Realuex (ROS's reachability map) <http://wiki.ros.org/reuleaux>`_
+* `Reuleaux (ROS's reachability map) <http://wiki.ros.org/reuleaux>`_
Example 01: reachability map 1D
|
Code block: fix formatting of the additional message
The newlines should be replaced with a space rather than with 1 newline.
To separate the two issues, a double newline is prepended to the entire
additional message. | @@ -34,10 +34,10 @@ def get_bad_ticks_message(code_block: parsing.CodeBlock) -> Optional[str]:
# already have an example code block.
if addition_msg:
# The first line has a double line break which is not desirable when appending the msg.
- addition_msg = addition_msg.replace("\n\n", "\n", 1)
+ addition_msg = addition_msg.replace("\n\n", " ", 1)
# Make the first character of the addition lower case.
- instructions += "Furthermore, " + addition_msg[0].lower() + addition_msg[1:]
+ instructions += "\n\nFurthermore, " + addition_msg[0].lower() + addition_msg[1:]
else:
# Determine the example code to put in the code block based on the language specifier.
if code_block.language.lower() in PY_LANG_CODES:
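
A small self-contained illustration of the new formatting; the message strings here are made up, not the bot's real wording. The addition keeps its sentences on one line, and a blank line separates it from the main instructions.

    instructions = "It looks like you pasted code without formatting it."
    addition_msg = "Your code uses single quotes.\n\nUse triple backticks instead."

    # Collapse the first double line break inside the addition into a space...
    addition_msg = addition_msg.replace("\n\n", " ", 1)
    # ...and join the two issues with an explicit blank line between them.
    instructions += "\n\nFurthermore, " + addition_msg[0].lower() + addition_msg[1:]
    print(instructions)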
|
Fixing NullPointerException
Summary: The previous diff missed the other usage | @@ -40,14 +40,15 @@ object Helper {
def getDataTypes(sqlContext: SQLContext,
tableName: String,
columnNames: List[String]): Map[String, String] = {
+ // null check is required because jackson doesn't care about default values
+ val notNullColumnNames = Option(columnNames).getOrElse(List[String]())
val dt = sqlContext.sparkSession.catalog
.listColumns(tableName)
.collect
- // null check is required because jackson doesn't care about default values
- .filter(column => columnNames != null && columnNames.contains(column.name))
+ .filter(column => notNullColumnNames.contains(column.name))
.map(column => column.name -> column.dataType)
.toMap
- assert(dt.size == columnNames.size)
+ assert(dt.size == notNullColumnNames.size)
dt
}
|
support building Boost with Emscripten
This changes the Boost conanfile to not fail if the "arch" profile
setting is "asm.js" and to explicitly use the "emscripten" Boost Build
toolchain if the "os" profile setting is "Emscripten". | @@ -619,6 +619,8 @@ class BoostConan(ConanFile):
pass
elif arch.startswith("mips"):
pass
+ elif arch.startswith("asm.js"):
+ pass
else:
raise Exception("I'm so sorry! I don't know the appropriate ABI for "
"your architecture. :'(")
@@ -732,6 +734,8 @@ class BoostConan(ConanFile):
return "msvc", _msvc_version, ""
elif self.settings.os == "Windows" and self.settings.compiler == "clang":
return "clang-win", compiler_version, ""
+ elif self.settings.os == "Emscripten" and self.settings.compiler == "clang":
+ return "emscripten", compiler_version, self._cxx
elif self.settings.compiler == "gcc" and tools.is_apple_os(self.settings.os):
return "darwin", compiler_version, self._cxx
elif compiler == "gcc" and compiler_version[0] >= "5":
|
Add operationId properties to endpoints
This enables client SDK generators to create nicer names for the functions
Otherwise auto generated names are used, which are mostly suboptimal | @@ -16,6 +16,7 @@ paths:
tags:
- Server Information
summary: Health endpoint of Rasa Server
+ operationId: getHealth
description: >-
This URL can be used as an endpoint to run
health checks against. When the server is running
@@ -35,6 +36,7 @@ paths:
get:
tags:
- Server Information
+ operationId: getVersion
summary: Version of Rasa
description: >-
Returns the version of Rasa.
@@ -64,6 +66,7 @@ paths:
security:
- TokenAuth: []
- JWT: []
+ operationId: getStatus
tags:
- Server Information
summary: Status of the currently loaded Rasa model
@@ -108,6 +111,7 @@ paths:
security:
- TokenAuth: []
- JWT: []
+ operationId: getConversationTracker
tags:
- Tracker
summary: Retrieve a conversations tracker
@@ -139,6 +143,7 @@ paths:
security:
- TokenAuth: []
- JWT: []
+ operationId: getConversationTrackerEvents
tags:
- Tracker
summary: Append events to a tracker
@@ -177,6 +182,7 @@ paths:
security:
- TokenAuth: []
- JWT: []
+ operationId: replaceConversationTrackerEvents
tags:
- Tracker
summary: Replace a trackers events
@@ -213,6 +219,7 @@ paths:
security:
- TokenAuth: []
- JWT: []
+ operationId: getConversationStory
tags:
- Tracker
summary: Retrieve an end-to-end story corresponding to a conversation
@@ -240,6 +247,7 @@ paths:
security:
- TokenAuth: []
- JWT: []
+ operationId: executeConversationAction
tags:
- Tracker
summary: Run an action in a conversation
@@ -287,6 +295,7 @@ paths:
security:
- TokenAuth: []
- JWT: []
+ operationId: predictConversationAction
tags:
- Tracker
summary: Predict the next action
@@ -322,6 +331,7 @@ paths:
security:
- TokenAuth: []
- JWT: []
+ operationId: addConversationMessage
tags:
- Tracker
summary: Add a message to a tracker
@@ -359,6 +369,7 @@ paths:
security:
- TokenAuth: []
- JWT: []
+ operationId: trainModel
tags:
- Model
summary: Train a Rasa model
@@ -398,6 +409,7 @@ paths:
security:
- TokenAuth: []
- JWT: []
+ operationId: evaluateModelStories
tags:
- Model
summary: Evaluate stories
@@ -435,6 +447,7 @@ paths:
security:
- TokenAuth: []
- JWT: []
+ operationId: testModelIntent
tags:
- Model
summary: Perform an intent evaluation
@@ -471,6 +484,7 @@ paths:
security:
- TokenAuth: []
- JWT: []
+ operationId: predictModelAction
tags:
- Model
summary: Predict an action on a temporary state
@@ -511,6 +525,7 @@ paths:
security:
- TokenAuth: []
- JWT: []
+ operationId: parseModelMessage
tags:
- Model
summary: Parse a message using the Rasa model
@@ -553,6 +568,7 @@ paths:
security:
- TokenAuth: []
- JWT: []
+ operationId: replaceModel
tags:
- Model
summary: Replace the currently loaded model
@@ -583,6 +599,7 @@ paths:
security:
- TokenAuth: []
- JWT: []
+ operationId: unloadModel
tags:
- Model
summary: Unload the trained model
@@ -601,6 +618,7 @@ paths:
security:
- TokenAuth: []
- JWT: []
+ operationId: getDomain
tags:
- Domain
summary: Retrieve the loaded domain
|
BUG: separate data objects
Copy the data object when writing xarray netCDF data files. | @@ -1125,8 +1125,9 @@ def inst_to_netcdf(inst, fname, base_instrument=None, epoch_name='Epoch',
# Attach attributes
out_data.setncatts(attrb_dict)
else:
- # Attach the metadata to the xarray.Dataset
- xr_data = inst.data
+ # Attach the metadata to a separate xarray.Dataset object, ensuring
+ # the Instrument data object is unchanged.
+ xr_data = xr.Dataset(inst.data)
pysat_meta_to_xarray_attr(xr_data, inst.meta)
# If the case needs to be preserved, update Dataset variables
|
Add extra new line
The markdown parser for the website requires a new line here to create bullet points. | @@ -7,5 +7,6 @@ category: upgrade
# TiDB Development Release Upgrade Guide
Please see the upgrade guides from the following earlier releases:
+
- [Upgrading to TiDB 2.1](https://pingcap.com/docs/v2.1/how-to/upgrade/from-previous-version/)
- [Upgrading to TiDB 3.0](https://pingcap.com/docs/v3.0/how-to/upgrade/from-previous-version/)
|
right-sidebar: Fix menu icon hover color.
This will fix the menu icon hover effect for the day mode. | &:hover {
display: inline;
cursor: pointer;
- color: hsl(0, 0%, 0%);
+ color: hsl(0, 0%, 0%) !important;
}
}
|
DOC: optimize: fix doc that `curve_fit` xdata should be float convertible
[skip azp] [skip actions] | @@ -545,10 +545,11 @@ def curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False,
The model function, f(x, ...). It must take the independent
variable as the first argument and the parameters to fit as
separate remaining arguments.
- xdata : array_like or object
+ xdata : array_like
The independent variable where the data is measured.
Should usually be an M-length sequence or an (k,M)-shaped array for
- functions with k predictors, but can actually be any object.
+ functions with k predictors, and each element should be float
+ convertible if it is an array like object.
ydata : array_like
The dependent data, a length M array - nominally ``f(xdata, ...)``.
p0 : array_like, optional
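
A tiny usage sketch of what the reworded docstring means in practice (made-up data): a plain Python list works as xdata because every element can be coerced to float.

    from scipy.optimize import curve_fit

    def line(x, a, b):
        return a * x + b

    xdata = [0, 1, 2, 3, 4]              # array_like and float convertible
    ydata = [0.1, 1.9, 4.2, 5.8, 8.1]
    popt, pcov = curve_fit(line, xdata, ydata)
    print(popt)                          # roughly [2.0, 0.0]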
|
Update lax.py
Use the accurate mathematical description to avoid confusion.
We may want to say the dimension of the array rather than the rank of the tensor array. | @@ -635,8 +635,8 @@ def dot(lhs: Array, rhs: Array, precision: PrecisionLike = None,
For more general contraction, see the `dot_general` operator.
Args:
- lhs: an array of rank 1 or 2.
- rhs: an array of rank 1 or 2.
+ lhs: an array of dimension 1 or 2.
+ rhs: an array of dimension 1 or 2.
precision: Optional. Either ``None``, which means the default precision for
the backend, a :class:`~jax.lax.Precision` enum value (``Precision.DEFAULT``,
``Precision.HIGH`` or ``Precision.HIGHEST``) or a tuple of two
@@ -4403,7 +4403,7 @@ def _check_shapelike(fun_name, arg_name, obj, non_zero_shape=False):
return # TODO(mattjj): handle more checks in the dynamic shape case
obj_arr = np.array(obj)
if obj_arr.ndim != 1:
- msg = "{} {} must be rank 1, got {}."
+ msg = "{} {} must be 1-dimensional, got {}."
raise TypeError(msg.format(obj_arr.ndim))
try:
canonicalize_shape(obj_arr)
|
{AppService} make webapp name local context value readable for cupertino
* {AppService} make webapp name local context value readable for cupertino
* temp add
* add cupertino for webapp up
* Revert "temp add"
This reverts commit | @@ -126,7 +126,7 @@ def load_arguments(self, _):
c.argument('name', options_list=['--name', '-n'], help='name of the new web app',
validator=validate_site_create,
local_context_attribute=LocalContextAttribute(name='web_name', actions=[LocalContextAction.SET],
- scopes=['webapp']))
+ scopes=['webapp', 'cupertino']))
c.argument('startup_file', help="Linux only. The web's startup file")
c.argument('docker_registry_server_user', options_list=['--docker-registry-server-user', '-s'], help='the container registry server username')
c.argument('docker_registry_server_password', options_list=['--docker-registry-server-password', '-w'], help='The container registry server password. Required for private registries.')
@@ -598,7 +598,7 @@ def load_arguments(self, _):
c.argument('name', arg_type=webapp_name_arg_type,
local_context_attribute=LocalContextAttribute(name='web_name', actions=[LocalContextAction.GET,
LocalContextAction.SET],
- scopes=['webapp']))
+ scopes=['webapp', 'cupertino']))
c.argument('plan', options_list=['--plan', '-p'], configured_default='appserviceplan',
completer=get_resource_name_completion_list('Microsoft.Web/serverFarms'),
help="name of the appserviceplan associated with the webapp",
|
fix(context): do not set type on class attribute
Otherwise dataclasses transforms it to an instance attribute. | @@ -100,7 +100,7 @@ class Installation:
)
return self._user_tokens
- USER_ID_MAPPING_CACHE_KEY: str = "user-id-mapping"
+ USER_ID_MAPPING_CACHE_KEY = "user-id-mapping"
async def get_user(
self, login: github_types.GitHubLogin
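
A standalone toy class (not Mergify's Installation) showing what the one-character change is about: under @dataclasses.dataclass, an annotated class-level constant becomes an instance field, while an unannotated one stays a plain class attribute shared by all instances.

    import dataclasses

    @dataclasses.dataclass
    class Annotated:
        CACHE_KEY: str = "user-id-mapping"   # picked up as a dataclass field

    @dataclasses.dataclass
    class Plain:
        CACHE_KEY = "user-id-mapping"        # left alone: plain class attribute

    print([f.name for f in dataclasses.fields(Annotated)])  # ['CACHE_KEY']
    print([f.name for f in dataclasses.fields(Plain)])      # []

An alternative would be annotating with typing.ClassVar[str], which keeps the type hint without turning the attribute into a field.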
|
Corrected vec2d cross documentation
Documentation had incorrect cross product formula (function was correct) | @@ -463,7 +463,7 @@ class Vec2d(object):
def cross(self, other):
"""The cross product between the vector and other vector
- v1.cross(v2) -> v1.x*v2.y - v2.y*v1.x
+ v1.cross(v2) -> v1.x*v2.y - v1.y*v2.x
:return: The cross product
"""
|
Add Python version to README.md
Make it clear which version of Python we support | [](https://travis-ci.org/metoppv/improver)
[](https://www.codacy.com/app/metoppv_tech/improver?utm_source=github.com&utm_medium=referral&utm_content=metoppv/improver&utm_campaign=Badge_Grade)
[](http://improver.readthedocs.io/en/latest/?badge=latest)
+[](https://www.python.org/downloads/release/python-360/)
IMPROVER is a library of algorithms for meteorological post-processing and verification.
|
TST: Simple tests for `from_euler`
Initial tests. More to be added. | @@ -63,7 +63,7 @@ def test_zero_norms_from_quaternion():
[5, 0, 12, 0]
])
with pytest.raises(ValueError):
- r = Rotation.from_quaternion(x)
+ Rotation.from_quaternion(x)
def test_as_dcm_single_1d_quaternion():
@@ -314,3 +314,15 @@ def test_rotvec_calc_pipeline():
[-3e-4, 3.5e-4, 7.5e-5]
])
assert_allclose(Rotation.from_rotvec(rotvec).as_rotvec(), rotvec)
+
+
+def test_from_euler_single_rotation():
+ quat = Rotation.from_euler('z', 90, degrees=True).as_quaternion()
+ expected_quat = np.array([0, 0, 1, 1]) / np.sqrt(2)
+ assert_allclose(quat, expected_quat)
+
+
+def test_single_intrinsic_extrinsic_rotation():
+ ext = Rotation.from_euler('z', 90, degrees=True).as_dcm()
+ int = Rotation.from_euler('Z', 90, degrees=True).as_dcm()
+ assert_allclose(ext, int)
|
Fix the enigmatic scale case
There seems to be a mistake in the enigmatic scale: enigmatic = Scale('G', 'enigmatic', 'mAMMMmM')
Semitone, Tone and a half, Tone, Tone, Tone, Semitone, Semitone. Should be enigmatic = Scale('G', 'enigmatic', 'mAMMMmm') | @@ -110,7 +110,7 @@ class ScaleGeneratorTest(unittest.TestCase):
self.assertEqual(expected, actual)
def test_enigmatic(self):
- enigmatic = Scale('G', 'enigmatic', 'mAMMMmM')
+ enigmatic = Scale('G', 'enigmatic', 'mAMMMmm')
expected = ['G', 'G#', 'B', 'C#', 'D#', 'F', 'F#']
actual = enigmatic.pitches
self.assertEqual(expected, actual)
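
A quick cross-check of the corrected interval string, assuming the exercise's convention that 'm' is one semitone, 'M' two and 'A' three; the helper below is hypothetical, not the exercise's Scale class.

    STEPS = {"m": 1, "M": 2, "A": 3}
    SHARPS = ["A", "A#", "B", "C", "C#", "D", "D#", "E", "F", "F#", "G", "G#"]

    def pitches(tonic, intervals):
        idx = SHARPS.index(tonic)
        notes = [tonic]
        for step in intervals:
            idx = (idx + STEPS[step]) % 12
            notes.append(SHARPS[idx])
        return notes

    print(pitches("G", "mAMMMmm"))  # ['G', 'G#', 'B', 'C#', 'D#', 'F', 'F#', 'G'] -- closes on the tonic
    print(pitches("G", "mAMMMmM"))  # the final 'M' overshoots to 'G#', so the old string was wrong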
|
[flake8] Ignore B028 bugbear checks
The bugbear B028 is wrong if the variable is not a string,
see | @@ -109,6 +109,7 @@ deps =
[flake8]
# The following are intentionally ignored, possibly pending consensus
+# B028: False positive, see https://github.com/PyCQA/flake8-bugbear/issues/329
# D105: Missing docstring in magic method
# D211: No blank lines allowed before class docstring
# FI1: __future__ import "x" missing
@@ -131,7 +132,7 @@ deps =
# DARXXX: Darglint docstring issues to be solved
-ignore = B007,D105,D211,D401,D413,D412,DAR003,DAR101,DAR102,DAR201,DAR202,DAR301,DAR401,DAR402,DAR501,H101,H238,H301,H306,H404,H405,H903,P101,P102,P103,P205,W503
+ignore = B007,B028,D105,D211,D401,D413,D412,DAR003,DAR101,DAR102,DAR201,DAR202,DAR301,DAR401,DAR402,DAR501,H101,H238,H301,H306,H404,H405,H903,P101,P102,P103,P205,W503
enable-extensions = H203,H204,H205,N818
color = always
|
[skip ci][ci] Mark more ethosu tests with xfail
See for context. Since more parameterizations are popping up as
failed, this disables whole tests rather than specific combinations of
parameters. | @@ -347,9 +347,7 @@ def test_ethosu_binary_elementwise(
([1, 4, 4], [4, 1]),
],
)
[email protected]_parameterizations(
- "ifm_shape0-ifm2_shape0-ethos-u55-64", reason="See https://github.com/apache/tvm/issues/12511"
-)
[email protected](reason="See https://github.com/apache/tvm/issues/12511")
def test_binary_add_with_non_4d_shapes(
request,
accel_type,
@@ -608,9 +606,7 @@ def test_ethosu_right_shift_binary_elemwise(
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize("ifm_shape", [(3, 2), (1, 15, 11, 7), (3, 1, 12), (400,)])
@pytest.mark.parametrize("ifm_scale, ifm_zp, ofm_scale, ofm_zp", [(1, 0, 1, 0), (0.015, 3, 0.2, 5)])
[email protected]_parameterizations(
- "1-0-1-0-ifm_shape3-ethos-u55-128", reason="See https://github.com/apache/tvm/issues/12511"
-)
[email protected](reason="See https://github.com/apache/tvm/issues/12511")
def test_ethosu_identity_codegen(
request, ifm_shape, ifm_scale, ifm_zp, ofm_scale, ofm_zp, accel_type
):
@@ -659,6 +655,7 @@ def test_ethosu_identity_codegen(
((8, 7, 3), (-4, 1, 8, -2)),
],
)
[email protected](reason="See https://github.com/apache/tvm/issues/12511")
def test_relay_reshape_codegen(ifm_shape, new_shape, accel_type):
np.random.seed(0)
@@ -691,9 +688,7 @@ def test_relay_reshape_codegen(ifm_shape, new_shape, accel_type):
([5000], [123], [2151]),
],
)
[email protected]_parameterizations(
- "ifm_shape3-begin3-size3-ethos-u55-32", reason="See https://github.com/apache/tvm/issues/12511"
-)
[email protected](reason="See https://github.com/apache/tvm/issues/12511")
def test_tflite_slice(request, accel_type, ifm_shape, begin, size):
np.random.seed(0)
@@ -729,9 +724,7 @@ def test_tflite_strided_slice(accel_type, ifm_shape, begin, end):
"ifm_shape",
[[1, 5, 12, 4], [1, 1, 2], [4, 3, 2], [10, 20], [345]],
)
[email protected]_parameterizations(
- "ifm_shape4-ABS-ethos-u55-64", reason="See https://github.com/apache/tvm/issues/12511"
-)
[email protected](reason="See https://github.com/apache/tvm/issues/12511")
def test_ethosu_unary_elementwise(
request,
accel_type,
|
make scheduled reports return 400 not 500
when encountering a report the user may not edit | @@ -189,6 +189,7 @@ from corehq.apps.hqwebapp.decorators import (
)
import six
from six.moves import range
+from no_exceptions.exceptions import Http400
# Number of columns in case property history popup
@@ -994,7 +995,7 @@ class ScheduledReportsView(BaseProjectReportSectionView):
instance.day = calculate_day(instance.interval, instance.day, day_change)
if not self.can_edit_report(instance):
- return HttpResponseBadRequest()
+ raise Http400()
else:
instance = ReportNotification(
owner_id=self.request.couch_user._id,
|
Fix build_ext interaction with non numpy extensions
Numpy extensions define the extra_cxx_compile_args and extra_c_compile_args
fields, but distutils extensions don't. Take that into account when populating
build_extension.
Should fix | @@ -393,8 +393,8 @@ def build_extension(self, ext):
log.info("building '%s' extension", ext.name)
extra_args = ext.extra_compile_args or []
- extra_cflags = ext.extra_c_compile_args or []
- extra_cxxflags = ext.extra_cxx_compile_args or []
+ extra_cflags = getattr(ext, 'extra_c_compile_args', None) or []
+ extra_cxxflags = getattr(ext, 'extra_cxx_compile_args', None) or []
macros = ext.define_macros[:]
for undef in ext.undef_macros:
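
A short sketch of why the defensive lookup is needed, using a plain setuptools Extension for illustration: the numpy-specific attributes simply do not exist on a stock Extension, so reading them directly raises AttributeError, while getattr with a default degrades to an empty list.

    from setuptools import Extension

    ext = Extension("demo", sources=["demo.c"])   # no numpy-specific fields

    extra_cflags = getattr(ext, "extra_c_compile_args", None) or []
    print(extra_cflags)   # [] -- no AttributeError for non-numpy extensions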
|
model: transformers: Set version range to >=2.5.1,<2.9.0
Temporary fix for issue with new TensorFlow release
Related: | @@ -24,7 +24,7 @@ INSTALL_REQUIRES = [
"numpy>=1.16.4",
"seqeval>=0.0.12",
"fastprogress>=0.2.2",
- "transformers>=2.5.1",
+ "transformers>=2.5.1,<2.9.0",
] + (
["dffml>=0.3.7"]
if not any(
|
Update training.rst
Fixed broken link | @@ -545,7 +545,7 @@ To advanced the skills of senior and functional leaders we bring in experts to a
- As an example, `Jono Bacon <http://www.jonobacon.org/about/>`_--a leading author, speaker and consultant on open source community advocacy--meets with our community team regularly to refine our processes and understanding. There's a range of similiarly adept company advisers that help advance our thinking and capabilities in critical ways.
-Many thought leaders and conference speakers are open to consulting projects with the right clients, and Mattermost is a flexible client. There's no travel involved, we meet over video conference, [our consulting process is straight forward](https://docs.google.com/document/d/1G4wFLq_wHHEDJ-hrv5Kmu022mFJgh3rJ4-glM0W6riI/edit#heading=h.pwxwwq4ezzfx), we're easy to work with, and we take advising seriously.
+Many thought leaders and conference speakers are open to consulting projects with the right clients, and Mattermost is a flexible client. There's no travel involved, we meet over video conference, `our consulting process is straight forward <https://docs.google.com/document/d/1G4wFLq_wHHEDJ-hrv5Kmu022mFJgh3rJ4-glM0W6riI/edit#heading=h.pwxwwq4ezzfx>`_, we're easy to work with, and we take advising seriously.
When hiring, we are also open to bringing in a leader's personal mentors as consultants and company advisers when skill sets are appropriate.
|
change: [cli] utilize metavar instead of actual choices
utilize metavar instead of candidate values to shorten help text. | @@ -157,13 +157,14 @@ def make_parser(defaults=None):
gspog.add_argument("--set", help=_SET_HELP)
parser.add_argument("-o", "--output", help="Output file path")
- parser.add_argument("-I", "--itype", choices=ctypes,
+ parser.add_argument("-I", "--itype", choices=ctypes, metavar="ITYPE",
help=(type_help % "Input"))
- parser.add_argument("-O", "--otype", choices=ctypes,
+ parser.add_argument("-O", "--otype", choices=ctypes, metavar="OTYPE",
help=(type_help % "Output"))
- parser.add_argument("-M", "--merge", choices=mts, help=mt_help)
+ parser.add_argument("-M", "--merge", choices=mts, metavar="MERGE",
+ help=mt_help)
parser.add_argument("-A", "--args", help="Argument configs to override")
- parser.add_argument("--atype", choices=ctypes,
+ parser.add_argument("--atype", choices=ctypes, metavar="ATYPE",
help=_ATYPE_HELP_FMT % ctypes_s)
cpog = parser.add_argument_group("Common options")
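
A toy parser (not the project's real CLI) showing the effect of the change: without metavar, argparse repeats every choice in the usage and help text; with metavar it prints a single placeholder while still validating the value against choices.

    import argparse

    ctypes = ["ini", "json", "toml", "xml", "yaml"]

    parser = argparse.ArgumentParser(prog="demo")
    parser.add_argument("-I", "--itype", choices=ctypes, metavar="ITYPE",
                        help="Input type")

    parser.parse_args(["-I", "json"])    # accepted
    # parser.parse_args(["-I", "csv"])   # would exit: "invalid choice: 'csv'"
    parser.print_help()                  # shows "-I ITYPE" instead of "-I {ini,json,...}"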
|
fix: import module from line N shadowed by loop variable
Flake8 F402 reported by sider | @@ -181,11 +181,11 @@ class Query:
warn("'filters_config' hook is not completely implemented yet in frappe.db.query engine")
- for operator, function in additional_filters_config.items():
+ for _operator, function in additional_filters_config.items():
if callable(function):
- all_operators.update({operator.casefold(): function})
+ all_operators.update({_operator.casefold(): function})
elif isinstance(function, dict):
- all_operators[operator.casefold()] = frappe.get_attr(function.get("get_field"))()["operator"]
+ all_operators[_operator.casefold()] = frappe.get_attr(function.get("get_field"))()["operator"]
return all_operators
|