file_name
large_stringlengths 4
140
| prefix
large_stringlengths 0
39k
| suffix
large_stringlengths 0
36.1k
| middle
large_stringlengths 0
29.4k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
utils.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import datetime
import errno
import inspect
import logging
import os
import platform
import random
import subprocess
import socket
import sys
import uuid
import iso8601
from heat.common import exception
logger = logging.getLogger(__name__)
TIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
def chunkreadable(iter, chunk_size=65536):
"""
Wrap a readable iterator with a reader yielding chunks of
a preferred size, otherwise leave iterator unchanged.
:param iter: an iter which may also be readable
:param chunk_size: maximum size of chunk
"""
return chunkiter(iter, chunk_size) if hasattr(iter, 'read') else iter
def chunkiter(fp, chunk_size=65536):
"""
Return an iterator to a file-like obj which yields fixed size chunks
:param fp: a file-like object
:param chunk_size: maximum size of chunk
"""
while True:
chunk = fp.read(chunk_size)
if chunk:
yield chunk
else:
break
def image_meta_to_http_headers(image_meta):
"""
Returns a set of image metadata into a dict
of HTTP headers that can be fed to either a Webob
Request object or an httplib.HTTP(S)Connection object
:param image_meta: Mapping of image metadata
"""
headers = {}
for k, v in image_meta.items():
if v is not None:
if k == 'properties':
for pk, pv in v.items():
if pv is not None:
headers["x-image-meta-property-%s"
% pk.lower()] = unicode(pv)
else:
headers["x-image-meta-%s" % k.lower()] = unicode(v)
return headers
def add_features_to_http_headers(features, headers):
"""
Adds additional headers representing heat features to be enabled.
:param headers: Base set of headers
:param features: Map of enabled features
"""
if features:
for k, v in features.items():
if v is not None:
headers[k.lower()] = unicode(v)
def get_image_meta_from_headers(response):
"""
Processes HTTP headers from a supplied response that
match the x-image-meta and x-image-meta-property and
returns a mapping of image metadata and properties
:param response: Response to process
"""
result = {}
properties = {}
if hasattr(response, 'getheaders'): # httplib.HTTPResponse
headers = response.getheaders()
else: # webob.Response
headers = response.headers.items()
for key, value in headers:
key = str(key.lower())
if key.startswith('x-image-meta-property-'):
field_name = key[len('x-image-meta-property-'):].replace('-', '_')
properties[field_name] = value or None
elif key.startswith('x-image-meta-'):
field_name = key[len('x-image-meta-'):].replace('-', '_')
result[field_name] = value or None
result['properties'] = properties
if 'size' in result:
try:
result['size'] = int(result['size'])
except ValueError:
raise exception.Invalid
for key in ('is_public', 'deleted', 'protected'):
if key in result:
result[key] = bool_from_header_value(result[key])
return result
def bool_from_header_value(value):
"""
Returns True if value is a boolean True or the
string 'true', case-insensitive, False otherwise
"""
if isinstance(value, bool):
return value
elif isinstance(value, (basestring, unicode)):
if str(value).lower() == 'true':
return True
return False
def bool_from_string(subject):
"""
Interpret a string as a boolean.
Any string value in:
('True', 'true', 'On', 'on', '1')
is interpreted as a boolean True.
Useful for JSON-decoded stuff and config file parsing
"""
if isinstance(subject, bool):
return subject
elif isinstance(subject, int):
return subject == 1
if hasattr(subject, 'startswith'): # str or unicode...
if subject.strip().lower() in ('true', 'on', '1'):
return True
return False
def import_class(import_str):
"""Returns a class from a string including module and class"""
mod_str, _sep, class_str = import_str.rpartition('.')
try:
__import__(mod_str)
return getattr(sys.modules[mod_str], class_str)
except (ImportError, ValueError, AttributeError), e:
raise exception.ImportFailure(import_str=import_str,
reason=e)
def import_object(import_str):
"""Returns an object including a module or module and class"""
try:
__import__(import_str)
return sys.modules[import_str]
except ImportError:
cls = import_class(import_str)
return cls()
def generate_uuid():
return str(uuid.uuid4())
def is_uuid_like(value):
try:
uuid.UUID(value)
return True
except Exception:
return False
def isotime(at=None):
"""Stringify time in ISO 8601 format"""
if not at:
at = datetime.datetime.utcnow()
str = at.strftime(TIME_FORMAT)
tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
str += ('Z' if tz == 'UTC' else tz)
return str
def parse_isotime(timestr):
"""Parse time from ISO 8601 format"""
try:
return iso8601.parse_date(timestr)
except iso8601.ParseError as e:
raise ValueError(e.message)
except TypeError as e:
raise ValueError(e.message)
def | (timestamp):
"""Normalize time in arbitrary timezone to UTC"""
offset = timestamp.utcoffset()
return timestamp.replace(tzinfo=None) - offset if offset else timestamp
def safe_mkdirs(path):
try:
os.makedirs(path)
except OSError, e:
if e.errno != errno.EEXIST:
raise
def safe_remove(path):
try:
os.remove(path)
except OSError, e:
if e.errno != errno.ENOENT:
raise
class PrettyTable(object):
"""Creates an ASCII art table for use in bin/heat
Example:
ID Name Size Hits
--- ----------------- ------------ -----
122 image 22 0
"""
def __init__(self):
self.columns = []
def add_column(self, width, label="", just='l'):
"""Add a column to the table
:param width: number of characters wide the column should be
:param label: column heading
:param just: justification for the column, 'l' for left,
'r' for right
"""
self.columns.append((width, label, just))
def make_header(self):
label_parts = []
break_parts = []
for width, label, _ in self.columns:
# NOTE(sirp): headers are always left justified
label_part = self._clip_and_justify(label, width, 'l')
label_parts.append(label_part)
break_part = '-' * width
break_parts.append(break_part)
label_line = ' '.join(label_parts)
break_line = ' '.join(break_parts)
return '\n'.join([label_line, break_line])
def make_row(self, *args):
row = args
row_parts = []
for data, (width, _, just) in zip(row, self.columns):
row_part = self._clip_and_justify(data, width, just)
row_parts.append(row_part)
row_line = ' '.join(row_parts)
return row_line
@staticmethod
def _clip_and_justify(data, width, just):
# clip field to column width
clipped_data = str(data)[:width]
if just == 'r':
# right justify
justified = clipped_data.rjust(width)
else:
# left justify
justified = clipped_data.ljust(width)
return justified
def get_terminal_size():
def _get_terminal_size_posix():
import fcntl
import struct
import termios
height_width = None
try:
height_width = struct.unpack('hh', fcntl.ioctl(sys.stderr.fileno(),
termios.TIOCGWINSZ,
struct.pack('HH', 0, 0)))
except:
pass
if not height_width:
try:
p = subprocess.Popen(['stty', 'size'],
shell=false,
stdout=subprocess.PIPE)
return tuple(int(x) for x in p.communicate()[0].split())
except:
pass
return height_width
def _get_terminal_size_win32():
try:
from ctypes import windll, create_string_buffer
handle = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(handle, csbi)
except:
return None
if res:
import struct
unpack_tmp = struct.unpack("hhhhHhhhhhh", csbi.raw)
(bufx, bufy, curx, cury, wattr,
left, top, right, bottom, maxx, maxy) = unpack_tmp
height = bottom - top + 1
width = right - left + 1
return (height, width)
else:
return None
def _get_terminal_size_unknownOS():
raise NotImplementedError
func = {'posix': _get_terminal_size_posix,
'win32': _get_terminal_size_win32}
height_width = func.get(platform.os.name, _get_terminal_size_unknownOS)()
if height_width == None:
raise exception.Invalid()
for i in height_width:
if not isinstance(i, int) or i <= 0:
raise exception.Invalid()
return height_width[0], height_width[1]
| normalize_time | identifier_name |
utils.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import datetime
import errno
import inspect
import logging
import os
import platform
import random
import subprocess
import socket
import sys
import uuid
import iso8601
from heat.common import exception
logger = logging.getLogger(__name__)
TIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
def chunkreadable(iter, chunk_size=65536):
"""
Wrap a readable iterator with a reader yielding chunks of
a preferred size, otherwise leave iterator unchanged.
:param iter: an iter which may also be readable
:param chunk_size: maximum size of chunk
"""
return chunkiter(iter, chunk_size) if hasattr(iter, 'read') else iter
def chunkiter(fp, chunk_size=65536):
"""
Return an iterator to a file-like obj which yields fixed size chunks
:param fp: a file-like object
:param chunk_size: maximum size of chunk
"""
while True:
chunk = fp.read(chunk_size) | if chunk:
yield chunk
else:
break
def image_meta_to_http_headers(image_meta):
"""
Returns a set of image metadata into a dict
of HTTP headers that can be fed to either a Webob
Request object or an httplib.HTTP(S)Connection object
:param image_meta: Mapping of image metadata
"""
headers = {}
for k, v in image_meta.items():
if v is not None:
if k == 'properties':
for pk, pv in v.items():
if pv is not None:
headers["x-image-meta-property-%s"
% pk.lower()] = unicode(pv)
else:
headers["x-image-meta-%s" % k.lower()] = unicode(v)
return headers
def add_features_to_http_headers(features, headers):
"""
Adds additional headers representing heat features to be enabled.
:param headers: Base set of headers
:param features: Map of enabled features
"""
if features:
for k, v in features.items():
if v is not None:
headers[k.lower()] = unicode(v)
def get_image_meta_from_headers(response):
"""
Processes HTTP headers from a supplied response that
match the x-image-meta and x-image-meta-property and
returns a mapping of image metadata and properties
:param response: Response to process
"""
result = {}
properties = {}
if hasattr(response, 'getheaders'): # httplib.HTTPResponse
headers = response.getheaders()
else: # webob.Response
headers = response.headers.items()
for key, value in headers:
key = str(key.lower())
if key.startswith('x-image-meta-property-'):
field_name = key[len('x-image-meta-property-'):].replace('-', '_')
properties[field_name] = value or None
elif key.startswith('x-image-meta-'):
field_name = key[len('x-image-meta-'):].replace('-', '_')
result[field_name] = value or None
result['properties'] = properties
if 'size' in result:
try:
result['size'] = int(result['size'])
except ValueError:
raise exception.Invalid
for key in ('is_public', 'deleted', 'protected'):
if key in result:
result[key] = bool_from_header_value(result[key])
return result
def bool_from_header_value(value):
"""
Returns True if value is a boolean True or the
string 'true', case-insensitive, False otherwise
"""
if isinstance(value, bool):
return value
elif isinstance(value, (basestring, unicode)):
if str(value).lower() == 'true':
return True
return False
def bool_from_string(subject):
"""
Interpret a string as a boolean.
Any string value in:
('True', 'true', 'On', 'on', '1')
is interpreted as a boolean True.
Useful for JSON-decoded stuff and config file parsing
"""
if isinstance(subject, bool):
return subject
elif isinstance(subject, int):
return subject == 1
if hasattr(subject, 'startswith'): # str or unicode...
if subject.strip().lower() in ('true', 'on', '1'):
return True
return False
def import_class(import_str):
"""Returns a class from a string including module and class"""
mod_str, _sep, class_str = import_str.rpartition('.')
try:
__import__(mod_str)
return getattr(sys.modules[mod_str], class_str)
except (ImportError, ValueError, AttributeError), e:
raise exception.ImportFailure(import_str=import_str,
reason=e)
def import_object(import_str):
"""Returns an object including a module or module and class"""
try:
__import__(import_str)
return sys.modules[import_str]
except ImportError:
cls = import_class(import_str)
return cls()
def generate_uuid():
return str(uuid.uuid4())
def is_uuid_like(value):
try:
uuid.UUID(value)
return True
except Exception:
return False
def isotime(at=None):
"""Stringify time in ISO 8601 format"""
if not at:
at = datetime.datetime.utcnow()
str = at.strftime(TIME_FORMAT)
tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
str += ('Z' if tz == 'UTC' else tz)
return str
def parse_isotime(timestr):
"""Parse time from ISO 8601 format"""
try:
return iso8601.parse_date(timestr)
except iso8601.ParseError as e:
raise ValueError(e.message)
except TypeError as e:
raise ValueError(e.message)
def normalize_time(timestamp):
"""Normalize time in arbitrary timezone to UTC"""
offset = timestamp.utcoffset()
return timestamp.replace(tzinfo=None) - offset if offset else timestamp
def safe_mkdirs(path):
try:
os.makedirs(path)
except OSError, e:
if e.errno != errno.EEXIST:
raise
def safe_remove(path):
try:
os.remove(path)
except OSError, e:
if e.errno != errno.ENOENT:
raise
class PrettyTable(object):
"""Creates an ASCII art table for use in bin/heat
Example:
ID Name Size Hits
--- ----------------- ------------ -----
122 image 22 0
"""
def __init__(self):
self.columns = []
def add_column(self, width, label="", just='l'):
"""Add a column to the table
:param width: number of characters wide the column should be
:param label: column heading
:param just: justification for the column, 'l' for left,
'r' for right
"""
self.columns.append((width, label, just))
def make_header(self):
label_parts = []
break_parts = []
for width, label, _ in self.columns:
# NOTE(sirp): headers are always left justified
label_part = self._clip_and_justify(label, width, 'l')
label_parts.append(label_part)
break_part = '-' * width
break_parts.append(break_part)
label_line = ' '.join(label_parts)
break_line = ' '.join(break_parts)
return '\n'.join([label_line, break_line])
def make_row(self, *args):
row = args
row_parts = []
for data, (width, _, just) in zip(row, self.columns):
row_part = self._clip_and_justify(data, width, just)
row_parts.append(row_part)
row_line = ' '.join(row_parts)
return row_line
@staticmethod
def _clip_and_justify(data, width, just):
# clip field to column width
clipped_data = str(data)[:width]
if just == 'r':
# right justify
justified = clipped_data.rjust(width)
else:
# left justify
justified = clipped_data.ljust(width)
return justified
def get_terminal_size():
def _get_terminal_size_posix():
import fcntl
import struct
import termios
height_width = None
try:
height_width = struct.unpack('hh', fcntl.ioctl(sys.stderr.fileno(),
termios.TIOCGWINSZ,
struct.pack('HH', 0, 0)))
except:
pass
if not height_width:
try:
p = subprocess.Popen(['stty', 'size'],
shell=false,
stdout=subprocess.PIPE)
return tuple(int(x) for x in p.communicate()[0].split())
except:
pass
return height_width
def _get_terminal_size_win32():
try:
from ctypes import windll, create_string_buffer
handle = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(handle, csbi)
except:
return None
if res:
import struct
unpack_tmp = struct.unpack("hhhhHhhhhhh", csbi.raw)
(bufx, bufy, curx, cury, wattr,
left, top, right, bottom, maxx, maxy) = unpack_tmp
height = bottom - top + 1
width = right - left + 1
return (height, width)
else:
return None
def _get_terminal_size_unknownOS():
raise NotImplementedError
func = {'posix': _get_terminal_size_posix,
'win32': _get_terminal_size_win32}
height_width = func.get(platform.os.name, _get_terminal_size_unknownOS)()
if height_width == None:
raise exception.Invalid()
for i in height_width:
if not isinstance(i, int) or i <= 0:
raise exception.Invalid()
return height_width[0], height_width[1] | random_line_split |
|
utils.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import datetime
import errno
import inspect
import logging
import os
import platform
import random
import subprocess
import socket
import sys
import uuid
import iso8601
from heat.common import exception
logger = logging.getLogger(__name__)
TIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
def chunkreadable(iter, chunk_size=65536):
"""
Wrap a readable iterator with a reader yielding chunks of
a preferred size, otherwise leave iterator unchanged.
:param iter: an iter which may also be readable
:param chunk_size: maximum size of chunk
"""
return chunkiter(iter, chunk_size) if hasattr(iter, 'read') else iter
def chunkiter(fp, chunk_size=65536):
"""
Return an iterator to a file-like obj which yields fixed size chunks
:param fp: a file-like object
:param chunk_size: maximum size of chunk
"""
while True:
chunk = fp.read(chunk_size)
if chunk:
yield chunk
else:
break
def image_meta_to_http_headers(image_meta):
"""
Returns a set of image metadata into a dict
of HTTP headers that can be fed to either a Webob
Request object or an httplib.HTTP(S)Connection object
:param image_meta: Mapping of image metadata
"""
headers = {}
for k, v in image_meta.items():
if v is not None:
if k == 'properties':
for pk, pv in v.items():
if pv is not None:
headers["x-image-meta-property-%s"
% pk.lower()] = unicode(pv)
else:
headers["x-image-meta-%s" % k.lower()] = unicode(v)
return headers
def add_features_to_http_headers(features, headers):
"""
Adds additional headers representing heat features to be enabled.
:param headers: Base set of headers
:param features: Map of enabled features
"""
if features:
for k, v in features.items():
if v is not None:
headers[k.lower()] = unicode(v)
def get_image_meta_from_headers(response):
"""
Processes HTTP headers from a supplied response that
match the x-image-meta and x-image-meta-property and
returns a mapping of image metadata and properties
:param response: Response to process
"""
result = {}
properties = {}
if hasattr(response, 'getheaders'): # httplib.HTTPResponse
headers = response.getheaders()
else: # webob.Response
headers = response.headers.items()
for key, value in headers:
key = str(key.lower())
if key.startswith('x-image-meta-property-'):
field_name = key[len('x-image-meta-property-'):].replace('-', '_')
properties[field_name] = value or None
elif key.startswith('x-image-meta-'):
field_name = key[len('x-image-meta-'):].replace('-', '_')
result[field_name] = value or None
result['properties'] = properties
if 'size' in result:
try:
result['size'] = int(result['size'])
except ValueError:
raise exception.Invalid
for key in ('is_public', 'deleted', 'protected'):
if key in result:
result[key] = bool_from_header_value(result[key])
return result
def bool_from_header_value(value):
"""
Returns True if value is a boolean True or the
string 'true', case-insensitive, False otherwise
"""
if isinstance(value, bool):
return value
elif isinstance(value, (basestring, unicode)):
if str(value).lower() == 'true':
return True
return False
def bool_from_string(subject):
"""
Interpret a string as a boolean.
Any string value in:
('True', 'true', 'On', 'on', '1')
is interpreted as a boolean True.
Useful for JSON-decoded stuff and config file parsing
"""
if isinstance(subject, bool):
return subject
elif isinstance(subject, int):
return subject == 1
if hasattr(subject, 'startswith'): # str or unicode...
if subject.strip().lower() in ('true', 'on', '1'):
return True
return False
def import_class(import_str):
"""Returns a class from a string including module and class"""
mod_str, _sep, class_str = import_str.rpartition('.')
try:
__import__(mod_str)
return getattr(sys.modules[mod_str], class_str)
except (ImportError, ValueError, AttributeError), e:
raise exception.ImportFailure(import_str=import_str,
reason=e)
def import_object(import_str):
"""Returns an object including a module or module and class"""
try:
__import__(import_str)
return sys.modules[import_str]
except ImportError:
cls = import_class(import_str)
return cls()
def generate_uuid():
return str(uuid.uuid4())
def is_uuid_like(value):
try:
uuid.UUID(value)
return True
except Exception:
return False
def isotime(at=None):
"""Stringify time in ISO 8601 format"""
if not at:
at = datetime.datetime.utcnow()
str = at.strftime(TIME_FORMAT)
tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
str += ('Z' if tz == 'UTC' else tz)
return str
def parse_isotime(timestr):
"""Parse time from ISO 8601 format"""
try:
return iso8601.parse_date(timestr)
except iso8601.ParseError as e:
raise ValueError(e.message)
except TypeError as e:
raise ValueError(e.message)
def normalize_time(timestamp):
"""Normalize time in arbitrary timezone to UTC"""
offset = timestamp.utcoffset()
return timestamp.replace(tzinfo=None) - offset if offset else timestamp
def safe_mkdirs(path):
try:
os.makedirs(path)
except OSError, e:
if e.errno != errno.EEXIST:
|
def safe_remove(path):
try:
os.remove(path)
except OSError, e:
if e.errno != errno.ENOENT:
raise
class PrettyTable(object):
"""Creates an ASCII art table for use in bin/heat
Example:
ID Name Size Hits
--- ----------------- ------------ -----
122 image 22 0
"""
def __init__(self):
self.columns = []
def add_column(self, width, label="", just='l'):
"""Add a column to the table
:param width: number of characters wide the column should be
:param label: column heading
:param just: justification for the column, 'l' for left,
'r' for right
"""
self.columns.append((width, label, just))
def make_header(self):
label_parts = []
break_parts = []
for width, label, _ in self.columns:
# NOTE(sirp): headers are always left justified
label_part = self._clip_and_justify(label, width, 'l')
label_parts.append(label_part)
break_part = '-' * width
break_parts.append(break_part)
label_line = ' '.join(label_parts)
break_line = ' '.join(break_parts)
return '\n'.join([label_line, break_line])
def make_row(self, *args):
row = args
row_parts = []
for data, (width, _, just) in zip(row, self.columns):
row_part = self._clip_and_justify(data, width, just)
row_parts.append(row_part)
row_line = ' '.join(row_parts)
return row_line
@staticmethod
def _clip_and_justify(data, width, just):
# clip field to column width
clipped_data = str(data)[:width]
if just == 'r':
# right justify
justified = clipped_data.rjust(width)
else:
# left justify
justified = clipped_data.ljust(width)
return justified
def get_terminal_size():
def _get_terminal_size_posix():
import fcntl
import struct
import termios
height_width = None
try:
height_width = struct.unpack('hh', fcntl.ioctl(sys.stderr.fileno(),
termios.TIOCGWINSZ,
struct.pack('HH', 0, 0)))
except:
pass
if not height_width:
try:
p = subprocess.Popen(['stty', 'size'],
shell=false,
stdout=subprocess.PIPE)
return tuple(int(x) for x in p.communicate()[0].split())
except:
pass
return height_width
def _get_terminal_size_win32():
try:
from ctypes import windll, create_string_buffer
handle = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(handle, csbi)
except:
return None
if res:
import struct
unpack_tmp = struct.unpack("hhhhHhhhhhh", csbi.raw)
(bufx, bufy, curx, cury, wattr,
left, top, right, bottom, maxx, maxy) = unpack_tmp
height = bottom - top + 1
width = right - left + 1
return (height, width)
else:
return None
def _get_terminal_size_unknownOS():
raise NotImplementedError
func = {'posix': _get_terminal_size_posix,
'win32': _get_terminal_size_win32}
height_width = func.get(platform.os.name, _get_terminal_size_unknownOS)()
if height_width == None:
raise exception.Invalid()
for i in height_width:
if not isinstance(i, int) or i <= 0:
raise exception.Invalid()
return height_width[0], height_width[1]
| raise | conditional_block |
utils.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import datetime
import errno
import inspect
import logging
import os
import platform
import random
import subprocess
import socket
import sys
import uuid
import iso8601
from heat.common import exception
logger = logging.getLogger(__name__)
TIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
def chunkreadable(iter, chunk_size=65536):
"""
Wrap a readable iterator with a reader yielding chunks of
a preferred size, otherwise leave iterator unchanged.
:param iter: an iter which may also be readable
:param chunk_size: maximum size of chunk
"""
return chunkiter(iter, chunk_size) if hasattr(iter, 'read') else iter
def chunkiter(fp, chunk_size=65536):
"""
Return an iterator to a file-like obj which yields fixed size chunks
:param fp: a file-like object
:param chunk_size: maximum size of chunk
"""
while True:
chunk = fp.read(chunk_size)
if chunk:
yield chunk
else:
break
def image_meta_to_http_headers(image_meta):
"""
Returns a set of image metadata into a dict
of HTTP headers that can be fed to either a Webob
Request object or an httplib.HTTP(S)Connection object
:param image_meta: Mapping of image metadata
"""
headers = {}
for k, v in image_meta.items():
if v is not None:
if k == 'properties':
for pk, pv in v.items():
if pv is not None:
headers["x-image-meta-property-%s"
% pk.lower()] = unicode(pv)
else:
headers["x-image-meta-%s" % k.lower()] = unicode(v)
return headers
def add_features_to_http_headers(features, headers):
"""
Adds additional headers representing heat features to be enabled.
:param headers: Base set of headers
:param features: Map of enabled features
"""
if features:
for k, v in features.items():
if v is not None:
headers[k.lower()] = unicode(v)
def get_image_meta_from_headers(response):
"""
Processes HTTP headers from a supplied response that
match the x-image-meta and x-image-meta-property and
returns a mapping of image metadata and properties
:param response: Response to process
"""
result = {}
properties = {}
if hasattr(response, 'getheaders'): # httplib.HTTPResponse
headers = response.getheaders()
else: # webob.Response
headers = response.headers.items()
for key, value in headers:
key = str(key.lower())
if key.startswith('x-image-meta-property-'):
field_name = key[len('x-image-meta-property-'):].replace('-', '_')
properties[field_name] = value or None
elif key.startswith('x-image-meta-'):
field_name = key[len('x-image-meta-'):].replace('-', '_')
result[field_name] = value or None
result['properties'] = properties
if 'size' in result:
try:
result['size'] = int(result['size'])
except ValueError:
raise exception.Invalid
for key in ('is_public', 'deleted', 'protected'):
if key in result:
result[key] = bool_from_header_value(result[key])
return result
def bool_from_header_value(value):
"""
Returns True if value is a boolean True or the
string 'true', case-insensitive, False otherwise
"""
if isinstance(value, bool):
return value
elif isinstance(value, (basestring, unicode)):
if str(value).lower() == 'true':
return True
return False
def bool_from_string(subject):
|
def import_class(import_str):
"""Returns a class from a string including module and class"""
mod_str, _sep, class_str = import_str.rpartition('.')
try:
__import__(mod_str)
return getattr(sys.modules[mod_str], class_str)
except (ImportError, ValueError, AttributeError), e:
raise exception.ImportFailure(import_str=import_str,
reason=e)
def import_object(import_str):
"""Returns an object including a module or module and class"""
try:
__import__(import_str)
return sys.modules[import_str]
except ImportError:
cls = import_class(import_str)
return cls()
def generate_uuid():
return str(uuid.uuid4())
def is_uuid_like(value):
try:
uuid.UUID(value)
return True
except Exception:
return False
def isotime(at=None):
"""Stringify time in ISO 8601 format"""
if not at:
at = datetime.datetime.utcnow()
str = at.strftime(TIME_FORMAT)
tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
str += ('Z' if tz == 'UTC' else tz)
return str
def parse_isotime(timestr):
"""Parse time from ISO 8601 format"""
try:
return iso8601.parse_date(timestr)
except iso8601.ParseError as e:
raise ValueError(e.message)
except TypeError as e:
raise ValueError(e.message)
def normalize_time(timestamp):
"""Normalize time in arbitrary timezone to UTC"""
offset = timestamp.utcoffset()
return timestamp.replace(tzinfo=None) - offset if offset else timestamp
def safe_mkdirs(path):
try:
os.makedirs(path)
except OSError, e:
if e.errno != errno.EEXIST:
raise
def safe_remove(path):
try:
os.remove(path)
except OSError, e:
if e.errno != errno.ENOENT:
raise
class PrettyTable(object):
"""Creates an ASCII art table for use in bin/heat
Example:
ID Name Size Hits
--- ----------------- ------------ -----
122 image 22 0
"""
def __init__(self):
self.columns = []
def add_column(self, width, label="", just='l'):
"""Add a column to the table
:param width: number of characters wide the column should be
:param label: column heading
:param just: justification for the column, 'l' for left,
'r' for right
"""
self.columns.append((width, label, just))
def make_header(self):
label_parts = []
break_parts = []
for width, label, _ in self.columns:
# NOTE(sirp): headers are always left justified
label_part = self._clip_and_justify(label, width, 'l')
label_parts.append(label_part)
break_part = '-' * width
break_parts.append(break_part)
label_line = ' '.join(label_parts)
break_line = ' '.join(break_parts)
return '\n'.join([label_line, break_line])
def make_row(self, *args):
row = args
row_parts = []
for data, (width, _, just) in zip(row, self.columns):
row_part = self._clip_and_justify(data, width, just)
row_parts.append(row_part)
row_line = ' '.join(row_parts)
return row_line
@staticmethod
def _clip_and_justify(data, width, just):
# clip field to column width
clipped_data = str(data)[:width]
if just == 'r':
# right justify
justified = clipped_data.rjust(width)
else:
# left justify
justified = clipped_data.ljust(width)
return justified
def get_terminal_size():
def _get_terminal_size_posix():
import fcntl
import struct
import termios
height_width = None
try:
height_width = struct.unpack('hh', fcntl.ioctl(sys.stderr.fileno(),
termios.TIOCGWINSZ,
struct.pack('HH', 0, 0)))
except:
pass
if not height_width:
try:
p = subprocess.Popen(['stty', 'size'],
shell=false,
stdout=subprocess.PIPE)
return tuple(int(x) for x in p.communicate()[0].split())
except:
pass
return height_width
def _get_terminal_size_win32():
try:
from ctypes import windll, create_string_buffer
handle = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(handle, csbi)
except:
return None
if res:
import struct
unpack_tmp = struct.unpack("hhhhHhhhhhh", csbi.raw)
(bufx, bufy, curx, cury, wattr,
left, top, right, bottom, maxx, maxy) = unpack_tmp
height = bottom - top + 1
width = right - left + 1
return (height, width)
else:
return None
def _get_terminal_size_unknownOS():
raise NotImplementedError
func = {'posix': _get_terminal_size_posix,
'win32': _get_terminal_size_win32}
height_width = func.get(platform.os.name, _get_terminal_size_unknownOS)()
if height_width == None:
raise exception.Invalid()
for i in height_width:
if not isinstance(i, int) or i <= 0:
raise exception.Invalid()
return height_width[0], height_width[1]
| """
Interpret a string as a boolean.
Any string value in:
('True', 'true', 'On', 'on', '1')
is interpreted as a boolean True.
Useful for JSON-decoded stuff and config file parsing
"""
if isinstance(subject, bool):
return subject
elif isinstance(subject, int):
return subject == 1
if hasattr(subject, 'startswith'): # str or unicode...
if subject.strip().lower() in ('true', 'on', '1'):
return True
return False | identifier_body |
web.py | """
Nodes that use web services to do something.
"""
import json
import httplib2
import urllib
from BeautifulSoup import BeautifulSoup
from nodetree import node, exceptions
from . import base
from .. import stages
class WebServiceNodeError(exceptions.NodeError):
pass
class BaseWebService(node.Node, base.TextWriterMixin):
"""Base class for web service nodes."""
abstract = True
stage = stages.POST
intypes = [unicode]
outtype = unicode
class MashapeProcessing(BaseWebService):
"""Mashape entity extraction."""
stage = stages.POST
baseurl = "http://text-processing.com/api/"
parameters = [
dict(name="extract", value="phrases", choices=["phrases", "sentiment"]),
]
def process(self, input):
http = httplib2.Http()
headers = {}
body = dict(text=input[:10000].encode("utf8", "replace"))
url = "%s/%s/" % (self.baseurl, self._params.get("extract", "phrases"))
request, content = http.request(url, "POST", headers=headers, body=urllib.urlencode(body))
if request["status"] == "503":
raise WebServiceNodeError("Daily limit exceeded", self)
elif request["status"] == "400":
raise WebServiceNodeError("No text, limit exceeded, or incorrect language", self)
out = u""
try:
data = json.loads(content)
except ValueError:
return content
for key in ["GPE", "VP", "LOCATION", "NP", "DATE"]:
keydata = data.get(key)
if keydata is not None:
out += "%s\n" % key
for entity in keydata:
out += " %s\n" % entity
return out
class DBPediaAnnotate(BaseWebService):
"""Mashape entity extraction."""
stage = stages.POST
baseurl = "http://spotlight.dbpedia.org/rest/annotate/"
parameters = [
dict(name="confident", value=0.2),
dict(name="support", value=20),
]
def process(self, input):
http = httplib2.Http()
headers = {}
body = dict(
text=input.encode("utf8", "replace"),
confidence=self._params.get("confident"),
support=self._params.get("support"),
)
url = "%s?%s" % (self.baseurl, urllib.urlencode(body))
request, content = http.request(url, "GET", headers=headers)
if request["status"] != "200":
raise WebServiceNodeError("A web service error occured. Status: %s" % request["status"], self)
out = u""
soup = BeautifulSoup(content)
for ref in soup.findAll("a"):
out += "%s\n" % ref.text
out += " %s\n\n" % ref.get("href")
return out
class | (BaseWebService):
"""OpenCalias sematic markup."""
stage = stages.POST
baseurl = "http://api.opencalais.com/tag/rs/enrich"
parameters = [
]
def process(self, input):
http = httplib2.Http()
headers = {
"x-calais-licenseID": "dsza6q6zwa9nzvz9wbz7f6y5",
"content-type": "text/raw",
"Accept": "xml/rdf",
"enableMetadataType": "GenericRelations",
}
request, content = http.request(
self.baseurl,
"POST",
headers=headers,
body=input.encode("utf8")
)
if request["status"] != "200":
raise WebServiceNodeError("A web service error occured. Status: %s" % request["status"], self)
return content.decode("utf8")
| OpenCalais | identifier_name |
web.py | """
Nodes that use web services to do something.
"""
import json
import httplib2
import urllib
from BeautifulSoup import BeautifulSoup
from nodetree import node, exceptions
from . import base
from .. import stages
class WebServiceNodeError(exceptions.NodeError):
pass
class BaseWebService(node.Node, base.TextWriterMixin):
"""Base class for web service nodes."""
abstract = True
stage = stages.POST
intypes = [unicode]
outtype = unicode
class MashapeProcessing(BaseWebService):
"""Mashape entity extraction."""
stage = stages.POST
baseurl = "http://text-processing.com/api/"
parameters = [
dict(name="extract", value="phrases", choices=["phrases", "sentiment"]),
]
def process(self, input):
http = httplib2.Http()
headers = {}
body = dict(text=input[:10000].encode("utf8", "replace"))
url = "%s/%s/" % (self.baseurl, self._params.get("extract", "phrases"))
request, content = http.request(url, "POST", headers=headers, body=urllib.urlencode(body))
if request["status"] == "503":
raise WebServiceNodeError("Daily limit exceeded", self)
elif request["status"] == "400":
raise WebServiceNodeError("No text, limit exceeded, or incorrect language", self)
out = u""
try:
data = json.loads(content)
except ValueError:
return content
for key in ["GPE", "VP", "LOCATION", "NP", "DATE"]:
keydata = data.get(key)
if keydata is not None:
out += "%s\n" % key
for entity in keydata:
out += " %s\n" % entity
return out
class DBPediaAnnotate(BaseWebService):
"""Mashape entity extraction."""
stage = stages.POST
baseurl = "http://spotlight.dbpedia.org/rest/annotate/"
parameters = [
dict(name="confident", value=0.2),
dict(name="support", value=20),
]
def process(self, input):
http = httplib2.Http()
headers = {}
body = dict(
text=input.encode("utf8", "replace"),
confidence=self._params.get("confident"),
support=self._params.get("support"),
)
url = "%s?%s" % (self.baseurl, urllib.urlencode(body))
request, content = http.request(url, "GET", headers=headers)
if request["status"] != "200":
raise WebServiceNodeError("A web service error occured. Status: %s" % request["status"], self)
out = u""
soup = BeautifulSoup(content)
for ref in soup.findAll("a"):
out += "%s\n" % ref.text
out += " %s\n\n" % ref.get("href")
return out
|
class OpenCalais(BaseWebService):
"""OpenCalias sematic markup."""
stage = stages.POST
baseurl = "http://api.opencalais.com/tag/rs/enrich"
parameters = [
]
def process(self, input):
http = httplib2.Http()
headers = {
"x-calais-licenseID": "dsza6q6zwa9nzvz9wbz7f6y5",
"content-type": "text/raw",
"Accept": "xml/rdf",
"enableMetadataType": "GenericRelations",
}
request, content = http.request(
self.baseurl,
"POST",
headers=headers,
body=input.encode("utf8")
)
if request["status"] != "200":
raise WebServiceNodeError("A web service error occured. Status: %s" % request["status"], self)
return content.decode("utf8") | random_line_split |
|
web.py | """
Nodes that use web services to do something.
"""
import json
import httplib2
import urllib
from BeautifulSoup import BeautifulSoup
from nodetree import node, exceptions
from . import base
from .. import stages
class WebServiceNodeError(exceptions.NodeError):
pass
class BaseWebService(node.Node, base.TextWriterMixin):
"""Base class for web service nodes."""
abstract = True
stage = stages.POST
intypes = [unicode]
outtype = unicode
class MashapeProcessing(BaseWebService):
"""Mashape entity extraction."""
stage = stages.POST
baseurl = "http://text-processing.com/api/"
parameters = [
dict(name="extract", value="phrases", choices=["phrases", "sentiment"]),
]
def process(self, input):
http = httplib2.Http()
headers = {}
body = dict(text=input[:10000].encode("utf8", "replace"))
url = "%s/%s/" % (self.baseurl, self._params.get("extract", "phrases"))
request, content = http.request(url, "POST", headers=headers, body=urllib.urlencode(body))
if request["status"] == "503":
|
elif request["status"] == "400":
raise WebServiceNodeError("No text, limit exceeded, or incorrect language", self)
out = u""
try:
data = json.loads(content)
except ValueError:
return content
for key in ["GPE", "VP", "LOCATION", "NP", "DATE"]:
keydata = data.get(key)
if keydata is not None:
out += "%s\n" % key
for entity in keydata:
out += " %s\n" % entity
return out
class DBPediaAnnotate(BaseWebService):
"""Mashape entity extraction."""
stage = stages.POST
baseurl = "http://spotlight.dbpedia.org/rest/annotate/"
parameters = [
dict(name="confident", value=0.2),
dict(name="support", value=20),
]
def process(self, input):
http = httplib2.Http()
headers = {}
body = dict(
text=input.encode("utf8", "replace"),
confidence=self._params.get("confident"),
support=self._params.get("support"),
)
url = "%s?%s" % (self.baseurl, urllib.urlencode(body))
request, content = http.request(url, "GET", headers=headers)
if request["status"] != "200":
raise WebServiceNodeError("A web service error occured. Status: %s" % request["status"], self)
out = u""
soup = BeautifulSoup(content)
for ref in soup.findAll("a"):
out += "%s\n" % ref.text
out += " %s\n\n" % ref.get("href")
return out
class OpenCalais(BaseWebService):
"""OpenCalias sematic markup."""
stage = stages.POST
baseurl = "http://api.opencalais.com/tag/rs/enrich"
parameters = [
]
def process(self, input):
http = httplib2.Http()
headers = {
"x-calais-licenseID": "dsza6q6zwa9nzvz9wbz7f6y5",
"content-type": "text/raw",
"Accept": "xml/rdf",
"enableMetadataType": "GenericRelations",
}
request, content = http.request(
self.baseurl,
"POST",
headers=headers,
body=input.encode("utf8")
)
if request["status"] != "200":
raise WebServiceNodeError("A web service error occured. Status: %s" % request["status"], self)
return content.decode("utf8")
| raise WebServiceNodeError("Daily limit exceeded", self) | conditional_block |
web.py | """
Nodes that use web services to do something.
"""
import json
import httplib2
import urllib
from BeautifulSoup import BeautifulSoup
from nodetree import node, exceptions
from . import base
from .. import stages
class WebServiceNodeError(exceptions.NodeError):
pass
class BaseWebService(node.Node, base.TextWriterMixin):
"""Base class for web service nodes."""
abstract = True
stage = stages.POST
intypes = [unicode]
outtype = unicode
class MashapeProcessing(BaseWebService):
"""Mashape entity extraction."""
stage = stages.POST
baseurl = "http://text-processing.com/api/"
parameters = [
dict(name="extract", value="phrases", choices=["phrases", "sentiment"]),
]
def process(self, input):
http = httplib2.Http()
headers = {}
body = dict(text=input[:10000].encode("utf8", "replace"))
url = "%s/%s/" % (self.baseurl, self._params.get("extract", "phrases"))
request, content = http.request(url, "POST", headers=headers, body=urllib.urlencode(body))
if request["status"] == "503":
raise WebServiceNodeError("Daily limit exceeded", self)
elif request["status"] == "400":
raise WebServiceNodeError("No text, limit exceeded, or incorrect language", self)
out = u""
try:
data = json.loads(content)
except ValueError:
return content
for key in ["GPE", "VP", "LOCATION", "NP", "DATE"]:
keydata = data.get(key)
if keydata is not None:
out += "%s\n" % key
for entity in keydata:
out += " %s\n" % entity
return out
class DBPediaAnnotate(BaseWebService):
"""Mashape entity extraction."""
stage = stages.POST
baseurl = "http://spotlight.dbpedia.org/rest/annotate/"
parameters = [
dict(name="confident", value=0.2),
dict(name="support", value=20),
]
def process(self, input):
|
class OpenCalais(BaseWebService):
"""OpenCalias sematic markup."""
stage = stages.POST
baseurl = "http://api.opencalais.com/tag/rs/enrich"
parameters = [
]
def process(self, input):
http = httplib2.Http()
headers = {
"x-calais-licenseID": "dsza6q6zwa9nzvz9wbz7f6y5",
"content-type": "text/raw",
"Accept": "xml/rdf",
"enableMetadataType": "GenericRelations",
}
request, content = http.request(
self.baseurl,
"POST",
headers=headers,
body=input.encode("utf8")
)
if request["status"] != "200":
raise WebServiceNodeError("A web service error occured. Status: %s" % request["status"], self)
return content.decode("utf8")
| http = httplib2.Http()
headers = {}
body = dict(
text=input.encode("utf8", "replace"),
confidence=self._params.get("confident"),
support=self._params.get("support"),
)
url = "%s?%s" % (self.baseurl, urllib.urlencode(body))
request, content = http.request(url, "GET", headers=headers)
if request["status"] != "200":
raise WebServiceNodeError("A web service error occured. Status: %s" % request["status"], self)
out = u""
soup = BeautifulSoup(content)
for ref in soup.findAll("a"):
out += "%s\n" % ref.text
out += " %s\n\n" % ref.get("href")
return out | identifier_body |
syslog.py | import SocketServer
from abc import ABCMeta, abstractmethod
import json
import requests
import six
from .. import LOG as _LOG
from ..signal.signal import DEFAULT_ORCHESTRATOR_URL
from ..signal.event import LogEvent
LOG = _LOG.getChild(__name__)
@six.add_metaclass(ABCMeta)
class SyslogInspectorBase(object):
def __init__(
self, udp_port=10514, orchestrator_rest_url=DEFAULT_ORCHESTRATOR_URL,
entity_id='_earthquake_syslog_inspector'):
LOG.info('Syslog UDP port: %d', udp_port)
LOG.info('Orchestrator REST URL: %s', orchestrator_rest_url)
self.orchestrator_rest_url = orchestrator_rest_url
LOG.info('Inspector System Entity ID: %s', entity_id)
self.entity_id = entity_id
that = self
class SyslogUDPHandler(SocketServer.BaseRequestHandler):
def handle(self):
data = bytes.decode(self.request[0].strip(), 'utf-8')
that.on_syslog_recv(
self.client_address[0], self.client_address[1], data)
self.syslog_server = SocketServer.UDPServer(
('0.0.0.0', udp_port), SyslogUDPHandler)
def start(self):
self.syslog_server.serve_forever()
def on_syslog_recv(self, ip, port, data):
LOG.info('SYSLOG from %s:%d: "%s"', ip, port, data)
event = self.map_syslog_to_event(ip, port, data)
assert event is None or isinstance(event, LogEvent)
if event:
try:
self.send_event_to_orchestrator(event)
except Exception as e:
LOG.error('cannot send event: %s', event, exc_info=True)
def send_event_to_orchestrator(self, event):
event_jsdict = event.to_jsondict()
headers = {'content-type': 'application/json'}
post_url = self.orchestrator_rest_url + \
'/events/' + self.entity_id + '/' + event.uuid
# LOG.debug('POST %s', post_url)
r = requests.post(
post_url, data=json.dumps(event_jsdict), headers=headers)
@abstractmethod
def map_syslog_to_event(self, ip, port, data):
"""
:param ip:
:param port:
:param data:
:return: None or LogEvent
"""
pass
class BasicSyslogInspector(SyslogInspectorBase):
# @Override
def map_syslog_to_event(self, ip, port, data):
entity = 'entity-%s:%d' % (ip, port)
event = LogEvent.from_message(entity, data)
return event
if __name__ == "__main__":
| insp = BasicSyslogInspector()
insp.start() | conditional_block |
|
syslog.py | import SocketServer
from abc import ABCMeta, abstractmethod
import json
import requests
import six
from .. import LOG as _LOG
from ..signal.signal import DEFAULT_ORCHESTRATOR_URL
from ..signal.event import LogEvent
LOG = _LOG.getChild(__name__)
@six.add_metaclass(ABCMeta)
class SyslogInspectorBase(object):
def __init__(
self, udp_port=10514, orchestrator_rest_url=DEFAULT_ORCHESTRATOR_URL,
entity_id='_earthquake_syslog_inspector'):
LOG.info('Syslog UDP port: %d', udp_port)
LOG.info('Orchestrator REST URL: %s', orchestrator_rest_url)
self.orchestrator_rest_url = orchestrator_rest_url
LOG.info('Inspector System Entity ID: %s', entity_id)
self.entity_id = entity_id
that = self
class SyslogUDPHandler(SocketServer.BaseRequestHandler):
def handle(self):
data = bytes.decode(self.request[0].strip(), 'utf-8')
that.on_syslog_recv(
self.client_address[0], self.client_address[1], data)
self.syslog_server = SocketServer.UDPServer(
('0.0.0.0', udp_port), SyslogUDPHandler)
def start(self):
self.syslog_server.serve_forever()
def on_syslog_recv(self, ip, port, data):
LOG.info('SYSLOG from %s:%d: "%s"', ip, port, data)
event = self.map_syslog_to_event(ip, port, data)
assert event is None or isinstance(event, LogEvent)
if event:
try:
self.send_event_to_orchestrator(event) | def send_event_to_orchestrator(self, event):
event_jsdict = event.to_jsondict()
headers = {'content-type': 'application/json'}
post_url = self.orchestrator_rest_url + \
'/events/' + self.entity_id + '/' + event.uuid
# LOG.debug('POST %s', post_url)
r = requests.post(
post_url, data=json.dumps(event_jsdict), headers=headers)
@abstractmethod
def map_syslog_to_event(self, ip, port, data):
"""
:param ip:
:param port:
:param data:
:return: None or LogEvent
"""
pass
class BasicSyslogInspector(SyslogInspectorBase):
# @Override
def map_syslog_to_event(self, ip, port, data):
entity = 'entity-%s:%d' % (ip, port)
event = LogEvent.from_message(entity, data)
return event
if __name__ == "__main__":
insp = BasicSyslogInspector()
insp.start() | except Exception as e:
LOG.error('cannot send event: %s', event, exc_info=True)
| random_line_split |
syslog.py | import SocketServer
from abc import ABCMeta, abstractmethod
import json
import requests
import six
from .. import LOG as _LOG
from ..signal.signal import DEFAULT_ORCHESTRATOR_URL
from ..signal.event import LogEvent
LOG = _LOG.getChild(__name__)
@six.add_metaclass(ABCMeta)
class SyslogInspectorBase(object):
def | (
self, udp_port=10514, orchestrator_rest_url=DEFAULT_ORCHESTRATOR_URL,
entity_id='_earthquake_syslog_inspector'):
LOG.info('Syslog UDP port: %d', udp_port)
LOG.info('Orchestrator REST URL: %s', orchestrator_rest_url)
self.orchestrator_rest_url = orchestrator_rest_url
LOG.info('Inspector System Entity ID: %s', entity_id)
self.entity_id = entity_id
that = self
class SyslogUDPHandler(SocketServer.BaseRequestHandler):
def handle(self):
data = bytes.decode(self.request[0].strip(), 'utf-8')
that.on_syslog_recv(
self.client_address[0], self.client_address[1], data)
self.syslog_server = SocketServer.UDPServer(
('0.0.0.0', udp_port), SyslogUDPHandler)
def start(self):
self.syslog_server.serve_forever()
def on_syslog_recv(self, ip, port, data):
LOG.info('SYSLOG from %s:%d: "%s"', ip, port, data)
event = self.map_syslog_to_event(ip, port, data)
assert event is None or isinstance(event, LogEvent)
if event:
try:
self.send_event_to_orchestrator(event)
except Exception as e:
LOG.error('cannot send event: %s', event, exc_info=True)
def send_event_to_orchestrator(self, event):
event_jsdict = event.to_jsondict()
headers = {'content-type': 'application/json'}
post_url = self.orchestrator_rest_url + \
'/events/' + self.entity_id + '/' + event.uuid
# LOG.debug('POST %s', post_url)
r = requests.post(
post_url, data=json.dumps(event_jsdict), headers=headers)
@abstractmethod
def map_syslog_to_event(self, ip, port, data):
"""
:param ip:
:param port:
:param data:
:return: None or LogEvent
"""
pass
class BasicSyslogInspector(SyslogInspectorBase):
# @Override
def map_syslog_to_event(self, ip, port, data):
entity = 'entity-%s:%d' % (ip, port)
event = LogEvent.from_message(entity, data)
return event
if __name__ == "__main__":
insp = BasicSyslogInspector()
insp.start()
| __init__ | identifier_name |
syslog.py | import SocketServer
from abc import ABCMeta, abstractmethod
import json
import requests
import six
from .. import LOG as _LOG
from ..signal.signal import DEFAULT_ORCHESTRATOR_URL
from ..signal.event import LogEvent
LOG = _LOG.getChild(__name__)
@six.add_metaclass(ABCMeta)
class SyslogInspectorBase(object):
|
class BasicSyslogInspector(SyslogInspectorBase):
# @Override
def map_syslog_to_event(self, ip, port, data):
entity = 'entity-%s:%d' % (ip, port)
event = LogEvent.from_message(entity, data)
return event
if __name__ == "__main__":
insp = BasicSyslogInspector()
insp.start()
| def __init__(
self, udp_port=10514, orchestrator_rest_url=DEFAULT_ORCHESTRATOR_URL,
entity_id='_earthquake_syslog_inspector'):
LOG.info('Syslog UDP port: %d', udp_port)
LOG.info('Orchestrator REST URL: %s', orchestrator_rest_url)
self.orchestrator_rest_url = orchestrator_rest_url
LOG.info('Inspector System Entity ID: %s', entity_id)
self.entity_id = entity_id
that = self
class SyslogUDPHandler(SocketServer.BaseRequestHandler):
def handle(self):
data = bytes.decode(self.request[0].strip(), 'utf-8')
that.on_syslog_recv(
self.client_address[0], self.client_address[1], data)
self.syslog_server = SocketServer.UDPServer(
('0.0.0.0', udp_port), SyslogUDPHandler)
def start(self):
self.syslog_server.serve_forever()
def on_syslog_recv(self, ip, port, data):
LOG.info('SYSLOG from %s:%d: "%s"', ip, port, data)
event = self.map_syslog_to_event(ip, port, data)
assert event is None or isinstance(event, LogEvent)
if event:
try:
self.send_event_to_orchestrator(event)
except Exception as e:
LOG.error('cannot send event: %s', event, exc_info=True)
def send_event_to_orchestrator(self, event):
event_jsdict = event.to_jsondict()
headers = {'content-type': 'application/json'}
post_url = self.orchestrator_rest_url + \
'/events/' + self.entity_id + '/' + event.uuid
# LOG.debug('POST %s', post_url)
r = requests.post(
post_url, data=json.dumps(event_jsdict), headers=headers)
@abstractmethod
def map_syslog_to_event(self, ip, port, data):
"""
:param ip:
:param port:
:param data:
:return: None or LogEvent
"""
pass | identifier_body |
Read.js | //Capturar possibles errors
process.on('uncaughtException', function(err) {
console.log(err);
});
//Importar mòdul net
var net = require('net')
//Port d'escolta del servidor
var port = 8002;
//Crear servidor TCP
net.createServer(function(socket){
socket.on('data', function(data){
//Parse dades JSON
var json=JSON.parse(data);
//Importar mòdul theThingsCoAP
var theThingsCoAP = require('../../')
//Crear client theThingsCoAP
var client = theThingsCoAP.createClient()
client.on('ready', function () {
read(json.endDate)
})
//Funció per llegir dades de la plataforma thethings.iO
function read(endDate){
|
});
//Configuració del port en el servidor TCP
}).listen(port);
| client.thingRead(json.key, {limit: 100,endDate: endDate, startDate: json.startDate}, function (error, data) {
if (typeof data!=='undefined' && data!==null){
if (data.length > 0) {
var dataSend=""
var coma=","
for (var i=0;i<=(data.length - 1);i++){
dataSend=dataSend+data[i].value+coma+data[i].datetime.split('T')[1]+coma
}
socket.write(dataSend);
read(data[data.length - 1].datetime.split('.')[0].replace(/-/g, '').replace(/:/g, '').replace('T', ''))
}else{
socket.write("</FINAL>");
}
}
})
}
| identifier_body |
Read.js | //Capturar possibles errors
process.on('uncaughtException', function(err) {
console.log(err);
});
//Importar mòdul net
var net = require('net')
//Port d'escolta del servidor
var port = 8002;
//Crear servidor TCP
net.createServer(function(socket){
socket.on('data', function(data){
//Parse dades JSON
var json=JSON.parse(data);
//Importar mòdul theThingsCoAP
var theThingsCoAP = require('../../')
//Crear client theThingsCoAP
var client = theThingsCoAP.createClient()
client.on('ready', function () {
read(json.endDate)
})
//Funció per llegir dades de la plataforma thethings.iO
function read(endDate){
client.thingRead(json.key, {limit: 100,endDate: endDate, startDate: json.startDate}, function (error, data) {
if (typeof data!=='undefined' && data!==null){
if (data.length > 0) {
var dataSend=""
var coma=","
for (var i=0;i<=(data.length - 1);i++){
dataSend=dataSend+data[i].value+coma+data[i].datetime.split('T')[1]+coma | }else{
socket.write("</FINAL>");
}
}
})
}
});
//Configuració del port en el servidor TCP
}).listen(port); | }
socket.write(dataSend);
read(data[data.length - 1].datetime.split('.')[0].replace(/-/g, '').replace(/:/g, '').replace('T', '')) | random_line_split |
Read.js | //Capturar possibles errors
process.on('uncaughtException', function(err) {
console.log(err);
});
//Importar mòdul net
var net = require('net')
//Port d'escolta del servidor
var port = 8002;
//Crear servidor TCP
net.createServer(function(socket){
socket.on('data', function(data){
//Parse dades JSON
var json=JSON.parse(data);
//Importar mòdul theThingsCoAP
var theThingsCoAP = require('../../')
//Crear client theThingsCoAP
var client = theThingsCoAP.createClient()
client.on('ready', function () {
read(json.endDate)
})
//Funció per llegir dades de la plataforma thethings.iO
function read(endDate){
client.thingRead(json.key, {limit: 100,endDate: endDate, startDate: json.startDate}, function (error, data) {
if (typeof data!=='undefined' && data!==null){ | })
}
});
//Configuració del port en el servidor TCP
}).listen(port);
|
if (data.length > 0) {
var dataSend=""
var coma=","
for (var i=0;i<=(data.length - 1);i++){
dataSend=dataSend+data[i].value+coma+data[i].datetime.split('T')[1]+coma
}
socket.write(dataSend);
read(data[data.length - 1].datetime.split('.')[0].replace(/-/g, '').replace(/:/g, '').replace('T', ''))
}else{
socket.write("</FINAL>");
}
}
| conditional_block |
Read.js | //Capturar possibles errors
process.on('uncaughtException', function(err) {
console.log(err);
});
//Importar mòdul net
var net = require('net')
//Port d'escolta del servidor
var port = 8002;
//Crear servidor TCP
net.createServer(function(socket){
socket.on('data', function(data){
//Parse dades JSON
var json=JSON.parse(data);
//Importar mòdul theThingsCoAP
var theThingsCoAP = require('../../')
//Crear client theThingsCoAP
var client = theThingsCoAP.createClient()
client.on('ready', function () {
read(json.endDate)
})
//Funció per llegir dades de la plataforma thethings.iO
function rea | dDate){
client.thingRead(json.key, {limit: 100,endDate: endDate, startDate: json.startDate}, function (error, data) {
if (typeof data!=='undefined' && data!==null){
if (data.length > 0) {
var dataSend=""
var coma=","
for (var i=0;i<=(data.length - 1);i++){
dataSend=dataSend+data[i].value+coma+data[i].datetime.split('T')[1]+coma
}
socket.write(dataSend);
read(data[data.length - 1].datetime.split('.')[0].replace(/-/g, '').replace(/:/g, '').replace('T', ''))
}else{
socket.write("</FINAL>");
}
}
})
}
});
//Configuració del port en el servidor TCP
}).listen(port);
| d(en | identifier_name |
tab-group-harness-example.spec.ts | import {TestBed, ComponentFixture} from '@angular/core/testing'; | import {
BrowserDynamicTestingModule,
platformBrowserDynamicTesting,
} from '@angular/platform-browser-dynamic/testing';
import {MatTabsModule} from '@angular/material/tabs';
import {TabGroupHarnessExample} from './tab-group-harness-example';
import {NoopAnimationsModule} from '@angular/platform-browser/animations';
describe('TabGroupHarnessExample', () => {
let fixture: ComponentFixture<TabGroupHarnessExample>;
let loader: HarnessLoader;
beforeAll(() => {
TestBed.initTestEnvironment(BrowserDynamicTestingModule, platformBrowserDynamicTesting());
});
beforeEach(async () => {
await TestBed.configureTestingModule({
imports: [MatTabsModule, NoopAnimationsModule],
declarations: [TabGroupHarnessExample]
}).compileComponents();
fixture = TestBed.createComponent(TabGroupHarnessExample);
fixture.detectChanges();
loader = TestbedHarnessEnvironment.loader(fixture);
});
it('should load harness for tab-group', async () => {
const tabGroups = await loader.getAllHarnesses(MatTabGroupHarness);
expect(tabGroups.length).toBe(1);
});
it('should load harness for tab-group with selected tab label', async () => {
const tabGroups = await loader.getAllHarnesses(MatTabGroupHarness.with({
selectedTabLabel: 'Profile',
}));
expect(tabGroups.length).toBe(1);
});
it('should be able to get tabs of tab-group', async () => {
const tabGroup = await loader.getHarness(MatTabGroupHarness);
const tabs = await tabGroup.getTabs();
expect(tabs.length).toBe(3);
});
it('should be able to select tab from tab-group', async () => {
const tabGroup = await loader.getHarness(MatTabGroupHarness);
expect(await (await tabGroup.getSelectedTab()).getLabel()).toBe('Profile');
await tabGroup.selectTab({label: 'FAQ'});
expect(await (await tabGroup.getSelectedTab()).getLabel()).toBe('FAQ');
});
}); | import {TestbedHarnessEnvironment} from '@angular/cdk/testing/testbed';
import {MatTabGroupHarness} from '@angular/material/tabs/testing';
import {HarnessLoader} from '@angular/cdk/testing'; | random_line_split |
flow_management.py | # -*- coding: utf-8 -*-
from flask import render_template, redirect, url_for, flash, abort
from purchasing.decorators import requires_roles
from purchasing.data.stages import Stage
from purchasing.data.flows import Flow
from purchasing.conductor.forms import FlowForm, NewFlowForm
from purchasing.conductor.manager import blueprint
@blueprint.route('/flow/new', methods=['GET', 'POST'])
@requires_roles('conductor', 'admin', 'superadmin')
def new_flow():
'''Create a new flow
:status 200: Render the new flow template
:status 302: Try to create a new flow using the
:py:class:`~purchasing.conductor.forms.NewFlowForm`, redirect
to the flows list view if successful
'''
stages = Stage.choices_factory()
form = NewFlowForm(stages=stages)
if form.validate_on_submit():
stage_order = []
for entry in form.stage_order.entries:
# try to evaluate the return value as an ID
try:
stage_id = int(entry.data)
# otherwise it's a new stage
except ValueError:
new_stage = Stage.create(name=entry.data)
stage_id = new_stage.id
stage_order.append(stage_id)
Flow.create(flow_name=form.flow_name.data, stage_order=stage_order)
flash('Flow created successfully!', 'alert-success')
return redirect(url_for('conductor.flows_list'))
return render_template('conductor/flows/new.html', stages=stages, form=form)
@blueprint.route('/flows')
@requires_roles('conductor', 'admin', 'superadmin')
def flows_list():
'''List all flows
:status 200: Render the all flows list template
'''
flows = Flow.query.order_by(Flow.flow_name).all()
active, archived = [], []
for flow in flows:
if flow.is_archived:
archived.append(flow)
else:
|
return render_template('conductor/flows/browse.html', active=active, archived=archived)
@blueprint.route('/flow/<int:flow_id>', methods=['GET', 'POST'])
@requires_roles('conductor', 'admin', 'superadmin')
def flow_detail(flow_id):
'''View/edit a flow's details
:status 200: Render the flow edit template
:status 302: Post changes to the a flow using the submitted
:py:class:`~purchasing.conductor.forms.FlowForm`, redirect back to
the current flow's detail page if successful
'''
flow = Flow.query.get(flow_id)
if flow:
form = FlowForm(obj=flow)
if form.validate_on_submit():
flow.update(
flow_name=form.data['flow_name'],
is_archived=form.data['is_archived']
)
flash('Flow successfully updated', 'alert-success')
return redirect(url_for('conductor.flow_detail', flow_id=flow.id))
return render_template('conductor/flows/edit.html', form=form, flow=flow)
abort(404)
| active.append(flow) | conditional_block |
flow_management.py | # -*- coding: utf-8 -*-
from flask import render_template, redirect, url_for, flash, abort
from purchasing.decorators import requires_roles
from purchasing.data.stages import Stage
from purchasing.data.flows import Flow
from purchasing.conductor.forms import FlowForm, NewFlowForm
from purchasing.conductor.manager import blueprint
@blueprint.route('/flow/new', methods=['GET', 'POST'])
@requires_roles('conductor', 'admin', 'superadmin')
def | ():
'''Create a new flow
:status 200: Render the new flow template
:status 302: Try to create a new flow using the
:py:class:`~purchasing.conductor.forms.NewFlowForm`, redirect
to the flows list view if successful
'''
stages = Stage.choices_factory()
form = NewFlowForm(stages=stages)
if form.validate_on_submit():
stage_order = []
for entry in form.stage_order.entries:
# try to evaluate the return value as an ID
try:
stage_id = int(entry.data)
# otherwise it's a new stage
except ValueError:
new_stage = Stage.create(name=entry.data)
stage_id = new_stage.id
stage_order.append(stage_id)
Flow.create(flow_name=form.flow_name.data, stage_order=stage_order)
flash('Flow created successfully!', 'alert-success')
return redirect(url_for('conductor.flows_list'))
return render_template('conductor/flows/new.html', stages=stages, form=form)
@blueprint.route('/flows')
@requires_roles('conductor', 'admin', 'superadmin')
def flows_list():
'''List all flows
:status 200: Render the all flows list template
'''
flows = Flow.query.order_by(Flow.flow_name).all()
active, archived = [], []
for flow in flows:
if flow.is_archived:
archived.append(flow)
else:
active.append(flow)
return render_template('conductor/flows/browse.html', active=active, archived=archived)
@blueprint.route('/flow/<int:flow_id>', methods=['GET', 'POST'])
@requires_roles('conductor', 'admin', 'superadmin')
def flow_detail(flow_id):
'''View/edit a flow's details
:status 200: Render the flow edit template
:status 302: Post changes to the a flow using the submitted
:py:class:`~purchasing.conductor.forms.FlowForm`, redirect back to
the current flow's detail page if successful
'''
flow = Flow.query.get(flow_id)
if flow:
form = FlowForm(obj=flow)
if form.validate_on_submit():
flow.update(
flow_name=form.data['flow_name'],
is_archived=form.data['is_archived']
)
flash('Flow successfully updated', 'alert-success')
return redirect(url_for('conductor.flow_detail', flow_id=flow.id))
return render_template('conductor/flows/edit.html', form=form, flow=flow)
abort(404)
| new_flow | identifier_name |
flow_management.py | # -*- coding: utf-8 -*-
from flask import render_template, redirect, url_for, flash, abort
from purchasing.decorators import requires_roles
from purchasing.data.stages import Stage
from purchasing.data.flows import Flow
from purchasing.conductor.forms import FlowForm, NewFlowForm
from purchasing.conductor.manager import blueprint
@blueprint.route('/flow/new', methods=['GET', 'POST'])
@requires_roles('conductor', 'admin', 'superadmin')
def new_flow():
'''Create a new flow
:status 200: Render the new flow template
:status 302: Try to create a new flow using the
:py:class:`~purchasing.conductor.forms.NewFlowForm`, redirect
to the flows list view if successful
'''
stages = Stage.choices_factory()
form = NewFlowForm(stages=stages)
if form.validate_on_submit():
stage_order = []
for entry in form.stage_order.entries:
# try to evaluate the return value as an ID
try:
stage_id = int(entry.data)
# otherwise it's a new stage
except ValueError:
new_stage = Stage.create(name=entry.data)
stage_id = new_stage.id
stage_order.append(stage_id)
Flow.create(flow_name=form.flow_name.data, stage_order=stage_order)
flash('Flow created successfully!', 'alert-success')
return redirect(url_for('conductor.flows_list'))
return render_template('conductor/flows/new.html', stages=stages, form=form)
@blueprint.route('/flows')
@requires_roles('conductor', 'admin', 'superadmin')
def flows_list():
'''List all flows
:status 200: Render the all flows list template
'''
flows = Flow.query.order_by(Flow.flow_name).all()
active, archived = [], []
for flow in flows:
if flow.is_archived:
archived.append(flow)
else:
active.append(flow)
return render_template('conductor/flows/browse.html', active=active, archived=archived)
@blueprint.route('/flow/<int:flow_id>', methods=['GET', 'POST'])
@requires_roles('conductor', 'admin', 'superadmin')
def flow_detail(flow_id):
'''View/edit a flow's details
:status 200: Render the flow edit template
:status 302: Post changes to the a flow using the submitted
:py:class:`~purchasing.conductor.forms.FlowForm`, redirect back to
the current flow's detail page if successful
'''
flow = Flow.query.get(flow_id)
if flow:
form = FlowForm(obj=flow)
if form.validate_on_submit():
flow.update(
flow_name=form.data['flow_name'],
is_archived=form.data['is_archived']
)
flash('Flow successfully updated', 'alert-success')
return redirect(url_for('conductor.flow_detail', flow_id=flow.id))
return render_template('conductor/flows/edit.html', form=form, flow=flow) | abort(404) | random_line_split |
|
flow_management.py | # -*- coding: utf-8 -*-
from flask import render_template, redirect, url_for, flash, abort
from purchasing.decorators import requires_roles
from purchasing.data.stages import Stage
from purchasing.data.flows import Flow
from purchasing.conductor.forms import FlowForm, NewFlowForm
from purchasing.conductor.manager import blueprint
@blueprint.route('/flow/new', methods=['GET', 'POST'])
@requires_roles('conductor', 'admin', 'superadmin')
def new_flow():
'''Create a new flow
:status 200: Render the new flow template
:status 302: Try to create a new flow using the
:py:class:`~purchasing.conductor.forms.NewFlowForm`, redirect
to the flows list view if successful
'''
stages = Stage.choices_factory()
form = NewFlowForm(stages=stages)
if form.validate_on_submit():
stage_order = []
for entry in form.stage_order.entries:
# try to evaluate the return value as an ID
try:
stage_id = int(entry.data)
# otherwise it's a new stage
except ValueError:
new_stage = Stage.create(name=entry.data)
stage_id = new_stage.id
stage_order.append(stage_id)
Flow.create(flow_name=form.flow_name.data, stage_order=stage_order)
flash('Flow created successfully!', 'alert-success')
return redirect(url_for('conductor.flows_list'))
return render_template('conductor/flows/new.html', stages=stages, form=form)
@blueprint.route('/flows')
@requires_roles('conductor', 'admin', 'superadmin')
def flows_list():
|
@blueprint.route('/flow/<int:flow_id>', methods=['GET', 'POST'])
@requires_roles('conductor', 'admin', 'superadmin')
def flow_detail(flow_id):
'''View/edit a flow's details
:status 200: Render the flow edit template
:status 302: Post changes to the a flow using the submitted
:py:class:`~purchasing.conductor.forms.FlowForm`, redirect back to
the current flow's detail page if successful
'''
flow = Flow.query.get(flow_id)
if flow:
form = FlowForm(obj=flow)
if form.validate_on_submit():
flow.update(
flow_name=form.data['flow_name'],
is_archived=form.data['is_archived']
)
flash('Flow successfully updated', 'alert-success')
return redirect(url_for('conductor.flow_detail', flow_id=flow.id))
return render_template('conductor/flows/edit.html', form=form, flow=flow)
abort(404)
| '''List all flows
:status 200: Render the all flows list template
'''
flows = Flow.query.order_by(Flow.flow_name).all()
active, archived = [], []
for flow in flows:
if flow.is_archived:
archived.append(flow)
else:
active.append(flow)
return render_template('conductor/flows/browse.html', active=active, archived=archived) | identifier_body |
IosCrop.js | 'use strict';
Object.defineProperty(exports, "__esModule", {
value: true
});
var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();
var _react = require('react');
var _react2 = _interopRequireDefault(_react);
var _IconBase = require('./../components/IconBase/IconBase');
var _IconBase2 = _interopRequireDefault(_IconBase);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
function _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return call && (typeof call === "object" || typeof call === "function") ? call : self; }
function _inherits(subClass, superClass) |
var IosCrop = function (_React$Component) {
_inherits(IosCrop, _React$Component);
function IosCrop() {
_classCallCheck(this, IosCrop);
return _possibleConstructorReturn(this, Object.getPrototypeOf(IosCrop).apply(this, arguments));
}
_createClass(IosCrop, [{
key: 'render',
value: function render() {
if (this.props.bare) {
return _react2.default.createElement(
'g',
null,
_react2.default.createElement(
'g',
null,
_react2.default.createElement('rect', { x: '128', y: '64', width: '16', height: '48' }),
_react2.default.createElement('polygon', { points: '144,368 144,160 128,160 128,384 352,384 352,368 \t' }),
_react2.default.createElement('rect', { x: '400', y: '368', width: '48', height: '16' }),
_react2.default.createElement('polygon', { points: '64,128 64,144 368,144 368,448 384,448 384,128 \t' })
)
);
}return _react2.default.createElement(
_IconBase2.default,
null,
_react2.default.createElement(
'g',
null,
_react2.default.createElement('rect', { x: '128', y: '64', width: '16', height: '48' }),
_react2.default.createElement('polygon', { points: '144,368 144,160 128,160 128,384 352,384 352,368 \t' }),
_react2.default.createElement('rect', { x: '400', y: '368', width: '48', height: '16' }),
_react2.default.createElement('polygon', { points: '64,128 64,144 368,144 368,448 384,448 384,128 \t' })
)
);
}
}]);
return IosCrop;
}(_react2.default.Component);
exports.default = IosCrop;
;IosCrop.defaultProps = { bare: false }; | { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; } | identifier_body |
IosCrop.js | 'use strict';
Object.defineProperty(exports, "__esModule", {
value: true
});
var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();
var _react = require('react');
var _react2 = _interopRequireDefault(_react);
var _IconBase = require('./../components/IconBase/IconBase');
var _IconBase2 = _interopRequireDefault(_IconBase);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
function _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return call && (typeof call === "object" || typeof call === "function") ? call : self; }
function _inherits(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }
var IosCrop = function (_React$Component) {
_inherits(IosCrop, _React$Component);
|
_createClass(IosCrop, [{
key: 'render',
value: function render() {
if (this.props.bare) {
return _react2.default.createElement(
'g',
null,
_react2.default.createElement(
'g',
null,
_react2.default.createElement('rect', { x: '128', y: '64', width: '16', height: '48' }),
_react2.default.createElement('polygon', { points: '144,368 144,160 128,160 128,384 352,384 352,368 \t' }),
_react2.default.createElement('rect', { x: '400', y: '368', width: '48', height: '16' }),
_react2.default.createElement('polygon', { points: '64,128 64,144 368,144 368,448 384,448 384,128 \t' })
)
);
}return _react2.default.createElement(
_IconBase2.default,
null,
_react2.default.createElement(
'g',
null,
_react2.default.createElement('rect', { x: '128', y: '64', width: '16', height: '48' }),
_react2.default.createElement('polygon', { points: '144,368 144,160 128,160 128,384 352,384 352,368 \t' }),
_react2.default.createElement('rect', { x: '400', y: '368', width: '48', height: '16' }),
_react2.default.createElement('polygon', { points: '64,128 64,144 368,144 368,448 384,448 384,128 \t' })
)
);
}
}]);
return IosCrop;
}(_react2.default.Component);
exports.default = IosCrop;
;IosCrop.defaultProps = { bare: false }; | function IosCrop() {
_classCallCheck(this, IosCrop);
return _possibleConstructorReturn(this, Object.getPrototypeOf(IosCrop).apply(this, arguments));
} | random_line_split |
IosCrop.js | 'use strict';
Object.defineProperty(exports, "__esModule", {
value: true
});
var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();
var _react = require('react');
var _react2 = _interopRequireDefault(_react);
var _IconBase = require('./../components/IconBase/IconBase');
var _IconBase2 = _interopRequireDefault(_IconBase);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
function | (self, call) { if (!self) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return call && (typeof call === "object" || typeof call === "function") ? call : self; }
function _inherits(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }
var IosCrop = function (_React$Component) {
_inherits(IosCrop, _React$Component);
function IosCrop() {
_classCallCheck(this, IosCrop);
return _possibleConstructorReturn(this, Object.getPrototypeOf(IosCrop).apply(this, arguments));
}
_createClass(IosCrop, [{
key: 'render',
value: function render() {
if (this.props.bare) {
return _react2.default.createElement(
'g',
null,
_react2.default.createElement(
'g',
null,
_react2.default.createElement('rect', { x: '128', y: '64', width: '16', height: '48' }),
_react2.default.createElement('polygon', { points: '144,368 144,160 128,160 128,384 352,384 352,368 \t' }),
_react2.default.createElement('rect', { x: '400', y: '368', width: '48', height: '16' }),
_react2.default.createElement('polygon', { points: '64,128 64,144 368,144 368,448 384,448 384,128 \t' })
)
);
}return _react2.default.createElement(
_IconBase2.default,
null,
_react2.default.createElement(
'g',
null,
_react2.default.createElement('rect', { x: '128', y: '64', width: '16', height: '48' }),
_react2.default.createElement('polygon', { points: '144,368 144,160 128,160 128,384 352,384 352,368 \t' }),
_react2.default.createElement('rect', { x: '400', y: '368', width: '48', height: '16' }),
_react2.default.createElement('polygon', { points: '64,128 64,144 368,144 368,448 384,448 384,128 \t' })
)
);
}
}]);
return IosCrop;
}(_react2.default.Component);
exports.default = IosCrop;
;IosCrop.defaultProps = { bare: false }; | _possibleConstructorReturn | identifier_name |
IosCrop.js | 'use strict';
Object.defineProperty(exports, "__esModule", {
value: true
});
var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();
var _react = require('react');
var _react2 = _interopRequireDefault(_react);
var _IconBase = require('./../components/IconBase/IconBase');
var _IconBase2 = _interopRequireDefault(_IconBase);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
function _possibleConstructorReturn(self, call) { if (!self) | return call && (typeof call === "object" || typeof call === "function") ? call : self; }
function _inherits(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }
var IosCrop = function (_React$Component) {
_inherits(IosCrop, _React$Component);
function IosCrop() {
_classCallCheck(this, IosCrop);
return _possibleConstructorReturn(this, Object.getPrototypeOf(IosCrop).apply(this, arguments));
}
_createClass(IosCrop, [{
key: 'render',
value: function render() {
if (this.props.bare) {
return _react2.default.createElement(
'g',
null,
_react2.default.createElement(
'g',
null,
_react2.default.createElement('rect', { x: '128', y: '64', width: '16', height: '48' }),
_react2.default.createElement('polygon', { points: '144,368 144,160 128,160 128,384 352,384 352,368 \t' }),
_react2.default.createElement('rect', { x: '400', y: '368', width: '48', height: '16' }),
_react2.default.createElement('polygon', { points: '64,128 64,144 368,144 368,448 384,448 384,128 \t' })
)
);
}return _react2.default.createElement(
_IconBase2.default,
null,
_react2.default.createElement(
'g',
null,
_react2.default.createElement('rect', { x: '128', y: '64', width: '16', height: '48' }),
_react2.default.createElement('polygon', { points: '144,368 144,160 128,160 128,384 352,384 352,368 \t' }),
_react2.default.createElement('rect', { x: '400', y: '368', width: '48', height: '16' }),
_react2.default.createElement('polygon', { points: '64,128 64,144 368,144 368,448 384,448 384,128 \t' })
)
);
}
}]);
return IosCrop;
}(_react2.default.Component);
exports.default = IosCrop;
;IosCrop.defaultProps = { bare: false }; | { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } | conditional_block |
rust-indexer.rs | #[macro_use]
extern crate clap;
extern crate env_logger;
#[macro_use]
extern crate log;
extern crate rls_analysis;
extern crate rls_data as data;
extern crate tools;
use crate::data::GlobalCrateId;
use crate::data::{DefKind, ImplKind};
use rls_analysis::{AnalysisHost, AnalysisLoader, SearchDirectory};
use std::collections::{BTreeSet, HashMap};
use std::fs::{self, File};
use std::io;
use std::io::{BufRead, BufReader, Read, Seek};
use std::path::{Path, PathBuf};
use tools::file_format::analysis::{
AnalysisKind, AnalysisSource, AnalysisTarget, LineRange, Location, SourceRange, WithLocation,
};
/// A global definition id in a crate.
///
/// FIXME(emilio): This key is kind of slow, because GlobalCrateId contains a
/// String. There's a "disambiguator" field which may be more than enough for
/// our purposes.
#[derive(Clone, Hash, Debug, Eq, PartialEq)]
pub struct DefId(GlobalCrateId, u32);
/// A map from global definition ids to the actual definition.
pub struct Defs {
map: HashMap<DefId, data::Def>,
}
/// Local filesystem path mappings and metadata which exist for the following
/// purposes:
/// 1. Know where to output the analysis files.
/// - There is only ever one analysis output directory.
/// 2. Know how to locate rust source files in order to hackily extract strings
/// that should have been in the save-analysis files.
/// - After config scripts run and normalize things there are 2 source
/// directories: revision controlled source (cross-platform) and the
/// (per-platform) generated files directory.
#[derive(Debug)]
struct TreeInfo<'a> {
/// Local filesystem path root for the analysis dir where rust-indexer.rs
/// should write its output.
out_analysis_dir: &'a Path,
/// Local filesystem path root for the source tree. In the searchfox path
/// space presented to users, this means all paths not prefixed with
/// `__GENERATED__`.
srcdir: &'a Path,
/// Local filesystem path root for the per-platform generated source tree.
/// In the searchfox path space presented to users, this means paths
/// prefixed with `__GENERATED__`.
generated: &'a Path,
/// The searchfox path space prefix for generated.
generated_friendly: &'a Path,
}
fn construct_qualname(scope: &str, name: &str) -> String {
// Some of the names don't start with ::, for example:
// __self_0_0$282
// <Loader>::new
// Since we're gluing it to the "scope" (which might be a crate name)
// we'll insert the :: to make it more readable
let glue = if name.starts_with("::") { "" } else { "::" };
format!("{}{}{}", scope, glue, name)
}
fn sanitize_symbol(sym: &str) -> String {
// Downstream processing of the symbol doesn't deal well with
// these characters, so replace them with underscores
sym.replace(",", "_").replace(" ", "_").replace("\n", "_")
}
// Given a definition, and the global crate id where that definition is found,
// return a qualified name that identifies the definition unambiguously.
fn crate_independent_qualname(def: &data::Def, crate_id: &data::GlobalCrateId) -> String {
// For stuff with "no_mangle" functions or statics, or extern declarations,
// we just use the name.
//
// TODO(emilio): Maybe there's a way to get the #[link_name] attribute from
// here and make C++ agree with that? Though we don't use it so it may not
// be worth the churn.
fn use_unmangled_name(def: &data::Def) -> bool {
match def.kind {
DefKind::ForeignStatic | DefKind::ForeignFunction => true,
DefKind::Static | DefKind::Function => {
def.attributes.iter().any(|attr| attr.value == "no_mangle")
}
_ => false,
}
}
if use_unmangled_name(def) {
return def.name.clone();
}
construct_qualname(&crate_id.name, &def.qualname)
}
impl Defs {
fn new() -> Self {
Self {
map: HashMap::new(),
}
}
fn insert(&mut self, analysis: &data::Analysis, def: &data::Def) {
let crate_id = analysis.prelude.as_ref().unwrap().crate_id.clone();
let mut definition = def.clone();
definition.qualname = crate_independent_qualname(&def, &crate_id);
let index = definition.id.index;
let defid = DefId(crate_id, index);
debug!("Indexing def: {:?} -> {:?}", defid, definition);
let previous = self.map.insert(defid, definition);
if let Some(previous) = previous {
// This shouldn't happen, but as of right now it can happen with
// some builtin definitions when highly generic types are involved.
// This is probably a rust bug, just ignore it for now.
debug!(
"Found a definition with the same ID twice? {:?}, {:?}",
previous, def,
);
}
}
/// Getter for a given local id, which takes care of converting to a global
/// ID and returning the definition if present.
fn get(&self, analysis: &data::Analysis, id: data::Id) -> Option<data::Def> {
let prelude = analysis.prelude.as_ref().unwrap();
let krate_id = if id.krate == 0 {
prelude.crate_id.clone()
} else {
// TODO(emilio): This escales with the number of crates in this
// particular crate, but it's probably not too bad, since it should
// be a pretty fast linear search.
let krate = prelude
.external_crates
.iter()
.find(|krate| krate.num == id.krate);
let krate = match krate {
Some(k) => k,
None => {
debug!("Crate not found: {:?}", id);
return None;
}
};
krate.id.clone()
};
let id = DefId(krate_id, id.index);
let result = self.map.get(&id).cloned();
if result.is_none() {
debug!("Def not found: {:?}", id);
}
result
}
}
#[derive(Clone)]
pub struct Loader {
deps_dirs: Vec<PathBuf>,
}
impl Loader {
pub fn new(deps_dirs: Vec<PathBuf>) -> Self {
Self { deps_dirs }
}
}
impl AnalysisLoader for Loader {
fn needs_hard_reload(&self, _: &Path) -> bool {
true
}
fn fresh_host(&self) -> AnalysisHost<Self> {
AnalysisHost::new_with_loader(self.clone())
}
fn set_path_prefix(&mut self, _: &Path) {}
fn abs_path_prefix(&self) -> Option<PathBuf> {
None
}
fn search_directories(&self) -> Vec<SearchDirectory> {
self.deps_dirs
.iter()
.map(|pb| SearchDirectory {
path: pb.clone(),
prefix_rewrite: None,
})
.collect()
}
}
fn def_kind_to_human(kind: DefKind) -> &'static str {
match kind {
DefKind::Enum => "enum",
DefKind::Local => "local",
DefKind::ExternType => "extern type",
DefKind::Const => "constant",
DefKind::Field => "field",
DefKind::Function | DefKind::ForeignFunction => "function",
DefKind::Macro => "macro",
DefKind::Method => "method",
DefKind::Mod => "module",
DefKind::Static | DefKind::ForeignStatic => "static",
DefKind::Struct => "struct",
DefKind::Tuple => "tuple",
DefKind::TupleVariant => "tuple variant",
DefKind::Union => "union",
DefKind::Type => "type",
DefKind::Trait => "trait",
DefKind::StructVariant => "struct variant",
}
}
/// Potentially non-helpful mapping of impl kind.
fn impl_kind_to_human(kind: &ImplKind) -> &'static str {
match kind {
ImplKind::Inherent => "impl",
ImplKind::Direct => "impl for",
ImplKind::Indirect => "impl for ref",
ImplKind::Blanket => "impl for where",
_ => "impl for where deref",
}
}
/// Given two spans, create a new super-span that encloses them both if the files match. If the
/// files don't match, just return the first span as-is.
fn union_spans(a: &data::SpanData, b: &data::SpanData) -> data::SpanData {
if a.file_name != b.file_name {
return a.clone();
}
let (byte_start, line_start, column_start) = if a.byte_start < b.byte_start {
(a.byte_start, a.line_start, a.column_start)
} else {
(b.byte_start, b.line_start, b.column_start)
};
let (byte_end, line_end, column_end) = if a.byte_end > b.byte_end {
(a.byte_end, a.line_end, a.column_end)
} else {
(b.byte_end, b.line_end, b.column_end)
};
data::SpanData {
file_name: a.file_name.clone(),
byte_start,
byte_end,
line_start,
line_end,
column_start,
column_end,
}
}
/// For the purposes of trying to figure out the actual effective nesting range of some type of
/// definition, union its span (which just really covers the symbol name) plus the spans of all of
/// its descendants. This should end up with a sufficiently reasonable line value. This is a hack.
fn recursive_union_spans_of_def(
def: &data::Def,
file_analysis: &data::Analysis,
defs: &Defs,
) -> data::SpanData {
let mut span = def.span.clone();
for id in &def.children {
// It should already be the case that the children are in the same krate, but better safe
// than sorry.
if id.krate != def.id.krate {
continue;
}
let kid = defs.get(file_analysis, *id);
if let Some(ref kid) = kid {
let rec_span = recursive_union_spans_of_def(kid, file_analysis, defs);
span = union_spans(&span, &rec_span);
}
}
span
}
/// Given a list of ids of defs, run recursive_union_spans_of_def on all of them and union up the
/// result. Necessary for when dealing with impls.
fn union_spans_of_defs(
initial_span: &data::SpanData,
ids: &[data::Id],
file_analysis: &data::Analysis,
defs: &Defs,
) -> data::SpanData {
let mut span = initial_span.clone();
for id in ids {
let kid = defs.get(file_analysis, *id);
if let Some(ref kid) = kid {
let rec_span = recursive_union_spans_of_def(kid, file_analysis, defs);
span = union_spans(&span, &rec_span);
}
}
span
}
/// If we unioned together a span that only covers 1 or 2 lines, normalize it to None because
/// nothing interesting will happen from a presentation perspective. (If we had proper AST info
/// about the span, it would be appropriate to keep it and expose it, but this is all derived from
/// shoddy inference.)
fn ignore_boring_spans(span: &data::SpanData) -> Option<&data::SpanData> {
match span {
span if span.line_end.0 > span.line_start.0 + 1 => Some(span),
_ => None,
}
}
fn pretty_for_impl(imp: &data::Impl, qualname: &str) -> String |
fn pretty_for_def(def: &data::Def, qualname: &str) -> String {
let mut pretty = def_kind_to_human(def.kind).to_owned();
pretty.push_str(" ");
// We use the unsanitized qualname here because it's more human-readable
// and the source-analysis pretty name is allowed to have commas and such
pretty.push_str(qualname);
pretty
}
fn visit_def(
out_data: &mut BTreeSet<String>,
kind: AnalysisKind,
location: &data::SpanData,
qualname: &str,
def: &data::Def,
context: Option<&str>,
nesting: Option<&data::SpanData>,
) {
let pretty = pretty_for_def(&def, &qualname);
visit_common(
out_data, kind, location, qualname, &pretty, context, nesting,
);
}
fn visit_common(
out_data: &mut BTreeSet<String>,
kind: AnalysisKind,
location: &data::SpanData,
qualname: &str,
pretty: &str,
context: Option<&str>,
nesting: Option<&data::SpanData>,
) {
// Searchfox uses 1-indexed lines, 0-indexed columns.
let col_end = if location.line_start != location.line_end {
// Rust spans are multi-line... So we just use the start column as
// the end column if it spans multiple rows, searchfox has fallback
// code to handle this.
location.column_start.zero_indexed().0
} else {
location.column_end.zero_indexed().0
};
let loc = Location {
lineno: location.line_start.0,
col_start: location.column_start.zero_indexed().0,
col_end,
};
let sanitized = sanitize_symbol(qualname);
let target_data = WithLocation {
data: AnalysisTarget {
kind,
pretty: sanitized.clone(),
sym: sanitized.clone(),
context: String::from(context.unwrap_or("")),
contextsym: String::from(context.unwrap_or("")),
peek_range: LineRange {
start_lineno: 0,
end_lineno: 0,
},
},
loc: loc.clone(),
};
out_data.insert(format!("{}", target_data));
let nesting_range = match nesting {
Some(span) => SourceRange {
// Hack note: These positions would ideally be those of braces. But they're not, so
// while the position:sticky UI stuff should work-ish, other things will not.
start_lineno: span.line_start.0,
start_col: span.column_start.zero_indexed().0,
end_lineno: span.line_end.0,
end_col: span.column_end.zero_indexed().0,
},
None => SourceRange {
start_lineno: 0,
start_col: 0,
end_lineno: 0,
end_col: 0,
},
};
let source_data = WithLocation {
data: AnalysisSource {
syntax: vec![],
pretty: pretty.to_string(),
sym: vec![sanitized],
no_crossref: false,
nesting_range,
},
loc,
};
out_data.insert(format!("{}", source_data));
}
/// Normalizes a searchfox user-visible relative file path to be an absolute
/// local filesystem path. No attempt is made to validate the existence of the
/// path. That's up to the caller.
fn searchfox_path_to_local_path(searchfox_path: &Path, tree_info: &TreeInfo) -> PathBuf {
if let Ok(objdir_path) = searchfox_path.strip_prefix(tree_info.generated_friendly) {
return tree_info.generated.join(objdir_path);
}
tree_info.srcdir.join(searchfox_path)
}
fn read_existing_contents(map: &mut BTreeSet<String>, file: &Path) {
if let Ok(f) = File::open(file) {
let reader = BufReader::new(f);
for line in reader.lines() {
map.insert(line.unwrap());
}
}
}
fn extract_span_from_source_as_buffer(
reader: &mut File,
span: &data::SpanData,
) -> io::Result<Box<[u8]>> {
reader.seek(std::io::SeekFrom::Start(span.byte_start.into()))?;
let len = (span.byte_end - span.byte_start) as usize;
let mut buffer: Box<[u8]> = vec![0; len].into_boxed_slice();
reader.read_exact(&mut buffer)?;
Ok(buffer)
}
/// Given a reader and a span from that file, extract the text contained by the span. If the span
/// covers multiple lines, then whatever newline delimiters the file has will be included.
///
/// In the event of a file read error or the contents not being valid UTF-8, None is returned.
/// We will log to log::Error in the event of a file read problem because this can be indicative
/// of lower level problems (ex: in vagrant), but not for utf-8 errors which are more expected
/// from sketchy source-files.
fn extract_span_from_source_as_string(
mut reader: &mut File,
span: &data::SpanData,
) -> Option<String> {
match extract_span_from_source_as_buffer(&mut reader, &span) {
Ok(buffer) => match String::from_utf8(buffer.into_vec()) {
Ok(s) => Some(s),
Err(_) => None,
},
// This used to error! but the error payload was always just
// `Unable to read file: Custom { kind: UnexpectedEof, error: "failed to fill whole buffer" }`
// which was not useful or informative and may be due to invalid spans
// being told to us by save-analysis.
Err(_) => None,
}
}
fn analyze_file(
searchfox_path: &PathBuf,
defs: &Defs,
file_analysis: &data::Analysis,
tree_info: &TreeInfo,
) {
use std::io::Write;
debug!("Running analyze_file for {}", searchfox_path.display());
let local_source_path = searchfox_path_to_local_path(searchfox_path, tree_info);
if !local_source_path.exists() {
warn!(
"Skipping nonexistent source file with searchfox path '{}' which mapped to local path '{}'",
searchfox_path.display(),
local_source_path.display()
);
return;
};
// Attempt to open the source file to extract information not currently available from the
// analysis data. Some analysis information may not be emitted if we are unable to access the
// file.
let maybe_source_file = match File::open(&local_source_path) {
Ok(f) => Some(f),
Err(_) => None,
};
let output_file = tree_info.out_analysis_dir.join(searchfox_path);
let mut dataset = BTreeSet::new();
read_existing_contents(&mut dataset, &output_file);
let mut output_dir = output_file.clone();
output_dir.pop();
if let Err(err) = fs::create_dir_all(output_dir) {
error!(
"Couldn't create dir for: {}, {:?}",
output_file.display(),
err
);
return;
}
let mut file = match File::create(&output_file) {
Ok(f) => f,
Err(err) => {
error!(
"Couldn't open output file: {}, {:?}",
output_file.display(),
err
);
return;
}
};
// Be chatty about the files we're outputting so that it's easier to follow
// the path of rust analysis generation.
info!(
"Writing analysis for '{}' to '{}'",
searchfox_path.display(),
output_file.display()
);
for import in &file_analysis.imports {
let id = match import.ref_id {
Some(id) => id,
None => {
debug!(
"Dropping import {} ({:?}): {}, no ref",
import.name, import.kind, import.value
);
continue;
}
};
let def = match defs.get(file_analysis, id) {
Some(def) => def,
None => {
debug!(
"Dropping import {} ({:?}): {}, no def for ref {:?}",
import.name, import.kind, import.value, id
);
continue;
}
};
visit_def(
&mut dataset,
AnalysisKind::Use,
&import.span,
&def.qualname,
&def,
None,
None,
)
}
for def in &file_analysis.defs {
let parent = def
.parent
.and_then(|parent_id| defs.get(file_analysis, parent_id));
if let Some(ref parent) = parent {
if parent.kind == DefKind::Trait {
let trait_dependent_name = construct_qualname(&parent.qualname, &def.name);
visit_def(
&mut dataset,
AnalysisKind::Def,
&def.span,
&trait_dependent_name,
&def,
Some(&parent.qualname),
None,
)
}
}
let crate_id = &file_analysis.prelude.as_ref().unwrap().crate_id;
let qualname = crate_independent_qualname(&def, crate_id);
let nested_span = recursive_union_spans_of_def(def, &file_analysis, &defs);
let maybe_nested = ignore_boring_spans(&nested_span);
visit_def(
&mut dataset,
AnalysisKind::Def,
&def.span,
&qualname,
&def,
parent.as_ref().map(|p| &*p.qualname),
maybe_nested,
)
}
// We want to expose impls as "def,namespace" with an inferred nesting_range for their
// contents. I don't know if it's a bug or just a dubious design decision, but the impls all
// have empty values and no names, so to get a useful string out of them, we need to extract
// the contents of their span directly.
//
// Because the name needs to be extracted from the source file, we omit this step if we were
// unable to open the file.
if let Some(mut source_file) = maybe_source_file {
for imp in &file_analysis.impls {
// (for simple.rs at least, there is never a parent)
let name = match extract_span_from_source_as_string(&mut source_file, &imp.span) {
Some(s) => s,
None => continue,
};
let crate_id = &file_analysis.prelude.as_ref().unwrap().crate_id;
let qualname = construct_qualname(&crate_id.name, &name);
let pretty = pretty_for_impl(&imp, &qualname);
let nested_span = union_spans_of_defs(&imp.span, &imp.children, &file_analysis, &defs);
let maybe_nested = ignore_boring_spans(&nested_span);
// XXX visit_common currently never emits any syntax types; we want to pretend this is
// a namespace once it does.
visit_common(
&mut dataset,
AnalysisKind::Def,
&imp.span,
&qualname,
&pretty,
None,
maybe_nested,
)
}
}
for ref_ in &file_analysis.refs {
let def = match defs.get(file_analysis, ref_.ref_id) {
Some(d) => d,
None => {
debug!(
"Dropping ref {:?}, kind {:?}, no def",
ref_.ref_id, ref_.kind
);
continue;
}
};
visit_def(
&mut dataset,
AnalysisKind::Use,
&ref_.span,
&def.qualname,
&def,
/* context = */ None, // TODO
/* nesting = */ None,
)
}
for obj in &dataset {
file.write_all(obj.as_bytes()).unwrap();
write!(file, "\n").unwrap();
}
}
// Replace any backslashes in the path with forward slashes. Paths can be a
// combination of backslashes and forward slashes for windows platform builds
// because the paths are normalized by a sed script that will match backslashes
// and output front-slashes. The sed script could be made smarter.
fn linuxized_path(path: &PathBuf) -> PathBuf {
if let Some(pathstr) = path.to_str() {
if pathstr.find('\\').is_some() {
// Pesky backslashes, get rid of them!
let converted = pathstr.replace('\\', "/");
// If we're seeing this, it means the paths weren't normalized and
// now it's a question of minimizing fallout.
if converted.find(":/") == Some(1) {
// Starts with a drive letter, so let's turn this into
// an absolute path
let abs = "/".to_string() + &converted;
return PathBuf::from(abs);
}
// Turn it into a relative path
return PathBuf::from(converted);
}
}
// Already a valid path!
path.clone()
}
fn analyze_crate(analysis: &data::Analysis, defs: &Defs, tree_info: &TreeInfo) {
// Create and populate per-file Analysis instances from the provided per-crate Analysis file.
let mut per_file = HashMap::new();
let crate_name = &*analysis.prelude.as_ref().unwrap().crate_id.name;
info!("Analyzing crate: '{}'", crate_name);
debug!("Crate prelude: {:?}", analysis.prelude);
macro_rules! flat_map_per_file {
($field:ident) => {
for item in &analysis.$field {
let file_analysis = per_file
.entry(linuxized_path(&item.span.file_name))
.or_insert_with(|| {
let prelude = analysis.prelude.clone();
let mut analysis = data::Analysis::new(analysis.config.clone());
analysis.prelude = prelude;
analysis
});
file_analysis.$field.push(item.clone());
}
};
}
flat_map_per_file!(imports);
flat_map_per_file!(defs);
flat_map_per_file!(impls);
flat_map_per_file!(refs);
flat_map_per_file!(macro_refs);
flat_map_per_file!(relations);
for (searchfox_path, analysis) in per_file.drain() {
// Absolute paths mean that the save-analysis data wasn't normalized
// into the searchfox path convention, which means we can't generate
// analysis data, so just skip.
//
// This will be the case for libraries built with cargo that have paths
// that have prefixes that look like "/cargo/registry/src/github.com-".
if searchfox_path.is_absolute() {
warn!(
"Skipping absolute analysis path {}",
searchfox_path.display()
);
continue;
}
analyze_file(&searchfox_path, defs, &analysis, tree_info);
}
}
fn main() {
use clap::Arg;
env_logger::init();
let matches = app_from_crate!()
.args_from_usage(
"<src> 'Points to the source root (FILES_ROOT)'
<output> 'Points to the directory where searchfox metadata should go (ANALYSIS_ROOT)'
<generated> 'Points to the generated source files root (GENERATED)'",
)
.arg(
Arg::with_name("input")
.required(false)
.multiple(true)
.help("rustc analysis directories"),
)
.get_matches();
let srcdir = Path::new(matches.value_of("src").unwrap());
let out_analysis_dir = Path::new(matches.value_of("output").unwrap());
let generated = Path::new(matches.value_of("generated").unwrap());
let tree_info = TreeInfo {
srcdir,
out_analysis_dir,
generated,
generated_friendly: &PathBuf::from("__GENERATED__"),
};
info!("Tree info: {:?}", tree_info);
let input_dirs = match matches.values_of("input") {
Some(inputs) => inputs.map(PathBuf::from).collect(),
None => vec![],
};
let loader = Loader::new(input_dirs);
let crates = rls_analysis::read_analysis_from_files(&loader, Default::default(), &[]);
info!(
"Crates: {:?}",
crates.iter().map(|k| &k.id.name).collect::<Vec<_>>()
);
// Create and populate Defs, a map from Id to Def, across all crates before beginning analysis.
// This is necessary because Def and Ref instances name Defs via Id.
let mut defs = Defs::new();
for krate in &crates {
for def in &krate.analysis.defs {
defs.insert(&krate.analysis, def);
}
}
for krate in crates {
analyze_crate(&krate.analysis, &defs, &tree_info);
}
}
| {
let mut pretty = impl_kind_to_human(&imp.kind).to_owned();
pretty.push_str(" ");
pretty.push_str(qualname);
pretty
} | identifier_body |
rust-indexer.rs | #[macro_use]
extern crate clap;
extern crate env_logger;
#[macro_use]
extern crate log;
extern crate rls_analysis;
extern crate rls_data as data;
extern crate tools;
use crate::data::GlobalCrateId;
use crate::data::{DefKind, ImplKind};
use rls_analysis::{AnalysisHost, AnalysisLoader, SearchDirectory};
use std::collections::{BTreeSet, HashMap};
use std::fs::{self, File};
use std::io;
use std::io::{BufRead, BufReader, Read, Seek};
use std::path::{Path, PathBuf};
use tools::file_format::analysis::{
AnalysisKind, AnalysisSource, AnalysisTarget, LineRange, Location, SourceRange, WithLocation,
};
/// A global definition id in a crate.
///
/// FIXME(emilio): This key is kind of slow, because GlobalCrateId contains a
/// String. There's a "disambiguator" field which may be more than enough for
/// our purposes.
#[derive(Clone, Hash, Debug, Eq, PartialEq)]
pub struct DefId(GlobalCrateId, u32);
/// A map from global definition ids to the actual definition.
pub struct Defs {
map: HashMap<DefId, data::Def>,
}
/// Local filesystem path mappings and metadata which exist for the following
/// purposes:
/// 1. Know where to output the analysis files.
/// - There is only ever one analysis output directory.
/// 2. Know how to locate rust source files in order to hackily extract strings
/// that should have been in the save-analysis files.
/// - After config scripts run and normalize things there are 2 source
/// directories: revision controlled source (cross-platform) and the
/// (per-platform) generated files directory.
#[derive(Debug)]
struct TreeInfo<'a> {
/// Local filesystem path root for the analysis dir where rust-indexer.rs
/// should write its output.
out_analysis_dir: &'a Path,
/// Local filesystem path root for the source tree. In the searchfox path
/// space presented to users, this means all paths not prefixed with
/// `__GENERATED__`.
srcdir: &'a Path,
/// Local filesystem path root for the per-platform generated source tree.
/// In the searchfox path space presented to users, this means paths
/// prefixed with `__GENERATED__`.
generated: &'a Path,
/// The searchfox path space prefix for generated.
generated_friendly: &'a Path,
}
fn construct_qualname(scope: &str, name: &str) -> String {
// Some of the names don't start with ::, for example:
// __self_0_0$282
// <Loader>::new
// Since we're gluing it to the "scope" (which might be a crate name)
// we'll insert the :: to make it more readable
let glue = if name.starts_with("::") { "" } else { "::" };
format!("{}{}{}", scope, glue, name)
}
fn sanitize_symbol(sym: &str) -> String {
// Downstream processing of the symbol doesn't deal well with
// these characters, so replace them with underscores
sym.replace(",", "_").replace(" ", "_").replace("\n", "_")
}
// Given a definition, and the global crate id where that definition is found,
// return a qualified name that identifies the definition unambiguously.
fn crate_independent_qualname(def: &data::Def, crate_id: &data::GlobalCrateId) -> String {
// For stuff with "no_mangle" functions or statics, or extern declarations,
// we just use the name.
//
// TODO(emilio): Maybe there's a way to get the #[link_name] attribute from
// here and make C++ agree with that? Though we don't use it so it may not
// be worth the churn.
fn use_unmangled_name(def: &data::Def) -> bool {
match def.kind {
DefKind::ForeignStatic | DefKind::ForeignFunction => true,
DefKind::Static | DefKind::Function => {
def.attributes.iter().any(|attr| attr.value == "no_mangle")
}
_ => false,
}
}
if use_unmangled_name(def) {
return def.name.clone();
}
construct_qualname(&crate_id.name, &def.qualname)
}
impl Defs {
fn new() -> Self {
Self {
map: HashMap::new(),
}
}
fn insert(&mut self, analysis: &data::Analysis, def: &data::Def) {
let crate_id = analysis.prelude.as_ref().unwrap().crate_id.clone();
let mut definition = def.clone();
definition.qualname = crate_independent_qualname(&def, &crate_id);
let index = definition.id.index;
let defid = DefId(crate_id, index);
debug!("Indexing def: {:?} -> {:?}", defid, definition);
let previous = self.map.insert(defid, definition);
if let Some(previous) = previous {
// This shouldn't happen, but as of right now it can happen with
// some builtin definitions when highly generic types are involved.
// This is probably a rust bug, just ignore it for now.
debug!(
"Found a definition with the same ID twice? {:?}, {:?}",
previous, def,
);
}
}
/// Getter for a given local id, which takes care of converting to a global
/// ID and returning the definition if present.
fn get(&self, analysis: &data::Analysis, id: data::Id) -> Option<data::Def> {
let prelude = analysis.prelude.as_ref().unwrap();
let krate_id = if id.krate == 0 {
prelude.crate_id.clone()
} else {
// TODO(emilio): This escales with the number of crates in this
// particular crate, but it's probably not too bad, since it should
// be a pretty fast linear search.
let krate = prelude
.external_crates
.iter()
.find(|krate| krate.num == id.krate);
let krate = match krate {
Some(k) => k,
None => {
debug!("Crate not found: {:?}", id);
return None;
}
};
krate.id.clone()
};
let id = DefId(krate_id, id.index);
let result = self.map.get(&id).cloned();
if result.is_none() {
debug!("Def not found: {:?}", id);
}
result
}
}
#[derive(Clone)]
pub struct Loader {
deps_dirs: Vec<PathBuf>,
}
impl Loader {
pub fn new(deps_dirs: Vec<PathBuf>) -> Self {
Self { deps_dirs }
}
}
impl AnalysisLoader for Loader {
fn needs_hard_reload(&self, _: &Path) -> bool {
true
}
fn fresh_host(&self) -> AnalysisHost<Self> {
AnalysisHost::new_with_loader(self.clone())
}
fn set_path_prefix(&mut self, _: &Path) {}
fn abs_path_prefix(&self) -> Option<PathBuf> {
None
}
fn | (&self) -> Vec<SearchDirectory> {
self.deps_dirs
.iter()
.map(|pb| SearchDirectory {
path: pb.clone(),
prefix_rewrite: None,
})
.collect()
}
}
fn def_kind_to_human(kind: DefKind) -> &'static str {
match kind {
DefKind::Enum => "enum",
DefKind::Local => "local",
DefKind::ExternType => "extern type",
DefKind::Const => "constant",
DefKind::Field => "field",
DefKind::Function | DefKind::ForeignFunction => "function",
DefKind::Macro => "macro",
DefKind::Method => "method",
DefKind::Mod => "module",
DefKind::Static | DefKind::ForeignStatic => "static",
DefKind::Struct => "struct",
DefKind::Tuple => "tuple",
DefKind::TupleVariant => "tuple variant",
DefKind::Union => "union",
DefKind::Type => "type",
DefKind::Trait => "trait",
DefKind::StructVariant => "struct variant",
}
}
/// Potentially non-helpful mapping of impl kind.
fn impl_kind_to_human(kind: &ImplKind) -> &'static str {
match kind {
ImplKind::Inherent => "impl",
ImplKind::Direct => "impl for",
ImplKind::Indirect => "impl for ref",
ImplKind::Blanket => "impl for where",
_ => "impl for where deref",
}
}
/// Given two spans, create a new super-span that encloses them both if the files match. If the
/// files don't match, just return the first span as-is.
fn union_spans(a: &data::SpanData, b: &data::SpanData) -> data::SpanData {
if a.file_name != b.file_name {
return a.clone();
}
let (byte_start, line_start, column_start) = if a.byte_start < b.byte_start {
(a.byte_start, a.line_start, a.column_start)
} else {
(b.byte_start, b.line_start, b.column_start)
};
let (byte_end, line_end, column_end) = if a.byte_end > b.byte_end {
(a.byte_end, a.line_end, a.column_end)
} else {
(b.byte_end, b.line_end, b.column_end)
};
data::SpanData {
file_name: a.file_name.clone(),
byte_start,
byte_end,
line_start,
line_end,
column_start,
column_end,
}
}
/// For the purposes of trying to figure out the actual effective nesting range of some type of
/// definition, union its span (which just really covers the symbol name) plus the spans of all of
/// its descendants. This should end up with a sufficiently reasonable line value. This is a hack.
fn recursive_union_spans_of_def(
def: &data::Def,
file_analysis: &data::Analysis,
defs: &Defs,
) -> data::SpanData {
let mut span = def.span.clone();
for id in &def.children {
// It should already be the case that the children are in the same krate, but better safe
// than sorry.
if id.krate != def.id.krate {
continue;
}
let kid = defs.get(file_analysis, *id);
if let Some(ref kid) = kid {
let rec_span = recursive_union_spans_of_def(kid, file_analysis, defs);
span = union_spans(&span, &rec_span);
}
}
span
}
/// Given a list of ids of defs, run recursive_union_spans_of_def on all of them and union up the
/// result. Necessary for when dealing with impls.
fn union_spans_of_defs(
initial_span: &data::SpanData,
ids: &[data::Id],
file_analysis: &data::Analysis,
defs: &Defs,
) -> data::SpanData {
let mut span = initial_span.clone();
for id in ids {
let kid = defs.get(file_analysis, *id);
if let Some(ref kid) = kid {
let rec_span = recursive_union_spans_of_def(kid, file_analysis, defs);
span = union_spans(&span, &rec_span);
}
}
span
}
/// If we unioned together a span that only covers 1 or 2 lines, normalize it to None because
/// nothing interesting will happen from a presentation perspective. (If we had proper AST info
/// about the span, it would be appropriate to keep it and expose it, but this is all derived from
/// shoddy inference.)
fn ignore_boring_spans(span: &data::SpanData) -> Option<&data::SpanData> {
match span {
span if span.line_end.0 > span.line_start.0 + 1 => Some(span),
_ => None,
}
}
fn pretty_for_impl(imp: &data::Impl, qualname: &str) -> String {
let mut pretty = impl_kind_to_human(&imp.kind).to_owned();
pretty.push_str(" ");
pretty.push_str(qualname);
pretty
}
fn pretty_for_def(def: &data::Def, qualname: &str) -> String {
let mut pretty = def_kind_to_human(def.kind).to_owned();
pretty.push_str(" ");
// We use the unsanitized qualname here because it's more human-readable
// and the source-analysis pretty name is allowed to have commas and such
pretty.push_str(qualname);
pretty
}
fn visit_def(
out_data: &mut BTreeSet<String>,
kind: AnalysisKind,
location: &data::SpanData,
qualname: &str,
def: &data::Def,
context: Option<&str>,
nesting: Option<&data::SpanData>,
) {
let pretty = pretty_for_def(&def, &qualname);
visit_common(
out_data, kind, location, qualname, &pretty, context, nesting,
);
}
fn visit_common(
out_data: &mut BTreeSet<String>,
kind: AnalysisKind,
location: &data::SpanData,
qualname: &str,
pretty: &str,
context: Option<&str>,
nesting: Option<&data::SpanData>,
) {
// Searchfox uses 1-indexed lines, 0-indexed columns.
let col_end = if location.line_start != location.line_end {
// Rust spans are multi-line... So we just use the start column as
// the end column if it spans multiple rows, searchfox has fallback
// code to handle this.
location.column_start.zero_indexed().0
} else {
location.column_end.zero_indexed().0
};
let loc = Location {
lineno: location.line_start.0,
col_start: location.column_start.zero_indexed().0,
col_end,
};
let sanitized = sanitize_symbol(qualname);
let target_data = WithLocation {
data: AnalysisTarget {
kind,
pretty: sanitized.clone(),
sym: sanitized.clone(),
context: String::from(context.unwrap_or("")),
contextsym: String::from(context.unwrap_or("")),
peek_range: LineRange {
start_lineno: 0,
end_lineno: 0,
},
},
loc: loc.clone(),
};
out_data.insert(format!("{}", target_data));
let nesting_range = match nesting {
Some(span) => SourceRange {
// Hack note: These positions would ideally be those of braces. But they're not, so
// while the position:sticky UI stuff should work-ish, other things will not.
start_lineno: span.line_start.0,
start_col: span.column_start.zero_indexed().0,
end_lineno: span.line_end.0,
end_col: span.column_end.zero_indexed().0,
},
None => SourceRange {
start_lineno: 0,
start_col: 0,
end_lineno: 0,
end_col: 0,
},
};
let source_data = WithLocation {
data: AnalysisSource {
syntax: vec![],
pretty: pretty.to_string(),
sym: vec![sanitized],
no_crossref: false,
nesting_range,
},
loc,
};
out_data.insert(format!("{}", source_data));
}
/// Normalizes a searchfox user-visible relative file path to be an absolute
/// local filesystem path. No attempt is made to validate the existence of the
/// path. That's up to the caller.
fn searchfox_path_to_local_path(searchfox_path: &Path, tree_info: &TreeInfo) -> PathBuf {
if let Ok(objdir_path) = searchfox_path.strip_prefix(tree_info.generated_friendly) {
return tree_info.generated.join(objdir_path);
}
tree_info.srcdir.join(searchfox_path)
}
fn read_existing_contents(map: &mut BTreeSet<String>, file: &Path) {
if let Ok(f) = File::open(file) {
let reader = BufReader::new(f);
for line in reader.lines() {
map.insert(line.unwrap());
}
}
}
fn extract_span_from_source_as_buffer(
reader: &mut File,
span: &data::SpanData,
) -> io::Result<Box<[u8]>> {
reader.seek(std::io::SeekFrom::Start(span.byte_start.into()))?;
let len = (span.byte_end - span.byte_start) as usize;
let mut buffer: Box<[u8]> = vec![0; len].into_boxed_slice();
reader.read_exact(&mut buffer)?;
Ok(buffer)
}
/// Given a reader and a span from that file, extract the text contained by the span. If the span
/// covers multiple lines, then whatever newline delimiters the file has will be included.
///
/// In the event of a file read error or the contents not being valid UTF-8, None is returned.
/// We will log to log::Error in the event of a file read problem because this can be indicative
/// of lower level problems (ex: in vagrant), but not for utf-8 errors which are more expected
/// from sketchy source-files.
fn extract_span_from_source_as_string(
mut reader: &mut File,
span: &data::SpanData,
) -> Option<String> {
match extract_span_from_source_as_buffer(&mut reader, &span) {
Ok(buffer) => match String::from_utf8(buffer.into_vec()) {
Ok(s) => Some(s),
Err(_) => None,
},
// This used to error! but the error payload was always just
// `Unable to read file: Custom { kind: UnexpectedEof, error: "failed to fill whole buffer" }`
// which was not useful or informative and may be due to invalid spans
// being told to us by save-analysis.
Err(_) => None,
}
}
fn analyze_file(
searchfox_path: &PathBuf,
defs: &Defs,
file_analysis: &data::Analysis,
tree_info: &TreeInfo,
) {
use std::io::Write;
debug!("Running analyze_file for {}", searchfox_path.display());
let local_source_path = searchfox_path_to_local_path(searchfox_path, tree_info);
if !local_source_path.exists() {
warn!(
"Skipping nonexistent source file with searchfox path '{}' which mapped to local path '{}'",
searchfox_path.display(),
local_source_path.display()
);
return;
};
// Attempt to open the source file to extract information not currently available from the
// analysis data. Some analysis information may not be emitted if we are unable to access the
// file.
let maybe_source_file = match File::open(&local_source_path) {
Ok(f) => Some(f),
Err(_) => None,
};
let output_file = tree_info.out_analysis_dir.join(searchfox_path);
let mut dataset = BTreeSet::new();
read_existing_contents(&mut dataset, &output_file);
let mut output_dir = output_file.clone();
output_dir.pop();
if let Err(err) = fs::create_dir_all(output_dir) {
error!(
"Couldn't create dir for: {}, {:?}",
output_file.display(),
err
);
return;
}
let mut file = match File::create(&output_file) {
Ok(f) => f,
Err(err) => {
error!(
"Couldn't open output file: {}, {:?}",
output_file.display(),
err
);
return;
}
};
// Be chatty about the files we're outputting so that it's easier to follow
// the path of rust analysis generation.
info!(
"Writing analysis for '{}' to '{}'",
searchfox_path.display(),
output_file.display()
);
for import in &file_analysis.imports {
let id = match import.ref_id {
Some(id) => id,
None => {
debug!(
"Dropping import {} ({:?}): {}, no ref",
import.name, import.kind, import.value
);
continue;
}
};
let def = match defs.get(file_analysis, id) {
Some(def) => def,
None => {
debug!(
"Dropping import {} ({:?}): {}, no def for ref {:?}",
import.name, import.kind, import.value, id
);
continue;
}
};
visit_def(
&mut dataset,
AnalysisKind::Use,
&import.span,
&def.qualname,
&def,
None,
None,
)
}
for def in &file_analysis.defs {
let parent = def
.parent
.and_then(|parent_id| defs.get(file_analysis, parent_id));
if let Some(ref parent) = parent {
if parent.kind == DefKind::Trait {
let trait_dependent_name = construct_qualname(&parent.qualname, &def.name);
visit_def(
&mut dataset,
AnalysisKind::Def,
&def.span,
&trait_dependent_name,
&def,
Some(&parent.qualname),
None,
)
}
}
let crate_id = &file_analysis.prelude.as_ref().unwrap().crate_id;
let qualname = crate_independent_qualname(&def, crate_id);
let nested_span = recursive_union_spans_of_def(def, &file_analysis, &defs);
let maybe_nested = ignore_boring_spans(&nested_span);
visit_def(
&mut dataset,
AnalysisKind::Def,
&def.span,
&qualname,
&def,
parent.as_ref().map(|p| &*p.qualname),
maybe_nested,
)
}
// We want to expose impls as "def,namespace" with an inferred nesting_range for their
// contents. I don't know if it's a bug or just a dubious design decision, but the impls all
// have empty values and no names, so to get a useful string out of them, we need to extract
// the contents of their span directly.
//
// Because the name needs to be extracted from the source file, we omit this step if we were
// unable to open the file.
if let Some(mut source_file) = maybe_source_file {
for imp in &file_analysis.impls {
// (for simple.rs at least, there is never a parent)
let name = match extract_span_from_source_as_string(&mut source_file, &imp.span) {
Some(s) => s,
None => continue,
};
let crate_id = &file_analysis.prelude.as_ref().unwrap().crate_id;
let qualname = construct_qualname(&crate_id.name, &name);
let pretty = pretty_for_impl(&imp, &qualname);
let nested_span = union_spans_of_defs(&imp.span, &imp.children, &file_analysis, &defs);
let maybe_nested = ignore_boring_spans(&nested_span);
// XXX visit_common currently never emits any syntax types; we want to pretend this is
// a namespace once it does.
visit_common(
&mut dataset,
AnalysisKind::Def,
&imp.span,
&qualname,
&pretty,
None,
maybe_nested,
)
}
}
for ref_ in &file_analysis.refs {
let def = match defs.get(file_analysis, ref_.ref_id) {
Some(d) => d,
None => {
debug!(
"Dropping ref {:?}, kind {:?}, no def",
ref_.ref_id, ref_.kind
);
continue;
}
};
visit_def(
&mut dataset,
AnalysisKind::Use,
&ref_.span,
&def.qualname,
&def,
/* context = */ None, // TODO
/* nesting = */ None,
)
}
for obj in &dataset {
file.write_all(obj.as_bytes()).unwrap();
write!(file, "\n").unwrap();
}
}
// Replace any backslashes in the path with forward slashes. Paths can be a
// combination of backslashes and forward slashes for windows platform builds
// because the paths are normalized by a sed script that will match backslashes
// and output front-slashes. The sed script could be made smarter.
fn linuxized_path(path: &PathBuf) -> PathBuf {
if let Some(pathstr) = path.to_str() {
if pathstr.find('\\').is_some() {
// Pesky backslashes, get rid of them!
let converted = pathstr.replace('\\', "/");
// If we're seeing this, it means the paths weren't normalized and
// now it's a question of minimizing fallout.
if converted.find(":/") == Some(1) {
// Starts with a drive letter, so let's turn this into
// an absolute path
let abs = "/".to_string() + &converted;
return PathBuf::from(abs);
}
// Turn it into a relative path
return PathBuf::from(converted);
}
}
// Already a valid path!
path.clone()
}
fn analyze_crate(analysis: &data::Analysis, defs: &Defs, tree_info: &TreeInfo) {
// Create and populate per-file Analysis instances from the provided per-crate Analysis file.
let mut per_file = HashMap::new();
let crate_name = &*analysis.prelude.as_ref().unwrap().crate_id.name;
info!("Analyzing crate: '{}'", crate_name);
debug!("Crate prelude: {:?}", analysis.prelude);
macro_rules! flat_map_per_file {
($field:ident) => {
for item in &analysis.$field {
let file_analysis = per_file
.entry(linuxized_path(&item.span.file_name))
.or_insert_with(|| {
let prelude = analysis.prelude.clone();
let mut analysis = data::Analysis::new(analysis.config.clone());
analysis.prelude = prelude;
analysis
});
file_analysis.$field.push(item.clone());
}
};
}
flat_map_per_file!(imports);
flat_map_per_file!(defs);
flat_map_per_file!(impls);
flat_map_per_file!(refs);
flat_map_per_file!(macro_refs);
flat_map_per_file!(relations);
for (searchfox_path, analysis) in per_file.drain() {
// Absolute paths mean that the save-analysis data wasn't normalized
// into the searchfox path convention, which means we can't generate
// analysis data, so just skip.
//
// This will be the case for libraries built with cargo that have paths
// that have prefixes that look like "/cargo/registry/src/github.com-".
if searchfox_path.is_absolute() {
warn!(
"Skipping absolute analysis path {}",
searchfox_path.display()
);
continue;
}
analyze_file(&searchfox_path, defs, &analysis, tree_info);
}
}
fn main() {
use clap::Arg;
env_logger::init();
let matches = app_from_crate!()
.args_from_usage(
"<src> 'Points to the source root (FILES_ROOT)'
<output> 'Points to the directory where searchfox metadata should go (ANALYSIS_ROOT)'
<generated> 'Points to the generated source files root (GENERATED)'",
)
.arg(
Arg::with_name("input")
.required(false)
.multiple(true)
.help("rustc analysis directories"),
)
.get_matches();
let srcdir = Path::new(matches.value_of("src").unwrap());
let out_analysis_dir = Path::new(matches.value_of("output").unwrap());
let generated = Path::new(matches.value_of("generated").unwrap());
let tree_info = TreeInfo {
srcdir,
out_analysis_dir,
generated,
generated_friendly: &PathBuf::from("__GENERATED__"),
};
info!("Tree info: {:?}", tree_info);
let input_dirs = match matches.values_of("input") {
Some(inputs) => inputs.map(PathBuf::from).collect(),
None => vec![],
};
let loader = Loader::new(input_dirs);
let crates = rls_analysis::read_analysis_from_files(&loader, Default::default(), &[]);
info!(
"Crates: {:?}",
crates.iter().map(|k| &k.id.name).collect::<Vec<_>>()
);
// Create and populate Defs, a map from Id to Def, across all crates before beginning analysis.
// This is necessary because Def and Ref instances name Defs via Id.
let mut defs = Defs::new();
for krate in &crates {
for def in &krate.analysis.defs {
defs.insert(&krate.analysis, def);
}
}
for krate in crates {
analyze_crate(&krate.analysis, &defs, &tree_info);
}
}
| search_directories | identifier_name |
rust-indexer.rs | #[macro_use]
extern crate clap;
extern crate env_logger;
#[macro_use]
extern crate log;
extern crate rls_analysis;
extern crate rls_data as data;
extern crate tools;
use crate::data::GlobalCrateId;
use crate::data::{DefKind, ImplKind};
use rls_analysis::{AnalysisHost, AnalysisLoader, SearchDirectory};
use std::collections::{BTreeSet, HashMap};
use std::fs::{self, File};
use std::io;
use std::io::{BufRead, BufReader, Read, Seek};
use std::path::{Path, PathBuf};
use tools::file_format::analysis::{
AnalysisKind, AnalysisSource, AnalysisTarget, LineRange, Location, SourceRange, WithLocation,
};
/// A global definition id in a crate.
///
/// FIXME(emilio): This key is kind of slow, because GlobalCrateId contains a
/// String. There's a "disambiguator" field which may be more than enough for
/// our purposes.
#[derive(Clone, Hash, Debug, Eq, PartialEq)]
pub struct DefId(GlobalCrateId, u32);
/// A map from global definition ids to the actual definition.
pub struct Defs {
map: HashMap<DefId, data::Def>,
}
/// Local filesystem path mappings and metadata which exist for the following
/// purposes:
/// 1. Know where to output the analysis files.
/// - There is only ever one analysis output directory.
/// 2. Know how to locate rust source files in order to hackily extract strings
/// that should have been in the save-analysis files.
/// - After config scripts run and normalize things there are 2 source
/// directories: revision controlled source (cross-platform) and the
/// (per-platform) generated files directory.
#[derive(Debug)]
struct TreeInfo<'a> {
/// Local filesystem path root for the analysis dir where rust-indexer.rs
/// should write its output.
out_analysis_dir: &'a Path,
/// Local filesystem path root for the source tree. In the searchfox path
/// space presented to users, this means all paths not prefixed with
/// `__GENERATED__`.
srcdir: &'a Path,
/// Local filesystem path root for the per-platform generated source tree.
/// In the searchfox path space presented to users, this means paths
/// prefixed with `__GENERATED__`.
generated: &'a Path,
/// The searchfox path space prefix for generated.
generated_friendly: &'a Path,
}
fn construct_qualname(scope: &str, name: &str) -> String {
// Some of the names don't start with ::, for example:
// __self_0_0$282
// <Loader>::new
// Since we're gluing it to the "scope" (which might be a crate name)
// we'll insert the :: to make it more readable
let glue = if name.starts_with("::") { "" } else { "::" };
format!("{}{}{}", scope, glue, name)
}
fn sanitize_symbol(sym: &str) -> String {
// Downstream processing of the symbol doesn't deal well with
// these characters, so replace them with underscores
sym.replace(",", "_").replace(" ", "_").replace("\n", "_")
}
// Given a definition, and the global crate id where that definition is found,
// return a qualified name that identifies the definition unambiguously.
fn crate_independent_qualname(def: &data::Def, crate_id: &data::GlobalCrateId) -> String {
// For stuff with "no_mangle" functions or statics, or extern declarations,
// we just use the name.
//
// TODO(emilio): Maybe there's a way to get the #[link_name] attribute from
// here and make C++ agree with that? Though we don't use it so it may not
// be worth the churn.
fn use_unmangled_name(def: &data::Def) -> bool {
match def.kind {
DefKind::ForeignStatic | DefKind::ForeignFunction => true,
DefKind::Static | DefKind::Function => {
def.attributes.iter().any(|attr| attr.value == "no_mangle")
}
_ => false,
}
}
if use_unmangled_name(def) {
return def.name.clone();
}
construct_qualname(&crate_id.name, &def.qualname)
}
impl Defs {
fn new() -> Self {
Self {
map: HashMap::new(),
}
}
fn insert(&mut self, analysis: &data::Analysis, def: &data::Def) {
let crate_id = analysis.prelude.as_ref().unwrap().crate_id.clone();
let mut definition = def.clone();
definition.qualname = crate_independent_qualname(&def, &crate_id);
let index = definition.id.index;
let defid = DefId(crate_id, index);
debug!("Indexing def: {:?} -> {:?}", defid, definition);
let previous = self.map.insert(defid, definition);
if let Some(previous) = previous {
// This shouldn't happen, but as of right now it can happen with
// some builtin definitions when highly generic types are involved.
// This is probably a rust bug, just ignore it for now.
debug!(
"Found a definition with the same ID twice? {:?}, {:?}",
previous, def,
);
}
}
/// Getter for a given local id, which takes care of converting to a global
/// ID and returning the definition if present.
fn get(&self, analysis: &data::Analysis, id: data::Id) -> Option<data::Def> {
let prelude = analysis.prelude.as_ref().unwrap();
let krate_id = if id.krate == 0 {
prelude.crate_id.clone()
} else {
// TODO(emilio): This escales with the number of crates in this
// particular crate, but it's probably not too bad, since it should
// be a pretty fast linear search.
let krate = prelude
.external_crates
.iter()
.find(|krate| krate.num == id.krate);
let krate = match krate {
Some(k) => k,
None => {
debug!("Crate not found: {:?}", id);
return None;
}
};
krate.id.clone()
};
let id = DefId(krate_id, id.index);
let result = self.map.get(&id).cloned();
if result.is_none() {
debug!("Def not found: {:?}", id);
}
result
}
}
#[derive(Clone)]
pub struct Loader {
deps_dirs: Vec<PathBuf>,
}
impl Loader {
pub fn new(deps_dirs: Vec<PathBuf>) -> Self {
Self { deps_dirs }
}
}
impl AnalysisLoader for Loader {
fn needs_hard_reload(&self, _: &Path) -> bool {
true
}
fn fresh_host(&self) -> AnalysisHost<Self> {
AnalysisHost::new_with_loader(self.clone())
}
fn set_path_prefix(&mut self, _: &Path) {}
fn abs_path_prefix(&self) -> Option<PathBuf> {
None
}
fn search_directories(&self) -> Vec<SearchDirectory> {
self.deps_dirs
.iter()
.map(|pb| SearchDirectory {
path: pb.clone(),
prefix_rewrite: None,
})
.collect()
}
}
fn def_kind_to_human(kind: DefKind) -> &'static str {
match kind {
DefKind::Enum => "enum",
DefKind::Local => "local",
DefKind::ExternType => "extern type",
DefKind::Const => "constant",
DefKind::Field => "field",
DefKind::Function | DefKind::ForeignFunction => "function",
DefKind::Macro => "macro",
DefKind::Method => "method",
DefKind::Mod => "module",
DefKind::Static | DefKind::ForeignStatic => "static",
DefKind::Struct => "struct",
DefKind::Tuple => "tuple",
DefKind::TupleVariant => "tuple variant",
DefKind::Union => "union",
DefKind::Type => "type",
DefKind::Trait => "trait",
DefKind::StructVariant => "struct variant",
}
}
/// Potentially non-helpful mapping of impl kind.
fn impl_kind_to_human(kind: &ImplKind) -> &'static str {
match kind {
ImplKind::Inherent => "impl",
ImplKind::Direct => "impl for",
ImplKind::Indirect => "impl for ref",
ImplKind::Blanket => "impl for where",
_ => "impl for where deref",
}
}
/// Given two spans, create a new super-span that encloses them both if the files match. If the
/// files don't match, just return the first span as-is.
fn union_spans(a: &data::SpanData, b: &data::SpanData) -> data::SpanData {
if a.file_name != b.file_name {
return a.clone();
}
let (byte_start, line_start, column_start) = if a.byte_start < b.byte_start {
(a.byte_start, a.line_start, a.column_start)
} else {
(b.byte_start, b.line_start, b.column_start)
};
let (byte_end, line_end, column_end) = if a.byte_end > b.byte_end {
(a.byte_end, a.line_end, a.column_end)
} else {
(b.byte_end, b.line_end, b.column_end)
};
data::SpanData {
file_name: a.file_name.clone(),
byte_start,
byte_end,
line_start,
line_end,
column_start,
column_end,
}
}
/// For the purposes of trying to figure out the actual effective nesting range of some type of
/// definition, union its span (which just really covers the symbol name) plus the spans of all of
/// its descendants. This should end up with a sufficiently reasonable line value. This is a hack.
fn recursive_union_spans_of_def(
def: &data::Def,
file_analysis: &data::Analysis,
defs: &Defs,
) -> data::SpanData {
let mut span = def.span.clone();
for id in &def.children {
// It should already be the case that the children are in the same krate, but better safe
// than sorry.
if id.krate != def.id.krate {
continue;
}
let kid = defs.get(file_analysis, *id);
if let Some(ref kid) = kid {
let rec_span = recursive_union_spans_of_def(kid, file_analysis, defs);
span = union_spans(&span, &rec_span);
}
}
span
}
/// Given a list of ids of defs, run recursive_union_spans_of_def on all of them and union up the
/// result. Necessary for when dealing with impls.
fn union_spans_of_defs(
initial_span: &data::SpanData,
ids: &[data::Id],
file_analysis: &data::Analysis,
defs: &Defs,
) -> data::SpanData {
let mut span = initial_span.clone();
for id in ids {
let kid = defs.get(file_analysis, *id);
if let Some(ref kid) = kid {
let rec_span = recursive_union_spans_of_def(kid, file_analysis, defs);
span = union_spans(&span, &rec_span);
}
}
span
}
/// If we unioned together a span that only covers 1 or 2 lines, normalize it to None because
/// nothing interesting will happen from a presentation perspective. (If we had proper AST info
/// about the span, it would be appropriate to keep it and expose it, but this is all derived from
/// shoddy inference.)
fn ignore_boring_spans(span: &data::SpanData) -> Option<&data::SpanData> {
match span {
span if span.line_end.0 > span.line_start.0 + 1 => Some(span),
_ => None,
}
}
fn pretty_for_impl(imp: &data::Impl, qualname: &str) -> String {
let mut pretty = impl_kind_to_human(&imp.kind).to_owned();
pretty.push_str(" ");
pretty.push_str(qualname);
pretty
}
fn pretty_for_def(def: &data::Def, qualname: &str) -> String {
let mut pretty = def_kind_to_human(def.kind).to_owned();
pretty.push_str(" ");
// We use the unsanitized qualname here because it's more human-readable
// and the source-analysis pretty name is allowed to have commas and such
pretty.push_str(qualname);
pretty
}
fn visit_def(
out_data: &mut BTreeSet<String>,
kind: AnalysisKind,
location: &data::SpanData,
qualname: &str,
def: &data::Def,
context: Option<&str>,
nesting: Option<&data::SpanData>,
) {
let pretty = pretty_for_def(&def, &qualname);
visit_common(
out_data, kind, location, qualname, &pretty, context, nesting,
);
}
fn visit_common(
out_data: &mut BTreeSet<String>,
kind: AnalysisKind,
location: &data::SpanData,
qualname: &str,
pretty: &str,
context: Option<&str>,
nesting: Option<&data::SpanData>,
) {
// Searchfox uses 1-indexed lines, 0-indexed columns.
let col_end = if location.line_start != location.line_end {
// Rust spans are multi-line... So we just use the start column as
// the end column if it spans multiple rows, searchfox has fallback
// code to handle this.
location.column_start.zero_indexed().0
} else {
location.column_end.zero_indexed().0
};
let loc = Location {
lineno: location.line_start.0,
col_start: location.column_start.zero_indexed().0,
col_end,
};
let sanitized = sanitize_symbol(qualname);
let target_data = WithLocation {
data: AnalysisTarget {
kind,
pretty: sanitized.clone(),
sym: sanitized.clone(),
context: String::from(context.unwrap_or("")),
contextsym: String::from(context.unwrap_or("")),
peek_range: LineRange {
start_lineno: 0,
end_lineno: 0,
},
},
loc: loc.clone(),
};
out_data.insert(format!("{}", target_data));
let nesting_range = match nesting {
Some(span) => SourceRange {
// Hack note: These positions would ideally be those of braces. But they're not, so
// while the position:sticky UI stuff should work-ish, other things will not.
start_lineno: span.line_start.0,
start_col: span.column_start.zero_indexed().0,
end_lineno: span.line_end.0,
end_col: span.column_end.zero_indexed().0,
},
None => SourceRange {
start_lineno: 0,
start_col: 0,
end_lineno: 0,
end_col: 0,
},
};
let source_data = WithLocation {
data: AnalysisSource {
syntax: vec![],
pretty: pretty.to_string(),
sym: vec![sanitized],
no_crossref: false,
nesting_range,
},
loc,
};
out_data.insert(format!("{}", source_data));
}
/// Normalizes a searchfox user-visible relative file path to be an absolute
/// local filesystem path. No attempt is made to validate the existence of the
/// path. That's up to the caller.
fn searchfox_path_to_local_path(searchfox_path: &Path, tree_info: &TreeInfo) -> PathBuf {
if let Ok(objdir_path) = searchfox_path.strip_prefix(tree_info.generated_friendly) {
return tree_info.generated.join(objdir_path);
}
tree_info.srcdir.join(searchfox_path)
}
fn read_existing_contents(map: &mut BTreeSet<String>, file: &Path) {
if let Ok(f) = File::open(file) {
let reader = BufReader::new(f);
for line in reader.lines() {
map.insert(line.unwrap());
}
}
}
fn extract_span_from_source_as_buffer(
reader: &mut File,
span: &data::SpanData,
) -> io::Result<Box<[u8]>> {
reader.seek(std::io::SeekFrom::Start(span.byte_start.into()))?;
let len = (span.byte_end - span.byte_start) as usize;
let mut buffer: Box<[u8]> = vec![0; len].into_boxed_slice();
reader.read_exact(&mut buffer)?;
Ok(buffer)
}
/// Given a reader and a span from that file, extract the text contained by the span. If the span
/// covers multiple lines, then whatever newline delimiters the file has will be included.
///
/// In the event of a file read error or the contents not being valid UTF-8, None is returned.
/// We will log to log::Error in the event of a file read problem because this can be indicative
/// of lower level problems (ex: in vagrant), but not for utf-8 errors which are more expected
/// from sketchy source-files.
fn extract_span_from_source_as_string(
mut reader: &mut File,
span: &data::SpanData,
) -> Option<String> {
match extract_span_from_source_as_buffer(&mut reader, &span) {
Ok(buffer) => match String::from_utf8(buffer.into_vec()) {
Ok(s) => Some(s),
Err(_) => None,
},
// This used to error! but the error payload was always just
// `Unable to read file: Custom { kind: UnexpectedEof, error: "failed to fill whole buffer" }`
// which was not useful or informative and may be due to invalid spans
// being told to us by save-analysis.
Err(_) => None,
}
}
fn analyze_file(
searchfox_path: &PathBuf,
defs: &Defs,
file_analysis: &data::Analysis,
tree_info: &TreeInfo,
) {
use std::io::Write;
debug!("Running analyze_file for {}", searchfox_path.display());
let local_source_path = searchfox_path_to_local_path(searchfox_path, tree_info); |
if !local_source_path.exists() {
warn!(
"Skipping nonexistent source file with searchfox path '{}' which mapped to local path '{}'",
searchfox_path.display(),
local_source_path.display()
);
return;
};
// Attempt to open the source file to extract information not currently available from the
// analysis data. Some analysis information may not be emitted if we are unable to access the
// file.
let maybe_source_file = match File::open(&local_source_path) {
Ok(f) => Some(f),
Err(_) => None,
};
let output_file = tree_info.out_analysis_dir.join(searchfox_path);
let mut dataset = BTreeSet::new();
read_existing_contents(&mut dataset, &output_file);
let mut output_dir = output_file.clone();
output_dir.pop();
if let Err(err) = fs::create_dir_all(output_dir) {
error!(
"Couldn't create dir for: {}, {:?}",
output_file.display(),
err
);
return;
}
let mut file = match File::create(&output_file) {
Ok(f) => f,
Err(err) => {
error!(
"Couldn't open output file: {}, {:?}",
output_file.display(),
err
);
return;
}
};
// Be chatty about the files we're outputting so that it's easier to follow
// the path of rust analysis generation.
info!(
"Writing analysis for '{}' to '{}'",
searchfox_path.display(),
output_file.display()
);
for import in &file_analysis.imports {
let id = match import.ref_id {
Some(id) => id,
None => {
debug!(
"Dropping import {} ({:?}): {}, no ref",
import.name, import.kind, import.value
);
continue;
}
};
let def = match defs.get(file_analysis, id) {
Some(def) => def,
None => {
debug!(
"Dropping import {} ({:?}): {}, no def for ref {:?}",
import.name, import.kind, import.value, id
);
continue;
}
};
visit_def(
&mut dataset,
AnalysisKind::Use,
&import.span,
&def.qualname,
&def,
None,
None,
)
}
for def in &file_analysis.defs {
let parent = def
.parent
.and_then(|parent_id| defs.get(file_analysis, parent_id));
if let Some(ref parent) = parent {
if parent.kind == DefKind::Trait {
let trait_dependent_name = construct_qualname(&parent.qualname, &def.name);
visit_def(
&mut dataset,
AnalysisKind::Def,
&def.span,
&trait_dependent_name,
&def,
Some(&parent.qualname),
None,
)
}
}
let crate_id = &file_analysis.prelude.as_ref().unwrap().crate_id;
let qualname = crate_independent_qualname(&def, crate_id);
let nested_span = recursive_union_spans_of_def(def, &file_analysis, &defs);
let maybe_nested = ignore_boring_spans(&nested_span);
visit_def(
&mut dataset,
AnalysisKind::Def,
&def.span,
&qualname,
&def,
parent.as_ref().map(|p| &*p.qualname),
maybe_nested,
)
}
// We want to expose impls as "def,namespace" with an inferred nesting_range for their
// contents. I don't know if it's a bug or just a dubious design decision, but the impls all
// have empty values and no names, so to get a useful string out of them, we need to extract
// the contents of their span directly.
//
// Because the name needs to be extracted from the source file, we omit this step if we were
// unable to open the file.
if let Some(mut source_file) = maybe_source_file {
for imp in &file_analysis.impls {
// (for simple.rs at least, there is never a parent)
let name = match extract_span_from_source_as_string(&mut source_file, &imp.span) {
Some(s) => s,
None => continue,
};
let crate_id = &file_analysis.prelude.as_ref().unwrap().crate_id;
let qualname = construct_qualname(&crate_id.name, &name);
let pretty = pretty_for_impl(&imp, &qualname);
let nested_span = union_spans_of_defs(&imp.span, &imp.children, &file_analysis, &defs);
let maybe_nested = ignore_boring_spans(&nested_span);
// XXX visit_common currently never emits any syntax types; we want to pretend this is
// a namespace once it does.
visit_common(
&mut dataset,
AnalysisKind::Def,
&imp.span,
&qualname,
&pretty,
None,
maybe_nested,
)
}
}
for ref_ in &file_analysis.refs {
let def = match defs.get(file_analysis, ref_.ref_id) {
Some(d) => d,
None => {
debug!(
"Dropping ref {:?}, kind {:?}, no def",
ref_.ref_id, ref_.kind
);
continue;
}
};
visit_def(
&mut dataset,
AnalysisKind::Use,
&ref_.span,
&def.qualname,
&def,
/* context = */ None, // TODO
/* nesting = */ None,
)
}
for obj in &dataset {
file.write_all(obj.as_bytes()).unwrap();
write!(file, "\n").unwrap();
}
}
// Replace any backslashes in the path with forward slashes. Paths can be a
// combination of backslashes and forward slashes for windows platform builds
// because the paths are normalized by a sed script that will match backslashes
// and output front-slashes. The sed script could be made smarter.
fn linuxized_path(path: &PathBuf) -> PathBuf {
if let Some(pathstr) = path.to_str() {
if pathstr.find('\\').is_some() {
// Pesky backslashes, get rid of them!
let converted = pathstr.replace('\\', "/");
// If we're seeing this, it means the paths weren't normalized and
// now it's a question of minimizing fallout.
if converted.find(":/") == Some(1) {
// Starts with a drive letter, so let's turn this into
// an absolute path
let abs = "/".to_string() + &converted;
return PathBuf::from(abs);
}
// Turn it into a relative path
return PathBuf::from(converted);
}
}
// Already a valid path!
path.clone()
}
fn analyze_crate(analysis: &data::Analysis, defs: &Defs, tree_info: &TreeInfo) {
// Create and populate per-file Analysis instances from the provided per-crate Analysis file.
let mut per_file = HashMap::new();
let crate_name = &*analysis.prelude.as_ref().unwrap().crate_id.name;
info!("Analyzing crate: '{}'", crate_name);
debug!("Crate prelude: {:?}", analysis.prelude);
macro_rules! flat_map_per_file {
($field:ident) => {
for item in &analysis.$field {
let file_analysis = per_file
.entry(linuxized_path(&item.span.file_name))
.or_insert_with(|| {
let prelude = analysis.prelude.clone();
let mut analysis = data::Analysis::new(analysis.config.clone());
analysis.prelude = prelude;
analysis
});
file_analysis.$field.push(item.clone());
}
};
}
flat_map_per_file!(imports);
flat_map_per_file!(defs);
flat_map_per_file!(impls);
flat_map_per_file!(refs);
flat_map_per_file!(macro_refs);
flat_map_per_file!(relations);
for (searchfox_path, analysis) in per_file.drain() {
// Absolute paths mean that the save-analysis data wasn't normalized
// into the searchfox path convention, which means we can't generate
// analysis data, so just skip.
//
// This will be the case for libraries built with cargo that have paths
// that have prefixes that look like "/cargo/registry/src/github.com-".
if searchfox_path.is_absolute() {
warn!(
"Skipping absolute analysis path {}",
searchfox_path.display()
);
continue;
}
analyze_file(&searchfox_path, defs, &analysis, tree_info);
}
}
fn main() {
use clap::Arg;
env_logger::init();
let matches = app_from_crate!()
.args_from_usage(
"<src> 'Points to the source root (FILES_ROOT)'
<output> 'Points to the directory where searchfox metadata should go (ANALYSIS_ROOT)'
<generated> 'Points to the generated source files root (GENERATED)'",
)
.arg(
Arg::with_name("input")
.required(false)
.multiple(true)
.help("rustc analysis directories"),
)
.get_matches();
let srcdir = Path::new(matches.value_of("src").unwrap());
let out_analysis_dir = Path::new(matches.value_of("output").unwrap());
let generated = Path::new(matches.value_of("generated").unwrap());
let tree_info = TreeInfo {
srcdir,
out_analysis_dir,
generated,
generated_friendly: &PathBuf::from("__GENERATED__"),
};
info!("Tree info: {:?}", tree_info);
let input_dirs = match matches.values_of("input") {
Some(inputs) => inputs.map(PathBuf::from).collect(),
None => vec![],
};
let loader = Loader::new(input_dirs);
let crates = rls_analysis::read_analysis_from_files(&loader, Default::default(), &[]);
info!(
"Crates: {:?}",
crates.iter().map(|k| &k.id.name).collect::<Vec<_>>()
);
// Create and populate Defs, a map from Id to Def, across all crates before beginning analysis.
// This is necessary because Def and Ref instances name Defs via Id.
let mut defs = Defs::new();
for krate in &crates {
for def in &krate.analysis.defs {
defs.insert(&krate.analysis, def);
}
}
for krate in crates {
analyze_crate(&krate.analysis, &defs, &tree_info);
}
} | random_line_split |
|
question-player-state.service.spec.ts | // Copyright 2021 The Oppia Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS-IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* @fileoverview Unit tests for the question player state service.
*/
import { TestBed, waitForAsync } from '@angular/core/testing';
import { Question } from 'domain/question/QuestionObjectFactory';
import { StateObjectFactory } from 'domain/state/StateObjectFactory';
import { QuestionPlayerStateService } from './question-player-state.service';
describe('Question player state service', () => {
let qpss: QuestionPlayerStateService;
let questionId = 'question_id';
let stateObject: StateObjectFactory;
let question: Question;
beforeEach(waitForAsync(() => {
TestBed.configureTestingModule({}).compileComponents();
stateObject = TestBed.inject(StateObjectFactory);
question = new Question(
questionId, stateObject.createDefaultState('state'), '', 7, [], []
);
}));
beforeEach(() => {
qpss = TestBed.inject(QuestionPlayerStateService);
});
it('should register hint as used', () => {
qpss.hintUsed(question);
expect(qpss.questionPlayerState[questionId]).toBeDefined();
});
it('should register solution viewed', () => {
qpss.solutionViewed(question);
expect(qpss.questionPlayerState[questionId].viewedSolution).toBeDefined();
});
it('should submit answer', () => {
qpss.answerSubmitted(question, true, '');
qpss.solutionViewed(question);
qpss.answerSubmitted(question, true, '');
expect(qpss.questionPlayerState[questionId].answers.length).toEqual(1);
}); | it('should access on question session completed', () => {
expect(qpss.onQuestionSessionCompleted).toBeDefined();
});
}); |
it('should get question player state data', () => {
expect(qpss.getQuestionPlayerStateData()).toBeDefined();
});
| random_line_split |
puertas.py | #!/bin/python3.5
# Programa obtenido de hacker run, se le pasa lista con 0 y 1, que simbolizan puertas, 0 la puerta abierta 1 la puerta cerrada.
# Nuestro objetivo es abrir todas las puertas
# si se abre y las subyacentes se abrirán si no están abiertas
# el programa devuelve para una lista de 0 y 1 le mínimo de puertas a abrir y el máximo siguiendo este patrón
import sys
def puertas( doors ):
min = 0
max = 0
i = 1
while i < len( doors) -2 :
# Casos en los que hay reducción
if(doors[i]) == 1:
if doo | eturn [ min , max]
def prueba ( ):
for i in range (10):
print (i )
i += i
if __name__ == "__main__":
doors = list ( map( int, input().strip().split(' ')))
print ("La puerta creada: " , doors)
result = puertas (doors)
print( " ".join( map(str , result )))
prueba();
| rs[ i-1 : i+2] == [1,1,1]:
min += 1
max += 2
i += 2
elif doors[ i] == 1:
min += 1
max += 1
i += 1
else:
min += 1
max += 1
i += 1
r | conditional_block |
puertas.py | #!/bin/python3.5
# Programa obtenido de hacker run, se le pasa lista con 0 y 1, que simbolizan puertas, 0 la puerta abierta 1 la puerta cerrada.
# Nuestro objetivo es abrir todas las puertas
# si se abre y las subyacentes se abrirán si no están abiertas
# el programa devuelve para una lista de 0 y 1 le mínimo de puertas a abrir y el máximo siguiendo este patrón
import sys
def puertas( doors ):
min = 0
max = 0
i = 1
while i < len( doors) -2 :
# Casos en los que hay reducción
if(doors[i]) == 1:
if doors[ i-1 : i+2] == [1,1,1]:
min += 1
max += 2
i += 2
elif doors[ i] == 1:
min += 1
max += 1
i += 1
else:
min += 1
max += 1
i += 1
return [ min , max]
def prueba ( ):
for i |
if __name__ == "__main__":
doors = list ( map( int, input().strip().split(' ')))
print ("La puerta creada: " , doors)
result = puertas (doors)
print( " ".join( map(str , result )))
prueba();
| in range (10):
print (i )
i += i
| identifier_body |
puertas.py | #!/bin/python3.5
# Programa obtenido de hacker run, se le pasa lista con 0 y 1, que simbolizan puertas, 0 la puerta abierta 1 la puerta cerrada.
# Nuestro objetivo es abrir todas las puertas
# si se abre y las subyacentes se abrirán si no están abiertas |
import sys
def puertas( doors ):
min = 0
max = 0
i = 1
while i < len( doors) -2 :
# Casos en los que hay reducción
if(doors[i]) == 1:
if doors[ i-1 : i+2] == [1,1,1]:
min += 1
max += 2
i += 2
elif doors[ i] == 1:
min += 1
max += 1
i += 1
else:
min += 1
max += 1
i += 1
return [ min , max]
def prueba ( ):
for i in range (10):
print (i )
i += i
if __name__ == "__main__":
doors = list ( map( int, input().strip().split(' ')))
print ("La puerta creada: " , doors)
result = puertas (doors)
print( " ".join( map(str , result )))
prueba(); | # el programa devuelve para una lista de 0 y 1 le mínimo de puertas a abrir y el máximo siguiendo este patrón | random_line_split |
puertas.py | #!/bin/python3.5
# Programa obtenido de hacker run, se le pasa lista con 0 y 1, que simbolizan puertas, 0 la puerta abierta 1 la puerta cerrada.
# Nuestro objetivo es abrir todas las puertas
# si se abre y las subyacentes se abrirán si no están abiertas
# el programa devuelve para una lista de 0 y 1 le mínimo de puertas a abrir y el máximo siguiendo este patrón
import sys
def puertas( doors ):
min = 0
max = 0
i = 1
while i < len( doors) -2 :
# Casos en los que hay reducción
if(doors[i]) == 1:
if doors[ i-1 : i+2] == [1,1,1]:
min += 1
max += 2
i += 2
elif doors[ i] == 1:
min += 1
max += 1
i += 1
else:
min += 1
max += 1
i += 1
return [ min , max]
def prueba | for i in range (10):
print (i )
i += i
if __name__ == "__main__":
doors = list ( map( int, input().strip().split(' ')))
print ("La puerta creada: " , doors)
result = puertas (doors)
print( " ".join( map(str , result )))
prueba();
| ( ):
| identifier_name |
traits.rs | use approx::AbsDiffEq;
use num::{Bounded, FromPrimitive, Signed};
use na::allocator::Allocator;
use na::{DimMin, DimName, Scalar, U1};
use simba::scalar::{ClosedAdd, ClosedMul, ClosedSub};
use std::cmp::PartialOrd;
/// A type-level number representing a vector, matrix row, or matrix column, dimension.
pub trait Dimension: DimName + DimMin<Self, Output = Self> {}
impl<D: DimName + DimMin<D, Output = Self>> Dimension for D {}
/// A number that can either be an integer or a float.
pub trait Number:
Scalar
+ Copy
+ PartialOrd
+ ClosedAdd
+ ClosedSub
+ ClosedMul
+ AbsDiffEq<Epsilon = Self>
+ Signed
+ FromPrimitive
+ Bounded
{
}
impl<
T: Scalar
+ Copy
+ PartialOrd
+ ClosedAdd | + AbsDiffEq<Epsilon = Self>
+ Signed
+ FromPrimitive
+ Bounded,
> Number for T
{
}
#[doc(hidden)]
pub trait Alloc<N: Scalar, R: Dimension, C: Dimension = U1>:
Allocator<N, R>
+ Allocator<N, C>
+ Allocator<N, U1, R>
+ Allocator<N, U1, C>
+ Allocator<N, R, C>
+ Allocator<N, C, R>
+ Allocator<N, R, R>
+ Allocator<N, C, C>
+ Allocator<bool, R>
+ Allocator<bool, C>
+ Allocator<f32, R>
+ Allocator<f32, C>
+ Allocator<u32, R>
+ Allocator<u32, C>
+ Allocator<i32, R>
+ Allocator<i32, C>
+ Allocator<f64, R>
+ Allocator<f64, C>
+ Allocator<u64, R>
+ Allocator<u64, C>
+ Allocator<i64, R>
+ Allocator<i64, C>
+ Allocator<i16, R>
+ Allocator<i16, C>
+ Allocator<(usize, usize), R>
+ Allocator<(usize, usize), C>
{
}
impl<N: Scalar, R: Dimension, C: Dimension, T> Alloc<N, R, C> for T where
T: Allocator<N, R>
+ Allocator<N, C>
+ Allocator<N, U1, R>
+ Allocator<N, U1, C>
+ Allocator<N, R, C>
+ Allocator<N, C, R>
+ Allocator<N, R, R>
+ Allocator<N, C, C>
+ Allocator<bool, R>
+ Allocator<bool, C>
+ Allocator<f32, R>
+ Allocator<f32, C>
+ Allocator<u32, R>
+ Allocator<u32, C>
+ Allocator<i32, R>
+ Allocator<i32, C>
+ Allocator<f64, R>
+ Allocator<f64, C>
+ Allocator<u64, R>
+ Allocator<u64, C>
+ Allocator<i64, R>
+ Allocator<i64, C>
+ Allocator<i16, R>
+ Allocator<i16, C>
+ Allocator<(usize, usize), R>
+ Allocator<(usize, usize), C>
{
} | + ClosedSub
+ ClosedMul | random_line_split |
db.rs | use postgres::{Connection, TlsMode};
use postgres::types::ToSql;
use postgres::error as sqlerr;
use getopts;
pub fn build (matches: &getopts::Matches) {
if matches.opt_present("i") {
let user = matches.opt_str("u").unwrap_or("postgres".to_owned());
let pass = matches.opt_str("p").expect("need password, use -p opt");
let mut s = String::from("postgres://");
s.push_str(&(user+":"+&pass+"@localhost"));
let conn = Connection::connect(s,
TlsMode::None).expect("cannot connect to sql");
if matches.opt_present("f") {
let rdb = conn.execute("DROP DATABASE stratis", &[]);
let ru = conn.execute("DROP USER stratis", &[]);
println!("FORCED: DB {:?}, User {:?}", rdb, ru);
}
let build = vec![&include_bytes!("../../sql/create_login.sql")[..],
&include_bytes!("../../sql/create_db.sql")[..]];
for n in build {
let s = String::from_utf8_lossy(n);
if let Err(e) = conn.execute(&s, &[]) |
}
}
if matches.opt_present("b") {
let conn = Connection::connect("postgres://stratis:stratis@localhost",
TlsMode::None).expect("cannot connect to sql");
let build = vec![&include_bytes!("../../sql/create_players.sql")[..],
&include_bytes!("../../sql/create_msg.sql")[..],
&include_bytes!("../../sql/create_clients.sql")[..],
&include_bytes!("../../sql/grant_stratis.sql")[..]];
for n in build {
let s = String::from_utf8_lossy(n);
if let Err(e) = conn.execute(&s, &[]) {
println!("build:{:?}\nfor:{:?}\n\n",e,s);
}
}
}
}
pub fn sql_exec(matches: &getopts::Matches, query: &str, params: &[&ToSql]) -> Result<u64, sqlerr::Error> {
let user = matches.opt_str("u").unwrap_or("postgres".to_owned());
let pass = matches.opt_str("p").expect("need password, use -p opt");
let mut s = String::from("postgres://");
s.push_str(&(user+":"+&pass+"@localhost"));
let conn = Connection::connect(s,
TlsMode::None).expect("cannot connect to sql");
let r = conn.execute(query, params);
r
}
| {
println!("build:{:?}\nfor:{:?}\n\n",e,s);
} | conditional_block |
db.rs | use postgres::{Connection, TlsMode};
use postgres::types::ToSql;
use postgres::error as sqlerr;
use getopts;
pub fn | (matches: &getopts::Matches) {
if matches.opt_present("i") {
let user = matches.opt_str("u").unwrap_or("postgres".to_owned());
let pass = matches.opt_str("p").expect("need password, use -p opt");
let mut s = String::from("postgres://");
s.push_str(&(user+":"+&pass+"@localhost"));
let conn = Connection::connect(s,
TlsMode::None).expect("cannot connect to sql");
if matches.opt_present("f") {
let rdb = conn.execute("DROP DATABASE stratis", &[]);
let ru = conn.execute("DROP USER stratis", &[]);
println!("FORCED: DB {:?}, User {:?}", rdb, ru);
}
let build = vec![&include_bytes!("../../sql/create_login.sql")[..],
&include_bytes!("../../sql/create_db.sql")[..]];
for n in build {
let s = String::from_utf8_lossy(n);
if let Err(e) = conn.execute(&s, &[]) {
println!("build:{:?}\nfor:{:?}\n\n",e,s);
}
}
}
if matches.opt_present("b") {
let conn = Connection::connect("postgres://stratis:stratis@localhost",
TlsMode::None).expect("cannot connect to sql");
let build = vec![&include_bytes!("../../sql/create_players.sql")[..],
&include_bytes!("../../sql/create_msg.sql")[..],
&include_bytes!("../../sql/create_clients.sql")[..],
&include_bytes!("../../sql/grant_stratis.sql")[..]];
for n in build {
let s = String::from_utf8_lossy(n);
if let Err(e) = conn.execute(&s, &[]) {
println!("build:{:?}\nfor:{:?}\n\n",e,s);
}
}
}
}
pub fn sql_exec(matches: &getopts::Matches, query: &str, params: &[&ToSql]) -> Result<u64, sqlerr::Error> {
let user = matches.opt_str("u").unwrap_or("postgres".to_owned());
let pass = matches.opt_str("p").expect("need password, use -p opt");
let mut s = String::from("postgres://");
s.push_str(&(user+":"+&pass+"@localhost"));
let conn = Connection::connect(s,
TlsMode::None).expect("cannot connect to sql");
let r = conn.execute(query, params);
r
}
| build | identifier_name |
db.rs | use postgres::{Connection, TlsMode};
use postgres::types::ToSql;
use postgres::error as sqlerr;
use getopts;
pub fn build (matches: &getopts::Matches) {
if matches.opt_present("i") {
let user = matches.opt_str("u").unwrap_or("postgres".to_owned());
let pass = matches.opt_str("p").expect("need password, use -p opt");
let mut s = String::from("postgres://");
s.push_str(&(user+":"+&pass+"@localhost"));
let conn = Connection::connect(s,
TlsMode::None).expect("cannot connect to sql");
if matches.opt_present("f") {
let rdb = conn.execute("DROP DATABASE stratis", &[]);
let ru = conn.execute("DROP USER stratis", &[]);
println!("FORCED: DB {:?}, User {:?}", rdb, ru);
}
let build = vec![&include_bytes!("../../sql/create_login.sql")[..],
&include_bytes!("../../sql/create_db.sql")[..]];
for n in build {
let s = String::from_utf8_lossy(n);
if let Err(e) = conn.execute(&s, &[]) {
println!("build:{:?}\nfor:{:?}\n\n",e,s);
}
}
}
if matches.opt_present("b") {
let conn = Connection::connect("postgres://stratis:stratis@localhost",
TlsMode::None).expect("cannot connect to sql");
let build = vec![&include_bytes!("../../sql/create_players.sql")[..],
&include_bytes!("../../sql/create_msg.sql")[..],
&include_bytes!("../../sql/create_clients.sql")[..],
&include_bytes!("../../sql/grant_stratis.sql")[..]];
for n in build {
let s = String::from_utf8_lossy(n);
if let Err(e) = conn.execute(&s, &[]) {
println!("build:{:?}\nfor:{:?}\n\n",e,s);
}
}
}
}
pub fn sql_exec(matches: &getopts::Matches, query: &str, params: &[&ToSql]) -> Result<u64, sqlerr::Error> | {
let user = matches.opt_str("u").unwrap_or("postgres".to_owned());
let pass = matches.opt_str("p").expect("need password, use -p opt");
let mut s = String::from("postgres://");
s.push_str(&(user+":"+&pass+"@localhost"));
let conn = Connection::connect(s,
TlsMode::None).expect("cannot connect to sql");
let r = conn.execute(query, params);
r
} | identifier_body |
|
db.rs | use postgres::{Connection, TlsMode};
use postgres::types::ToSql;
use postgres::error as sqlerr;
use getopts;
pub fn build (matches: &getopts::Matches) {
if matches.opt_present("i") {
let user = matches.opt_str("u").unwrap_or("postgres".to_owned());
let pass = matches.opt_str("p").expect("need password, use -p opt");
let mut s = String::from("postgres://");
s.push_str(&(user+":"+&pass+"@localhost"));
let conn = Connection::connect(s,
TlsMode::None).expect("cannot connect to sql");
if matches.opt_present("f") {
let rdb = conn.execute("DROP DATABASE stratis", &[]);
let ru = conn.execute("DROP USER stratis", &[]);
println!("FORCED: DB {:?}, User {:?}", rdb, ru);
}
let build = vec![&include_bytes!("../../sql/create_login.sql")[..],
&include_bytes!("../../sql/create_db.sql")[..]];
for n in build {
let s = String::from_utf8_lossy(n);
if let Err(e) = conn.execute(&s, &[]) {
println!("build:{:?}\nfor:{:?}\n\n",e,s);
} | let conn = Connection::connect("postgres://stratis:stratis@localhost",
TlsMode::None).expect("cannot connect to sql");
let build = vec![&include_bytes!("../../sql/create_players.sql")[..],
&include_bytes!("../../sql/create_msg.sql")[..],
&include_bytes!("../../sql/create_clients.sql")[..],
&include_bytes!("../../sql/grant_stratis.sql")[..]];
for n in build {
let s = String::from_utf8_lossy(n);
if let Err(e) = conn.execute(&s, &[]) {
println!("build:{:?}\nfor:{:?}\n\n",e,s);
}
}
}
}
pub fn sql_exec(matches: &getopts::Matches, query: &str, params: &[&ToSql]) -> Result<u64, sqlerr::Error> {
let user = matches.opt_str("u").unwrap_or("postgres".to_owned());
let pass = matches.opt_str("p").expect("need password, use -p opt");
let mut s = String::from("postgres://");
s.push_str(&(user+":"+&pass+"@localhost"));
let conn = Connection::connect(s,
TlsMode::None).expect("cannot connect to sql");
let r = conn.execute(query, params);
r
} | }
}
if matches.opt_present("b") { | random_line_split |
strategy_utils.py | # coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utilities for managing distrubtion strategies."""
from absl import flags
from absl import logging
import tensorflow.compat.v2 as tf
flags.DEFINE_string('tpu', None, 'BNS address for the TPU')
flags.DEFINE_bool('use_gpu', False, 'If True a MirroredStrategy will be used.')
def | (tpu, use_gpu):
"""Utility to create a `tf.DistributionStrategy` for TPU or GPU.
If neither is being used a DefaultStrategy is returned which allows executing
on CPU only.
Args:
tpu: BNS address of TPU to use. Note the flag and param are called TPU as
that is what the xmanager utilities call.
use_gpu: Whether a GPU should be used. This will create a MirroredStrategy.
Raises:
ValueError if both tpu and use_gpu are set.
Returns:
An instance of a `tf.DistributionStrategy`.
"""
if tpu and use_gpu:
raise ValueError('Only one of tpu or use_gpu should be provided.')
if tpu or use_gpu:
logging.info('Devices: \n%s', tf.config.list_logical_devices())
if tpu:
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=tpu)
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.TPUStrategy(resolver)
else:
strategy = tf.distribute.MirroredStrategy()
logging.info('Devices after getting strategy:\n%s',
tf.config.list_logical_devices())
else:
strategy = tf.distribute.get_strategy()
return strategy
| get_strategy | identifier_name |
strategy_utils.py | # coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utilities for managing distrubtion strategies."""
from absl import flags
from absl import logging
import tensorflow.compat.v2 as tf
flags.DEFINE_string('tpu', None, 'BNS address for the TPU')
flags.DEFINE_bool('use_gpu', False, 'If True a MirroredStrategy will be used.')
def get_strategy(tpu, use_gpu):
"""Utility to create a `tf.DistributionStrategy` for TPU or GPU.
If neither is being used a DefaultStrategy is returned which allows executing
on CPU only.
Args:
tpu: BNS address of TPU to use. Note the flag and param are called TPU as
that is what the xmanager utilities call.
use_gpu: Whether a GPU should be used. This will create a MirroredStrategy.
Raises:
ValueError if both tpu and use_gpu are set.
Returns:
An instance of a `tf.DistributionStrategy`.
"""
if tpu and use_gpu:
raise ValueError('Only one of tpu or use_gpu should be provided.')
if tpu or use_gpu:
logging.info('Devices: \n%s', tf.config.list_logical_devices())
if tpu:
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=tpu)
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.TPUStrategy(resolver)
else:
|
logging.info('Devices after getting strategy:\n%s',
tf.config.list_logical_devices())
else:
strategy = tf.distribute.get_strategy()
return strategy
| strategy = tf.distribute.MirroredStrategy() | conditional_block |
strategy_utils.py | # coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utilities for managing distrubtion strategies."""
from absl import flags
from absl import logging
import tensorflow.compat.v2 as tf
flags.DEFINE_string('tpu', None, 'BNS address for the TPU')
flags.DEFINE_bool('use_gpu', False, 'If True a MirroredStrategy will be used.')
def get_strategy(tpu, use_gpu):
| """Utility to create a `tf.DistributionStrategy` for TPU or GPU.
If neither is being used a DefaultStrategy is returned which allows executing
on CPU only.
Args:
tpu: BNS address of TPU to use. Note the flag and param are called TPU as
that is what the xmanager utilities call.
use_gpu: Whether a GPU should be used. This will create a MirroredStrategy.
Raises:
ValueError if both tpu and use_gpu are set.
Returns:
An instance of a `tf.DistributionStrategy`.
"""
if tpu and use_gpu:
raise ValueError('Only one of tpu or use_gpu should be provided.')
if tpu or use_gpu:
logging.info('Devices: \n%s', tf.config.list_logical_devices())
if tpu:
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=tpu)
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.TPUStrategy(resolver)
else:
strategy = tf.distribute.MirroredStrategy()
logging.info('Devices after getting strategy:\n%s',
tf.config.list_logical_devices())
else:
strategy = tf.distribute.get_strategy()
return strategy | identifier_body |
|
strategy_utils.py | # coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utilities for managing distrubtion strategies."""
from absl import flags
from absl import logging
import tensorflow.compat.v2 as tf
flags.DEFINE_string('tpu', None, 'BNS address for the TPU')
flags.DEFINE_bool('use_gpu', False, 'If True a MirroredStrategy will be used.')
def get_strategy(tpu, use_gpu):
"""Utility to create a `tf.DistributionStrategy` for TPU or GPU.
If neither is being used a DefaultStrategy is returned which allows executing
on CPU only.
Args:
tpu: BNS address of TPU to use. Note the flag and param are called TPU as
that is what the xmanager utilities call.
use_gpu: Whether a GPU should be used. This will create a MirroredStrategy.
Raises:
ValueError if both tpu and use_gpu are set.
Returns:
An instance of a `tf.DistributionStrategy`.
"""
if tpu and use_gpu:
raise ValueError('Only one of tpu or use_gpu should be provided.')
if tpu or use_gpu:
logging.info('Devices: \n%s', tf.config.list_logical_devices())
if tpu:
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=tpu)
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.TPUStrategy(resolver)
else:
strategy = tf.distribute.MirroredStrategy()
logging.info('Devices after getting strategy:\n%s',
tf.config.list_logical_devices())
else: | strategy = tf.distribute.get_strategy()
return strategy | random_line_split |
|
ef2.py | #!/usr/bin/env python
import itertools
from operator import attrgetter, itemgetter
class ClusterRecommendation(object):
__slots__ = ("cluster_id", "papers")
def __init__(self, cluster_id, papers):
self.cluster_id = cluster_id
self.papers = [(p.pid, p.score) for p in papers]
def __str__(self):
return "%s %s" % (self.cluster_id, len(self.papers))
def | (self):
return "<ClusterRecommendation %s>" % self.cluster_id
def get_papers(self):
"""Only return a tuple of papers"""
return tuple(zip(*self.papers))[0]
def get_parent(cluster_id):
parent = ":".join(cluster_id.split(":")[:-1])
if parent == "":
return None
return parent
def get_subtree(cluster_id):
subtree = ":".join(cluster_id.split(":")[1:])
if subtree == "":
return None
return subtree
def make_leaf_rec(stream, rec_limit=10):
leaf_stream = itertools.groupby(stream, lambda e: e.local)
for (cluster_id, stream) in leaf_stream:
papers = [e for e in stream]
papers = sorted(papers, key=attrgetter('score'), reverse=True)
yield ClusterRecommendation(cluster_id, papers[:rec_limit])
def parse_tree(stream, rec_limit=10):
mstream = make_leaf_rec(stream, rec_limit)
child_stream = itertools.groupby(mstream, lambda e: get_parent(e.cluster_id))
for (parent_cluster_id, recs) in child_stream:
child_recs = [r for r in recs]
papers = itertools.chain.from_iterable(map(attrgetter('papers'), child_recs))
parent_papers = tuple(zip(*sorted(papers, key=itemgetter(1), reverse=True)))[0]
yield (parent_cluster_id, parent_papers[:rec_limit], child_recs)
| __repr__ | identifier_name |
ef2.py | #!/usr/bin/env python
import itertools
from operator import attrgetter, itemgetter
class ClusterRecommendation(object):
__slots__ = ("cluster_id", "papers")
def __init__(self, cluster_id, papers):
self.cluster_id = cluster_id
self.papers = [(p.pid, p.score) for p in papers]
def __str__(self):
return "%s %s" % (self.cluster_id, len(self.papers))
def __repr__(self):
return "<ClusterRecommendation %s>" % self.cluster_id
def get_papers(self):
"""Only return a tuple of papers"""
return tuple(zip(*self.papers))[0]
def get_parent(cluster_id):
parent = ":".join(cluster_id.split(":")[:-1])
if parent == "":
return None
return parent
def get_subtree(cluster_id):
subtree = ":".join(cluster_id.split(":")[1:])
if subtree == "":
return None
return subtree
| papers = sorted(papers, key=attrgetter('score'), reverse=True)
yield ClusterRecommendation(cluster_id, papers[:rec_limit])
def parse_tree(stream, rec_limit=10):
mstream = make_leaf_rec(stream, rec_limit)
child_stream = itertools.groupby(mstream, lambda e: get_parent(e.cluster_id))
for (parent_cluster_id, recs) in child_stream:
child_recs = [r for r in recs]
papers = itertools.chain.from_iterable(map(attrgetter('papers'), child_recs))
parent_papers = tuple(zip(*sorted(papers, key=itemgetter(1), reverse=True)))[0]
yield (parent_cluster_id, parent_papers[:rec_limit], child_recs) | def make_leaf_rec(stream, rec_limit=10):
leaf_stream = itertools.groupby(stream, lambda e: e.local)
for (cluster_id, stream) in leaf_stream:
papers = [e for e in stream] | random_line_split |
ef2.py | #!/usr/bin/env python
import itertools
from operator import attrgetter, itemgetter
class ClusterRecommendation(object):
__slots__ = ("cluster_id", "papers")
def __init__(self, cluster_id, papers):
self.cluster_id = cluster_id
self.papers = [(p.pid, p.score) for p in papers]
def __str__(self):
return "%s %s" % (self.cluster_id, len(self.papers))
def __repr__(self):
return "<ClusterRecommendation %s>" % self.cluster_id
def get_papers(self):
"""Only return a tuple of papers"""
return tuple(zip(*self.papers))[0]
def get_parent(cluster_id):
parent = ":".join(cluster_id.split(":")[:-1])
if parent == "":
return None
return parent
def get_subtree(cluster_id):
subtree = ":".join(cluster_id.split(":")[1:])
if subtree == "":
return None
return subtree
def make_leaf_rec(stream, rec_limit=10):
leaf_stream = itertools.groupby(stream, lambda e: e.local)
for (cluster_id, stream) in leaf_stream:
|
def parse_tree(stream, rec_limit=10):
mstream = make_leaf_rec(stream, rec_limit)
child_stream = itertools.groupby(mstream, lambda e: get_parent(e.cluster_id))
for (parent_cluster_id, recs) in child_stream:
child_recs = [r for r in recs]
papers = itertools.chain.from_iterable(map(attrgetter('papers'), child_recs))
parent_papers = tuple(zip(*sorted(papers, key=itemgetter(1), reverse=True)))[0]
yield (parent_cluster_id, parent_papers[:rec_limit], child_recs)
| papers = [e for e in stream]
papers = sorted(papers, key=attrgetter('score'), reverse=True)
yield ClusterRecommendation(cluster_id, papers[:rec_limit]) | conditional_block |
ef2.py | #!/usr/bin/env python
import itertools
from operator import attrgetter, itemgetter
class ClusterRecommendation(object):
__slots__ = ("cluster_id", "papers")
def __init__(self, cluster_id, papers):
|
def __str__(self):
return "%s %s" % (self.cluster_id, len(self.papers))
def __repr__(self):
return "<ClusterRecommendation %s>" % self.cluster_id
def get_papers(self):
"""Only return a tuple of papers"""
return tuple(zip(*self.papers))[0]
def get_parent(cluster_id):
parent = ":".join(cluster_id.split(":")[:-1])
if parent == "":
return None
return parent
def get_subtree(cluster_id):
subtree = ":".join(cluster_id.split(":")[1:])
if subtree == "":
return None
return subtree
def make_leaf_rec(stream, rec_limit=10):
leaf_stream = itertools.groupby(stream, lambda e: e.local)
for (cluster_id, stream) in leaf_stream:
papers = [e for e in stream]
papers = sorted(papers, key=attrgetter('score'), reverse=True)
yield ClusterRecommendation(cluster_id, papers[:rec_limit])
def parse_tree(stream, rec_limit=10):
mstream = make_leaf_rec(stream, rec_limit)
child_stream = itertools.groupby(mstream, lambda e: get_parent(e.cluster_id))
for (parent_cluster_id, recs) in child_stream:
child_recs = [r for r in recs]
papers = itertools.chain.from_iterable(map(attrgetter('papers'), child_recs))
parent_papers = tuple(zip(*sorted(papers, key=itemgetter(1), reverse=True)))[0]
yield (parent_cluster_id, parent_papers[:rec_limit], child_recs)
| self.cluster_id = cluster_id
self.papers = [(p.pid, p.score) for p in papers] | identifier_body |
git.py | from .utils import do, do_ex, trace
from .version import meta
from os.path import abspath, realpath
FILES_COMMAND = 'git ls-files'
DEFAULT_DESCRIBE = 'git describe --dirty --tags --long --match *.*'
def | (root, describe_command=DEFAULT_DESCRIBE):
real_root, _, ret = do_ex('git rev-parse --show-toplevel', root)
if ret:
return
trace('real root', real_root)
if abspath(realpath(real_root)) != abspath(realpath(root)):
return
rev_node, _, ret = do_ex('git rev-parse --verify --quiet HEAD', root)
if ret:
return meta('0.0')
rev_node = rev_node[:7]
out, err, ret = do_ex(describe_command, root)
if '-' not in out and '.' not in out:
revs = do('git rev-list HEAD', root)
count = revs.count('\n')
if ret:
out = rev_node
return meta('0.0', distance=count + 1, node=out)
if ret:
return
dirty = out.endswith('-dirty')
if dirty:
out = out.rsplit('-', 1)[0]
tag, number, node = out.rsplit('-', 2)
number = int(number)
if number:
return meta(tag, distance=number, node=node, dirty=dirty)
else:
return meta(tag, dirty=dirty, node=node)
| parse | identifier_name |
git.py | from .utils import do, do_ex, trace
from .version import meta
from os.path import abspath, realpath
| FILES_COMMAND = 'git ls-files'
DEFAULT_DESCRIBE = 'git describe --dirty --tags --long --match *.*'
def parse(root, describe_command=DEFAULT_DESCRIBE):
real_root, _, ret = do_ex('git rev-parse --show-toplevel', root)
if ret:
return
trace('real root', real_root)
if abspath(realpath(real_root)) != abspath(realpath(root)):
return
rev_node, _, ret = do_ex('git rev-parse --verify --quiet HEAD', root)
if ret:
return meta('0.0')
rev_node = rev_node[:7]
out, err, ret = do_ex(describe_command, root)
if '-' not in out and '.' not in out:
revs = do('git rev-list HEAD', root)
count = revs.count('\n')
if ret:
out = rev_node
return meta('0.0', distance=count + 1, node=out)
if ret:
return
dirty = out.endswith('-dirty')
if dirty:
out = out.rsplit('-', 1)[0]
tag, number, node = out.rsplit('-', 2)
number = int(number)
if number:
return meta(tag, distance=number, node=node, dirty=dirty)
else:
return meta(tag, dirty=dirty, node=node) | random_line_split |
|
git.py | from .utils import do, do_ex, trace
from .version import meta
from os.path import abspath, realpath
FILES_COMMAND = 'git ls-files'
DEFAULT_DESCRIBE = 'git describe --dirty --tags --long --match *.*'
def parse(root, describe_command=DEFAULT_DESCRIBE):
real_root, _, ret = do_ex('git rev-parse --show-toplevel', root)
if ret:
return
trace('real root', real_root)
if abspath(realpath(real_root)) != abspath(realpath(root)):
|
rev_node, _, ret = do_ex('git rev-parse --verify --quiet HEAD', root)
if ret:
return meta('0.0')
rev_node = rev_node[:7]
out, err, ret = do_ex(describe_command, root)
if '-' not in out and '.' not in out:
revs = do('git rev-list HEAD', root)
count = revs.count('\n')
if ret:
out = rev_node
return meta('0.0', distance=count + 1, node=out)
if ret:
return
dirty = out.endswith('-dirty')
if dirty:
out = out.rsplit('-', 1)[0]
tag, number, node = out.rsplit('-', 2)
number = int(number)
if number:
return meta(tag, distance=number, node=node, dirty=dirty)
else:
return meta(tag, dirty=dirty, node=node)
| return | conditional_block |
git.py | from .utils import do, do_ex, trace
from .version import meta
from os.path import abspath, realpath
FILES_COMMAND = 'git ls-files'
DEFAULT_DESCRIBE = 'git describe --dirty --tags --long --match *.*'
def parse(root, describe_command=DEFAULT_DESCRIBE):
| real_root, _, ret = do_ex('git rev-parse --show-toplevel', root)
if ret:
return
trace('real root', real_root)
if abspath(realpath(real_root)) != abspath(realpath(root)):
return
rev_node, _, ret = do_ex('git rev-parse --verify --quiet HEAD', root)
if ret:
return meta('0.0')
rev_node = rev_node[:7]
out, err, ret = do_ex(describe_command, root)
if '-' not in out and '.' not in out:
revs = do('git rev-list HEAD', root)
count = revs.count('\n')
if ret:
out = rev_node
return meta('0.0', distance=count + 1, node=out)
if ret:
return
dirty = out.endswith('-dirty')
if dirty:
out = out.rsplit('-', 1)[0]
tag, number, node = out.rsplit('-', 2)
number = int(number)
if number:
return meta(tag, distance=number, node=node, dirty=dirty)
else:
return meta(tag, dirty=dirty, node=node) | identifier_body |
|
class.spec.ts | /**
* @license
* Copyright Renobi. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://opensource.org/licenses/MIT
*/
import { expect } from 'chai';
import { Any, MockType1 } from '../test/any';
import { getClass, getClassOrSymbol, getClassName, getContructor } from '../src/class';
describe('Class', () => {
describe('Get Class', () => {
it('should return prototype', () => {
expect(getClass(MockType1.prototype)).to.equal(MockType1);
});
it('should return class', () => {
expect(getClass(MockType1)).to.equal(MockType1);
expect(getClass(new MockType1)).to.equal(MockType1);
});
});
describe('Get Class Or Symbol', () => {
it('should return symbol', () => {
const symbol = Symbol(Any.string());
expect(getClassOrSymbol(symbol)).to.equal(symbol);
});
});
describe('Get Class Name', () => {
it('should return constructor name', () => {
expect(getClassName(MockType1)).to.equal('MockType1');
});
it('should return function name', () => {
expect(getClassName(new MockType1())).to.equal('MockType1');
});
it('should return symbol string', () => { | describe('Get Constructor', () => {
it('should return constructor if type', () => {
expect(getContructor(MockType1)).to.equal(MockType1);
});
it('should return self if function', () => {
expect(getContructor(new MockType1())).to.equal(MockType1);
});
});
}); | expect(getClassName(Symbol('test:MockType1'))).to.equal('Symbol(test:MockType1)');
});
});
| random_line_split |
confirmationController.js | 'use strict';
/* main App */
var app = angular.module('submitConformationcontroller', []);
app.controller('confirmationCtrl', ['$scope', function($scope){
$scope.volunteerList = ["Joop Bakker", "Dirk Dijkstra", "Sterre Hendriks",
"Hendrik Jacobs", "Hans Heuvel", "Jaap Beek", "Jan-Jaap Dijk",
"Marleen Jansen", "Geert Hoek", "Beer Heuvel"];
$scope.jobTitle = '';
$scope.jobType = '';
$scope.describeWork = '';
function | (name) {
name = name.replace(/[\[]/, "\\[").replace(/[\]]/, "\\]");
var regex = new RegExp("[\\?&]" + name + "=([^&#]*)"),
results = regex.exec(location.search);
return results == null ? "" : decodeURIComponent(results[1].replace(/\+/g, " "));
}
$scope.jobTitle = getParameterByName('jobTitle');
$scope.jobType = getParameterByName('jobType');
$scope.describeWork = getParameterByName('describeWork');
}]); | getParameterByName | identifier_name |
confirmationController.js | 'use strict';
/* main App */
var app = angular.module('submitConformationcontroller', []);
app.controller('confirmationCtrl', ['$scope', function($scope){
$scope.volunteerList = ["Joop Bakker", "Dirk Dijkstra", "Sterre Hendriks",
"Hendrik Jacobs", "Hans Heuvel", "Jaap Beek", "Jan-Jaap Dijk",
"Marleen Jansen", "Geert Hoek", "Beer Heuvel"];
$scope.jobTitle = '';
$scope.jobType = '';
$scope.describeWork = '';
function getParameterByName(name) |
$scope.jobTitle = getParameterByName('jobTitle');
$scope.jobType = getParameterByName('jobType');
$scope.describeWork = getParameterByName('describeWork');
}]); | {
name = name.replace(/[\[]/, "\\[").replace(/[\]]/, "\\]");
var regex = new RegExp("[\\?&]" + name + "=([^&#]*)"),
results = regex.exec(location.search);
return results == null ? "" : decodeURIComponent(results[1].replace(/\+/g, " "));
} | identifier_body |
confirmationController.js | 'use strict';
/* main App */
var app = angular.module('submitConformationcontroller', []);
app.controller('confirmationCtrl', ['$scope', function($scope){
$scope.volunteerList = ["Joop Bakker", "Dirk Dijkstra", "Sterre Hendriks",
"Hendrik Jacobs", "Hans Heuvel", "Jaap Beek", "Jan-Jaap Dijk",
"Marleen Jansen", "Geert Hoek", "Beer Heuvel"];
|
$scope.jobTitle = '';
$scope.jobType = '';
$scope.describeWork = '';
function getParameterByName(name) {
name = name.replace(/[\[]/, "\\[").replace(/[\]]/, "\\]");
var regex = new RegExp("[\\?&]" + name + "=([^&#]*)"),
results = regex.exec(location.search);
return results == null ? "" : decodeURIComponent(results[1].replace(/\+/g, " "));
}
$scope.jobTitle = getParameterByName('jobTitle');
$scope.jobType = getParameterByName('jobType');
$scope.describeWork = getParameterByName('describeWork');
}]); | random_line_split |
|
cstore.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// The crate store - a central repo for information collected about external
// crates and libraries
use metadata::cstore;
use metadata::decoder;
use std::hashmap::HashMap;
use extra;
use syntax::ast;
use syntax::parse::token::ident_interner;
// A map from external crate numbers (as decoded from some crate file) to
// local crate numbers (as generated during this session). Each external
// crate may refer to types in other external crates, and each has their
// own crate numbers.
pub type cnum_map = @mut HashMap<ast::crate_num, ast::crate_num>;
pub struct crate_metadata {
name: @str,
data: @~[u8],
cnum_map: cnum_map,
cnum: ast::crate_num
}
pub struct CStore {
priv metas: HashMap <ast::crate_num, @crate_metadata>,
priv extern_mod_crate_map: extern_mod_crate_map,
priv used_crate_files: ~[Path],
priv used_libraries: ~[@str],
priv used_link_args: ~[@str],
intr: @ident_interner
}
// Map from node_id's of local extern mod statements to crate numbers
type extern_mod_crate_map = HashMap<ast::node_id, ast::crate_num>;
pub fn mk_cstore(intr: @ident_interner) -> CStore {
return CStore {
metas: HashMap::new(),
extern_mod_crate_map: HashMap::new(),
used_crate_files: ~[],
used_libraries: ~[],
used_link_args: ~[],
intr: intr
};
}
pub fn get_crate_data(cstore: &CStore, cnum: ast::crate_num)
-> @crate_metadata {
return *cstore.metas.get(&cnum);
}
pub fn get_crate_hash(cstore: &CStore, cnum: ast::crate_num) -> @str {
let cdata = get_crate_data(cstore, cnum);
decoder::get_crate_hash(cdata.data)
}
pub fn get_crate_vers(cstore: &CStore, cnum: ast::crate_num) -> @str {
let cdata = get_crate_data(cstore, cnum);
decoder::get_crate_vers(cdata.data)
}
pub fn set_crate_data(cstore: &mut CStore,
cnum: ast::crate_num,
data: @crate_metadata) {
cstore.metas.insert(cnum, data);
}
pub fn have_crate_data(cstore: &CStore, cnum: ast::crate_num) -> bool {
cstore.metas.contains_key(&cnum)
}
pub fn iter_crate_data(cstore: &CStore,
i: &fn(ast::crate_num, @crate_metadata)) {
for cstore.metas.iter().advance |(&k, &v)| {
i(k, v);
}
}
pub fn add_used_crate_file(cstore: &mut CStore, lib: &Path) {
if !cstore.used_crate_files.contains(lib) {
cstore.used_crate_files.push(copy *lib);
}
}
pub fn get_used_crate_files(cstore: &CStore) -> ~[Path] {
return /*bad*/copy cstore.used_crate_files;
}
pub fn add_used_library(cstore: &mut CStore, lib: @str) -> bool {
assert!(!lib.is_empty());
if cstore.used_libraries.iter().any_(|x| x == &lib) |
cstore.used_libraries.push(lib);
true
}
pub fn get_used_libraries<'a>(cstore: &'a CStore) -> &'a [@str] {
let slice: &'a [@str] = cstore.used_libraries;
slice
}
pub fn add_used_link_args(cstore: &mut CStore, args: &str) {
for args.split_iter(' ').advance |s| {
cstore.used_link_args.push(s.to_managed());
}
}
pub fn get_used_link_args<'a>(cstore: &'a CStore) -> &'a [@str] {
let slice: &'a [@str] = cstore.used_link_args;
slice
}
pub fn add_extern_mod_stmt_cnum(cstore: &mut CStore,
emod_id: ast::node_id,
cnum: ast::crate_num) {
cstore.extern_mod_crate_map.insert(emod_id, cnum);
}
pub fn find_extern_mod_stmt_cnum(cstore: &CStore,
emod_id: ast::node_id)
-> Option<ast::crate_num> {
cstore.extern_mod_crate_map.find(&emod_id).map_consume(|x| *x)
}
// returns hashes of crates directly used by this crate. Hashes are sorted by
// (crate name, crate version, crate hash) in lexicographic order (not semver)
pub fn get_dep_hashes(cstore: &CStore) -> ~[@str] {
struct crate_hash { name: @str, vers: @str, hash: @str }
let mut result = ~[];
for cstore.extern_mod_crate_map.each_value |&cnum| {
let cdata = cstore::get_crate_data(cstore, cnum);
let hash = decoder::get_crate_hash(cdata.data);
let vers = decoder::get_crate_vers(cdata.data);
debug!("Add hash[%s]: %s %s", cdata.name, vers, hash);
result.push(crate_hash {
name: cdata.name,
vers: vers,
hash: hash
});
}
let sorted = do extra::sort::merge_sort(result) |a, b| {
(a.name, a.vers, a.hash) <= (b.name, b.vers, b.hash)
};
debug!("sorted:");
for sorted.iter().advance |x| {
debug!(" hash[%s]: %s", x.name, x.hash);
}
sorted.map(|ch| ch.hash)
}
| { return false; } | conditional_block |
cstore.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// The crate store - a central repo for information collected about external
// crates and libraries
use metadata::cstore;
use metadata::decoder;
use std::hashmap::HashMap;
use extra;
use syntax::ast;
use syntax::parse::token::ident_interner;
// A map from external crate numbers (as decoded from some crate file) to
// local crate numbers (as generated during this session). Each external
// crate may refer to types in other external crates, and each has their
// own crate numbers.
pub type cnum_map = @mut HashMap<ast::crate_num, ast::crate_num>;
pub struct crate_metadata {
name: @str,
data: @~[u8],
cnum_map: cnum_map,
cnum: ast::crate_num
}
pub struct CStore {
priv metas: HashMap <ast::crate_num, @crate_metadata>,
priv extern_mod_crate_map: extern_mod_crate_map,
priv used_crate_files: ~[Path],
priv used_libraries: ~[@str],
priv used_link_args: ~[@str],
intr: @ident_interner
}
// Map from node_id's of local extern mod statements to crate numbers
type extern_mod_crate_map = HashMap<ast::node_id, ast::crate_num>;
pub fn mk_cstore(intr: @ident_interner) -> CStore {
return CStore {
metas: HashMap::new(),
extern_mod_crate_map: HashMap::new(),
used_crate_files: ~[],
used_libraries: ~[],
used_link_args: ~[],
intr: intr
};
}
pub fn get_crate_data(cstore: &CStore, cnum: ast::crate_num)
-> @crate_metadata {
return *cstore.metas.get(&cnum);
}
pub fn get_crate_hash(cstore: &CStore, cnum: ast::crate_num) -> @str {
let cdata = get_crate_data(cstore, cnum);
decoder::get_crate_hash(cdata.data)
}
pub fn get_crate_vers(cstore: &CStore, cnum: ast::crate_num) -> @str {
let cdata = get_crate_data(cstore, cnum);
decoder::get_crate_vers(cdata.data)
}
pub fn set_crate_data(cstore: &mut CStore,
cnum: ast::crate_num,
data: @crate_metadata) {
cstore.metas.insert(cnum, data);
}
pub fn have_crate_data(cstore: &CStore, cnum: ast::crate_num) -> bool {
cstore.metas.contains_key(&cnum)
}
pub fn iter_crate_data(cstore: &CStore,
i: &fn(ast::crate_num, @crate_metadata)) {
for cstore.metas.iter().advance |(&k, &v)| {
i(k, v);
}
}
pub fn add_used_crate_file(cstore: &mut CStore, lib: &Path) {
if !cstore.used_crate_files.contains(lib) {
cstore.used_crate_files.push(copy *lib);
}
}
pub fn get_used_crate_files(cstore: &CStore) -> ~[Path] {
return /*bad*/copy cstore.used_crate_files;
}
pub fn add_used_library(cstore: &mut CStore, lib: @str) -> bool {
assert!(!lib.is_empty());
if cstore.used_libraries.iter().any_(|x| x == &lib) { return false; }
cstore.used_libraries.push(lib);
true
}
pub fn get_used_libraries<'a>(cstore: &'a CStore) -> &'a [@str] {
let slice: &'a [@str] = cstore.used_libraries;
slice
}
pub fn add_used_link_args(cstore: &mut CStore, args: &str) {
for args.split_iter(' ').advance |s| {
cstore.used_link_args.push(s.to_managed());
}
}
pub fn get_used_link_args<'a>(cstore: &'a CStore) -> &'a [@str] {
let slice: &'a [@str] = cstore.used_link_args;
slice
}
pub fn add_extern_mod_stmt_cnum(cstore: &mut CStore,
emod_id: ast::node_id,
cnum: ast::crate_num) {
cstore.extern_mod_crate_map.insert(emod_id, cnum);
}
pub fn find_extern_mod_stmt_cnum(cstore: &CStore,
emod_id: ast::node_id)
-> Option<ast::crate_num> |
// returns hashes of crates directly used by this crate. Hashes are sorted by
// (crate name, crate version, crate hash) in lexicographic order (not semver)
pub fn get_dep_hashes(cstore: &CStore) -> ~[@str] {
struct crate_hash { name: @str, vers: @str, hash: @str }
let mut result = ~[];
for cstore.extern_mod_crate_map.each_value |&cnum| {
let cdata = cstore::get_crate_data(cstore, cnum);
let hash = decoder::get_crate_hash(cdata.data);
let vers = decoder::get_crate_vers(cdata.data);
debug!("Add hash[%s]: %s %s", cdata.name, vers, hash);
result.push(crate_hash {
name: cdata.name,
vers: vers,
hash: hash
});
}
let sorted = do extra::sort::merge_sort(result) |a, b| {
(a.name, a.vers, a.hash) <= (b.name, b.vers, b.hash)
};
debug!("sorted:");
for sorted.iter().advance |x| {
debug!(" hash[%s]: %s", x.name, x.hash);
}
sorted.map(|ch| ch.hash)
}
| {
cstore.extern_mod_crate_map.find(&emod_id).map_consume(|x| *x)
} | identifier_body |
cstore.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed | // The crate store - a central repo for information collected about external
// crates and libraries
use metadata::cstore;
use metadata::decoder;
use std::hashmap::HashMap;
use extra;
use syntax::ast;
use syntax::parse::token::ident_interner;
// A map from external crate numbers (as decoded from some crate file) to
// local crate numbers (as generated during this session). Each external
// crate may refer to types in other external crates, and each has their
// own crate numbers.
pub type cnum_map = @mut HashMap<ast::crate_num, ast::crate_num>;
pub struct crate_metadata {
name: @str,
data: @~[u8],
cnum_map: cnum_map,
cnum: ast::crate_num
}
pub struct CStore {
priv metas: HashMap <ast::crate_num, @crate_metadata>,
priv extern_mod_crate_map: extern_mod_crate_map,
priv used_crate_files: ~[Path],
priv used_libraries: ~[@str],
priv used_link_args: ~[@str],
intr: @ident_interner
}
// Map from node_id's of local extern mod statements to crate numbers
type extern_mod_crate_map = HashMap<ast::node_id, ast::crate_num>;
pub fn mk_cstore(intr: @ident_interner) -> CStore {
return CStore {
metas: HashMap::new(),
extern_mod_crate_map: HashMap::new(),
used_crate_files: ~[],
used_libraries: ~[],
used_link_args: ~[],
intr: intr
};
}
pub fn get_crate_data(cstore: &CStore, cnum: ast::crate_num)
-> @crate_metadata {
return *cstore.metas.get(&cnum);
}
pub fn get_crate_hash(cstore: &CStore, cnum: ast::crate_num) -> @str {
let cdata = get_crate_data(cstore, cnum);
decoder::get_crate_hash(cdata.data)
}
pub fn get_crate_vers(cstore: &CStore, cnum: ast::crate_num) -> @str {
let cdata = get_crate_data(cstore, cnum);
decoder::get_crate_vers(cdata.data)
}
pub fn set_crate_data(cstore: &mut CStore,
cnum: ast::crate_num,
data: @crate_metadata) {
cstore.metas.insert(cnum, data);
}
pub fn have_crate_data(cstore: &CStore, cnum: ast::crate_num) -> bool {
cstore.metas.contains_key(&cnum)
}
pub fn iter_crate_data(cstore: &CStore,
i: &fn(ast::crate_num, @crate_metadata)) {
for cstore.metas.iter().advance |(&k, &v)| {
i(k, v);
}
}
pub fn add_used_crate_file(cstore: &mut CStore, lib: &Path) {
if !cstore.used_crate_files.contains(lib) {
cstore.used_crate_files.push(copy *lib);
}
}
pub fn get_used_crate_files(cstore: &CStore) -> ~[Path] {
return /*bad*/copy cstore.used_crate_files;
}
pub fn add_used_library(cstore: &mut CStore, lib: @str) -> bool {
assert!(!lib.is_empty());
if cstore.used_libraries.iter().any_(|x| x == &lib) { return false; }
cstore.used_libraries.push(lib);
true
}
pub fn get_used_libraries<'a>(cstore: &'a CStore) -> &'a [@str] {
let slice: &'a [@str] = cstore.used_libraries;
slice
}
pub fn add_used_link_args(cstore: &mut CStore, args: &str) {
for args.split_iter(' ').advance |s| {
cstore.used_link_args.push(s.to_managed());
}
}
pub fn get_used_link_args<'a>(cstore: &'a CStore) -> &'a [@str] {
let slice: &'a [@str] = cstore.used_link_args;
slice
}
pub fn add_extern_mod_stmt_cnum(cstore: &mut CStore,
emod_id: ast::node_id,
cnum: ast::crate_num) {
cstore.extern_mod_crate_map.insert(emod_id, cnum);
}
pub fn find_extern_mod_stmt_cnum(cstore: &CStore,
emod_id: ast::node_id)
-> Option<ast::crate_num> {
cstore.extern_mod_crate_map.find(&emod_id).map_consume(|x| *x)
}
// returns hashes of crates directly used by this crate. Hashes are sorted by
// (crate name, crate version, crate hash) in lexicographic order (not semver)
pub fn get_dep_hashes(cstore: &CStore) -> ~[@str] {
struct crate_hash { name: @str, vers: @str, hash: @str }
let mut result = ~[];
for cstore.extern_mod_crate_map.each_value |&cnum| {
let cdata = cstore::get_crate_data(cstore, cnum);
let hash = decoder::get_crate_hash(cdata.data);
let vers = decoder::get_crate_vers(cdata.data);
debug!("Add hash[%s]: %s %s", cdata.name, vers, hash);
result.push(crate_hash {
name: cdata.name,
vers: vers,
hash: hash
});
}
let sorted = do extra::sort::merge_sort(result) |a, b| {
(a.name, a.vers, a.hash) <= (b.name, b.vers, b.hash)
};
debug!("sorted:");
for sorted.iter().advance |x| {
debug!(" hash[%s]: %s", x.name, x.hash);
}
sorted.map(|ch| ch.hash)
} | // except according to those terms.
| random_line_split |
cstore.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// The crate store - a central repo for information collected about external
// crates and libraries
use metadata::cstore;
use metadata::decoder;
use std::hashmap::HashMap;
use extra;
use syntax::ast;
use syntax::parse::token::ident_interner;
// A map from external crate numbers (as decoded from some crate file) to
// local crate numbers (as generated during this session). Each external
// crate may refer to types in other external crates, and each has their
// own crate numbers.
pub type cnum_map = @mut HashMap<ast::crate_num, ast::crate_num>;
pub struct crate_metadata {
name: @str,
data: @~[u8],
cnum_map: cnum_map,
cnum: ast::crate_num
}
pub struct CStore {
priv metas: HashMap <ast::crate_num, @crate_metadata>,
priv extern_mod_crate_map: extern_mod_crate_map,
priv used_crate_files: ~[Path],
priv used_libraries: ~[@str],
priv used_link_args: ~[@str],
intr: @ident_interner
}
// Map from node_id's of local extern mod statements to crate numbers
type extern_mod_crate_map = HashMap<ast::node_id, ast::crate_num>;
pub fn mk_cstore(intr: @ident_interner) -> CStore {
return CStore {
metas: HashMap::new(),
extern_mod_crate_map: HashMap::new(),
used_crate_files: ~[],
used_libraries: ~[],
used_link_args: ~[],
intr: intr
};
}
pub fn get_crate_data(cstore: &CStore, cnum: ast::crate_num)
-> @crate_metadata {
return *cstore.metas.get(&cnum);
}
pub fn get_crate_hash(cstore: &CStore, cnum: ast::crate_num) -> @str {
let cdata = get_crate_data(cstore, cnum);
decoder::get_crate_hash(cdata.data)
}
pub fn | (cstore: &CStore, cnum: ast::crate_num) -> @str {
let cdata = get_crate_data(cstore, cnum);
decoder::get_crate_vers(cdata.data)
}
pub fn set_crate_data(cstore: &mut CStore,
cnum: ast::crate_num,
data: @crate_metadata) {
cstore.metas.insert(cnum, data);
}
pub fn have_crate_data(cstore: &CStore, cnum: ast::crate_num) -> bool {
cstore.metas.contains_key(&cnum)
}
pub fn iter_crate_data(cstore: &CStore,
i: &fn(ast::crate_num, @crate_metadata)) {
for cstore.metas.iter().advance |(&k, &v)| {
i(k, v);
}
}
pub fn add_used_crate_file(cstore: &mut CStore, lib: &Path) {
if !cstore.used_crate_files.contains(lib) {
cstore.used_crate_files.push(copy *lib);
}
}
pub fn get_used_crate_files(cstore: &CStore) -> ~[Path] {
return /*bad*/copy cstore.used_crate_files;
}
pub fn add_used_library(cstore: &mut CStore, lib: @str) -> bool {
assert!(!lib.is_empty());
if cstore.used_libraries.iter().any_(|x| x == &lib) { return false; }
cstore.used_libraries.push(lib);
true
}
pub fn get_used_libraries<'a>(cstore: &'a CStore) -> &'a [@str] {
let slice: &'a [@str] = cstore.used_libraries;
slice
}
pub fn add_used_link_args(cstore: &mut CStore, args: &str) {
for args.split_iter(' ').advance |s| {
cstore.used_link_args.push(s.to_managed());
}
}
pub fn get_used_link_args<'a>(cstore: &'a CStore) -> &'a [@str] {
let slice: &'a [@str] = cstore.used_link_args;
slice
}
pub fn add_extern_mod_stmt_cnum(cstore: &mut CStore,
emod_id: ast::node_id,
cnum: ast::crate_num) {
cstore.extern_mod_crate_map.insert(emod_id, cnum);
}
pub fn find_extern_mod_stmt_cnum(cstore: &CStore,
emod_id: ast::node_id)
-> Option<ast::crate_num> {
cstore.extern_mod_crate_map.find(&emod_id).map_consume(|x| *x)
}
// returns hashes of crates directly used by this crate. Hashes are sorted by
// (crate name, crate version, crate hash) in lexicographic order (not semver)
pub fn get_dep_hashes(cstore: &CStore) -> ~[@str] {
struct crate_hash { name: @str, vers: @str, hash: @str }
let mut result = ~[];
for cstore.extern_mod_crate_map.each_value |&cnum| {
let cdata = cstore::get_crate_data(cstore, cnum);
let hash = decoder::get_crate_hash(cdata.data);
let vers = decoder::get_crate_vers(cdata.data);
debug!("Add hash[%s]: %s %s", cdata.name, vers, hash);
result.push(crate_hash {
name: cdata.name,
vers: vers,
hash: hash
});
}
let sorted = do extra::sort::merge_sort(result) |a, b| {
(a.name, a.vers, a.hash) <= (b.name, b.vers, b.hash)
};
debug!("sorted:");
for sorted.iter().advance |x| {
debug!(" hash[%s]: %s", x.name, x.hash);
}
sorted.map(|ch| ch.hash)
}
| get_crate_vers | identifier_name |
test_program_code.py | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import unittest
from multiprocessing import Process
import signal
import numpy
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle.fluid.layers.io import ListenAndServ
from paddle.fluid.layers.io import Recv
from paddle.fluid.layers.io import Send
import paddle.fluid.layers.ops as ops
from paddle.fluid.transpiler.details import program_to_code
class TestProgram2Code(unittest.TestCase):
|
if __name__ == "__main__":
unittest.main()
| def test_print(self):
place = fluid.CPUPlace()
self.init_serv(place)
self.init_client(place, 9123)
def init_serv(self, place):
main = fluid.Program()
with fluid.program_guard(main):
serv = ListenAndServ("127.0.0.1:0", ["X"], optimizer_mode=False)
with serv.do():
out_var = main.global_block().create_var(
name="scale_0.tmp_0",
psersistable=True,
dtype="float32",
shape=[32, 32])
x = layers.data(
shape=[32, 32],
dtype='float32',
name="X",
append_batch_size=False)
fluid.initializer.Constant(value=1.0)(x, main.global_block())
ops._scale(x=x, scale=10.0, out=out_var)
program_to_code(main)
def init_client(self, place, port):
main = fluid.Program()
with fluid.program_guard(main):
x = layers.data(
shape=[32, 32],
dtype='float32',
name='X',
append_batch_size=False)
fluid.initializer.Constant(value=2.3)(x, main.global_block())
get_var = main.global_block().create_var(
name="scale_0.tmp_0", # server side var
dtype="float32",
persistable=False,
shape=[32, 32])
fluid.initializer.Constant(value=2.3)(get_var, main.global_block())
Send("127.0.0.1:%d" % port, [x])
o = Recv("127.0.0.1:%d" % port, [get_var])
program_to_code(main) | identifier_body |
test_program_code.py | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import unittest
from multiprocessing import Process
import signal
import numpy
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle.fluid.layers.io import ListenAndServ
from paddle.fluid.layers.io import Recv
from paddle.fluid.layers.io import Send
import paddle.fluid.layers.ops as ops
from paddle.fluid.transpiler.details import program_to_code
class TestProgram2Code(unittest.TestCase):
def test_print(self):
place = fluid.CPUPlace()
self.init_serv(place)
self.init_client(place, 9123)
def | (self, place):
main = fluid.Program()
with fluid.program_guard(main):
serv = ListenAndServ("127.0.0.1:0", ["X"], optimizer_mode=False)
with serv.do():
out_var = main.global_block().create_var(
name="scale_0.tmp_0",
psersistable=True,
dtype="float32",
shape=[32, 32])
x = layers.data(
shape=[32, 32],
dtype='float32',
name="X",
append_batch_size=False)
fluid.initializer.Constant(value=1.0)(x, main.global_block())
ops._scale(x=x, scale=10.0, out=out_var)
program_to_code(main)
def init_client(self, place, port):
main = fluid.Program()
with fluid.program_guard(main):
x = layers.data(
shape=[32, 32],
dtype='float32',
name='X',
append_batch_size=False)
fluid.initializer.Constant(value=2.3)(x, main.global_block())
get_var = main.global_block().create_var(
name="scale_0.tmp_0", # server side var
dtype="float32",
persistable=False,
shape=[32, 32])
fluid.initializer.Constant(value=2.3)(get_var, main.global_block())
Send("127.0.0.1:%d" % port, [x])
o = Recv("127.0.0.1:%d" % port, [get_var])
program_to_code(main)
if __name__ == "__main__":
unittest.main()
| init_serv | identifier_name |
test_program_code.py | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import unittest
from multiprocessing import Process
import signal
import numpy
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle.fluid.layers.io import ListenAndServ
from paddle.fluid.layers.io import Recv
from paddle.fluid.layers.io import Send
import paddle.fluid.layers.ops as ops
from paddle.fluid.transpiler.details import program_to_code
class TestProgram2Code(unittest.TestCase):
def test_print(self):
place = fluid.CPUPlace()
self.init_serv(place)
self.init_client(place, 9123)
def init_serv(self, place):
main = fluid.Program() | with fluid.program_guard(main):
serv = ListenAndServ("127.0.0.1:0", ["X"], optimizer_mode=False)
with serv.do():
out_var = main.global_block().create_var(
name="scale_0.tmp_0",
psersistable=True,
dtype="float32",
shape=[32, 32])
x = layers.data(
shape=[32, 32],
dtype='float32',
name="X",
append_batch_size=False)
fluid.initializer.Constant(value=1.0)(x, main.global_block())
ops._scale(x=x, scale=10.0, out=out_var)
program_to_code(main)
def init_client(self, place, port):
main = fluid.Program()
with fluid.program_guard(main):
x = layers.data(
shape=[32, 32],
dtype='float32',
name='X',
append_batch_size=False)
fluid.initializer.Constant(value=2.3)(x, main.global_block())
get_var = main.global_block().create_var(
name="scale_0.tmp_0", # server side var
dtype="float32",
persistable=False,
shape=[32, 32])
fluid.initializer.Constant(value=2.3)(get_var, main.global_block())
Send("127.0.0.1:%d" % port, [x])
o = Recv("127.0.0.1:%d" % port, [get_var])
program_to_code(main)
if __name__ == "__main__":
unittest.main() | random_line_split |
|
test_program_code.py | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import unittest
from multiprocessing import Process
import signal
import numpy
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle.fluid.layers.io import ListenAndServ
from paddle.fluid.layers.io import Recv
from paddle.fluid.layers.io import Send
import paddle.fluid.layers.ops as ops
from paddle.fluid.transpiler.details import program_to_code
class TestProgram2Code(unittest.TestCase):
def test_print(self):
place = fluid.CPUPlace()
self.init_serv(place)
self.init_client(place, 9123)
def init_serv(self, place):
main = fluid.Program()
with fluid.program_guard(main):
serv = ListenAndServ("127.0.0.1:0", ["X"], optimizer_mode=False)
with serv.do():
out_var = main.global_block().create_var(
name="scale_0.tmp_0",
psersistable=True,
dtype="float32",
shape=[32, 32])
x = layers.data(
shape=[32, 32],
dtype='float32',
name="X",
append_batch_size=False)
fluid.initializer.Constant(value=1.0)(x, main.global_block())
ops._scale(x=x, scale=10.0, out=out_var)
program_to_code(main)
def init_client(self, place, port):
main = fluid.Program()
with fluid.program_guard(main):
x = layers.data(
shape=[32, 32],
dtype='float32',
name='X',
append_batch_size=False)
fluid.initializer.Constant(value=2.3)(x, main.global_block())
get_var = main.global_block().create_var(
name="scale_0.tmp_0", # server side var
dtype="float32",
persistable=False,
shape=[32, 32])
fluid.initializer.Constant(value=2.3)(get_var, main.global_block())
Send("127.0.0.1:%d" % port, [x])
o = Recv("127.0.0.1:%d" % port, [get_var])
program_to_code(main)
if __name__ == "__main__":
| unittest.main() | conditional_block |
|
ShouldDecorateChildren.js | import React from 'react';
import ReactTouchPosition from '../../../dist/ReactTouchPosition';
import TouchPositionLabel from './TouchPositionLabel';
import OnPositionChangedLabel from './OnPositionChangedLabel';
import InstructionsLabel from './InstructionsLabel';
export default class extends React.Component {
constructor(props) {
super(props);
this.state = {
isPositionOutside: true,
touchPosition: {
x: 0,
y: 0,
}
}
}
render() {
return (
<div className="example-container">
<ReactTouchPosition {...{
className: 'example',
onPositionChanged: ({ isPositionOutside, touchPosition }) => {
this.setState({
isPositionOutside,
touchPosition
});
},
shouldDecorateChildren: false
}}>
<TouchPositionLabel />
<InstructionsLabel />
</ReactTouchPosition>
<OnPositionChangedLabel {...this.state} />
</div>
);
} | } | random_line_split |
|
ShouldDecorateChildren.js | import React from 'react';
import ReactTouchPosition from '../../../dist/ReactTouchPosition';
import TouchPositionLabel from './TouchPositionLabel';
import OnPositionChangedLabel from './OnPositionChangedLabel';
import InstructionsLabel from './InstructionsLabel';
export default class extends React.Component {
constructor(props) {
super(props);
this.state = {
isPositionOutside: true,
touchPosition: {
x: 0,
y: 0,
}
}
}
render() |
}
| {
return (
<div className="example-container">
<ReactTouchPosition {...{
className: 'example',
onPositionChanged: ({ isPositionOutside, touchPosition }) => {
this.setState({
isPositionOutside,
touchPosition
});
},
shouldDecorateChildren: false
}}>
<TouchPositionLabel />
<InstructionsLabel />
</ReactTouchPosition>
<OnPositionChangedLabel {...this.state} />
</div>
);
} | identifier_body |
ShouldDecorateChildren.js | import React from 'react';
import ReactTouchPosition from '../../../dist/ReactTouchPosition';
import TouchPositionLabel from './TouchPositionLabel';
import OnPositionChangedLabel from './OnPositionChangedLabel';
import InstructionsLabel from './InstructionsLabel';
export default class extends React.Component {
| (props) {
super(props);
this.state = {
isPositionOutside: true,
touchPosition: {
x: 0,
y: 0,
}
}
}
render() {
return (
<div className="example-container">
<ReactTouchPosition {...{
className: 'example',
onPositionChanged: ({ isPositionOutside, touchPosition }) => {
this.setState({
isPositionOutside,
touchPosition
});
},
shouldDecorateChildren: false
}}>
<TouchPositionLabel />
<InstructionsLabel />
</ReactTouchPosition>
<OnPositionChangedLabel {...this.state} />
</div>
);
}
}
| constructor | identifier_name |
connect-vue.ts | import { join, relative } from 'path';
import { red } from 'colors/safe';
import { FileSystem } from '../../file-system';
export function connectVue(path: string) {
const fs = new FileSystem();
if (!fs.exists(path)) {
if (process.env.P1Z7kEbSUUPMxF8GqPwD8Gx_FOAL_CLI_TEST !== 'true') {
console.log(red(` The directory ${path} does not exist.`));
}
return;
}
if (!fs.exists(join(path, 'package.json'))) {
if (process.env.P1Z7kEbSUUPMxF8GqPwD8Gx_FOAL_CLI_TEST !== 'true') {
console.log(red(` The directory ${path} is not a Vue project (missing package.json).`));
}
return;
}
fs
.cd(path)
.modify('package.json', content => {
const pkg = JSON.parse(content);
pkg.vue = pkg.vue || {};
// Proxy configuration
pkg.vue.devServer = pkg.vue.devServer || {};
pkg.vue.devServer.proxy = pkg.vue.devServer.proxy || {};
pkg.vue.devServer.proxy['^/api'] = { target: 'http://localhost:3001' };
// Output build directory
const outputPath = join(relative(path, process.cwd()), 'public')
// Make projects generated on Windows build on Unix.
.replace(/\\/g, '/'); | });
} | pkg.vue.outputDir = outputPath;
return JSON.stringify(pkg, null, 2); | random_line_split |
connect-vue.ts | import { join, relative } from 'path';
import { red } from 'colors/safe';
import { FileSystem } from '../../file-system';
export function connectVue(path: string) {
const fs = new FileSystem();
if (!fs.exists(path)) {
if (process.env.P1Z7kEbSUUPMxF8GqPwD8Gx_FOAL_CLI_TEST !== 'true') |
return;
}
if (!fs.exists(join(path, 'package.json'))) {
if (process.env.P1Z7kEbSUUPMxF8GqPwD8Gx_FOAL_CLI_TEST !== 'true') {
console.log(red(` The directory ${path} is not a Vue project (missing package.json).`));
}
return;
}
fs
.cd(path)
.modify('package.json', content => {
const pkg = JSON.parse(content);
pkg.vue = pkg.vue || {};
// Proxy configuration
pkg.vue.devServer = pkg.vue.devServer || {};
pkg.vue.devServer.proxy = pkg.vue.devServer.proxy || {};
pkg.vue.devServer.proxy['^/api'] = { target: 'http://localhost:3001' };
// Output build directory
const outputPath = join(relative(path, process.cwd()), 'public')
// Make projects generated on Windows build on Unix.
.replace(/\\/g, '/');
pkg.vue.outputDir = outputPath;
return JSON.stringify(pkg, null, 2);
});
}
| {
console.log(red(` The directory ${path} does not exist.`));
} | conditional_block |
connect-vue.ts | import { join, relative } from 'path';
import { red } from 'colors/safe';
import { FileSystem } from '../../file-system';
export function connectVue(path: string) | {
const fs = new FileSystem();
if (!fs.exists(path)) {
if (process.env.P1Z7kEbSUUPMxF8GqPwD8Gx_FOAL_CLI_TEST !== 'true') {
console.log(red(` The directory ${path} does not exist.`));
}
return;
}
if (!fs.exists(join(path, 'package.json'))) {
if (process.env.P1Z7kEbSUUPMxF8GqPwD8Gx_FOAL_CLI_TEST !== 'true') {
console.log(red(` The directory ${path} is not a Vue project (missing package.json).`));
}
return;
}
fs
.cd(path)
.modify('package.json', content => {
const pkg = JSON.parse(content);
pkg.vue = pkg.vue || {};
// Proxy configuration
pkg.vue.devServer = pkg.vue.devServer || {};
pkg.vue.devServer.proxy = pkg.vue.devServer.proxy || {};
pkg.vue.devServer.proxy['^/api'] = { target: 'http://localhost:3001' };
// Output build directory
const outputPath = join(relative(path, process.cwd()), 'public')
// Make projects generated on Windows build on Unix.
.replace(/\\/g, '/');
pkg.vue.outputDir = outputPath;
return JSON.stringify(pkg, null, 2);
});
} | identifier_body |
|
connect-vue.ts | import { join, relative } from 'path';
import { red } from 'colors/safe';
import { FileSystem } from '../../file-system';
export function | (path: string) {
const fs = new FileSystem();
if (!fs.exists(path)) {
if (process.env.P1Z7kEbSUUPMxF8GqPwD8Gx_FOAL_CLI_TEST !== 'true') {
console.log(red(` The directory ${path} does not exist.`));
}
return;
}
if (!fs.exists(join(path, 'package.json'))) {
if (process.env.P1Z7kEbSUUPMxF8GqPwD8Gx_FOAL_CLI_TEST !== 'true') {
console.log(red(` The directory ${path} is not a Vue project (missing package.json).`));
}
return;
}
fs
.cd(path)
.modify('package.json', content => {
const pkg = JSON.parse(content);
pkg.vue = pkg.vue || {};
// Proxy configuration
pkg.vue.devServer = pkg.vue.devServer || {};
pkg.vue.devServer.proxy = pkg.vue.devServer.proxy || {};
pkg.vue.devServer.proxy['^/api'] = { target: 'http://localhost:3001' };
// Output build directory
const outputPath = join(relative(path, process.cwd()), 'public')
// Make projects generated on Windows build on Unix.
.replace(/\\/g, '/');
pkg.vue.outputDir = outputPath;
return JSON.stringify(pkg, null, 2);
});
}
| connectVue | identifier_name |
stats_bootstrap.py | import random
from math import sqrt
import numpy
def validate_cost(result, boot_size, delta=500):
budget = result["budget"]
for metric_name, _, data_process in result['analysis']:
if metric_name == "cost":
cost_data = list(x() for x in data_process)
data_analysis = yield_analysis(cost_data, boot_size)
cost_val = data_analysis["btstrpd"]["metrics"]
return cost_val <= budget + delta
return True
def find_acceptable_result_for_budget(results, boot_size):
delta = 500
prev_budget = results[-1]['budget']
for result in reversed(results):
budget = result['budget']
delta += prev_budget - budget
if validate_cost(result,boot_size, delta):
return result
prev_budget = budget
return None
def average(xs):
if len(xs) == 0:
|
return sum(xs) * 1.0 / len(xs)
def sample_wr(population, k):
"""Chooses k random elements (with replacement) from a population"""
n = len(population) - 1
return [population[int(random.randint(0, n))] for i in range(k)]
def bootstrap(population, f, n, k, alpha):
btstrp = sorted(f(sample_wr(population, k)) for i in range(n))
return {
"confidence": 100.0 * (1 - 2 * alpha),
"from": btstrp[int(1.0 * n * alpha)],
"to": btstrp[int(1.0 * n * (1 - alpha))],
"metrics": f(population)
}
def yield_analysis(data_process, boot_size):
q1 = numpy.percentile(data_process, 25)
q3 = numpy.percentile(data_process, 75)
iq = q3 - q1
low_inn_fence = q1 - 1.5*iq
upp_inn_fence = q3 + 1.5*iq
low_out_fence = q1 - 3*iq
upp_out_fence = q3 + 3*iq
# noinspection PyRedeclaratione
extr_outliers = len([x
for x in data_process
if (x < low_out_fence or upp_out_fence < x)])
# noinspection PyRedeclaration
mild_outliers = len([x for x in data_process if (x < low_inn_fence or upp_inn_fence < x)]) - extr_outliers
extr_outliers = extr_outliers > 0 and "{0:6.2f}%".format(extr_outliers * 100.0 / len(data_process)) or "--"
mild_outliers = mild_outliers > 0 and "{0:6.2f}%".format(mild_outliers * 100.0 / len(data_process)) or "--"
metrics_nooutliers = average([x for x in data_process if low_inn_fence <= x <= upp_inn_fence])
try:
mean_nooutliers = float(average([x for x in data_process if low_inn_fence <= x <= upp_inn_fence]))
variance_nooutliers = [(x - mean_nooutliers) ** 2 for x in data_process if low_inn_fence <= x <= upp_inn_fence]
stdev_nooutliers = sqrt(average(variance_nooutliers))
except ValueError:
stdev_nooutliers = -float("inf")
mean_nooutliers = float("inf")
btstrpd = bootstrap(data_process, average, boot_size, int(len(data_process) * 0.66), 0.025)
goodbench = "✓"
try:
mean = float(average(data_process))
variance = [(x - mean) ** 2 for x in data_process]
stdev = sqrt(average(variance))
lower = mean - 3 * stdev
upper = mean + 3 * stdev
if len([x for x in data_process if lower <= x <= upper]) < 0.95 * len(data_process):
goodbench = "╳╳╳╳╳"
except ValueError:
stdev = lower = upper = mean = float("inf")
goodbench = "?"
try:
mean_nooutliers_diff = 100.0 * (mean_nooutliers - mean) / mean
except ZeroDivisionError:
mean_nooutliers_diff = float("inf")
try:
stdev_nooutliers_diff = 100.0 * (stdev_nooutliers - stdev) / stdev
except ZeroDivisionError:
stdev_nooutliers_diff = float("inf")
dispersion_warn = ""
try:
pr_dispersion = 100.0 * (float(btstrpd["to"]) - float(btstrpd["from"])) / btstrpd["metrics"]
if abs(pr_dispersion) > 30.:
dispersion_warn = " HIGH"
except ZeroDivisionError:
pr_dispersion = float("+Infinity")
return {
"low_inn_fence": low_inn_fence,
"upp_inn_fence": upp_inn_fence,
"low_out_fence": low_out_fence,
"upp_out_fence": upp_out_fence,
"stdev": stdev,
"mean": mean,
"lower": lower,
"upper": upper,
"goodbench": goodbench,
"btstrpd": btstrpd,
"mild_outliers": mild_outliers,
"extr_outliers": extr_outliers,
"metrics_nooutliers": metrics_nooutliers,
"mean_nooutliers_diff": mean_nooutliers_diff,
"stdev_nooutliers": stdev_nooutliers,
"stdev_nooutliers_diff": stdev_nooutliers_diff,
"pr_dispersion": pr_dispersion,
"dispersion_warn": dispersion_warn
}
# return low_inn_fence, upp_inn_fence, low_out_fence, upp_out_fence, stdev, mean, lower, upper, goodbench, btstrpd,
# stdev, mild_outliers, extr_outliers, metrics_nooutliers, mean_nooutliers_diff, stdev_nooutliers,
# stdev_nooutliers_diff, pr_dispersion, dispersion_warn | return -float("inf") | conditional_block |
stats_bootstrap.py | import random
from math import sqrt
import numpy
def validate_cost(result, boot_size, delta=500):
budget = result["budget"]
for metric_name, _, data_process in result['analysis']:
if metric_name == "cost":
cost_data = list(x() for x in data_process)
data_analysis = yield_analysis(cost_data, boot_size)
cost_val = data_analysis["btstrpd"]["metrics"]
return cost_val <= budget + delta
return True
def find_acceptable_result_for_budget(results, boot_size):
delta = 500
prev_budget = results[-1]['budget']
for result in reversed(results):
budget = result['budget']
delta += prev_budget - budget
if validate_cost(result,boot_size, delta):
return result
prev_budget = budget
return None
def average(xs):
if len(xs) == 0:
return -float("inf")
return sum(xs) * 1.0 / len(xs)
def sample_wr(population, k):
|
def bootstrap(population, f, n, k, alpha):
btstrp = sorted(f(sample_wr(population, k)) for i in range(n))
return {
"confidence": 100.0 * (1 - 2 * alpha),
"from": btstrp[int(1.0 * n * alpha)],
"to": btstrp[int(1.0 * n * (1 - alpha))],
"metrics": f(population)
}
def yield_analysis(data_process, boot_size):
q1 = numpy.percentile(data_process, 25)
q3 = numpy.percentile(data_process, 75)
iq = q3 - q1
low_inn_fence = q1 - 1.5*iq
upp_inn_fence = q3 + 1.5*iq
low_out_fence = q1 - 3*iq
upp_out_fence = q3 + 3*iq
# noinspection PyRedeclaratione
extr_outliers = len([x
for x in data_process
if (x < low_out_fence or upp_out_fence < x)])
# noinspection PyRedeclaration
mild_outliers = len([x for x in data_process if (x < low_inn_fence or upp_inn_fence < x)]) - extr_outliers
extr_outliers = extr_outliers > 0 and "{0:6.2f}%".format(extr_outliers * 100.0 / len(data_process)) or "--"
mild_outliers = mild_outliers > 0 and "{0:6.2f}%".format(mild_outliers * 100.0 / len(data_process)) or "--"
metrics_nooutliers = average([x for x in data_process if low_inn_fence <= x <= upp_inn_fence])
try:
mean_nooutliers = float(average([x for x in data_process if low_inn_fence <= x <= upp_inn_fence]))
variance_nooutliers = [(x - mean_nooutliers) ** 2 for x in data_process if low_inn_fence <= x <= upp_inn_fence]
stdev_nooutliers = sqrt(average(variance_nooutliers))
except ValueError:
stdev_nooutliers = -float("inf")
mean_nooutliers = float("inf")
btstrpd = bootstrap(data_process, average, boot_size, int(len(data_process) * 0.66), 0.025)
goodbench = "✓"
try:
mean = float(average(data_process))
variance = [(x - mean) ** 2 for x in data_process]
stdev = sqrt(average(variance))
lower = mean - 3 * stdev
upper = mean + 3 * stdev
if len([x for x in data_process if lower <= x <= upper]) < 0.95 * len(data_process):
goodbench = "╳╳╳╳╳"
except ValueError:
stdev = lower = upper = mean = float("inf")
goodbench = "?"
try:
mean_nooutliers_diff = 100.0 * (mean_nooutliers - mean) / mean
except ZeroDivisionError:
mean_nooutliers_diff = float("inf")
try:
stdev_nooutliers_diff = 100.0 * (stdev_nooutliers - stdev) / stdev
except ZeroDivisionError:
stdev_nooutliers_diff = float("inf")
dispersion_warn = ""
try:
pr_dispersion = 100.0 * (float(btstrpd["to"]) - float(btstrpd["from"])) / btstrpd["metrics"]
if abs(pr_dispersion) > 30.:
dispersion_warn = " HIGH"
except ZeroDivisionError:
pr_dispersion = float("+Infinity")
return {
"low_inn_fence": low_inn_fence,
"upp_inn_fence": upp_inn_fence,
"low_out_fence": low_out_fence,
"upp_out_fence": upp_out_fence,
"stdev": stdev,
"mean": mean,
"lower": lower,
"upper": upper,
"goodbench": goodbench,
"btstrpd": btstrpd,
"mild_outliers": mild_outliers,
"extr_outliers": extr_outliers,
"metrics_nooutliers": metrics_nooutliers,
"mean_nooutliers_diff": mean_nooutliers_diff,
"stdev_nooutliers": stdev_nooutliers,
"stdev_nooutliers_diff": stdev_nooutliers_diff,
"pr_dispersion": pr_dispersion,
"dispersion_warn": dispersion_warn
}
# return low_inn_fence, upp_inn_fence, low_out_fence, upp_out_fence, stdev, mean, lower, upper, goodbench, btstrpd,
# stdev, mild_outliers, extr_outliers, metrics_nooutliers, mean_nooutliers_diff, stdev_nooutliers,
# stdev_nooutliers_diff, pr_dispersion, dispersion_warn | """Chooses k random elements (with replacement) from a population"""
n = len(population) - 1
return [population[int(random.randint(0, n))] for i in range(k)] | identifier_body |
stats_bootstrap.py | import random
from math import sqrt
import numpy
def validate_cost(result, boot_size, delta=500):
budget = result["budget"]
for metric_name, _, data_process in result['analysis']:
if metric_name == "cost":
cost_data = list(x() for x in data_process)
data_analysis = yield_analysis(cost_data, boot_size)
cost_val = data_analysis["btstrpd"]["metrics"]
return cost_val <= budget + delta
return True
def find_acceptable_result_for_budget(results, boot_size):
delta = 500
prev_budget = results[-1]['budget']
for result in reversed(results):
budget = result['budget']
delta += prev_budget - budget
if validate_cost(result,boot_size, delta): | if len(xs) == 0:
return -float("inf")
return sum(xs) * 1.0 / len(xs)
def sample_wr(population, k):
"""Chooses k random elements (with replacement) from a population"""
n = len(population) - 1
return [population[int(random.randint(0, n))] for i in range(k)]
def bootstrap(population, f, n, k, alpha):
btstrp = sorted(f(sample_wr(population, k)) for i in range(n))
return {
"confidence": 100.0 * (1 - 2 * alpha),
"from": btstrp[int(1.0 * n * alpha)],
"to": btstrp[int(1.0 * n * (1 - alpha))],
"metrics": f(population)
}
def yield_analysis(data_process, boot_size):
q1 = numpy.percentile(data_process, 25)
q3 = numpy.percentile(data_process, 75)
iq = q3 - q1
low_inn_fence = q1 - 1.5*iq
upp_inn_fence = q3 + 1.5*iq
low_out_fence = q1 - 3*iq
upp_out_fence = q3 + 3*iq
# noinspection PyRedeclaratione
extr_outliers = len([x
for x in data_process
if (x < low_out_fence or upp_out_fence < x)])
# noinspection PyRedeclaration
mild_outliers = len([x for x in data_process if (x < low_inn_fence or upp_inn_fence < x)]) - extr_outliers
extr_outliers = extr_outliers > 0 and "{0:6.2f}%".format(extr_outliers * 100.0 / len(data_process)) or "--"
mild_outliers = mild_outliers > 0 and "{0:6.2f}%".format(mild_outliers * 100.0 / len(data_process)) or "--"
metrics_nooutliers = average([x for x in data_process if low_inn_fence <= x <= upp_inn_fence])
try:
mean_nooutliers = float(average([x for x in data_process if low_inn_fence <= x <= upp_inn_fence]))
variance_nooutliers = [(x - mean_nooutliers) ** 2 for x in data_process if low_inn_fence <= x <= upp_inn_fence]
stdev_nooutliers = sqrt(average(variance_nooutliers))
except ValueError:
stdev_nooutliers = -float("inf")
mean_nooutliers = float("inf")
btstrpd = bootstrap(data_process, average, boot_size, int(len(data_process) * 0.66), 0.025)
goodbench = "✓"
try:
mean = float(average(data_process))
variance = [(x - mean) ** 2 for x in data_process]
stdev = sqrt(average(variance))
lower = mean - 3 * stdev
upper = mean + 3 * stdev
if len([x for x in data_process if lower <= x <= upper]) < 0.95 * len(data_process):
goodbench = "╳╳╳╳╳"
except ValueError:
stdev = lower = upper = mean = float("inf")
goodbench = "?"
try:
mean_nooutliers_diff = 100.0 * (mean_nooutliers - mean) / mean
except ZeroDivisionError:
mean_nooutliers_diff = float("inf")
try:
stdev_nooutliers_diff = 100.0 * (stdev_nooutliers - stdev) / stdev
except ZeroDivisionError:
stdev_nooutliers_diff = float("inf")
dispersion_warn = ""
try:
pr_dispersion = 100.0 * (float(btstrpd["to"]) - float(btstrpd["from"])) / btstrpd["metrics"]
if abs(pr_dispersion) > 30.:
dispersion_warn = " HIGH"
except ZeroDivisionError:
pr_dispersion = float("+Infinity")
return {
"low_inn_fence": low_inn_fence,
"upp_inn_fence": upp_inn_fence,
"low_out_fence": low_out_fence,
"upp_out_fence": upp_out_fence,
"stdev": stdev,
"mean": mean,
"lower": lower,
"upper": upper,
"goodbench": goodbench,
"btstrpd": btstrpd,
"mild_outliers": mild_outliers,
"extr_outliers": extr_outliers,
"metrics_nooutliers": metrics_nooutliers,
"mean_nooutliers_diff": mean_nooutliers_diff,
"stdev_nooutliers": stdev_nooutliers,
"stdev_nooutliers_diff": stdev_nooutliers_diff,
"pr_dispersion": pr_dispersion,
"dispersion_warn": dispersion_warn
}
# return low_inn_fence, upp_inn_fence, low_out_fence, upp_out_fence, stdev, mean, lower, upper, goodbench, btstrpd,
# stdev, mild_outliers, extr_outliers, metrics_nooutliers, mean_nooutliers_diff, stdev_nooutliers,
# stdev_nooutliers_diff, pr_dispersion, dispersion_warn | return result
prev_budget = budget
return None
def average(xs): | random_line_split |
stats_bootstrap.py | import random
from math import sqrt
import numpy
def | (result, boot_size, delta=500):
budget = result["budget"]
for metric_name, _, data_process in result['analysis']:
if metric_name == "cost":
cost_data = list(x() for x in data_process)
data_analysis = yield_analysis(cost_data, boot_size)
cost_val = data_analysis["btstrpd"]["metrics"]
return cost_val <= budget + delta
return True
def find_acceptable_result_for_budget(results, boot_size):
delta = 500
prev_budget = results[-1]['budget']
for result in reversed(results):
budget = result['budget']
delta += prev_budget - budget
if validate_cost(result,boot_size, delta):
return result
prev_budget = budget
return None
def average(xs):
if len(xs) == 0:
return -float("inf")
return sum(xs) * 1.0 / len(xs)
def sample_wr(population, k):
"""Chooses k random elements (with replacement) from a population"""
n = len(population) - 1
return [population[int(random.randint(0, n))] for i in range(k)]
def bootstrap(population, f, n, k, alpha):
btstrp = sorted(f(sample_wr(population, k)) for i in range(n))
return {
"confidence": 100.0 * (1 - 2 * alpha),
"from": btstrp[int(1.0 * n * alpha)],
"to": btstrp[int(1.0 * n * (1 - alpha))],
"metrics": f(population)
}
def yield_analysis(data_process, boot_size):
q1 = numpy.percentile(data_process, 25)
q3 = numpy.percentile(data_process, 75)
iq = q3 - q1
low_inn_fence = q1 - 1.5*iq
upp_inn_fence = q3 + 1.5*iq
low_out_fence = q1 - 3*iq
upp_out_fence = q3 + 3*iq
# noinspection PyRedeclaratione
extr_outliers = len([x
for x in data_process
if (x < low_out_fence or upp_out_fence < x)])
# noinspection PyRedeclaration
mild_outliers = len([x for x in data_process if (x < low_inn_fence or upp_inn_fence < x)]) - extr_outliers
extr_outliers = extr_outliers > 0 and "{0:6.2f}%".format(extr_outliers * 100.0 / len(data_process)) or "--"
mild_outliers = mild_outliers > 0 and "{0:6.2f}%".format(mild_outliers * 100.0 / len(data_process)) or "--"
metrics_nooutliers = average([x for x in data_process if low_inn_fence <= x <= upp_inn_fence])
try:
mean_nooutliers = float(average([x for x in data_process if low_inn_fence <= x <= upp_inn_fence]))
variance_nooutliers = [(x - mean_nooutliers) ** 2 for x in data_process if low_inn_fence <= x <= upp_inn_fence]
stdev_nooutliers = sqrt(average(variance_nooutliers))
except ValueError:
stdev_nooutliers = -float("inf")
mean_nooutliers = float("inf")
btstrpd = bootstrap(data_process, average, boot_size, int(len(data_process) * 0.66), 0.025)
goodbench = "✓"
try:
mean = float(average(data_process))
variance = [(x - mean) ** 2 for x in data_process]
stdev = sqrt(average(variance))
lower = mean - 3 * stdev
upper = mean + 3 * stdev
if len([x for x in data_process if lower <= x <= upper]) < 0.95 * len(data_process):
goodbench = "╳╳╳╳╳"
except ValueError:
stdev = lower = upper = mean = float("inf")
goodbench = "?"
try:
mean_nooutliers_diff = 100.0 * (mean_nooutliers - mean) / mean
except ZeroDivisionError:
mean_nooutliers_diff = float("inf")
try:
stdev_nooutliers_diff = 100.0 * (stdev_nooutliers - stdev) / stdev
except ZeroDivisionError:
stdev_nooutliers_diff = float("inf")
dispersion_warn = ""
try:
pr_dispersion = 100.0 * (float(btstrpd["to"]) - float(btstrpd["from"])) / btstrpd["metrics"]
if abs(pr_dispersion) > 30.:
dispersion_warn = " HIGH"
except ZeroDivisionError:
pr_dispersion = float("+Infinity")
return {
"low_inn_fence": low_inn_fence,
"upp_inn_fence": upp_inn_fence,
"low_out_fence": low_out_fence,
"upp_out_fence": upp_out_fence,
"stdev": stdev,
"mean": mean,
"lower": lower,
"upper": upper,
"goodbench": goodbench,
"btstrpd": btstrpd,
"mild_outliers": mild_outliers,
"extr_outliers": extr_outliers,
"metrics_nooutliers": metrics_nooutliers,
"mean_nooutliers_diff": mean_nooutliers_diff,
"stdev_nooutliers": stdev_nooutliers,
"stdev_nooutliers_diff": stdev_nooutliers_diff,
"pr_dispersion": pr_dispersion,
"dispersion_warn": dispersion_warn
}
# return low_inn_fence, upp_inn_fence, low_out_fence, upp_out_fence, stdev, mean, lower, upper, goodbench, btstrpd,
# stdev, mild_outliers, extr_outliers, metrics_nooutliers, mean_nooutliers_diff, stdev_nooutliers,
# stdev_nooutliers_diff, pr_dispersion, dispersion_warn | validate_cost | identifier_name |
util.py | #
# GdbLib - A Gdb python library.
# Copyright (C) 2012 Fernando Castillo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
def change(values):
pass
def removeFile(path):
| index = path.rfind(os.sep)
return path[:index] | identifier_body |
|
util.py | #
# GdbLib - A Gdb python library.
# Copyright (C) 2012 Fernando Castillo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
def change(values):
pass |
def removeFile(path):
index = path.rfind(os.sep)
return path[:index] | random_line_split |
|
util.py | #
# GdbLib - A Gdb python library.
# Copyright (C) 2012 Fernando Castillo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
def | (values):
pass
def removeFile(path):
index = path.rfind(os.sep)
return path[:index]
| change | identifier_name |
utils.py | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, cstr, cint
from frappe import _
import json
from erpnext.stock.doctype.item.item import get_last_purchase_details
from erpnext.stock.doctype.item.item import validate_end_of_life
def update_last_purchase_rate(doc, is_submit):
"""updates last_purchase_rate in item table for each item"""
import frappe.utils
this_purchase_date = frappe.utils.getdate(doc.get('posting_date') or doc.get('transaction_date'))
for d in doc.get("items"):
# get last purchase details
last_purchase_details = get_last_purchase_details(d.item_code, doc.name)
# compare last purchase date and this transaction's date
last_purchase_rate = None
if last_purchase_details and \
(last_purchase_details.purchase_date > this_purchase_date):
last_purchase_rate = last_purchase_details['base_rate']
elif is_submit == 1:
# even if this transaction is the latest one, it should be submitted
# for it to be considered for latest purchase rate
if flt(d.conversion_factor):
last_purchase_rate = flt(d.base_rate) / flt(d.conversion_factor)
else:
frappe.throw(_("UOM Conversion factor is required in row {0}").format(d.idx))
# update last purchsae rate
if last_purchase_rate:
frappe.db.sql("""update `tabItem` set last_purchase_rate = %s where name = %s""",
(flt(last_purchase_rate), d.item_code))
def | (doc):
items = []
for d in doc.get("items"):
if not d.qty:
if doc.doctype == "Purchase Receipt" and d.rejected_qty:
continue
frappe.throw(_("Please enter quantity for Item {0}").format(d.item_code))
# update with latest quantities
bin = frappe.db.sql("""select projected_qty from `tabBin` where
item_code = %s and warehouse = %s""", (d.item_code, d.warehouse), as_dict=1)
f_lst ={'projected_qty': bin and flt(bin[0]['projected_qty']) or 0, 'ordered_qty': 0, 'received_qty' : 0}
if d.doctype in ('Purchase Receipt Item', 'Purchase Invoice Item'):
f_lst.pop('received_qty')
for x in f_lst :
if d.meta.get_field(x):
d.set(x, f_lst[x])
item = frappe.db.sql("""select is_stock_item,
is_sub_contracted_item, end_of_life, disabled from `tabItem` where name=%s""",
d.item_code, as_dict=1)[0]
validate_end_of_life(d.item_code, item.end_of_life, item.disabled)
# validate stock item
if item.is_stock_item==1 and d.qty and not d.warehouse and not d.get("delivered_by_supplier"):
frappe.throw(_("Warehouse is mandatory for stock Item {0} in row {1}").format(d.item_code, d.idx))
items.append(cstr(d.item_code))
if items and len(items) != len(set(items)) and \
not cint(frappe.db.get_single_value("Buying Settings", "allow_multiple_items") or 0):
frappe.throw(_("Same item cannot be entered multiple times."))
def check_for_closed_status(doctype, docname):
status = frappe.db.get_value(doctype, docname, "status")
if status == "Closed":
frappe.throw(_("{0} {1} status is {2}").format(doctype, docname, status), frappe.InvalidStatusError)
@frappe.whitelist()
def get_linked_material_requests(items):
items = json.loads(items)
mr_list = []
for item in items:
material_request = frappe.db.sql("""SELECT distinct mr.name AS mr_name,
(mr_item.qty - mr_item.ordered_qty) AS qty,
mr_item.item_code AS item_code,
mr_item.name AS mr_item
FROM `tabMaterial Request` mr, `tabMaterial Request Item` mr_item
WHERE mr.name = mr_item.parent
AND mr_item.item_code = %(item)s
AND mr.material_request_type = 'Purchase'
AND mr.per_ordered < 99.99
AND mr.docstatus = 1
AND mr.status != 'Stopped'
ORDER BY mr_item.item_code ASC""",{"item": item}, as_dict=1)
if material_request:
mr_list.append(material_request)
return mr_list
| validate_for_items | identifier_name |
utils.py | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, cstr, cint
from frappe import _
import json
from erpnext.stock.doctype.item.item import get_last_purchase_details
from erpnext.stock.doctype.item.item import validate_end_of_life
def update_last_purchase_rate(doc, is_submit):
"""updates last_purchase_rate in item table for each item"""
import frappe.utils
this_purchase_date = frappe.utils.getdate(doc.get('posting_date') or doc.get('transaction_date'))
for d in doc.get("items"):
# get last purchase details
last_purchase_details = get_last_purchase_details(d.item_code, doc.name)
# compare last purchase date and this transaction's date
last_purchase_rate = None
if last_purchase_details and \
(last_purchase_details.purchase_date > this_purchase_date):
last_purchase_rate = last_purchase_details['base_rate']
elif is_submit == 1:
# even if this transaction is the latest one, it should be submitted
# for it to be considered for latest purchase rate
if flt(d.conversion_factor):
last_purchase_rate = flt(d.base_rate) / flt(d.conversion_factor)
else:
frappe.throw(_("UOM Conversion factor is required in row {0}").format(d.idx)) | (flt(last_purchase_rate), d.item_code))
def validate_for_items(doc):
items = []
for d in doc.get("items"):
if not d.qty:
if doc.doctype == "Purchase Receipt" and d.rejected_qty:
continue
frappe.throw(_("Please enter quantity for Item {0}").format(d.item_code))
# update with latest quantities
bin = frappe.db.sql("""select projected_qty from `tabBin` where
item_code = %s and warehouse = %s""", (d.item_code, d.warehouse), as_dict=1)
f_lst ={'projected_qty': bin and flt(bin[0]['projected_qty']) or 0, 'ordered_qty': 0, 'received_qty' : 0}
if d.doctype in ('Purchase Receipt Item', 'Purchase Invoice Item'):
f_lst.pop('received_qty')
for x in f_lst :
if d.meta.get_field(x):
d.set(x, f_lst[x])
item = frappe.db.sql("""select is_stock_item,
is_sub_contracted_item, end_of_life, disabled from `tabItem` where name=%s""",
d.item_code, as_dict=1)[0]
validate_end_of_life(d.item_code, item.end_of_life, item.disabled)
# validate stock item
if item.is_stock_item==1 and d.qty and not d.warehouse and not d.get("delivered_by_supplier"):
frappe.throw(_("Warehouse is mandatory for stock Item {0} in row {1}").format(d.item_code, d.idx))
items.append(cstr(d.item_code))
if items and len(items) != len(set(items)) and \
not cint(frappe.db.get_single_value("Buying Settings", "allow_multiple_items") or 0):
frappe.throw(_("Same item cannot be entered multiple times."))
def check_for_closed_status(doctype, docname):
status = frappe.db.get_value(doctype, docname, "status")
if status == "Closed":
frappe.throw(_("{0} {1} status is {2}").format(doctype, docname, status), frappe.InvalidStatusError)
@frappe.whitelist()
def get_linked_material_requests(items):
items = json.loads(items)
mr_list = []
for item in items:
material_request = frappe.db.sql("""SELECT distinct mr.name AS mr_name,
(mr_item.qty - mr_item.ordered_qty) AS qty,
mr_item.item_code AS item_code,
mr_item.name AS mr_item
FROM `tabMaterial Request` mr, `tabMaterial Request Item` mr_item
WHERE mr.name = mr_item.parent
AND mr_item.item_code = %(item)s
AND mr.material_request_type = 'Purchase'
AND mr.per_ordered < 99.99
AND mr.docstatus = 1
AND mr.status != 'Stopped'
ORDER BY mr_item.item_code ASC""",{"item": item}, as_dict=1)
if material_request:
mr_list.append(material_request)
return mr_list |
# update last purchsae rate
if last_purchase_rate:
frappe.db.sql("""update `tabItem` set last_purchase_rate = %s where name = %s""", | random_line_split |
utils.py | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, cstr, cint
from frappe import _
import json
from erpnext.stock.doctype.item.item import get_last_purchase_details
from erpnext.stock.doctype.item.item import validate_end_of_life
def update_last_purchase_rate(doc, is_submit):
"""updates last_purchase_rate in item table for each item"""
import frappe.utils
this_purchase_date = frappe.utils.getdate(doc.get('posting_date') or doc.get('transaction_date'))
for d in doc.get("items"):
# get last purchase details
last_purchase_details = get_last_purchase_details(d.item_code, doc.name)
# compare last purchase date and this transaction's date
last_purchase_rate = None
if last_purchase_details and \
(last_purchase_details.purchase_date > this_purchase_date):
last_purchase_rate = last_purchase_details['base_rate']
elif is_submit == 1:
# even if this transaction is the latest one, it should be submitted
# for it to be considered for latest purchase rate
if flt(d.conversion_factor):
last_purchase_rate = flt(d.base_rate) / flt(d.conversion_factor)
else:
frappe.throw(_("UOM Conversion factor is required in row {0}").format(d.idx))
# update last purchsae rate
if last_purchase_rate:
frappe.db.sql("""update `tabItem` set last_purchase_rate = %s where name = %s""",
(flt(last_purchase_rate), d.item_code))
def validate_for_items(doc):
items = []
for d in doc.get("items"):
if not d.qty:
|
# update with latest quantities
bin = frappe.db.sql("""select projected_qty from `tabBin` where
item_code = %s and warehouse = %s""", (d.item_code, d.warehouse), as_dict=1)
f_lst ={'projected_qty': bin and flt(bin[0]['projected_qty']) or 0, 'ordered_qty': 0, 'received_qty' : 0}
if d.doctype in ('Purchase Receipt Item', 'Purchase Invoice Item'):
f_lst.pop('received_qty')
for x in f_lst :
if d.meta.get_field(x):
d.set(x, f_lst[x])
item = frappe.db.sql("""select is_stock_item,
is_sub_contracted_item, end_of_life, disabled from `tabItem` where name=%s""",
d.item_code, as_dict=1)[0]
validate_end_of_life(d.item_code, item.end_of_life, item.disabled)
# validate stock item
if item.is_stock_item==1 and d.qty and not d.warehouse and not d.get("delivered_by_supplier"):
frappe.throw(_("Warehouse is mandatory for stock Item {0} in row {1}").format(d.item_code, d.idx))
items.append(cstr(d.item_code))
if items and len(items) != len(set(items)) and \
not cint(frappe.db.get_single_value("Buying Settings", "allow_multiple_items") or 0):
frappe.throw(_("Same item cannot be entered multiple times."))
def check_for_closed_status(doctype, docname):
status = frappe.db.get_value(doctype, docname, "status")
if status == "Closed":
frappe.throw(_("{0} {1} status is {2}").format(doctype, docname, status), frappe.InvalidStatusError)
@frappe.whitelist()
def get_linked_material_requests(items):
items = json.loads(items)
mr_list = []
for item in items:
material_request = frappe.db.sql("""SELECT distinct mr.name AS mr_name,
(mr_item.qty - mr_item.ordered_qty) AS qty,
mr_item.item_code AS item_code,
mr_item.name AS mr_item
FROM `tabMaterial Request` mr, `tabMaterial Request Item` mr_item
WHERE mr.name = mr_item.parent
AND mr_item.item_code = %(item)s
AND mr.material_request_type = 'Purchase'
AND mr.per_ordered < 99.99
AND mr.docstatus = 1
AND mr.status != 'Stopped'
ORDER BY mr_item.item_code ASC""",{"item": item}, as_dict=1)
if material_request:
mr_list.append(material_request)
return mr_list
| if doc.doctype == "Purchase Receipt" and d.rejected_qty:
continue
frappe.throw(_("Please enter quantity for Item {0}").format(d.item_code)) | conditional_block |
utils.py | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, cstr, cint
from frappe import _
import json
from erpnext.stock.doctype.item.item import get_last_purchase_details
from erpnext.stock.doctype.item.item import validate_end_of_life
def update_last_purchase_rate(doc, is_submit):
|
def validate_for_items(doc):
items = []
for d in doc.get("items"):
if not d.qty:
if doc.doctype == "Purchase Receipt" and d.rejected_qty:
continue
frappe.throw(_("Please enter quantity for Item {0}").format(d.item_code))
# update with latest quantities
bin = frappe.db.sql("""select projected_qty from `tabBin` where
item_code = %s and warehouse = %s""", (d.item_code, d.warehouse), as_dict=1)
f_lst ={'projected_qty': bin and flt(bin[0]['projected_qty']) or 0, 'ordered_qty': 0, 'received_qty' : 0}
if d.doctype in ('Purchase Receipt Item', 'Purchase Invoice Item'):
f_lst.pop('received_qty')
for x in f_lst :
if d.meta.get_field(x):
d.set(x, f_lst[x])
item = frappe.db.sql("""select is_stock_item,
is_sub_contracted_item, end_of_life, disabled from `tabItem` where name=%s""",
d.item_code, as_dict=1)[0]
validate_end_of_life(d.item_code, item.end_of_life, item.disabled)
# validate stock item
if item.is_stock_item==1 and d.qty and not d.warehouse and not d.get("delivered_by_supplier"):
frappe.throw(_("Warehouse is mandatory for stock Item {0} in row {1}").format(d.item_code, d.idx))
items.append(cstr(d.item_code))
if items and len(items) != len(set(items)) and \
not cint(frappe.db.get_single_value("Buying Settings", "allow_multiple_items") or 0):
frappe.throw(_("Same item cannot be entered multiple times."))
def check_for_closed_status(doctype, docname):
status = frappe.db.get_value(doctype, docname, "status")
if status == "Closed":
frappe.throw(_("{0} {1} status is {2}").format(doctype, docname, status), frappe.InvalidStatusError)
@frappe.whitelist()
def get_linked_material_requests(items):
items = json.loads(items)
mr_list = []
for item in items:
material_request = frappe.db.sql("""SELECT distinct mr.name AS mr_name,
(mr_item.qty - mr_item.ordered_qty) AS qty,
mr_item.item_code AS item_code,
mr_item.name AS mr_item
FROM `tabMaterial Request` mr, `tabMaterial Request Item` mr_item
WHERE mr.name = mr_item.parent
AND mr_item.item_code = %(item)s
AND mr.material_request_type = 'Purchase'
AND mr.per_ordered < 99.99
AND mr.docstatus = 1
AND mr.status != 'Stopped'
ORDER BY mr_item.item_code ASC""",{"item": item}, as_dict=1)
if material_request:
mr_list.append(material_request)
return mr_list
| """updates last_purchase_rate in item table for each item"""
import frappe.utils
this_purchase_date = frappe.utils.getdate(doc.get('posting_date') or doc.get('transaction_date'))
for d in doc.get("items"):
# get last purchase details
last_purchase_details = get_last_purchase_details(d.item_code, doc.name)
# compare last purchase date and this transaction's date
last_purchase_rate = None
if last_purchase_details and \
(last_purchase_details.purchase_date > this_purchase_date):
last_purchase_rate = last_purchase_details['base_rate']
elif is_submit == 1:
# even if this transaction is the latest one, it should be submitted
# for it to be considered for latest purchase rate
if flt(d.conversion_factor):
last_purchase_rate = flt(d.base_rate) / flt(d.conversion_factor)
else:
frappe.throw(_("UOM Conversion factor is required in row {0}").format(d.idx))
# update last purchsae rate
if last_purchase_rate:
frappe.db.sql("""update `tabItem` set last_purchase_rate = %s where name = %s""",
(flt(last_purchase_rate), d.item_code)) | identifier_body |
limits.rs | use crate::error::Result;
use postgres::Connection;
use std::collections::BTreeMap;
use std::time::Duration;
#[derive(Debug, Clone, PartialEq, Eq)]
pub(crate) struct Limits {
memory: usize,
targets: usize,
timeout: Duration,
networking: bool,
max_log_size: usize,
}
impl Default for Limits {
fn default() -> Self {
Self {
memory: 3 * 1024 * 1024 * 1024, // 3 GB
timeout: Duration::from_secs(15 * 60), // 15 minutes
targets: 10,
networking: false,
max_log_size: 100 * 1024, // 100 KB
}
}
}
impl Limits {
pub(crate) fn for_crate(conn: &Connection, name: &str) -> Result<Self> {
let mut limits = Self::default();
let res = conn.query(
"SELECT * FROM sandbox_overrides WHERE crate_name = $1;",
&[&name],
)?;
if !res.is_empty() {
let row = res.get(0);
if let Some(memory) = row.get::<_, Option<i64>>("max_memory_bytes") {
limits.memory = memory as usize;
}
if let Some(timeout) = row.get::<_, Option<i32>>("timeout_seconds") {
limits.timeout = Duration::from_secs(timeout as u64);
}
if let Some(targets) = row.get::<_, Option<i32>>("max_targets") {
limits.targets = targets as usize;
}
}
Ok(limits)
}
pub(crate) fn memory(&self) -> usize {
self.memory
}
pub(crate) fn timeout(&self) -> Duration {
self.timeout
}
pub(crate) fn networking(&self) -> bool {
self.networking
}
pub(crate) fn max_log_size(&self) -> usize {
self.max_log_size
}
pub(crate) fn targets(&self) -> usize {
self.targets
}
pub(crate) fn for_website(&self) -> BTreeMap<String, String> {
let mut res = BTreeMap::new();
res.insert("Available RAM".into(), SIZE_SCALE(self.memory));
res.insert(
"Maximum rustdoc execution time".into(),
TIME_SCALE(self.timeout.as_secs() as usize),
);
res.insert(
"Maximum size of a build log".into(),
SIZE_SCALE(self.max_log_size),
);
if self.networking {
res.insert("Network access".into(), "allowed".into());
} else {
res.insert("Network access".into(), "blocked".into());
}
res.insert(
"Maximum number of build targets".into(),
self.targets.to_string(),
);
res
}
}
const TIME_SCALE: fn(usize) -> String = |v| scale(v, 60, &["seconds", "minutes", "hours"]);
const SIZE_SCALE: fn(usize) -> String = |v| scale(v, 1024, &["bytes", "KB", "MB", "GB"]);
fn scale(value: usize, interval: usize, labels: &[&str]) -> String {
let (mut value, interval) = (value as f64, interval as f64);
let mut chosen_label = &labels[0];
for label in &labels[1..] {
if value / interval >= 1.0 {
chosen_label = label;
value /= interval;
} else {
break;
}
}
// 2.x
let mut value = format!("{:.1}", value);
// 2.0 -> 2
if value.ends_with(".0") {
value.truncate(value.len() - 2);
}
format!("{} {}", value, chosen_label)
}
#[cfg(test)] | use crate::test::*;
#[test]
fn retrieve_limits() {
wrapper(|env| {
let db = env.db();
let krate = "hexponent";
// limits work if no crate has limits set
let hexponent = Limits::for_crate(&db.conn(), krate)?;
assert_eq!(hexponent, Limits::default());
db.conn().query(
"INSERT INTO sandbox_overrides (crate_name, max_targets) VALUES ($1, 15)",
&[&krate],
)?;
// limits work if crate has limits set
let hexponent = Limits::for_crate(&db.conn(), krate)?;
assert_eq!(
hexponent,
Limits {
targets: 15,
..Limits::default()
}
);
// all limits work
let krate = "regex";
let limits = Limits {
memory: 100_000,
timeout: Duration::from_secs(300),
targets: 1,
..Limits::default()
};
db.conn().query(
"INSERT INTO sandbox_overrides (crate_name, max_memory_bytes, timeout_seconds, max_targets)
VALUES ($1, $2, $3, $4)",
&[&krate, &(limits.memory as i64), &(limits.timeout.as_secs() as i32), &(limits.targets as i32)]
)?;
assert_eq!(limits, Limits::for_crate(&db.conn(), krate)?);
Ok(())
});
}
#[test]
fn display_limits() {
let limits = Limits {
memory: 102_400,
timeout: Duration::from_secs(300),
targets: 1,
..Limits::default()
};
let display = limits.for_website();
assert_eq!(display.get("Network access"), Some(&"blocked".into()));
assert_eq!(
display.get("Maximum size of a build log"),
Some(&"100 KB".into())
);
assert_eq!(
display.get("Maximum number of build targets"),
Some(&limits.targets.to_string())
);
assert_eq!(
display.get("Maximum rustdoc execution time"),
Some(&"5 minutes".into())
);
assert_eq!(display.get("Available RAM"), Some(&"100 KB".into()));
}
#[test]
fn scale_limits() {
// time
assert_eq!(TIME_SCALE(300), "5 minutes");
assert_eq!(TIME_SCALE(1), "1 seconds");
assert_eq!(TIME_SCALE(7200), "2 hours");
// size
assert_eq!(SIZE_SCALE(1), "1 bytes");
assert_eq!(SIZE_SCALE(100), "100 bytes");
assert_eq!(SIZE_SCALE(1024), "1 KB");
assert_eq!(SIZE_SCALE(10240), "10 KB");
assert_eq!(SIZE_SCALE(1_048_576), "1 MB");
assert_eq!(SIZE_SCALE(10_485_760), "10 MB");
assert_eq!(SIZE_SCALE(1_073_741_824), "1 GB");
assert_eq!(SIZE_SCALE(10_737_418_240), "10 GB");
assert_eq!(SIZE_SCALE(std::u32::MAX as usize), "4 GB");
// fractional sizes
assert_eq!(TIME_SCALE(90), "1.5 minutes");
assert_eq!(TIME_SCALE(5400), "1.5 hours");
assert_eq!(SIZE_SCALE(1_288_490_189), "1.2 GB");
assert_eq!(SIZE_SCALE(3_758_096_384), "3.5 GB");
assert_eq!(SIZE_SCALE(1_048_051_712), "999.5 MB");
}
} | mod test {
use super::*; | random_line_split |
limits.rs | use crate::error::Result;
use postgres::Connection;
use std::collections::BTreeMap;
use std::time::Duration;
#[derive(Debug, Clone, PartialEq, Eq)]
pub(crate) struct Limits {
memory: usize,
targets: usize,
timeout: Duration,
networking: bool,
max_log_size: usize,
}
impl Default for Limits {
fn default() -> Self {
Self {
memory: 3 * 1024 * 1024 * 1024, // 3 GB
timeout: Duration::from_secs(15 * 60), // 15 minutes
targets: 10,
networking: false,
max_log_size: 100 * 1024, // 100 KB
}
}
}
impl Limits {
pub(crate) fn for_crate(conn: &Connection, name: &str) -> Result<Self> {
let mut limits = Self::default();
let res = conn.query(
"SELECT * FROM sandbox_overrides WHERE crate_name = $1;",
&[&name],
)?;
if !res.is_empty() {
let row = res.get(0);
if let Some(memory) = row.get::<_, Option<i64>>("max_memory_bytes") {
limits.memory = memory as usize;
}
if let Some(timeout) = row.get::<_, Option<i32>>("timeout_seconds") {
limits.timeout = Duration::from_secs(timeout as u64);
}
if let Some(targets) = row.get::<_, Option<i32>>("max_targets") {
limits.targets = targets as usize;
}
}
Ok(limits)
}
pub(crate) fn memory(&self) -> usize {
self.memory
}
pub(crate) fn timeout(&self) -> Duration {
self.timeout
}
pub(crate) fn networking(&self) -> bool |
pub(crate) fn max_log_size(&self) -> usize {
self.max_log_size
}
pub(crate) fn targets(&self) -> usize {
self.targets
}
pub(crate) fn for_website(&self) -> BTreeMap<String, String> {
let mut res = BTreeMap::new();
res.insert("Available RAM".into(), SIZE_SCALE(self.memory));
res.insert(
"Maximum rustdoc execution time".into(),
TIME_SCALE(self.timeout.as_secs() as usize),
);
res.insert(
"Maximum size of a build log".into(),
SIZE_SCALE(self.max_log_size),
);
if self.networking {
res.insert("Network access".into(), "allowed".into());
} else {
res.insert("Network access".into(), "blocked".into());
}
res.insert(
"Maximum number of build targets".into(),
self.targets.to_string(),
);
res
}
}
const TIME_SCALE: fn(usize) -> String = |v| scale(v, 60, &["seconds", "minutes", "hours"]);
const SIZE_SCALE: fn(usize) -> String = |v| scale(v, 1024, &["bytes", "KB", "MB", "GB"]);
fn scale(value: usize, interval: usize, labels: &[&str]) -> String {
let (mut value, interval) = (value as f64, interval as f64);
let mut chosen_label = &labels[0];
for label in &labels[1..] {
if value / interval >= 1.0 {
chosen_label = label;
value /= interval;
} else {
break;
}
}
// 2.x
let mut value = format!("{:.1}", value);
// 2.0 -> 2
if value.ends_with(".0") {
value.truncate(value.len() - 2);
}
format!("{} {}", value, chosen_label)
}
#[cfg(test)]
mod test {
use super::*;
use crate::test::*;
#[test]
fn retrieve_limits() {
wrapper(|env| {
let db = env.db();
let krate = "hexponent";
// limits work if no crate has limits set
let hexponent = Limits::for_crate(&db.conn(), krate)?;
assert_eq!(hexponent, Limits::default());
db.conn().query(
"INSERT INTO sandbox_overrides (crate_name, max_targets) VALUES ($1, 15)",
&[&krate],
)?;
// limits work if crate has limits set
let hexponent = Limits::for_crate(&db.conn(), krate)?;
assert_eq!(
hexponent,
Limits {
targets: 15,
..Limits::default()
}
);
// all limits work
let krate = "regex";
let limits = Limits {
memory: 100_000,
timeout: Duration::from_secs(300),
targets: 1,
..Limits::default()
};
db.conn().query(
"INSERT INTO sandbox_overrides (crate_name, max_memory_bytes, timeout_seconds, max_targets)
VALUES ($1, $2, $3, $4)",
&[&krate, &(limits.memory as i64), &(limits.timeout.as_secs() as i32), &(limits.targets as i32)]
)?;
assert_eq!(limits, Limits::for_crate(&db.conn(), krate)?);
Ok(())
});
}
#[test]
fn display_limits() {
let limits = Limits {
memory: 102_400,
timeout: Duration::from_secs(300),
targets: 1,
..Limits::default()
};
let display = limits.for_website();
assert_eq!(display.get("Network access"), Some(&"blocked".into()));
assert_eq!(
display.get("Maximum size of a build log"),
Some(&"100 KB".into())
);
assert_eq!(
display.get("Maximum number of build targets"),
Some(&limits.targets.to_string())
);
assert_eq!(
display.get("Maximum rustdoc execution time"),
Some(&"5 minutes".into())
);
assert_eq!(display.get("Available RAM"), Some(&"100 KB".into()));
}
#[test]
fn scale_limits() {
// time
assert_eq!(TIME_SCALE(300), "5 minutes");
assert_eq!(TIME_SCALE(1), "1 seconds");
assert_eq!(TIME_SCALE(7200), "2 hours");
// size
assert_eq!(SIZE_SCALE(1), "1 bytes");
assert_eq!(SIZE_SCALE(100), "100 bytes");
assert_eq!(SIZE_SCALE(1024), "1 KB");
assert_eq!(SIZE_SCALE(10240), "10 KB");
assert_eq!(SIZE_SCALE(1_048_576), "1 MB");
assert_eq!(SIZE_SCALE(10_485_760), "10 MB");
assert_eq!(SIZE_SCALE(1_073_741_824), "1 GB");
assert_eq!(SIZE_SCALE(10_737_418_240), "10 GB");
assert_eq!(SIZE_SCALE(std::u32::MAX as usize), "4 GB");
// fractional sizes
assert_eq!(TIME_SCALE(90), "1.5 minutes");
assert_eq!(TIME_SCALE(5400), "1.5 hours");
assert_eq!(SIZE_SCALE(1_288_490_189), "1.2 GB");
assert_eq!(SIZE_SCALE(3_758_096_384), "3.5 GB");
assert_eq!(SIZE_SCALE(1_048_051_712), "999.5 MB");
}
}
| {
self.networking
} | identifier_body |
limits.rs | use crate::error::Result;
use postgres::Connection;
use std::collections::BTreeMap;
use std::time::Duration;
#[derive(Debug, Clone, PartialEq, Eq)]
pub(crate) struct Limits {
memory: usize,
targets: usize,
timeout: Duration,
networking: bool,
max_log_size: usize,
}
impl Default for Limits {
fn default() -> Self {
Self {
memory: 3 * 1024 * 1024 * 1024, // 3 GB
timeout: Duration::from_secs(15 * 60), // 15 minutes
targets: 10,
networking: false,
max_log_size: 100 * 1024, // 100 KB
}
}
}
impl Limits {
pub(crate) fn for_crate(conn: &Connection, name: &str) -> Result<Self> {
let mut limits = Self::default();
let res = conn.query(
"SELECT * FROM sandbox_overrides WHERE crate_name = $1;",
&[&name],
)?;
if !res.is_empty() {
let row = res.get(0);
if let Some(memory) = row.get::<_, Option<i64>>("max_memory_bytes") {
limits.memory = memory as usize;
}
if let Some(timeout) = row.get::<_, Option<i32>>("timeout_seconds") {
limits.timeout = Duration::from_secs(timeout as u64);
}
if let Some(targets) = row.get::<_, Option<i32>>("max_targets") {
limits.targets = targets as usize;
}
}
Ok(limits)
}
pub(crate) fn memory(&self) -> usize {
self.memory
}
pub(crate) fn timeout(&self) -> Duration {
self.timeout
}
pub(crate) fn networking(&self) -> bool {
self.networking
}
pub(crate) fn max_log_size(&self) -> usize {
self.max_log_size
}
pub(crate) fn targets(&self) -> usize {
self.targets
}
pub(crate) fn for_website(&self) -> BTreeMap<String, String> {
let mut res = BTreeMap::new();
res.insert("Available RAM".into(), SIZE_SCALE(self.memory));
res.insert(
"Maximum rustdoc execution time".into(),
TIME_SCALE(self.timeout.as_secs() as usize),
);
res.insert(
"Maximum size of a build log".into(),
SIZE_SCALE(self.max_log_size),
);
if self.networking {
res.insert("Network access".into(), "allowed".into());
} else |
res.insert(
"Maximum number of build targets".into(),
self.targets.to_string(),
);
res
}
}
const TIME_SCALE: fn(usize) -> String = |v| scale(v, 60, &["seconds", "minutes", "hours"]);
const SIZE_SCALE: fn(usize) -> String = |v| scale(v, 1024, &["bytes", "KB", "MB", "GB"]);
fn scale(value: usize, interval: usize, labels: &[&str]) -> String {
let (mut value, interval) = (value as f64, interval as f64);
let mut chosen_label = &labels[0];
for label in &labels[1..] {
if value / interval >= 1.0 {
chosen_label = label;
value /= interval;
} else {
break;
}
}
// 2.x
let mut value = format!("{:.1}", value);
// 2.0 -> 2
if value.ends_with(".0") {
value.truncate(value.len() - 2);
}
format!("{} {}", value, chosen_label)
}
#[cfg(test)]
mod test {
use super::*;
use crate::test::*;
#[test]
fn retrieve_limits() {
wrapper(|env| {
let db = env.db();
let krate = "hexponent";
// limits work if no crate has limits set
let hexponent = Limits::for_crate(&db.conn(), krate)?;
assert_eq!(hexponent, Limits::default());
db.conn().query(
"INSERT INTO sandbox_overrides (crate_name, max_targets) VALUES ($1, 15)",
&[&krate],
)?;
// limits work if crate has limits set
let hexponent = Limits::for_crate(&db.conn(), krate)?;
assert_eq!(
hexponent,
Limits {
targets: 15,
..Limits::default()
}
);
// all limits work
let krate = "regex";
let limits = Limits {
memory: 100_000,
timeout: Duration::from_secs(300),
targets: 1,
..Limits::default()
};
db.conn().query(
"INSERT INTO sandbox_overrides (crate_name, max_memory_bytes, timeout_seconds, max_targets)
VALUES ($1, $2, $3, $4)",
&[&krate, &(limits.memory as i64), &(limits.timeout.as_secs() as i32), &(limits.targets as i32)]
)?;
assert_eq!(limits, Limits::for_crate(&db.conn(), krate)?);
Ok(())
});
}
#[test]
fn display_limits() {
let limits = Limits {
memory: 102_400,
timeout: Duration::from_secs(300),
targets: 1,
..Limits::default()
};
let display = limits.for_website();
assert_eq!(display.get("Network access"), Some(&"blocked".into()));
assert_eq!(
display.get("Maximum size of a build log"),
Some(&"100 KB".into())
);
assert_eq!(
display.get("Maximum number of build targets"),
Some(&limits.targets.to_string())
);
assert_eq!(
display.get("Maximum rustdoc execution time"),
Some(&"5 minutes".into())
);
assert_eq!(display.get("Available RAM"), Some(&"100 KB".into()));
}
#[test]
fn scale_limits() {
// time
assert_eq!(TIME_SCALE(300), "5 minutes");
assert_eq!(TIME_SCALE(1), "1 seconds");
assert_eq!(TIME_SCALE(7200), "2 hours");
// size
assert_eq!(SIZE_SCALE(1), "1 bytes");
assert_eq!(SIZE_SCALE(100), "100 bytes");
assert_eq!(SIZE_SCALE(1024), "1 KB");
assert_eq!(SIZE_SCALE(10240), "10 KB");
assert_eq!(SIZE_SCALE(1_048_576), "1 MB");
assert_eq!(SIZE_SCALE(10_485_760), "10 MB");
assert_eq!(SIZE_SCALE(1_073_741_824), "1 GB");
assert_eq!(SIZE_SCALE(10_737_418_240), "10 GB");
assert_eq!(SIZE_SCALE(std::u32::MAX as usize), "4 GB");
// fractional sizes
assert_eq!(TIME_SCALE(90), "1.5 minutes");
assert_eq!(TIME_SCALE(5400), "1.5 hours");
assert_eq!(SIZE_SCALE(1_288_490_189), "1.2 GB");
assert_eq!(SIZE_SCALE(3_758_096_384), "3.5 GB");
assert_eq!(SIZE_SCALE(1_048_051_712), "999.5 MB");
}
}
| {
res.insert("Network access".into(), "blocked".into());
} | conditional_block |
limits.rs | use crate::error::Result;
use postgres::Connection;
use std::collections::BTreeMap;
use std::time::Duration;
#[derive(Debug, Clone, PartialEq, Eq)]
pub(crate) struct Limits {
memory: usize,
targets: usize,
timeout: Duration,
networking: bool,
max_log_size: usize,
}
impl Default for Limits {
fn default() -> Self {
Self {
memory: 3 * 1024 * 1024 * 1024, // 3 GB
timeout: Duration::from_secs(15 * 60), // 15 minutes
targets: 10,
networking: false,
max_log_size: 100 * 1024, // 100 KB
}
}
}
impl Limits {
pub(crate) fn | (conn: &Connection, name: &str) -> Result<Self> {
let mut limits = Self::default();
let res = conn.query(
"SELECT * FROM sandbox_overrides WHERE crate_name = $1;",
&[&name],
)?;
if !res.is_empty() {
let row = res.get(0);
if let Some(memory) = row.get::<_, Option<i64>>("max_memory_bytes") {
limits.memory = memory as usize;
}
if let Some(timeout) = row.get::<_, Option<i32>>("timeout_seconds") {
limits.timeout = Duration::from_secs(timeout as u64);
}
if let Some(targets) = row.get::<_, Option<i32>>("max_targets") {
limits.targets = targets as usize;
}
}
Ok(limits)
}
pub(crate) fn memory(&self) -> usize {
self.memory
}
pub(crate) fn timeout(&self) -> Duration {
self.timeout
}
pub(crate) fn networking(&self) -> bool {
self.networking
}
pub(crate) fn max_log_size(&self) -> usize {
self.max_log_size
}
pub(crate) fn targets(&self) -> usize {
self.targets
}
pub(crate) fn for_website(&self) -> BTreeMap<String, String> {
let mut res = BTreeMap::new();
res.insert("Available RAM".into(), SIZE_SCALE(self.memory));
res.insert(
"Maximum rustdoc execution time".into(),
TIME_SCALE(self.timeout.as_secs() as usize),
);
res.insert(
"Maximum size of a build log".into(),
SIZE_SCALE(self.max_log_size),
);
if self.networking {
res.insert("Network access".into(), "allowed".into());
} else {
res.insert("Network access".into(), "blocked".into());
}
res.insert(
"Maximum number of build targets".into(),
self.targets.to_string(),
);
res
}
}
const TIME_SCALE: fn(usize) -> String = |v| scale(v, 60, &["seconds", "minutes", "hours"]);
const SIZE_SCALE: fn(usize) -> String = |v| scale(v, 1024, &["bytes", "KB", "MB", "GB"]);
fn scale(value: usize, interval: usize, labels: &[&str]) -> String {
let (mut value, interval) = (value as f64, interval as f64);
let mut chosen_label = &labels[0];
for label in &labels[1..] {
if value / interval >= 1.0 {
chosen_label = label;
value /= interval;
} else {
break;
}
}
// 2.x
let mut value = format!("{:.1}", value);
// 2.0 -> 2
if value.ends_with(".0") {
value.truncate(value.len() - 2);
}
format!("{} {}", value, chosen_label)
}
#[cfg(test)]
mod test {
use super::*;
use crate::test::*;
#[test]
fn retrieve_limits() {
wrapper(|env| {
let db = env.db();
let krate = "hexponent";
// limits work if no crate has limits set
let hexponent = Limits::for_crate(&db.conn(), krate)?;
assert_eq!(hexponent, Limits::default());
db.conn().query(
"INSERT INTO sandbox_overrides (crate_name, max_targets) VALUES ($1, 15)",
&[&krate],
)?;
// limits work if crate has limits set
let hexponent = Limits::for_crate(&db.conn(), krate)?;
assert_eq!(
hexponent,
Limits {
targets: 15,
..Limits::default()
}
);
// all limits work
let krate = "regex";
let limits = Limits {
memory: 100_000,
timeout: Duration::from_secs(300),
targets: 1,
..Limits::default()
};
db.conn().query(
"INSERT INTO sandbox_overrides (crate_name, max_memory_bytes, timeout_seconds, max_targets)
VALUES ($1, $2, $3, $4)",
&[&krate, &(limits.memory as i64), &(limits.timeout.as_secs() as i32), &(limits.targets as i32)]
)?;
assert_eq!(limits, Limits::for_crate(&db.conn(), krate)?);
Ok(())
});
}
#[test]
fn display_limits() {
let limits = Limits {
memory: 102_400,
timeout: Duration::from_secs(300),
targets: 1,
..Limits::default()
};
let display = limits.for_website();
assert_eq!(display.get("Network access"), Some(&"blocked".into()));
assert_eq!(
display.get("Maximum size of a build log"),
Some(&"100 KB".into())
);
assert_eq!(
display.get("Maximum number of build targets"),
Some(&limits.targets.to_string())
);
assert_eq!(
display.get("Maximum rustdoc execution time"),
Some(&"5 minutes".into())
);
assert_eq!(display.get("Available RAM"), Some(&"100 KB".into()));
}
#[test]
fn scale_limits() {
// time
assert_eq!(TIME_SCALE(300), "5 minutes");
assert_eq!(TIME_SCALE(1), "1 seconds");
assert_eq!(TIME_SCALE(7200), "2 hours");
// size
assert_eq!(SIZE_SCALE(1), "1 bytes");
assert_eq!(SIZE_SCALE(100), "100 bytes");
assert_eq!(SIZE_SCALE(1024), "1 KB");
assert_eq!(SIZE_SCALE(10240), "10 KB");
assert_eq!(SIZE_SCALE(1_048_576), "1 MB");
assert_eq!(SIZE_SCALE(10_485_760), "10 MB");
assert_eq!(SIZE_SCALE(1_073_741_824), "1 GB");
assert_eq!(SIZE_SCALE(10_737_418_240), "10 GB");
assert_eq!(SIZE_SCALE(std::u32::MAX as usize), "4 GB");
// fractional sizes
assert_eq!(TIME_SCALE(90), "1.5 minutes");
assert_eq!(TIME_SCALE(5400), "1.5 hours");
assert_eq!(SIZE_SCALE(1_288_490_189), "1.2 GB");
assert_eq!(SIZE_SCALE(3_758_096_384), "3.5 GB");
assert_eq!(SIZE_SCALE(1_048_051_712), "999.5 MB");
}
}
| for_crate | identifier_name |
sqlite_utilities.py | #!/usr/bin/env python3
# This script connects to and performs queries on an SQLite database using Python.
# Jen Thomas, Oct 2016.
#########################################################
import sqlite3
import shutil
def connect_to_sqlite_db(sqlite_file):
""" Connect to an SQLite database. Return a connection."""
try:
conn = sqlite3.connect(sqlite_file)
cur = conn.cursor()
return conn, cur
except:
print()
return None
def close_sqlite_connection(conn):
""" Close the connection to an SQLite database."""
conn.close()
def sql_get_data_colnames(cur, sql, column_names):
"""Perform an SQL command to get data from an SQL database. Return data in a list of dictionaries with column headers as keys and their associated values."""
print(sql)
sql = sql.replace('{column_names}', ",".join(column_names))
cur.execute(sql)
all_rows = cur.fetchall()
data = []
for row in all_rows:
d={}
i=0
for column_name in column_names:
d[column_name] = row[i]
i=i+1
data.append(d)
return data
def get_data(conn, cur, sql):
'''Get data from a database using the connection and cursor and defined sql.Output the data in a python list.'''
cur.execute(sql)
all_rows = cur.fetchall()
return all_rows
def copy_sqlite_file(original_sqlite_file, destination_sqlite_file):
'''Creates a copy of an sqlite file. Outputs an exact copy as an sqlite file.'''
shutil.copy(original_sqlite_file, destination_sqlite_file)
def execute_sql(conn, cur, sql):
''' Execute some sql on the database.'''
try:
cur.execute(sql)
except:
print()
def main():
|
if __name__ == '__main__':
main()
| sqlite_file = '/home/jen/projects/smelly_london/git/smelly_london/database'
column_names = ['category', 'location', 'number_of_smells', 'centroid_lat', 'centroid_lon', 'id', 'year', 'sentence']
sql = 'select {column_names} from (select Category category, Borough location, Id id, Year year, Sentence sentence, count(*) number_of_smells from smells group by Category, Borough having Year = "1904") join locations on location = name;'
conn, cur = connect_to_sqlite_db(sqlite_file)
data = sql_get_data_colnames(cur, sql, column_names)
close_sqlite_connection(conn)
return data | identifier_body |
sqlite_utilities.py | #!/usr/bin/env python3
# This script connects to and performs queries on an SQLite database using Python.
# Jen Thomas, Oct 2016.
#########################################################
import sqlite3
import shutil | conn = sqlite3.connect(sqlite_file)
cur = conn.cursor()
return conn, cur
except:
print()
return None
def close_sqlite_connection(conn):
""" Close the connection to an SQLite database."""
conn.close()
def sql_get_data_colnames(cur, sql, column_names):
"""Perform an SQL command to get data from an SQL database. Return data in a list of dictionaries with column headers as keys and their associated values."""
print(sql)
sql = sql.replace('{column_names}', ",".join(column_names))
cur.execute(sql)
all_rows = cur.fetchall()
data = []
for row in all_rows:
d={}
i=0
for column_name in column_names:
d[column_name] = row[i]
i=i+1
data.append(d)
return data
def get_data(conn, cur, sql):
'''Get data from a database using the connection and cursor and defined sql.Output the data in a python list.'''
cur.execute(sql)
all_rows = cur.fetchall()
return all_rows
def copy_sqlite_file(original_sqlite_file, destination_sqlite_file):
'''Creates a copy of an sqlite file. Outputs an exact copy as an sqlite file.'''
shutil.copy(original_sqlite_file, destination_sqlite_file)
def execute_sql(conn, cur, sql):
''' Execute some sql on the database.'''
try:
cur.execute(sql)
except:
print()
def main():
sqlite_file = '/home/jen/projects/smelly_london/git/smelly_london/database'
column_names = ['category', 'location', 'number_of_smells', 'centroid_lat', 'centroid_lon', 'id', 'year', 'sentence']
sql = 'select {column_names} from (select Category category, Borough location, Id id, Year year, Sentence sentence, count(*) number_of_smells from smells group by Category, Borough having Year = "1904") join locations on location = name;'
conn, cur = connect_to_sqlite_db(sqlite_file)
data = sql_get_data_colnames(cur, sql, column_names)
close_sqlite_connection(conn)
return data
if __name__ == '__main__':
main() |
def connect_to_sqlite_db(sqlite_file):
""" Connect to an SQLite database. Return a connection."""
try: | random_line_split |
sqlite_utilities.py | #!/usr/bin/env python3
# This script connects to and performs queries on an SQLite database using Python.
# Jen Thomas, Oct 2016.
#########################################################
import sqlite3
import shutil
def connect_to_sqlite_db(sqlite_file):
""" Connect to an SQLite database. Return a connection."""
try:
conn = sqlite3.connect(sqlite_file)
cur = conn.cursor()
return conn, cur
except:
print()
return None
def close_sqlite_connection(conn):
""" Close the connection to an SQLite database."""
conn.close()
def sql_get_data_colnames(cur, sql, column_names):
"""Perform an SQL command to get data from an SQL database. Return data in a list of dictionaries with column headers as keys and their associated values."""
print(sql)
sql = sql.replace('{column_names}', ",".join(column_names))
cur.execute(sql)
all_rows = cur.fetchall()
data = []
for row in all_rows:
d={}
i=0
for column_name in column_names:
d[column_name] = row[i]
i=i+1
data.append(d)
return data
def get_data(conn, cur, sql):
'''Get data from a database using the connection and cursor and defined sql.Output the data in a python list.'''
cur.execute(sql)
all_rows = cur.fetchall()
return all_rows
def copy_sqlite_file(original_sqlite_file, destination_sqlite_file):
'''Creates a copy of an sqlite file. Outputs an exact copy as an sqlite file.'''
shutil.copy(original_sqlite_file, destination_sqlite_file)
def execute_sql(conn, cur, sql):
''' Execute some sql on the database.'''
try:
cur.execute(sql)
except:
print()
def | ():
sqlite_file = '/home/jen/projects/smelly_london/git/smelly_london/database'
column_names = ['category', 'location', 'number_of_smells', 'centroid_lat', 'centroid_lon', 'id', 'year', 'sentence']
sql = 'select {column_names} from (select Category category, Borough location, Id id, Year year, Sentence sentence, count(*) number_of_smells from smells group by Category, Borough having Year = "1904") join locations on location = name;'
conn, cur = connect_to_sqlite_db(sqlite_file)
data = sql_get_data_colnames(cur, sql, column_names)
close_sqlite_connection(conn)
return data
if __name__ == '__main__':
main()
| main | identifier_name |
sqlite_utilities.py | #!/usr/bin/env python3
# This script connects to and performs queries on an SQLite database using Python.
# Jen Thomas, Oct 2016.
#########################################################
import sqlite3
import shutil
def connect_to_sqlite_db(sqlite_file):
""" Connect to an SQLite database. Return a connection."""
try:
conn = sqlite3.connect(sqlite_file)
cur = conn.cursor()
return conn, cur
except:
print()
return None
def close_sqlite_connection(conn):
""" Close the connection to an SQLite database."""
conn.close()
def sql_get_data_colnames(cur, sql, column_names):
"""Perform an SQL command to get data from an SQL database. Return data in a list of dictionaries with column headers as keys and their associated values."""
print(sql)
sql = sql.replace('{column_names}', ",".join(column_names))
cur.execute(sql)
all_rows = cur.fetchall()
data = []
for row in all_rows:
d={}
i=0
for column_name in column_names:
d[column_name] = row[i]
i=i+1
data.append(d)
return data
def get_data(conn, cur, sql):
'''Get data from a database using the connection and cursor and defined sql.Output the data in a python list.'''
cur.execute(sql)
all_rows = cur.fetchall()
return all_rows
def copy_sqlite_file(original_sqlite_file, destination_sqlite_file):
'''Creates a copy of an sqlite file. Outputs an exact copy as an sqlite file.'''
shutil.copy(original_sqlite_file, destination_sqlite_file)
def execute_sql(conn, cur, sql):
''' Execute some sql on the database.'''
try:
cur.execute(sql)
except:
print()
def main():
sqlite_file = '/home/jen/projects/smelly_london/git/smelly_london/database'
column_names = ['category', 'location', 'number_of_smells', 'centroid_lat', 'centroid_lon', 'id', 'year', 'sentence']
sql = 'select {column_names} from (select Category category, Borough location, Id id, Year year, Sentence sentence, count(*) number_of_smells from smells group by Category, Borough having Year = "1904") join locations on location = name;'
conn, cur = connect_to_sqlite_db(sqlite_file)
data = sql_get_data_colnames(cur, sql, column_names)
close_sqlite_connection(conn)
return data
if __name__ == '__main__':
| main() | conditional_block |
|
BorjesProtoLattice.js | "use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();
var _react = require('react');
var _react2 = _interopRequireDefault(_react);
var _borjes = require('borjes');
var _borjes2 = _interopRequireDefault(_borjes);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
function | (self, call) { if (!self) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return call && (typeof call === "object" || typeof call === "function") ? call : self; }
function _inherits(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }
var BorjesProtoLattice = function (_React$Component) {
_inherits(BorjesProtoLattice, _React$Component);
function BorjesProtoLattice() {
_classCallCheck(this, BorjesProtoLattice);
return _possibleConstructorReturn(this, (BorjesProtoLattice.__proto__ || Object.getPrototypeOf(BorjesProtoLattice)).apply(this, arguments));
}
_createClass(BorjesProtoLattice, [{
key: 'update',
value: function update(el, sub) {
var x = this.props.x;
if (sub !== null && Object.keys(sub).length == 0) {
sub = null;
}
x[el] = sub;
this.props.update(x);
}
}, {
key: 'addEl',
value: function addEl() {
var el = this.newEl.value;
var x = this.props.x;
if (el.length > 0 && x[el] === undefined) {
x[el] = null;
this.props.update(x);
}
}
}, {
key: 'remEl',
value: function remEl(el) {
var x = this.props.x;
delete x[el];
this.props.update(x);
}
}, {
key: 'cpEl',
value: function cpEl(el) {
this.props.opts.cpbuffer.v = {
borjes: 'latticeel',
l: this.props.name || this.props.opts.name,
e: el
};
}
}, {
key: 'render',
value: function render() {
var _this2 = this;
var x = this.props.x;
var opts = this.props.opts;
opts.cpbuffer = this.props.cpbuffer || opts.cpbuffer || {};
opts.name = this.props.name || opts.name;
var contStyle = {
display: 'flex',
flexDirection: 'column'
};
var rowStyle = {
display: 'flex',
flexDirection: 'row',
alignItems: 'center'
};
if (!opts.child) {
opts.child = true;
opts.refs = {};
return _react2.default.createElement(
'span',
{ style: rowStyle },
'\u22A4\u2014',
_react2.default.createElement(BorjesProtoLattice, { x: x, opts: opts, update: this.props.update })
);
}
var i = 0;
return _react2.default.createElement(
'span',
{ style: contStyle, className: 'borjes borjes_latticeproto' },
Object.keys(x).map(function (k) {
var first = !opts.refs[k];
opts.refs[k] = true;
var after = first && (opts.editable || x[k] !== null);
return _react2.default.createElement(
'span',
{ key: k + i++, style: rowStyle },
opts.editable ? _react2.default.createElement(
'button',
{ className: 'small', onClick: _this2.remEl.bind(_this2, k) },
'x'
) : null,
opts.editable && opts.name ? _react2.default.createElement(
'button',
{ className: 'small', onClick: _this2.cpEl.bind(_this2, k) },
'c'
) : null,
_react2.default.createElement(
'span',
{ className: first ? "borjes_lpel_first" : "borjes_lpel_ref" },
k
),
after ? '—' : null,
after ? _react2.default.createElement(BorjesProtoLattice, { x: x[k] || {}, opts: opts, update: _this2.update.bind(_this2, k) }) : null
);
}),
opts.editable ? _react2.default.createElement(
'span',
null,
_react2.default.createElement('input', { ref: function ref(d) {
return _this2.newEl = d;
}, type: 'text' }),
_react2.default.createElement(
'button',
{ onClick: this.addEl.bind(this) },
'+'
)
) : null
);
}
}]);
return BorjesProtoLattice;
}(_react2.default.Component);
exports.default = BorjesProtoLattice;
| _possibleConstructorReturn | identifier_name |
BorjesProtoLattice.js | "use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();
var _react = require('react');
var _react2 = _interopRequireDefault(_react);
var _borjes = require('borjes');
var _borjes2 = _interopRequireDefault(_borjes);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
function _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return call && (typeof call === "object" || typeof call === "function") ? call : self; }
function _inherits(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }
var BorjesProtoLattice = function (_React$Component) {
_inherits(BorjesProtoLattice, _React$Component);
function BorjesProtoLattice() {
_classCallCheck(this, BorjesProtoLattice);
return _possibleConstructorReturn(this, (BorjesProtoLattice.__proto__ || Object.getPrototypeOf(BorjesProtoLattice)).apply(this, arguments));
}
_createClass(BorjesProtoLattice, [{
key: 'update',
value: function update(el, sub) {
var x = this.props.x;
if (sub !== null && Object.keys(sub).length == 0) {
sub = null;
}
x[el] = sub;
this.props.update(x);
}
}, {
key: 'addEl',
value: function addEl() {
var el = this.newEl.value;
var x = this.props.x;
if (el.length > 0 && x[el] === undefined) {
x[el] = null;
this.props.update(x);
}
}
}, {
key: 'remEl',
value: function remEl(el) {
var x = this.props.x; | delete x[el];
this.props.update(x);
}
}, {
key: 'cpEl',
value: function cpEl(el) {
this.props.opts.cpbuffer.v = {
borjes: 'latticeel',
l: this.props.name || this.props.opts.name,
e: el
};
}
}, {
key: 'render',
value: function render() {
var _this2 = this;
var x = this.props.x;
var opts = this.props.opts;
opts.cpbuffer = this.props.cpbuffer || opts.cpbuffer || {};
opts.name = this.props.name || opts.name;
var contStyle = {
display: 'flex',
flexDirection: 'column'
};
var rowStyle = {
display: 'flex',
flexDirection: 'row',
alignItems: 'center'
};
if (!opts.child) {
opts.child = true;
opts.refs = {};
return _react2.default.createElement(
'span',
{ style: rowStyle },
'\u22A4\u2014',
_react2.default.createElement(BorjesProtoLattice, { x: x, opts: opts, update: this.props.update })
);
}
var i = 0;
return _react2.default.createElement(
'span',
{ style: contStyle, className: 'borjes borjes_latticeproto' },
Object.keys(x).map(function (k) {
var first = !opts.refs[k];
opts.refs[k] = true;
var after = first && (opts.editable || x[k] !== null);
return _react2.default.createElement(
'span',
{ key: k + i++, style: rowStyle },
opts.editable ? _react2.default.createElement(
'button',
{ className: 'small', onClick: _this2.remEl.bind(_this2, k) },
'x'
) : null,
opts.editable && opts.name ? _react2.default.createElement(
'button',
{ className: 'small', onClick: _this2.cpEl.bind(_this2, k) },
'c'
) : null,
_react2.default.createElement(
'span',
{ className: first ? "borjes_lpel_first" : "borjes_lpel_ref" },
k
),
after ? '—' : null,
after ? _react2.default.createElement(BorjesProtoLattice, { x: x[k] || {}, opts: opts, update: _this2.update.bind(_this2, k) }) : null
);
}),
opts.editable ? _react2.default.createElement(
'span',
null,
_react2.default.createElement('input', { ref: function ref(d) {
return _this2.newEl = d;
}, type: 'text' }),
_react2.default.createElement(
'button',
{ onClick: this.addEl.bind(this) },
'+'
)
) : null
);
}
}]);
return BorjesProtoLattice;
}(_react2.default.Component);
exports.default = BorjesProtoLattice; | random_line_split |
|
BorjesProtoLattice.js | "use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
var _createClass = function () { function defineProperties(target, props) | return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();
var _react = require('react');
var _react2 = _interopRequireDefault(_react);
var _borjes = require('borjes');
var _borjes2 = _interopRequireDefault(_borjes);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
function _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return call && (typeof call === "object" || typeof call === "function") ? call : self; }
function _inherits(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }
var BorjesProtoLattice = function (_React$Component) {
_inherits(BorjesProtoLattice, _React$Component);
function BorjesProtoLattice() {
_classCallCheck(this, BorjesProtoLattice);
return _possibleConstructorReturn(this, (BorjesProtoLattice.__proto__ || Object.getPrototypeOf(BorjesProtoLattice)).apply(this, arguments));
}
_createClass(BorjesProtoLattice, [{
key: 'update',
value: function update(el, sub) {
var x = this.props.x;
if (sub !== null && Object.keys(sub).length == 0) {
sub = null;
}
x[el] = sub;
this.props.update(x);
}
}, {
key: 'addEl',
value: function addEl() {
var el = this.newEl.value;
var x = this.props.x;
if (el.length > 0 && x[el] === undefined) {
x[el] = null;
this.props.update(x);
}
}
}, {
key: 'remEl',
value: function remEl(el) {
var x = this.props.x;
delete x[el];
this.props.update(x);
}
}, {
key: 'cpEl',
value: function cpEl(el) {
this.props.opts.cpbuffer.v = {
borjes: 'latticeel',
l: this.props.name || this.props.opts.name,
e: el
};
}
}, {
key: 'render',
value: function render() {
var _this2 = this;
var x = this.props.x;
var opts = this.props.opts;
opts.cpbuffer = this.props.cpbuffer || opts.cpbuffer || {};
opts.name = this.props.name || opts.name;
var contStyle = {
display: 'flex',
flexDirection: 'column'
};
var rowStyle = {
display: 'flex',
flexDirection: 'row',
alignItems: 'center'
};
if (!opts.child) {
opts.child = true;
opts.refs = {};
return _react2.default.createElement(
'span',
{ style: rowStyle },
'\u22A4\u2014',
_react2.default.createElement(BorjesProtoLattice, { x: x, opts: opts, update: this.props.update })
);
}
var i = 0;
return _react2.default.createElement(
'span',
{ style: contStyle, className: 'borjes borjes_latticeproto' },
Object.keys(x).map(function (k) {
var first = !opts.refs[k];
opts.refs[k] = true;
var after = first && (opts.editable || x[k] !== null);
return _react2.default.createElement(
'span',
{ key: k + i++, style: rowStyle },
opts.editable ? _react2.default.createElement(
'button',
{ className: 'small', onClick: _this2.remEl.bind(_this2, k) },
'x'
) : null,
opts.editable && opts.name ? _react2.default.createElement(
'button',
{ className: 'small', onClick: _this2.cpEl.bind(_this2, k) },
'c'
) : null,
_react2.default.createElement(
'span',
{ className: first ? "borjes_lpel_first" : "borjes_lpel_ref" },
k
),
after ? '—' : null,
after ? _react2.default.createElement(BorjesProtoLattice, { x: x[k] || {}, opts: opts, update: _this2.update.bind(_this2, k) }) : null
);
}),
opts.editable ? _react2.default.createElement(
'span',
null,
_react2.default.createElement('input', { ref: function ref(d) {
return _this2.newEl = d;
}, type: 'text' }),
_react2.default.createElement(
'button',
{ onClick: this.addEl.bind(this) },
'+'
)
) : null
);
}
}]);
return BorjesProtoLattice;
}(_react2.default.Component);
exports.default = BorjesProtoLattice;
| { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } | identifier_body |
costMemory.py | # -*- coding: utf-8 -*-
"""
@file costMemory.py
@author Jakob Erdmann
@author Michael Behrisch
@date 2012-03-14
@version $Id: costMemory.py 22608 2017-01-17 06:28:54Z behrisch $
Perform smoothing of edge costs across successive iterations of duaIterate
SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/
Copyright (C) 2012-2017 DLR (http://www.dlr.de/) and contributors
This file is part of SUMO.
SUMO is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
"""
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
from collections import defaultdict
from xml.sax import saxutils, make_parser, handler
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from sumolib.net import readNet
class EdgeMemory:
def __init__(self, cost):
self.cost = cost
self.seen = True
def update(self, cost, memory_weight, new_weight, pessimism):
p = (cost / self.cost) ** pessimism if self.cost > 0 else 1
memory_factor = memory_weight / (memory_weight + new_weight * p)
self.cost = self.cost * memory_factor + cost * (1 - memory_factor)
self.seen = True
class | (handler.ContentHandler):
# memorize the weighted average of edge costs
def __init__(self, cost_attribute, pessimism=0, network_file=None):
# the cost attribute to parse (i.e. 'traveltime')
self.cost_attribute = cost_attribute.encode('utf8')
# the duaIterate iteration index
self.iteration = None
# the main data store: for every interval and edge id we store costs and
# whether data was seen in the last call of load_costs()
# start -> (edge_id -> EdgeMemory)
self.intervals = defaultdict(dict)
# the intervall length (only known for certain if multiple intervals
# have been seen)
self.interval_length = 214748 # SUMOTIME_MAXSTRING
# the intervall currently being parsed
self.current_interval = None
# the combined weigth of all previously loaded costs
self.memory_weight = 0.0
# update is done according to: memory * memory_factor + new * (1 -
# memory_factor)
self.memory_factor = None
# differences between the previously loaded costs and the memorized
# costs
self.errors = None
# some statistics
self.num_loaded = 0
self.num_decayed = 0
# travel times without obstructing traffic
# XXX could use the minimum known traveltime
self.traveltime_free = defaultdict(lambda: 0)
if network_file is not None:
# build a map of default weights for decaying edges assuming the
# attribute is traveltime
self.traveltime_free = dict([(e.getID(), e.getLength() / e.getSpeed())
for e in readNet(network_file).getEdges()])
self.pessimism = pessimism
def startElement(self, name, attrs):
if name == 'interval':
self.current_interval = self.intervals[float(attrs['begin'])]
if name == 'edge':
id = attrs['id']
# may be missing for some
if self.cost_attribute.decode('utf-8') in attrs:
self.num_loaded += 1
cost = float(attrs[self.cost_attribute.decode('utf-8')])
if id in self.current_interval:
edgeMemory = self.current_interval[id]
self.errors.append(edgeMemory.cost - cost)
edgeMemory.update(
cost, self.memory_weight, self.new_weight, self.pessimism)
# if id == "4.3to4.4":
# with open('debuglog', 'a') as f:
# print(self.memory_factor, edgeMemory.cost, file=f)
else:
self.errors.append(0)
self.current_interval[id] = EdgeMemory(cost)
def load_costs(self, dumpfile, iteration, weight):
# load costs from dumpfile and update memory according to weight and
# iteration
if weight <= 0:
sys.stderr.write(
"Skipped loading of costs because the weight was %s but should have been > 0\n" % weight)
return
assert(weight > 0)
if self.iteration == None and iteration != 0:
print("Warning: continuing with empty memory")
# update memory weights. memory is a weighted average across all runs
self.new_weight = float(weight)
self.iteration = iteration
self.errors = []
# mark all edges as unseen
for edges in self.intervals.values():
for edgeMemory in edges.values():
edgeMemory.seen = False
# parse costs
self.num_loaded = 0
parser = make_parser()
parser.setContentHandler(self)
parser.parse(dumpfile)
# decay costs of unseen edges
self.num_decayed = 0
for edges in self.intervals.values():
for id, edgeMemory in edges.items():
if not edgeMemory.seen:
edgeMemory.update(
self.traveltime_free[id], self.memory_weight, self.new_weight, self.pessimism)
self.num_decayed += 1
# if id == "4.3to4.4":
# with open('debuglog', 'a') as f:
# print(self.memory_factor, 'decay', edgeMemory.cost, file=f)
# figure out the interval length
if len(self.intervals.keys()) > 1:
sorted_begin_times = sorted(self.intervals.keys())
self.interval_length = sorted_begin_times[
1] - sorted_begin_times[0]
self.memory_weight += self.new_weight
def write_costs(self, weight_file):
with open(weight_file, 'w') as f:
f.write('<netstats>\n')
for start, edge_costs in self.intervals.items():
f.write(' <interval begin="%d" end="%d">\n' %
(start, start + self.interval_length))
for id, edgeMemory in edge_costs.items():
f.write(' <edge id="%s" %s="%s"/>\n' %
(id, self.cost_attribute.decode('utf-8'), edgeMemory.cost))
f.write(' </interval>\n')
f.write('</netstats>\n')
def avg_error(self, values=None):
if not values:
values = self.errors
l = len(list(values))
if l > 0:
return (sum(list(values)) / l)
else:
return 0
def avg_abs_error(self):
return self.avg_error(list(map(abs, self.errors)))
def mean_error(self, values=None):
if not values:
values = self.errors
values.sort()
if values:
return values[len(values) // 2]
def mean_abs_error(self):
return self.mean_error(list(map(abs, self.errors)))
def loaded(self):
return self.num_loaded
def decayed(self):
return self.num_decayed
| CostMemory | identifier_name |
costMemory.py | # -*- coding: utf-8 -*-
"""
@file costMemory.py
@author Jakob Erdmann
@author Michael Behrisch
@date 2012-03-14
@version $Id: costMemory.py 22608 2017-01-17 06:28:54Z behrisch $
Perform smoothing of edge costs across successive iterations of duaIterate
SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/
Copyright (C) 2012-2017 DLR (http://www.dlr.de/) and contributors
This file is part of SUMO.
SUMO is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
"""
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
from collections import defaultdict
from xml.sax import saxutils, make_parser, handler
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from sumolib.net import readNet
class EdgeMemory:
def __init__(self, cost):
self.cost = cost
self.seen = True
def update(self, cost, memory_weight, new_weight, pessimism):
p = (cost / self.cost) ** pessimism if self.cost > 0 else 1
memory_factor = memory_weight / (memory_weight + new_weight * p)
self.cost = self.cost * memory_factor + cost * (1 - memory_factor)
self.seen = True
class CostMemory(handler.ContentHandler):
# memorize the weighted average of edge costs
def __init__(self, cost_attribute, pessimism=0, network_file=None):
# the cost attribute to parse (i.e. 'traveltime')
self.cost_attribute = cost_attribute.encode('utf8')
# the duaIterate iteration index
self.iteration = None
# the main data store: for every interval and edge id we store costs and
# whether data was seen in the last call of load_costs()
# start -> (edge_id -> EdgeMemory)
self.intervals = defaultdict(dict)
# the intervall length (only known for certain if multiple intervals
# have been seen)
self.interval_length = 214748 # SUMOTIME_MAXSTRING
# the intervall currently being parsed
self.current_interval = None
# the combined weigth of all previously loaded costs
self.memory_weight = 0.0
# update is done according to: memory * memory_factor + new * (1 -
# memory_factor)
self.memory_factor = None
# differences between the previously loaded costs and the memorized
# costs
self.errors = None
# some statistics
self.num_loaded = 0
self.num_decayed = 0
# travel times without obstructing traffic
# XXX could use the minimum known traveltime
self.traveltime_free = defaultdict(lambda: 0)
if network_file is not None:
# build a map of default weights for decaying edges assuming the
# attribute is traveltime
|
self.pessimism = pessimism
def startElement(self, name, attrs):
if name == 'interval':
self.current_interval = self.intervals[float(attrs['begin'])]
if name == 'edge':
id = attrs['id']
# may be missing for some
if self.cost_attribute.decode('utf-8') in attrs:
self.num_loaded += 1
cost = float(attrs[self.cost_attribute.decode('utf-8')])
if id in self.current_interval:
edgeMemory = self.current_interval[id]
self.errors.append(edgeMemory.cost - cost)
edgeMemory.update(
cost, self.memory_weight, self.new_weight, self.pessimism)
# if id == "4.3to4.4":
# with open('debuglog', 'a') as f:
# print(self.memory_factor, edgeMemory.cost, file=f)
else:
self.errors.append(0)
self.current_interval[id] = EdgeMemory(cost)
def load_costs(self, dumpfile, iteration, weight):
# load costs from dumpfile and update memory according to weight and
# iteration
if weight <= 0:
sys.stderr.write(
"Skipped loading of costs because the weight was %s but should have been > 0\n" % weight)
return
assert(weight > 0)
if self.iteration == None and iteration != 0:
print("Warning: continuing with empty memory")
# update memory weights. memory is a weighted average across all runs
self.new_weight = float(weight)
self.iteration = iteration
self.errors = []
# mark all edges as unseen
for edges in self.intervals.values():
for edgeMemory in edges.values():
edgeMemory.seen = False
# parse costs
self.num_loaded = 0
parser = make_parser()
parser.setContentHandler(self)
parser.parse(dumpfile)
# decay costs of unseen edges
self.num_decayed = 0
for edges in self.intervals.values():
for id, edgeMemory in edges.items():
if not edgeMemory.seen:
edgeMemory.update(
self.traveltime_free[id], self.memory_weight, self.new_weight, self.pessimism)
self.num_decayed += 1
# if id == "4.3to4.4":
# with open('debuglog', 'a') as f:
# print(self.memory_factor, 'decay', edgeMemory.cost, file=f)
# figure out the interval length
if len(self.intervals.keys()) > 1:
sorted_begin_times = sorted(self.intervals.keys())
self.interval_length = sorted_begin_times[
1] - sorted_begin_times[0]
self.memory_weight += self.new_weight
def write_costs(self, weight_file):
with open(weight_file, 'w') as f:
f.write('<netstats>\n')
for start, edge_costs in self.intervals.items():
f.write(' <interval begin="%d" end="%d">\n' %
(start, start + self.interval_length))
for id, edgeMemory in edge_costs.items():
f.write(' <edge id="%s" %s="%s"/>\n' %
(id, self.cost_attribute.decode('utf-8'), edgeMemory.cost))
f.write(' </interval>\n')
f.write('</netstats>\n')
def avg_error(self, values=None):
if not values:
values = self.errors
l = len(list(values))
if l > 0:
return (sum(list(values)) / l)
else:
return 0
def avg_abs_error(self):
return self.avg_error(list(map(abs, self.errors)))
def mean_error(self, values=None):
if not values:
values = self.errors
values.sort()
if values:
return values[len(values) // 2]
def mean_abs_error(self):
return self.mean_error(list(map(abs, self.errors)))
def loaded(self):
return self.num_loaded
def decayed(self):
return self.num_decayed
| self.traveltime_free = dict([(e.getID(), e.getLength() / e.getSpeed())
for e in readNet(network_file).getEdges()]) | conditional_block |
costMemory.py | # -*- coding: utf-8 -*-
"""
@file costMemory.py
@author Jakob Erdmann
@author Michael Behrisch
@date 2012-03-14
@version $Id: costMemory.py 22608 2017-01-17 06:28:54Z behrisch $
Perform smoothing of edge costs across successive iterations of duaIterate
SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/
Copyright (C) 2012-2017 DLR (http://www.dlr.de/) and contributors
This file is part of SUMO.
SUMO is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
"""
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
from collections import defaultdict
from xml.sax import saxutils, make_parser, handler
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from sumolib.net import readNet
class EdgeMemory:
def __init__(self, cost):
|
def update(self, cost, memory_weight, new_weight, pessimism):
p = (cost / self.cost) ** pessimism if self.cost > 0 else 1
memory_factor = memory_weight / (memory_weight + new_weight * p)
self.cost = self.cost * memory_factor + cost * (1 - memory_factor)
self.seen = True
class CostMemory(handler.ContentHandler):
# memorize the weighted average of edge costs
def __init__(self, cost_attribute, pessimism=0, network_file=None):
# the cost attribute to parse (i.e. 'traveltime')
self.cost_attribute = cost_attribute.encode('utf8')
# the duaIterate iteration index
self.iteration = None
# the main data store: for every interval and edge id we store costs and
# whether data was seen in the last call of load_costs()
# start -> (edge_id -> EdgeMemory)
self.intervals = defaultdict(dict)
# the intervall length (only known for certain if multiple intervals
# have been seen)
self.interval_length = 214748 # SUMOTIME_MAXSTRING
# the intervall currently being parsed
self.current_interval = None
# the combined weigth of all previously loaded costs
self.memory_weight = 0.0
# update is done according to: memory * memory_factor + new * (1 -
# memory_factor)
self.memory_factor = None
# differences between the previously loaded costs and the memorized
# costs
self.errors = None
# some statistics
self.num_loaded = 0
self.num_decayed = 0
# travel times without obstructing traffic
# XXX could use the minimum known traveltime
self.traveltime_free = defaultdict(lambda: 0)
if network_file is not None:
# build a map of default weights for decaying edges assuming the
# attribute is traveltime
self.traveltime_free = dict([(e.getID(), e.getLength() / e.getSpeed())
for e in readNet(network_file).getEdges()])
self.pessimism = pessimism
def startElement(self, name, attrs):
if name == 'interval':
self.current_interval = self.intervals[float(attrs['begin'])]
if name == 'edge':
id = attrs['id']
# may be missing for some
if self.cost_attribute.decode('utf-8') in attrs:
self.num_loaded += 1
cost = float(attrs[self.cost_attribute.decode('utf-8')])
if id in self.current_interval:
edgeMemory = self.current_interval[id]
self.errors.append(edgeMemory.cost - cost)
edgeMemory.update(
cost, self.memory_weight, self.new_weight, self.pessimism)
# if id == "4.3to4.4":
# with open('debuglog', 'a') as f:
# print(self.memory_factor, edgeMemory.cost, file=f)
else:
self.errors.append(0)
self.current_interval[id] = EdgeMemory(cost)
def load_costs(self, dumpfile, iteration, weight):
# load costs from dumpfile and update memory according to weight and
# iteration
if weight <= 0:
sys.stderr.write(
"Skipped loading of costs because the weight was %s but should have been > 0\n" % weight)
return
assert(weight > 0)
if self.iteration == None and iteration != 0:
print("Warning: continuing with empty memory")
# update memory weights. memory is a weighted average across all runs
self.new_weight = float(weight)
self.iteration = iteration
self.errors = []
# mark all edges as unseen
for edges in self.intervals.values():
for edgeMemory in edges.values():
edgeMemory.seen = False
# parse costs
self.num_loaded = 0
parser = make_parser()
parser.setContentHandler(self)
parser.parse(dumpfile)
# decay costs of unseen edges
self.num_decayed = 0
for edges in self.intervals.values():
for id, edgeMemory in edges.items():
if not edgeMemory.seen:
edgeMemory.update(
self.traveltime_free[id], self.memory_weight, self.new_weight, self.pessimism)
self.num_decayed += 1
# if id == "4.3to4.4":
# with open('debuglog', 'a') as f:
# print(self.memory_factor, 'decay', edgeMemory.cost, file=f)
# figure out the interval length
if len(self.intervals.keys()) > 1:
sorted_begin_times = sorted(self.intervals.keys())
self.interval_length = sorted_begin_times[
1] - sorted_begin_times[0]
self.memory_weight += self.new_weight
def write_costs(self, weight_file):
with open(weight_file, 'w') as f:
f.write('<netstats>\n')
for start, edge_costs in self.intervals.items():
f.write(' <interval begin="%d" end="%d">\n' %
(start, start + self.interval_length))
for id, edgeMemory in edge_costs.items():
f.write(' <edge id="%s" %s="%s"/>\n' %
(id, self.cost_attribute.decode('utf-8'), edgeMemory.cost))
f.write(' </interval>\n')
f.write('</netstats>\n')
def avg_error(self, values=None):
if not values:
values = self.errors
l = len(list(values))
if l > 0:
return (sum(list(values)) / l)
else:
return 0
def avg_abs_error(self):
return self.avg_error(list(map(abs, self.errors)))
def mean_error(self, values=None):
if not values:
values = self.errors
values.sort()
if values:
return values[len(values) // 2]
def mean_abs_error(self):
return self.mean_error(list(map(abs, self.errors)))
def loaded(self):
return self.num_loaded
def decayed(self):
return self.num_decayed
| self.cost = cost
self.seen = True | identifier_body |
costMemory.py | # -*- coding: utf-8 -*-
"""
@file costMemory.py
@author Jakob Erdmann
@author Michael Behrisch
@date 2012-03-14
@version $Id: costMemory.py 22608 2017-01-17 06:28:54Z behrisch $
Perform smoothing of edge costs across successive iterations of duaIterate
SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/
Copyright (C) 2012-2017 DLR (http://www.dlr.de/) and contributors
This file is part of SUMO.
SUMO is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
"""
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
from collections import defaultdict
from xml.sax import saxutils, make_parser, handler
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from sumolib.net import readNet
class EdgeMemory:
def __init__(self, cost):
self.cost = cost
self.seen = True
def update(self, cost, memory_weight, new_weight, pessimism):
p = (cost / self.cost) ** pessimism if self.cost > 0 else 1
memory_factor = memory_weight / (memory_weight + new_weight * p)
self.cost = self.cost * memory_factor + cost * (1 - memory_factor)
self.seen = True
class CostMemory(handler.ContentHandler):
# memorize the weighted average of edge costs
def __init__(self, cost_attribute, pessimism=0, network_file=None):
# the cost attribute to parse (i.e. 'traveltime')
self.cost_attribute = cost_attribute.encode('utf8')
# the duaIterate iteration index
self.iteration = None
# the main data store: for every interval and edge id we store costs and
# whether data was seen in the last call of load_costs()
# start -> (edge_id -> EdgeMemory)
self.intervals = defaultdict(dict)
# the intervall length (only known for certain if multiple intervals
# have been seen)
self.interval_length = 214748 # SUMOTIME_MAXSTRING
# the intervall currently being parsed
self.current_interval = None
# the combined weigth of all previously loaded costs
self.memory_weight = 0.0
# update is done according to: memory * memory_factor + new * (1 -
# memory_factor)
self.memory_factor = None
# differences between the previously loaded costs and the memorized
# costs
self.errors = None
# some statistics
self.num_loaded = 0
self.num_decayed = 0
# travel times without obstructing traffic
# XXX could use the minimum known traveltime
self.traveltime_free = defaultdict(lambda: 0)
if network_file is not None:
# build a map of default weights for decaying edges assuming the
# attribute is traveltime
self.traveltime_free = dict([(e.getID(), e.getLength() / e.getSpeed())
for e in readNet(network_file).getEdges()])
self.pessimism = pessimism
def startElement(self, name, attrs):
if name == 'interval':
self.current_interval = self.intervals[float(attrs['begin'])]
if name == 'edge':
id = attrs['id']
# may be missing for some
if self.cost_attribute.decode('utf-8') in attrs:
self.num_loaded += 1
cost = float(attrs[self.cost_attribute.decode('utf-8')])
if id in self.current_interval:
edgeMemory = self.current_interval[id]
self.errors.append(edgeMemory.cost - cost)
edgeMemory.update(
cost, self.memory_weight, self.new_weight, self.pessimism)
# if id == "4.3to4.4":
# with open('debuglog', 'a') as f:
# print(self.memory_factor, edgeMemory.cost, file=f)
else:
self.errors.append(0)
self.current_interval[id] = EdgeMemory(cost)
def load_costs(self, dumpfile, iteration, weight):
# load costs from dumpfile and update memory according to weight and
# iteration
if weight <= 0:
sys.stderr.write(
"Skipped loading of costs because the weight was %s but should have been > 0\n" % weight)
return
assert(weight > 0)
if self.iteration == None and iteration != 0:
print("Warning: continuing with empty memory")
# update memory weights. memory is a weighted average across all runs
self.new_weight = float(weight)
self.iteration = iteration
self.errors = []
# mark all edges as unseen
for edges in self.intervals.values():
for edgeMemory in edges.values():
edgeMemory.seen = False
# parse costs
self.num_loaded = 0
parser = make_parser()
parser.setContentHandler(self)
parser.parse(dumpfile)
# decay costs of unseen edges
self.num_decayed = 0
for edges in self.intervals.values():
for id, edgeMemory in edges.items():
if not edgeMemory.seen:
edgeMemory.update(
self.traveltime_free[id], self.memory_weight, self.new_weight, self.pessimism)
self.num_decayed += 1
# if id == "4.3to4.4":
# with open('debuglog', 'a') as f:
# print(self.memory_factor, 'decay', edgeMemory.cost, file=f)
# figure out the interval length | sorted_begin_times = sorted(self.intervals.keys())
self.interval_length = sorted_begin_times[
1] - sorted_begin_times[0]
self.memory_weight += self.new_weight
def write_costs(self, weight_file):
with open(weight_file, 'w') as f:
f.write('<netstats>\n')
for start, edge_costs in self.intervals.items():
f.write(' <interval begin="%d" end="%d">\n' %
(start, start + self.interval_length))
for id, edgeMemory in edge_costs.items():
f.write(' <edge id="%s" %s="%s"/>\n' %
(id, self.cost_attribute.decode('utf-8'), edgeMemory.cost))
f.write(' </interval>\n')
f.write('</netstats>\n')
def avg_error(self, values=None):
if not values:
values = self.errors
l = len(list(values))
if l > 0:
return (sum(list(values)) / l)
else:
return 0
def avg_abs_error(self):
return self.avg_error(list(map(abs, self.errors)))
def mean_error(self, values=None):
if not values:
values = self.errors
values.sort()
if values:
return values[len(values) // 2]
def mean_abs_error(self):
return self.mean_error(list(map(abs, self.errors)))
def loaded(self):
return self.num_loaded
def decayed(self):
return self.num_decayed | if len(self.intervals.keys()) > 1: | random_line_split |
modal_button.js | /**
* @fileOverview This file initializes the modal button (browser action).
* This file initializing the button's state on browser start and
* change button's state (badage color and text) when user toggles
* injection in popup.html.
*
* The text message on the button determines the operating mode.
* "off" indicates that the content script can be injected, but it
* doesn't execute any code.
*
* "on" indicates that the content script is injected, and should
* actively replace links on the user's whitelist.
*/
/*global chrome */
/*global Privly */
/**
* This function updates the badge text and background color of the
* browser action button (modal button) according to the parameter.
*
* @param {Boolean} enableInjection Whether injection is enabled
*/
function updateBrowserAction(enableInjection) |
// Subscribe to option changed events
chrome.runtime.onMessage.addListener(function (request) {
if (request.ask === 'options/changed') {
if (request.option === 'options/isInjectionEnabled') {
updateBrowserAction(request.newValue);
}
}
});
// Retrive the initial option value
updateBrowserAction(Privly.options.isInjectionEnabled());
| {
if (enableInjection) {
chrome.browserAction.setBadgeBackgroundColor({color: "#004F00"});
chrome.browserAction.setBadgeText({text: "on"});
} else {
chrome.browserAction.setBadgeBackgroundColor({color: "#FF0000"});
chrome.browserAction.setBadgeText({text: "off"});
}
} | identifier_body |
modal_button.js | /**
* @fileOverview This file initializes the modal button (browser action).
* This file initializing the button's state on browser start and
* change button's state (badage color and text) when user toggles
* injection in popup.html.
*
* The text message on the button determines the operating mode.
* "off" indicates that the content script can be injected, but it
* doesn't execute any code.
*
* "on" indicates that the content script is injected, and should
* actively replace links on the user's whitelist.
*/
/*global chrome */
/*global Privly */
/**
* This function updates the badge text and background color of the
* browser action button (modal button) according to the parameter.
*
* @param {Boolean} enableInjection Whether injection is enabled
*/
function | (enableInjection) {
if (enableInjection) {
chrome.browserAction.setBadgeBackgroundColor({color: "#004F00"});
chrome.browserAction.setBadgeText({text: "on"});
} else {
chrome.browserAction.setBadgeBackgroundColor({color: "#FF0000"});
chrome.browserAction.setBadgeText({text: "off"});
}
}
// Subscribe to option changed events
chrome.runtime.onMessage.addListener(function (request) {
if (request.ask === 'options/changed') {
if (request.option === 'options/isInjectionEnabled') {
updateBrowserAction(request.newValue);
}
}
});
// Retrive the initial option value
updateBrowserAction(Privly.options.isInjectionEnabled());
| updateBrowserAction | identifier_name |
modal_button.js | * This file initializing the button's state on browser start and
* change button's state (badage color and text) when user toggles
* injection in popup.html.
*
* The text message on the button determines the operating mode.
* "off" indicates that the content script can be injected, but it
* doesn't execute any code.
*
* "on" indicates that the content script is injected, and should
* actively replace links on the user's whitelist.
*/
/*global chrome */
/*global Privly */
/**
* This function updates the badge text and background color of the
* browser action button (modal button) according to the parameter.
*
* @param {Boolean} enableInjection Whether injection is enabled
*/
function updateBrowserAction(enableInjection) {
if (enableInjection) {
chrome.browserAction.setBadgeBackgroundColor({color: "#004F00"});
chrome.browserAction.setBadgeText({text: "on"});
} else {
chrome.browserAction.setBadgeBackgroundColor({color: "#FF0000"});
chrome.browserAction.setBadgeText({text: "off"});
}
}
// Subscribe to option changed events
chrome.runtime.onMessage.addListener(function (request) {
if (request.ask === 'options/changed') {
if (request.option === 'options/isInjectionEnabled') {
updateBrowserAction(request.newValue);
}
}
});
// Retrive the initial option value
updateBrowserAction(Privly.options.isInjectionEnabled()); | /**
* @fileOverview This file initializes the modal button (browser action). | random_line_split |
|
modal_button.js | /**
* @fileOverview This file initializes the modal button (browser action).
* This file initializing the button's state on browser start and
* change button's state (badage color and text) when user toggles
* injection in popup.html.
*
* The text message on the button determines the operating mode.
* "off" indicates that the content script can be injected, but it
* doesn't execute any code.
*
* "on" indicates that the content script is injected, and should
* actively replace links on the user's whitelist.
*/
/*global chrome */
/*global Privly */
/**
* This function updates the badge text and background color of the
* browser action button (modal button) according to the parameter.
*
* @param {Boolean} enableInjection Whether injection is enabled
*/
function updateBrowserAction(enableInjection) {
if (enableInjection) {
chrome.browserAction.setBadgeBackgroundColor({color: "#004F00"});
chrome.browserAction.setBadgeText({text: "on"});
} else {
chrome.browserAction.setBadgeBackgroundColor({color: "#FF0000"});
chrome.browserAction.setBadgeText({text: "off"});
}
}
// Subscribe to option changed events
chrome.runtime.onMessage.addListener(function (request) {
if (request.ask === 'options/changed') |
});
// Retrive the initial option value
updateBrowserAction(Privly.options.isInjectionEnabled());
| {
if (request.option === 'options/isInjectionEnabled') {
updateBrowserAction(request.newValue);
}
} | conditional_block |
Color.ts | const allColors = ["0072C6", "4617B4", "8C0095", "008A17", "D24726", "008299", "AC193D", "DC4FAD", "FF8F32", "82BA00", "03B3B2", "5DB2FF"];
const currentIterationColor = "#C1E6FF"; /*Pattens Blue*/
const otherIterationColors = ["#FFDAC1", "#E6FFC1", "#FFC1E6"]; /*Negroni, Chiffon, Cotton Candy*/
var interationCount = 0;
/**
* Generates a color from the specified name
* @param name String value used to generate a color
* @return RGB color in the form of #RRGGBB
*/
export function ge | ame: string): string {
if (name === "currentIteration") {
return currentIterationColor;
}
if (name === "otherIteration") {
return otherIterationColors[interationCount++ % otherIterationColors.length];
}
const id = name.slice().toLowerCase();
let value = 0;
for (let i = 0; i < (id || "").length; i++) {
value += id.charCodeAt(i) * (i + 1);
}
return "#" + allColors[value % allColors.length];
}
| nerateColor(n | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.