code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64)
---|---|---|---|---|---
parseStr = None
chunk1 = chunk2 = ''
m = self.ptc.CRE_DATE3.search(s)
# NO LONGER NEEDED, THE REGEXP HANDLED MTHNAME NOW
# for match in self.ptc.CRE_DATE3.finditer(s):
# to prevent "HH:MM(:SS) time strings" expressions from
# triggering this regex, we check whether the month field
# exists in the searched expression; if it doesn't,
# the date field is not valid
# if match.group('mthname'):
# m = self.ptc.CRE_DATE3.search(s, match.start())
# valid_date = True
# break
# String date format
if m is not None:
if (m.group('date') != s):
# capture remaining string
mStart = m.start('date')
mEnd = m.end('date')
# we need to check that anything following the parsed
# date is a time expression because it is often picked
# up as a valid year if the hour is 2 digits
fTime = False
mm = self.ptc.CRE_TIMEHMS2.search(s)
# "February 24th 1PM" doesn't get caught
# "February 24th 12PM" does
mYear = m.group('year')
if mm is not None and mYear is not None:
fTime = True
else:
# "February 24th 12:00"
mm = self.ptc.CRE_TIMEHMS.search(s)
if mm is not None and mYear is None:
fTime = True
if fTime:
hoursStart = mm.start('hours')
if hoursStart < m.end('year'):
mEnd = hoursStart
parseStr = s[mStart:mEnd]
chunk1 = s[:mStart]
chunk2 = s[mEnd:]
s = '%s %s' % (chunk1, chunk2)
else:
parseStr = s
s = ''
if parseStr:
debug and log.debug(
'found (date3) [%s][%s][%s]', parseStr, chunk1, chunk2)
sourceTime = self._evalDateStr(parseStr, sourceTime)
return s, sourceTime, bool(parseStr) | def _partialParseDateStr(self, s, sourceTime) | test whether the given C{s} matches CRE_DATE3; used by L{parse()}
@type s: string
@param s: date/time text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
@return: tuple of the remaining date/time text, the updated C{sourceTime} and
a boolean value describing whether a match was found | 5.08872 | 5.161032 | 0.985989 |
parseStr = None
chunk1 = chunk2 = ''
# Standard date format
m = self.ptc.CRE_DATE.search(s)
if m is not None:
if (m.group('date') != s):
# capture remaining string
parseStr = m.group('date')
chunk1 = s[:m.start('date')]
chunk2 = s[m.end('date'):]
s = '%s %s' % (chunk1, chunk2)
else:
parseStr = s
s = ''
if parseStr:
debug and log.debug(
'found (date) [%s][%s][%s]', parseStr, chunk1, chunk2)
sourceTime = self._evalDateStd(parseStr, sourceTime)
return s, sourceTime, bool(parseStr) | def _partialParseDateStd(self, s, sourceTime) | test whether the given C{s} matches CRE_DATE; used by L{parse()}
@type s: string
@param s: date/time text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
@return: tuple of the remaining date/time text, the updated C{sourceTime} and
a boolean value describing whether a match was found | 4.208643 | 4.418604 | 0.952483 |
parseStr = None
chunk1 = chunk2 = ''
# Natural language day strings
m = self.ptc.CRE_DAY.search(s)
if m is not None:
if (m.group() != s):
# capture remaining string
parseStr = m.group()
chunk1 = s[:m.start()]
chunk2 = s[m.end():]
s = '%s %s' % (chunk1, chunk2)
else:
parseStr = s
s = ''
if parseStr:
debug and log.debug(
'found (day) [%s][%s][%s]', parseStr, chunk1, chunk2)
sourceTime = self._evalDayStr(parseStr, sourceTime)
return s, sourceTime, bool(parseStr) | def _partialParseDayStr(self, s, sourceTime) | test whether the given C{s} matches CRE_DAY; used by L{parse()}
@type s: string
@param s: date/time text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
@return: tuple of the remaining date/time text, the updated C{sourceTime} and
a boolean value describing whether a match was found | 4.627971 | 4.804492 | 0.963259 |
parseStr = None
chunk1 = chunk2 = ''
ctx = self.currentContext
log.debug('eval %s with context - %s, %s', s, ctx.hasDate, ctx.hasTime)
# Weekday
m = self.ptc.CRE_WEEKDAY.search(s)
if m is not None:
gv = m.group()
if s not in self.ptc.dayOffsets:
if (gv != s):
# capture remaining string
parseStr = gv
chunk1 = s[:m.start()]
chunk2 = s[m.end():]
s = '%s %s' % (chunk1, chunk2)
else:
parseStr = s
s = ''
if parseStr and not ctx.hasDate:
debug and log.debug(
'found (weekday) [%s][%s][%s]', parseStr, chunk1, chunk2)
sourceTime = self._evalWeekday(parseStr, sourceTime)
return s, sourceTime, bool(parseStr) | def _partialParseWeekday(self, s, sourceTime) | test whether the given C{s} matches CRE_WEEKDAY; used by L{parse()}
@type s: string
@param s: date/time text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
@return: tuple of the remaining date/time text, the updated C{sourceTime} and
a boolean value describing whether a match was found | 5.454153 | 5.764758 | 0.94612 |
parseStr = None
chunk1 = chunk2 = ''
# Natural language time strings
m = self.ptc.CRE_TIME.search(s)
if m is not None or s in self.ptc.re_values['now']:
if (m and m.group() != s):
# capture remaining string
parseStr = m.group()
chunk1 = s[:m.start()]
chunk2 = s[m.end():]
s = '%s %s' % (chunk1, chunk2)
else:
parseStr = s
s = ''
if parseStr:
debug and log.debug(
'found (time) [%s][%s][%s]', parseStr, chunk1, chunk2)
sourceTime = self._evalTimeStr(parseStr, sourceTime)
return s, sourceTime, bool(parseStr) | def _partialParseTimeStr(self, s, sourceTime) | test whether the given C{s} matches CRE_TIME; used by L{parse()}
@type s: string
@param s: date/time text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
@return: tuple of the remaining date/time text, the updated C{sourceTime} and
a boolean value describing whether a match was found | 4.798473 | 5.150383 | 0.931673 |
parseStr = None
chunk1 = chunk2 = ''
# HH:MM(:SS) am/pm time strings
m = self.ptc.CRE_TIMEHMS2.search(s)
if m is not None:
if m.group('minutes') is not None:
if m.group('seconds') is not None:
parseStr = '%s:%s:%s' % (m.group('hours'),
m.group('minutes'),
m.group('seconds'))
else:
parseStr = '%s:%s' % (m.group('hours'),
m.group('minutes'))
else:
parseStr = m.group('hours')
parseStr += ' ' + m.group('meridian')
chunk1 = s[:m.start()]
chunk2 = s[m.end():]
s = '%s %s' % (chunk1, chunk2)
if parseStr:
debug and log.debug('found (meridian) [%s][%s][%s]',
parseStr, chunk1, chunk2)
sourceTime = self._evalMeridian(parseStr, sourceTime)
return s, sourceTime, bool(parseStr) | def _partialParseMeridian(self, s, sourceTime) | test whether the given C{s} matches CRE_TIMEHMS2; used by L{parse()}
@type s: string
@param s: date/time text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
@return: tuple of the remaining date/time text, the updated C{sourceTime} and
a boolean value describing whether a match was found | 3.061649 | 2.970278 | 1.030762 |
parseStr = None
chunk1 = chunk2 = ''
# HH:MM(:SS) time strings
m = self.ptc.CRE_TIMEHMS.search(s)
if m is not None:
if m.group('seconds') is not None:
parseStr = '%s:%s:%s' % (m.group('hours'),
m.group('minutes'),
m.group('seconds'))
chunk1 = s[:m.start('hours')]
chunk2 = s[m.end('seconds'):]
else:
parseStr = '%s:%s' % (m.group('hours'),
m.group('minutes'))
chunk1 = s[:m.start('hours')]
chunk2 = s[m.end('minutes'):]
s = '%s %s' % (chunk1, chunk2)
if parseStr:
debug and log.debug(
'found (hms) [%s][%s][%s]', parseStr, chunk1, chunk2)
sourceTime = self._evalTimeStd(parseStr, sourceTime)
return s, sourceTime, bool(parseStr) | def _partialParseTimeStd(self, s, sourceTime) | test whether the given C{s} matches CRE_TIMEHMS; used by L{parse()}
@type s: string
@param s: date/time text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
@return: tuple of the remaining date/time text, the updated C{sourceTime} and
a boolean value describing whether a match was found | 2.954661 | 2.934488 | 1.006875 |
# if sourceTime has a timetuple method, use that; otherwise, just pass
# the entire thing to parse and pray the user knows what the hell they
# are doing.
sourceTime = getattr(sourceTime, 'timetuple', (lambda: sourceTime))()
# You REALLY SHOULD be using pytz. Using localize if available,
# hacking if not. Note, None is a valid tzinfo object in the case of
# the ugly hack.
localize = getattr(
tzinfo,
'localize',
(lambda dt: dt.replace(tzinfo=tzinfo)), # ugly hack is ugly :(
)
# Punt
time_struct, ret_code = self.parse(
datetimeString,
sourceTime=sourceTime,
version=version)
# Comments from GitHub issues indicate that it is desired to have the
# same return signature on this method as the one it punts to, with the
# exception of using datetime objects instead of time_structs.
dt = localize(datetime.datetime(*time_struct[:6]))
return dt, ret_code | def parseDT(self, datetimeString, sourceTime=None,
tzinfo=None, version=None) | C{datetimeString} is the same as for C{.parse}; C{sourceTime} has the same semantic
meaning as in C{.parse}, but now also accepts datetime objects. C{tzinfo}
accepts a tzinfo object. It is advisable to use pytz.
@type datetimeString: string
@param datetimeString: date/time text to evaluate
@type sourceTime: struct_time, datetime, date, time
@param sourceTime: time value to use as the base
@type tzinfo: tzinfo
@param tzinfo: Timezone to apply to generated datetime objs.
@type version: integer
@param version: style version, default will use L{Calendar}
parameter version value
@rtype: tuple
@return: tuple of the modified C{sourceTime} and the result flag/context;
see C{.parse} for return code details. | 10.544245 | 10.03732 | 1.050504 |
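A minimal usage sketch for C{parseDT}, assuming the methods above live on parsedatetime's public C{Calendar} class; the input string and timezone name are illustrative:

```python
import datetime
import parsedatetime
import pytz  # optional; without localize(), parseDT falls back to tzinfo.replace()

cal = parsedatetime.Calendar()
dt, ret_code = cal.parseDT(
    'tomorrow at 6pm',
    sourceTime=datetime.datetime(2015, 5, 1),  # datetime objects are accepted here
    tzinfo=pytz.timezone('US/Pacific'),
)
# dt is a timezone-aware datetime; ret_code mirrors parse()'s flag/context
```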
debug and log.debug('parse()')
datetimeString = re.sub(r'(\w)\.(\s)', r'\1\2', datetimeString)
datetimeString = re.sub(r'(\w)[\'"](\s|$)', r'\1 \2', datetimeString)
datetimeString = re.sub(r'(\s|^)[\'"](\w)', r'\1 \2', datetimeString)
if sourceTime:
if isinstance(sourceTime, datetime.datetime):
debug and log.debug('coercing datetime to timetuple')
sourceTime = sourceTime.timetuple()
else:
if not isinstance(sourceTime, time.struct_time) and \
not isinstance(sourceTime, tuple):
raise ValueError('sourceTime is not a struct_time')
else:
sourceTime = time.localtime()
with self.context() as ctx:
s = datetimeString.lower().strip()
debug and log.debug('remainedString (before parsing): [%s]', s)
while s:
for parseMeth in (self._partialParseModifier,
self._partialParseUnits,
self._partialParseQUnits,
self._partialParseDateStr,
self._partialParseDateStd,
self._partialParseDayStr,
self._partialParseWeekday,
self._partialParseTimeStr,
self._partialParseMeridian,
self._partialParseTimeStd):
retS, retTime, matched = parseMeth(s, sourceTime)
if matched:
s, sourceTime = retS.strip(), retTime
break
else:
# nothing matched
s = ''
debug and log.debug('hasDate: [%s], hasTime: [%s]',
ctx.hasDate, ctx.hasTime)
debug and log.debug('remainedString: [%s]', s)
# String is not parsed at all
if sourceTime is None:
debug and log.debug('not parsed [%s]', str(sourceTime))
sourceTime = time.localtime()
if not isinstance(sourceTime, time.struct_time):
sourceTime = time.struct_time(sourceTime)
version = self.version if version is None else version
if version == VERSION_CONTEXT_STYLE:
return sourceTime, ctx
else:
return sourceTime, ctx.dateTimeFlag | def parse(self, datetimeString, sourceTime=None, version=None) | Splits the given C{datetimeString} into tokens, finds the regex
patterns that match and then calculates a C{struct_time} value from
the chunks.
If C{sourceTime} is given then the C{struct_time} value will be
calculated from that value, otherwise from the current date/time.
If the C{datetimeString} is parsed and a date/time value is found, then::
If C{version} equals L{VERSION_FLAG_STYLE}, the second item of
the returned tuple will be a flag to let you know what kind of
C{struct_time} value is being returned::
0 = not parsed at all
1 = parsed as a C{date}
2 = parsed as a C{time}
3 = parsed as a C{datetime}
If C{version} equals L{VERSION_CONTEXT_STYLE}, the second value
will be an instance of L{pdtContext}
@type datetimeString: string
@param datetimeString: date/time text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@type version: integer
@param version: style version, default will use L{Calendar}
parameter version value
@rtype: tuple
@return: tuple of the modified C{sourceTime} and the result flag/context | 3.340769 | 3.328357 | 1.003729 |
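A short sketch of the flag-style return described above, again assuming parsedatetime's public C{Calendar}:

```python
import parsedatetime

cal = parsedatetime.Calendar()
time_struct, flag = cal.parse('March 5th at noon')
# flag: 0 = not parsed, 1 = date, 2 = time, 3 = datetime
# here both a date and a time were found, so flag should be 3
```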
yr = source.year
mth = source.month
dy = source.day
try:
month = float(month)
except (TypeError, ValueError):
month = 0
try:
year = float(year)
except (TypeError, ValueError):
year = 0
finally:
month += year * 12
year = 0
subMi = 0.0
maxDay = 0
if month:
mi = int(month)
subMi = month - mi
y = int(mi / 12.0)
m = mi - y * 12
mth = mth + m
if mth < 1: # cross start-of-year?
y -= 1 # yes - decrement year
mth += 12 # and fix month
elif mth > 12: # cross end-of-year?
y += 1 # yes - increment year
mth -= 12 # and fix month
yr += y
# if the day ends up past the last day of
# the new month, set it to the last day
maxDay = self.ptc.daysInMonth(mth, yr)
if dy > maxDay:
dy = maxDay
if yr > datetime.MAXYEAR or yr < datetime.MINYEAR:
raise OverflowError('year is out of range')
d = source.replace(year=yr, month=mth, day=dy)
if subMi:
d += datetime.timedelta(days=subMi * maxDay)
return source + (d - source) | def inc(self, source, month=None, year=None) | Takes the given C{source} date and increments it
according to the values passed in
by month and/or year.
This routine is needed because Python's C{timedelta()} function
does not allow for month or year increments.
@type source: datetime
@param source: C{datetime} value to increment
@type month: float or integer
@param month: optional number of months to increment
@type year: float or integer
@param year: optional number of years to increment
@rtype: datetime
@return: C{source} incremented by the number of months and/or years | 3.185956 | 3.165676 | 1.006406 |
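A worked example of the day-clamping behaviour in C{inc}, reusing the C{cal} instance from the sketches above:

```python
import datetime

# Incrementing Jan 31 by one month: day 31 exceeds February's length,
# so it is clamped to the last day of the month.
d = cal.inc(datetime.datetime(2015, 1, 31), month=1)
# d == datetime.datetime(2015, 2, 28) -- 2015 is not a leap year
```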
result = None
debug and log.debug('daysInMonth(%s, %s)', month, year)
if month > 0 and month <= 12:
result = self._DaysInMonthList[month - 1]
if month == 2:
if year in self._leapYears:
result += 1
else:
if calendar.isleap(year):
self._leapYears.append(year)
result += 1
return result | def daysInMonth(self, month, year) | Take the given month (1-12) and year (4-digit) and return
the number of days in the month, adjusting for leap years as needed | 3.319904 | 3.204973 | 1.03586 |
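For example, assuming C{_DaysInMonthList} holds the usual month lengths and that this method lives on the constants object referenced as C{self.ptc} above:

```python
cal.ptc.daysInMonth(2, 2016)   # 29 -- 2016 is a leap year, so February gains a day
cal.ptc.daysInMonth(2, 2015)   # 28
cal.ptc.daysInMonth(13, 2015)  # None -- out-of-range months return None
```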
if sourceKey not in self.re_sources:
return None
if sourceTime is None:
(yr, mth, dy, hr, mn, sec, wd, yd, isdst) = time.localtime()
else:
(yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
defaults = {'yr': yr, 'mth': mth, 'dy': dy,
'hr': hr, 'mn': mn, 'sec': sec}
source = self.re_sources[sourceKey]
values = {}
for key, default in defaults.items():
values[key] = source.get(key, default)
return (values['yr'], values['mth'], values['dy'],
values['hr'], values['mn'], values['sec'],
wd, yd, isdst) | def getSource(self, sourceKey, sourceTime=None) | Return a date/time tuple based on the given source key
and the corresponding entry found in self.re_sources.
The current time is used as the default; any item specified
in self.re_sources overrides the corresponding default and
the resulting tuple is returned. | 1.877137 | 1.666808 | 1.126187 |
for acc in accuracy:
if not isinstance(acc, int):
acc = self._ACCURACY_REVERSE_MAPPING[acc]
self.accuracy |= acc | def updateAccuracy(self, *accuracy) | Updates current accuracy flag | 5.809827 | 5.423058 | 1.071319 |
if locale not in locales:
raise NotImplementedError("The locale '%s' is not supported" % locale)
if locale not in __locale_caches:
mod = __import__(__name__, fromlist=[locale], level=0)
__locale_caches[locale] = getattr(mod, locale)
return __locale_caches[locale] | def load_locale(locale, icu=False) | Return the data for the given locale
:param locale:
:return: | 3.011497 | 3.236603 | 0.93045 |
def wrapper(self, *args, **kwds):
func(self, *args, **kwds)
return Indent(self)
return wrapper | def indent(func) | Decorator that allows a method to be used as a normal method or as a
context manager for auto-indenting code blocks. | 3.12135 | 3.284201 | 0.950414 |
def resolve_path(schema, fragment):
fragment = fragment.lstrip('/')
parts = unquote(fragment).split('/') if fragment else []
for part in parts:
part = part.replace('~1', '/').replace('~0', '~')
if isinstance(schema, list):
schema = schema[int(part)]
elif part in schema:
schema = schema[part]
else:
raise JsonSchemaException('Unresolvable ref: {}'.format(part))
return schema | Return definition from path.
Path is unescaped according https://tools.ietf.org/html/rfc6901 | null | null | null |
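A small illustration of the RFC 6901 unescaping performed above (``~1`` decodes to ``/`` and ``~0`` to ``~``):

```python
schema = {'definitions': {'a/b': {'type': 'string'}}, 'items': [{'type': 'number'}]}

resolve_path(schema, '/definitions/a~1b')  # {'type': 'string'} -- '~1' decodes to '/'
resolve_path(schema, '/items/0')           # {'type': 'number'} -- list parts are indexed
```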
|
def resolve_remote(uri, handlers):
scheme = urlparse.urlsplit(uri).scheme
if scheme in handlers:
result = handlers[scheme](uri)
else:
req = urlopen(uri)
encoding = req.info().get_content_charset() or 'utf-8'
result = json.loads(req.read().decode(encoding),)
return result | Resolve a remote ``uri``.
.. note::
the urllib library is used to fetch the remote ``uri``
unless ``handlers`` defines otherwise. | null | null | null |
|
def from_schema(cls, schema, handlers={}, **kwargs):
return cls(
schema.get('$id', schema.get('id', '')) if isinstance(schema, dict) else '',
schema,
handlers=handlers,
**kwargs
) | Construct a resolver from a JSON schema object. | null | null | null |
|
def in_scope(self, scope: str):
old_scope = self.resolution_scope
self.resolution_scope = urlparse.urljoin(old_scope, scope)
try:
yield
finally:
self.resolution_scope = old_scope | Context manager to handle current scope. | null | null | null |
|
def resolving(self, ref: str):
new_uri = urlparse.urljoin(self.resolution_scope, ref)
uri, fragment = urlparse.urldefrag(new_uri)
if normalize(uri) in self.store:
schema = self.store[normalize(uri)]
elif not uri or uri == self.base_uri:
schema = self.schema
else:
schema = resolve_remote(uri, self.handlers)
if self.cache:
self.store[normalize(uri)] = schema
old_base_uri, old_schema = self.base_uri, self.schema
self.base_uri, self.schema = uri, schema
try:
with self.in_scope(uri):
yield resolve_path(schema, fragment)
finally:
self.base_uri, self.schema = old_base_uri, old_schema | Context manager which resolves a JSON ``ref`` and enters the
resolution scope of this ref. | null | null | null |
|
def get_scope_name(self):
name = 'validate_' + unquote(self.resolution_scope).replace('~1', '_').replace('~0', '_')
name = re.sub(r'[:/#\.\-\%]', '_', name)
name = name.lower().rstrip('_')
return name | Get current scope and return it as a valid function name. | null | null | null |
|
def walk(self, node: dict):
if isinstance(node, bool):
pass
elif '$ref' in node and isinstance(node['$ref'], str):
ref = node['$ref']
node['$ref'] = urlparse.urljoin(self.resolution_scope, ref)
elif 'id' in node and isinstance(node['id'], str):
with self.in_scope(node['id']):
self.store[normalize(self.resolution_scope)] = node
for _, item in node.items():
if isinstance(item, dict):
self.walk(item)
else:
for _, item in node.items():
if isinstance(item, dict):
self.walk(item) | Walk thru schema and dereferencing ``id`` and ``$ref`` instances | null | null | null |
|
types = enforce_list(self._definition['type'])
try:
python_types = ', '.join(JSON_TYPE_TO_PYTHON_TYPE[t] for t in types)
except KeyError as exc:
raise JsonSchemaDefinitionException('Unknown type: {}'.format(exc))
extra = ''
if 'integer' in types:
extra += ' and not (isinstance({variable}, float) and {variable}.is_integer())'.format(
variable=self._variable,
)
if ('number' in types or 'integer' in types) and 'boolean' not in types:
extra += ' or isinstance({variable}, bool)'.format(variable=self._variable)
with self.l('if not isinstance({variable}, ({})){}:', python_types, extra):
self.l('raise JsonSchemaException("{name} must be {}")', ' or '.join(types)) | def generate_type(self) | Validation of type. Can be one type or list of types.
Since draft 06 a float without fractional part is an integer.
.. code-block:: python
{'type': 'string'}
{'type': ['string', 'number']} | 4.152013 | 4.07599 | 1.018651 |
property_names_definition = self._definition.get('propertyNames', {})
if property_names_definition is True:
pass
elif property_names_definition is False:
self.create_variable_keys()
with self.l('if {variable}_keys:'):
self.l('raise JsonSchemaException("{name} must not be there")')
else:
self.create_variable_is_dict()
with self.l('if {variable}_is_dict:'):
self.create_variable_with_length()
with self.l('if {variable}_len != 0:'):
self.l('{variable}_property_names = True')
with self.l('for {variable}_key in {variable}:'):
with self.l('try:'):
self.generate_func_code_block(
property_names_definition,
'{}_key'.format(self._variable),
self._variable_name,
clear_variables=True,
)
with self.l('except JsonSchemaException:'):
self.l('{variable}_property_names = False')
with self.l('if not {variable}_property_names:'):
self.l('raise JsonSchemaException("{name} must be named by propertyName definition")') | def generate_property_names(self) | Means that keys of object must to follow this definition.
.. code-block:: python
{
'propertyNames': {
'maxLength': 3,
},
}
Valid keys for this definition are foo, bar, ... but not foobar, for example. | 3.783298 | 3.717366 | 1.017736 |
self.create_variable_is_list()
with self.l('if {variable}_is_list:'):
contains_definition = self._definition['contains']
if contains_definition is False:
self.l('raise JsonSchemaException("{name} is always invalid")')
elif contains_definition is True:
with self.l('if not {variable}:'):
self.l('raise JsonSchemaException("{name} must not be empty")')
else:
self.l('{variable}_contains = False')
with self.l('for {variable}_key in {variable}:'):
with self.l('try:'):
self.generate_func_code_block(
contains_definition,
'{}_key'.format(self._variable),
self._variable_name,
clear_variables=True,
)
self.l('{variable}_contains = True')
self.l('break')
self.l('except JsonSchemaException: pass')
with self.l('if not {variable}_contains:'):
self.l('raise JsonSchemaException("{name} must contain one of contains definition")') | def generate_contains(self) | Means that array must contain at least one defined item.
.. code-block:: python
{
'contains': {
'type': 'number',
},
}
A valid array is any that contains at least one number. | 3.815025 | 3.897963 | 0.978723 |
const = self._definition['const']
if isinstance(const, str):
const = '"{}"'.format(const)
with self.l('if {variable} != {}:', const):
self.l('raise JsonSchemaException("{name} must be same as const definition")') | def generate_const(self) | Means that value is valid when is equeal to const definition.
.. code-block:: python
{
'const': 42,
}
The only valid value is 42 in this example. | 9.7702 | 8.838085 | 1.105466 |
self._generate_func_code()
return dict(
REGEX_PATTERNS=self._compile_regexps,
re=re,
JsonSchemaException=JsonSchemaException,
) | def global_state(self) | Returns the global variables needed by the generated ``func_code``. Includes
compiled regular expressions and imports, so they do not have to be rebuilt
every time the validation function is called. | 16.37941 | 9.303426 | 1.760578 |
self._generate_func_code()
if not self._compile_regexps:
return '\n'.join(
[
'from fastjsonschema import JsonSchemaException',
'',
'',
]
)
regexs = ['"{}": re.compile(r"{}")'.format(key, value.pattern) for key, value in self._compile_regexps.items()]
return '\n'.join(
[
'import re',
'from fastjsonschema import JsonSchemaException',
'',
'',
'REGEX_PATTERNS = {',
' ' + ',\n '.join(regexs),
'}',
'',
]
) | def global_state_code(self) | Returns the global variables for the generated ``func_code``, as source code.
Includes compiled regular expressions and imports. | 3.552031 | 2.95409 | 1.202411 |
self.l('NoneType = type(None)')
# Generate parts that are referenced and not yet generated
while self._needed_validation_functions:
# During generation of validation function, could be needed to generate
# new one that is added again to `_needed_validation_functions`.
# Therefore usage of while instead of for loop.
uri, name = self._needed_validation_functions.popitem()
self.generate_validation_function(uri, name) | def generate_func_code(self) | Creates the base code of the validation function and calls a helper
to create code from the definition. | 11.973916 | 11.659321 | 1.026982 |
self._validation_functions_done.add(uri)
self.l('')
with self._resolver.resolving(uri) as definition:
with self.l('def {}(data):', name):
self.generate_func_code_block(definition, 'data', 'data', clear_variables=True)
self.l('return data') | def generate_validation_function(self, uri, name) | Generate a validation function for the given uri with the given name
backup = self._definition, self._variable, self._variable_name
self._definition, self._variable, self._variable_name = definition, variable, variable_name
if clear_variables:
backup_variables = self._variables
self._variables = set()
self._generate_func_code_block(definition)
self._definition, self._variable, self._variable_name = backup
if clear_variables:
self._variables = backup_variables | def generate_func_code_block(self, definition, variable, variable_name, clear_variables=False) | Creates validation rules for the current definition.
with self._resolver.in_scope(self._definition['$ref']):
name = self._resolver.get_scope_name()
uri = self._resolver.get_uri()
if uri not in self._validation_functions_done:
self._needed_validation_functions[uri] = name
# call validation function
self.l('{}({variable})', name) | def generate_ref(self) | Ref can be a link to a remote or local definition.
.. code-block:: python
{'$ref': 'http://json-schema.org/draft-04/schema#'}
{
'properties': {
'foo': {'type': 'integer'},
'bar': {'$ref': '#/properties/foo'}
}
} | 12.0553 | 11.128179 | 1.083313 |
spaces = ' ' * self.INDENT * self._indent
name = self._variable_name
if name and '{' in name:
name = '"+"{}".format(**locals())+"'.format(self._variable_name)
context = dict(
self._definition or {},
variable=self._variable,
name=name,
**kwds
)
self._code.append(spaces + line.format(*args, **context)) | def l(self, line, *args, **kwds) | Shortcut for appending a line. The line is formatted with the parameters
``variable``, ``variable_name`` (as ``name`` for short), all keys from
current JSON schema ``definition`` and also passed arguments in ``args``
and named ``kwds``.
.. code-block:: python
self.l('if {variable} not in {enum}: raise JsonSchemaException("Wrong!")')
When you want to indent block, use it as context manager. For example:
.. code-block:: python
with self.l('if {variable} not in {enum}:'):
self.l('raise JsonSchemaException("Wrong!")') | 7.612754 | 7.552474 | 1.007981 |
variable_name = '{}_len'.format(self._variable)
if variable_name in self._variables:
return
self._variables.add(variable_name)
self.l('{variable}_len = len({variable})') | def create_variable_with_length(self) | Append code for creating a variable holding the length of the value
(for example the length of a list or dictionary), named ``{variable}_len``.
It can be called several times, and the variable is created only when it
does not already exist. | 5.628143 | 3.924443 | 1.434125 |
variable_name = '{}_keys'.format(self._variable)
if variable_name in self._variables:
return
self._variables.add(variable_name)
self.l('{variable}_keys = set({variable}.keys())') | def create_variable_keys(self) | Append code for creating a variable holding the keys of the value (a dictionary),
named ``{variable}_keys``. Similar to `create_variable_with_length`.
variable_name = '{}_is_list'.format(self._variable)
if variable_name in self._variables:
return
self._variables.add(variable_name)
self.l('{variable}_is_list = isinstance({variable}, list)') | def create_variable_is_list(self) | Append code for creating a boolean variable indicating whether the value is a list,
named ``{variable}_is_list``. Similar to `create_variable_with_length`.
variable_name = '{}_is_dict'.format(self._variable)
if variable_name in self._variables:
return
self._variables.add(variable_name)
self.l('{variable}_is_dict = isinstance({variable}, dict)') | def create_variable_is_dict(self) | Append code for creating a boolean variable indicating whether the value is a dict,
named ``{variable}_is_dict``. Similar to `create_variable_with_length`.
with self.l('try:'):
self.generate_func_code_block(
self._definition['if'],
self._variable,
self._variable_name,
clear_variables=True
)
with self.l('except JsonSchemaException:'):
if 'else' in self._definition:
self.generate_func_code_block(
self._definition['else'],
self._variable,
self._variable_name,
clear_variables=True
)
else:
self.l('pass')
if 'then' in self._definition:
with self.l('else:'):
self.generate_func_code_block(
self._definition['then'],
self._variable,
self._variable_name,
clear_variables=True
) | def generate_if_then_else(self) | Implementation of if-then-else.
.. code-block:: python
{
'if': {
'exclusiveMaximum': 0,
},
'then': {
'minimum': -10,
},
'else': {
'multipleOf': 2,
},
}
Valid values are any between -10 and 0, or any multiple of two. | 2.548162 | 2.643281 | 0.964014 |
if self._definition['contentEncoding'] == 'base64':
with self.l('if isinstance({variable}, str):'):
with self.l('try:'):
self.l('import base64')
self.l('{variable} = base64.b64decode({variable})')
with self.l('except Exception:'):
self.l('raise JsonSchemaException("{name} must be encoded by base64")')
with self.l('if {variable} == "":'):
self.l('raise JsonSchemaException("contentEncoding must be base64")') | def generate_content_encoding(self) | Means decoding value when it's encoded by base64.
.. code-block:: python
{
'contentEncoding': 'base64',
} | 3.646704 | 3.578749 | 1.018989 |
if self._definition['contentMediaType'] == 'application/json':
with self.l('if isinstance({variable}, bytes):'):
with self.l('try:'):
self.l('{variable} = {variable}.decode("utf-8")')
with self.l('except Exception:'):
self.l('raise JsonSchemaException("{name} must encoded by utf8")')
with self.l('if isinstance({variable}, str):'):
with self.l('try:'):
self.l('import json')
self.l('{variable} = json.loads({variable})')
with self.l('except Exception:'):
self.l('raise JsonSchemaException("{name} must be valid JSON")') | def generate_content_media_type(self) | Means loading value when it's specified as JSON.
.. code-block:: python
{
'contentMediaType': 'application/json',
} | 2.912424 | 2.925349 | 0.995582 |
enum = self._definition['enum']
if not isinstance(enum, (list, tuple)):
raise JsonSchemaDefinitionException('enum must be an array')
with self.l('if {variable} not in {enum}:'):
enum = str(enum).replace('"', '\\"')
self.l('raise JsonSchemaException("{name} must be one of {}")', enum) | def generate_enum(self) | Means that only value specified in the enum is valid.
.. code-block:: python
{
'enum': ['a', 'b'],
} | 6.306518 | 6.096146 | 1.034509 |
for definition_item in self._definition['allOf']:
self.generate_func_code_block(definition_item, self._variable, self._variable_name, clear_variables=True) | def generate_all_of(self) | Means that the value has to be valid by all of those definitions. It's like putting them all into
one big definition.
.. code-block:: python
{
'allOf': [
{'type': 'number'},
{'minimum': 5},
],
}
Valid values for this definition are 5, 6, 7, ... but not 4 or 'abc' for example. | 11.631495 | 9.86811 | 1.178695 |
self.l('{variable}_one_of_count = 0')
for definition_item in self._definition['oneOf']:
# When we know it's failing (one of means exactly once), we do not need to do another expensive try-except.
with self.l('if {variable}_one_of_count < 2:'):
with self.l('try:'):
self.generate_func_code_block(definition_item, self._variable, self._variable_name, clear_variables=True)
self.l('{variable}_one_of_count += 1')
self.l('except JsonSchemaException: pass')
with self.l('if {variable}_one_of_count != 1:'):
self.l('raise JsonSchemaException("{name} must be valid exactly by one of oneOf definition")') | def generate_one_of(self) | Means that value have to be valid by only one of those definitions. It can't be valid
by two or more of them.
.. code-block:: python
{
'oneOf': [
{'type': 'number', 'multipleOf': 3},
{'type': 'number', 'multipleOf': 5},
],
}
Valid values for this definition are 3, 5, 6, ... but not 15 for example. | 6.023314 | 5.679721 | 1.060495 |
not_definition = self._definition['not']
if not_definition is True:
self.l('raise JsonSchemaException("{name} must not be there")')
elif not_definition is False:
return
elif not not_definition:
with self.l('if {}:', self._variable):
self.l('raise JsonSchemaException("{name} must not be valid by not definition")')
else:
with self.l('try:'):
self.generate_func_code_block(not_definition, self._variable, self._variable_name)
self.l('except JsonSchemaException: pass')
self.l('else: raise JsonSchemaException("{name} must not be valid by not definition")') | def generate_not(self) | Means that the value must not be valid by this definition.
.. code-block:: python
{'not': {'type': 'null'}}
Valid values for this definition are 'hello', 42, {} ... but not None.
Since draft 06 definition can be boolean. False means nothing, True
means everything is invalid. | 4.189403 | 4.018447 | 1.042543 |
with self.l('if isinstance({variable}, str):'):
format_ = self._definition['format']
if format_ in self.FORMAT_REGEXS:
format_regex = self.FORMAT_REGEXS[format_]
self._generate_format(format_, format_ + '_re_pattern', format_regex)
# format regex is used only in meta schemas
elif format_ == 'regex':
with self.l('try:'):
self.l('re.compile({variable})')
with self.l('except Exception:'):
self.l('raise JsonSchemaException("{name} must be a valid regex")')
else:
self.l('pass') | def generate_format(self) | Means that the value has to be in a specified format, for example date, email or other.
.. code-block:: python
{'format': 'email'}
Valid value for this definition is [email protected] but not @username | 5.489275 | 7.038188 | 0.779927 |
items_definition = self._definition['items']
if items_definition is True:
return
self.create_variable_is_list()
with self.l('if {variable}_is_list:'):
self.create_variable_with_length()
if items_definition is False:
with self.l('if {variable}:'):
self.l('raise JsonSchemaException("{name} must not be there")')
elif isinstance(items_definition, list):
for idx, item_definition in enumerate(items_definition):
with self.l('if {variable}_len > {}:', idx):
self.l('{variable}__{0} = {variable}[{0}]', idx)
self.generate_func_code_block(
item_definition,
'{}__{}'.format(self._variable, idx),
'{}[{}]'.format(self._variable_name, idx),
)
if isinstance(item_definition, dict) and 'default' in item_definition:
self.l('else: {variable}.append({})', repr(item_definition['default']))
if 'additionalItems' in self._definition:
if self._definition['additionalItems'] is False:
self.l('if {variable}_len > {}: raise JsonSchemaException("{name} must contain only specified items")', len(items_definition))
else:
with self.l('for {variable}_x, {variable}_item in enumerate({variable}[{0}:], {0}):', len(items_definition)):
self.generate_func_code_block(
self._definition['additionalItems'],
'{}_item'.format(self._variable),
'{}[{{{}_x}}]'.format(self._variable_name, self._variable),
)
else:
if items_definition:
with self.l('for {variable}_x, {variable}_item in enumerate({variable}):'):
self.generate_func_code_block(
items_definition,
'{}_item'.format(self._variable),
'{}[{{{}_x}}]'.format(self._variable_name, self._variable),
) | def generate_items(self) | Means that an array is valid only when all of its items are valid by this definition.
.. code-block:: python
{
'items': [
{'type': 'integer'},
{'type': 'string'},
],
}
Valid arrays are those with integers or strings, nothing else.
Since draft 06 definition can be also boolean. True means nothing, False
means everything is invalid. | 2.830266 | 2.826621 | 1.001289 |
self.create_variable_is_dict()
with self.l('if {variable}_is_dict:'):
self.create_variable_keys()
for key, prop_definition in self._definition['properties'].items():
key_name = re.sub(r'($[^a-zA-Z]|[^a-zA-Z0-9])', '', key)
with self.l('if "{}" in {variable}_keys:', key):
self.l('{variable}_keys.remove("{}")', key)
self.l('{variable}__{0} = {variable}["{1}"]', key_name, key)
self.generate_func_code_block(
prop_definition,
'{}__{}'.format(self._variable, key_name),
'{}.{}'.format(self._variable_name, key),
)
if isinstance(prop_definition, dict) and 'default' in prop_definition:
self.l('else: {variable}["{}"] = {}', key, repr(prop_definition['default'])) | def generate_properties(self) | Means object with defined keys.
.. code-block:: python
{
'properties': {
'key': {'type': 'number'},
},
}
A valid object contains a key called 'key' whose value is any number. | 4.267092 | 4.349461 | 0.981062 |
self.create_variable_is_dict()
with self.l('if {variable}_is_dict:'):
self.create_variable_keys()
for pattern, definition in self._definition['patternProperties'].items():
self._compile_regexps[pattern] = re.compile(pattern)
with self.l('for {variable}_key, {variable}_val in {variable}.items():'):
for pattern, definition in self._definition['patternProperties'].items():
with self.l('if REGEX_PATTERNS["{}"].search({variable}_key):', pattern):
with self.l('if {variable}_key in {variable}_keys:'):
self.l('{variable}_keys.remove({variable}_key)')
self.generate_func_code_block(
definition,
'{}_val'.format(self._variable),
'{}.{{{}_key}}'.format(self._variable_name, self._variable),
) | def generate_pattern_properties(self) | Means object with defined keys as patterns.
.. code-block:: python
{
'patternProperties': {
'^x': {'type': 'number'},
},
}
A valid object contains keys starting with 'x' whose values are any number. | 4.204301 | 4.28938 | 0.980165 |
self.create_variable_is_dict()
with self.l('if {variable}_is_dict:'):
self.create_variable_keys()
add_prop_definition = self._definition["additionalProperties"]
if add_prop_definition:
properties_keys = list(self._definition.get("properties", {}).keys())
with self.l('for {variable}_key in {variable}_keys:'):
with self.l('if {variable}_key not in {}:', properties_keys):
self.l('{variable}_value = {variable}.get({variable}_key)')
self.generate_func_code_block(
add_prop_definition,
'{}_value'.format(self._variable),
'{}.{{{}_key}}'.format(self._variable_name, self._variable),
)
else:
with self.l('if {variable}_keys:'):
self.l('raise JsonSchemaException("{name} must contain only specified properties")') | def generate_additional_properties(self) | Means object with keys with values defined by definition.
.. code-block:: python
{
'properties': {
'key': {'type': 'number'},
}
'additionalProperties': {'type': 'string'},
}
A valid object contains a key called 'key' whose value is any number, and
any other keys with string values. | 4.406275 | 4.59877 | 0.958142 |
self.create_variable_is_dict()
with self.l('if {variable}_is_dict:'):
self.create_variable_keys()
for key, values in self._definition["dependencies"].items():
if values == [] or values is True:
continue
with self.l('if "{}" in {variable}_keys:', key):
if values is False:
self.l('raise JsonSchemaException("{} in {name} must not be there")', key)
elif isinstance(values, list):
for value in values:
with self.l('if "{}" not in {variable}_keys:', value):
self.l('raise JsonSchemaException("{name} missing dependency {} for {}")', value, key)
else:
self.generate_func_code_block(values, self._variable, self._variable_name, clear_variables=True) | def generate_dependencies(self) | Means that when an object has one property, it must also have another property.
.. code-block:: python
{
'dependencies': {
'bar': ['foo'],
},
}
Valid object is containing only foo, both bar and foo or none of them, but not
object with only bar.
Since draft 06 definition can be boolean or empty array. True and empty array
means nothing, False means that key cannot be there at all. | 5.546368 | 5.358966 | 1.03497 |
resolver, code_generator = _factory(definition, handlers)
global_state = code_generator.global_state
# Do not pass local state so it can recursively call itself.
exec(code_generator.func_code, global_state)
return global_state[resolver.get_scope_name()] | def compile(definition, handlers={}) | Generates validation function for validating JSON schema passed in ``definition``.
Example:
.. code-block:: python
import fastjsonschema
validate = fastjsonschema.compile({'type': 'string'})
validate('hello')
This implementation support keyword ``default``:
.. code-block:: python
validate = fastjsonschema.compile({
'type': 'object',
'properties': {
'a': {'type': 'number', 'default': 42},
},
})
data = validate({})
assert data == {'a': 42}
Supported implementations are draft-04, draft-06 and draft-07. Which version
should be used is determined by `$draft` in your ``definition``. When not
specified, the latest implementation is used (draft-07).
.. code-block:: python
validate = fastjsonschema.compile({
'$schema': 'http://json-schema.org/draft-04/schema',
'type': 'number',
})
You can pass mapping from URI to function that should be used to retrieve
remote schemes used in your ``definition`` in parameter ``handlers``.
Exception :any:`JsonSchemaDefinitionException` is raised when generating the
code fails (bad definition).
Exception :any:`JsonSchemaException` is raised from the generated function when
validation fails (the data does not follow the definition). | 7.660957 | 10.944085 | 0.700009 |
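When validation fails, the generated function raises the exception mentioned above. A short sketch; the exact message text is produced by fastjsonschema and is only approximated here:

```python
import fastjsonschema

validate = fastjsonschema.compile({'type': 'number', 'minimum': 0})
try:
    validate(-5)
except fastjsonschema.JsonSchemaException as exc:
    print(exc)  # e.g. 'data must be bigger than or equal to 0' (wording approximate)
```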
_, code_generator = _factory(definition, handlers)
return (
'VERSION = "' + VERSION + '"\n' +
code_generator.global_state_code + '\n' +
code_generator.func_code
) | def compile_to_code(definition, handlers={}) | Generates validation code for validating JSON schema passed in ``definition``.
Example:
.. code-block:: python
import fastjsonschema
code = fastjsonschema.compile_to_code({'type': 'string'})
with open('your_file.py', 'w') as f:
f.write(code)
You can also use it as a script:
.. code-block:: bash
echo "{'type': 'string'}" | python3 -m fastjsonschema > your_file.py
python3 -m fastjsonschema "{'type': 'string'}" > your_file.py
Exception :any:`JsonSchemaDefinitionException` is raised when generating the
code fails (bad definition). | 6.683667 | 8.712713 | 0.767117 |
try:
i = 0
colormap = {0: (0, 0, 0)}
with open(settings.COLORMAP) as cmap:
lines = cmap.readlines()
for line in lines:
if i == 0 and 'mode = ' in line:
i = 1
maxval = float(line.replace('mode = ', ''))
elif i > 0:
str = line.split()
if str == []: # when there are empty lines at the end of the file
break
colormap.update(
{
i: (int(round(float(str[0]) * 255 / maxval)),
int(round(float(str[1]) * 255 / maxval)),
int(round(float(str[2]) * 255 / maxval)))
}
)
i += 1
except IOError:
pass
self.cmap = {k: v[:4] for k, v in colormap.items()} | def _read_cmap(self) | Reads the colormap from the text file given in settings.py.
See colormap_cubehelix.txt; the file must contain 256 RGB values | 2.89945 | 2.699869 | 1.073922 |
self.output("* NDVI processing started.", normal=True)
bands = self._read_bands()
image_data = self._get_image_data()
new_bands = []
for i in range(0, 2):
new_bands.append(numpy.empty(image_data['shape'], dtype=numpy.float32))
self._warp(image_data, bands, new_bands)
# Bands are no longer needed
del bands
calc_band = numpy.true_divide((new_bands[1] - new_bands[0]), (new_bands[1] + new_bands[0]))
output_band = numpy.rint((calc_band + 1) * 255 / 2).astype(numpy.uint8)
output_file = join(self.dst_path, self._filename(suffix='NDVI'))
return self.write_band(output_band, output_file, image_data) | def run(self) | Executes NDVI processing | 3.983486 | 3.731848 | 1.06743 |
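The core arithmetic above is the standard NDVI formula, (NIR - red) / (NIR + red), rescaled from [-1, 1] to the uint8 range. A standalone sketch with illustrative reflectance values:

```python
import numpy

red = numpy.array([0.10, 0.30], dtype=numpy.float32)  # band 4 reflectance (illustrative)
nir = numpy.array([0.50, 0.30], dtype=numpy.float32)  # band 5 reflectance (illustrative)

ndvi = numpy.true_divide(nir - red, nir + red)         # values in [-1, 1]
scaled = numpy.rint((ndvi + 1) * 255 / 2).astype(numpy.uint8)
# scaled ~= [212, 128]; higher NDVI (denser vegetation) maps toward 255
```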
buf = b''
for data in iterable:
buf += data
if len(buf) >= def_buf_size:
output = buf[:def_buf_size]
buf = buf[def_buf_size:]
yield output
if len(buf) > 0:
yield buf | def data_collector(iterable, def_buf_size=5242880) | Buffers n bytes of data.
:param iterable:
Could be a list, generator or string
:type iterable:
List, generator, String
:returns:
A generator object | 1.892673 | 2.463034 | 0.768432 |
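A tiny illustration with a small buffer size (the 5242880-byte default matches S3's minimum multipart-upload part size):

```python
chunks = list(data_collector(iter([b'abc', b'def', b'gh']), def_buf_size=4))
# chunks == [b'abcd', b'efgh'] -- the input is re-sliced into fixed-size parts
```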
if not connection:
from boto.s3.connection import S3Connection as connection
c = connection(aws_access_key, aws_secret_key, is_secure=secure)
else:
c = connection
b = c.get_bucket(bucket)
if not replace and b.lookup(key):
raise Exception('s3 key ' + key + ' already exists')
multipart_obj = b.initiate_multipart_upload(key)
err_queue = queue.Queue()
lock = threading.Lock()
upload.counter = 0
try:
tpool = pool.ThreadPool(processes=threads)
def check_errors():
try:
exc = err_queue.get(block=False)
except queue.Empty:
pass
else:
raise exc
def waiter():
while upload.counter >= threads:
check_errors()
time.sleep(0.1)
def cb(err):
if err:
err_queue.put(err)
with lock:
upload.counter -= 1
args = [multipart_obj.upload_part_from_file, progress_cb]
for part_no, part in enumerate(iterable):
part_no += 1
tpool.apply_async(upload_part, args + [part_no, part], callback=cb)
with lock:
upload.counter += 1
waiter()
tpool.close()
tpool.join()
# Check for thread errors before completing the upload,
# sometimes an error can be left unchecked until we
# get to this point.
check_errors()
multipart_obj.complete_upload()
except:
multipart_obj.cancel_upload()
tpool.terminate()
raise | def upload(bucket, aws_access_key, aws_secret_key,
iterable, key, progress_cb=None,
threads=5, replace=False, secure=True,
connection=None) | Upload data to s3 using the s3 multipart upload API.
:param bucket:
Name of the S3 bucket
:type bucket:
String
:param aws_access_key:
AWS access key id (optional)
:type aws_access_key:
String
:param aws_secret_key:
AWS access secret key (optional)
:type aws_secret_key:
String
:param iterable:
The data to upload. Each 'part' in the list will be uploaded in parallel. Each part must be at
least 5242880 bytes (5 MB).
:type iterable:
An iterable object
:param key:
The name of the key (filename) to create in the s3 bucket
:type key:
String
:param progress_cb:
Progress callback, will be called with (part_no, uploaded, total) each time a progress update
is available. (optional)
:type progress_cb:
function
:param threads:
the number of threads to use while uploading. (Default is 5)
:type threads:
int
:param replace:
will replace the key (filename) on S3 if set to true. (Default is false)
:type replace:
boolean
:param secure:
Use ssl when talking to s3. (Default is true)
:type secure:
boolean
:param connection:
Used for testing (optional)
:type connection:
S3 connection class
:returns:
void | 2.739658 | 2.778398 | 0.986057 |
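A hedged usage sketch combining C{upload} with C{data_collector}, mirroring how the uploader class below drives it; the bucket name, credentials and filename are placeholders:

```python
# ACCESS_KEY, SECRET_KEY and 'my-bucket' are placeholders, not real values.
with open('scene.tar.bz', 'rb') as f:
    upload('my-bucket', ACCESS_KEY, SECRET_KEY,
           data_collector(iter(f)),   # re-slice the file into >= 5 MB parts
           'scene.tar.bz',
           threads=10, replace=True, secure=True)
```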
f = open(path, 'rb')
self.source_size = os.stat(path).st_size
total_dict = {}
def cb(part_no, uploaded, total):
total_dict[part_no] = uploaded
params = {
'uploaded': round(sum(total_dict.values()) / 1048576, 0),
'size': round(self.source_size / 1048576, 0),
}
p = (self.progress_template + '\r') % params
STREAM.write(p)
STREAM.flush()
self.output('Uploading to S3', normal=True, arrow=True)
upload(bucket_name, self.key, self.secret,
data_collector(iter(f)), filename, cb,
threads=10, replace=True, secure=True, connection=self.conn)
print('\n')
self.output('Upload Completed', normal=True, arrow=True) | def run(self, bucket_name, filename, path) | Initiate the upload.
:param bucket_name:
Name of the S3 bucket
:type bucket_name:
String
:param filename:
The filename
:type filename:
String
:param path:
The path to the file that needs to be uploaded
:type path:
String
:returns:
void | 4.717006 | 4.867281 | 0.969125 |
if isinstance(scenes, list):
files = []
for scene in scenes:
# For each scene, if bands are provided, first check AWS; if the bands
# exist, download them. Otherwise use Google and then USGS.
try:
# if bands are not provided, go directly to Google and then USGS
if not isinstance(bands, list):
raise RemoteFileDoesntExist
files.append(self.amazon_s3(scene, bands))
except RemoteFileDoesntExist:
try:
files.append(self.google_storage(scene, self.download_dir))
except RemoteFileDoesntExist:
files.append(self.usgs_eros(scene, self.download_dir))
return files
else:
raise Exception('Expected sceneIDs list') | def download(self, scenes, bands=None) | Download scenes from Google Storage, or from Amazon S3 if bands are provided
:param scenes:
A list of scene IDs
:type scenes:
List
:param bands:
A list of bands. Default value is None.
:type bands:
List
:returns:
(List) paths of the downloaded scenes (from AWS, Google, or USGS) | 6.086594 | 5.598444 | 1.087194 |
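A hedged sketch of the downloader entry point; the class name and constructor arguments are assumptions, since only C{download} is shown here:

```python
# Assumed constructor -- only download() is shown above.
d = Downloader(download_dir='/tmp/landsat')
paths = d.download(['LC80030172015001LGN00'], bands=[4, 3, 2])
# With bands given, AWS S3 is tried first, then Google Storage, then USGS.
```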
# download from usgs if login information is provided
if self.usgs_user and self.usgs_pass:
try:
api_key = api.login(self.usgs_user, self.usgs_pass)
except USGSError as e:
error_tree = ElementTree.fromstring(str(e.message))
error_text = error_tree.find("SOAP-ENV:Body/SOAP-ENV:Fault/faultstring", api.NAMESPACES).text
raise USGSInventoryAccessMissing(error_text)
download_url = api.download('LANDSAT_8', 'EE', [scene], api_key=api_key)
if download_url:
self.output('Source: USGS EarthExplorer', normal=True, arrow=True)
return self.fetch(download_url[0], path)
raise RemoteFileDoesntExist('%s is not available on AWS S3, Google or USGS Earth Explorer' % scene)
raise RemoteFileDoesntExist('%s is not available on AWS S3 or Google Storage' % scene) | def usgs_eros(self, scene, path) | Downloads the image from USGS | 5.222606 | 5.086839 | 1.02669 |
sat = self.scene_interpreter(scene)
url = self.google_storage_url(sat)
self.remote_file_exists(url)
self.output('Source: Google Storage', normal=True, arrow=True)
return self.fetch(url, path) | def google_storage(self, scene, path) | Google Storage Downloader.
:param scene:
The scene id
:type scene:
String
:param path:
The directory path to where the image should be stored
:type path:
String
:returns:
Boolean | 9.600076 | 10.785778 | 0.890068 |
sat = self.scene_interpreter(scene)
# Always grab MTL.txt and QA band if bands are specified
if 'BQA' not in bands:
bands.append('QA')
if 'MTL' not in bands:
bands.append('MTL')
urls = []
for band in bands:
# get url for the band
url = self.amazon_s3_url(sat, band)
# make sure it exist
self.remote_file_exists(url)
urls.append(url)
# create folder
path = check_create_folder(join(self.download_dir, scene))
self.output('Source: AWS S3', normal=True, arrow=True)
for url in urls:
self.fetch(url, path)
return path | def amazon_s3(self, scene, bands) | Amazon S3 downloader | 5.813004 | 5.613524 | 1.035536 |
segments = url.split('/')
filename = segments[-1]
# remove query parameters from the filename
filename = filename.split('?')[0]
self.output('Downloading: %s' % filename, normal=True, arrow=True)
if exists(join(path, filename)):
size = getsize(join(path, filename))
if size == self.get_remote_file_size(url):
self.output('%s already exists on your system' % filename, normal=True, color='green', indent=1)
else:
fetch(url, path)
self.output('stored at %s' % path, normal=True, color='green', indent=1)
return join(path, filename) | def fetch(self, url, path) | Downloads the given url.
:param url:
The url to be downloaded.
:type url:
String
:param path:
The directory path to where the image should be stored
:type path:
String
:returns:
String (the path to the downloaded file) | 3.762441 | 4.024414 | 0.934904 |
filename = sat['scene'] + '.tar.bz'
return url_builder([self.google, sat['sat'], sat['path'], sat['row'], filename]) | def google_storage_url(self, sat) | Returns a Google Storage url that contains the scene provided.
:param sat:
Expects an object created by scene_interpreter method
:type sat:
dict
:returns:
(String) The URL to a google storage file | 10.952336 | 12.281126 | 0.891802 |
if band != 'MTL':
filename = '%s_B%s.TIF' % (sat['scene'], band)
else:
filename = '%s_%s.txt' % (sat['scene'], band)
return url_builder([self.s3, sat['sat'], sat['path'], sat['row'], sat['scene'], filename]) | def amazon_s3_url(self, sat, band) | Return an Amazon S3 url that contains the scene and band provided.
:param sat:
Expects an object created by scene_interpreter method
:type sat:
dict
:param band:
The band that has to be downloaded from Amazon
:type band:
String
:returns:
(String) The URL to a S3 file | 4.341505 | 4.405122 | 0.985558 |
status = requests.head(url).status_code
if status != 200:
raise RemoteFileDoesntExist | def remote_file_exists(self, url) | Checks whether the remote file exists.
:param url:
The url that has to be checked.
:type url:
String
:returns:
None; raises RemoteFileDoesntExist if the remote file does not exist. | 4.623389 | 6.134144 | 0.753714 |
headers = requests.head(url).headers
return int(headers['content-length']) | def get_remote_file_size(self, url) | Gets the filesize of a remote file.
:param url:
The url that has to be checked.
:type url:
String
:returns:
int | 4.660875 | 7.385141 | 0.631115 |
anatomy = {
'path': None,
'row': None,
'sat': None,
'scene': scene
}
if isinstance(scene, str) and len(scene) == 21:
anatomy['path'] = scene[3:6]
anatomy['row'] = scene[6:9]
anatomy['sat'] = 'L' + scene[2:3]
return anatomy
else:
raise IncorrectSceneId('Received incorrect scene') | def scene_interpreter(self, scene) | Convert a scene ID into path, row and satellite components.
:param scene:
The scene ID.
:type scene:
String
:returns:
dict
:Example output:
>>> anatomy = {
'path': None,
'row': None,
'sat': None,
'scene': scene
} | 4.124836 | 2.838022 | 1.45342 |
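A worked example of the slicing above; the scene ID is illustrative but well-formed at 21 characters, and C{s} stands for an instance of the surrounding class:

```python
s.scene_interpreter('LC80030172015001LGN00')
# {'path': '003', 'row': '017', 'sat': 'L8', 'scene': 'LC80030172015001LGN00'}
```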
query = []
or_string = ''
and_string = ''
search_string = ''
if paths_rows:
# Converting rows and paths to a paired list
new_array = create_paired_list(paths_rows)
paths_rows = ['(%s)' % self.row_path_builder(i[0], i[1]) for i in new_array]
or_string = '+OR+'.join(map(str, paths_rows))
if start_date and end_date:
query.append(self.date_range_builder(start_date, end_date))
elif start_date:
query.append(self.date_range_builder(start_date, '2100-01-01'))
elif end_date:
query.append(self.date_range_builder('2009-01-01', end_date))
if cloud_min and cloud_max:
query.append(self.cloud_cover_prct_range_builder(cloud_min, cloud_max))
elif cloud_min:
query.append(self.cloud_cover_prct_range_builder(cloud_min, '100'))
elif cloud_max:
query.append(self.cloud_cover_prct_range_builder('-1', cloud_max))
if address:
query.append(self.address_builder(address))
elif (lat is not None) and (lon is not None):
query.append(self.lat_lon_builder(lat, lon))
if query:
and_string = '+AND+'.join(map(str, query))
if and_string and or_string:
search_string = and_string + '+AND+(' + or_string + ')'
else:
search_string = or_string + and_string
return search_string | def query_builder(self, paths_rows=None, lat=None, lon=None, address=None, start_date=None, end_date=None,
cloud_min=None, cloud_max=None) | Builds the proper search syntax (query) for Landsat API.
:param paths_rows:
A string in this format: "003,003,004,004". Must be in pairs and separated by comma.
:type paths_rows:
String
:param lat:
The latitude
:type lat:
String, float, integer
:param lon:
The The longitude
:type lon:
String, float, integer
:param address:
The address
:type address:
String
:param start_date:
Date string. format: YYYY-MM-DD
:type start_date:
String
:param end_date:
date string. format: YYYY-MM-DD
:type end_date:
String
:param cloud_min:
float specifying the minimum percentage. e.g. 4.3
:type cloud_min:
float
:param cloud_max:
float specifying the maximum percentage. e.g. 78.9
:type cloud_max:
float
:returns:
String | 1.9923 | 2.029641 | 0.981602 |
if not end:
end = time.strftime('%Y-%m-%d')
return 'acquisitionDate:[%s+TO+%s]' % (start, end) | def date_range_builder(self, start='2013-02-11', end=None) | Builds date range query.
:param start:
Date string. format: YYYY-MM-DD
:type start:
String
:param end:
date string. format: YYYY-MM-DD
:type end:
String
:returns:
String | 4.014863 | 5.424769 | 0.740098 |
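For example (the literal '+TO+' matches the API's URL query syntax; C{s} again stands for an instance of the surrounding class):

```python
s.date_range_builder('2015-01-01', '2015-06-30')
# 'acquisitionDate:[2015-01-01+TO+2015-06-30]'

s.date_range_builder('2015-01-01')  # the end date defaults to today
```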
v = VerbosityMixin()
if code == 0:
v.output(message, normal=True, arrow=True)
v.output('Done!', normal=True, arrow=True)
else:
v.output(message, normal=True, error=True)
sys.exit(code) | def exit(message, code=0) | Output a message to stdout and terminate the process.
:param message:
Message to be output.
:type message:
String
:param code:
The termination code. Default is 0
:type code:
int
:returns:
void | 4.289639 | 4.864582 | 0.881811 |
if isinstance(value, list):
value = ",".join(value)
array = re.split('\D+', value)
# Make sure the elements in the list are even and pairable
if len(array) % 2 == 0:
new_array = [list(array[i:i + 2]) for i in range(0, len(array), 2)]
return new_array
else:
raise ValueError('The string should include pairs and be formatted. '
'The format must be 003,003,004,004 (commas with '
'no space)') | def create_paired_list(value) | Create a list of paired items from a string.
:param value:
the format must be 003,003,004,004 (commas with no space)
:type value:
String
:returns:
List
:example:
>>> create_paired_list('003,003,004,004')
[['003','003'], ['004', '004']] | 4.895967 | 3.542437 | 1.38209 |
if not os.path.exists(folder_path):
os.makedirs(folder_path)
return folder_path | def check_create_folder(folder_path) | Check whether a folder exists, if not the folder is created.
:param folder_path:
Path to the folder
:type folder_path:
String
:returns:
(String) the path to the folder | 2.062181 | 3.194346 | 0.645572 |
number = str(number)
if len(number) == 1:
return u'00%s' % number
elif len(number) == 2:
return u'0%s' % number
else:
return number | def three_digit(number) | Add leading 0s to inputs whose length is less than 3.
:param number:
The number to convert
:type number:
int
:returns:
String
:example:
>>> three_digit(1)
'001' | 1.978347 | 2.721124 | 0.727033 |
try:
fmt = '%m/%d/%Y'
return datetime.strptime(date, fmt).timetuple().tm_yday
except (ValueError, TypeError):
return 0 | def georgian_day(date) | Returns the number of days passed since the start of the year.
:param date:
The string date with this format %m/%d/%Y
:type date:
String
:returns:
int
:example:
>>> georgian_day('05/1/2015')
121 | 3.045742 | 3.231699 | 0.942458 |
try:
fmt = '%m/%d/%Y'
return datetime.strptime(date, fmt).timetuple().tm_year
except ValueError:
return 0 | def year(date) | Returns the year.
:param date:
The string date with this format %m/%d/%Y
:type date:
String
:returns:
int
:example:
>>> year('05/1/2015')
2015 | 3.341399 | 3.622749 | 0.922338 |
try:
if isinstance(date, datetime):
return date.strftime(new_fmt)
else:
fmt = '%m/%d/%Y'
return datetime.strptime(date, fmt).strftime(new_fmt)
except ValueError:
return date | def reformat_date(date, new_fmt='%Y-%m-%d') | Returns the reformatted date.
:param date:
The string date with this format %m/%d/%Y
:type date:
String or datetime
:param new_fmt:
date format string. Default is '%Y-%m-%d'
:type new_fmt:
String
:returns:
String
:example:
>>> reformat_date('05/1/2015', '%d/%m/%Y')
'01/05/2015' | 2.151808 | 2.475169 | 0.869358 |
if isinstance(value, list) or value is None:
return value
else:
s = re.findall('(10|11|QA|[0-9])', value)
for k, v in enumerate(s):
try:
s[k] = int(v)
except ValueError:
pass
return s | def convert_to_integer_list(value) | Converts a comma-separated string to a list.
:param value:
a comma-separated string with no spaces, e.g. 4,3,2,QA
:type value:
String
:returns:
List
:example:
>>> convert_to_integer_list('4,3,2,QA')
[4, 3, 2, 'QA'] | 3.970157 | 4.513855 | 0.879549 |
geocoded = geocoder.google(address)
precision_km = geocode_confidences[geocoded.confidence]
if precision_km <= required_precision_km:
(lon, lat) = geocoded.geometry['coordinates']
return {'lat': lat, 'lon': lon}
else:
raise ValueError("Address could not be precisely located") | def geocode(address, required_precision_km=1.) | Identifies the coordinates of an address
:param address:
the address to be geocoded
:type address:
String
:param required_precision_km:
the maximum permissible geographic uncertainty for the geocoding
:type required_precision_km:
float
:returns:
dict
:example:
>>> geocode('1600 Pennsylvania Ave NW, Washington, DC 20500')
{'lat': 38.89767579999999, 'lon': -77.0364827} | 4.634099 | 5.101321 | 0.908412 |
if isinstance(value, list) or value is None:
return value
else:
s = re.findall(r'([-+]?\d*\.\d+|\d+|[-+]?\d+)', value)
for k, v in enumerate(s):
try:
s[k] = float(v)
except ValueError:
pass
return s | def convert_to_float_list(value) | Converts a comma-separated string to a list.
:param value:
the format must be 1.2,-3.5 (commas with no space)
:type value:
String
:returns:
List
:example:
>>> convert_to_float_list('1.2,-3.5')
[1.2, -3.5] | 2.54778 | 2.875625 | 0.885992 |
# out of bound check
# If it is completely outside of target bounds, return target bounds
if ((bounds2[0] > bounds1[0] and bounds2[2] > bounds1[0]) or
(bounds2[2] < bounds1[2] and bounds2[2] < bounds1[0])):
return bounds1
if ((bounds2[1] < bounds1[1] and bounds2[3] < bounds1[1]) or
(bounds2[3] > bounds1[3] and bounds2[1] > bounds1[3])):
return bounds1
new_bounds = list(bounds2)
# Adjust Y axis (latitude)
if (bounds2[0] > bounds1[0] or bounds2[0] < bounds1[3]):
new_bounds[0] = bounds1[0]
if (bounds2[2] < bounds1[2] or bounds2[2] > bounds1[0]):
new_bounds[2] = bounds1[2]
# Adjust X axis (longitude)
if (bounds2[1] < bounds1[1] or bounds2[1] > bounds1[3]):
new_bounds[1] = bounds1[1]
if (bounds2[3] > bounds1[3] or bounds2[3] < bounds1[1]):
new_bounds[3] = bounds1[3]
return tuple(new_bounds) | def adjust_bounding_box(bounds1, bounds2) | If bounds2's corners fall outside bounds1, they are adjusted to bounds1's corners
@params
bounds1 - The source bounding box
bounds2 - The target bounding box that has to be within bounds1
@return
A bounding box tuple in (y1, x1, y2, x2) format | 1.783548 | 1.733351 | 1.02896 |
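A worked trace of the code above with small illustrative numbers; bounds are (y1, x1, y2, x2) with y1 as the top edge and x1 as the left edge:

    # The request box (3, -1, 1, 1) spills past the top and left of the source
    # box (2, 0, 0, 2); those two edges are snapped back and the rest are kept.
    adjust_bounding_box((2, 0, 0, 2), (3, -1, 1, 1))
    # -> (2, 0, 1, 1)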
try:
bands = convert_to_integer_list(bands)
if pansharpen:
p = PanSharpen(path, bands=bands, dst_path=settings.PROCESSED_IMAGE,
verbose=verbose, force_unzip=force_unzip, bounds=bounds)
elif ndvigrey:
p = NDVI(path, verbose=verbose, dst_path=settings.PROCESSED_IMAGE, force_unzip=force_unzip, bounds=bounds)
elif ndvi:
p = NDVIWithManualColorMap(path, dst_path=settings.PROCESSED_IMAGE,
verbose=verbose, force_unzip=force_unzip, bounds=bounds)
else:
p = Simple(path, bands=bands, dst_path=settings.PROCESSED_IMAGE, verbose=verbose, force_unzip=force_unzip,
bounds=bounds)
except (IOError, FileDoesNotExist) as err:
exit(str(err), 1)
return p.run() | def process_image(path, bands=None, verbose=False, pansharpen=False, ndvi=False, force_unzip=None,
ndvigrey=False, bounds=None) | Handles constructing and running an image process.
:param path:
The path to the image that has to be processed
:type path:
String
:param bands:
List of bands that has to be processed. (optional)
:type bands:
List
:param verbose:
Sets the level of verbosity. Default is False.
:type verbose:
boolean
:param pansharpen:
Whether to pansharpen the image. Default is False.
:type pansharpen:
boolean
:returns:
(String) path to the processed image | 2.374933 | 2.467534 | 0.962472 |
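A hedged usage sketch (the archive path is hypothetical; bands 4, 3, 2 are the Landsat 8 true-color set):

    # Produce a pansharpened RGB composite; returns the processed image path.
    out = process_image('/data/LC80030032015001LGN00.tar.bz',
                        bands=[4, 3, 2], pansharpen=True, verbose=True)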
bands = []
try:
for i, band in enumerate(self.bands):
bands.append(rasterio.open(self.bands_path[i]).read_band(1))
except IOError as e:
exit(str(e), 1)
return bands | def _read_bands(self) | Reads the bands with rasterio
self.output("Unzipping %s - It might take some time" % scene, normal=True, arrow=True)
try:
# check if file is already unzipped, skip
if isdir(dst) and not force_unzip:
self.output('%s is already unzipped.' % scene, normal=True, color='green', indent=1)
return
else:
tar = tarfile.open(src, 'r')
tar.extractall(path=dst)
tar.close()
except tarfile.ReadError:
check_create_folder(dst)
subprocess.check_call(['tar', '-xf', src, '-C', dst]) | def _unzip(self, src, dst, scene, force_unzip=False) | Unzip tar files | 3.611489 | 3.4874 | 1.035582 |
filename = get_file(path).split('.')
return filename[-1] in ('bz', 'bz2', 'gz') | def _check_if_zipped(self, path) | Checks whether the filename indicates a tar/zip archive | 6.755918 | 5.271735 | 1.281536 |
self.output('Calculating cloud and snow coverage from QA band', normal=True, arrow=True)
a = rasterio.open(join(self.scene_path, self._get_full_filename('QA'))).read_band(1)
cloud_high_conf = int('1100000000000000', 2)
snow_high_conf = int('0000110000000000', 2)
fill_pixels = int('0000000000000001', 2)
cloud_mask = numpy.bitwise_and(a, cloud_high_conf) == cloud_high_conf
snow_mask = numpy.bitwise_and(a, snow_high_conf) == snow_high_conf
fill_mask = numpy.bitwise_and(a, fill_pixels) == fill_pixels
perc = numpy.true_divide(numpy.sum(cloud_mask | snow_mask),
a.size - numpy.sum(fill_mask)) * 100.0
self.output('cloud/snow coverage: %s' % round(perc, 2), indent=1, normal=True, color='green')
return perc | def _calculate_cloud_ice_perc(self) | Return the percentage of pixels that are either cloud or snow with
high confidence (> 67%). | 2.994801 | 2.700096 | 1.109146 |
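A minimal standalone sketch of the QA-band bitmask test used above, on a toy array (values are illustrative, not real Landsat pixels):

    import numpy

    qa = numpy.array([0b1100000000000000, 0, 0b0000110000000000], dtype=numpy.uint16)
    cloud_high_conf = 0b1100000000000000
    snow_high_conf = 0b0000110000000000
    # A pixel counts only when both bits of the field are set (high confidence).
    cloudy_or_snowy = ((numpy.bitwise_and(qa, cloud_high_conf) == cloud_high_conf) |
                       (numpy.bitwise_and(qa, snow_high_conf) == snow_high_conf))
    # -> array([ True, False,  True])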
filename = ''
if prefix:
filename += str(prefix) + '_'
if name:
filename += str(name)
else:
filename += str(self.scene)
if suffix:
filename += '_' + str(suffix)
if self.clipped:
bounds = [tuple(self.bounds[0:2]), tuple(self.bounds[2:4])]
polyline = PolylineCodec().encode(bounds)
filename += '_clipped_' + polyline
filename += '.TIF'
return filename | def _filename(self, name=None, suffix=None, prefix=None) | File name generator for processed images | 3.541652 | 3.274452 | 1.081601 |
self.output("Clipping", normal=True)
# create new folder for clipped images
path = check_create_folder(join(self.scene_path, 'clipped'))
try:
temp_bands = copy(self.bands)
temp_bands.append('QA')
for i, band in enumerate(temp_bands):
band_name = self._get_full_filename(band)
band_path = join(self.scene_path, band_name)
self.output("Band %s" % band, normal=True, color='green', indent=1)
with rasterio.open(band_path) as src:
bounds = transform_bounds(
{
'proj': 'longlat',
'ellps': 'WGS84',
'datum': 'WGS84',
'no_defs': True
},
src.crs,
*self.bounds
)
if disjoint_bounds(bounds, src.bounds):
bounds = adjust_bounding_box(src.bounds, bounds)
window = src.window(*bounds)
out_kwargs = src.meta.copy()
out_kwargs.update({
'driver': 'GTiff',
'height': window[0][1] - window[0][0],
'width': window[1][1] - window[1][0],
'transform': src.window_transform(window)
})
with rasterio.open(join(path, band_name), 'w', **out_kwargs) as out:
out.write(src.read(window=window))
# Copy MTL to the clipped folder
copyfile(join(self.scene_path, self.scene + '_MTL.txt'), join(path, self.scene + '_MTL.txt'))
return path
except IOError as e:
exit(str(e), 1) | def clip(self) | Clip images based on the bounds provided
Implementation is borrowed from
https://github.com/brendan-ward/rasterio/blob/e3687ce0ccf8ad92844c16d913a6482d5142cf48/rasterio/rio/convert.py | 3.012987 | 2.840133 | 1.060861 |
self.output('Image processing started for bands %s' % '-'.join(map(str, self.bands)), normal=True, arrow=True)
bands = self._read_bands()
image_data = self._get_image_data()
new_bands = self._generate_new_bands(image_data['shape'])
self._warp(image_data, bands, new_bands)
# Bands are no longer needed
del bands
rasterio_options = {
'driver': 'GTiff',
'width': image_data['shape'][1],
'height': image_data['shape'][0],
'count': 3,
'dtype': numpy.uint8,
'nodata': 0,
'transform': image_data['dst_transform'],
'photometric': 'RGB',
'crs': self.dst_crs
}
return self._write_to_file(new_bands, **rasterio_options) | def run(self) | Executes the image processing.
:returns:
(String) the path to the processed image | 3.690378 | 3.607297 | 1.023031 |
self.output('PanSharpened Image processing started for bands %s' % '-'.join(map(str, self.bands)),
normal=True, arrow=True)
bands = self._read_bands()
image_data = self._get_image_data()
new_bands = self._generate_new_bands(image_data['shape'])
bands[:3] = self._rescale(bands[:3])
new_bands.append(numpy.empty(image_data['shape'], dtype=numpy.uint16))
self._warp(image_data, bands, new_bands)
# Bands are no longer needed
del bands
# Calculate pan band
pan = self._pansize(new_bands)
del self.bands[self.band8]
del new_bands[self.band8]
rasterio_options = {
'driver': 'GTiff',
'width': image_data['shape'][1],
'height': image_data['shape'][0],
'count': 3,
'dtype': numpy.uint8,
'nodata': 0,
'transform': image_data['dst_transform'],
'photometric': 'RGB',
'crs': self.dst_crs
}
return self._write_to_file(new_bands, pan, **rasterio_options) | def run(self) | Executes the pansharpen image processing.
:returns:
(String) the path to the processed image | 4.202127 | 3.931775 | 1.068761 |
self.output("Rescaling", normal=True, arrow=True)
for key, band in enumerate(bands):
self.output("band %s" % self.bands[key], normal=True, color='green', indent=1)
bands[key] = sktransform.rescale(band, 2)
bands[key] = (bands[key] * 65535).astype('uint16')
return bands | def _rescale(self, bands) | Rescale bands | 4.747736 | 4.644067 | 1.022323 |
if error and value and (normal or self.verbose):
return self._print(value, color='red', indent=indent)
if self.verbose or normal:
return self._print(value, color, arrow, indent)
return | def output(self, value, normal=False, color=None, error=False,
arrow=False, indent=None) | Handles the verbosity of this class's calls.
if normal is set to True, the value is always printed
if the class instance's verbose attribute is True, the value is printed
:param value:
a string representing the message to be printed
:type value:
String
:param normal:
if set to true the message is always printed, otherwise it is only shown if verbosity is set
:type normal:
boolean
:param color:
The color of the message, choices: 'red', 'green', 'blue'
:type color:
String
:param error:
if set to true the message appears in red
:type error:
Boolean
:param arrow:
if set to true an arrow appears before the message
:type arrow:
Boolean
:param indent:
indents the message based on the number provided
:type indent:
int
:returns:
void | 4.608987 | 5.074367 | 0.908288 |
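A hedged sketch of the calling pattern, assuming a consumer class that mixes in VerbosityMixin and sets a verbose attribute:

    class Downloader(VerbosityMixin):
        def __init__(self, verbose=False):
            self.verbose = verbose

    d = Downloader(verbose=True)
    d.output('Fetching scene list', normal=True, arrow=True)
    d.output('done', color='green', indent=1)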
if self.verbose:
proc = subprocess.Popen(argv, stderr=subprocess.PIPE)
else:
proc = subprocess.Popen(argv, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.output(proc.stderr.read(), error=True)
return | def subprocess(self, argv) | Execute subprocess commands with proper output.
This is no longer used in landsat-util
:param argv:
A list of subprocess arguments
:type argv:
List
:returns:
void | 3.187148 | 3.068715 | 1.038594 |
self.output(message, normal=True, color="green")
sys.exit() | def exit(self, message) | outputs an exit message and exits
:param message:
The message to be output
:type message:
String
:returns:
void | 11.88582 | 12.47582 | 0.952709 |
if color:
msg = colored(msg, color)
if arrow:
msg = colored('===> ', 'blue') + msg
if indent:
msg = (' ' * indent) + msg
print(msg)
return msg | def _print(self, msg, color=None, arrow=False, indent=None) | Print the msg with the color provided. | 3.784385 | 3.540753 | 1.068808 |
"query the names and values of nanomsg symbols"
value = ctypes.c_int()
name_value_pairs = []
i = 0
while True:
name = _nn_symbol(i, ctypes.byref(value))
if name is None:
break
i += 1
name_value_pairs.append((name.decode('ascii'), value.value))
return name_value_pairs | def nn_symbols() | query the names and values of nanomsg symbols | 3.969686 | 3.064734 | 1.295279 |
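A sketch of the usual way constants are resolved with this wrapper; the names come from the installed libnanomsg:

    symbols = dict(nn_symbols())
    NN_SUB = symbols['NN_SUB']                # protocol id
    NN_SOL_SOCKET = symbols['NN_SOL_SOCKET']  # generic socket option level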
try:
return _nn_setsockopt(socket, level, option, ctypes.addressof(value),
len(value))
except (TypeError, AttributeError):
buf_value = ctypes.create_string_buffer(value)
return _nn_setsockopt(socket, level, option,
ctypes.addressof(buf_value), len(value)) | def nn_setsockopt(socket, level, option, value) | set a socket option
socket - socket number
level - option level
option - option
value - a readable byte buffer (not a Unicode string) containing the value
returns - 0 on success or < 0 on error | 2.438953 | 2.769997 | 0.88049 |
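A hedged usage sketch; `sock` is assumed to be an open socket number and the NN_* constants resolved via nn_symbols(). Integer options are passed as a packed native int:

    import struct

    # Set the receive timeout to 100 ms; returns 0 on success, < 0 on error.
    rc = nn_setsockopt(sock, NN_SOL_SOCKET, NN_RCVTIMEO, struct.pack('i', 100))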
if memoryview(value).readonly:
raise TypeError('Writable buffer is required')
size_t_size = ctypes.c_size_t(len(value))
rtn = _nn_getsockopt(socket, level, option, ctypes.addressof(value),
ctypes.byref(size_t_size))
return (rtn, size_t_size.value) | def nn_getsockopt(socket, level, option, value) | retrieve a socket option
socket - socket number
level - option level
option - option
value - a writable byte buffer (e.g. a bytearray) which the option value
will be copied to
returns - a (result, size) tuple: result is the underlying return code (a number < 0 on error) and size is the number of bytes copied | 3.537725 | 4.22455 | 0.837421 |
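A hedged sketch; a ctypes buffer is used because the implementation takes its address, and the constants are assumed resolved via nn_symbols():

    import struct

    buf = ctypes.create_string_buffer(4)  # room for one C int
    rc, size = nn_getsockopt(sock, NN_SOL_SOCKET, NN_RCVBUF, buf)
    if rc >= 0:
        (rcvbuf,) = struct.unpack('i', buf.raw[:size])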
"send a message"
try:
return _nn_send(socket, ctypes.addressof(msg), memoryview(msg).nbytes, flags)
except (TypeError, AttributeError):
buf_msg = ctypes.create_string_buffer(msg)
return _nn_send(socket, ctypes.addressof(buf_msg), len(msg), flags) | def nn_send(socket, msg, flags) | send a message | 3.326798 | 3.4552 | 0.962838 |
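A quick sketch (`sock` assumed open; a bytes payload takes the create_string_buffer fallback path above):

    rc = nn_send(sock, b'hello', 0)  # number of bytes queued, or < 0 on error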
"allocate a message"
pointer = _nn_allocmsg(size, type)
if pointer is None:
return None
return _create_message(pointer, size) | def nn_allocmsg(size, type) | allocate a message | 4.65184 | 5.323575 | 0.873819 |
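A hedged sketch of zero-copy allocation; type 0 selects libnanomsg's default allocator, and the buffer is assumed writable since _create_message wraps the raw pointer for exactly that use:

    msg = nn_allocmsg(1024, 0)
    if msg is not None:
        msg[0:5] = b'hello'  # fill the nanomsg-owned buffer in place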
polls = []
for fd, event in fds.items():
s = PollFds()
s.fd = fd
s.events = event
s.revents = 0
polls.append(s)
poll_array = (PollFds*len(fds))(*polls)
res = _nn_poll(poll_array, len(fds), int(timeout))
if res <= 0:
return res, {}
return res, {item.fd: item.revents for item in poll_array} | def nn_poll(fds, timeout=-1) | nn_pollfds
:param fds: dict (file descriptor => pollmode)
:param timeout: timeout in milliseconds
:return: a (result, dict) tuple; result is the number of ready descriptors (<= 0 on timeout or error) and dict maps fd to returned events | 3.420874 | 3.520758 | 0.97163 |
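A hedged usage sketch; `sock` is an open socket and NN_POLLIN was resolved via nn_symbols():

    res, revents = nn_poll({sock: NN_POLLIN}, timeout=1000)
    if res > 0 and revents[sock] & NN_POLLIN:
        pass  # the socket has a message ready to receive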