prompt (large_string, lengths 72–9.34k) | completion (large_string, lengths 0–7.61k) |
---|---|
<|file_name|>media.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
eve.io.media
~~~~~~~~~~~~
Media storage for Eve-powered APIs.
:copyright: (c) 2014 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
class MediaStorage(object):
""" The MediaStorage class provides a standardized API for storing files,
along with a set of default behaviors that all other storage systems can
inherit or override as necessary.
.. versionadded:: 0.3
"""
def __init__(self, app=None):
"""
:param app: the flask application (eve itself). This can be used by
the class to access, amongst other things, the app.config object to
retrieve class-specific settings.
"""
self.app = app
def get(self, id_or_filename):
""" Opens the file given by name or unique id. Note that although the
returned file is guaranteed to be a File object, it might actually be
some subclass. Returns None if no file was found.
"""
raise NotImplementedError
def put(self, content, filename=None, content_type=None):
""" Saves a new file using the storage system, preferably with the name
specified. If there already exists a file with this name, the
storage system may modify the filename as necessary to get a unique
name. Depending on the storage system, a unique id or the actual name
of the stored file will be returned. The content type argument is used
to appropriately identify the file when it is retrieved.
.. versionchanged:: 0.5
Allow filename to be optional (#414).
"""
raise NotImplementedError
def <|fim_middle|>(self, id_or_filename):
""" Deletes the file referenced by name or unique id. If deletion is
not supported on the target storage system, this will raise
NotImplementedError instead.
"""
raise NotImplementedError
def exists(self, id_or_filename):
""" Returns True if a file referenced by the given name or unique id
already exists in the storage system, or False if the name is available
for a new file.
"""
raise NotImplementedError
<|fim▁end|> | delete |
<|file_name|>media.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
eve.io.media
~~~~~~~~~~~~
Media storage for Eve-powered APIs.
:copyright: (c) 2014 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
class MediaStorage(object):
""" The MediaStorage class provides a standardized API for storing files,
along with a set of default behaviors that all other storage systems can
inherit or override as necessary.
.. versionadded:: 0.3
"""
def __init__(self, app=None):
"""
:param app: the flask application (eve itself). This can be used by
the class to access, amongst other things, the app.config object to
retrieve class-specific settings.
"""
self.app = app
def get(self, id_or_filename):
""" Opens the file given by name or unique id. Note that although the
returned file is guaranteed to be a File object, it might actually be
some subclass. Returns None if no file was found.
"""
raise NotImplementedError
def put(self, content, filename=None, content_type=None):
""" Saves a new file using the storage system, preferably with the name
specified. If there already exists a file with this name, the
storage system may modify the filename as necessary to get a unique
name. Depending on the storage system, a unique id or the actual name
of the stored file will be returned. The content type argument is used
to appropriately identify the file when it is retrieved.
.. versionchanged:: 0.5
Allow filename to be optional (#414).
"""
raise NotImplementedError
def delete(self, id_or_filename):
""" Deletes the file referenced by name or unique id. If deletion is
not supported on the target storage system, this will raise
NotImplementedError instead.
"""
raise NotImplementedError
def <|fim_middle|>(self, id_or_filename):
""" Returns True if a file referenced by the given name or unique id
already exists in the storage system, or False if the name is available
for a new file.
"""
raise NotImplementedError
<|fim▁end|> | exists |
<|file_name|>format.py<|end_file_name|><|fim▁begin|>"""
Contains format specification class and methods to parse it from JSON.
.. codeauthor:: Tomas Krizek <[email protected]>
"""
import json
import re
def get_root_input_type_from_json(data):
"""Return the root input type from JSON formatted string."""
return parse_format(json.loads(data))
def parse_format(data):
"""Returns root input type from data."""
input_types = {}
data = data['ist_nodes']
root_id = data[0]['id'] # set root type
for item in data:
input_type = _get_input_type(item)
if input_type is not None:
input_types[input_type['id']] = input_type # register by id
_substitute_ids_with_references(input_types)
return input_types[root_id]
SCALAR = ['Integer', 'Double', 'Bool', 'String', 'Selection', 'FileName']
def is_scalar(input_type):
"""Returns True if input_type is scalar."""
return input_type['base_type'] in SCALAR
<|fim▁hole|> """Determine whether given value is a parameter string."""
if not isinstance(value, str):
return False
return RE_PARAM.match(value)
def _substitute_ids_with_references(input_types):
"""Replaces ids or type names with python object references."""
input_type = {}
def _substitute_implementations():
"""Replaces implementation ids with input_types."""
impls = {}
for id_ in input_type['implementations']:
type_ = input_types[id_]
impls[type_['name']] = type_
input_type['implementations'] = impls
def _substitute_default_descendant():
"""Replaces default descendant id with input_type."""
id_ = input_type.get('default_descendant', None)
if id_ is not None:
input_type['default_descendant'] = input_types[id_]
def _substitute_key_type():
"""Replaces key type with input_type."""
# pylint: disable=unused-variable, invalid-name
for __, value in input_type['keys'].items():
value['type'] = input_types[value['type']]
# pylint: disable=unused-variable, invalid-name
for __, input_type in input_types.items():
if input_type['base_type'] == 'Array':
input_type['subtype'] = input_types[input_type['subtype']]
elif input_type['base_type'] == 'Abstract':
_substitute_implementations()
_substitute_default_descendant()
elif input_type['base_type'] == 'Record':
_substitute_key_type()
def _get_input_type(data):
"""Returns the input_type data structure that defines an input type
and its constraints for validation."""
if 'id' not in data or 'input_type' not in data:
return None
input_type = dict(
id=data['id'],
base_type=data['input_type']
)
input_type['name'] = data.get('name', '')
input_type['full_name'] = data.get('full_name', '')
input_type['description'] = data.get('description', '')
input_type['attributes'] = data.get('attributes', {})
if input_type['base_type'] in ['Double', 'Integer']:
input_type.update(_parse_range(data))
elif input_type['base_type'] == 'Array':
input_type.update(_parse_range(data))
if input_type['min'] < 0:
input_type['min'] = 0
input_type['subtype'] = data['subtype']
elif input_type['base_type'] == 'FileName':
input_type['file_mode'] = data['file_mode']
elif input_type['base_type'] == 'Selection':
input_type['values'] = _list_to_dict(data['values'], 'name')
elif input_type['base_type'] == 'Record':
input_type['keys'] = _list_to_dict(data['keys'])
input_type['implements'] = data.get('implements', [])
input_type['reducible_to_key'] = data.get('reducible_to_key', None)
elif input_type['base_type'] == 'Abstract':
input_type['implementations'] = data['implementations']
input_type['default_descendant'] = data.get('default_descendant', None)
return input_type
def _parse_range(data):
"""Parses the format range properties - min, max."""
input_type = {}
try:
input_type['min'] = data['range'][0]
except (KeyError, TypeError): # set default value
input_type['min'] = float('-inf')
try:
input_type['max'] = data['range'][1]
except (KeyError, TypeError): # set default value
input_type['max'] = float('inf')
return input_type
def _list_to_dict(list_, key_label='key'):
"""
Transforms a list of dictionaries into a dictionary of dictionaries.
Original dictionaries are assigned key specified in each of them
by key_label.
"""
dict_ = {}
for item in list_:
dict_[item[key_label]] = item
return dict_<|fim▁end|> | RE_PARAM = re.compile('^<([a-zA-Z][a-zA-Z0-9_]*)>$')
def is_param(value): |
<|file_name|>format.py<|end_file_name|><|fim▁begin|>"""
Contains format specification class and methods to parse it from JSON.
.. codeauthor:: Tomas Krizek <[email protected]>
"""
import json
import re
def get_root_input_type_from_json(data):
<|fim_middle|>
def parse_format(data):
"""Returns root input type from data."""
input_types = {}
data = data['ist_nodes']
root_id = data[0]['id'] # set root type
for item in data:
input_type = _get_input_type(item)
if input_type is not None:
input_types[input_type['id']] = input_type # register by id
_substitute_ids_with_references(input_types)
return input_types[root_id]
SCALAR = ['Integer', 'Double', 'Bool', 'String', 'Selection', 'FileName']
def is_scalar(input_type):
"""Returns True if input_type is scalar."""
return input_type['base_type'] in SCALAR
RE_PARAM = re.compile('^<([a-zA-Z][a-zA-Z0-9_]*)>$')
def is_param(value):
"""Determine whether given value is a parameter string."""
if not isinstance(value, str):
return False
return RE_PARAM.match(value)
def _substitute_ids_with_references(input_types):
"""Replaces ids or type names with python object references."""
input_type = {}
def _substitute_implementations():
"""Replaces implementation ids with input_types."""
impls = {}
for id_ in input_type['implementations']:
type_ = input_types[id_]
impls[type_['name']] = type_
input_type['implementations'] = impls
def _substitute_default_descendant():
"""Replaces default descendant id with input_type."""
id_ = input_type.get('default_descendant', None)
if id_ is not None:
input_type['default_descendant'] = input_types[id_]
def _substitute_key_type():
"""Replaces key type with input_type."""
# pylint: disable=unused-variable, invalid-name
for __, value in input_type['keys'].items():
value['type'] = input_types[value['type']]
# pylint: disable=unused-variable, invalid-name
for __, input_type in input_types.items():
if input_type['base_type'] == 'Array':
input_type['subtype'] = input_types[input_type['subtype']]
elif input_type['base_type'] == 'Abstract':
_substitute_implementations()
_substitute_default_descendant()
elif input_type['base_type'] == 'Record':
_substitute_key_type()
def _get_input_type(data):
"""Returns the input_type data structure that defines an input type
and its constraints for validation."""
if 'id' not in data or 'input_type' not in data:
return None
input_type = dict(
id=data['id'],
base_type=data['input_type']
)
input_type['name'] = data.get('name', '')
input_type['full_name'] = data.get('full_name', '')
input_type['description'] = data.get('description', '')
input_type['attributes'] = data.get('attributes', {})
if input_type['base_type'] in ['Double', 'Integer']:
input_type.update(_parse_range(data))
elif input_type['base_type'] == 'Array':
input_type.update(_parse_range(data))
if input_type['min'] < 0:
input_type['min'] = 0
input_type['subtype'] = data['subtype']
elif input_type['base_type'] == 'FileName':
input_type['file_mode'] = data['file_mode']
elif input_type['base_type'] == 'Selection':
input_type['values'] = _list_to_dict(data['values'], 'name')
elif input_type['base_type'] == 'Record':
input_type['keys'] = _list_to_dict(data['keys'])
input_type['implements'] = data.get('implements', [])
input_type['reducible_to_key'] = data.get('reducible_to_key', None)
elif input_type['base_type'] == 'Abstract':
input_type['implementations'] = data['implementations']
input_type['default_descendant'] = data.get('default_descendant', None)
return input_type
def _parse_range(data):
"""Parses the format range properties - min, max."""
input_type = {}
try:
input_type['min'] = data['range'][0]
except (KeyError, TypeError): # set default value
input_type['min'] = float('-inf')
try:
input_type['max'] = data['range'][1]
except (KeyError, TypeError): # set default value
input_type['max'] = float('inf')
return input_type
def _list_to_dict(list_, key_label='key'):
"""
Transforms a list of dictionaries into a dictionary of dictionaries.
Original dictionaries are assigned key specified in each of them
by key_label.
"""
dict_ = {}
for item in list_:
dict_[item[key_label]] = item
return dict_
<|fim▁end|> | """Return the root input type from JSON formatted string."""
return parse_format(json.loads(data)) |
<|file_name|>format.py<|end_file_name|><|fim▁begin|>"""
Contains format specification class and methods to parse it from JSON.
.. codeauthor:: Tomas Krizek <[email protected]>
"""
import json
import re
def get_root_input_type_from_json(data):
"""Return the root input type from JSON formatted string."""
return parse_format(json.loads(data))
def parse_format(data):
<|fim_middle|>
SCALAR = ['Integer', 'Double', 'Bool', 'String', 'Selection', 'FileName']
def is_scalar(input_type):
"""Returns True if input_type is scalar."""
return input_type['base_type'] in SCALAR
RE_PARAM = re.compile('^<([a-zA-Z][a-zA-Z0-9_]*)>$')
def is_param(value):
"""Determine whether given value is a parameter string."""
if not isinstance(value, str):
return False
return RE_PARAM.match(value)
def _substitute_ids_with_references(input_types):
"""Replaces ids or type names with python object references."""
input_type = {}
def _substitute_implementations():
"""Replaces implementation ids with input_types."""
impls = {}
for id_ in input_type['implementations']:
type_ = input_types[id_]
impls[type_['name']] = type_
input_type['implementations'] = impls
def _substitute_default_descendant():
"""Replaces default descendant id with input_type."""
id_ = input_type.get('default_descendant', None)
if id_ is not None:
input_type['default_descendant'] = input_types[id_]
def _substitute_key_type():
"""Replaces key type with input_type."""
# pylint: disable=unused-variable, invalid-name
for __, value in input_type['keys'].items():
value['type'] = input_types[value['type']]
# pylint: disable=unused-variable, invalid-name
for __, input_type in input_types.items():
if input_type['base_type'] == 'Array':
input_type['subtype'] = input_types[input_type['subtype']]
elif input_type['base_type'] == 'Abstract':
_substitute_implementations()
_substitute_default_descendant()
elif input_type['base_type'] == 'Record':
_substitute_key_type()
def _get_input_type(data):
"""Returns the input_type data structure that defines an input type
and its constraints for validation."""
if 'id' not in data or 'input_type' not in data:
return None
input_type = dict(
id=data['id'],
base_type=data['input_type']
)
input_type['name'] = data.get('name', '')
input_type['full_name'] = data.get('full_name', '')
input_type['description'] = data.get('description', '')
input_type['attributes'] = data.get('attributes', {})
if input_type['base_type'] in ['Double', 'Integer']:
input_type.update(_parse_range(data))
elif input_type['base_type'] == 'Array':
input_type.update(_parse_range(data))
if input_type['min'] < 0:
input_type['min'] = 0
input_type['subtype'] = data['subtype']
elif input_type['base_type'] == 'FileName':
input_type['file_mode'] = data['file_mode']
elif input_type['base_type'] == 'Selection':
input_type['values'] = _list_to_dict(data['values'], 'name')
elif input_type['base_type'] == 'Record':
input_type['keys'] = _list_to_dict(data['keys'])
input_type['implements'] = data.get('implements', [])
input_type['reducible_to_key'] = data.get('reducible_to_key', None)
elif input_type['base_type'] == 'Abstract':
input_type['implementations'] = data['implementations']
input_type['default_descendant'] = data.get('default_descendant', None)
return input_type
def _parse_range(data):
"""Parses the format range properties - min, max."""
input_type = {}
try:
input_type['min'] = data['range'][0]
except (KeyError, TypeError): # set default value
input_type['min'] = float('-inf')
try:
input_type['max'] = data['range'][1]
except (KeyError, TypeError): # set default value
input_type['max'] = float('inf')
return input_type
def _list_to_dict(list_, key_label='key'):
"""
Transforms a list of dictionaries into a dictionary of dictionaries.
Original dictionaries are assigned key specified in each of them
by key_label.
"""
dict_ = {}
for item in list_:
dict_[item[key_label]] = item
return dict_
<|fim▁end|> | """Returns root input type from data."""
input_types = {}
data = data['ist_nodes']
root_id = data[0]['id'] # set root type
for item in data:
input_type = _get_input_type(item)
if input_type is not None:
input_types[input_type['id']] = input_type # register by id
_substitute_ids_with_references(input_types)
return input_types[root_id] |
<|file_name|>format.py<|end_file_name|><|fim▁begin|>"""
Contains format specification class and methods to parse it from JSON.
.. codeauthor:: Tomas Krizek <[email protected]>
"""
import json
import re
def get_root_input_type_from_json(data):
"""Return the root input type from JSON formatted string."""
return parse_format(json.loads(data))
def parse_format(data):
"""Returns root input type from data."""
input_types = {}
data = data['ist_nodes']
root_id = data[0]['id'] # set root type
for item in data:
input_type = _get_input_type(item)
if input_type is not None:
input_types[input_type['id']] = input_type # register by id
_substitute_ids_with_references(input_types)
return input_types[root_id]
SCALAR = ['Integer', 'Double', 'Bool', 'String', 'Selection', 'FileName']
def is_scalar(input_type):
<|fim_middle|>
RE_PARAM = re.compile('^<([a-zA-Z][a-zA-Z0-9_]*)>$')
def is_param(value):
"""Determine whether given value is a parameter string."""
if not isinstance(value, str):
return False
return RE_PARAM.match(value)
def _substitute_ids_with_references(input_types):
"""Replaces ids or type names with python object references."""
input_type = {}
def _substitute_implementations():
"""Replaces implementation ids with input_types."""
impls = {}
for id_ in input_type['implementations']:
type_ = input_types[id_]
impls[type_['name']] = type_
input_type['implementations'] = impls
def _substitute_default_descendant():
"""Replaces default descendant id with input_type."""
id_ = input_type.get('default_descendant', None)
if id_ is not None:
input_type['default_descendant'] = input_types[id_]
def _substitute_key_type():
"""Replaces key type with input_type."""
# pylint: disable=unused-variable, invalid-name
for __, value in input_type['keys'].items():
value['type'] = input_types[value['type']]
# pylint: disable=unused-variable, invalid-name
for __, input_type in input_types.items():
if input_type['base_type'] == 'Array':
input_type['subtype'] = input_types[input_type['subtype']]
elif input_type['base_type'] == 'Abstract':
_substitute_implementations()
_substitute_default_descendant()
elif input_type['base_type'] == 'Record':
_substitute_key_type()
def _get_input_type(data):
"""Returns the input_type data structure that defines an input type
and its constraints for validation."""
if 'id' not in data or 'input_type' not in data:
return None
input_type = dict(
id=data['id'],
base_type=data['input_type']
)
input_type['name'] = data.get('name', '')
input_type['full_name'] = data.get('full_name', '')
input_type['description'] = data.get('description', '')
input_type['attributes'] = data.get('attributes', {})
if input_type['base_type'] in ['Double', 'Integer']:
input_type.update(_parse_range(data))
elif input_type['base_type'] == 'Array':
input_type.update(_parse_range(data))
if input_type['min'] < 0:
input_type['min'] = 0
input_type['subtype'] = data['subtype']
elif input_type['base_type'] == 'FileName':
input_type['file_mode'] = data['file_mode']
elif input_type['base_type'] == 'Selection':
input_type['values'] = _list_to_dict(data['values'], 'name')
elif input_type['base_type'] == 'Record':
input_type['keys'] = _list_to_dict(data['keys'])
input_type['implements'] = data.get('implements', [])
input_type['reducible_to_key'] = data.get('reducible_to_key', None)
elif input_type['base_type'] == 'Abstract':
input_type['implementations'] = data['implementations']
input_type['default_descendant'] = data.get('default_descendant', None)
return input_type
def _parse_range(data):
"""Parses the format range properties - min, max."""
input_type = {}
try:
input_type['min'] = data['range'][0]
except (KeyError, TypeError): # set default value
input_type['min'] = float('-inf')
try:
input_type['max'] = data['range'][1]
except (KeyError, TypeError): # set default value
input_type['max'] = float('inf')
return input_type
def _list_to_dict(list_, key_label='key'):
"""
Transforms a list of dictionaries into a dictionary of dictionaries.
Original dictionaries are assigned key specified in each of them
by key_label.
"""
dict_ = {}
for item in list_:
dict_[item[key_label]] = item
return dict_
<|fim▁end|> | """Returns True if input_type is scalar."""
return input_type['base_type'] in SCALAR |
<|file_name|>format.py<|end_file_name|><|fim▁begin|>"""
Contains format specification class and methods to parse it from JSON.
.. codeauthor:: Tomas Krizek <[email protected]>
"""
import json
import re
def get_root_input_type_from_json(data):
"""Return the root input type from JSON formatted string."""
return parse_format(json.loads(data))
def parse_format(data):
"""Returns root input type from data."""
input_types = {}
data = data['ist_nodes']
root_id = data[0]['id'] # set root type
for item in data:
input_type = _get_input_type(item)
if input_type is not None:
input_types[input_type['id']] = input_type # register by id
_substitute_ids_with_references(input_types)
return input_types[root_id]
SCALAR = ['Integer', 'Double', 'Bool', 'String', 'Selection', 'FileName']
def is_scalar(input_type):
"""Returns True if input_type is scalar."""
return input_type['base_type'] in SCALAR
RE_PARAM = re.compile('^<([a-zA-Z][a-zA-Z0-9_]*)>$')
def is_param(value):
<|fim_middle|>
def _substitute_ids_with_references(input_types):
"""Replaces ids or type names with python object references."""
input_type = {}
def _substitute_implementations():
"""Replaces implementation ids with input_types."""
impls = {}
for id_ in input_type['implementations']:
type_ = input_types[id_]
impls[type_['name']] = type_
input_type['implementations'] = impls
def _substitute_default_descendant():
"""Replaces default descendant id with input_type."""
id_ = input_type.get('default_descendant', None)
if id_ is not None:
input_type['default_descendant'] = input_types[id_]
def _substitute_key_type():
"""Replaces key type with input_type."""
# pylint: disable=unused-variable, invalid-name
for __, value in input_type['keys'].items():
value['type'] = input_types[value['type']]
# pylint: disable=unused-variable, invalid-name
for __, input_type in input_types.items():
if input_type['base_type'] == 'Array':
input_type['subtype'] = input_types[input_type['subtype']]
elif input_type['base_type'] == 'Abstract':
_substitute_implementations()
_substitute_default_descendant()
elif input_type['base_type'] == 'Record':
_substitute_key_type()
def _get_input_type(data):
"""Returns the input_type data structure that defines an input type
and its constraints for validation."""
if 'id' not in data or 'input_type' not in data:
return None
input_type = dict(
id=data['id'],
base_type=data['input_type']
)
input_type['name'] = data.get('name', '')
input_type['full_name'] = data.get('full_name', '')
input_type['description'] = data.get('description', '')
input_type['attributes'] = data.get('attributes', {})
if input_type['base_type'] in ['Double', 'Integer']:
input_type.update(_parse_range(data))
elif input_type['base_type'] == 'Array':
input_type.update(_parse_range(data))
if input_type['min'] < 0:
input_type['min'] = 0
input_type['subtype'] = data['subtype']
elif input_type['base_type'] == 'FileName':
input_type['file_mode'] = data['file_mode']
elif input_type['base_type'] == 'Selection':
input_type['values'] = _list_to_dict(data['values'], 'name')
elif input_type['base_type'] == 'Record':
input_type['keys'] = _list_to_dict(data['keys'])
input_type['implements'] = data.get('implements', [])
input_type['reducible_to_key'] = data.get('reducible_to_key', None)
elif input_type['base_type'] == 'Abstract':
input_type['implementations'] = data['implementations']
input_type['default_descendant'] = data.get('default_descendant', None)
return input_type
def _parse_range(data):
"""Parses the format range properties - min, max."""
input_type = {}
try:
input_type['min'] = data['range'][0]
except (KeyError, TypeError): # set default value
input_type['min'] = float('-inf')
try:
input_type['max'] = data['range'][1]
except (KeyError, TypeError): # set default value
input_type['max'] = float('inf')
return input_type
def _list_to_dict(list_, key_label='key'):
"""
Transforms a list of dictionaries into a dictionary of dictionaries.
Original dictionaries are assigned key specified in each of them
by key_label.
"""
dict_ = {}
for item in list_:
dict_[item[key_label]] = item
return dict_
<|fim▁end|> | """Determine whether given value is a parameter string."""
if not isinstance(value, str):
return False
return RE_PARAM.match(value) |
<|file_name|>format.py<|end_file_name|><|fim▁begin|>"""
Contains format specification class and methods to parse it from JSON.
.. codeauthor:: Tomas Krizek <[email protected]>
"""
import json
import re
def get_root_input_type_from_json(data):
"""Return the root input type from JSON formatted string."""
return parse_format(json.loads(data))
def parse_format(data):
"""Returns root input type from data."""
input_types = {}
data = data['ist_nodes']
root_id = data[0]['id'] # set root type
for item in data:
input_type = _get_input_type(item)
if input_type is not None:
input_types[input_type['id']] = input_type # register by id
_substitute_ids_with_references(input_types)
return input_types[root_id]
SCALAR = ['Integer', 'Double', 'Bool', 'String', 'Selection', 'FileName']
def is_scalar(input_type):
"""Returns True if input_type is scalar."""
return input_type['base_type'] in SCALAR
RE_PARAM = re.compile('^<([a-zA-Z][a-zA-Z0-9_]*)>$')
def is_param(value):
"""Determine whether given value is a parameter string."""
if not isinstance(value, str):
return False
return RE_PARAM.match(value)
def _substitute_ids_with_references(input_types):
<|fim_middle|>
def _get_input_type(data):
"""Returns the input_type data structure that defines an input type
and its constraints for validation."""
if 'id' not in data or 'input_type' not in data:
return None
input_type = dict(
id=data['id'],
base_type=data['input_type']
)
input_type['name'] = data.get('name', '')
input_type['full_name'] = data.get('full_name', '')
input_type['description'] = data.get('description', '')
input_type['attributes'] = data.get('attributes', {})
if input_type['base_type'] in ['Double', 'Integer']:
input_type.update(_parse_range(data))
elif input_type['base_type'] == 'Array':
input_type.update(_parse_range(data))
if input_type['min'] < 0:
input_type['min'] = 0
input_type['subtype'] = data['subtype']
elif input_type['base_type'] == 'FileName':
input_type['file_mode'] = data['file_mode']
elif input_type['base_type'] == 'Selection':
input_type['values'] = _list_to_dict(data['values'], 'name')
elif input_type['base_type'] == 'Record':
input_type['keys'] = _list_to_dict(data['keys'])
input_type['implements'] = data.get('implements', [])
input_type['reducible_to_key'] = data.get('reducible_to_key', None)
elif input_type['base_type'] == 'Abstract':
input_type['implementations'] = data['implementations']
input_type['default_descendant'] = data.get('default_descendant', None)
return input_type
def _parse_range(data):
"""Parses the format range properties - min, max."""
input_type = {}
try:
input_type['min'] = data['range'][0]
except (KeyError, TypeError): # set default value
input_type['min'] = float('-inf')
try:
input_type['max'] = data['range'][1]
except (KeyError, TypeError): # set default value
input_type['max'] = float('inf')
return input_type
def _list_to_dict(list_, key_label='key'):
"""
Transforms a list of dictionaries into a dictionary of dictionaries.
Original dictionaries are assigned key specified in each of them
by key_label.
"""
dict_ = {}
for item in list_:
dict_[item[key_label]] = item
return dict_
<|fim▁end|> | """Replaces ids or type names with python object references."""
input_type = {}
def _substitute_implementations():
"""Replaces implementation ids with input_types."""
impls = {}
for id_ in input_type['implementations']:
type_ = input_types[id_]
impls[type_['name']] = type_
input_type['implementations'] = impls
def _substitute_default_descendant():
"""Replaces default descendant id with input_type."""
id_ = input_type.get('default_descendant', None)
if id_ is not None:
input_type['default_descendant'] = input_types[id_]
def _substitute_key_type():
"""Replaces key type with input_type."""
# pylint: disable=unused-variable, invalid-name
for __, value in input_type['keys'].items():
value['type'] = input_types[value['type']]
# pylint: disable=unused-variable, invalid-name
for __, input_type in input_types.items():
if input_type['base_type'] == 'Array':
input_type['subtype'] = input_types[input_type['subtype']]
elif input_type['base_type'] == 'Abstract':
_substitute_implementations()
_substitute_default_descendant()
elif input_type['base_type'] == 'Record':
_substitute_key_type() |
<|file_name|>format.py<|end_file_name|><|fim▁begin|>"""
Contains format specification class and methods to parse it from JSON.
.. codeauthor:: Tomas Krizek <[email protected]>
"""
import json
import re
def get_root_input_type_from_json(data):
"""Return the root input type from JSON formatted string."""
return parse_format(json.loads(data))
def parse_format(data):
"""Returns root input type from data."""
input_types = {}
data = data['ist_nodes']
root_id = data[0]['id'] # set root type
for item in data:
input_type = _get_input_type(item)
if input_type is not None:
input_types[input_type['id']] = input_type # register by id
_substitute_ids_with_references(input_types)
return input_types[root_id]
SCALAR = ['Integer', 'Double', 'Bool', 'String', 'Selection', 'FileName']
def is_scalar(input_type):
"""Returns True if input_type is scalar."""
return input_type['base_type'] in SCALAR
RE_PARAM = re.compile('^<([a-zA-Z][a-zA-Z0-9_]*)>$')
def is_param(value):
"""Determine whether given value is a parameter string."""
if not isinstance(value, str):
return False
return RE_PARAM.match(value)
def _substitute_ids_with_references(input_types):
"""Replaces ids or type names with python object references."""
input_type = {}
def _substitute_implementations():
<|fim_middle|>
def _substitute_default_descendant():
"""Replaces default descendant id with input_type."""
id_ = input_type.get('default_descendant', None)
if id_ is not None:
input_type['default_descendant'] = input_types[id_]
def _substitute_key_type():
"""Replaces key type with input_type."""
# pylint: disable=unused-variable, invalid-name
for __, value in input_type['keys'].items():
value['type'] = input_types[value['type']]
# pylint: disable=unused-variable, invalid-name
for __, input_type in input_types.items():
if input_type['base_type'] == 'Array':
input_type['subtype'] = input_types[input_type['subtype']]
elif input_type['base_type'] == 'Abstract':
_substitute_implementations()
_substitute_default_descendant()
elif input_type['base_type'] == 'Record':
_substitute_key_type()
def _get_input_type(data):
"""Returns the input_type data structure that defines an input type
and its constraints for validation."""
if 'id' not in data or 'input_type' not in data:
return None
input_type = dict(
id=data['id'],
base_type=data['input_type']
)
input_type['name'] = data.get('name', '')
input_type['full_name'] = data.get('full_name', '')
input_type['description'] = data.get('description', '')
input_type['attributes'] = data.get('attributes', {})
if input_type['base_type'] in ['Double', 'Integer']:
input_type.update(_parse_range(data))
elif input_type['base_type'] == 'Array':
input_type.update(_parse_range(data))
if input_type['min'] < 0:
input_type['min'] = 0
input_type['subtype'] = data['subtype']
elif input_type['base_type'] == 'FileName':
input_type['file_mode'] = data['file_mode']
elif input_type['base_type'] == 'Selection':
input_type['values'] = _list_to_dict(data['values'], 'name')
elif input_type['base_type'] == 'Record':
input_type['keys'] = _list_to_dict(data['keys'])
input_type['implements'] = data.get('implements', [])
input_type['reducible_to_key'] = data.get('reducible_to_key', None)
elif input_type['base_type'] == 'Abstract':
input_type['implementations'] = data['implementations']
input_type['default_descendant'] = data.get('default_descendant', None)
return input_type
def _parse_range(data):
"""Parses the format range properties - min, max."""
input_type = {}
try:
input_type['min'] = data['range'][0]
except (KeyError, TypeError): # set default value
input_type['min'] = float('-inf')
try:
input_type['max'] = data['range'][1]
except (KeyError, TypeError): # set default value
input_type['max'] = float('inf')
return input_type
def _list_to_dict(list_, key_label='key'):
"""
Transforms a list of dictionaries into a dictionary of dictionaries.
Original dictionaries are assigned key specified in each of them
by key_label.
"""
dict_ = {}
for item in list_:
dict_[item[key_label]] = item
return dict_
<|fim▁end|> | """Replaces implementation ids with input_types."""
impls = {}
for id_ in input_type['implementations']:
type_ = input_types[id_]
impls[type_['name']] = type_
input_type['implementations'] = impls |
<|file_name|>format.py<|end_file_name|><|fim▁begin|>"""
Contains format specification class and methods to parse it from JSON.
.. codeauthor:: Tomas Krizek <[email protected]>
"""
import json
import re
def get_root_input_type_from_json(data):
"""Return the root input type from JSON formatted string."""
return parse_format(json.loads(data))
def parse_format(data):
"""Returns root input type from data."""
input_types = {}
data = data['ist_nodes']
root_id = data[0]['id'] # set root type
for item in data:
input_type = _get_input_type(item)
if input_type is not None:
input_types[input_type['id']] = input_type # register by id
_substitute_ids_with_references(input_types)
return input_types[root_id]
SCALAR = ['Integer', 'Double', 'Bool', 'String', 'Selection', 'FileName']
def is_scalar(input_type):
"""Returns True if input_type is scalar."""
return input_type['base_type'] in SCALAR
RE_PARAM = re.compile('^<([a-zA-Z][a-zA-Z0-9_]*)>$')
def is_param(value):
"""Determine whether given value is a parameter string."""
if not isinstance(value, str):
return False
return RE_PARAM.match(value)
def _substitute_ids_with_references(input_types):
"""Replaces ids or type names with python object references."""
input_type = {}
def _substitute_implementations():
"""Replaces implementation ids with input_types."""
impls = {}
for id_ in input_type['implementations']:
type_ = input_types[id_]
impls[type_['name']] = type_
input_type['implementations'] = impls
def _substitute_default_descendant():
<|fim_middle|>
def _substitute_key_type():
"""Replaces key type with input_type."""
# pylint: disable=unused-variable, invalid-name
for __, value in input_type['keys'].items():
value['type'] = input_types[value['type']]
# pylint: disable=unused-variable, invalid-name
for __, input_type in input_types.items():
if input_type['base_type'] == 'Array':
input_type['subtype'] = input_types[input_type['subtype']]
elif input_type['base_type'] == 'Abstract':
_substitute_implementations()
_substitute_default_descendant()
elif input_type['base_type'] == 'Record':
_substitute_key_type()
def _get_input_type(data):
"""Returns the input_type data structure that defines an input type
and its constraints for validation."""
if 'id' not in data or 'input_type' not in data:
return None
input_type = dict(
id=data['id'],
base_type=data['input_type']
)
input_type['name'] = data.get('name', '')
input_type['full_name'] = data.get('full_name', '')
input_type['description'] = data.get('description', '')
input_type['attributes'] = data.get('attributes', {})
if input_type['base_type'] in ['Double', 'Integer']:
input_type.update(_parse_range(data))
elif input_type['base_type'] == 'Array':
input_type.update(_parse_range(data))
if input_type['min'] < 0:
input_type['min'] = 0
input_type['subtype'] = data['subtype']
elif input_type['base_type'] == 'FileName':
input_type['file_mode'] = data['file_mode']
elif input_type['base_type'] == 'Selection':
input_type['values'] = _list_to_dict(data['values'], 'name')
elif input_type['base_type'] == 'Record':
input_type['keys'] = _list_to_dict(data['keys'])
input_type['implements'] = data.get('implements', [])
input_type['reducible_to_key'] = data.get('reducible_to_key', None)
elif input_type['base_type'] == 'Abstract':
input_type['implementations'] = data['implementations']
input_type['default_descendant'] = data.get('default_descendant', None)
return input_type
def _parse_range(data):
"""Parses the format range properties - min, max."""
input_type = {}
try:
input_type['min'] = data['range'][0]
except (KeyError, TypeError): # set default value
input_type['min'] = float('-inf')
try:
input_type['max'] = data['range'][1]
except (KeyError, TypeError): # set default value
input_type['max'] = float('inf')
return input_type
def _list_to_dict(list_, key_label='key'):
"""
Transforms a list of dictionaries into a dictionary of dictionaries.
Original dictionaries are assigned key specified in each of them
by key_label.
"""
dict_ = {}
for item in list_:
dict_[item[key_label]] = item
return dict_
<|fim▁end|> | """Replaces default descendant id with input_type."""
id_ = input_type.get('default_descendant', None)
if id_ is not None:
input_type['default_descendant'] = input_types[id_] |
<|file_name|>format.py<|end_file_name|><|fim▁begin|>"""
Contains format specification class and methods to parse it from JSON.
.. codeauthor:: Tomas Krizek <[email protected]>
"""
import json
import re
def get_root_input_type_from_json(data):
"""Return the root input type from JSON formatted string."""
return parse_format(json.loads(data))
def parse_format(data):
"""Returns root input type from data."""
input_types = {}
data = data['ist_nodes']
root_id = data[0]['id'] # set root type
for item in data:
input_type = _get_input_type(item)
if input_type is not None:
input_types[input_type['id']] = input_type # register by id
_substitute_ids_with_references(input_types)
return input_types[root_id]
SCALAR = ['Integer', 'Double', 'Bool', 'String', 'Selection', 'FileName']
def is_scalar(input_type):
"""Returns True if input_type is scalar."""
return input_type['base_type'] in SCALAR
RE_PARAM = re.compile('^<([a-zA-Z][a-zA-Z0-9_]*)>$')
def is_param(value):
"""Determine whether given value is a parameter string."""
if not isinstance(value, str):
return False
return RE_PARAM.match(value)
def _substitute_ids_with_references(input_types):
"""Replaces ids or type names with python object references."""
input_type = {}
def _substitute_implementations():
"""Replaces implementation ids with input_types."""
impls = {}
for id_ in input_type['implementations']:
type_ = input_types[id_]
impls[type_['name']] = type_
input_type['implementations'] = impls
def _substitute_default_descendant():
"""Replaces default descendant id with input_type."""
id_ = input_type.get('default_descendant', None)
if id_ is not None:
input_type['default_descendant'] = input_types[id_]
def _substitute_key_type():
<|fim_middle|>
# pylint: disable=unused-variable, invalid-name
for __, input_type in input_types.items():
if input_type['base_type'] == 'Array':
input_type['subtype'] = input_types[input_type['subtype']]
elif input_type['base_type'] == 'Abstract':
_substitute_implementations()
_substitute_default_descendant()
elif input_type['base_type'] == 'Record':
_substitute_key_type()
def _get_input_type(data):
"""Returns the input_type data structure that defines an input type
and its constraints for validation."""
if 'id' not in data or 'input_type' not in data:
return None
input_type = dict(
id=data['id'],
base_type=data['input_type']
)
input_type['name'] = data.get('name', '')
input_type['full_name'] = data.get('full_name', '')
input_type['description'] = data.get('description', '')
input_type['attributes'] = data.get('attributes', {})
if input_type['base_type'] in ['Double', 'Integer']:
input_type.update(_parse_range(data))
elif input_type['base_type'] == 'Array':
input_type.update(_parse_range(data))
if input_type['min'] < 0:
input_type['min'] = 0
input_type['subtype'] = data['subtype']
elif input_type['base_type'] == 'FileName':
input_type['file_mode'] = data['file_mode']
elif input_type['base_type'] == 'Selection':
input_type['values'] = _list_to_dict(data['values'], 'name')
elif input_type['base_type'] == 'Record':
input_type['keys'] = _list_to_dict(data['keys'])
input_type['implements'] = data.get('implements', [])
input_type['reducible_to_key'] = data.get('reducible_to_key', None)
elif input_type['base_type'] == 'Abstract':
input_type['implementations'] = data['implementations']
input_type['default_descendant'] = data.get('default_descendant', None)
return input_type
def _parse_range(data):
"""Parses the format range properties - min, max."""
input_type = {}
try:
input_type['min'] = data['range'][0]
except (KeyError, TypeError): # set default value
input_type['min'] = float('-inf')
try:
input_type['max'] = data['range'][1]
except (KeyError, TypeError): # set default value
input_type['max'] = float('inf')
return input_type
def _list_to_dict(list_, key_label='key'):
"""
Transforms a list of dictionaries into a dictionary of dictionaries.
Original dictionaries are assigned key specified in each of them
by key_label.
"""
dict_ = {}
for item in list_:
dict_[item[key_label]] = item
return dict_
<|fim▁end|> | """Replaces key type with input_type."""
# pylint: disable=unused-variable, invalid-name
for __, value in input_type['keys'].items():
value['type'] = input_types[value['type']] |
<|file_name|>format.py<|end_file_name|><|fim▁begin|>"""
Contains format specification class and methods to parse it from JSON.
.. codeauthor:: Tomas Krizek <[email protected]>
"""
import json
import re
def get_root_input_type_from_json(data):
"""Return the root input type from JSON formatted string."""
return parse_format(json.loads(data))
def parse_format(data):
"""Returns root input type from data."""
input_types = {}
data = data['ist_nodes']
root_id = data[0]['id'] # set root type
for item in data:
input_type = _get_input_type(item)
if input_type is not None:
input_types[input_type['id']] = input_type # register by id
_substitute_ids_with_references(input_types)
return input_types[root_id]
SCALAR = ['Integer', 'Double', 'Bool', 'String', 'Selection', 'FileName']
def is_scalar(input_type):
"""Returns True if input_type is scalar."""
return input_type['base_type'] in SCALAR
RE_PARAM = re.compile('^<([a-zA-Z][a-zA-Z0-9_]*)>$')
def is_param(value):
"""Determine whether given value is a parameter string."""
if not isinstance(value, str):
return False
return RE_PARAM.match(value)
def _substitute_ids_with_references(input_types):
"""Replaces ids or type names with python object references."""
input_type = {}
def _substitute_implementations():
"""Replaces implementation ids with input_types."""
impls = {}
for id_ in input_type['implementations']:
type_ = input_types[id_]
impls[type_['name']] = type_
input_type['implementations'] = impls
def _substitute_default_descendant():
"""Replaces default descendant id with input_type."""
id_ = input_type.get('default_descendant', None)
if id_ is not None:
input_type['default_descendant'] = input_types[id_]
def _substitute_key_type():
"""Replaces key type with input_type."""
# pylint: disable=unused-variable, invalid-name
for __, value in input_type['keys'].items():
value['type'] = input_types[value['type']]
# pylint: disable=unused-variable, invalid-name
for __, input_type in input_types.items():
if input_type['base_type'] == 'Array':
input_type['subtype'] = input_types[input_type['subtype']]
elif input_type['base_type'] == 'Abstract':
_substitute_implementations()
_substitute_default_descendant()
elif input_type['base_type'] == 'Record':
_substitute_key_type()
def _get_input_type(data):
<|fim_middle|>
def _parse_range(data):
"""Parses the format range properties - min, max."""
input_type = {}
try:
input_type['min'] = data['range'][0]
except (KeyError, TypeError): # set default value
input_type['min'] = float('-inf')
try:
input_type['max'] = data['range'][1]
except (KeyError, TypeError): # set default value
input_type['max'] = float('inf')
return input_type
def _list_to_dict(list_, key_label='key'):
"""
Transforms a list of dictionaries into a dictionary of dictionaries.
Original dictionaries are assigned key specified in each of them
by key_label.
"""
dict_ = {}
for item in list_:
dict_[item[key_label]] = item
return dict_
<|fim▁end|> | """Returns the input_type data structure that defines an input type
and its constraints for validation."""
if 'id' not in data or 'input_type' not in data:
return None
input_type = dict(
id=data['id'],
base_type=data['input_type']
)
input_type['name'] = data.get('name', '')
input_type['full_name'] = data.get('full_name', '')
input_type['description'] = data.get('description', '')
input_type['attributes'] = data.get('attributes', {})
if input_type['base_type'] in ['Double', 'Integer']:
input_type.update(_parse_range(data))
elif input_type['base_type'] == 'Array':
input_type.update(_parse_range(data))
if input_type['min'] < 0:
input_type['min'] = 0
input_type['subtype'] = data['subtype']
elif input_type['base_type'] == 'FileName':
input_type['file_mode'] = data['file_mode']
elif input_type['base_type'] == 'Selection':
input_type['values'] = _list_to_dict(data['values'], 'name')
elif input_type['base_type'] == 'Record':
input_type['keys'] = _list_to_dict(data['keys'])
input_type['implements'] = data.get('implements', [])
input_type['reducible_to_key'] = data.get('reducible_to_key', None)
elif input_type['base_type'] == 'Abstract':
input_type['implementations'] = data['implementations']
input_type['default_descendant'] = data.get('default_descendant', None)
return input_type |
<|file_name|>format.py<|end_file_name|><|fim▁begin|>"""
Contains format specification class and methods to parse it from JSON.
.. codeauthor:: Tomas Krizek <[email protected]>
"""
import json
import re
def get_root_input_type_from_json(data):
"""Return the root input type from JSON formatted string."""
return parse_format(json.loads(data))
def parse_format(data):
"""Returns root input type from data."""
input_types = {}
data = data['ist_nodes']
root_id = data[0]['id'] # set root type
for item in data:
input_type = _get_input_type(item)
if input_type is not None:
input_types[input_type['id']] = input_type # register by id
_substitute_ids_with_references(input_types)
return input_types[root_id]
SCALAR = ['Integer', 'Double', 'Bool', 'String', 'Selection', 'FileName']
def is_scalar(input_type):
"""Returns True if input_type is scalar."""
return input_type['base_type'] in SCALAR
RE_PARAM = re.compile('^<([a-zA-Z][a-zA-Z0-9_]*)>$')
def is_param(value):
"""Determine whether given value is a parameter string."""
if not isinstance(value, str):
return False
return RE_PARAM.match(value)
def _substitute_ids_with_references(input_types):
"""Replaces ids or type names with python object references."""
input_type = {}
def _substitute_implementations():
"""Replaces implementation ids with input_types."""
impls = {}
for id_ in input_type['implementations']:
type_ = input_types[id_]
impls[type_['name']] = type_
input_type['implementations'] = impls
def _substitute_default_descendant():
"""Replaces default descendant id with input_type."""
id_ = input_type.get('default_descendant', None)
if id_ is not None:
input_type['default_descendant'] = input_types[id_]
def _substitute_key_type():
"""Replaces key type with input_type."""
# pylint: disable=unused-variable, invalid-name
for __, value in input_type['keys'].items():
value['type'] = input_types[value['type']]
# pylint: disable=unused-variable, invalid-name
for __, input_type in input_types.items():
if input_type['base_type'] == 'Array':
input_type['subtype'] = input_types[input_type['subtype']]
elif input_type['base_type'] == 'Abstract':
_substitute_implementations()
_substitute_default_descendant()
elif input_type['base_type'] == 'Record':
_substitute_key_type()
def _get_input_type(data):
"""Returns the input_type data structure that defines an input type
and its constraints for validation."""
if 'id' not in data or 'input_type' not in data:
return None
input_type = dict(
id=data['id'],
base_type=data['input_type']
)
input_type['name'] = data.get('name', '')
input_type['full_name'] = data.get('full_name', '')
input_type['description'] = data.get('description', '')
input_type['attributes'] = data.get('attributes', {})
if input_type['base_type'] in ['Double', 'Integer']:
input_type.update(_parse_range(data))
elif input_type['base_type'] == 'Array':
input_type.update(_parse_range(data))
if input_type['min'] < 0:
input_type['min'] = 0
input_type['subtype'] = data['subtype']
elif input_type['base_type'] == 'FileName':
input_type['file_mode'] = data['file_mode']
elif input_type['base_type'] == 'Selection':
input_type['values'] = _list_to_dict(data['values'], 'name')
elif input_type['base_type'] == 'Record':
input_type['keys'] = _list_to_dict(data['keys'])
input_type['implements'] = data.get('implements', [])
input_type['reducible_to_key'] = data.get('reducible_to_key', None)
elif input_type['base_type'] == 'Abstract':
input_type['implementations'] = data['implementations']
input_type['default_descendant'] = data.get('default_descendant', None)
return input_type
def _parse_range(data):
<|fim_middle|>
def _list_to_dict(list_, key_label='key'):
"""
Transforms a list of dictionaries into a dictionary of dictionaries.
Original dictionaries are assigned key specified in each of them
by key_label.
"""
dict_ = {}
for item in list_:
dict_[item[key_label]] = item
return dict_
<|fim▁end|> | """Parses the format range properties - min, max."""
input_type = {}
try:
input_type['min'] = data['range'][0]
except (KeyError, TypeError): # set default value
input_type['min'] = float('-inf')
try:
input_type['max'] = data['range'][1]
except (KeyError, TypeError): # set default value
input_type['max'] = float('inf')
return input_type |
<|file_name|>format.py<|end_file_name|><|fim▁begin|>"""
Contains format specification class and methods to parse it from JSON.
.. codeauthor:: Tomas Krizek <[email protected]>
"""
import json
import re
def get_root_input_type_from_json(data):
"""Return the root input type from JSON formatted string."""
return parse_format(json.loads(data))
def parse_format(data):
"""Returns root input type from data."""
input_types = {}
data = data['ist_nodes']
root_id = data[0]['id'] # set root type
for item in data:
input_type = _get_input_type(item)
if input_type is not None:
input_types[input_type['id']] = input_type # register by id
_substitute_ids_with_references(input_types)
return input_types[root_id]
SCALAR = ['Integer', 'Double', 'Bool', 'String', 'Selection', 'FileName']
def is_scalar(input_type):
"""Returns True if input_type is scalar."""
return input_type['base_type'] in SCALAR
RE_PARAM = re.compile('^<([a-zA-Z][a-zA-Z0-9_]*)>$')
def is_param(value):
"""Determine whether given value is a parameter string."""
if not isinstance(value, str):
return False
return RE_PARAM.match(value)
def _substitute_ids_with_references(input_types):
"""Replaces ids or type names with python object references."""
input_type = {}
def _substitute_implementations():
"""Replaces implementation ids with input_types."""
impls = {}
for id_ in input_type['implementations']:
type_ = input_types[id_]
impls[type_['name']] = type_
input_type['implementations'] = impls
def _substitute_default_descendant():
"""Replaces default descendant id with input_type."""
id_ = input_type.get('default_descendant', None)
if id_ is not None:
input_type['default_descendant'] = input_types[id_]
def _substitute_key_type():
"""Replaces key type with input_type."""
# pylint: disable=unused-variable, invalid-name
for __, value in input_type['keys'].items():
value['type'] = input_types[value['type']]
# pylint: disable=unused-variable, invalid-name
for __, input_type in input_types.items():
if input_type['base_type'] == 'Array':
input_type['subtype'] = input_types[input_type['subtype']]
elif input_type['base_type'] == 'Abstract':
_substitute_implementations()
_substitute_default_descendant()
elif input_type['base_type'] == 'Record':
_substitute_key_type()
def _get_input_type(data):
"""Returns the input_type data structure that defines an input type
and its constraints for validation."""
if 'id' not in data or 'input_type' not in data:
return None
input_type = dict(
id=data['id'],
base_type=data['input_type']
)
input_type['name'] = data.get('name', '')
input_type['full_name'] = data.get('full_name', '')
input_type['description'] = data.get('description', '')
input_type['attributes'] = data.get('attributes', {})
if input_type['base_type'] in ['Double', 'Integer']:
input_type.update(_parse_range(data))
elif input_type['base_type'] == 'Array':
input_type.update(_parse_range(data))
if input_type['min'] < 0:
input_type['min'] = 0
input_type['subtype'] = data['subtype']
elif input_type['base_type'] == 'FileName':
input_type['file_mode'] = data['file_mode']
elif input_type['base_type'] == 'Selection':
input_type['values'] = _list_to_dict(data['values'], 'name')
elif input_type['base_type'] == 'Record':
input_type['keys'] = _list_to_dict(data['keys'])
input_type['implements'] = data.get('implements', [])
input_type['reducible_to_key'] = data.get('reducible_to_key', None)
elif input_type['base_type'] == 'Abstract':
input_type['implementations'] = data['implementations']
input_type['default_descendant'] = data.get('default_descendant', None)
return input_type
def _parse_range(data):
"""Parses the format range properties - min, max."""
input_type = {}
try:
input_type['min'] = data['range'][0]
except (KeyError, TypeError): # set default value
input_type['min'] = float('-inf')
try:
input_type['max'] = data['range'][1]
except (KeyError, TypeError): # set default value
input_type['max'] = float('inf')
return input_type
def _list_to_dict(list_, key_label='key'):
<|fim_middle|>
<|fim▁end|> | """
Transforms a list of dictionaries into a dictionary of dictionaries.
Original dictionaries are assigned key specified in each of them
by key_label.
"""
dict_ = {}
for item in list_:
dict_[item[key_label]] = item
return dict_ |
<|file_name|>format.py<|end_file_name|><|fim▁begin|>"""
Contains format specification class and methods to parse it from JSON.
.. codeauthor:: Tomas Krizek <[email protected]>
"""
import json
import re
def get_root_input_type_from_json(data):
"""Return the root input type from JSON formatted string."""
return parse_format(json.loads(data))
def parse_format(data):
"""Returns root input type from data."""
input_types = {}
data = data['ist_nodes']
root_id = data[0]['id'] # set root type
for item in data:
input_type = _get_input_type(item)
if input_type is not None:
<|fim_middle|>
_substitute_ids_with_references(input_types)
return input_types[root_id]
SCALAR = ['Integer', 'Double', 'Bool', 'String', 'Selection', 'FileName']
def is_scalar(input_type):
"""Returns True if input_type is scalar."""
return input_type['base_type'] in SCALAR
RE_PARAM = re.compile('^<([a-zA-Z][a-zA-Z0-9_]*)>$')
def is_param(value):
"""Determine whether given value is a parameter string."""
if not isinstance(value, str):
return False
return RE_PARAM.match(value)
def _substitute_ids_with_references(input_types):
"""Replaces ids or type names with python object references."""
input_type = {}
def _substitute_implementations():
"""Replaces implementation ids with input_types."""
impls = {}
for id_ in input_type['implementations']:
type_ = input_types[id_]
impls[type_['name']] = type_
input_type['implementations'] = impls
def _substitute_default_descendant():
"""Replaces default descendant id with input_type."""
id_ = input_type.get('default_descendant', None)
if id_ is not None:
input_type['default_descendant'] = input_types[id_]
def _substitute_key_type():
"""Replaces key type with input_type."""
# pylint: disable=unused-variable, invalid-name
for __, value in input_type['keys'].items():
value['type'] = input_types[value['type']]
# pylint: disable=unused-variable, invalid-name
for __, input_type in input_types.items():
if input_type['base_type'] == 'Array':
input_type['subtype'] = input_types[input_type['subtype']]
elif input_type['base_type'] == 'Abstract':
_substitute_implementations()
_substitute_default_descendant()
elif input_type['base_type'] == 'Record':
_substitute_key_type()
def _get_input_type(data):
"""Returns the input_type data structure that defines an input type
and its constraints for validation."""
if 'id' not in data or 'input_type' not in data:
return None
input_type = dict(
id=data['id'],
base_type=data['input_type']
)
input_type['name'] = data.get('name', '')
input_type['full_name'] = data.get('full_name', '')
input_type['description'] = data.get('description', '')
input_type['attributes'] = data.get('attributes', {})
if input_type['base_type'] in ['Double', 'Integer']:
input_type.update(_parse_range(data))
elif input_type['base_type'] == 'Array':
input_type.update(_parse_range(data))
if input_type['min'] < 0:
input_type['min'] = 0
input_type['subtype'] = data['subtype']
elif input_type['base_type'] == 'FileName':
input_type['file_mode'] = data['file_mode']
elif input_type['base_type'] == 'Selection':
input_type['values'] = _list_to_dict(data['values'], 'name')
elif input_type['base_type'] == 'Record':
input_type['keys'] = _list_to_dict(data['keys'])
input_type['implements'] = data.get('implements', [])
input_type['reducible_to_key'] = data.get('reducible_to_key', None)
elif input_type['base_type'] == 'Abstract':
input_type['implementations'] = data['implementations']
input_type['default_descendant'] = data.get('default_descendant', None)
return input_type
def _parse_range(data):
"""Parses the format range properties - min, max."""
input_type = {}
try:
input_type['min'] = data['range'][0]
except (KeyError, TypeError): # set default value
input_type['min'] = float('-inf')
try:
input_type['max'] = data['range'][1]
except (KeyError, TypeError): # set default value
input_type['max'] = float('inf')
return input_type
def _list_to_dict(list_, key_label='key'):
"""
Transforms a list of dictionaries into a dictionary of dictionaries.
Original dictionaries are assigned key specified in each of them
by key_label.
"""
dict_ = {}
for item in list_:
dict_[item[key_label]] = item
return dict_
<|fim▁end|> | input_types[input_type['id']] = input_type # register by id |
<|file_name|>format.py<|end_file_name|><|fim▁begin|>"""
Contains format specification class and methods to parse it from JSON.
.. codeauthor:: Tomas Krizek <[email protected]>
"""
import json
import re
def get_root_input_type_from_json(data):
"""Return the root input type from JSON formatted string."""
return parse_format(json.loads(data))
def parse_format(data):
"""Returns root input type from data."""
input_types = {}
data = data['ist_nodes']
root_id = data[0]['id'] # set root type
for item in data:
input_type = _get_input_type(item)
if input_type is not None:
input_types[input_type['id']] = input_type # register by id
_substitute_ids_with_references(input_types)
return input_types[root_id]
SCALAR = ['Integer', 'Double', 'Bool', 'String', 'Selection', 'FileName']
def is_scalar(input_type):
"""Returns True if input_type is scalar."""
return input_type['base_type'] in SCALAR
RE_PARAM = re.compile('^<([a-zA-Z][a-zA-Z0-9_]*)>$')
def is_param(value):
"""Determine whether given value is a parameter string."""
if not isinstance(value, str):
<|fim_middle|>
return RE_PARAM.match(value)
def _substitute_ids_with_references(input_types):
"""Replaces ids or type names with python object references."""
input_type = {}
def _substitute_implementations():
"""Replaces implementation ids with input_types."""
impls = {}
for id_ in input_type['implementations']:
type_ = input_types[id_]
impls[type_['name']] = type_
input_type['implementations'] = impls
def _substitute_default_descendant():
"""Replaces default descendant id with input_type."""
id_ = input_type.get('default_descendant', None)
if id_ is not None:
input_type['default_descendant'] = input_types[id_]
def _substitute_key_type():
"""Replaces key type with input_type."""
# pylint: disable=unused-variable, invalid-name
for __, value in input_type['keys'].items():
value['type'] = input_types[value['type']]
# pylint: disable=unused-variable, invalid-name
for __, input_type in input_types.items():
if input_type['base_type'] == 'Array':
input_type['subtype'] = input_types[input_type['subtype']]
elif input_type['base_type'] == 'Abstract':
_substitute_implementations()
_substitute_default_descendant()
elif input_type['base_type'] == 'Record':
_substitute_key_type()
def _get_input_type(data):
"""Returns the input_type data structure that defines an input type
and its constraints for validation."""
if 'id' not in data or 'input_type' not in data:
return None
input_type = dict(
id=data['id'],
base_type=data['input_type']
)
input_type['name'] = data.get('name', '')
input_type['full_name'] = data.get('full_name', '')
input_type['description'] = data.get('description', '')
input_type['attributes'] = data.get('attributes', {})
if input_type['base_type'] in ['Double', 'Integer']:
input_type.update(_parse_range(data))
elif input_type['base_type'] == 'Array':
input_type.update(_parse_range(data))
if input_type['min'] < 0:
input_type['min'] = 0
input_type['subtype'] = data['subtype']
elif input_type['base_type'] == 'FileName':
input_type['file_mode'] = data['file_mode']
elif input_type['base_type'] == 'Selection':
input_type['values'] = _list_to_dict(data['values'], 'name')
elif input_type['base_type'] == 'Record':
input_type['keys'] = _list_to_dict(data['keys'])
input_type['implements'] = data.get('implements', [])
input_type['reducible_to_key'] = data.get('reducible_to_key', None)
elif input_type['base_type'] == 'Abstract':
input_type['implementations'] = data['implementations']
input_type['default_descendant'] = data.get('default_descendant', None)
return input_type
def _parse_range(data):
"""Parses the format range properties - min, max."""
input_type = {}
try:
input_type['min'] = data['range'][0]
except (KeyError, TypeError): # set default value
input_type['min'] = float('-inf')
try:
input_type['max'] = data['range'][1]
except (KeyError, TypeError): # set default value
input_type['max'] = float('inf')
return input_type
def _list_to_dict(list_, key_label='key'):
"""
Transforms a list of dictionaries into a dictionary of dictionaries.
Original dictionaries are assigned key specified in each of them
by key_label.
"""
dict_ = {}
for item in list_:
dict_[item[key_label]] = item
return dict_
<|fim▁end|> | return False |
<|file_name|>format.py<|end_file_name|><|fim▁begin|>"""
Contains format specification class and methods to parse it from JSON.
.. codeauthor:: Tomas Krizek <[email protected]>
"""
import json
import re
def get_root_input_type_from_json(data):
"""Return the root input type from JSON formatted string."""
return parse_format(json.loads(data))
def parse_format(data):
"""Returns root input type from data."""
input_types = {}
data = data['ist_nodes']
root_id = data[0]['id'] # set root type
for item in data:
input_type = _get_input_type(item)
if input_type is not None:
input_types[input_type['id']] = input_type # register by id
_substitute_ids_with_references(input_types)
return input_types[root_id]
SCALAR = ['Integer', 'Double', 'Bool', 'String', 'Selection', 'FileName']
def is_scalar(input_type):
"""Returns True if input_type is scalar."""
return input_type['base_type'] in SCALAR
RE_PARAM = re.compile('^<([a-zA-Z][a-zA-Z0-9_]*)>$')
def is_param(value):
"""Determine whether given value is a parameter string."""
if not isinstance(value, str):
return False
return RE_PARAM.match(value)
def _substitute_ids_with_references(input_types):
"""Replaces ids or type names with python object references."""
input_type = {}
def _substitute_implementations():
"""Replaces implementation ids with input_types."""
impls = {}
for id_ in input_type['implementations']:
type_ = input_types[id_]
impls[type_['name']] = type_
input_type['implementations'] = impls
def _substitute_default_descendant():
"""Replaces default descendant id with input_type."""
id_ = input_type.get('default_descendant', None)
if id_ is not None:
<|fim_middle|>
def _substitute_key_type():
"""Replaces key type with input_type."""
# pylint: disable=unused-variable, invalid-name
for __, value in input_type['keys'].items():
value['type'] = input_types[value['type']]
# pylint: disable=unused-variable, invalid-name
for __, input_type in input_types.items():
if input_type['base_type'] == 'Array':
input_type['subtype'] = input_types[input_type['subtype']]
elif input_type['base_type'] == 'Abstract':
_substitute_implementations()
_substitute_default_descendant()
elif input_type['base_type'] == 'Record':
_substitute_key_type()
def _get_input_type(data):
"""Returns the input_type data structure that defines an input type
and its constraints for validation."""
if 'id' not in data or 'input_type' not in data:
return None
input_type = dict(
id=data['id'],
base_type=data['input_type']
)
input_type['name'] = data.get('name', '')
input_type['full_name'] = data.get('full_name', '')
input_type['description'] = data.get('description', '')
input_type['attributes'] = data.get('attributes', {})
if input_type['base_type'] in ['Double', 'Integer']:
input_type.update(_parse_range(data))
elif input_type['base_type'] == 'Array':
input_type.update(_parse_range(data))
if input_type['min'] < 0:
input_type['min'] = 0
input_type['subtype'] = data['subtype']
elif input_type['base_type'] == 'FileName':
input_type['file_mode'] = data['file_mode']
elif input_type['base_type'] == 'Selection':
input_type['values'] = _list_to_dict(data['values'], 'name')
elif input_type['base_type'] == 'Record':
input_type['keys'] = _list_to_dict(data['keys'])
input_type['implements'] = data.get('implements', [])
input_type['reducible_to_key'] = data.get('reducible_to_key', None)
elif input_type['base_type'] == 'Abstract':
input_type['implementations'] = data['implementations']
input_type['default_descendant'] = data.get('default_descendant', None)
return input_type
def _parse_range(data):
"""Parses the format range properties - min, max."""
input_type = {}
try:
input_type['min'] = data['range'][0]
except (KeyError, TypeError): # set default value
input_type['min'] = float('-inf')
try:
input_type['max'] = data['range'][1]
except (KeyError, TypeError): # set default value
input_type['max'] = float('inf')
return input_type
def _list_to_dict(list_, key_label='key'):
"""
Transforms a list of dictionaries into a dictionary of dictionaries.
Original dictionaries are assigned key specified in each of them
by key_label.
"""
dict_ = {}
for item in list_:
dict_[item[key_label]] = item
return dict_
<|fim▁end|> | input_type['default_descendant'] = input_types[id_] |
<|file_name|>format.py<|end_file_name|><|fim▁begin|>"""
Contains format specification class and methods to parse it from JSON.
.. codeauthor:: Tomas Krizek <[email protected]>
"""
import json
import re
def get_root_input_type_from_json(data):
"""Return the root input type from JSON formatted string."""
return parse_format(json.loads(data))
def parse_format(data):
"""Returns root input type from data."""
input_types = {}
data = data['ist_nodes']
root_id = data[0]['id'] # set root type
for item in data:
input_type = _get_input_type(item)
if input_type is not None:
input_types[input_type['id']] = input_type # register by id
_substitute_ids_with_references(input_types)
return input_types[root_id]
SCALAR = ['Integer', 'Double', 'Bool', 'String', 'Selection', 'FileName']
def is_scalar(input_type):
"""Returns True if input_type is scalar."""
return input_type['base_type'] in SCALAR
RE_PARAM = re.compile('^<([a-zA-Z][a-zA-Z0-9_]*)>$')
def is_param(value):
"""Determine whether given value is a parameter string."""
if not isinstance(value, str):
return False
return RE_PARAM.match(value)
def _substitute_ids_with_references(input_types):
"""Replaces ids or type names with python object references."""
input_type = {}
def _substitute_implementations():
"""Replaces implementation ids with input_types."""
impls = {}
for id_ in input_type['implementations']:
type_ = input_types[id_]
impls[type_['name']] = type_
input_type['implementations'] = impls
def _substitute_default_descendant():
"""Replaces default descendant id with input_type."""
id_ = input_type.get('default_descendant', None)
if id_ is not None:
input_type['default_descendant'] = input_types[id_]
def _substitute_key_type():
"""Replaces key type with input_type."""
# pylint: disable=unused-variable, invalid-name
for __, value in input_type['keys'].items():
value['type'] = input_types[value['type']]
# pylint: disable=unused-variable, invalid-name
for __, input_type in input_types.items():
if input_type['base_type'] == 'Array':
<|fim_middle|>
elif input_type['base_type'] == 'Abstract':
_substitute_implementations()
_substitute_default_descendant()
elif input_type['base_type'] == 'Record':
_substitute_key_type()
def _get_input_type(data):
"""Returns the input_type data structure that defines an input type
and its constraints for validation."""
if 'id' not in data or 'input_type' not in data:
return None
input_type = dict(
id=data['id'],
base_type=data['input_type']
)
input_type['name'] = data.get('name', '')
input_type['full_name'] = data.get('full_name', '')
input_type['description'] = data.get('description', '')
input_type['attributes'] = data.get('attributes', {})
if input_type['base_type'] in ['Double', 'Integer']:
input_type.update(_parse_range(data))
elif input_type['base_type'] == 'Array':
input_type.update(_parse_range(data))
if input_type['min'] < 0:
input_type['min'] = 0
input_type['subtype'] = data['subtype']
elif input_type['base_type'] == 'FileName':
input_type['file_mode'] = data['file_mode']
elif input_type['base_type'] == 'Selection':
input_type['values'] = _list_to_dict(data['values'], 'name')
elif input_type['base_type'] == 'Record':
input_type['keys'] = _list_to_dict(data['keys'])
input_type['implements'] = data.get('implements', [])
input_type['reducible_to_key'] = data.get('reducible_to_key', None)
elif input_type['base_type'] == 'Abstract':
input_type['implementations'] = data['implementations']
input_type['default_descendant'] = data.get('default_descendant', None)
return input_type
def _parse_range(data):
"""Parses the format range properties - min, max."""
input_type = {}
try:
input_type['min'] = data['range'][0]
except (KeyError, TypeError): # set default value
input_type['min'] = float('-inf')
try:
input_type['max'] = data['range'][1]
except (KeyError, TypeError): # set default value
input_type['max'] = float('inf')
return input_type
def _list_to_dict(list_, key_label='key'):
"""
Transforms a list of dictionaries into a dictionary of dictionaries.
Original dictionaries are assigned key specified in each of them
by key_label.
"""
dict_ = {}
for item in list_:
dict_[item[key_label]] = item
return dict_
<|fim▁end|> | input_type['subtype'] = input_types[input_type['subtype']] |
<|file_name|>format.py<|end_file_name|><|fim▁begin|>"""
Contains format specification class and methods to parse it from JSON.
.. codeauthor:: Tomas Krizek <[email protected]>
"""
import json
import re
def get_root_input_type_from_json(data):
"""Return the root input type from JSON formatted string."""
return parse_format(json.loads(data))
def parse_format(data):
"""Returns root input type from data."""
input_types = {}
data = data['ist_nodes']
root_id = data[0]['id'] # set root type
for item in data:
input_type = _get_input_type(item)
if input_type is not None:
input_types[input_type['id']] = input_type # register by id
_substitute_ids_with_references(input_types)
return input_types[root_id]
SCALAR = ['Integer', 'Double', 'Bool', 'String', 'Selection', 'FileName']
def is_scalar(input_type):
"""Returns True if input_type is scalar."""
return input_type['base_type'] in SCALAR
RE_PARAM = re.compile('^<([a-zA-Z][a-zA-Z0-9_]*)>$')
def is_param(value):
"""Determine whether given value is a parameter string."""
if not isinstance(value, str):
return False
return RE_PARAM.match(value)
def _substitute_ids_with_references(input_types):
"""Replaces ids or type names with python object references."""
input_type = {}
def _substitute_implementations():
"""Replaces implementation ids with input_types."""
impls = {}
for id_ in input_type['implementations']:
type_ = input_types[id_]
impls[type_['name']] = type_
input_type['implementations'] = impls
def _substitute_default_descendant():
"""Replaces default descendant id with input_type."""
id_ = input_type.get('default_descendant', None)
if id_ is not None:
input_type['default_descendant'] = input_types[id_]
def _substitute_key_type():
"""Replaces key type with input_type."""
# pylint: disable=unused-variable, invalid-name
for __, value in input_type['keys'].items():
value['type'] = input_types[value['type']]
# pylint: disable=unused-variable, invalid-name
for __, input_type in input_types.items():
if input_type['base_type'] == 'Array':
input_type['subtype'] = input_types[input_type['subtype']]
elif input_type['base_type'] == 'Abstract':
<|fim_middle|>
elif input_type['base_type'] == 'Record':
_substitute_key_type()
def _get_input_type(data):
"""Returns the input_type data structure that defines an input type
and its constraints for validation."""
if 'id' not in data or 'input_type' not in data:
return None
input_type = dict(
id=data['id'],
base_type=data['input_type']
)
input_type['name'] = data.get('name', '')
input_type['full_name'] = data.get('full_name', '')
input_type['description'] = data.get('description', '')
input_type['attributes'] = data.get('attributes', {})
if input_type['base_type'] in ['Double', 'Integer']:
input_type.update(_parse_range(data))
elif input_type['base_type'] == 'Array':
input_type.update(_parse_range(data))
if input_type['min'] < 0:
input_type['min'] = 0
input_type['subtype'] = data['subtype']
elif input_type['base_type'] == 'FileName':
input_type['file_mode'] = data['file_mode']
elif input_type['base_type'] == 'Selection':
input_type['values'] = _list_to_dict(data['values'], 'name')
elif input_type['base_type'] == 'Record':
input_type['keys'] = _list_to_dict(data['keys'])
input_type['implements'] = data.get('implements', [])
input_type['reducible_to_key'] = data.get('reducible_to_key', None)
elif input_type['base_type'] == 'Abstract':
input_type['implementations'] = data['implementations']
input_type['default_descendant'] = data.get('default_descendant', None)
return input_type
def _parse_range(data):
"""Parses the format range properties - min, max."""
input_type = {}
try:
input_type['min'] = data['range'][0]
except (KeyError, TypeError): # set default value
input_type['min'] = float('-inf')
try:
input_type['max'] = data['range'][1]
except (KeyError, TypeError): # set default value
input_type['max'] = float('inf')
return input_type
def _list_to_dict(list_, key_label='key'):
"""
Transforms a list of dictionaries into a dictionary of dictionaries.
Original dictionaries are assigned key specified in each of them
by key_label.
"""
dict_ = {}
for item in list_:
dict_[item[key_label]] = item
return dict_
<|fim▁end|> | _substitute_implementations()
_substitute_default_descendant() |
<|file_name|>format.py<|end_file_name|><|fim▁begin|>"""
Contains format specification class and methods to parse it from JSON.
.. codeauthor:: Tomas Krizek <[email protected]>
"""
import json
import re
def get_root_input_type_from_json(data):
"""Return the root input type from JSON formatted string."""
return parse_format(json.loads(data))
def parse_format(data):
"""Returns root input type from data."""
input_types = {}
data = data['ist_nodes']
root_id = data[0]['id'] # set root type
for item in data:
input_type = _get_input_type(item)
if input_type is not None:
input_types[input_type['id']] = input_type # register by id
_substitute_ids_with_references(input_types)
return input_types[root_id]
SCALAR = ['Integer', 'Double', 'Bool', 'String', 'Selection', 'FileName']
def is_scalar(input_type):
"""Returns True if input_type is scalar."""
return input_type['base_type'] in SCALAR
RE_PARAM = re.compile('^<([a-zA-Z][a-zA-Z0-9_]*)>$')
def is_param(value):
"""Determine whether given value is a parameter string."""
if not isinstance(value, str):
return False
return RE_PARAM.match(value)
def _substitute_ids_with_references(input_types):
"""Replaces ids or type names with python object references."""
input_type = {}
def _substitute_implementations():
"""Replaces implementation ids with input_types."""
impls = {}
for id_ in input_type['implementations']:
type_ = input_types[id_]
impls[type_['name']] = type_
input_type['implementations'] = impls
def _substitute_default_descendant():
"""Replaces default descendant id with input_type."""
id_ = input_type.get('default_descendant', None)
if id_ is not None:
input_type['default_descendant'] = input_types[id_]
def _substitute_key_type():
"""Replaces key type with input_type."""
# pylint: disable=unused-variable, invalid-name
for __, value in input_type['keys'].items():
value['type'] = input_types[value['type']]
# pylint: disable=unused-variable, invalid-name
for __, input_type in input_types.items():
if input_type['base_type'] == 'Array':
input_type['subtype'] = input_types[input_type['subtype']]
elif input_type['base_type'] == 'Abstract':
_substitute_implementations()
_substitute_default_descendant()
elif input_type['base_type'] == 'Record':
<|fim_middle|>
def _get_input_type(data):
"""Returns the input_type data structure that defines an input type
and its constraints for validation."""
if 'id' not in data or 'input_type' not in data:
return None
input_type = dict(
id=data['id'],
base_type=data['input_type']
)
input_type['name'] = data.get('name', '')
input_type['full_name'] = data.get('full_name', '')
input_type['description'] = data.get('description', '')
input_type['attributes'] = data.get('attributes', {})
if input_type['base_type'] in ['Double', 'Integer']:
input_type.update(_parse_range(data))
elif input_type['base_type'] == 'Array':
input_type.update(_parse_range(data))
if input_type['min'] < 0:
input_type['min'] = 0
input_type['subtype'] = data['subtype']
elif input_type['base_type'] == 'FileName':
input_type['file_mode'] = data['file_mode']
elif input_type['base_type'] == 'Selection':
input_type['values'] = _list_to_dict(data['values'], 'name')
elif input_type['base_type'] == 'Record':
input_type['keys'] = _list_to_dict(data['keys'])
input_type['implements'] = data.get('implements', [])
input_type['reducible_to_key'] = data.get('reducible_to_key', None)
elif input_type['base_type'] == 'Abstract':
input_type['implementations'] = data['implementations']
input_type['default_descendant'] = data.get('default_descendant', None)
return input_type
def _parse_range(data):
"""Parses the format range properties - min, max."""
input_type = {}
try:
input_type['min'] = data['range'][0]
except (KeyError, TypeError): # set default value
input_type['min'] = float('-inf')
try:
input_type['max'] = data['range'][1]
except (KeyError, TypeError): # set default value
input_type['max'] = float('inf')
return input_type
def _list_to_dict(list_, key_label='key'):
"""
Transforms a list of dictionaries into a dictionary of dictionaries.
Original dictionaries are assigned key specified in each of them
by key_label.
"""
dict_ = {}
for item in list_:
dict_[item[key_label]] = item
return dict_
<|fim▁end|> | _substitute_key_type() |
<|file_name|>format.py<|end_file_name|><|fim▁begin|>"""
Contains format specification class and methods to parse it from JSON.
.. codeauthor:: Tomas Krizek <[email protected]>
"""
import json
import re
def get_root_input_type_from_json(data):
"""Return the root input type from JSON formatted string."""
return parse_format(json.loads(data))
def parse_format(data):
"""Returns root input type from data."""
input_types = {}
data = data['ist_nodes']
root_id = data[0]['id'] # set root type
for item in data:
input_type = _get_input_type(item)
if input_type is not None:
input_types[input_type['id']] = input_type # register by id
_substitute_ids_with_references(input_types)
return input_types[root_id]
SCALAR = ['Integer', 'Double', 'Bool', 'String', 'Selection', 'FileName']
def is_scalar(input_type):
"""Returns True if input_type is scalar."""
return input_type['base_type'] in SCALAR
RE_PARAM = re.compile('^<([a-zA-Z][a-zA-Z0-9_]*)>$')
def is_param(value):
"""Determine whether given value is a parameter string."""
if not isinstance(value, str):
return False
return RE_PARAM.match(value)
def _substitute_ids_with_references(input_types):
"""Replaces ids or type names with python object references."""
input_type = {}
def _substitute_implementations():
"""Replaces implementation ids with input_types."""
impls = {}
for id_ in input_type['implementations']:
type_ = input_types[id_]
impls[type_['name']] = type_
input_type['implementations'] = impls
def _substitute_default_descendant():
"""Replaces default descendant id with input_type."""
id_ = input_type.get('default_descendant', None)
if id_ is not None:
input_type['default_descendant'] = input_types[id_]
def _substitute_key_type():
"""Replaces key type with input_type."""
# pylint: disable=unused-variable, invalid-name
for __, value in input_type['keys'].items():
value['type'] = input_types[value['type']]
# pylint: disable=unused-variable, invalid-name
for __, input_type in input_types.items():
if input_type['base_type'] == 'Array':
input_type['subtype'] = input_types[input_type['subtype']]
elif input_type['base_type'] == 'Abstract':
_substitute_implementations()
_substitute_default_descendant()
elif input_type['base_type'] == 'Record':
_substitute_key_type()
def _get_input_type(data):
"""Returns the input_type data structure that defines an input type
and its constraints for validation."""
if 'id' not in data or 'input_type' not in data:
<|fim_middle|>
input_type = dict(
id=data['id'],
base_type=data['input_type']
)
input_type['name'] = data.get('name', '')
input_type['full_name'] = data.get('full_name', '')
input_type['description'] = data.get('description', '')
input_type['attributes'] = data.get('attributes', {})
if input_type['base_type'] in ['Double', 'Integer']:
input_type.update(_parse_range(data))
elif input_type['base_type'] == 'Array':
input_type.update(_parse_range(data))
if input_type['min'] < 0:
input_type['min'] = 0
input_type['subtype'] = data['subtype']
elif input_type['base_type'] == 'FileName':
input_type['file_mode'] = data['file_mode']
elif input_type['base_type'] == 'Selection':
input_type['values'] = _list_to_dict(data['values'], 'name')
elif input_type['base_type'] == 'Record':
input_type['keys'] = _list_to_dict(data['keys'])
input_type['implements'] = data.get('implements', [])
input_type['reducible_to_key'] = data.get('reducible_to_key', None)
elif input_type['base_type'] == 'Abstract':
input_type['implementations'] = data['implementations']
input_type['default_descendant'] = data.get('default_descendant', None)
return input_type
def _parse_range(data):
"""Parses the format range properties - min, max."""
input_type = {}
try:
input_type['min'] = data['range'][0]
except (KeyError, TypeError): # set default value
input_type['min'] = float('-inf')
try:
input_type['max'] = data['range'][1]
except (KeyError, TypeError): # set default value
input_type['max'] = float('inf')
return input_type
def _list_to_dict(list_, key_label='key'):
"""
Transforms a list of dictionaries into a dictionary of dictionaries.
Original dictionaries are assigned key specified in each of them
by key_label.
"""
dict_ = {}
for item in list_:
dict_[item[key_label]] = item
return dict_
<|fim▁end|> | return None |
<|file_name|>format.py<|end_file_name|><|fim▁begin|>"""
Contains format specification class and methods to parse it from JSON.
.. codeauthor:: Tomas Krizek <[email protected]>
"""
import json
import re
def get_root_input_type_from_json(data):
"""Return the root input type from JSON formatted string."""
return parse_format(json.loads(data))
def parse_format(data):
"""Returns root input type from data."""
input_types = {}
data = data['ist_nodes']
root_id = data[0]['id'] # set root type
for item in data:
input_type = _get_input_type(item)
if input_type is not None:
input_types[input_type['id']] = input_type # register by id
_substitute_ids_with_references(input_types)
return input_types[root_id]
SCALAR = ['Integer', 'Double', 'Bool', 'String', 'Selection', 'FileName']
def is_scalar(input_type):
"""Returns True if input_type is scalar."""
return input_type['base_type'] in SCALAR
RE_PARAM = re.compile('^<([a-zA-Z][a-zA-Z0-9_]*)>$')
def is_param(value):
"""Determine whether given value is a parameter string."""
if not isinstance(value, str):
return False
return RE_PARAM.match(value)
def _substitute_ids_with_references(input_types):
"""Replaces ids or type names with python object references."""
input_type = {}
def _substitute_implementations():
"""Replaces implementation ids with input_types."""
impls = {}
for id_ in input_type['implementations']:
type_ = input_types[id_]
impls[type_['name']] = type_
input_type['implementations'] = impls
def _substitute_default_descendant():
"""Replaces default descendant id with input_type."""
id_ = input_type.get('default_descendant', None)
if id_ is not None:
input_type['default_descendant'] = input_types[id_]
def _substitute_key_type():
"""Replaces key type with input_type."""
# pylint: disable=unused-variable, invalid-name
for __, value in input_type['keys'].items():
value['type'] = input_types[value['type']]
# pylint: disable=unused-variable, invalid-name
for __, input_type in input_types.items():
if input_type['base_type'] == 'Array':
input_type['subtype'] = input_types[input_type['subtype']]
elif input_type['base_type'] == 'Abstract':
_substitute_implementations()
_substitute_default_descendant()
elif input_type['base_type'] == 'Record':
_substitute_key_type()
def _get_input_type(data):
"""Returns the input_type data structure that defines an input type
and its constraints for validation."""
if 'id' not in data or 'input_type' not in data:
return None
input_type = dict(
id=data['id'],
base_type=data['input_type']
)
input_type['name'] = data.get('name', '')
input_type['full_name'] = data.get('full_name', '')
input_type['description'] = data.get('description', '')
input_type['attributes'] = data.get('attributes', {})
if input_type['base_type'] in ['Double', 'Integer']:
<|fim_middle|>
elif input_type['base_type'] == 'Array':
input_type.update(_parse_range(data))
if input_type['min'] < 0:
input_type['min'] = 0
input_type['subtype'] = data['subtype']
elif input_type['base_type'] == 'FileName':
input_type['file_mode'] = data['file_mode']
elif input_type['base_type'] == 'Selection':
input_type['values'] = _list_to_dict(data['values'], 'name')
elif input_type['base_type'] == 'Record':
input_type['keys'] = _list_to_dict(data['keys'])
input_type['implements'] = data.get('implements', [])
input_type['reducible_to_key'] = data.get('reducible_to_key', None)
elif input_type['base_type'] == 'Abstract':
input_type['implementations'] = data['implementations']
input_type['default_descendant'] = data.get('default_descendant', None)
return input_type
def _parse_range(data):
"""Parses the format range properties - min, max."""
input_type = {}
try:
input_type['min'] = data['range'][0]
except (KeyError, TypeError): # set default value
input_type['min'] = float('-inf')
try:
input_type['max'] = data['range'][1]
except (KeyError, TypeError): # set default value
input_type['max'] = float('inf')
return input_type
def _list_to_dict(list_, key_label='key'):
"""
Transforms a list of dictionaries into a dictionary of dictionaries.
Original dictionaries are assigned key specified in each of them
by key_label.
"""
dict_ = {}
for item in list_:
dict_[item[key_label]] = item
return dict_
<|fim▁end|> | input_type.update(_parse_range(data)) |
<|file_name|>format.py<|end_file_name|><|fim▁begin|>"""
Contains format specification class and methods to parse it from JSON.
.. codeauthor:: Tomas Krizek <[email protected]>
"""
import json
import re
def get_root_input_type_from_json(data):
"""Return the root input type from JSON formatted string."""
return parse_format(json.loads(data))
def parse_format(data):
"""Returns root input type from data."""
input_types = {}
data = data['ist_nodes']
root_id = data[0]['id'] # set root type
for item in data:
input_type = _get_input_type(item)
if input_type is not None:
input_types[input_type['id']] = input_type # register by id
_substitute_ids_with_references(input_types)
return input_types[root_id]
SCALAR = ['Integer', 'Double', 'Bool', 'String', 'Selection', 'FileName']
def is_scalar(input_type):
"""Returns True if input_type is scalar."""
return input_type['base_type'] in SCALAR
RE_PARAM = re.compile('^<([a-zA-Z][a-zA-Z0-9_]*)>$')
def is_param(value):
"""Determine whether given value is a parameter string."""
if not isinstance(value, str):
return False
return RE_PARAM.match(value)
def _substitute_ids_with_references(input_types):
"""Replaces ids or type names with python object references."""
input_type = {}
def _substitute_implementations():
"""Replaces implementation ids with input_types."""
impls = {}
for id_ in input_type['implementations']:
type_ = input_types[id_]
impls[type_['name']] = type_
input_type['implementations'] = impls
def _substitute_default_descendant():
"""Replaces default descendant id with input_type."""
id_ = input_type.get('default_descendant', None)
if id_ is not None:
input_type['default_descendant'] = input_types[id_]
def _substitute_key_type():
"""Replaces key type with input_type."""
# pylint: disable=unused-variable, invalid-name
for __, value in input_type['keys'].items():
value['type'] = input_types[value['type']]
# pylint: disable=unused-variable, invalid-name
for __, input_type in input_types.items():
if input_type['base_type'] == 'Array':
input_type['subtype'] = input_types[input_type['subtype']]
elif input_type['base_type'] == 'Abstract':
_substitute_implementations()
_substitute_default_descendant()
elif input_type['base_type'] == 'Record':
_substitute_key_type()
def _get_input_type(data):
"""Returns the input_type data structure that defines an input type
and its constraints for validation."""
if 'id' not in data or 'input_type' not in data:
return None
input_type = dict(
id=data['id'],
base_type=data['input_type']
)
input_type['name'] = data.get('name', '')
input_type['full_name'] = data.get('full_name', '')
input_type['description'] = data.get('description', '')
input_type['attributes'] = data.get('attributes', {})
if input_type['base_type'] in ['Double', 'Integer']:
input_type.update(_parse_range(data))
elif input_type['base_type'] == 'Array':
<|fim_middle|>
elif input_type['base_type'] == 'FileName':
input_type['file_mode'] = data['file_mode']
elif input_type['base_type'] == 'Selection':
input_type['values'] = _list_to_dict(data['values'], 'name')
elif input_type['base_type'] == 'Record':
input_type['keys'] = _list_to_dict(data['keys'])
input_type['implements'] = data.get('implements', [])
input_type['reducible_to_key'] = data.get('reducible_to_key', None)
elif input_type['base_type'] == 'Abstract':
input_type['implementations'] = data['implementations']
input_type['default_descendant'] = data.get('default_descendant', None)
return input_type
def _parse_range(data):
"""Parses the format range properties - min, max."""
input_type = {}
try:
input_type['min'] = data['range'][0]
except (KeyError, TypeError): # set default value
input_type['min'] = float('-inf')
try:
input_type['max'] = data['range'][1]
except (KeyError, TypeError): # set default value
input_type['max'] = float('inf')
return input_type
def _list_to_dict(list_, key_label='key'):
"""
Transforms a list of dictionaries into a dictionary of dictionaries.
Original dictionaries are assigned key specified in each of them
by key_label.
"""
dict_ = {}
for item in list_:
dict_[item[key_label]] = item
return dict_
<|fim▁end|> | input_type.update(_parse_range(data))
if input_type['min'] < 0:
input_type['min'] = 0
input_type['subtype'] = data['subtype'] |
<|file_name|>format.py<|end_file_name|><|fim▁begin|>"""
Contains format specification class and methods to parse it from JSON.
.. codeauthor:: Tomas Krizek <[email protected]>
"""
import json
import re
def get_root_input_type_from_json(data):
"""Return the root input type from JSON formatted string."""
return parse_format(json.loads(data))
def parse_format(data):
"""Returns root input type from data."""
input_types = {}
data = data['ist_nodes']
root_id = data[0]['id'] # set root type
for item in data:
input_type = _get_input_type(item)
if input_type is not None:
input_types[input_type['id']] = input_type # register by id
_substitute_ids_with_references(input_types)
return input_types[root_id]
SCALAR = ['Integer', 'Double', 'Bool', 'String', 'Selection', 'FileName']
def is_scalar(input_type):
"""Returns True if input_type is scalar."""
return input_type['base_type'] in SCALAR
RE_PARAM = re.compile('^<([a-zA-Z][a-zA-Z0-9_]*)>$')
def is_param(value):
"""Determine whether given value is a parameter string."""
if not isinstance(value, str):
return False
return RE_PARAM.match(value)
def _substitute_ids_with_references(input_types):
"""Replaces ids or type names with python object references."""
input_type = {}
def _substitute_implementations():
"""Replaces implementation ids with input_types."""
impls = {}
for id_ in input_type['implementations']:
type_ = input_types[id_]
impls[type_['name']] = type_
input_type['implementations'] = impls
def _substitute_default_descendant():
"""Replaces default descendant id with input_type."""
id_ = input_type.get('default_descendant', None)
if id_ is not None:
input_type['default_descendant'] = input_types[id_]
def _substitute_key_type():
"""Replaces key type with input_type."""
# pylint: disable=unused-variable, invalid-name
for __, value in input_type['keys'].items():
value['type'] = input_types[value['type']]
# pylint: disable=unused-variable, invalid-name
for __, input_type in input_types.items():
if input_type['base_type'] == 'Array':
input_type['subtype'] = input_types[input_type['subtype']]
elif input_type['base_type'] == 'Abstract':
_substitute_implementations()
_substitute_default_descendant()
elif input_type['base_type'] == 'Record':
_substitute_key_type()
def _get_input_type(data):
"""Returns the input_type data structure that defines an input type
and its constraints for validation."""
if 'id' not in data or 'input_type' not in data:
return None
input_type = dict(
id=data['id'],
base_type=data['input_type']
)
input_type['name'] = data.get('name', '')
input_type['full_name'] = data.get('full_name', '')
input_type['description'] = data.get('description', '')
input_type['attributes'] = data.get('attributes', {})
if input_type['base_type'] in ['Double', 'Integer']:
input_type.update(_parse_range(data))
elif input_type['base_type'] == 'Array':
input_type.update(_parse_range(data))
if input_type['min'] < 0:
<|fim_middle|>
input_type['subtype'] = data['subtype']
elif input_type['base_type'] == 'FileName':
input_type['file_mode'] = data['file_mode']
elif input_type['base_type'] == 'Selection':
input_type['values'] = _list_to_dict(data['values'], 'name')
elif input_type['base_type'] == 'Record':
input_type['keys'] = _list_to_dict(data['keys'])
input_type['implements'] = data.get('implements', [])
input_type['reducible_to_key'] = data.get('reducible_to_key', None)
elif input_type['base_type'] == 'Abstract':
input_type['implementations'] = data['implementations']
input_type['default_descendant'] = data.get('default_descendant', None)
return input_type
def _parse_range(data):
"""Parses the format range properties - min, max."""
input_type = {}
try:
input_type['min'] = data['range'][0]
except (KeyError, TypeError): # set default value
input_type['min'] = float('-inf')
try:
input_type['max'] = data['range'][1]
except (KeyError, TypeError): # set default value
input_type['max'] = float('inf')
return input_type
def _list_to_dict(list_, key_label='key'):
"""
Transforms a list of dictionaries into a dictionary of dictionaries.
Original dictionaries are assigned key specified in each of them
by key_label.
"""
dict_ = {}
for item in list_:
dict_[item[key_label]] = item
return dict_
<|fim▁end|> | input_type['min'] = 0 |
<|file_name|>format.py<|end_file_name|><|fim▁begin|>"""
Contains format specification class and methods to parse it from JSON.
.. codeauthor:: Tomas Krizek <[email protected]>
"""
import json
import re
def get_root_input_type_from_json(data):
"""Return the root input type from JSON formatted string."""
return parse_format(json.loads(data))
def parse_format(data):
"""Returns root input type from data."""
input_types = {}
data = data['ist_nodes']
root_id = data[0]['id'] # set root type
for item in data:
input_type = _get_input_type(item)
if input_type is not None:
input_types[input_type['id']] = input_type # register by id
_substitute_ids_with_references(input_types)
return input_types[root_id]
SCALAR = ['Integer', 'Double', 'Bool', 'String', 'Selection', 'FileName']
def is_scalar(input_type):
"""Returns True if input_type is scalar."""
return input_type['base_type'] in SCALAR
RE_PARAM = re.compile('^<([a-zA-Z][a-zA-Z0-9_]*)>$')
def is_param(value):
"""Determine whether given value is a parameter string."""
if not isinstance(value, str):
return False
return RE_PARAM.match(value)
def _substitute_ids_with_references(input_types):
"""Replaces ids or type names with python object references."""
input_type = {}
def _substitute_implementations():
"""Replaces implementation ids with input_types."""
impls = {}
for id_ in input_type['implementations']:
type_ = input_types[id_]
impls[type_['name']] = type_
input_type['implementations'] = impls
def _substitute_default_descendant():
"""Replaces default descendant id with input_type."""
id_ = input_type.get('default_descendant', None)
if id_ is not None:
input_type['default_descendant'] = input_types[id_]
def _substitute_key_type():
"""Replaces key type with input_type."""
# pylint: disable=unused-variable, invalid-name
for __, value in input_type['keys'].items():
value['type'] = input_types[value['type']]
# pylint: disable=unused-variable, invalid-name
for __, input_type in input_types.items():
if input_type['base_type'] == 'Array':
input_type['subtype'] = input_types[input_type['subtype']]
elif input_type['base_type'] == 'Abstract':
_substitute_implementations()
_substitute_default_descendant()
elif input_type['base_type'] == 'Record':
_substitute_key_type()
def _get_input_type(data):
"""Returns the input_type data structure that defines an input type
and its constraints for validation."""
if 'id' not in data or 'input_type' not in data:
return None
input_type = dict(
id=data['id'],
base_type=data['input_type']
)
input_type['name'] = data.get('name', '')
input_type['full_name'] = data.get('full_name', '')
input_type['description'] = data.get('description', '')
input_type['attributes'] = data.get('attributes', {})
if input_type['base_type'] in ['Double', 'Integer']:
input_type.update(_parse_range(data))
elif input_type['base_type'] == 'Array':
input_type.update(_parse_range(data))
if input_type['min'] < 0:
input_type['min'] = 0
input_type['subtype'] = data['subtype']
elif input_type['base_type'] == 'FileName':
<|fim_middle|>
elif input_type['base_type'] == 'Selection':
input_type['values'] = _list_to_dict(data['values'], 'name')
elif input_type['base_type'] == 'Record':
input_type['keys'] = _list_to_dict(data['keys'])
input_type['implements'] = data.get('implements', [])
input_type['reducible_to_key'] = data.get('reducible_to_key', None)
elif input_type['base_type'] == 'Abstract':
input_type['implementations'] = data['implementations']
input_type['default_descendant'] = data.get('default_descendant', None)
return input_type
def _parse_range(data):
"""Parses the format range properties - min, max."""
input_type = {}
try:
input_type['min'] = data['range'][0]
except (KeyError, TypeError): # set default value
input_type['min'] = float('-inf')
try:
input_type['max'] = data['range'][1]
except (KeyError, TypeError): # set default value
input_type['max'] = float('inf')
return input_type
def _list_to_dict(list_, key_label='key'):
"""
Transforms a list of dictionaries into a dictionary of dictionaries.
Original dictionaries are assigned key specified in each of them
by key_label.
"""
dict_ = {}
for item in list_:
dict_[item[key_label]] = item
return dict_
<|fim▁end|> | input_type['file_mode'] = data['file_mode'] |
<|file_name|>format.py<|end_file_name|><|fim▁begin|>"""
Contains format specification class and methods to parse it from JSON.
.. codeauthor:: Tomas Krizek <[email protected]>
"""
import json
import re
def get_root_input_type_from_json(data):
"""Return the root input type from JSON formatted string."""
return parse_format(json.loads(data))
def parse_format(data):
"""Returns root input type from data."""
input_types = {}
data = data['ist_nodes']
root_id = data[0]['id'] # set root type
for item in data:
input_type = _get_input_type(item)
if input_type is not None:
input_types[input_type['id']] = input_type # register by id
_substitute_ids_with_references(input_types)
return input_types[root_id]
SCALAR = ['Integer', 'Double', 'Bool', 'String', 'Selection', 'FileName']
def is_scalar(input_type):
"""Returns True if input_type is scalar."""
return input_type['base_type'] in SCALAR
RE_PARAM = re.compile('^<([a-zA-Z][a-zA-Z0-9_]*)>$')
def is_param(value):
"""Determine whether given value is a parameter string."""
if not isinstance(value, str):
return False
return RE_PARAM.match(value)
def _substitute_ids_with_references(input_types):
"""Replaces ids or type names with python object references."""
input_type = {}
def _substitute_implementations():
"""Replaces implementation ids with input_types."""
impls = {}
for id_ in input_type['implementations']:
type_ = input_types[id_]
impls[type_['name']] = type_
input_type['implementations'] = impls
def _substitute_default_descendant():
"""Replaces default descendant id with input_type."""
id_ = input_type.get('default_descendant', None)
if id_ is not None:
input_type['default_descendant'] = input_types[id_]
def _substitute_key_type():
"""Replaces key type with input_type."""
# pylint: disable=unused-variable, invalid-name
for __, value in input_type['keys'].items():
value['type'] = input_types[value['type']]
# pylint: disable=unused-variable, invalid-name
for __, input_type in input_types.items():
if input_type['base_type'] == 'Array':
input_type['subtype'] = input_types[input_type['subtype']]
elif input_type['base_type'] == 'Abstract':
_substitute_implementations()
_substitute_default_descendant()
elif input_type['base_type'] == 'Record':
_substitute_key_type()
def _get_input_type(data):
"""Returns the input_type data structure that defines an input type
and its constraints for validation."""
if 'id' not in data or 'input_type' not in data:
return None
input_type = dict(
id=data['id'],
base_type=data['input_type']
)
input_type['name'] = data.get('name', '')
input_type['full_name'] = data.get('full_name', '')
input_type['description'] = data.get('description', '')
input_type['attributes'] = data.get('attributes', {})
if input_type['base_type'] in ['Double', 'Integer']:
input_type.update(_parse_range(data))
elif input_type['base_type'] == 'Array':
input_type.update(_parse_range(data))
if input_type['min'] < 0:
input_type['min'] = 0
input_type['subtype'] = data['subtype']
elif input_type['base_type'] == 'FileName':
input_type['file_mode'] = data['file_mode']
elif input_type['base_type'] == 'Selection':
<|fim_middle|>
elif input_type['base_type'] == 'Record':
input_type['keys'] = _list_to_dict(data['keys'])
input_type['implements'] = data.get('implements', [])
input_type['reducible_to_key'] = data.get('reducible_to_key', None)
elif input_type['base_type'] == 'Abstract':
input_type['implementations'] = data['implementations']
input_type['default_descendant'] = data.get('default_descendant', None)
return input_type
def _parse_range(data):
"""Parses the format range properties - min, max."""
input_type = {}
try:
input_type['min'] = data['range'][0]
except (KeyError, TypeError): # set default value
input_type['min'] = float('-inf')
try:
input_type['max'] = data['range'][1]
except (KeyError, TypeError): # set default value
input_type['max'] = float('inf')
return input_type
def _list_to_dict(list_, key_label='key'):
"""
Transforms a list of dictionaries into a dictionary of dictionaries.
Original dictionaries are assigned key specified in each of them
by key_label.
"""
dict_ = {}
for item in list_:
dict_[item[key_label]] = item
return dict_
<|fim▁end|> | input_type['values'] = _list_to_dict(data['values'], 'name') |
<|file_name|>format.py<|end_file_name|><|fim▁begin|>"""
Contains format specification class and methods to parse it from JSON.
.. codeauthor:: Tomas Krizek <[email protected]>
"""
import json
import re
def get_root_input_type_from_json(data):
"""Return the root input type from JSON formatted string."""
return parse_format(json.loads(data))
def parse_format(data):
"""Returns root input type from data."""
input_types = {}
data = data['ist_nodes']
root_id = data[0]['id'] # set root type
for item in data:
input_type = _get_input_type(item)
if input_type is not None:
input_types[input_type['id']] = input_type # register by id
_substitute_ids_with_references(input_types)
return input_types[root_id]
SCALAR = ['Integer', 'Double', 'Bool', 'String', 'Selection', 'FileName']
def is_scalar(input_type):
"""Returns True if input_type is scalar."""
return input_type['base_type'] in SCALAR
RE_PARAM = re.compile('^<([a-zA-Z][a-zA-Z0-9_]*)>$')
def is_param(value):
"""Determine whether given value is a parameter string."""
if not isinstance(value, str):
return False
return RE_PARAM.match(value)
def _substitute_ids_with_references(input_types):
"""Replaces ids or type names with python object references."""
input_type = {}
def _substitute_implementations():
"""Replaces implementation ids with input_types."""
impls = {}
for id_ in input_type['implementations']:
type_ = input_types[id_]
impls[type_['name']] = type_
input_type['implementations'] = impls
def _substitute_default_descendant():
"""Replaces default descendant id with input_type."""
id_ = input_type.get('default_descendant', None)
if id_ is not None:
input_type['default_descendant'] = input_types[id_]
def _substitute_key_type():
"""Replaces key type with input_type."""
# pylint: disable=unused-variable, invalid-name
for __, value in input_type['keys'].items():
value['type'] = input_types[value['type']]
# pylint: disable=unused-variable, invalid-name
for __, input_type in input_types.items():
if input_type['base_type'] == 'Array':
input_type['subtype'] = input_types[input_type['subtype']]
elif input_type['base_type'] == 'Abstract':
_substitute_implementations()
_substitute_default_descendant()
elif input_type['base_type'] == 'Record':
_substitute_key_type()
def _get_input_type(data):
"""Returns the input_type data structure that defines an input type
and its constraints for validation."""
if 'id' not in data or 'input_type' not in data:
return None
input_type = dict(
id=data['id'],
base_type=data['input_type']
)
input_type['name'] = data.get('name', '')
input_type['full_name'] = data.get('full_name', '')
input_type['description'] = data.get('description', '')
input_type['attributes'] = data.get('attributes', {})
if input_type['base_type'] in ['Double', 'Integer']:
input_type.update(_parse_range(data))
elif input_type['base_type'] == 'Array':
input_type.update(_parse_range(data))
if input_type['min'] < 0:
input_type['min'] = 0
input_type['subtype'] = data['subtype']
elif input_type['base_type'] == 'FileName':
input_type['file_mode'] = data['file_mode']
elif input_type['base_type'] == 'Selection':
input_type['values'] = _list_to_dict(data['values'], 'name')
elif input_type['base_type'] == 'Record':
<|fim_middle|>
elif input_type['base_type'] == 'Abstract':
input_type['implementations'] = data['implementations']
input_type['default_descendant'] = data.get('default_descendant', None)
return input_type
def _parse_range(data):
"""Parses the format range properties - min, max."""
input_type = {}
try:
input_type['min'] = data['range'][0]
except (KeyError, TypeError): # set default value
input_type['min'] = float('-inf')
try:
input_type['max'] = data['range'][1]
except (KeyError, TypeError): # set default value
input_type['max'] = float('inf')
return input_type
def _list_to_dict(list_, key_label='key'):
"""
Transforms a list of dictionaries into a dictionary of dictionaries.
Original dictionaries are assigned key specified in each of them
by key_label.
"""
dict_ = {}
for item in list_:
dict_[item[key_label]] = item
return dict_
<|fim▁end|> | input_type['keys'] = _list_to_dict(data['keys'])
input_type['implements'] = data.get('implements', [])
input_type['reducible_to_key'] = data.get('reducible_to_key', None) |
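The completion above fills in the Record branch of _get_input_type. A small sketch of what that branch produces for a hypothetical Record node; the field values and the node itself are illustrative, and it assumes the file shown in these rows is importable as format.py. Note that at this stage the per-key 'type' entries are still raw ids; _substitute_ids_with_references later replaces them with references to the parsed types:

from format import _get_input_type  # assumes format.py from the rows above is on the path

record_node = {
    'id': '42',
    'input_type': 'Record',
    'name': 'Mesh',
    'keys': [
        {'key': 'mesh_file', 'type': '7'},  # 'type' still refers to another node's id here
        {'key': 'regions', 'type': '9'},
    ],
    'reducible_to_key': 'mesh_file',
}

parsed = _get_input_type(record_node)
print(sorted(parsed['keys']))      # ['mesh_file', 'regions'] -- re-keyed by 'key'
print(parsed['implements'])        # [] -- missing 'implements' falls back to the default
print(parsed['reducible_to_key'])  # 'mesh_file'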
<|file_name|>format.py<|end_file_name|><|fim▁begin|>"""
Contains format specification class and methods to parse it from JSON.
.. codeauthor:: Tomas Krizek <[email protected]>
"""
import json
import re
def get_root_input_type_from_json(data):
"""Return the root input type from JSON formatted string."""
return parse_format(json.loads(data))
def parse_format(data):
"""Returns root input type from data."""
input_types = {}
data = data['ist_nodes']
root_id = data[0]['id'] # set root type
for item in data:
input_type = _get_input_type(item)
if input_type is not None:
input_types[input_type['id']] = input_type # register by id
_substitute_ids_with_references(input_types)
return input_types[root_id]
SCALAR = ['Integer', 'Double', 'Bool', 'String', 'Selection', 'FileName']
def is_scalar(input_type):
"""Returns True if input_type is scalar."""
return input_type['base_type'] in SCALAR
RE_PARAM = re.compile('^<([a-zA-Z][a-zA-Z0-9_]*)>$')
def is_param(value):
"""Determine whether given value is a parameter string."""
if not isinstance(value, str):
return False
return RE_PARAM.match(value)
def _substitute_ids_with_references(input_types):
"""Replaces ids or type names with python object references."""
input_type = {}
def _substitute_implementations():
"""Replaces implementation ids with input_types."""
impls = {}
for id_ in input_type['implementations']:
type_ = input_types[id_]
impls[type_['name']] = type_
input_type['implementations'] = impls
def _substitute_default_descendant():
"""Replaces default descendant id with input_type."""
id_ = input_type.get('default_descendant', None)
if id_ is not None:
input_type['default_descendant'] = input_types[id_]
def _substitute_key_type():
"""Replaces key type with input_type."""
# pylint: disable=unused-variable, invalid-name
for __, value in input_type['keys'].items():
value['type'] = input_types[value['type']]
# pylint: disable=unused-variable, invalid-name
for __, input_type in input_types.items():
if input_type['base_type'] == 'Array':
input_type['subtype'] = input_types[input_type['subtype']]
elif input_type['base_type'] == 'Abstract':
_substitute_implementations()
_substitute_default_descendant()
elif input_type['base_type'] == 'Record':
_substitute_key_type()
def _get_input_type(data):
"""Returns the input_type data structure that defines an input type
and its constraints for validation."""
if 'id' not in data or 'input_type' not in data:
return None
input_type = dict(
id=data['id'],
base_type=data['input_type']
)
input_type['name'] = data.get('name', '')
input_type['full_name'] = data.get('full_name', '')
input_type['description'] = data.get('description', '')
input_type['attributes'] = data.get('attributes', {})
if input_type['base_type'] in ['Double', 'Integer']:
input_type.update(_parse_range(data))
elif input_type['base_type'] == 'Array':
input_type.update(_parse_range(data))
if input_type['min'] < 0:
input_type['min'] = 0
input_type['subtype'] = data['subtype']
elif input_type['base_type'] == 'FileName':
input_type['file_mode'] = data['file_mode']
elif input_type['base_type'] == 'Selection':
input_type['values'] = _list_to_dict(data['values'], 'name')
elif input_type['base_type'] == 'Record':
input_type['keys'] = _list_to_dict(data['keys'])
input_type['implements'] = data.get('implements', [])
input_type['reducible_to_key'] = data.get('reducible_to_key', None)
elif input_type['base_type'] == 'Abstract':
<|fim_middle|>
return input_type
def _parse_range(data):
"""Parses the format range properties - min, max."""
input_type = {}
try:
input_type['min'] = data['range'][0]
except (KeyError, TypeError): # set default value
input_type['min'] = float('-inf')
try:
input_type['max'] = data['range'][1]
except (KeyError, TypeError): # set default value
input_type['max'] = float('inf')
return input_type
def _list_to_dict(list_, key_label='key'):
"""
Transforms a list of dictionaries into a dictionary of dictionaries.
Original dictionaries are assigned key specified in each of them
by key_label.
"""
dict_ = {}
for item in list_:
dict_[item[key_label]] = item
return dict_
<|fim▁end|> | input_type['implementations'] = data['implementations']
input_type['default_descendant'] = data.get('default_descendant', None) |
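Putting the pieces together, a hedged end-to-end sketch of the public entry point: a deliberately tiny, hypothetical IST document with a single Integer root node (real documents contain many nodes). It assumes the file above is importable as format; the printed min/max come from _parse_range, which falls back to -inf/inf when 'range' is absent:

import json

from format import get_root_input_type_from_json  # assumes format.py from the rows above

doc = json.dumps({
    'ist_nodes': [
        {'id': '1', 'input_type': 'Integer', 'name': 'NRuns', 'range': [0, 10]},
    ]
})

root = get_root_input_type_from_json(doc)
print(root['base_type'], root['min'], root['max'])  # Integer 0 10

# Without 'range', the try/except defaults in _parse_range apply:
doc2 = json.dumps({'ist_nodes': [{'id': '1', 'input_type': 'Integer'}]})
print(get_root_input_type_from_json(doc2)['min'])   # -inf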
<|file_name|>format.py<|end_file_name|><|fim▁begin|>"""
Contains format specification class and methods to parse it from JSON.
.. codeauthor:: Tomas Krizek <[email protected]>
"""
import json
import re
def <|fim_middle|>(data):
"""Return the root input type from JSON formatted string."""
return parse_format(json.loads(data))
def parse_format(data):
"""Returns root input type from data."""
input_types = {}
data = data['ist_nodes']
root_id = data[0]['id'] # set root type
for item in data:
input_type = _get_input_type(item)
if input_type is not None:
input_types[input_type['id']] = input_type # register by id
_substitute_ids_with_references(input_types)
return input_types[root_id]
SCALAR = ['Integer', 'Double', 'Bool', 'String', 'Selection', 'FileName']
def is_scalar(input_type):
"""Returns True if input_type is scalar."""
return input_type['base_type'] in SCALAR
RE_PARAM = re.compile('^<([a-zA-Z][a-zA-Z0-9_]*)>$')
def is_param(value):
"""Determine whether given value is a parameter string."""
if not isinstance(value, str):
return False
return RE_PARAM.match(value)
def _substitute_ids_with_references(input_types):
"""Replaces ids or type names with python object references."""
input_type = {}
def _substitute_implementations():
"""Replaces implementation ids with input_types."""
impls = {}
for id_ in input_type['implementations']:
type_ = input_types[id_]
impls[type_['name']] = type_
input_type['implementations'] = impls
def _substitute_default_descendant():
"""Replaces default descendant id with input_type."""
id_ = input_type.get('default_descendant', None)
if id_ is not None:
input_type['default_descendant'] = input_types[id_]
def _substitute_key_type():
"""Replaces key type with input_type."""
# pylint: disable=unused-variable, invalid-name
for __, value in input_type['keys'].items():
value['type'] = input_types[value['type']]
# pylint: disable=unused-variable, invalid-name
for __, input_type in input_types.items():
if input_type['base_type'] == 'Array':
input_type['subtype'] = input_types[input_type['subtype']]
elif input_type['base_type'] == 'Abstract':
_substitute_implementations()
_substitute_default_descendant()
elif input_type['base_type'] == 'Record':
_substitute_key_type()
def _get_input_type(data):
"""Returns the input_type data structure that defines an input type
and its constraints for validation."""
if 'id' not in data or 'input_type' not in data:
return None
input_type = dict(
id=data['id'],
base_type=data['input_type']
)
input_type['name'] = data.get('name', '')
input_type['full_name'] = data.get('full_name', '')
input_type['description'] = data.get('description', '')
input_type['attributes'] = data.get('attributes', {})
if input_type['base_type'] in ['Double', 'Integer']:
input_type.update(_parse_range(data))
elif input_type['base_type'] == 'Array':
input_type.update(_parse_range(data))
if input_type['min'] < 0:
input_type['min'] = 0
input_type['subtype'] = data['subtype']
elif input_type['base_type'] == 'FileName':
input_type['file_mode'] = data['file_mode']
elif input_type['base_type'] == 'Selection':
input_type['values'] = _list_to_dict(data['values'], 'name')
elif input_type['base_type'] == 'Record':
input_type['keys'] = _list_to_dict(data['keys'])
input_type['implements'] = data.get('implements', [])
input_type['reducible_to_key'] = data.get('reducible_to_key', None)
elif input_type['base_type'] == 'Abstract':
input_type['implementations'] = data['implementations']
input_type['default_descendant'] = data.get('default_descendant', None)
return input_type
def _parse_range(data):
"""Parses the format range properties - min, max."""
input_type = {}
try:
input_type['min'] = data['range'][0]
except (KeyError, TypeError): # set default value
input_type['min'] = float('-inf')
try:
input_type['max'] = data['range'][1]
except (KeyError, TypeError): # set default value
input_type['max'] = float('inf')
return input_type
def _list_to_dict(list_, key_label='key'):
"""
Transforms a list of dictionaries into a dictionary of dictionaries.
Original dictionaries are assigned key specified in each of them
by key_label.
"""
dict_ = {}
for item in list_:
dict_[item[key_label]] = item
return dict_
<|fim▁end|> | get_root_input_type_from_json |
<|file_name|>format.py<|end_file_name|><|fim▁begin|>"""
Contains format specification class and methods to parse it from JSON.
.. codeauthor:: Tomas Krizek <[email protected]>
"""
import json
import re
def get_root_input_type_from_json(data):
"""Return the root input type from JSON formatted string."""
return parse_format(json.loads(data))
def <|fim_middle|>(data):
"""Returns root input type from data."""
input_types = {}
data = data['ist_nodes']
root_id = data[0]['id'] # set root type
for item in data:
input_type = _get_input_type(item)
if input_type is not None:
input_types[input_type['id']] = input_type # register by id
_substitute_ids_with_references(input_types)
return input_types[root_id]
SCALAR = ['Integer', 'Double', 'Bool', 'String', 'Selection', 'FileName']
def is_scalar(input_type):
"""Returns True if input_type is scalar."""
return input_type['base_type'] in SCALAR
RE_PARAM = re.compile('^<([a-zA-Z][a-zA-Z0-9_]*)>$')
def is_param(value):
"""Determine whether given value is a parameter string."""
if not isinstance(value, str):
return False
return RE_PARAM.match(value)
def _substitute_ids_with_references(input_types):
"""Replaces ids or type names with python object references."""
input_type = {}
def _substitute_implementations():
"""Replaces implementation ids with input_types."""
impls = {}
for id_ in input_type['implementations']:
type_ = input_types[id_]
impls[type_['name']] = type_
input_type['implementations'] = impls
def _substitute_default_descendant():
"""Replaces default descendant id with input_type."""
id_ = input_type.get('default_descendant', None)
if id_ is not None:
input_type['default_descendant'] = input_types[id_]
def _substitute_key_type():
"""Replaces key type with input_type."""
# pylint: disable=unused-variable, invalid-name
for __, value in input_type['keys'].items():
value['type'] = input_types[value['type']]
# pylint: disable=unused-variable, invalid-name
for __, input_type in input_types.items():
if input_type['base_type'] == 'Array':
input_type['subtype'] = input_types[input_type['subtype']]
elif input_type['base_type'] == 'Abstract':
_substitute_implementations()
_substitute_default_descendant()
elif input_type['base_type'] == 'Record':
_substitute_key_type()
def _get_input_type(data):
"""Returns the input_type data structure that defines an input type
and its constraints for validation."""
if 'id' not in data or 'input_type' not in data:
return None
input_type = dict(
id=data['id'],
base_type=data['input_type']
)
input_type['name'] = data.get('name', '')
input_type['full_name'] = data.get('full_name', '')
input_type['description'] = data.get('description', '')
input_type['attributes'] = data.get('attributes', {})
if input_type['base_type'] in ['Double', 'Integer']:
input_type.update(_parse_range(data))
elif input_type['base_type'] == 'Array':
input_type.update(_parse_range(data))
if input_type['min'] < 0:
input_type['min'] = 0
input_type['subtype'] = data['subtype']
elif input_type['base_type'] == 'FileName':
input_type['file_mode'] = data['file_mode']
elif input_type['base_type'] == 'Selection':
input_type['values'] = _list_to_dict(data['values'], 'name')
elif input_type['base_type'] == 'Record':
input_type['keys'] = _list_to_dict(data['keys'])
input_type['implements'] = data.get('implements', [])
input_type['reducible_to_key'] = data.get('reducible_to_key', None)
elif input_type['base_type'] == 'Abstract':
input_type['implementations'] = data['implementations']
input_type['default_descendant'] = data.get('default_descendant', None)
return input_type
def _parse_range(data):
"""Parses the format range properties - min, max."""
input_type = {}
try:
input_type['min'] = data['range'][0]
except (KeyError, TypeError): # set default value
input_type['min'] = float('-inf')
try:
input_type['max'] = data['range'][1]
except (KeyError, TypeError): # set default value
input_type['max'] = float('inf')
return input_type
def _list_to_dict(list_, key_label='key'):
"""
Transforms a list of dictionaries into a dictionary of dictionaries.
Original dictionaries are assigned key specified in each of them
by key_label.
"""
dict_ = {}
for item in list_:
dict_[item[key_label]] = item
return dict_
<|fim▁end|> | parse_format |
<|file_name|>format.py<|end_file_name|><|fim▁begin|>"""
Contains format specification class and methods to parse it from JSON.
.. codeauthor:: Tomas Krizek <[email protected]>
"""
import json
import re
def get_root_input_type_from_json(data):
"""Return the root input type from JSON formatted string."""
return parse_format(json.loads(data))
def parse_format(data):
"""Returns root input type from data."""
input_types = {}
data = data['ist_nodes']
root_id = data[0]['id'] # set root type
for item in data:
input_type = _get_input_type(item)
if input_type is not None:
input_types[input_type['id']] = input_type # register by id
_substitute_ids_with_references(input_types)
return input_types[root_id]
SCALAR = ['Integer', 'Double', 'Bool', 'String', 'Selection', 'FileName']
def <|fim_middle|>(input_type):
"""Returns True if input_type is scalar."""
return input_type['base_type'] in SCALAR
RE_PARAM = re.compile('^<([a-zA-Z][a-zA-Z0-9_]*)>$')
def is_param(value):
"""Determine whether given value is a parameter string."""
if not isinstance(value, str):
return False
return RE_PARAM.match(value)
def _substitute_ids_with_references(input_types):
"""Replaces ids or type names with python object references."""
input_type = {}
def _substitute_implementations():
"""Replaces implementation ids with input_types."""
impls = {}
for id_ in input_type['implementations']:
type_ = input_types[id_]
impls[type_['name']] = type_
input_type['implementations'] = impls
def _substitute_default_descendant():
"""Replaces default descendant id with input_type."""
id_ = input_type.get('default_descendant', None)
if id_ is not None:
input_type['default_descendant'] = input_types[id_]
def _substitute_key_type():
"""Replaces key type with input_type."""
# pylint: disable=unused-variable, invalid-name
for __, value in input_type['keys'].items():
value['type'] = input_types[value['type']]
# pylint: disable=unused-variable, invalid-name
for __, input_type in input_types.items():
if input_type['base_type'] == 'Array':
input_type['subtype'] = input_types[input_type['subtype']]
elif input_type['base_type'] == 'Abstract':
_substitute_implementations()
_substitute_default_descendant()
elif input_type['base_type'] == 'Record':
_substitute_key_type()
def _get_input_type(data):
"""Returns the input_type data structure that defines an input type
and its constraints for validation."""
if 'id' not in data or 'input_type' not in data:
return None
input_type = dict(
id=data['id'],
base_type=data['input_type']
)
input_type['name'] = data.get('name', '')
input_type['full_name'] = data.get('full_name', '')
input_type['description'] = data.get('description', '')
input_type['attributes'] = data.get('attributes', {})
if input_type['base_type'] in ['Double', 'Integer']:
input_type.update(_parse_range(data))
elif input_type['base_type'] == 'Array':
input_type.update(_parse_range(data))
if input_type['min'] < 0:
input_type['min'] = 0
input_type['subtype'] = data['subtype']
elif input_type['base_type'] == 'FileName':
input_type['file_mode'] = data['file_mode']
elif input_type['base_type'] == 'Selection':
input_type['values'] = _list_to_dict(data['values'], 'name')
elif input_type['base_type'] == 'Record':
input_type['keys'] = _list_to_dict(data['keys'])
input_type['implements'] = data.get('implements', [])
input_type['reducible_to_key'] = data.get('reducible_to_key', None)
elif input_type['base_type'] == 'Abstract':
input_type['implementations'] = data['implementations']
input_type['default_descendant'] = data.get('default_descendant', None)
return input_type
def _parse_range(data):
"""Parses the format range properties - min, max."""
input_type = {}
try:
input_type['min'] = data['range'][0]
except (KeyError, TypeError): # set default value
input_type['min'] = float('-inf')
try:
input_type['max'] = data['range'][1]
except (KeyError, TypeError): # set default value
input_type['max'] = float('inf')
return input_type
def _list_to_dict(list_, key_label='key'):
"""
Transforms a list of dictionaries into a dictionary of dictionaries.
Original dictionaries are assigned key specified in each of them
by key_label.
"""
dict_ = {}
for item in list_:
dict_[item[key_label]] = item
return dict_
<|fim▁end|> | is_scalar |
<|file_name|>format.py<|end_file_name|><|fim▁begin|>"""
Contains format specification class and methods to parse it from JSON.
.. codeauthor:: Tomas Krizek <[email protected]>
"""
import json
import re
def get_root_input_type_from_json(data):
"""Return the root input type from JSON formatted string."""
return parse_format(json.loads(data))
def parse_format(data):
"""Returns root input type from data."""
input_types = {}
data = data['ist_nodes']
root_id = data[0]['id'] # set root type
for item in data:
input_type = _get_input_type(item)
if input_type is not None:
input_types[input_type['id']] = input_type # register by id
_substitute_ids_with_references(input_types)
return input_types[root_id]
SCALAR = ['Integer', 'Double', 'Bool', 'String', 'Selection', 'FileName']
def is_scalar(input_type):
"""Returns True if input_type is scalar."""
return input_type['base_type'] in SCALAR
RE_PARAM = re.compile('^<([a-zA-Z][a-zA-Z0-9_]*)>$')
def <|fim_middle|>(value):
"""Determine whether given value is a parameter string."""
if not isinstance(value, str):
return False
return RE_PARAM.match(value)
def _substitute_ids_with_references(input_types):
"""Replaces ids or type names with python object references."""
input_type = {}
def _substitute_implementations():
"""Replaces implementation ids with input_types."""
impls = {}
for id_ in input_type['implementations']:
type_ = input_types[id_]
impls[type_['name']] = type_
input_type['implementations'] = impls
def _substitute_default_descendant():
"""Replaces default descendant id with input_type."""
id_ = input_type.get('default_descendant', None)
if id_ is not None:
input_type['default_descendant'] = input_types[id_]
def _substitute_key_type():
"""Replaces key type with input_type."""
# pylint: disable=unused-variable, invalid-name
for __, value in input_type['keys'].items():
value['type'] = input_types[value['type']]
# pylint: disable=unused-variable, invalid-name
for __, input_type in input_types.items():
if input_type['base_type'] == 'Array':
input_type['subtype'] = input_types[input_type['subtype']]
elif input_type['base_type'] == 'Abstract':
_substitute_implementations()
_substitute_default_descendant()
elif input_type['base_type'] == 'Record':
_substitute_key_type()
def _get_input_type(data):
"""Returns the input_type data structure that defines an input type
and its constraints for validation."""
if 'id' not in data or 'input_type' not in data:
return None
input_type = dict(
id=data['id'],
base_type=data['input_type']
)
input_type['name'] = data.get('name', '')
input_type['full_name'] = data.get('full_name', '')
input_type['description'] = data.get('description', '')
input_type['attributes'] = data.get('attributes', {})
if input_type['base_type'] in ['Double', 'Integer']:
input_type.update(_parse_range(data))
elif input_type['base_type'] == 'Array':
input_type.update(_parse_range(data))
if input_type['min'] < 0:
input_type['min'] = 0
input_type['subtype'] = data['subtype']
elif input_type['base_type'] == 'FileName':
input_type['file_mode'] = data['file_mode']
elif input_type['base_type'] == 'Selection':
input_type['values'] = _list_to_dict(data['values'], 'name')
elif input_type['base_type'] == 'Record':
input_type['keys'] = _list_to_dict(data['keys'])
input_type['implements'] = data.get('implements', [])
input_type['reducible_to_key'] = data.get('reducible_to_key', None)
elif input_type['base_type'] == 'Abstract':
input_type['implementations'] = data['implementations']
input_type['default_descendant'] = data.get('default_descendant', None)
return input_type
def _parse_range(data):
"""Parses the format range properties - min, max."""
input_type = {}
try:
input_type['min'] = data['range'][0]
except (KeyError, TypeError): # set default value
input_type['min'] = float('-inf')
try:
input_type['max'] = data['range'][1]
except (KeyError, TypeError): # set default value
input_type['max'] = float('inf')
return input_type
def _list_to_dict(list_, key_label='key'):
"""
Transforms a list of dictionaries into a dictionary of dictionaries.
Original dictionaries are assigned key specified in each of them
by key_label.
"""
dict_ = {}
for item in list_:
dict_[item[key_label]] = item
return dict_
<|fim▁end|> | is_param |
<|file_name|>format.py<|end_file_name|><|fim▁begin|>"""
Contains format specification class and methods to parse it from JSON.
.. codeauthor:: Tomas Krizek <[email protected]>
"""
import json
import re
def get_root_input_type_from_json(data):
"""Return the root input type from JSON formatted string."""
return parse_format(json.loads(data))
def parse_format(data):
"""Returns root input type from data."""
input_types = {}
data = data['ist_nodes']
root_id = data[0]['id'] # set root type
for item in data:
input_type = _get_input_type(item)
if input_type is not None:
input_types[input_type['id']] = input_type # register by id
_substitute_ids_with_references(input_types)
return input_types[root_id]
SCALAR = ['Integer', 'Double', 'Bool', 'String', 'Selection', 'FileName']
def is_scalar(input_type):
"""Returns True if input_type is scalar."""
return input_type['base_type'] in SCALAR
RE_PARAM = re.compile('^<([a-zA-Z][a-zA-Z0-9_]*)>$')
def is_param(value):
"""Determine whether given value is a parameter string."""
if not isinstance(value, str):
return False
return RE_PARAM.match(value)
def <|fim_middle|>(input_types):
"""Replaces ids or type names with python object references."""
input_type = {}
def _substitute_implementations():
"""Replaces implementation ids with input_types."""
impls = {}
for id_ in input_type['implementations']:
type_ = input_types[id_]
impls[type_['name']] = type_
input_type['implementations'] = impls
def _substitute_default_descendant():
"""Replaces default descendant id with input_type."""
id_ = input_type.get('default_descendant', None)
if id_ is not None:
input_type['default_descendant'] = input_types[id_]
def _substitute_key_type():
"""Replaces key type with input_type."""
# pylint: disable=unused-variable, invalid-name
for __, value in input_type['keys'].items():
value['type'] = input_types[value['type']]
# pylint: disable=unused-variable, invalid-name
for __, input_type in input_types.items():
if input_type['base_type'] == 'Array':
input_type['subtype'] = input_types[input_type['subtype']]
elif input_type['base_type'] == 'Abstract':
_substitute_implementations()
_substitute_default_descendant()
elif input_type['base_type'] == 'Record':
_substitute_key_type()
def _get_input_type(data):
"""Returns the input_type data structure that defines an input type
and its constraints for validation."""
if 'id' not in data or 'input_type' not in data:
return None
input_type = dict(
id=data['id'],
base_type=data['input_type']
)
input_type['name'] = data.get('name', '')
input_type['full_name'] = data.get('full_name', '')
input_type['description'] = data.get('description', '')
input_type['attributes'] = data.get('attributes', {})
if input_type['base_type'] in ['Double', 'Integer']:
input_type.update(_parse_range(data))
elif input_type['base_type'] == 'Array':
input_type.update(_parse_range(data))
if input_type['min'] < 0:
input_type['min'] = 0
input_type['subtype'] = data['subtype']
elif input_type['base_type'] == 'FileName':
input_type['file_mode'] = data['file_mode']
elif input_type['base_type'] == 'Selection':
input_type['values'] = _list_to_dict(data['values'], 'name')
elif input_type['base_type'] == 'Record':
input_type['keys'] = _list_to_dict(data['keys'])
input_type['implements'] = data.get('implements', [])
input_type['reducible_to_key'] = data.get('reducible_to_key', None)
elif input_type['base_type'] == 'Abstract':
input_type['implementations'] = data['implementations']
input_type['default_descendant'] = data.get('default_descendant', None)
return input_type
def _parse_range(data):
"""Parses the format range properties - min, max."""
input_type = {}
try:
input_type['min'] = data['range'][0]
except (KeyError, TypeError): # set default value
input_type['min'] = float('-inf')
try:
input_type['max'] = data['range'][1]
except (KeyError, TypeError): # set default value
input_type['max'] = float('inf')
return input_type
def _list_to_dict(list_, key_label='key'):
"""
Transforms a list of dictionaries into a dictionary of dictionaries.
Original dictionaries are assigned key specified in each of them
by key_label.
"""
dict_ = {}
for item in list_:
dict_[item[key_label]] = item
return dict_
<|fim▁end|> | _substitute_ids_with_references |
<|file_name|>format.py<|end_file_name|><|fim▁begin|>"""
Contains format specification class and methods to parse it from JSON.
.. codeauthor:: Tomas Krizek <[email protected]>
"""
import json
import re
def get_root_input_type_from_json(data):
"""Return the root input type from JSON formatted string."""
return parse_format(json.loads(data))
def parse_format(data):
"""Returns root input type from data."""
input_types = {}
data = data['ist_nodes']
root_id = data[0]['id'] # set root type
for item in data:
input_type = _get_input_type(item)
if input_type is not None:
input_types[input_type['id']] = input_type # register by id
_substitute_ids_with_references(input_types)
return input_types[root_id]
SCALAR = ['Integer', 'Double', 'Bool', 'String', 'Selection', 'FileName']
def is_scalar(input_type):
"""Returns True if input_type is scalar."""
return input_type['base_type'] in SCALAR
RE_PARAM = re.compile('^<([a-zA-Z][a-zA-Z0-9_]*)>$')
def is_param(value):
"""Determine whether given value is a parameter string."""
if not isinstance(value, str):
return False
return RE_PARAM.match(value)
def _substitute_ids_with_references(input_types):
"""Replaces ids or type names with python object references."""
input_type = {}
def <|fim_middle|>():
"""Replaces implementation ids with input_types."""
impls = {}
for id_ in input_type['implementations']:
type_ = input_types[id_]
impls[type_['name']] = type_
input_type['implementations'] = impls
def _substitute_default_descendant():
"""Replaces default descendant id with input_type."""
id_ = input_type.get('default_descendant', None)
if id_ is not None:
input_type['default_descendant'] = input_types[id_]
def _substitute_key_type():
"""Replaces key type with input_type."""
# pylint: disable=unused-variable, invalid-name
for __, value in input_type['keys'].items():
value['type'] = input_types[value['type']]
# pylint: disable=unused-variable, invalid-name
for __, input_type in input_types.items():
if input_type['base_type'] == 'Array':
input_type['subtype'] = input_types[input_type['subtype']]
elif input_type['base_type'] == 'Abstract':
_substitute_implementations()
_substitute_default_descendant()
elif input_type['base_type'] == 'Record':
_substitute_key_type()
def _get_input_type(data):
"""Returns the input_type data structure that defines an input type
and its constraints for validation."""
if 'id' not in data or 'input_type' not in data:
return None
input_type = dict(
id=data['id'],
base_type=data['input_type']
)
input_type['name'] = data.get('name', '')
input_type['full_name'] = data.get('full_name', '')
input_type['description'] = data.get('description', '')
input_type['attributes'] = data.get('attributes', {})
if input_type['base_type'] in ['Double', 'Integer']:
input_type.update(_parse_range(data))
elif input_type['base_type'] == 'Array':
input_type.update(_parse_range(data))
if input_type['min'] < 0:
input_type['min'] = 0
input_type['subtype'] = data['subtype']
elif input_type['base_type'] == 'FileName':
input_type['file_mode'] = data['file_mode']
elif input_type['base_type'] == 'Selection':
input_type['values'] = _list_to_dict(data['values'], 'name')
elif input_type['base_type'] == 'Record':
input_type['keys'] = _list_to_dict(data['keys'])
input_type['implements'] = data.get('implements', [])
input_type['reducible_to_key'] = data.get('reducible_to_key', None)
elif input_type['base_type'] == 'Abstract':
input_type['implementations'] = data['implementations']
input_type['default_descendant'] = data.get('default_descendant', None)
return input_type
def _parse_range(data):
"""Parses the format range properties - min, max."""
input_type = {}
try:
input_type['min'] = data['range'][0]
except (KeyError, TypeError): # set default value
input_type['min'] = float('-inf')
try:
input_type['max'] = data['range'][1]
except (KeyError, TypeError): # set default value
input_type['max'] = float('inf')
return input_type
def _list_to_dict(list_, key_label='key'):
"""
Transforms a list of dictionaries into a dictionary of dictionaries.
Original dictionaries are assigned key specified in each of them
by key_label.
"""
dict_ = {}
for item in list_:
dict_[item[key_label]] = item
return dict_
<|fim▁end|> | _substitute_implementations |
<|file_name|>format.py<|end_file_name|><|fim▁begin|>"""
Contains format specification class and methods to parse it from JSON.
.. codeauthor:: Tomas Krizek <[email protected]>
"""
import json
import re
def get_root_input_type_from_json(data):
"""Return the root input type from JSON formatted string."""
return parse_format(json.loads(data))
def parse_format(data):
"""Returns root input type from data."""
input_types = {}
data = data['ist_nodes']
root_id = data[0]['id'] # set root type
for item in data:
input_type = _get_input_type(item)
if input_type is not None:
input_types[input_type['id']] = input_type # register by id
_substitute_ids_with_references(input_types)
return input_types[root_id]
SCALAR = ['Integer', 'Double', 'Bool', 'String', 'Selection', 'FileName']
def is_scalar(input_type):
"""Returns True if input_type is scalar."""
return input_type['base_type'] in SCALAR
RE_PARAM = re.compile('^<([a-zA-Z][a-zA-Z0-9_]*)>$')
def is_param(value):
"""Determine whether given value is a parameter string."""
if not isinstance(value, str):
return False
return RE_PARAM.match(value)
def _substitute_ids_with_references(input_types):
"""Replaces ids or type names with python object references."""
input_type = {}
def _substitute_implementations():
"""Replaces implementation ids with input_types."""
impls = {}
for id_ in input_type['implementations']:
type_ = input_types[id_]
impls[type_['name']] = type_
input_type['implementations'] = impls
def <|fim_middle|>():
"""Replaces default descendant id with input_type."""
id_ = input_type.get('default_descendant', None)
if id_ is not None:
input_type['default_descendant'] = input_types[id_]
def _substitute_key_type():
"""Replaces key type with input_type."""
# pylint: disable=unused-variable, invalid-name
for __, value in input_type['keys'].items():
value['type'] = input_types[value['type']]
# pylint: disable=unused-variable, invalid-name
for __, input_type in input_types.items():
if input_type['base_type'] == 'Array':
input_type['subtype'] = input_types[input_type['subtype']]
elif input_type['base_type'] == 'Abstract':
_substitute_implementations()
_substitute_default_descendant()
elif input_type['base_type'] == 'Record':
_substitute_key_type()
def _get_input_type(data):
"""Returns the input_type data structure that defines an input type
and its constraints for validation."""
if 'id' not in data or 'input_type' not in data:
return None
input_type = dict(
id=data['id'],
base_type=data['input_type']
)
input_type['name'] = data.get('name', '')
input_type['full_name'] = data.get('full_name', '')
input_type['description'] = data.get('description', '')
input_type['attributes'] = data.get('attributes', {})
if input_type['base_type'] in ['Double', 'Integer']:
input_type.update(_parse_range(data))
elif input_type['base_type'] == 'Array':
input_type.update(_parse_range(data))
if input_type['min'] < 0:
input_type['min'] = 0
input_type['subtype'] = data['subtype']
elif input_type['base_type'] == 'FileName':
input_type['file_mode'] = data['file_mode']
elif input_type['base_type'] == 'Selection':
input_type['values'] = _list_to_dict(data['values'], 'name')
elif input_type['base_type'] == 'Record':
input_type['keys'] = _list_to_dict(data['keys'])
input_type['implements'] = data.get('implements', [])
input_type['reducible_to_key'] = data.get('reducible_to_key', None)
elif input_type['base_type'] == 'Abstract':
input_type['implementations'] = data['implementations']
input_type['default_descendant'] = data.get('default_descendant', None)
return input_type
def _parse_range(data):
"""Parses the format range properties - min, max."""
input_type = {}
try:
input_type['min'] = data['range'][0]
except (KeyError, TypeError): # set default value
input_type['min'] = float('-inf')
try:
input_type['max'] = data['range'][1]
except (KeyError, TypeError): # set default value
input_type['max'] = float('inf')
return input_type
def _list_to_dict(list_, key_label='key'):
"""
Transforms a list of dictionaries into a dictionary of dictionaries.
Original dictionaries are assigned key specified in each of them
by key_label.
"""
dict_ = {}
for item in list_:
dict_[item[key_label]] = item
return dict_
<|fim▁end|> | _substitute_default_descendant |
<|file_name|>format.py<|end_file_name|><|fim▁begin|>"""
Contains format specification class and methods to parse it from JSON.
.. codeauthor:: Tomas Krizek <[email protected]>
"""
import json
import re
def get_root_input_type_from_json(data):
"""Return the root input type from JSON formatted string."""
return parse_format(json.loads(data))
def parse_format(data):
"""Returns root input type from data."""
input_types = {}
data = data['ist_nodes']
root_id = data[0]['id'] # set root type
for item in data:
input_type = _get_input_type(item)
if input_type is not None:
input_types[input_type['id']] = input_type # register by id
_substitute_ids_with_references(input_types)
return input_types[root_id]
SCALAR = ['Integer', 'Double', 'Bool', 'String', 'Selection', 'FileName']
def is_scalar(input_type):
"""Returns True if input_type is scalar."""
return input_type['base_type'] in SCALAR
RE_PARAM = re.compile('^<([a-zA-Z][a-zA-Z0-9_]*)>$')
def is_param(value):
"""Determine whether given value is a parameter string."""
if not isinstance(value, str):
return False
return RE_PARAM.match(value)
def _substitute_ids_with_references(input_types):
"""Replaces ids or type names with python object references."""
input_type = {}
def _substitute_implementations():
"""Replaces implementation ids with input_types."""
impls = {}
for id_ in input_type['implementations']:
type_ = input_types[id_]
impls[type_['name']] = type_
input_type['implementations'] = impls
def _substitute_default_descendant():
"""Replaces default descendant id with input_type."""
id_ = input_type.get('default_descendant', None)
if id_ is not None:
input_type['default_descendant'] = input_types[id_]
def <|fim_middle|>():
"""Replaces key type with input_type."""
# pylint: disable=unused-variable, invalid-name
for __, value in input_type['keys'].items():
value['type'] = input_types[value['type']]
# pylint: disable=unused-variable, invalid-name
for __, input_type in input_types.items():
if input_type['base_type'] == 'Array':
input_type['subtype'] = input_types[input_type['subtype']]
elif input_type['base_type'] == 'Abstract':
_substitute_implementations()
_substitute_default_descendant()
elif input_type['base_type'] == 'Record':
_substitute_key_type()
def _get_input_type(data):
"""Returns the input_type data structure that defines an input type
and its constraints for validation."""
if 'id' not in data or 'input_type' not in data:
return None
input_type = dict(
id=data['id'],
base_type=data['input_type']
)
input_type['name'] = data.get('name', '')
input_type['full_name'] = data.get('full_name', '')
input_type['description'] = data.get('description', '')
input_type['attributes'] = data.get('attributes', {})
if input_type['base_type'] in ['Double', 'Integer']:
input_type.update(_parse_range(data))
elif input_type['base_type'] == 'Array':
input_type.update(_parse_range(data))
if input_type['min'] < 0:
input_type['min'] = 0
input_type['subtype'] = data['subtype']
elif input_type['base_type'] == 'FileName':
input_type['file_mode'] = data['file_mode']
elif input_type['base_type'] == 'Selection':
input_type['values'] = _list_to_dict(data['values'], 'name')
elif input_type['base_type'] == 'Record':
input_type['keys'] = _list_to_dict(data['keys'])
input_type['implements'] = data.get('implements', [])
input_type['reducible_to_key'] = data.get('reducible_to_key', None)
elif input_type['base_type'] == 'Abstract':
input_type['implementations'] = data['implementations']
input_type['default_descendant'] = data.get('default_descendant', None)
return input_type
def _parse_range(data):
"""Parses the format range properties - min, max."""
input_type = {}
try:
input_type['min'] = data['range'][0]
except (KeyError, TypeError): # set default value
input_type['min'] = float('-inf')
try:
input_type['max'] = data['range'][1]
except (KeyError, TypeError): # set default value
input_type['max'] = float('inf')
return input_type
def _list_to_dict(list_, key_label='key'):
"""
Transforms a list of dictionaries into a dictionary of dictionaries.
Original dictionaries are assigned key specified in each of them
by key_label.
"""
dict_ = {}
for item in list_:
dict_[item[key_label]] = item
return dict_
<|fim▁end|> | _substitute_key_type |
<|file_name|>format.py<|end_file_name|><|fim▁begin|>"""
Contains format specification class and methods to parse it from JSON.
.. codeauthor:: Tomas Krizek <[email protected]>
"""
import json
import re
def get_root_input_type_from_json(data):
"""Return the root input type from JSON formatted string."""
return parse_format(json.loads(data))
def parse_format(data):
"""Returns root input type from data."""
input_types = {}
data = data['ist_nodes']
root_id = data[0]['id'] # set root type
for item in data:
input_type = _get_input_type(item)
if input_type is not None:
input_types[input_type['id']] = input_type # register by id
_substitute_ids_with_references(input_types)
return input_types[root_id]
SCALAR = ['Integer', 'Double', 'Bool', 'String', 'Selection', 'FileName']
def is_scalar(input_type):
"""Returns True if input_type is scalar."""
return input_type['base_type'] in SCALAR
RE_PARAM = re.compile('^<([a-zA-Z][a-zA-Z0-9_]*)>$')
def is_param(value):
"""Determine whether given value is a parameter string."""
if not isinstance(value, str):
return False
return RE_PARAM.match(value)
def _substitute_ids_with_references(input_types):
"""Replaces ids or type names with python object references."""
input_type = {}
def _substitute_implementations():
"""Replaces implementation ids with input_types."""
impls = {}
for id_ in input_type['implementations']:
type_ = input_types[id_]
impls[type_['name']] = type_
input_type['implementations'] = impls
def _substitute_default_descendant():
"""Replaces default descendant id with input_type."""
id_ = input_type.get('default_descendant', None)
if id_ is not None:
input_type['default_descendant'] = input_types[id_]
def _substitute_key_type():
"""Replaces key type with input_type."""
# pylint: disable=unused-variable, invalid-name
for __, value in input_type['keys'].items():
value['type'] = input_types[value['type']]
# pylint: disable=unused-variable, invalid-name
for __, input_type in input_types.items():
if input_type['base_type'] == 'Array':
input_type['subtype'] = input_types[input_type['subtype']]
elif input_type['base_type'] == 'Abstract':
_substitute_implementations()
_substitute_default_descendant()
elif input_type['base_type'] == 'Record':
_substitute_key_type()
def <|fim_middle|>(data):
"""Returns the input_type data structure that defines an input type
and its constraints for validation."""
if 'id' not in data or 'input_type' not in data:
return None
input_type = dict(
id=data['id'],
base_type=data['input_type']
)
input_type['name'] = data.get('name', '')
input_type['full_name'] = data.get('full_name', '')
input_type['description'] = data.get('description', '')
input_type['attributes'] = data.get('attributes', {})
if input_type['base_type'] in ['Double', 'Integer']:
input_type.update(_parse_range(data))
elif input_type['base_type'] == 'Array':
input_type.update(_parse_range(data))
if input_type['min'] < 0:
input_type['min'] = 0
input_type['subtype'] = data['subtype']
elif input_type['base_type'] == 'FileName':
input_type['file_mode'] = data['file_mode']
elif input_type['base_type'] == 'Selection':
input_type['values'] = _list_to_dict(data['values'], 'name')
elif input_type['base_type'] == 'Record':
input_type['keys'] = _list_to_dict(data['keys'])
input_type['implements'] = data.get('implements', [])
input_type['reducible_to_key'] = data.get('reducible_to_key', None)
elif input_type['base_type'] == 'Abstract':
input_type['implementations'] = data['implementations']
input_type['default_descendant'] = data.get('default_descendant', None)
return input_type
def _parse_range(data):
"""Parses the format range properties - min, max."""
input_type = {}
try:
input_type['min'] = data['range'][0]
except (KeyError, TypeError): # set default value
input_type['min'] = float('-inf')
try:
input_type['max'] = data['range'][1]
except (KeyError, TypeError): # set default value
input_type['max'] = float('inf')
return input_type
def _list_to_dict(list_, key_label='key'):
"""
Transforms a list of dictionaries into a dictionary of dictionaries.
Original dictionaries are assigned key specified in each of them
by key_label.
"""
dict_ = {}
for item in list_:
dict_[item[key_label]] = item
return dict_
<|fim▁end|> | _get_input_type |
<|file_name|>format.py<|end_file_name|><|fim▁begin|>"""
Contains format specification class and methods to parse it from JSON.
.. codeauthor:: Tomas Krizek <[email protected]>
"""
import json
import re
def get_root_input_type_from_json(data):
"""Return the root input type from JSON formatted string."""
return parse_format(json.loads(data))
def parse_format(data):
"""Returns root input type from data."""
input_types = {}
data = data['ist_nodes']
root_id = data[0]['id'] # set root type
for item in data:
input_type = _get_input_type(item)
if input_type is not None:
input_types[input_type['id']] = input_type # register by id
_substitute_ids_with_references(input_types)
return input_types[root_id]
SCALAR = ['Integer', 'Double', 'Bool', 'String', 'Selection', 'FileName']
def is_scalar(input_type):
"""Returns True if input_type is scalar."""
return input_type['base_type'] in SCALAR
RE_PARAM = re.compile('^<([a-zA-Z][a-zA-Z0-9_]*)>$')
def is_param(value):
"""Determine whether given value is a parameter string."""
if not isinstance(value, str):
return False
return RE_PARAM.match(value)
def _substitute_ids_with_references(input_types):
"""Replaces ids or type names with python object references."""
input_type = {}
def _substitute_implementations():
"""Replaces implementation ids with input_types."""
impls = {}
for id_ in input_type['implementations']:
type_ = input_types[id_]
impls[type_['name']] = type_
input_type['implementations'] = impls
def _substitute_default_descendant():
"""Replaces default descendant id with input_type."""
id_ = input_type.get('default_descendant', None)
if id_ is not None:
input_type['default_descendant'] = input_types[id_]
def _substitute_key_type():
"""Replaces key type with input_type."""
# pylint: disable=unused-variable, invalid-name
for __, value in input_type['keys'].items():
value['type'] = input_types[value['type']]
# pylint: disable=unused-variable, invalid-name
for __, input_type in input_types.items():
if input_type['base_type'] == 'Array':
input_type['subtype'] = input_types[input_type['subtype']]
elif input_type['base_type'] == 'Abstract':
_substitute_implementations()
_substitute_default_descendant()
elif input_type['base_type'] == 'Record':
_substitute_key_type()
def _get_input_type(data):
"""Returns the input_type data structure that defines an input type
and its constraints for validation."""
if 'id' not in data or 'input_type' not in data:
return None
input_type = dict(
id=data['id'],
base_type=data['input_type']
)
input_type['name'] = data.get('name', '')
input_type['full_name'] = data.get('full_name', '')
input_type['description'] = data.get('description', '')
input_type['attributes'] = data.get('attributes', {})
if input_type['base_type'] in ['Double', 'Integer']:
input_type.update(_parse_range(data))
elif input_type['base_type'] == 'Array':
input_type.update(_parse_range(data))
if input_type['min'] < 0:
input_type['min'] = 0
input_type['subtype'] = data['subtype']
elif input_type['base_type'] == 'FileName':
input_type['file_mode'] = data['file_mode']
elif input_type['base_type'] == 'Selection':
input_type['values'] = _list_to_dict(data['values'], 'name')
elif input_type['base_type'] == 'Record':
input_type['keys'] = _list_to_dict(data['keys'])
input_type['implements'] = data.get('implements', [])
input_type['reducible_to_key'] = data.get('reducible_to_key', None)
elif input_type['base_type'] == 'Abstract':
input_type['implementations'] = data['implementations']
input_type['default_descendant'] = data.get('default_descendant', None)
return input_type
def <|fim_middle|>(data):
"""Parses the format range properties - min, max."""
input_type = {}
try:
input_type['min'] = data['range'][0]
except (KeyError, TypeError): # set default value
input_type['min'] = float('-inf')
try:
input_type['max'] = data['range'][1]
except (KeyError, TypeError): # set default value
input_type['max'] = float('inf')
return input_type
def _list_to_dict(list_, key_label='key'):
"""
Transforms a list of dictionaries into a dictionary of dictionaries.
Original dictionaries are assigned key specified in each of them
by key_label.
"""
dict_ = {}
for item in list_:
dict_[item[key_label]] = item
return dict_
<|fim▁end|> | _parse_range |
<|file_name|>format.py<|end_file_name|><|fim▁begin|>"""
Contains format specification class and methods to parse it from JSON.
.. codeauthor:: Tomas Krizek <[email protected]>
"""
import json
import re
def get_root_input_type_from_json(data):
"""Return the root input type from JSON formatted string."""
return parse_format(json.loads(data))
def parse_format(data):
"""Returns root input type from data."""
input_types = {}
data = data['ist_nodes']
root_id = data[0]['id'] # set root type
for item in data:
input_type = _get_input_type(item)
if input_type is not None:
input_types[input_type['id']] = input_type # register by id
_substitute_ids_with_references(input_types)
return input_types[root_id]
SCALAR = ['Integer', 'Double', 'Bool', 'String', 'Selection', 'FileName']
def is_scalar(input_type):
"""Returns True if input_type is scalar."""
return input_type['base_type'] in SCALAR
RE_PARAM = re.compile('^<([a-zA-Z][a-zA-Z0-9_]*)>$')
def is_param(value):
"""Determine whether given value is a parameter string."""
if not isinstance(value, str):
return False
return RE_PARAM.match(value)
def _substitute_ids_with_references(input_types):
"""Replaces ids or type names with python object references."""
input_type = {}
def _substitute_implementations():
"""Replaces implementation ids with input_types."""
impls = {}
for id_ in input_type['implementations']:
type_ = input_types[id_]
impls[type_['name']] = type_
input_type['implementations'] = impls
def _substitute_default_descendant():
"""Replaces default descendant id with input_type."""
id_ = input_type.get('default_descendant', None)
if id_ is not None:
input_type['default_descendant'] = input_types[id_]
def _substitute_key_type():
"""Replaces key type with input_type."""
# pylint: disable=unused-variable, invalid-name
for __, value in input_type['keys'].items():
value['type'] = input_types[value['type']]
# pylint: disable=unused-variable, invalid-name
for __, input_type in input_types.items():
if input_type['base_type'] == 'Array':
input_type['subtype'] = input_types[input_type['subtype']]
elif input_type['base_type'] == 'Abstract':
_substitute_implementations()
_substitute_default_descendant()
elif input_type['base_type'] == 'Record':
_substitute_key_type()
def _get_input_type(data):
"""Returns the input_type data structure that defines an input type
and its constraints for validation."""
if 'id' not in data or 'input_type' not in data:
return None
input_type = dict(
id=data['id'],
base_type=data['input_type']
)
input_type['name'] = data.get('name', '')
input_type['full_name'] = data.get('full_name', '')
input_type['description'] = data.get('description', '')
input_type['attributes'] = data.get('attributes', {})
if input_type['base_type'] in ['Double', 'Integer']:
input_type.update(_parse_range(data))
elif input_type['base_type'] == 'Array':
input_type.update(_parse_range(data))
if input_type['min'] < 0:
input_type['min'] = 0
input_type['subtype'] = data['subtype']
elif input_type['base_type'] == 'FileName':
input_type['file_mode'] = data['file_mode']
elif input_type['base_type'] == 'Selection':
input_type['values'] = _list_to_dict(data['values'], 'name')
elif input_type['base_type'] == 'Record':
input_type['keys'] = _list_to_dict(data['keys'])
input_type['implements'] = data.get('implements', [])
input_type['reducible_to_key'] = data.get('reducible_to_key', None)
elif input_type['base_type'] == 'Abstract':
input_type['implementations'] = data['implementations']
input_type['default_descendant'] = data.get('default_descendant', None)
return input_type
def _parse_range(data):
"""Parses the format range properties - min, max."""
input_type = {}
try:
input_type['min'] = data['range'][0]
except (KeyError, TypeError): # set default value
input_type['min'] = float('-inf')
try:
input_type['max'] = data['range'][1]
except (KeyError, TypeError): # set default value
input_type['max'] = float('inf')
return input_type
def <|fim_middle|>(list_, key_label='key'):
"""
Transforms a list of dictionaries into a dictionary of dictionaries.
Original dictionaries are assigned key specified in each of them
by key_label.
"""
dict_ = {}
for item in list_:
dict_[item[key_label]] = item
return dict_
<|fim▁end|> | _list_to_dict |
<|file_name|>conf.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# SymPy documentation build configuration file, created by
# sphinx-quickstart.py on Sat Mar 22 19:34:32 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys
# If your extensions are in another directory, add it here.
sys.path.extend(['../sympy', 'ext'])
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.addons.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.mathjax',
'numpydoc', 'sympylive',]<|fim▁hole|>
# Use this to use pngmath instead
#extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.pngmath', ]
# MathJax file, which is free to use. See http://www.mathjax.org/docs/2.0/start.html
mathjax_path = 'https://c328740.ssl.cf1.rackcdn.com/mathjax/latest/MathJax.js?config=TeX-AMS_HTML-full'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'SymPy'
copyright = '2008, 2009, 2010, 2011, 2012 SymPy Development Team'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = '0.7.2'
# The full version, including alpha/beta/rc tags.
release = '0.7.2-git'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# Translations:
locale_dirs = ["i18n/"]
# List of documents that shouldn't be included in the build.
#unused_docs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'default.css'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
html_logo = '_static/sympylogo.png'
html_favicon = '../logo/SymPy-Favicon.ico'
html_theme_options = {'collapsiblesidebar': True}
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Content template for the index page.
#html_index = ''
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# Output file base name for HTML help builder.
htmlhelp_basename = 'SymPydoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual], toctree_only).
# toctree_only is set to True so that the start file document itself is not included in the
# output, only the documents referenced by it via TOC trees. The extra stuff in the master
# document is intended to show up in the HTML, but doesn't really belong in the LaTeX output.
latex_documents = [('index', 'sympy-%s.tex' % release, 'SymPy Documentation',
'SymPy Development Team', 'manual', True)]
# Additional stuff for the LaTeX preamble.
# Tweaked to work with XeTeX.
latex_elements = {
'babel': '',
'fontenc': r'''
\usepackage{amssymb}
\usepackage{fontspec}
\defaultfontfeatures{Mapping=tex-text}
\setmainfont{DejaVu Serif}
\setsansfont{DejaVu Sans}
\setmonofont{DejaVu Sans Mono}
''',
'fontpkg': '',
'inputenc': '',
'utf8extra': '',
'preamble': ''
}
# SymPy logo on title page
latex_logo = '_static/sympylogo.png'
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# Show page numbers next to internal references
latex_show_pagerefs = True
# We use False otherwise the module index gets generated twice.
latex_use_modindex = False
default_role = 'math'
pngmath_divpng_args = ['-gamma 1.5','-D 110']
# Note, this is ignored by the mathjax extension
# Any \newcommand should be defined in the file
pngmath_latex_preamble = '\\usepackage{amsmath}\n'+\
'\\usepackage{bm}\n'+\
'\\usepackage{amsfonts}\n'+\
'\\usepackage{amssymb}\n'+\
'\\setlength{\\parindent}{0pt}\n'
texinfo_documents = [
(master_doc, 'sympy', 'SymPy Documentation',
'SymPy Development Team',
'SymPy', 'Computer algebra system (CAS) in Python', 'Programming',
1),
]<|fim▁end|> | |
<|file_name|>hash_file.py<|end_file_name|><|fim▁begin|>import hashlib
from core.analytics import InlineAnalytics
from core.observables import Hash
HASH_TYPES_DICT = {
"md5": hashlib.md5,
"sha1": hashlib.sha1,
"sha256": hashlib.sha256,
"sha512": hashlib.sha512,
}
class HashFile(InlineAnalytics):
default_values = {
"name": "HashFile",
"description": "Extracts MD5, SHA1, SHA256, SHA512 hashes from file",
}
ACTS_ON = ["File", "Certificate"]<|fim▁hole|> def each(f):
if f.body:
f.hashes = []
for hash_type, h in HashFile.extract_hashes(f.body.contents):
hash_object = Hash.get_or_create(value=h.hexdigest())
hash_object.add_source("analytics")
hash_object.save()
f.active_link_to(
hash_object,
"{} hash".format(hash_type.upper()),
"HashFile",
clean_old=False,
)
f.hashes.append({"hash": hash_type, "value": h.hexdigest()})
f.save()
@staticmethod
def extract_hashes(body_contents):
hashers = {k: HASH_TYPES_DICT[k]() for k in HASH_TYPES_DICT}
while True:
chunk = body_contents.read(512 * 16)
if not chunk:
break
for h in hashers.values():
h.update(chunk)
return hashers.items()<|fim▁end|> |
@staticmethod |
<|file_name|>hash_file.py<|end_file_name|><|fim▁begin|>import hashlib
from core.analytics import InlineAnalytics
from core.observables import Hash
HASH_TYPES_DICT = {
"md5": hashlib.md5,
"sha1": hashlib.sha1,
"sha256": hashlib.sha256,
"sha512": hashlib.sha512,
}
class HashFile(InlineAnalytics):
<|fim_middle|>
<|fim▁end|> | default_values = {
"name": "HashFile",
"description": "Extracts MD5, SHA1, SHA256, SHA512 hashes from file",
}
ACTS_ON = ["File", "Certificate"]
@staticmethod
def each(f):
if f.body:
f.hashes = []
for hash_type, h in HashFile.extract_hashes(f.body.contents):
hash_object = Hash.get_or_create(value=h.hexdigest())
hash_object.add_source("analytics")
hash_object.save()
f.active_link_to(
hash_object,
"{} hash".format(hash_type.upper()),
"HashFile",
clean_old=False,
)
f.hashes.append({"hash": hash_type, "value": h.hexdigest()})
f.save()
@staticmethod
def extract_hashes(body_contents):
hashers = {k: HASH_TYPES_DICT[k]() for k in HASH_TYPES_DICT}
while True:
chunk = body_contents.read(512 * 16)
if not chunk:
break
for h in hashers.values():
h.update(chunk)
return hashers.items() |
<|file_name|>hash_file.py<|end_file_name|><|fim▁begin|>import hashlib
from core.analytics import InlineAnalytics
from core.observables import Hash
HASH_TYPES_DICT = {
"md5": hashlib.md5,
"sha1": hashlib.sha1,
"sha256": hashlib.sha256,
"sha512": hashlib.sha512,
}
class HashFile(InlineAnalytics):
default_values = {
"name": "HashFile",
"description": "Extracts MD5, SHA1, SHA256, SHA512 hashes from file",
}
ACTS_ON = ["File", "Certificate"]
@staticmethod
def each(f):
<|fim_middle|>
@staticmethod
def extract_hashes(body_contents):
hashers = {k: HASH_TYPES_DICT[k]() for k in HASH_TYPES_DICT}
while True:
chunk = body_contents.read(512 * 16)
if not chunk:
break
for h in hashers.values():
h.update(chunk)
return hashers.items()
<|fim▁end|> | if f.body:
f.hashes = []
for hash_type, h in HashFile.extract_hashes(f.body.contents):
hash_object = Hash.get_or_create(value=h.hexdigest())
hash_object.add_source("analytics")
hash_object.save()
f.active_link_to(
hash_object,
"{} hash".format(hash_type.upper()),
"HashFile",
clean_old=False,
)
f.hashes.append({"hash": hash_type, "value": h.hexdigest()})
f.save() |
<|file_name|>hash_file.py<|end_file_name|><|fim▁begin|>import hashlib
from core.analytics import InlineAnalytics
from core.observables import Hash
HASH_TYPES_DICT = {
"md5": hashlib.md5,
"sha1": hashlib.sha1,
"sha256": hashlib.sha256,
"sha512": hashlib.sha512,
}
class HashFile(InlineAnalytics):
default_values = {
"name": "HashFile",
"description": "Extracts MD5, SHA1, SHA256, SHA512 hashes from file",
}
ACTS_ON = ["File", "Certificate"]
@staticmethod
def each(f):
if f.body:
f.hashes = []
for hash_type, h in HashFile.extract_hashes(f.body.contents):
hash_object = Hash.get_or_create(value=h.hexdigest())
hash_object.add_source("analytics")
hash_object.save()
f.active_link_to(
hash_object,
"{} hash".format(hash_type.upper()),
"HashFile",
clean_old=False,
)
f.hashes.append({"hash": hash_type, "value": h.hexdigest()})
f.save()
@staticmethod
def extract_hashes(body_contents):
<|fim_middle|>
<|fim▁end|> | hashers = {k: HASH_TYPES_DICT[k]() for k in HASH_TYPES_DICT}
while True:
chunk = body_contents.read(512 * 16)
if not chunk:
break
for h in hashers.values():
h.update(chunk)
return hashers.items() |
<|file_name|>hash_file.py<|end_file_name|><|fim▁begin|>import hashlib
from core.analytics import InlineAnalytics
from core.observables import Hash
HASH_TYPES_DICT = {
"md5": hashlib.md5,
"sha1": hashlib.sha1,
"sha256": hashlib.sha256,
"sha512": hashlib.sha512,
}
class HashFile(InlineAnalytics):
default_values = {
"name": "HashFile",
"description": "Extracts MD5, SHA1, SHA256, SHA512 hashes from file",
}
ACTS_ON = ["File", "Certificate"]
@staticmethod
def each(f):
if f.body:
<|fim_middle|>
@staticmethod
def extract_hashes(body_contents):
hashers = {k: HASH_TYPES_DICT[k]() for k in HASH_TYPES_DICT}
while True:
chunk = body_contents.read(512 * 16)
if not chunk:
break
for h in hashers.values():
h.update(chunk)
return hashers.items()
<|fim▁end|> | f.hashes = []
for hash_type, h in HashFile.extract_hashes(f.body.contents):
hash_object = Hash.get_or_create(value=h.hexdigest())
hash_object.add_source("analytics")
hash_object.save()
f.active_link_to(
hash_object,
"{} hash".format(hash_type.upper()),
"HashFile",
clean_old=False,
)
f.hashes.append({"hash": hash_type, "value": h.hexdigest()})
f.save() |
<|file_name|>hash_file.py<|end_file_name|><|fim▁begin|>import hashlib
from core.analytics import InlineAnalytics
from core.observables import Hash
HASH_TYPES_DICT = {
"md5": hashlib.md5,
"sha1": hashlib.sha1,
"sha256": hashlib.sha256,
"sha512": hashlib.sha512,
}
class HashFile(InlineAnalytics):
default_values = {
"name": "HashFile",
"description": "Extracts MD5, SHA1, SHA256, SHA512 hashes from file",
}
ACTS_ON = ["File", "Certificate"]
@staticmethod
def each(f):
if f.body:
f.hashes = []
for hash_type, h in HashFile.extract_hashes(f.body.contents):
hash_object = Hash.get_or_create(value=h.hexdigest())
hash_object.add_source("analytics")
hash_object.save()
f.active_link_to(
hash_object,
"{} hash".format(hash_type.upper()),
"HashFile",
clean_old=False,
)
f.hashes.append({"hash": hash_type, "value": h.hexdigest()})
f.save()
@staticmethod
def extract_hashes(body_contents):
hashers = {k: HASH_TYPES_DICT[k]() for k in HASH_TYPES_DICT}
while True:
chunk = body_contents.read(512 * 16)
if not chunk:
<|fim_middle|>
for h in hashers.values():
h.update(chunk)
return hashers.items()
<|fim▁end|> | break |
<|file_name|>hash_file.py<|end_file_name|><|fim▁begin|>import hashlib
from core.analytics import InlineAnalytics
from core.observables import Hash
HASH_TYPES_DICT = {
"md5": hashlib.md5,
"sha1": hashlib.sha1,
"sha256": hashlib.sha256,
"sha512": hashlib.sha512,
}
class HashFile(InlineAnalytics):
default_values = {
"name": "HashFile",
"description": "Extracts MD5, SHA1, SHA256, SHA512 hashes from file",
}
ACTS_ON = ["File", "Certificate"]
@staticmethod
def <|fim_middle|>(f):
if f.body:
f.hashes = []
for hash_type, h in HashFile.extract_hashes(f.body.contents):
hash_object = Hash.get_or_create(value=h.hexdigest())
hash_object.add_source("analytics")
hash_object.save()
f.active_link_to(
hash_object,
"{} hash".format(hash_type.upper()),
"HashFile",
clean_old=False,
)
f.hashes.append({"hash": hash_type, "value": h.hexdigest()})
f.save()
@staticmethod
def extract_hashes(body_contents):
hashers = {k: HASH_TYPES_DICT[k]() for k in HASH_TYPES_DICT}
while True:
chunk = body_contents.read(512 * 16)
if not chunk:
break
for h in hashers.values():
h.update(chunk)
return hashers.items()
<|fim▁end|> | each |
<|file_name|>hash_file.py<|end_file_name|><|fim▁begin|>import hashlib
from core.analytics import InlineAnalytics
from core.observables import Hash
HASH_TYPES_DICT = {
"md5": hashlib.md5,
"sha1": hashlib.sha1,
"sha256": hashlib.sha256,
"sha512": hashlib.sha512,
}
class HashFile(InlineAnalytics):
default_values = {
"name": "HashFile",
"description": "Extracts MD5, SHA1, SHA256, SHA512 hashes from file",
}
ACTS_ON = ["File", "Certificate"]
@staticmethod
def each(f):
if f.body:
f.hashes = []
for hash_type, h in HashFile.extract_hashes(f.body.contents):
hash_object = Hash.get_or_create(value=h.hexdigest())
hash_object.add_source("analytics")
hash_object.save()
f.active_link_to(
hash_object,
"{} hash".format(hash_type.upper()),
"HashFile",
clean_old=False,
)
f.hashes.append({"hash": hash_type, "value": h.hexdigest()})
f.save()
@staticmethod
def <|fim_middle|>(body_contents):
hashers = {k: HASH_TYPES_DICT[k]() for k in HASH_TYPES_DICT}
while True:
chunk = body_contents.read(512 * 16)
if not chunk:
break
for h in hashers.values():
h.update(chunk)
return hashers.items()
<|fim▁end|> | extract_hashes |
<|file_name|>server-again.py<|end_file_name|><|fim▁begin|>###########################################################################
# Concurrent WSGI server - webserver3h.py #
# #
# Tested with Python 2.7.9 on Ubuntu 14.04 & Mac OS X #
###########################################################################
import errno
import os
import signal
import socket
import StringIO
import sys
def grim_reaper(signum, frame):
while True:
try:
pid, status = os.waitpid(<|fim▁hole|> 'Child {pid} terminated with status {status}'
'\n'.format(pid=pid, status=status)
)
except OSError:
return
if pid == 0: # no more zombies
return
class WSGIServer(object):
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 1024
def __init__(self, server_address):
# Create a listening socket
self.listen_socket = listen_socket = socket.socket(
self.address_family,
self.socket_type
)
# Allow to reuse the same address
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind
listen_socket.bind(server_address)
# Activate
listen_socket.listen(self.request_queue_size)
# Get server host name and port
host, port = self.listen_socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
# Return headers set by Web framework/Web application
self.headers_set = []
def set_app(self, application):
self.application = application
def serve_forever(self):
listen_socket = self.listen_socket
while True:
try:
self.client_connection, client_address = listen_socket.accept()
except IOError as e:
code, msg = e.args
# restart 'accept' if it was interrupted
if code == errno.EINTR:
continue
else:
raise
pid = os.fork()
if pid == 0: # child
listen_socket.close() # close child copy
# Handle one request and close the client connection.
self.handle_one_request()
os._exit(0)
else: # parent
self.client_connection.close() # close parent copy
def handle_one_request(self):
self.request_data = request_data = self.client_connection.recv(1024)
# Print formatted request data a la 'curl -v'
print(''.join(
'< {line}\n'.format(line=line)
for line in request_data.splitlines()
))
self.parse_request(request_data)
# Construct environment dictionary using request data
env = self.get_environ()
# It's time to call our application callable and get
# back a result that will become HTTP response body
result = self.application(env, self.start_response)
# Construct a response and send it back to the client
self.finish_response(result)
def parse_request(self, text):
request_line = text.splitlines()[0]
request_line = request_line.rstrip('\r\n')
# Break down the request line into components
(self.request_method, # GET
self.path, # /hello
self.request_version # HTTP/1.1
) = request_line.split()
def get_environ(self):
env = {}
# The following code snippet does not follow PEP8 conventions
# but it's formatted the way it is for demonstration purposes
# to emphasize the required variables and their values
#
# Required WSGI variables
env['wsgi.version'] = (1, 0)
env['wsgi.url_scheme'] = 'http'
env['wsgi.input'] = StringIO.StringIO(self.request_data)
env['wsgi.errors'] = sys.stderr
env['wsgi.multithread'] = False
env['wsgi.multiprocess'] = False
env['wsgi.run_once'] = False
# Required CGI variables
env['REQUEST_METHOD'] = self.request_method # GET
env['PATH_INFO'] = self.path # /hello
env['SERVER_NAME'] = self.server_name # localhost
env['SERVER_PORT'] = str(self.server_port) # 8888
return env
def start_response(self, status, response_headers, exc_info=None):
# Add necessary server headers
server_headers = [
('Date', 'Tue, 31 Mar 2015 12:54:48 GMT'),
('Server', 'WSGIServer 0.2'),
]
self.headers_set = [status, response_headers + server_headers]
# To adhere to WSGI specification the start_response must return
# a 'write' callable. For simplicity's sake we'll ignore that detail
# for now.
# return self.finish_response
def finish_response(self, result):
try:
status, response_headers = self.headers_set
response = 'HTTP/1.1 {status}\r\n'.format(status=status)
for header in response_headers:
response += '{0}: {1}\r\n'.format(*header)
response += '\r\n'
for data in result:
response += data
# Print formatted response data a la 'curl -v'
print(''.join(
'> {line}\n'.format(line=line)
for line in response.splitlines()
))
self.client_connection.sendall(response)
finally:
self.client_connection.close()
SERVER_ADDRESS = (HOST, PORT) = '', 8888
def make_server(server_address, application):
signal.signal(signal.SIGCHLD, grim_reaper)
server = WSGIServer(server_address)
server.set_app(application)
return server
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.exit('Provide a WSGI application object as module:callable')
app_path = sys.argv[1]
module, application = app_path.split(':')
module = __import__(module)
application = getattr(module, application)
httpd = make_server(SERVER_ADDRESS, application)
print('WSGIServer: Serving HTTP on port {port} ...\n'.format(port=PORT))
httpd.serve_forever()<|fim▁end|> | -1, # Wait for any child process
os.WNOHANG # Do not block; return (0, 0) if no child has exited yet
)
print( |
<|file_name|>server-again.py<|end_file_name|><|fim▁begin|>###########################################################################
# Concurrent WSGI server - webserver3h.py #
# #
# Tested with Python 2.7.9 on Ubuntu 14.04 & Mac OS X #
###########################################################################
import errno
import os
import signal
import socket
import StringIO
import sys
def grim_reaper(signum, frame):
<|fim_middle|>
class WSGIServer(object):
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 1024
def __init__(self, server_address):
# Create a listening socket
self.listen_socket = listen_socket = socket.socket(
self.address_family,
self.socket_type
)
# Allow to reuse the same address
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind
listen_socket.bind(server_address)
# Activate
listen_socket.listen(self.request_queue_size)
# Get server host name and port
host, port = self.listen_socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
# Return headers set by Web framework/Web application
self.headers_set = []
def set_app(self, application):
self.application = application
def serve_forever(self):
listen_socket = self.listen_socket
while True:
try:
self.client_connection, client_address = listen_socket.accept()
except IOError as e:
code, msg = e.args
# restart 'accept' if it was interrupted
if code == errno.EINTR:
continue
else:
raise
pid = os.fork()
if pid == 0: # child
listen_socket.close() # close child copy
# Handle one request and close the client connection.
self.handle_one_request()
os._exit(0)
else: # parent
self.client_connection.close() # close parent copy
def handle_one_request(self):
self.request_data = request_data = self.client_connection.recv(1024)
# Print formatted request data a la 'curl -v'
print(''.join(
'< {line}\n'.format(line=line)
for line in request_data.splitlines()
))
self.parse_request(request_data)
# Construct environment dictionary using request data
env = self.get_environ()
# It's time to call our application callable and get
# back a result that will become HTTP response body
result = self.application(env, self.start_response)
# Construct a response and send it back to the client
self.finish_response(result)
def parse_request(self, text):
request_line = text.splitlines()[0]
request_line = request_line.rstrip('\r\n')
# Break down the request line into components
(self.request_method, # GET
self.path, # /hello
self.request_version # HTTP/1.1
) = request_line.split()
def get_environ(self):
env = {}
# The following code snippet does not follow PEP8 conventions
# but it's formatted the way it is for demonstration purposes
# to emphasize the required variables and their values
#
# Required WSGI variables
env['wsgi.version'] = (1, 0)
env['wsgi.url_scheme'] = 'http'
env['wsgi.input'] = StringIO.StringIO(self.request_data)
env['wsgi.errors'] = sys.stderr
env['wsgi.multithread'] = False
env['wsgi.multiprocess'] = False
env['wsgi.run_once'] = False
# Required CGI variables
env['REQUEST_METHOD'] = self.request_method # GET
env['PATH_INFO'] = self.path # /hello
env['SERVER_NAME'] = self.server_name # localhost
env['SERVER_PORT'] = str(self.server_port) # 8888
return env
def start_response(self, status, response_headers, exc_info=None):
# Add necessary server headers
server_headers = [
('Date', 'Tue, 31 Mar 2015 12:54:48 GMT'),
('Server', 'WSGIServer 0.2'),
]
self.headers_set = [status, response_headers + server_headers]
# To adhere to WSGI specification the start_response must return
# a 'write' callable. For simplicity's sake we'll ignore that detail
# for now.
# return self.finish_response
def finish_response(self, result):
try:
status, response_headers = self.headers_set
response = 'HTTP/1.1 {status}\r\n'.format(status=status)
for header in response_headers:
response += '{0}: {1}\r\n'.format(*header)
response += '\r\n'
for data in result:
response += data
# Print formatted response data a la 'curl -v'
print(''.join(
'> {line}\n'.format(line=line)
for line in response.splitlines()
))
self.client_connection.sendall(response)
finally:
self.client_connection.close()
SERVER_ADDRESS = (HOST, PORT) = '', 8888
def make_server(server_address, application):
signal.signal(signal.SIGCHLD, grim_reaper)
server = WSGIServer(server_address)
server.set_app(application)
return server
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.exit('Provide a WSGI application object as module:callable')
app_path = sys.argv[1]
module, application = app_path.split(':')
module = __import__(module)
application = getattr(module, application)
httpd = make_server(SERVER_ADDRESS, application)
print('WSGIServer: Serving HTTP on port {port} ...\n'.format(port=PORT))
httpd.serve_forever()
<|fim▁end|> | while True:
try:
pid, status = os.waitpid(
-1, # Wait for any child process
os.WNOHANG # Do not block; return (0, 0) if no child has exited yet
)
print(
'Child {pid} terminated with status {status}'
'\n'.format(pid=pid, status=status)
)
except OSError:
return
if pid == 0: # no more zombies
return |
<|file_name|>server-again.py<|end_file_name|><|fim▁begin|>###########################################################################
# Concurrent WSGI server - webserver3h.py #
# #
# Tested with Python 2.7.9 on Ubuntu 14.04 & Mac OS X #
###########################################################################
import errno
import os
import signal
import socket
import StringIO
import sys
def grim_reaper(signum, frame):
while True:
try:
pid, status = os.waitpid(
-1, # Wait for any child process
os.WNOHANG # Do not block; return (0, 0) if no child has exited yet
)
print(
'Child {pid} terminated with status {status}'
'\n'.format(pid=pid, status=status)
)
except OSError:
return
if pid == 0: # no more zombies
return
class WSGIServer(object):
<|fim_middle|>
SERVER_ADDRESS = (HOST, PORT) = '', 8888
def make_server(server_address, application):
signal.signal(signal.SIGCHLD, grim_reaper)
server = WSGIServer(server_address)
server.set_app(application)
return server
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.exit('Provide a WSGI application object as module:callable')
app_path = sys.argv[1]
module, application = app_path.split(':')
module = __import__(module)
application = getattr(module, application)
httpd = make_server(SERVER_ADDRESS, application)
print('WSGIServer: Serving HTTP on port {port} ...\n'.format(port=PORT))
httpd.serve_forever()
<|fim▁end|> | address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 1024
def __init__(self, server_address):
# Create a listening socket
self.listen_socket = listen_socket = socket.socket(
self.address_family,
self.socket_type
)
# Allow to reuse the same address
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind
listen_socket.bind(server_address)
# Activate
listen_socket.listen(self.request_queue_size)
# Get server host name and port
host, port = self.listen_socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
# Return headers set by Web framework/Web application
self.headers_set = []
def set_app(self, application):
self.application = application
def serve_forever(self):
listen_socket = self.listen_socket
while True:
try:
self.client_connection, client_address = listen_socket.accept()
except IOError as e:
code, msg = e.args
# restart 'accept' if it was interrupted
if code == errno.EINTR:
continue
else:
raise
pid = os.fork()
if pid == 0: # child
listen_socket.close() # close child copy
# Handle one request and close the client connection.
self.handle_one_request()
os._exit(0)
else: # parent
self.client_connection.close() # close parent copy
def handle_one_request(self):
self.request_data = request_data = self.client_connection.recv(1024)
# Print formatted request data a la 'curl -v'
print(''.join(
'< {line}\n'.format(line=line)
for line in request_data.splitlines()
))
self.parse_request(request_data)
# Construct environment dictionary using request data
env = self.get_environ()
# It's time to call our application callable and get
# back a result that will become HTTP response body
result = self.application(env, self.start_response)
# Construct a response and send it back to the client
self.finish_response(result)
def parse_request(self, text):
request_line = text.splitlines()[0]
request_line = request_line.rstrip('\r\n')
# Break down the request line into components
(self.request_method, # GET
self.path, # /hello
self.request_version # HTTP/1.1
) = request_line.split()
def get_environ(self):
env = {}
# The following code snippet does not follow PEP8 conventions
# but it's formatted the way it is for demonstration purposes
# to emphasize the required variables and their values
#
# Required WSGI variables
env['wsgi.version'] = (1, 0)
env['wsgi.url_scheme'] = 'http'
env['wsgi.input'] = StringIO.StringIO(self.request_data)
env['wsgi.errors'] = sys.stderr
env['wsgi.multithread'] = False
env['wsgi.multiprocess'] = False
env['wsgi.run_once'] = False
# Required CGI variables
env['REQUEST_METHOD'] = self.request_method # GET
env['PATH_INFO'] = self.path # /hello
env['SERVER_NAME'] = self.server_name # localhost
env['SERVER_PORT'] = str(self.server_port) # 8888
return env
def start_response(self, status, response_headers, exc_info=None):
# Add necessary server headers
server_headers = [
('Date', 'Tue, 31 Mar 2015 12:54:48 GMT'),
('Server', 'WSGIServer 0.2'),
]
self.headers_set = [status, response_headers + server_headers]
# To adhere to WSGI specification the start_response must return
# a 'write' callable. For simplicity's sake we'll ignore that detail
# for now.
# return self.finish_response
def finish_response(self, result):
try:
status, response_headers = self.headers_set
response = 'HTTP/1.1 {status}\r\n'.format(status=status)
for header in response_headers:
response += '{0}: {1}\r\n'.format(*header)
response += '\r\n'
for data in result:
response += data
# Print formatted response data a la 'curl -v'
print(''.join(
'> {line}\n'.format(line=line)
for line in response.splitlines()
))
self.client_connection.sendall(response)
finally:
self.client_connection.close() |
<|file_name|>server-again.py<|end_file_name|><|fim▁begin|>###########################################################################
# Concurrent WSGI server - webserver3h.py #
# #
# Tested with Python 2.7.9 on Ubuntu 14.04 & Mac OS X #
###########################################################################
import errno
import os
import signal
import socket
import StringIO
import sys
def grim_reaper(signum, frame):
while True:
try:
pid, status = os.waitpid(
-1, # Wait for any child process
os.WNOHANG # Do not block; return (0, 0) if no child has exited yet
)
print(
'Child {pid} terminated with status {status}'
'\n'.format(pid=pid, status=status)
)
except OSError:
return
if pid == 0: # no more zombies
return
class WSGIServer(object):
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 1024
def __init__(self, server_address):
# Create a listening socket
<|fim_middle|>
def set_app(self, application):
self.application = application
def serve_forever(self):
listen_socket = self.listen_socket
while True:
try:
self.client_connection, client_address = listen_socket.accept()
except IOError as e:
code, msg = e.args
# restart 'accept' if it was interrupted
if code == errno.EINTR:
continue
else:
raise
pid = os.fork()
if pid == 0: # child
listen_socket.close() # close child copy
# Handle one request and close the client connection.
self.handle_one_request()
os._exit(0)
else: # parent
self.client_connection.close() # close parent copy
def handle_one_request(self):
self.request_data = request_data = self.client_connection.recv(1024)
# Print formatted request data a la 'curl -v'
print(''.join(
'< {line}\n'.format(line=line)
for line in request_data.splitlines()
))
self.parse_request(request_data)
# Construct environment dictionary using request data
env = self.get_environ()
# It's time to call our application callable and get
# back a result that will become HTTP response body
result = self.application(env, self.start_response)
# Construct a response and send it back to the client
self.finish_response(result)
def parse_request(self, text):
request_line = text.splitlines()[0]
request_line = request_line.rstrip('\r\n')
# Break down the request line into components
(self.request_method, # GET
self.path, # /hello
self.request_version # HTTP/1.1
) = request_line.split()
def get_environ(self):
env = {}
# The following code snippet does not follow PEP8 conventions
# but it's formatted the way it is for demonstration purposes
# to emphasize the required variables and their values
#
# Required WSGI variables
env['wsgi.version'] = (1, 0)
env['wsgi.url_scheme'] = 'http'
env['wsgi.input'] = StringIO.StringIO(self.request_data)
env['wsgi.errors'] = sys.stderr
env['wsgi.multithread'] = False
env['wsgi.multiprocess'] = False
env['wsgi.run_once'] = False
# Required CGI variables
env['REQUEST_METHOD'] = self.request_method # GET
env['PATH_INFO'] = self.path # /hello
env['SERVER_NAME'] = self.server_name # localhost
env['SERVER_PORT'] = str(self.server_port) # 8888
return env
def start_response(self, status, response_headers, exc_info=None):
# Add necessary server headers
server_headers = [
('Date', 'Tue, 31 Mar 2015 12:54:48 GMT'),
('Server', 'WSGIServer 0.2'),
]
self.headers_set = [status, response_headers + server_headers]
# To adhere to WSGI specification the start_response must return
# a 'write' callable. For simplicity's sake we'll ignore that detail
# for now.
# return self.finish_response
def finish_response(self, result):
try:
status, response_headers = self.headers_set
response = 'HTTP/1.1 {status}\r\n'.format(status=status)
for header in response_headers:
response += '{0}: {1}\r\n'.format(*header)
response += '\r\n'
for data in result:
response += data
# Print formatted response data a la 'curl -v'
print(''.join(
'> {line}\n'.format(line=line)
for line in response.splitlines()
))
self.client_connection.sendall(response)
finally:
self.client_connection.close()
SERVER_ADDRESS = (HOST, PORT) = '', 8888
def make_server(server_address, application):
signal.signal(signal.SIGCHLD, grim_reaper)
server = WSGIServer(server_address)
server.set_app(application)
return server
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.exit('Provide a WSGI application object as module:callable')
app_path = sys.argv[1]
module, application = app_path.split(':')
module = __import__(module)
application = getattr(module, application)
httpd = make_server(SERVER_ADDRESS, application)
print('WSGIServer: Serving HTTP on port {port} ...\n'.format(port=PORT))
httpd.serve_forever()
<|fim▁end|> | self.listen_socket = listen_socket = socket.socket(
self.address_family,
self.socket_type
)
# Allow to reuse the same address
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind
listen_socket.bind(server_address)
# Activate
listen_socket.listen(self.request_queue_size)
# Get server host name and port
host, port = self.listen_socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
# Return headers set by Web framework/Web application
self.headers_set = [] |
<|file_name|>server-again.py<|end_file_name|><|fim▁begin|>###########################################################################
# Concurrent WSGI server - webserver3h.py #
# #
# Tested with Python 2.7.9 on Ubuntu 14.04 & Mac OS X #
###########################################################################
import errno
import os
import signal
import socket
import StringIO
import sys
def grim_reaper(signum, frame):
while True:
try:
pid, status = os.waitpid(
-1, # Wait for any child process
os.WNOHANG # Do not block; return (0, 0) if no child has exited yet
)
print(
'Child {pid} terminated with status {status}'
'\n'.format(pid=pid, status=status)
)
except OSError:
return
if pid == 0: # no more zombies
return
class WSGIServer(object):
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 1024
def __init__(self, server_address):
# Create a listening socket
self.listen_socket = listen_socket = socket.socket(
self.address_family,
self.socket_type
)
# Allow to reuse the same address
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind
listen_socket.bind(server_address)
# Activate
listen_socket.listen(self.request_queue_size)
# Get server host name and port
host, port = self.listen_socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
# Return headers set by Web framework/Web application
self.headers_set = []
def set_app(self, application):
<|fim_middle|>
def serve_forever(self):
listen_socket = self.listen_socket
while True:
try:
self.client_connection, client_address = listen_socket.accept()
except IOError as e:
code, msg = e.args
# restart 'accept' if it was interrupted
if code == errno.EINTR:
continue
else:
raise
pid = os.fork()
if pid == 0: # child
listen_socket.close() # close child copy
# Handle one request and close the client connection.
self.handle_one_request()
os._exit(0)
else: # parent
self.client_connection.close() # close parent copy
def handle_one_request(self):
self.request_data = request_data = self.client_connection.recv(1024)
# Print formatted request data a la 'curl -v'
print(''.join(
'< {line}\n'.format(line=line)
for line in request_data.splitlines()
))
self.parse_request(request_data)
# Construct environment dictionary using request data
env = self.get_environ()
# It's time to call our application callable and get
# back a result that will become HTTP response body
result = self.application(env, self.start_response)
# Construct a response and send it back to the client
self.finish_response(result)
def parse_request(self, text):
request_line = text.splitlines()[0]
request_line = request_line.rstrip('\r\n')
# Break down the request line into components
(self.request_method, # GET
self.path, # /hello
self.request_version # HTTP/1.1
) = request_line.split()
def get_environ(self):
env = {}
# The following code snippet does not follow PEP8 conventions
# but it's formatted the way it is for demonstration purposes
# to emphasize the required variables and their values
#
# Required WSGI variables
env['wsgi.version'] = (1, 0)
env['wsgi.url_scheme'] = 'http'
env['wsgi.input'] = StringIO.StringIO(self.request_data)
env['wsgi.errors'] = sys.stderr
env['wsgi.multithread'] = False
env['wsgi.multiprocess'] = False
env['wsgi.run_once'] = False
# Required CGI variables
env['REQUEST_METHOD'] = self.request_method # GET
env['PATH_INFO'] = self.path # /hello
env['SERVER_NAME'] = self.server_name # localhost
env['SERVER_PORT'] = str(self.server_port) # 8888
return env
def start_response(self, status, response_headers, exc_info=None):
# Add necessary server headers
server_headers = [
('Date', 'Tue, 31 Mar 2015 12:54:48 GMT'),
('Server', 'WSGIServer 0.2'),
]
self.headers_set = [status, response_headers + server_headers]
# To adhere to WSGI specification the start_response must return
# a 'write' callable. For simplicity's sake we'll ignore that detail
# for now.
# return self.finish_response
def finish_response(self, result):
try:
status, response_headers = self.headers_set
response = 'HTTP/1.1 {status}\r\n'.format(status=status)
for header in response_headers:
response += '{0}: {1}\r\n'.format(*header)
response += '\r\n'
for data in result:
response += data
# Print formatted response data a la 'curl -v'
print(''.join(
'> {line}\n'.format(line=line)
for line in response.splitlines()
))
self.client_connection.sendall(response)
finally:
self.client_connection.close()
SERVER_ADDRESS = (HOST, PORT) = '', 8888
def make_server(server_address, application):
signal.signal(signal.SIGCHLD, grim_reaper)
server = WSGIServer(server_address)
server.set_app(application)
return server
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.exit('Provide a WSGI application object as module:callable')
app_path = sys.argv[1]
module, application = app_path.split(':')
module = __import__(module)
application = getattr(module, application)
httpd = make_server(SERVER_ADDRESS, application)
print('WSGIServer: Serving HTTP on port {port} ...\n'.format(port=PORT))
httpd.serve_forever()
<|fim▁end|> | self.application = application |
<|file_name|>server-again.py<|end_file_name|><|fim▁begin|>###########################################################################
# Concurrent WSGI server - webserver3h.py #
# #
# Tested with Python 2.7.9 on Ubuntu 14.04 & Mac OS X #
###########################################################################
import errno
import os
import signal
import socket
import StringIO
import sys
def grim_reaper(signum, frame):
while True:
try:
pid, status = os.waitpid(
-1, # Wait for any child process
os.WNOHANG # Do not block; return (0, 0) if no child has exited yet
)
print(
'Child {pid} terminated with status {status}'
'\n'.format(pid=pid, status=status)
)
except OSError:
return
if pid == 0: # no more zombies
return
class WSGIServer(object):
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 1024
def __init__(self, server_address):
# Create a listening socket
self.listen_socket = listen_socket = socket.socket(
self.address_family,
self.socket_type
)
# Allow to reuse the same address
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind
listen_socket.bind(server_address)
# Activate
listen_socket.listen(self.request_queue_size)
# Get server host name and port
host, port = self.listen_socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
# Return headers set by Web framework/Web application
self.headers_set = []
def set_app(self, application):
self.application = application
def serve_forever(self):
<|fim_middle|>
def handle_one_request(self):
self.request_data = request_data = self.client_connection.recv(1024)
# Print formatted request data a la 'curl -v'
print(''.join(
'< {line}\n'.format(line=line)
for line in request_data.splitlines()
))
self.parse_request(request_data)
# Construct environment dictionary using request data
env = self.get_environ()
# It's time to call our application callable and get
# back a result that will become HTTP response body
result = self.application(env, self.start_response)
# Construct a response and send it back to the client
self.finish_response(result)
def parse_request(self, text):
request_line = text.splitlines()[0]
request_line = request_line.rstrip('\r\n')
# Break down the request line into components
(self.request_method, # GET
self.path, # /hello
self.request_version # HTTP/1.1
) = request_line.split()
def get_environ(self):
env = {}
# The following code snippet does not follow PEP8 conventions
# but it's formatted the way it is for demonstration purposes
# to emphasize the required variables and their values
#
# Required WSGI variables
env['wsgi.version'] = (1, 0)
env['wsgi.url_scheme'] = 'http'
env['wsgi.input'] = StringIO.StringIO(self.request_data)
env['wsgi.errors'] = sys.stderr
env['wsgi.multithread'] = False
env['wsgi.multiprocess'] = False
env['wsgi.run_once'] = False
# Required CGI variables
env['REQUEST_METHOD'] = self.request_method # GET
env['PATH_INFO'] = self.path # /hello
env['SERVER_NAME'] = self.server_name # localhost
env['SERVER_PORT'] = str(self.server_port) # 8888
return env
def start_response(self, status, response_headers, exc_info=None):
# Add necessary server headers
server_headers = [
('Date', 'Tue, 31 Mar 2015 12:54:48 GMT'),
('Server', 'WSGIServer 0.2'),
]
self.headers_set = [status, response_headers + server_headers]
# To adhere to WSGI specification the start_response must return
# a 'write' callable. For simplicity's sake we'll ignore that detail
# for now.
# return self.finish_response
def finish_response(self, result):
try:
status, response_headers = self.headers_set
response = 'HTTP/1.1 {status}\r\n'.format(status=status)
for header in response_headers:
response += '{0}: {1}\r\n'.format(*header)
response += '\r\n'
for data in result:
response += data
# Print formatted response data a la 'curl -v'
print(''.join(
'> {line}\n'.format(line=line)
for line in response.splitlines()
))
self.client_connection.sendall(response)
finally:
self.client_connection.close()
SERVER_ADDRESS = (HOST, PORT) = '', 8888
def make_server(server_address, application):
signal.signal(signal.SIGCHLD, grim_reaper)
server = WSGIServer(server_address)
server.set_app(application)
return server
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.exit('Provide a WSGI application object as module:callable')
app_path = sys.argv[1]
module, application = app_path.split(':')
module = __import__(module)
application = getattr(module, application)
httpd = make_server(SERVER_ADDRESS, application)
print('WSGIServer: Serving HTTP on port {port} ...\n'.format(port=PORT))
httpd.serve_forever()
<|fim▁end|> | listen_socket = self.listen_socket
while True:
try:
self.client_connection, client_address = listen_socket.accept()
except IOError as e:
code, msg = e.args
# restart 'accept' if it was interrupted
if code == errno.EINTR:
continue
else:
raise
pid = os.fork()
if pid == 0: # child
listen_socket.close() # close child copy
# Handle one request and close the client connection.
self.handle_one_request()
os._exit(0)
else: # parent
self.client_connection.close() # close parent copy |
<|file_name|>server-again.py<|end_file_name|><|fim▁begin|>###########################################################################
# Concurrent WSGI server - webserver3h.py #
# #
# Tested with Python 2.7.9 on Ubuntu 14.04 & Mac OS X #
###########################################################################
import errno
import os
import signal
import socket
import StringIO
import sys
def grim_reaper(signum, frame):
while True:
try:
pid, status = os.waitpid(
-1, # Wait for any child process
os.WNOHANG # Do not block; return (0, 0) if no child has exited yet
)
print(
'Child {pid} terminated with status {status}'
'\n'.format(pid=pid, status=status)
)
except OSError:
return
if pid == 0: # no more zombies
return
class WSGIServer(object):
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 1024
def __init__(self, server_address):
# Create a listening socket
self.listen_socket = listen_socket = socket.socket(
self.address_family,
self.socket_type
)
# Allow to reuse the same address
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind
listen_socket.bind(server_address)
# Activate
listen_socket.listen(self.request_queue_size)
# Get server host name and port
host, port = self.listen_socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
# Return headers set by Web framework/Web application
self.headers_set = []
def set_app(self, application):
self.application = application
def serve_forever(self):
listen_socket = self.listen_socket
while True:
try:
self.client_connection, client_address = listen_socket.accept()
except IOError as e:
code, msg = e.args
# restart 'accept' if it was interrupted
if code == errno.EINTR:
continue
else:
raise
pid = os.fork()
if pid == 0: # child
listen_socket.close() # close child copy
# Handle one request and close the client connection.
self.handle_one_request()
os._exit(0)
else: # parent
self.client_connection.close() # close parent copy
def handle_one_request(self):
<|fim_middle|>
def parse_request(self, text):
request_line = text.splitlines()[0]
request_line = request_line.rstrip('\r\n')
# Break down the request line into components
(self.request_method, # GET
self.path, # /hello
self.request_version # HTTP/1.1
) = request_line.split()
def get_environ(self):
env = {}
# The following code snippet does not follow PEP8 conventions
# but it's formatted the way it is for demonstration purposes
# to emphasize the required variables and their values
#
# Required WSGI variables
env['wsgi.version'] = (1, 0)
env['wsgi.url_scheme'] = 'http'
env['wsgi.input'] = StringIO.StringIO(self.request_data)
env['wsgi.errors'] = sys.stderr
env['wsgi.multithread'] = False
env['wsgi.multiprocess'] = False
env['wsgi.run_once'] = False
# Required CGI variables
env['REQUEST_METHOD'] = self.request_method # GET
env['PATH_INFO'] = self.path # /hello
env['SERVER_NAME'] = self.server_name # localhost
env['SERVER_PORT'] = str(self.server_port) # 8888
return env
def start_response(self, status, response_headers, exc_info=None):
# Add necessary server headers
server_headers = [
('Date', 'Tue, 31 Mar 2015 12:54:48 GMT'),
('Server', 'WSGIServer 0.2'),
]
self.headers_set = [status, response_headers + server_headers]
# To adhere to WSGI specification the start_response must return
# a 'write' callable. For simplicity's sake we'll ignore that detail
# for now.
# return self.finish_response
def finish_response(self, result):
try:
status, response_headers = self.headers_set
response = 'HTTP/1.1 {status}\r\n'.format(status=status)
for header in response_headers:
response += '{0}: {1}\r\n'.format(*header)
response += '\r\n'
for data in result:
response += data
# Print formatted response data a la 'curl -v'
print(''.join(
'> {line}\n'.format(line=line)
for line in response.splitlines()
))
self.client_connection.sendall(response)
finally:
self.client_connection.close()
SERVER_ADDRESS = (HOST, PORT) = '', 8888
def make_server(server_address, application):
signal.signal(signal.SIGCHLD, grim_reaper)
server = WSGIServer(server_address)
server.set_app(application)
return server
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.exit('Provide a WSGI application object as module:callable')
app_path = sys.argv[1]
module, application = app_path.split(':')
module = __import__(module)
application = getattr(module, application)
httpd = make_server(SERVER_ADDRESS, application)
print('WSGIServer: Serving HTTP on port {port} ...\n'.format(port=PORT))
httpd.serve_forever()
<|fim▁end|> | self.request_data = request_data = self.client_connection.recv(1024)
# Print formatted request data a la 'curl -v'
print(''.join(
'< {line}\n'.format(line=line)
for line in request_data.splitlines()
))
self.parse_request(request_data)
# Construct environment dictionary using request data
env = self.get_environ()
# It's time to call our application callable and get
# back a result that will become HTTP response body
result = self.application(env, self.start_response)
# Construct a response and send it back to the client
self.finish_response(result) |
<|file_name|>server-again.py<|end_file_name|><|fim▁begin|>###########################################################################
# Concurrent WSGI server - webserver3h.py #
# #
# Tested with Python 2.7.9 on Ubuntu 14.04 & Mac OS X #
###########################################################################
import errno
import os
import signal
import socket
import StringIO
import sys
def grim_reaper(signum, frame):
while True:
try:
pid, status = os.waitpid(
-1, # Wait for any child process
os.WNOHANG # Do not block; return (0, 0) if no child has exited yet
)
print(
'Child {pid} terminated with status {status}'
'\n'.format(pid=pid, status=status)
)
except OSError:
return
if pid == 0: # no more zombies
return
class WSGIServer(object):
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 1024
def __init__(self, server_address):
# Create a listening socket
self.listen_socket = listen_socket = socket.socket(
self.address_family,
self.socket_type
)
# Allow to reuse the same address
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind
listen_socket.bind(server_address)
# Activate
listen_socket.listen(self.request_queue_size)
# Get server host name and port
host, port = self.listen_socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
# Return headers set by Web framework/Web application
self.headers_set = []
def set_app(self, application):
self.application = application
def serve_forever(self):
listen_socket = self.listen_socket
while True:
try:
self.client_connection, client_address = listen_socket.accept()
except IOError as e:
code, msg = e.args
# restart 'accept' if it was interrupted
if code == errno.EINTR:
continue
else:
raise
pid = os.fork()
if pid == 0: # child
listen_socket.close() # close child copy
# Handle one request and close the client connection.
self.handle_one_request()
os._exit(0)
else: # parent
self.client_connection.close() # close parent copy
def handle_one_request(self):
self.request_data = request_data = self.client_connection.recv(1024)
# Print formatted request data a la 'curl -v'
print(''.join(
'< {line}\n'.format(line=line)
for line in request_data.splitlines()
))
self.parse_request(request_data)
# Construct environment dictionary using request data
env = self.get_environ()
# It's time to call our application callable and get
# back a result that will become HTTP response body
result = self.application(env, self.start_response)
# Construct a response and send it back to the client
self.finish_response(result)
def parse_request(self, text):
<|fim_middle|>
def get_environ(self):
env = {}
# The following code snippet does not follow PEP8 conventions
# but it's formatted the way it is for demonstration purposes
# to emphasize the required variables and their values
#
# Required WSGI variables
env['wsgi.version'] = (1, 0)
env['wsgi.url_scheme'] = 'http'
env['wsgi.input'] = StringIO.StringIO(self.request_data)
env['wsgi.errors'] = sys.stderr
env['wsgi.multithread'] = False
env['wsgi.multiprocess'] = False
env['wsgi.run_once'] = False
# Required CGI variables
env['REQUEST_METHOD'] = self.request_method # GET
env['PATH_INFO'] = self.path # /hello
env['SERVER_NAME'] = self.server_name # localhost
env['SERVER_PORT'] = str(self.server_port) # 8888
return env
def start_response(self, status, response_headers, exc_info=None):
# Add necessary server headers
server_headers = [
('Date', 'Tue, 31 Mar 2015 12:54:48 GMT'),
('Server', 'WSGIServer 0.2'),
]
self.headers_set = [status, response_headers + server_headers]
# To adhere to WSGI specification the start_response must return
# a 'write' callable. For simplicity's sake we'll ignore that detail
# for now.
# return self.finish_response
def finish_response(self, result):
try:
status, response_headers = self.headers_set
response = 'HTTP/1.1 {status}\r\n'.format(status=status)
for header in response_headers:
response += '{0}: {1}\r\n'.format(*header)
response += '\r\n'
for data in result:
response += data
# Print formatted response data a la 'curl -v'
print(''.join(
'> {line}\n'.format(line=line)
for line in response.splitlines()
))
self.client_connection.sendall(response)
finally:
self.client_connection.close()
SERVER_ADDRESS = (HOST, PORT) = '', 8888
def make_server(server_address, application):
signal.signal(signal.SIGCHLD, grim_reaper)
server = WSGIServer(server_address)
server.set_app(application)
return server
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.exit('Provide a WSGI application object as module:callable')
app_path = sys.argv[1]
module, application = app_path.split(':')
module = __import__(module)
application = getattr(module, application)
httpd = make_server(SERVER_ADDRESS, application)
print('WSGIServer: Serving HTTP on port {port} ...\n'.format(port=PORT))
httpd.serve_forever()
<|fim▁end|> | request_line = text.splitlines()[0]
request_line = request_line.rstrip('\r\n')
# Break down the request line into components
(self.request_method, # GET
self.path, # /hello
self.request_version # HTTP/1.1
) = request_line.split() |
<|file_name|>server-again.py<|end_file_name|><|fim▁begin|>###########################################################################
# Concurrent WSGI server - webserver3h.py #
# #
# Tested with Python 2.7.9 on Ubuntu 14.04 & Mac OS X #
###########################################################################
import errno
import os
import signal
import socket
import StringIO
import sys
def grim_reaper(signum, frame):
while True:
try:
pid, status = os.waitpid(
-1, # Wait for any child process
os.WNOHANG # Do not block; return (0, 0) if no child has exited yet
)
print(
'Child {pid} terminated with status {status}'
'\n'.format(pid=pid, status=status)
)
except OSError:
return
if pid == 0: # no more zombies
return
class WSGIServer(object):
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 1024
def __init__(self, server_address):
# Create a listening socket
self.listen_socket = listen_socket = socket.socket(
self.address_family,
self.socket_type
)
# Allow to reuse the same address
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind
listen_socket.bind(server_address)
# Activate
listen_socket.listen(self.request_queue_size)
# Get server host name and port
host, port = self.listen_socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
# Return headers set by Web framework/Web application
self.headers_set = []
def set_app(self, application):
self.application = application
def serve_forever(self):
listen_socket = self.listen_socket
while True:
try:
self.client_connection, client_address = listen_socket.accept()
except IOError as e:
code, msg = e.args
# restart 'accept' if it was interrupted
if code == errno.EINTR:
continue
else:
raise
pid = os.fork()
if pid == 0: # child
listen_socket.close() # close child copy
# Handle one request and close the client connection.
self.handle_one_request()
os._exit(0)
else: # parent
self.client_connection.close() # close parent copy
def handle_one_request(self):
self.request_data = request_data = self.client_connection.recv(1024)
# Print formatted request data a la 'curl -v'
print(''.join(
'< {line}\n'.format(line=line)
for line in request_data.splitlines()
))
self.parse_request(request_data)
# Construct environment dictionary using request data
env = self.get_environ()
# It's time to call our application callable and get
# back a result that will become HTTP response body
result = self.application(env, self.start_response)
# Construct a response and send it back to the client
self.finish_response(result)
def parse_request(self, text):
request_line = text.splitlines()[0]
request_line = request_line.rstrip('\r\n')
# Break down the request line into components
(self.request_method, # GET
self.path, # /hello
self.request_version # HTTP/1.1
) = request_line.split()
def get_environ(self):
<|fim_middle|>
def start_response(self, status, response_headers, exc_info=None):
# Add necessary server headers
server_headers = [
('Date', 'Tue, 31 Mar 2015 12:54:48 GMT'),
('Server', 'WSGIServer 0.2'),
]
self.headers_set = [status, response_headers + server_headers]
# To adhere to WSGI specification the start_response must return
# a 'write' callable. For simplicity's sake we'll ignore that detail
# for now.
# return self.finish_response
def finish_response(self, result):
try:
status, response_headers = self.headers_set
response = 'HTTP/1.1 {status}\r\n'.format(status=status)
for header in response_headers:
response += '{0}: {1}\r\n'.format(*header)
response += '\r\n'
for data in result:
response += data
# Print formatted response data a la 'curl -v'
print(''.join(
'> {line}\n'.format(line=line)
for line in response.splitlines()
))
self.client_connection.sendall(response)
finally:
self.client_connection.close()
SERVER_ADDRESS = (HOST, PORT) = '', 8888
def make_server(server_address, application):
signal.signal(signal.SIGCHLD, grim_reaper)
server = WSGIServer(server_address)
server.set_app(application)
return server
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.exit('Provide a WSGI application object as module:callable')
app_path = sys.argv[1]
module, application = app_path.split(':')
module = __import__(module)
application = getattr(module, application)
httpd = make_server(SERVER_ADDRESS, application)
print('WSGIServer: Serving HTTP on port {port} ...\n'.format(port=PORT))
httpd.serve_forever()
<|fim▁end|> | env = {}
# The following code snippet does not follow PEP8 conventions
# but it's formatted the way it is for demonstration purposes
# to emphasize the required variables and their values
#
# Required WSGI variables
env['wsgi.version'] = (1, 0)
env['wsgi.url_scheme'] = 'http'
env['wsgi.input'] = StringIO.StringIO(self.request_data)
env['wsgi.errors'] = sys.stderr
env['wsgi.multithread'] = False
env['wsgi.multiprocess'] = False
env['wsgi.run_once'] = False
# Required CGI variables
env['REQUEST_METHOD'] = self.request_method # GET
env['PATH_INFO'] = self.path # /hello
env['SERVER_NAME'] = self.server_name # localhost
env['SERVER_PORT'] = str(self.server_port) # 8888
return env |
<|file_name|>server-again.py<|end_file_name|><|fim▁begin|>###########################################################################
# Concurrent WSGI server - webserver3h.py #
# #
# Tested with Python 2.7.9 on Ubuntu 14.04 & Mac OS X #
###########################################################################
import errno
import os
import signal
import socket
import StringIO
import sys
def grim_reaper(signum, frame):
while True:
try:
pid, status = os.waitpid(
-1, # Wait for any child process
os.WNOHANG # Do not block; return (0, 0) if no child has exited yet
)
print(
'Child {pid} terminated with status {status}'
'\n'.format(pid=pid, status=status)
)
except OSError:
return
if pid == 0: # no more zombies
return
class WSGIServer(object):
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 1024
def __init__(self, server_address):
# Create a listening socket
self.listen_socket = listen_socket = socket.socket(
self.address_family,
self.socket_type
)
# Allow to reuse the same address
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind
listen_socket.bind(server_address)
# Activate
listen_socket.listen(self.request_queue_size)
# Get server host name and port
host, port = self.listen_socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
# Return headers set by Web framework/Web application
self.headers_set = []
def set_app(self, application):
self.application = application
def serve_forever(self):
listen_socket = self.listen_socket
while True:
try:
self.client_connection, client_address = listen_socket.accept()
except IOError as e:
code, msg = e.args
# restart 'accept' if it was interrupted
if code == errno.EINTR:
continue
else:
raise
pid = os.fork()
if pid == 0: # child
listen_socket.close() # close child copy
# Handle one request and close the client connection.
self.handle_one_request()
os._exit(0)
else: # parent
self.client_connection.close() # close parent copy
def handle_one_request(self):
self.request_data = request_data = self.client_connection.recv(1024)
# Print formatted request data a la 'curl -v'
print(''.join(
'< {line}\n'.format(line=line)
for line in request_data.splitlines()
))
self.parse_request(request_data)
# Construct environment dictionary using request data
env = self.get_environ()
# It's time to call our application callable and get
# back a result that will become HTTP response body
result = self.application(env, self.start_response)
# Construct a response and send it back to the client
self.finish_response(result)
def parse_request(self, text):
request_line = text.splitlines()[0]
request_line = request_line.rstrip('\r\n')
# Break down the request line into components
(self.request_method, # GET
self.path, # /hello
self.request_version # HTTP/1.1
) = request_line.split()
def get_environ(self):
env = {}
# The following code snippet does not follow PEP8 conventions
# but it's formatted the way it is for demonstration purposes
# to emphasize the required variables and their values
#
# Required WSGI variables
env['wsgi.version'] = (1, 0)
env['wsgi.url_scheme'] = 'http'
env['wsgi.input'] = StringIO.StringIO(self.request_data)
env['wsgi.errors'] = sys.stderr
env['wsgi.multithread'] = False
env['wsgi.multiprocess'] = False
env['wsgi.run_once'] = False
# Required CGI variables
env['REQUEST_METHOD'] = self.request_method # GET
env['PATH_INFO'] = self.path # /hello
env['SERVER_NAME'] = self.server_name # localhost
env['SERVER_PORT'] = str(self.server_port) # 8888
return env
def start_response(self, status, response_headers, exc_info=None):
# Add necessary server headers
<|fim_middle|>
def finish_response(self, result):
try:
status, response_headers = self.headers_set
response = 'HTTP/1.1 {status}\r\n'.format(status=status)
for header in response_headers:
response += '{0}: {1}\r\n'.format(*header)
response += '\r\n'
for data in result:
response += data
# Print formatted response data a la 'curl -v'
print(''.join(
'> {line}\n'.format(line=line)
for line in response.splitlines()
))
self.client_connection.sendall(response)
finally:
self.client_connection.close()
SERVER_ADDRESS = (HOST, PORT) = '', 8888
def make_server(server_address, application):
signal.signal(signal.SIGCHLD, grim_reaper)
server = WSGIServer(server_address)
server.set_app(application)
return server
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.exit('Provide a WSGI application object as module:callable')
app_path = sys.argv[1]
module, application = app_path.split(':')
module = __import__(module)
application = getattr(module, application)
httpd = make_server(SERVER_ADDRESS, application)
print('WSGIServer: Serving HTTP on port {port} ...\n'.format(port=PORT))
httpd.serve_forever()
<|fim▁end|> | server_headers = [
('Date', 'Tue, 31 Mar 2015 12:54:48 GMT'),
('Server', 'WSGIServer 0.2'),
]
self.headers_set = [status, response_headers + server_headers]
# To adhere to WSGI specification the start_response must return
# a 'write' callable. For simplicity's sake we'll ignore that detail
# for now.
# return self.finish_response |
<|file_name|>server-again.py<|end_file_name|><|fim▁begin|>###########################################################################
# Concurrent WSGI server - webserver3h.py #
# #
# Tested with Python 2.7.9 on Ubuntu 14.04 & Mac OS X #
###########################################################################
import errno
import os
import signal
import socket
import StringIO
import sys
def grim_reaper(signum, frame):
while True:
try:
pid, status = os.waitpid(
-1, # Wait for any child process
os.WNOHANG # Do not block; return (0, 0) if no child has exited yet
)
print(
'Child {pid} terminated with status {status}'
'\n'.format(pid=pid, status=status)
)
except OSError:
return
if pid == 0: # no more zombies
return
class WSGIServer(object):
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 1024
def __init__(self, server_address):
# Create a listening socket
self.listen_socket = listen_socket = socket.socket(
self.address_family,
self.socket_type
)
# Allow to reuse the same address
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind
listen_socket.bind(server_address)
# Activate
listen_socket.listen(self.request_queue_size)
# Get server host name and port
host, port = self.listen_socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
# Return headers set by Web framework/Web application
self.headers_set = []
def set_app(self, application):
self.application = application
def serve_forever(self):
listen_socket = self.listen_socket
while True:
try:
self.client_connection, client_address = listen_socket.accept()
except IOError as e:
code, msg = e.args
# restart 'accept' if it was interrupted
if code == errno.EINTR:
continue
else:
raise
pid = os.fork()
if pid == 0: # child
listen_socket.close() # close child copy
# Handle one request and close the client connection.
self.handle_one_request()
os._exit(0)
else: # parent
self.client_connection.close() # close parent copy
def handle_one_request(self):
self.request_data = request_data = self.client_connection.recv(1024)
# Print formatted request data a la 'curl -v'
print(''.join(
'< {line}\n'.format(line=line)
for line in request_data.splitlines()
))
self.parse_request(request_data)
# Construct environment dictionary using request data
env = self.get_environ()
# It's time to call our application callable and get
# back a result that will become HTTP response body
result = self.application(env, self.start_response)
# Construct a response and send it back to the client
self.finish_response(result)
def parse_request(self, text):
request_line = text.splitlines()[0]
request_line = request_line.rstrip('\r\n')
# Break down the request line into components
(self.request_method, # GET
self.path, # /hello
self.request_version # HTTP/1.1
) = request_line.split()
def get_environ(self):
env = {}
# The following code snippet does not follow PEP8 conventions
# but it's formatted the way it is for demonstration purposes
# to emphasize the required variables and their values
#
# Required WSGI variables
env['wsgi.version'] = (1, 0)
env['wsgi.url_scheme'] = 'http'
env['wsgi.input'] = StringIO.StringIO(self.request_data)
env['wsgi.errors'] = sys.stderr
env['wsgi.multithread'] = False
env['wsgi.multiprocess'] = False
env['wsgi.run_once'] = False
# Required CGI variables
env['REQUEST_METHOD'] = self.request_method # GET
env['PATH_INFO'] = self.path # /hello
env['SERVER_NAME'] = self.server_name # localhost
env['SERVER_PORT'] = str(self.server_port) # 8888
return env
def start_response(self, status, response_headers, exc_info=None):
# Add necessary server headers
server_headers = [
('Date', 'Tue, 31 Mar 2015 12:54:48 GMT'),
('Server', 'WSGIServer 0.2'),
]
self.headers_set = [status, response_headers + server_headers]
# To adhere to the WSGI specification, start_response must return
# a 'write' callable. For simplicity's sake we'll ignore that detail
# for now.
# return self.finish_response
def finish_response(self, result):
<|fim_middle|>
SERVER_ADDRESS = (HOST, PORT) = '', 8888
def make_server(server_address, application):
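# Handle SIGCHLD so terminated child processes are reaped by grim_reaper (no zombies)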
signal.signal(signal.SIGCHLD, grim_reaper)
server = WSGIServer(server_address)
server.set_app(application)
return server
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.exit('Provide a WSGI application object as module:callable')
app_path = sys.argv[1]
module, application = app_path.split(':')
module = __import__(module)
application = getattr(module, application)
httpd = make_server(SERVER_ADDRESS, application)
print('WSGIServer: Serving HTTP on port {port} ...\n'.format(port=PORT))
httpd.serve_forever()
<|fim▁end|> | try:
status, response_headers = self.headers_set
response = 'HTTP/1.1 {status}\r\n'.format(status=status)
for header in response_headers:
response += '{0}: {1}\r\n'.format(*header)
response += '\r\n'
for data in result:
response += data
# Print formatted response data a la 'curl -v'
print(''.join(
'> {line}\n'.format(line=line)
for line in response.splitlines()
))
self.client_connection.sendall(response)
finally:
self.client_connection.close() |
<|file_name|>server-again.py<|end_file_name|><|fim▁begin|>###########################################################################
# Concurrent WSGI server - webserver3h.py #
# #
# Tested with Python 2.7.9 on Ubuntu 14.04 & Mac OS X #
###########################################################################
import errno
import os
import signal
import socket
import StringIO
import sys
def grim_reaper(signum, frame):
while True:
try:
pid, status = os.waitpid(
-1, # Wait for any child process
os.WNOHANG # Do not block; return immediately if no child has exited
)
print(
'Child {pid} terminated with status {status}'
'\n'.format(pid=pid, status=status)
)
except OSError:
return
if pid == 0: # no more zombies
return
class WSGIServer(object):
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 1024
def __init__(self, server_address):
# Create a listening socket
self.listen_socket = listen_socket = socket.socket(
self.address_family,
self.socket_type
)
# Allow reuse of the same address
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind
listen_socket.bind(server_address)
# Activate
listen_socket.listen(self.request_queue_size)
# Get server host name and port
host, port = self.listen_socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
# Return headers set by Web framework/Web application
self.headers_set = []
def set_app(self, application):
self.application = application
def serve_forever(self):
listen_socket = self.listen_socket
while True:
try:
self.client_connection, client_address = listen_socket.accept()
except IOError as e:
code, msg = e.args
# restart 'accept' if it was interrupted
if code == errno.EINTR:
continue
else:
raise
pid = os.fork()
if pid == 0: # child
listen_socket.close() # close child copy
# Handle one request and close the client connection.
self.handle_one_request()
os._exit(0)
else: # parent
self.client_connection.close() # close parent copy
def handle_one_request(self):
self.request_data = request_data = self.client_connection.recv(1024)
# Print formatted request data a la 'curl -v'
print(''.join(
'< {line}\n'.format(line=line)
for line in request_data.splitlines()
))
self.parse_request(request_data)
# Construct environment dictionary using request data
env = self.get_environ()
# It's time to call our application callable and get
# back a result that will become HTTP response body
result = self.application(env, self.start_response)
# Construct a response and send it back to the client
self.finish_response(result)
def parse_request(self, text):
request_line = text.splitlines()[0]
request_line = request_line.rstrip('\r\n')
# Break down the request line into components
(self.request_method, # GET
self.path, # /hello
self.request_version # HTTP/1.1
) = request_line.split()
def get_environ(self):
env = {}
# The following code snippet does not follow PEP8 conventions
# but it's formatted the way it is for demonstration purposes
# to emphasize the required variables and their values
#
# Required WSGI variables
env['wsgi.version'] = (1, 0)
env['wsgi.url_scheme'] = 'http'
env['wsgi.input'] = StringIO.StringIO(self.request_data)
env['wsgi.errors'] = sys.stderr
env['wsgi.multithread'] = False
env['wsgi.multiprocess'] = False
env['wsgi.run_once'] = False
# Required CGI variables
env['REQUEST_METHOD'] = self.request_method # GET
env['PATH_INFO'] = self.path # /hello
env['SERVER_NAME'] = self.server_name # localhost
env['SERVER_PORT'] = str(self.server_port) # 8888
return env
def start_response(self, status, response_headers, exc_info=None):
# Add necessary server headers
server_headers = [
('Date', 'Tue, 31 Mar 2015 12:54:48 GMT'),
('Server', 'WSGIServer 0.2'),
]
self.headers_set = [status, response_headers + server_headers]
# To adhere to the WSGI specification, start_response must return
# a 'write' callable. For simplicity's sake we'll ignore that detail
# for now.
# return self.finish_response
def finish_response(self, result):
try:
status, response_headers = self.headers_set
response = 'HTTP/1.1 {status}\r\n'.format(status=status)
for header in response_headers:
response += '{0}: {1}\r\n'.format(*header)
response += '\r\n'
for data in result:
response += data
# Print formatted response data a la 'curl -v'
print(''.join(
'> {line}\n'.format(line=line)
for line in response.splitlines()
))
self.client_connection.sendall(response)
finally:
self.client_connection.close()
SERVER_ADDRESS = (HOST, PORT) = '', 8888
def make_server(server_address, application):
<|fim_middle|>
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.exit('Provide a WSGI application object as module:callable')
app_path = sys.argv[1]
module, application = app_path.split(':')
module = __import__(module)
application = getattr(module, application)
httpd = make_server(SERVER_ADDRESS, application)
print('WSGIServer: Serving HTTP on port {port} ...\n'.format(port=PORT))
httpd.serve_forever()
<|fim▁end|> | signal.signal(signal.SIGCHLD, grim_reaper)
server = WSGIServer(server_address)
server.set_app(application)
return server |
<|file_name|>server-again.py<|end_file_name|><|fim▁begin|>###########################################################################
# Concurrent WSGI server - webserver3h.py #
# #
# Tested with Python 2.7.9 on Ubuntu 14.04 & Mac OS X #
###########################################################################
import errno
import os
import signal
import socket
import StringIO
import sys
def grim_reaper(signum, frame):
while True:
try:
pid, status = os.waitpid(
-1, # Wait for any child process
os.WNOHANG # Do not block; return immediately if no child has exited
)
print(
'Child {pid} terminated with status {status}'
'\n'.format(pid=pid, status=status)
)
except OSError:
return
if pid == 0: # no more zombies
<|fim_middle|>
class WSGIServer(object):
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 1024
def __init__(self, server_address):
# Create a listening socket
self.listen_socket = listen_socket = socket.socket(
self.address_family,
self.socket_type
)
# Allow reuse of the same address
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind
listen_socket.bind(server_address)
# Activate
listen_socket.listen(self.request_queue_size)
# Get server host name and port
host, port = self.listen_socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
# Return headers set by Web framework/Web application
self.headers_set = []
def set_app(self, application):
self.application = application
def serve_forever(self):
listen_socket = self.listen_socket
while True:
try:
self.client_connection, client_address = listen_socket.accept()
except IOError as e:
code, msg = e.args
# restart 'accept' if it was interrupted
if code == errno.EINTR:
continue
else:
raise
pid = os.fork()
if pid == 0: # child
listen_socket.close() # close child copy
# Handle one request and close the client connection.
self.handle_one_request()
os._exit(0)
else: # parent
self.client_connection.close() # close parent copy
def handle_one_request(self):
self.request_data = request_data = self.client_connection.recv(1024)
# Print formatted request data a la 'curl -v'
print(''.join(
'< {line}\n'.format(line=line)
for line in request_data.splitlines()
))
self.parse_request(request_data)
# Construct environment dictionary using request data
env = self.get_environ()
# It's time to call our application callable and get
# back a result that will become HTTP response body
result = self.application(env, self.start_response)
# Construct a response and send it back to the client
self.finish_response(result)
def parse_request(self, text):
request_line = text.splitlines()[0]
request_line = request_line.rstrip('\r\n')
# Break down the request line into components
(self.request_method, # GET
self.path, # /hello
self.request_version # HTTP/1.1
) = request_line.split()
def get_environ(self):
env = {}
# The following code snippet does not follow PEP8 conventions
# but it's formatted the way it is for demonstration purposes
# to emphasize the required variables and their values
#
# Required WSGI variables
env['wsgi.version'] = (1, 0)
env['wsgi.url_scheme'] = 'http'
env['wsgi.input'] = StringIO.StringIO(self.request_data)
env['wsgi.errors'] = sys.stderr
env['wsgi.multithread'] = False
env['wsgi.multiprocess'] = False
env['wsgi.run_once'] = False
# Required CGI variables
env['REQUEST_METHOD'] = self.request_method # GET
env['PATH_INFO'] = self.path # /hello
env['SERVER_NAME'] = self.server_name # localhost
env['SERVER_PORT'] = str(self.server_port) # 8888
return env
def start_response(self, status, response_headers, exc_info=None):
# Add necessary server headers
server_headers = [
('Date', 'Tue, 31 Mar 2015 12:54:48 GMT'),
('Server', 'WSGIServer 0.2'),
]
self.headers_set = [status, response_headers + server_headers]
# To adhere to the WSGI specification, start_response must return
# a 'write' callable. For simplicity's sake we'll ignore that detail
# for now.
# return self.finish_response
def finish_response(self, result):
try:
status, response_headers = self.headers_set
response = 'HTTP/1.1 {status}\r\n'.format(status=status)
for header in response_headers:
response += '{0}: {1}\r\n'.format(*header)
response += '\r\n'
for data in result:
response += data
# Print formatted response data a la 'curl -v'
print(''.join(
'> {line}\n'.format(line=line)
for line in response.splitlines()
))
self.client_connection.sendall(response)
finally:
self.client_connection.close()
SERVER_ADDRESS = (HOST, PORT) = '', 8888
def make_server(server_address, application):
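# Handle SIGCHLD so terminated child processes are reaped by grim_reaper (no zombies)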
signal.signal(signal.SIGCHLD, grim_reaper)
server = WSGIServer(server_address)
server.set_app(application)
return server
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.exit('Provide a WSGI application object as module:callable')
app_path = sys.argv[1]
module, application = app_path.split(':')
module = __import__(module)
application = getattr(module, application)
httpd = make_server(SERVER_ADDRESS, application)
print('WSGIServer: Serving HTTP on port {port} ...\n'.format(port=PORT))
httpd.serve_forever()
<|fim▁end|> | return |
<|file_name|>server-again.py<|end_file_name|><|fim▁begin|>###########################################################################
# Concurrent WSGI server - webserver3h.py #
# #
# Tested with Python 2.7.9 on Ubuntu 14.04 & Mac OS X #
###########################################################################
import errno
import os
import signal
import socket
import StringIO
import sys
def grim_reaper(signum, frame):
while True:
try:
pid, status = os.waitpid(
-1, # Wait for any child process
os.WNOHANG # Do not block; return immediately if no child has exited
)
print(
'Child {pid} terminated with status {status}'
'\n'.format(pid=pid, status=status)
)
except OSError:
return
if pid == 0: # no more zombies
return
class WSGIServer(object):
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 1024
def __init__(self, server_address):
# Create a listening socket
self.listen_socket = listen_socket = socket.socket(
self.address_family,
self.socket_type
)
# Allow reuse of the same address
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind
listen_socket.bind(server_address)
# Activate
listen_socket.listen(self.request_queue_size)
# Get server host name and port
host, port = self.listen_socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
# Return headers set by Web framework/Web application
self.headers_set = []
def set_app(self, application):
self.application = application
def serve_forever(self):
listen_socket = self.listen_socket
while True:
try:
self.client_connection, client_address = listen_socket.accept()
except IOError as e:
code, msg = e.args
# restart 'accept' if it was interrupted
if code == errno.EINTR:
<|fim_middle|>
else:
raise
pid = os.fork()
if pid == 0: # child
listen_socket.close() # close child copy
# Handle one request and close the client connection.
self.handle_one_request()
os._exit(0)
else: # parent
self.client_connection.close() # close parent copy
def handle_one_request(self):
self.request_data = request_data = self.client_connection.recv(1024)
# Print formatted request data a la 'curl -v'
print(''.join(
'< {line}\n'.format(line=line)
for line in request_data.splitlines()
))
self.parse_request(request_data)
# Construct environment dictionary using request data
env = self.get_environ()
# It's time to call our application callable and get
# back a result that will become HTTP response body
result = self.application(env, self.start_response)
# Construct a response and send it back to the client
self.finish_response(result)
def parse_request(self, text):
request_line = text.splitlines()[0]
request_line = request_line.rstrip('\r\n')
# Break down the request line into components
(self.request_method, # GET
self.path, # /hello
self.request_version # HTTP/1.1
) = request_line.split()
def get_environ(self):
env = {}
# The following code snippet does not follow PEP8 conventions
# but it's formatted the way it is for demonstration purposes
# to emphasize the required variables and their values
#
# Required WSGI variables
env['wsgi.version'] = (1, 0)
env['wsgi.url_scheme'] = 'http'
env['wsgi.input'] = StringIO.StringIO(self.request_data)
env['wsgi.errors'] = sys.stderr
env['wsgi.multithread'] = False
env['wsgi.multiprocess'] = False
env['wsgi.run_once'] = False
# Required CGI variables
env['REQUEST_METHOD'] = self.request_method # GET
env['PATH_INFO'] = self.path # /hello
env['SERVER_NAME'] = self.server_name # localhost
env['SERVER_PORT'] = str(self.server_port) # 8888
return env
def start_response(self, status, response_headers, exc_info=None):
# Add necessary server headers
server_headers = [
('Date', 'Tue, 31 Mar 2015 12:54:48 GMT'),
('Server', 'WSGIServer 0.2'),
]
self.headers_set = [status, response_headers + server_headers]
# To adhere to the WSGI specification, start_response must return
# a 'write' callable. For simplicity's sake we'll ignore that detail
# for now.
# return self.finish_response
def finish_response(self, result):
try:
status, response_headers = self.headers_set
response = 'HTTP/1.1 {status}\r\n'.format(status=status)
for header in response_headers:
response += '{0}: {1}\r\n'.format(*header)
response += '\r\n'
for data in result:
response += data
# Print formatted response data a la 'curl -v'
print(''.join(
'> {line}\n'.format(line=line)
for line in response.splitlines()
))
self.client_connection.sendall(response)
finally:
self.client_connection.close()
SERVER_ADDRESS = (HOST, PORT) = '', 8888
def make_server(server_address, application):
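# Handle SIGCHLD so terminated child processes are reaped by grim_reaper (no zombies)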
signal.signal(signal.SIGCHLD, grim_reaper)
server = WSGIServer(server_address)
server.set_app(application)
return server
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.exit('Provide a WSGI application object as module:callable')
app_path = sys.argv[1]
module, application = app_path.split(':')
module = __import__(module)
application = getattr(module, application)
httpd = make_server(SERVER_ADDRESS, application)
print('WSGIServer: Serving HTTP on port {port} ...\n'.format(port=PORT))
httpd.serve_forever()
<|fim▁end|> | continue |
<|file_name|>server-again.py<|end_file_name|><|fim▁begin|>###########################################################################
# Concurrent WSGI server - webserver3h.py #
# #
# Tested with Python 2.7.9 on Ubuntu 14.04 & Mac OS X #
###########################################################################
import errno
import os
import signal
import socket
import StringIO
import sys
def grim_reaper(signum, frame):
while True:
try:
pid, status = os.waitpid(
-1, # Wait for any child process
os.WNOHANG # Do not block; return immediately if no child has exited
)
print(
'Child {pid} terminated with status {status}'
'\n'.format(pid=pid, status=status)
)
except OSError:
return
if pid == 0: # no more zombies
return
class WSGIServer(object):
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 1024
def __init__(self, server_address):
# Create a listening socket
self.listen_socket = listen_socket = socket.socket(
self.address_family,
self.socket_type
)
# Allow reuse of the same address
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind
listen_socket.bind(server_address)
# Activate
listen_socket.listen(self.request_queue_size)
# Get server host name and port
host, port = self.listen_socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
# Return headers set by Web framework/Web application
self.headers_set = []
def set_app(self, application):
self.application = application
def serve_forever(self):
listen_socket = self.listen_socket
while True:
try:
self.client_connection, client_address = listen_socket.accept()
except IOError as e:
code, msg = e.args
# restart 'accept' if it was interrupted
if code == errno.EINTR:
continue
else:
<|fim_middle|>
pid = os.fork()
if pid == 0: # child
listen_socket.close() # close child copy
# Handle one request and close the client connection.
self.handle_one_request()
os._exit(0)
else: # parent
self.client_connection.close() # close parent copy
def handle_one_request(self):
self.request_data = request_data = self.client_connection.recv(1024)
# Print formatted request data a la 'curl -v'
print(''.join(
'< {line}\n'.format(line=line)
for line in request_data.splitlines()
))
self.parse_request(request_data)
# Construct environment dictionary using request data
env = self.get_environ()
# It's time to call our application callable and get
# back a result that will become HTTP response body
result = self.application(env, self.start_response)
# Construct a response and send it back to the client
self.finish_response(result)
def parse_request(self, text):
request_line = text.splitlines()[0]
request_line = request_line.rstrip('\r\n')
# Break down the request line into components
(self.request_method, # GET
self.path, # /hello
self.request_version # HTTP/1.1
) = request_line.split()
def get_environ(self):
env = {}
# The following code snippet does not follow PEP8 conventions
# but it's formatted the way it is for demonstration purposes
# to emphasize the required variables and their values
#
# Required WSGI variables
env['wsgi.version'] = (1, 0)
env['wsgi.url_scheme'] = 'http'
env['wsgi.input'] = StringIO.StringIO(self.request_data)
env['wsgi.errors'] = sys.stderr
env['wsgi.multithread'] = False
env['wsgi.multiprocess'] = False
env['wsgi.run_once'] = False
# Required CGI variables
env['REQUEST_METHOD'] = self.request_method # GET
env['PATH_INFO'] = self.path # /hello
env['SERVER_NAME'] = self.server_name # localhost
env['SERVER_PORT'] = str(self.server_port) # 8888
return env
def start_response(self, status, response_headers, exc_info=None):
# Add necessary server headers
server_headers = [
('Date', 'Tue, 31 Mar 2015 12:54:48 GMT'),
('Server', 'WSGIServer 0.2'),
]
self.headers_set = [status, response_headers + server_headers]
# To adhere to the WSGI specification, start_response must return
# a 'write' callable. For simplicity's sake we'll ignore that detail
# for now.
# return self.finish_response
def finish_response(self, result):
try:
status, response_headers = self.headers_set
response = 'HTTP/1.1 {status}\r\n'.format(status=status)
for header in response_headers:
response += '{0}: {1}\r\n'.format(*header)
response += '\r\n'
for data in result:
response += data
# Print formatted response data a la 'curl -v'
print(''.join(
'> {line}\n'.format(line=line)
for line in response.splitlines()
))
self.client_connection.sendall(response)
finally:
self.client_connection.close()
SERVER_ADDRESS = (HOST, PORT) = '', 8888
def make_server(server_address, application):
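# Handle SIGCHLD so terminated child processes are reaped by grim_reaper (no zombies)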
signal.signal(signal.SIGCHLD, grim_reaper)
server = WSGIServer(server_address)
server.set_app(application)
return server
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.exit('Provide a WSGI application object as module:callable')
app_path = sys.argv[1]
module, application = app_path.split(':')
module = __import__(module)
application = getattr(module, application)
httpd = make_server(SERVER_ADDRESS, application)
print('WSGIServer: Serving HTTP on port {port} ...\n'.format(port=PORT))
httpd.serve_forever()
<|fim▁end|> | raise |
<|file_name|>server-again.py<|end_file_name|><|fim▁begin|>###########################################################################
# Concurrent WSGI server - webserver3h.py #
# #
# Tested with Python 2.7.9 on Ubuntu 14.04 & Mac OS X #
###########################################################################
import errno
import os
import signal
import socket
import StringIO
import sys
def grim_reaper(signum, frame):
while True:
try:
pid, status = os.waitpid(
-1, # Wait for any child process
os.WNOHANG # Do not block; return immediately if no child has exited
)
print(
'Child {pid} terminated with status {status}'
'\n'.format(pid=pid, status=status)
)
except OSError:
return
if pid == 0: # no more zombies
return
class WSGIServer(object):
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 1024
def __init__(self, server_address):
# Create a listening socket
self.listen_socket = listen_socket = socket.socket(
self.address_family,
self.socket_type
)
# Allow reuse of the same address
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind
listen_socket.bind(server_address)
# Activate
listen_socket.listen(self.request_queue_size)
# Get server host name and port
host, port = self.listen_socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
# Return headers set by Web framework/Web application
self.headers_set = []
def set_app(self, application):
self.application = application
def serve_forever(self):
listen_socket = self.listen_socket
while True:
try:
self.client_connection, client_address = listen_socket.accept()
except IOError as e:
code, msg = e.args
# restart 'accept' if it was interrupted
if code == errno.EINTR:
continue
else:
raise
pid = os.fork()
if pid == 0: # child
<|fim_middle|>
else: # parent
self.client_connection.close() # close parent copy
def handle_one_request(self):
self.request_data = request_data = self.client_connection.recv(1024)
# Print formatted request data a la 'curl -v'
print(''.join(
'< {line}\n'.format(line=line)
for line in request_data.splitlines()
))
self.parse_request(request_data)
# Construct environment dictionary using request data
env = self.get_environ()
# It's time to call our application callable and get
# back a result that will become HTTP response body
result = self.application(env, self.start_response)
# Construct a response and send it back to the client
self.finish_response(result)
def parse_request(self, text):
request_line = text.splitlines()[0]
request_line = request_line.rstrip('\r\n')
# Break down the request line into components
(self.request_method, # GET
self.path, # /hello
self.request_version # HTTP/1.1
) = request_line.split()
def get_environ(self):
env = {}
# The following code snippet does not follow PEP8 conventions
# but it's formatted the way it is for demonstration purposes
# to emphasize the required variables and their values
#
# Required WSGI variables
env['wsgi.version'] = (1, 0)
env['wsgi.url_scheme'] = 'http'
env['wsgi.input'] = StringIO.StringIO(self.request_data)
env['wsgi.errors'] = sys.stderr
env['wsgi.multithread'] = False
env['wsgi.multiprocess'] = False
env['wsgi.run_once'] = False
# Required CGI variables
env['REQUEST_METHOD'] = self.request_method # GET
env['PATH_INFO'] = self.path # /hello
env['SERVER_NAME'] = self.server_name # localhost
env['SERVER_PORT'] = str(self.server_port) # 8888
return env
def start_response(self, status, response_headers, exc_info=None):
# Add necessary server headers
server_headers = [
('Date', 'Tue, 31 Mar 2015 12:54:48 GMT'),
('Server', 'WSGIServer 0.2'),
]
self.headers_set = [status, response_headers + server_headers]
# To adhere to the WSGI specification, start_response must return
# a 'write' callable. For simplicity's sake we'll ignore that detail
# for now.
# return self.finish_response
def finish_response(self, result):
try:
status, response_headers = self.headers_set
response = 'HTTP/1.1 {status}\r\n'.format(status=status)
for header in response_headers:
response += '{0}: {1}\r\n'.format(*header)
response += '\r\n'
for data in result:
response += data
# Print formatted response data a la 'curl -v'
print(''.join(
'> {line}\n'.format(line=line)
for line in response.splitlines()
))
self.client_connection.sendall(response)
finally:
self.client_connection.close()
SERVER_ADDRESS = (HOST, PORT) = '', 8888
def make_server(server_address, application):
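# Handle SIGCHLD so terminated child processes are reaped by grim_reaper (no zombies)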
signal.signal(signal.SIGCHLD, grim_reaper)
server = WSGIServer(server_address)
server.set_app(application)
return server
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.exit('Provide a WSGI application object as module:callable')
app_path = sys.argv[1]
module, application = app_path.split(':')
module = __import__(module)
application = getattr(module, application)
httpd = make_server(SERVER_ADDRESS, application)
print('WSGIServer: Serving HTTP on port {port} ...\n'.format(port=PORT))
httpd.serve_forever()
<|fim▁end|> | listen_socket.close() # close child copy
# Handle one request and close the client connection.
self.handle_one_request()
os._exit(0) |
<|file_name|>server-again.py<|end_file_name|><|fim▁begin|>###########################################################################
# Concurrent WSGI server - webserver3h.py #
# #
# Tested with Python 2.7.9 on Ubuntu 14.04 & Mac OS X #
###########################################################################
import errno
import os
import signal
import socket
import StringIO
import sys
def grim_reaper(signum, frame):
while True:
try:
pid, status = os.waitpid(
-1, # Wait for any child process
os.WNOHANG # Do not block; return immediately if no child has exited
)
print(
'Child {pid} terminated with status {status}'
'\n'.format(pid=pid, status=status)
)
except OSError:
return
if pid == 0: # no more zombies
return
class WSGIServer(object):
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 1024
def __init__(self, server_address):
# Create a listening socket
self.listen_socket = listen_socket = socket.socket(
self.address_family,
self.socket_type
)
# Allow reuse of the same address
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind
listen_socket.bind(server_address)
# Activate
listen_socket.listen(self.request_queue_size)
# Get server host name and port
host, port = self.listen_socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
# Return headers set by Web framework/Web application
self.headers_set = []
def set_app(self, application):
self.application = application
def serve_forever(self):
listen_socket = self.listen_socket
while True:
try:
self.client_connection, client_address = listen_socket.accept()
except IOError as e:
code, msg = e.args
# restart 'accept' if it was interrupted
if code == errno.EINTR:
continue
else:
raise
pid = os.fork()
if pid == 0: # child
listen_socket.close() # close child copy
# Handle one request and close the client connection.
self.handle_one_request()
os._exit(0)
else: # parent
<|fim_middle|>
def handle_one_request(self):
self.request_data = request_data = self.client_connection.recv(1024)
# Print formatted request data a la 'curl -v'
print(''.join(
'< {line}\n'.format(line=line)
for line in request_data.splitlines()
))
self.parse_request(request_data)
# Construct environment dictionary using request data
env = self.get_environ()
# It's time to call our application callable and get
# back a result that will become HTTP response body
result = self.application(env, self.start_response)
# Construct a response and send it back to the client
self.finish_response(result)
def parse_request(self, text):
request_line = text.splitlines()[0]
request_line = request_line.rstrip('\r\n')
# Break down the request line into components
(self.request_method, # GET
self.path, # /hello
self.request_version # HTTP/1.1
) = request_line.split()
def get_environ(self):
env = {}
# The following code snippet does not follow PEP8 conventions
# but it's formatted the way it is for demonstration purposes
# to emphasize the required variables and their values
#
# Required WSGI variables
env['wsgi.version'] = (1, 0)
env['wsgi.url_scheme'] = 'http'
env['wsgi.input'] = StringIO.StringIO(self.request_data)
env['wsgi.errors'] = sys.stderr
env['wsgi.multithread'] = False
env['wsgi.multiprocess'] = False
env['wsgi.run_once'] = False
# Required CGI variables
env['REQUEST_METHOD'] = self.request_method # GET
env['PATH_INFO'] = self.path # /hello
env['SERVER_NAME'] = self.server_name # localhost
env['SERVER_PORT'] = str(self.server_port) # 8888
return env
def start_response(self, status, response_headers, exc_info=None):
# Add necessary server headers
server_headers = [
('Date', 'Tue, 31 Mar 2015 12:54:48 GMT'),
('Server', 'WSGIServer 0.2'),
]
self.headers_set = [status, response_headers + server_headers]
# To adhere to the WSGI specification, start_response must return
# a 'write' callable. For simplicity's sake we'll ignore that detail
# for now.
# return self.finish_response
def finish_response(self, result):
try:
status, response_headers = self.headers_set
response = 'HTTP/1.1 {status}\r\n'.format(status=status)
for header in response_headers:
response += '{0}: {1}\r\n'.format(*header)
response += '\r\n'
for data in result:
response += data
# Print formatted response data a la 'curl -v'
print(''.join(
'> {line}\n'.format(line=line)
for line in response.splitlines()
))
self.client_connection.sendall(response)
finally:
self.client_connection.close()
SERVER_ADDRESS = (HOST, PORT) = '', 8888
def make_server(server_address, application):
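# Handle SIGCHLD so terminated child processes are reaped by grim_reaper (no zombies)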
signal.signal(signal.SIGCHLD, grim_reaper)
server = WSGIServer(server_address)
server.set_app(application)
return server
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.exit('Provide a WSGI application object as module:callable')
app_path = sys.argv[1]
module, application = app_path.split(':')
module = __import__(module)
application = getattr(module, application)
httpd = make_server(SERVER_ADDRESS, application)
print('WSGIServer: Serving HTTP on port {port} ...\n'.format(port=PORT))
httpd.serve_forever()
<|fim▁end|> | self.client_connection.close() # close parent copy |
<|file_name|>server-again.py<|end_file_name|><|fim▁begin|>###########################################################################
# Concurrent WSGI server - webserver3h.py #
# #
# Tested with Python 2.7.9 on Ubuntu 14.04 & Mac OS X #
###########################################################################
import errno
import os
import signal
import socket
import StringIO
import sys
def grim_reaper(signum, frame):
while True:
try:
pid, status = os.waitpid(
-1, # Wait for any child process
os.WNOHANG # Do not block; return immediately if no child has exited
)
print(
'Child {pid} terminated with status {status}'
'\n'.format(pid=pid, status=status)
)
except OSError:
return
if pid == 0: # no more zombies
return
class WSGIServer(object):
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 1024
def __init__(self, server_address):
# Create a listening socket
self.listen_socket = listen_socket = socket.socket(
self.address_family,
self.socket_type
)
# Allow reuse of the same address
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind
listen_socket.bind(server_address)
# Activate
listen_socket.listen(self.request_queue_size)
# Get server host name and port
host, port = self.listen_socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
# Return headers set by Web framework/Web application
self.headers_set = []
def set_app(self, application):
self.application = application
def serve_forever(self):
listen_socket = self.listen_socket
while True:
try:
self.client_connection, client_address = listen_socket.accept()
except IOError as e:
code, msg = e.args
# restart 'accept' if it was interrupted
if code == errno.EINTR:
continue
else:
raise
pid = os.fork()
if pid == 0: # child
listen_socket.close() # close child copy
# Handle one request and close the client connection.
self.handle_one_request()
os._exit(0)
else: # parent
self.client_connection.close() # close parent copy
def handle_one_request(self):
self.request_data = request_data = self.client_connection.recv(1024)
# Print formatted request data a la 'curl -v'
print(''.join(
'< {line}\n'.format(line=line)
for line in request_data.splitlines()
))
self.parse_request(request_data)
# Construct environment dictionary using request data
env = self.get_environ()
# It's time to call our application callable and get
# back a result that will become HTTP response body
result = self.application(env, self.start_response)
# Construct a response and send it back to the client
self.finish_response(result)
def parse_request(self, text):
request_line = text.splitlines()[0]
request_line = request_line.rstrip('\r\n')
# Break down the request line into components
(self.request_method, # GET
self.path, # /hello
self.request_version # HTTP/1.1
) = request_line.split()
def get_environ(self):
env = {}
# The following code snippet does not follow PEP8 conventions
# but it's formatted the way it is for demonstration purposes
# to emphasize the required variables and their values
#
# Required WSGI variables
env['wsgi.version'] = (1, 0)
env['wsgi.url_scheme'] = 'http'
env['wsgi.input'] = StringIO.StringIO(self.request_data)
env['wsgi.errors'] = sys.stderr
env['wsgi.multithread'] = False
env['wsgi.multiprocess'] = False
env['wsgi.run_once'] = False
# Required CGI variables
env['REQUEST_METHOD'] = self.request_method # GET
env['PATH_INFO'] = self.path # /hello
env['SERVER_NAME'] = self.server_name # localhost
env['SERVER_PORT'] = str(self.server_port) # 8888
return env
def start_response(self, status, response_headers, exc_info=None):
# Add necessary server headers
server_headers = [
('Date', 'Tue, 31 Mar 2015 12:54:48 GMT'),
('Server', 'WSGIServer 0.2'),
]
self.headers_set = [status, response_headers + server_headers]
# To adhere to the WSGI specification, start_response must return
# a 'write' callable. For simplicity's sake we'll ignore that detail
# for now.
# return self.finish_response
def finish_response(self, result):
try:
status, response_headers = self.headers_set
response = 'HTTP/1.1 {status}\r\n'.format(status=status)
for header in response_headers:
response += '{0}: {1}\r\n'.format(*header)
response += '\r\n'
for data in result:
response += data
# Print formatted response data a la 'curl -v'
print(''.join(
'> {line}\n'.format(line=line)
for line in response.splitlines()
))
self.client_connection.sendall(response)
finally:
self.client_connection.close()
SERVER_ADDRESS = (HOST, PORT) = '', 8888
def make_server(server_address, application):
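# Handle SIGCHLD so terminated child processes are reaped by grim_reaper (no zombies)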
signal.signal(signal.SIGCHLD, grim_reaper)
server = WSGIServer(server_address)
server.set_app(application)
return server
if __name__ == '__main__':
<|fim_middle|>
<|fim▁end|> | if len(sys.argv) < 2:
sys.exit('Provide a WSGI application object as module:callable')
app_path = sys.argv[1]
module, application = app_path.split(':')
module = __import__(module)
application = getattr(module, application)
httpd = make_server(SERVER_ADDRESS, application)
print('WSGIServer: Serving HTTP on port {port} ...\n'.format(port=PORT))
httpd.serve_forever() |
<|file_name|>server-again.py<|end_file_name|><|fim▁begin|>###########################################################################
# Concurrent WSGI server - webserver3h.py #
# #
# Tested with Python 2.7.9 on Ubuntu 14.04 & Mac OS X #
###########################################################################
import errno
import os
import signal
import socket
import StringIO
import sys
def grim_reaper(signum, frame):
while True:
try:
pid, status = os.waitpid(
-1, # Wait for any child process
os.WNOHANG # Do not block; return immediately if no child has exited
)
print(
'Child {pid} terminated with status {status}'
'\n'.format(pid=pid, status=status)
)
except OSError:
return
if pid == 0: # no more zombies
return
class WSGIServer(object):
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 1024
def __init__(self, server_address):
# Create a listening socket
self.listen_socket = listen_socket = socket.socket(
self.address_family,
self.socket_type
)
# Allow reuse of the same address
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind
listen_socket.bind(server_address)
# Activate
listen_socket.listen(self.request_queue_size)
# Get server host name and port
host, port = self.listen_socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
# Return headers set by Web framework/Web application
self.headers_set = []
def set_app(self, application):
self.application = application
def serve_forever(self):
listen_socket = self.listen_socket
while True:
try:
self.client_connection, client_address = listen_socket.accept()
except IOError as e:
code, msg = e.args
# restart 'accept' if it was interrupted
if code == errno.EINTR:
continue
else:
raise
pid = os.fork()
if pid == 0: # child
listen_socket.close() # close child copy
# Handle one request and close the client connection.
self.handle_one_request()
os._exit(0)
else: # parent
self.client_connection.close() # close parent copy
def handle_one_request(self):
self.request_data = request_data = self.client_connection.recv(1024)
# Print formatted request data a la 'curl -v'
print(''.join(
'< {line}\n'.format(line=line)
for line in request_data.splitlines()
))
self.parse_request(request_data)
# Construct environment dictionary using request data
env = self.get_environ()
# It's time to call our application callable and get
# back a result that will become HTTP response body
result = self.application(env, self.start_response)
# Construct a response and send it back to the client
self.finish_response(result)
def parse_request(self, text):
request_line = text.splitlines()[0]
request_line = request_line.rstrip('\r\n')
# Break down the request line into components
(self.request_method, # GET
self.path, # /hello
self.request_version # HTTP/1.1
) = request_line.split()
def get_environ(self):
env = {}
# The following code snippet does not follow PEP8 conventions
# but it's formatted the way it is for demonstration purposes
# to emphasize the required variables and their values
#
# Required WSGI variables
env['wsgi.version'] = (1, 0)
env['wsgi.url_scheme'] = 'http'
env['wsgi.input'] = StringIO.StringIO(self.request_data)
env['wsgi.errors'] = sys.stderr
env['wsgi.multithread'] = False
env['wsgi.multiprocess'] = False
env['wsgi.run_once'] = False
# Required CGI variables
env['REQUEST_METHOD'] = self.request_method # GET
env['PATH_INFO'] = self.path # /hello
env['SERVER_NAME'] = self.server_name # localhost
env['SERVER_PORT'] = str(self.server_port) # 8888
return env
def start_response(self, status, response_headers, exc_info=None):
# Add necessary server headers
server_headers = [
('Date', 'Tue, 31 Mar 2015 12:54:48 GMT'),
('Server', 'WSGIServer 0.2'),
]
self.headers_set = [status, response_headers + server_headers]
# To adhere to the WSGI specification, start_response must return
# a 'write' callable. For simplicity's sake we'll ignore that detail
# for now.
# return self.finish_response
def finish_response(self, result):
try:
status, response_headers = self.headers_set
response = 'HTTP/1.1 {status}\r\n'.format(status=status)
for header in response_headers:
response += '{0}: {1}\r\n'.format(*header)
response += '\r\n'
for data in result:
response += data
# Print formatted response data a la 'curl -v'
print(''.join(
'> {line}\n'.format(line=line)
for line in response.splitlines()
))
self.client_connection.sendall(response)
finally:
self.client_connection.close()
SERVER_ADDRESS = (HOST, PORT) = '', 8888
def make_server(server_address, application):
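# Handle SIGCHLD so terminated child processes are reaped by grim_reaper (no zombies)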
signal.signal(signal.SIGCHLD, grim_reaper)
server = WSGIServer(server_address)
server.set_app(application)
return server
if __name__ == '__main__':
if len(sys.argv) < 2:
<|fim_middle|>
app_path = sys.argv[1]
module, application = app_path.split(':')
module = __import__(module)
application = getattr(module, application)
httpd = make_server(SERVER_ADDRESS, application)
print('WSGIServer: Serving HTTP on port {port} ...\n'.format(port=PORT))
httpd.serve_forever()
<|fim▁end|> | sys.exit('Provide a WSGI application object as module:callable') |
<|file_name|>server-again.py<|end_file_name|><|fim▁begin|>###########################################################################
# Concurrent WSGI server - webserver3h.py #
# #
# Tested with Python 2.7.9 on Ubuntu 14.04 & Mac OS X #
###########################################################################
import errno
import os
import signal
import socket
import StringIO
import sys
def <|fim_middle|>(signum, frame):
while True:
try:
pid, status = os.waitpid(
-1, # Wait for any child process
os.WNOHANG # Do not block; return immediately if no child has exited
)
print(
'Child {pid} terminated with status {status}'
'\n'.format(pid=pid, status=status)
)
except OSError:
return
if pid == 0: # no more zombies
return
class WSGIServer(object):
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 1024
def __init__(self, server_address):
# Create a listening socket
self.listen_socket = listen_socket = socket.socket(
self.address_family,
self.socket_type
)
# Allow reuse of the same address
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind
listen_socket.bind(server_address)
# Activate
listen_socket.listen(self.request_queue_size)
# Get server host name and port
host, port = self.listen_socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
# Return headers set by Web framework/Web application
self.headers_set = []
def set_app(self, application):
self.application = application
def serve_forever(self):
listen_socket = self.listen_socket
while True:
try:
self.client_connection, client_address = listen_socket.accept()
except IOError as e:
code, msg = e.args
# restart 'accept' if it was interrupted
if code == errno.EINTR:
continue
else:
raise
pid = os.fork()
if pid == 0: # child
listen_socket.close() # close child copy
# Handle one request and close the client connection.
self.handle_one_request()
os._exit(0)
else: # parent
self.client_connection.close() # close parent copy
def handle_one_request(self):
self.request_data = request_data = self.client_connection.recv(1024)
# Print formatted request data a la 'curl -v'
print(''.join(
'< {line}\n'.format(line=line)
for line in request_data.splitlines()
))
self.parse_request(request_data)
# Construct environment dictionary using request data
env = self.get_environ()
# It's time to call our application callable and get
# back a result that will become HTTP response body
result = self.application(env, self.start_response)
# Construct a response and send it back to the client
self.finish_response(result)
def parse_request(self, text):
request_line = text.splitlines()[0]
request_line = request_line.rstrip('\r\n')
# Break down the request line into components
(self.request_method, # GET
self.path, # /hello
self.request_version # HTTP/1.1
) = request_line.split()
def get_environ(self):
env = {}
# The following code snippet does not follow PEP8 conventions
# but it's formatted the way it is for demonstration purposes
# to emphasize the required variables and their values
#
# Required WSGI variables
env['wsgi.version'] = (1, 0)
env['wsgi.url_scheme'] = 'http'
env['wsgi.input'] = StringIO.StringIO(self.request_data)
env['wsgi.errors'] = sys.stderr
env['wsgi.multithread'] = False
env['wsgi.multiprocess'] = False
env['wsgi.run_once'] = False
# Required CGI variables
env['REQUEST_METHOD'] = self.request_method # GET
env['PATH_INFO'] = self.path # /hello
env['SERVER_NAME'] = self.server_name # localhost
env['SERVER_PORT'] = str(self.server_port) # 8888
return env
def start_response(self, status, response_headers, exc_info=None):
# Add necessary server headers
server_headers = [
('Date', 'Tue, 31 Mar 2015 12:54:48 GMT'),
('Server', 'WSGIServer 0.2'),
]
self.headers_set = [status, response_headers + server_headers]
# To adhere to the WSGI specification, start_response must return
# a 'write' callable. For simplicity's sake we'll ignore that detail
# for now.
# return self.finish_response
def finish_response(self, result):
try:
status, response_headers = self.headers_set
response = 'HTTP/1.1 {status}\r\n'.format(status=status)
for header in response_headers:
response += '{0}: {1}\r\n'.format(*header)
response += '\r\n'
for data in result:
response += data
# Print formatted response data a la 'curl -v'
print(''.join(
'> {line}\n'.format(line=line)
for line in response.splitlines()
))
self.client_connection.sendall(response)
finally:
self.client_connection.close()
SERVER_ADDRESS = (HOST, PORT) = '', 8888
def make_server(server_address, application):
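# Handle SIGCHLD so terminated child processes are reaped by grim_reaper (no zombies)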
signal.signal(signal.SIGCHLD, grim_reaper)
server = WSGIServer(server_address)
server.set_app(application)
return server
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.exit('Provide a WSGI application object as module:callable')
app_path = sys.argv[1]
module, application = app_path.split(':')
module = __import__(module)
application = getattr(module, application)
httpd = make_server(SERVER_ADDRESS, application)
print('WSGIServer: Serving HTTP on port {port} ...\n'.format(port=PORT))
httpd.serve_forever()
<|fim▁end|> | grim_reaper |
<|file_name|>server-again.py<|end_file_name|><|fim▁begin|>###########################################################################
# Concurrent WSGI server - webserver3h.py #
# #
# Tested with Python 2.7.9 on Ubuntu 14.04 & Mac OS X #
###########################################################################
import errno
import os
import signal
import socket
import StringIO
import sys
def grim_reaper(signum, frame):
while True:
try:
pid, status = os.waitpid(
-1, # Wait for any child process
os.WNOHANG # Do not block; return immediately if no child has exited
)
print(
'Child {pid} terminated with status {status}'
'\n'.format(pid=pid, status=status)
)
except OSError:
return
if pid == 0: # no more zombies
return
class WSGIServer(object):
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 1024
def <|fim_middle|>(self, server_address):
# Create a listening socket
self.listen_socket = listen_socket = socket.socket(
self.address_family,
self.socket_type
)
# Allow reuse of the same address
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind
listen_socket.bind(server_address)
# Activate
listen_socket.listen(self.request_queue_size)
# Get server host name and port
host, port = self.listen_socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
# Return headers set by Web framework/Web application
self.headers_set = []
def set_app(self, application):
self.application = application
def serve_forever(self):
listen_socket = self.listen_socket
while True:
try:
self.client_connection, client_address = listen_socket.accept()
except IOError as e:
code, msg = e.args
# restart 'accept' if it was interrupted
if code == errno.EINTR:
continue
else:
raise
pid = os.fork()
if pid == 0: # child
listen_socket.close() # close child copy
# Handle one request and close the client connection.
self.handle_one_request()
os._exit(0)
else: # parent
self.client_connection.close() # close parent copy
def handle_one_request(self):
self.request_data = request_data = self.client_connection.recv(1024)
# Print formatted request data a la 'curl -v'
print(''.join(
'< {line}\n'.format(line=line)
for line in request_data.splitlines()
))
self.parse_request(request_data)
# Construct environment dictionary using request data
env = self.get_environ()
# It's time to call our application callable and get
# back a result that will become HTTP response body
result = self.application(env, self.start_response)
# Construct a response and send it back to the client
self.finish_response(result)
def parse_request(self, text):
request_line = text.splitlines()[0]
request_line = request_line.rstrip('\r\n')
# Break down the request line into components
(self.request_method, # GET
self.path, # /hello
self.request_version # HTTP/1.1
) = request_line.split()
def get_environ(self):
env = {}
# The following code snippet does not follow PEP8 conventions
# but it's formatted the way it is for demonstration purposes
# to emphasize the required variables and their values
#
# Required WSGI variables
env['wsgi.version'] = (1, 0)
env['wsgi.url_scheme'] = 'http'
env['wsgi.input'] = StringIO.StringIO(self.request_data)
env['wsgi.errors'] = sys.stderr
env['wsgi.multithread'] = False
env['wsgi.multiprocess'] = False
env['wsgi.run_once'] = False
# Required CGI variables
env['REQUEST_METHOD'] = self.request_method # GET
env['PATH_INFO'] = self.path # /hello
env['SERVER_NAME'] = self.server_name # localhost
env['SERVER_PORT'] = str(self.server_port) # 8888
return env
def start_response(self, status, response_headers, exc_info=None):
# Add necessary server headers
server_headers = [
('Date', 'Tue, 31 Mar 2015 12:54:48 GMT'),
('Server', 'WSGIServer 0.2'),
]
self.headers_set = [status, response_headers + server_headers]
# To adhere to the WSGI specification, start_response must return
# a 'write' callable. For simplicity's sake we'll ignore that detail
# for now.
# return self.finish_response
def finish_response(self, result):
try:
status, response_headers = self.headers_set
response = 'HTTP/1.1 {status}\r\n'.format(status=status)
for header in response_headers:
response += '{0}: {1}\r\n'.format(*header)
response += '\r\n'
for data in result:
response += data
# Print formatted response data a la 'curl -v'
print(''.join(
'> {line}\n'.format(line=line)
for line in response.splitlines()
))
self.client_connection.sendall(response)
finally:
self.client_connection.close()
SERVER_ADDRESS = (HOST, PORT) = '', 8888
def make_server(server_address, application):
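# Handle SIGCHLD so terminated child processes are reaped by grim_reaper (no zombies)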
signal.signal(signal.SIGCHLD, grim_reaper)
server = WSGIServer(server_address)
server.set_app(application)
return server
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.exit('Provide a WSGI application object as module:callable')
app_path = sys.argv[1]
module, application = app_path.split(':')
module = __import__(module)
application = getattr(module, application)
httpd = make_server(SERVER_ADDRESS, application)
print('WSGIServer: Serving HTTP on port {port} ...\n'.format(port=PORT))
httpd.serve_forever()
<|fim▁end|> | __init__ |
<|file_name|>server-again.py<|end_file_name|><|fim▁begin|>###########################################################################
# Concurrent WSGI server - webserver3h.py #
# #
# Tested with Python 2.7.9 on Ubuntu 14.04 & Mac OS X #
###########################################################################
import errno
import os
import signal
import socket
import StringIO
import sys
def grim_reaper(signum, frame):
while True:
try:
pid, status = os.waitpid(
-1, # Wait for any child process
os.WNOHANG # Do not block; return immediately if no child has exited
)
print(
'Child {pid} terminated with status {status}'
'\n'.format(pid=pid, status=status)
)
except OSError:
return
if pid == 0: # no more zombies
return
class WSGIServer(object):
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 1024
def __init__(self, server_address):
# Create a listening socket
self.listen_socket = listen_socket = socket.socket(
self.address_family,
self.socket_type
)
# Allow reuse of the same address
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind
listen_socket.bind(server_address)
# Activate
listen_socket.listen(self.request_queue_size)
# Get server host name and port
host, port = self.listen_socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
# Return headers set by Web framework/Web application
self.headers_set = []
def <|fim_middle|>(self, application):
self.application = application
def serve_forever(self):
listen_socket = self.listen_socket
while True:
try:
self.client_connection, client_address = listen_socket.accept()
except IOError as e:
code, msg = e.args
# restart 'accept' if it was interrupted
if code == errno.EINTR:
continue
else:
raise
pid = os.fork()
if pid == 0: # child
listen_socket.close() # close child copy
# Handle one request and close the client connection.
self.handle_one_request()
os._exit(0)
else: # parent
self.client_connection.close() # close parent copy
def handle_one_request(self):
self.request_data = request_data = self.client_connection.recv(1024)
# Print formatted request data a la 'curl -v'
print(''.join(
'< {line}\n'.format(line=line)
for line in request_data.splitlines()
))
self.parse_request(request_data)
# Construct environment dictionary using request data
env = self.get_environ()
# It's time to call our application callable and get
# back a result that will become HTTP response body
result = self.application(env, self.start_response)
# Construct a response and send it back to the client
self.finish_response(result)
def parse_request(self, text):
request_line = text.splitlines()[0]
request_line = request_line.rstrip('\r\n')
# Break down the request line into components
(self.request_method, # GET
self.path, # /hello
self.request_version # HTTP/1.1
) = request_line.split()
def get_environ(self):
env = {}
# The following code snippet does not follow PEP8 conventions
# but it's formatted the way it is for demonstration purposes
# to emphasize the required variables and their values
#
# Required WSGI variables
env['wsgi.version'] = (1, 0)
env['wsgi.url_scheme'] = 'http'
env['wsgi.input'] = StringIO.StringIO(self.request_data)
env['wsgi.errors'] = sys.stderr
env['wsgi.multithread'] = False
env['wsgi.multiprocess'] = False
env['wsgi.run_once'] = False
# Required CGI variables
env['REQUEST_METHOD'] = self.request_method # GET
env['PATH_INFO'] = self.path # /hello
env['SERVER_NAME'] = self.server_name # localhost
env['SERVER_PORT'] = str(self.server_port) # 8888
return env
def start_response(self, status, response_headers, exc_info=None):
# Add necessary server headers
server_headers = [
('Date', 'Tue, 31 Mar 2015 12:54:48 GMT'),
('Server', 'WSGIServer 0.2'),
]
self.headers_set = [status, response_headers + server_headers]
# To adhere to WSGI specification the start_response must return
        # a 'write' callable. For simplicity's sake we'll ignore that detail
# for now.
# return self.finish_response
def finish_response(self, result):
try:
status, response_headers = self.headers_set
response = 'HTTP/1.1 {status}\r\n'.format(status=status)
for header in response_headers:
response += '{0}: {1}\r\n'.format(*header)
response += '\r\n'
for data in result:
response += data
# Print formatted response data a la 'curl -v'
print(''.join(
'> {line}\n'.format(line=line)
for line in response.splitlines()
))
self.client_connection.sendall(response)
finally:
self.client_connection.close()
SERVER_ADDRESS = (HOST, PORT) = '', 8888
def make_server(server_address, application):
signal.signal(signal.SIGCHLD, grim_reaper)
server = WSGIServer(server_address)
server.set_app(application)
return server
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.exit('Provide a WSGI application object as module:callable')
app_path = sys.argv[1]
module, application = app_path.split(':')
module = __import__(module)
application = getattr(module, application)
httpd = make_server(SERVER_ADDRESS, application)
print('WSGIServer: Serving HTTP on port {port} ...\n'.format(port=PORT))
httpd.serve_forever()
<|fim▁end|> | set_app |
<|file_name|>server-again.py<|end_file_name|><|fim▁begin|>###########################################################################
# Concurrent WSGI server - webserver3h.py #
# #
# Tested with Python 2.7.9 on Ubuntu 14.04 & Mac OS X #
###########################################################################
import errno
import os
import signal
import socket
import StringIO
import sys
def grim_reaper(signum, frame):
while True:
try:
pid, status = os.waitpid(
-1, # Wait for any child process
                os.WNOHANG   # Return immediately instead of blocking if no child has exited
)
print(
'Child {pid} terminated with status {status}'
'\n'.format(pid=pid, status=status)
)
except OSError:
return
if pid == 0: # no more zombies
return
class WSGIServer(object):
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 1024
def __init__(self, server_address):
# Create a listening socket
self.listen_socket = listen_socket = socket.socket(
self.address_family,
self.socket_type
)
        # Allow reuse of the same address
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind
listen_socket.bind(server_address)
# Activate
listen_socket.listen(self.request_queue_size)
# Get server host name and port
host, port = self.listen_socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
# Return headers set by Web framework/Web application
self.headers_set = []
def set_app(self, application):
self.application = application
def <|fim_middle|>(self):
listen_socket = self.listen_socket
while True:
try:
self.client_connection, client_address = listen_socket.accept()
except IOError as e:
code, msg = e.args
# restart 'accept' if it was interrupted
if code == errno.EINTR:
continue
else:
raise
pid = os.fork()
if pid == 0: # child
listen_socket.close() # close child copy
# Handle one request and close the client connection.
self.handle_one_request()
os._exit(0)
else: # parent
self.client_connection.close() # close parent copy
def handle_one_request(self):
self.request_data = request_data = self.client_connection.recv(1024)
# Print formatted request data a la 'curl -v'
print(''.join(
'< {line}\n'.format(line=line)
for line in request_data.splitlines()
))
self.parse_request(request_data)
# Construct environment dictionary using request data
env = self.get_environ()
# It's time to call our application callable and get
# back a result that will become HTTP response body
result = self.application(env, self.start_response)
# Construct a response and send it back to the client
self.finish_response(result)
def parse_request(self, text):
request_line = text.splitlines()[0]
request_line = request_line.rstrip('\r\n')
# Break down the request line into components
(self.request_method, # GET
self.path, # /hello
self.request_version # HTTP/1.1
) = request_line.split()
def get_environ(self):
env = {}
# The following code snippet does not follow PEP8 conventions
# but it's formatted the way it is for demonstration purposes
# to emphasize the required variables and their values
#
# Required WSGI variables
env['wsgi.version'] = (1, 0)
env['wsgi.url_scheme'] = 'http'
env['wsgi.input'] = StringIO.StringIO(self.request_data)
env['wsgi.errors'] = sys.stderr
env['wsgi.multithread'] = False
env['wsgi.multiprocess'] = False
env['wsgi.run_once'] = False
# Required CGI variables
env['REQUEST_METHOD'] = self.request_method # GET
env['PATH_INFO'] = self.path # /hello
env['SERVER_NAME'] = self.server_name # localhost
env['SERVER_PORT'] = str(self.server_port) # 8888
return env
def start_response(self, status, response_headers, exc_info=None):
# Add necessary server headers
server_headers = [
('Date', 'Tue, 31 Mar 2015 12:54:48 GMT'),
('Server', 'WSGIServer 0.2'),
]
self.headers_set = [status, response_headers + server_headers]
# To adhere to WSGI specification the start_response must return
        # a 'write' callable. For simplicity's sake we'll ignore that detail
# for now.
# return self.finish_response
def finish_response(self, result):
try:
status, response_headers = self.headers_set
response = 'HTTP/1.1 {status}\r\n'.format(status=status)
for header in response_headers:
response += '{0}: {1}\r\n'.format(*header)
response += '\r\n'
for data in result:
response += data
# Print formatted response data a la 'curl -v'
print(''.join(
'> {line}\n'.format(line=line)
for line in response.splitlines()
))
self.client_connection.sendall(response)
finally:
self.client_connection.close()
SERVER_ADDRESS = (HOST, PORT) = '', 8888
def make_server(server_address, application):
signal.signal(signal.SIGCHLD, grim_reaper)
server = WSGIServer(server_address)
server.set_app(application)
return server
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.exit('Provide a WSGI application object as module:callable')
app_path = sys.argv[1]
module, application = app_path.split(':')
module = __import__(module)
application = getattr(module, application)
httpd = make_server(SERVER_ADDRESS, application)
print('WSGIServer: Serving HTTP on port {port} ...\n'.format(port=PORT))
httpd.serve_forever()
<|fim▁end|> | serve_forever |
<|file_name|>server-again.py<|end_file_name|><|fim▁begin|>###########################################################################
# Concurrent WSGI server - webserver3h.py #
# #
# Tested with Python 2.7.9 on Ubuntu 14.04 & Mac OS X #
###########################################################################
import errno
import os
import signal
import socket
import StringIO
import sys
def grim_reaper(signum, frame):
while True:
try:
pid, status = os.waitpid(
-1, # Wait for any child process
                os.WNOHANG   # Return immediately instead of blocking if no child has exited
)
print(
'Child {pid} terminated with status {status}'
'\n'.format(pid=pid, status=status)
)
except OSError:
return
if pid == 0: # no more zombies
return
class WSGIServer(object):
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 1024
def __init__(self, server_address):
# Create a listening socket
self.listen_socket = listen_socket = socket.socket(
self.address_family,
self.socket_type
)
        # Allow reuse of the same address
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind
listen_socket.bind(server_address)
# Activate
listen_socket.listen(self.request_queue_size)
# Get server host name and port
host, port = self.listen_socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
# Return headers set by Web framework/Web application
self.headers_set = []
def set_app(self, application):
self.application = application
def serve_forever(self):
listen_socket = self.listen_socket
while True:
try:
self.client_connection, client_address = listen_socket.accept()
except IOError as e:
code, msg = e.args
# restart 'accept' if it was interrupted
if code == errno.EINTR:
continue
else:
raise
pid = os.fork()
if pid == 0: # child
listen_socket.close() # close child copy
# Handle one request and close the client connection.
self.handle_one_request()
os._exit(0)
else: # parent
self.client_connection.close() # close parent copy
def <|fim_middle|>(self):
self.request_data = request_data = self.client_connection.recv(1024)
# Print formatted request data a la 'curl -v'
print(''.join(
'< {line}\n'.format(line=line)
for line in request_data.splitlines()
))
self.parse_request(request_data)
# Construct environment dictionary using request data
env = self.get_environ()
# It's time to call our application callable and get
# back a result that will become HTTP response body
result = self.application(env, self.start_response)
# Construct a response and send it back to the client
self.finish_response(result)
def parse_request(self, text):
request_line = text.splitlines()[0]
request_line = request_line.rstrip('\r\n')
# Break down the request line into components
(self.request_method, # GET
self.path, # /hello
self.request_version # HTTP/1.1
) = request_line.split()
def get_environ(self):
env = {}
# The following code snippet does not follow PEP8 conventions
# but it's formatted the way it is for demonstration purposes
# to emphasize the required variables and their values
#
# Required WSGI variables
env['wsgi.version'] = (1, 0)
env['wsgi.url_scheme'] = 'http'
env['wsgi.input'] = StringIO.StringIO(self.request_data)
env['wsgi.errors'] = sys.stderr
env['wsgi.multithread'] = False
env['wsgi.multiprocess'] = False
env['wsgi.run_once'] = False
# Required CGI variables
env['REQUEST_METHOD'] = self.request_method # GET
env['PATH_INFO'] = self.path # /hello
env['SERVER_NAME'] = self.server_name # localhost
env['SERVER_PORT'] = str(self.server_port) # 8888
return env
def start_response(self, status, response_headers, exc_info=None):
# Add necessary server headers
server_headers = [
('Date', 'Tue, 31 Mar 2015 12:54:48 GMT'),
('Server', 'WSGIServer 0.2'),
]
self.headers_set = [status, response_headers + server_headers]
# To adhere to WSGI specification the start_response must return
        # a 'write' callable. For simplicity's sake we'll ignore that detail
# for now.
# return self.finish_response
def finish_response(self, result):
try:
status, response_headers = self.headers_set
response = 'HTTP/1.1 {status}\r\n'.format(status=status)
for header in response_headers:
response += '{0}: {1}\r\n'.format(*header)
response += '\r\n'
for data in result:
response += data
# Print formatted response data a la 'curl -v'
print(''.join(
'> {line}\n'.format(line=line)
for line in response.splitlines()
))
self.client_connection.sendall(response)
finally:
self.client_connection.close()
SERVER_ADDRESS = (HOST, PORT) = '', 8888
def make_server(server_address, application):
signal.signal(signal.SIGCHLD, grim_reaper)
server = WSGIServer(server_address)
server.set_app(application)
return server
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.exit('Provide a WSGI application object as module:callable')
app_path = sys.argv[1]
module, application = app_path.split(':')
module = __import__(module)
application = getattr(module, application)
httpd = make_server(SERVER_ADDRESS, application)
print('WSGIServer: Serving HTTP on port {port} ...\n'.format(port=PORT))
httpd.serve_forever()
<|fim▁end|> | handle_one_request |
<|file_name|>server-again.py<|end_file_name|><|fim▁begin|>###########################################################################
# Concurrent WSGI server - webserver3h.py #
# #
# Tested with Python 2.7.9 on Ubuntu 14.04 & Mac OS X #
###########################################################################
import errno
import os
import signal
import socket
import StringIO
import sys
def grim_reaper(signum, frame):
while True:
try:
pid, status = os.waitpid(
-1, # Wait for any child process
                os.WNOHANG   # Return immediately instead of blocking if no child has exited
)
print(
'Child {pid} terminated with status {status}'
'\n'.format(pid=pid, status=status)
)
except OSError:
return
if pid == 0: # no more zombies
return
class WSGIServer(object):
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 1024
def __init__(self, server_address):
# Create a listening socket
self.listen_socket = listen_socket = socket.socket(
self.address_family,
self.socket_type
)
        # Allow reuse of the same address
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind
listen_socket.bind(server_address)
# Activate
listen_socket.listen(self.request_queue_size)
# Get server host name and port
host, port = self.listen_socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
# Return headers set by Web framework/Web application
self.headers_set = []
def set_app(self, application):
self.application = application
def serve_forever(self):
listen_socket = self.listen_socket
while True:
try:
self.client_connection, client_address = listen_socket.accept()
except IOError as e:
code, msg = e.args
# restart 'accept' if it was interrupted
if code == errno.EINTR:
continue
else:
raise
pid = os.fork()
if pid == 0: # child
listen_socket.close() # close child copy
# Handle one request and close the client connection.
self.handle_one_request()
os._exit(0)
else: # parent
self.client_connection.close() # close parent copy
def handle_one_request(self):
self.request_data = request_data = self.client_connection.recv(1024)
# Print formatted request data a la 'curl -v'
print(''.join(
'< {line}\n'.format(line=line)
for line in request_data.splitlines()
))
self.parse_request(request_data)
# Construct environment dictionary using request data
env = self.get_environ()
# It's time to call our application callable and get
# back a result that will become HTTP response body
result = self.application(env, self.start_response)
# Construct a response and send it back to the client
self.finish_response(result)
def <|fim_middle|>(self, text):
request_line = text.splitlines()[0]
request_line = request_line.rstrip('\r\n')
# Break down the request line into components
(self.request_method, # GET
self.path, # /hello
self.request_version # HTTP/1.1
) = request_line.split()
def get_environ(self):
env = {}
# The following code snippet does not follow PEP8 conventions
# but it's formatted the way it is for demonstration purposes
# to emphasize the required variables and their values
#
# Required WSGI variables
env['wsgi.version'] = (1, 0)
env['wsgi.url_scheme'] = 'http'
env['wsgi.input'] = StringIO.StringIO(self.request_data)
env['wsgi.errors'] = sys.stderr
env['wsgi.multithread'] = False
env['wsgi.multiprocess'] = False
env['wsgi.run_once'] = False
# Required CGI variables
env['REQUEST_METHOD'] = self.request_method # GET
env['PATH_INFO'] = self.path # /hello
env['SERVER_NAME'] = self.server_name # localhost
env['SERVER_PORT'] = str(self.server_port) # 8888
return env
def start_response(self, status, response_headers, exc_info=None):
# Add necessary server headers
server_headers = [
('Date', 'Tue, 31 Mar 2015 12:54:48 GMT'),
('Server', 'WSGIServer 0.2'),
]
self.headers_set = [status, response_headers + server_headers]
# To adhere to WSGI specification the start_response must return
        # a 'write' callable. For simplicity's sake we'll ignore that detail
# for now.
# return self.finish_response
def finish_response(self, result):
try:
status, response_headers = self.headers_set
response = 'HTTP/1.1 {status}\r\n'.format(status=status)
for header in response_headers:
response += '{0}: {1}\r\n'.format(*header)
response += '\r\n'
for data in result:
response += data
# Print formatted response data a la 'curl -v'
print(''.join(
'> {line}\n'.format(line=line)
for line in response.splitlines()
))
self.client_connection.sendall(response)
finally:
self.client_connection.close()
SERVER_ADDRESS = (HOST, PORT) = '', 8888
def make_server(server_address, application):
signal.signal(signal.SIGCHLD, grim_reaper)
server = WSGIServer(server_address)
server.set_app(application)
return server
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.exit('Provide a WSGI application object as module:callable')
app_path = sys.argv[1]
module, application = app_path.split(':')
module = __import__(module)
application = getattr(module, application)
httpd = make_server(SERVER_ADDRESS, application)
print('WSGIServer: Serving HTTP on port {port} ...\n'.format(port=PORT))
httpd.serve_forever()
<|fim▁end|> | parse_request |
<|file_name|>server-again.py<|end_file_name|><|fim▁begin|>###########################################################################
# Concurrent WSGI server - webserver3h.py #
# #
# Tested with Python 2.7.9 on Ubuntu 14.04 & Mac OS X #
###########################################################################
import errno
import os
import signal
import socket
import StringIO
import sys
def grim_reaper(signum, frame):
while True:
try:
pid, status = os.waitpid(
-1, # Wait for any child process
                os.WNOHANG   # Return immediately instead of blocking if no child has exited
)
print(
'Child {pid} terminated with status {status}'
'\n'.format(pid=pid, status=status)
)
except OSError:
return
if pid == 0: # no more zombies
return
class WSGIServer(object):
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 1024
def __init__(self, server_address):
# Create a listening socket
self.listen_socket = listen_socket = socket.socket(
self.address_family,
self.socket_type
)
        # Allow reuse of the same address
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind
listen_socket.bind(server_address)
# Activate
listen_socket.listen(self.request_queue_size)
# Get server host name and port
host, port = self.listen_socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
# Return headers set by Web framework/Web application
self.headers_set = []
def set_app(self, application):
self.application = application
def serve_forever(self):
listen_socket = self.listen_socket
while True:
try:
self.client_connection, client_address = listen_socket.accept()
except IOError as e:
code, msg = e.args
# restart 'accept' if it was interrupted
if code == errno.EINTR:
continue
else:
raise
pid = os.fork()
if pid == 0: # child
listen_socket.close() # close child copy
# Handle one request and close the client connection.
self.handle_one_request()
os._exit(0)
else: # parent
self.client_connection.close() # close parent copy
def handle_one_request(self):
self.request_data = request_data = self.client_connection.recv(1024)
# Print formatted request data a la 'curl -v'
print(''.join(
'< {line}\n'.format(line=line)
for line in request_data.splitlines()
))
self.parse_request(request_data)
# Construct environment dictionary using request data
env = self.get_environ()
# It's time to call our application callable and get
# back a result that will become HTTP response body
result = self.application(env, self.start_response)
# Construct a response and send it back to the client
self.finish_response(result)
def parse_request(self, text):
request_line = text.splitlines()[0]
request_line = request_line.rstrip('\r\n')
# Break down the request line into components
(self.request_method, # GET
self.path, # /hello
self.request_version # HTTP/1.1
) = request_line.split()
def <|fim_middle|>(self):
env = {}
# The following code snippet does not follow PEP8 conventions
# but it's formatted the way it is for demonstration purposes
# to emphasize the required variables and their values
#
# Required WSGI variables
env['wsgi.version'] = (1, 0)
env['wsgi.url_scheme'] = 'http'
env['wsgi.input'] = StringIO.StringIO(self.request_data)
env['wsgi.errors'] = sys.stderr
env['wsgi.multithread'] = False
env['wsgi.multiprocess'] = False
env['wsgi.run_once'] = False
# Required CGI variables
env['REQUEST_METHOD'] = self.request_method # GET
env['PATH_INFO'] = self.path # /hello
env['SERVER_NAME'] = self.server_name # localhost
env['SERVER_PORT'] = str(self.server_port) # 8888
return env
def start_response(self, status, response_headers, exc_info=None):
# Add necessary server headers
server_headers = [
('Date', 'Tue, 31 Mar 2015 12:54:48 GMT'),
('Server', 'WSGIServer 0.2'),
]
self.headers_set = [status, response_headers + server_headers]
# To adhere to WSGI specification the start_response must return
        # a 'write' callable. For simplicity's sake we'll ignore that detail
# for now.
# return self.finish_response
def finish_response(self, result):
try:
status, response_headers = self.headers_set
response = 'HTTP/1.1 {status}\r\n'.format(status=status)
for header in response_headers:
response += '{0}: {1}\r\n'.format(*header)
response += '\r\n'
for data in result:
response += data
# Print formatted response data a la 'curl -v'
print(''.join(
'> {line}\n'.format(line=line)
for line in response.splitlines()
))
self.client_connection.sendall(response)
finally:
self.client_connection.close()
SERVER_ADDRESS = (HOST, PORT) = '', 8888
def make_server(server_address, application):
signal.signal(signal.SIGCHLD, grim_reaper)
server = WSGIServer(server_address)
server.set_app(application)
return server
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.exit('Provide a WSGI application object as module:callable')
app_path = sys.argv[1]
module, application = app_path.split(':')
module = __import__(module)
application = getattr(module, application)
httpd = make_server(SERVER_ADDRESS, application)
print('WSGIServer: Serving HTTP on port {port} ...\n'.format(port=PORT))
httpd.serve_forever()
<|fim▁end|> | get_environ |
<|file_name|>server-again.py<|end_file_name|><|fim▁begin|>###########################################################################
# Concurrent WSGI server - webserver3h.py #
# #
# Tested with Python 2.7.9 on Ubuntu 14.04 & Mac OS X #
###########################################################################
import errno
import os
import signal
import socket
import StringIO
import sys
def grim_reaper(signum, frame):
while True:
try:
pid, status = os.waitpid(
-1, # Wait for any child process
                os.WNOHANG   # Return immediately instead of blocking if no child has exited
)
print(
'Child {pid} terminated with status {status}'
'\n'.format(pid=pid, status=status)
)
except OSError:
return
if pid == 0: # no more zombies
return
class WSGIServer(object):
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 1024
def __init__(self, server_address):
# Create a listening socket
self.listen_socket = listen_socket = socket.socket(
self.address_family,
self.socket_type
)
        # Allow reuse of the same address
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind
listen_socket.bind(server_address)
# Activate
listen_socket.listen(self.request_queue_size)
# Get server host name and port
host, port = self.listen_socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
# Return headers set by Web framework/Web application
self.headers_set = []
def set_app(self, application):
self.application = application
def serve_forever(self):
listen_socket = self.listen_socket
while True:
try:
self.client_connection, client_address = listen_socket.accept()
except IOError as e:
code, msg = e.args
# restart 'accept' if it was interrupted
if code == errno.EINTR:
continue
else:
raise
pid = os.fork()
if pid == 0: # child
listen_socket.close() # close child copy
# Handle one request and close the client connection.
self.handle_one_request()
os._exit(0)
else: # parent
self.client_connection.close() # close parent copy
def handle_one_request(self):
self.request_data = request_data = self.client_connection.recv(1024)
# Print formatted request data a la 'curl -v'
print(''.join(
'< {line}\n'.format(line=line)
for line in request_data.splitlines()
))
self.parse_request(request_data)
# Construct environment dictionary using request data
env = self.get_environ()
# It's time to call our application callable and get
# back a result that will become HTTP response body
result = self.application(env, self.start_response)
# Construct a response and send it back to the client
self.finish_response(result)
def parse_request(self, text):
request_line = text.splitlines()[0]
request_line = request_line.rstrip('\r\n')
# Break down the request line into components
(self.request_method, # GET
self.path, # /hello
self.request_version # HTTP/1.1
) = request_line.split()
def get_environ(self):
env = {}
# The following code snippet does not follow PEP8 conventions
# but it's formatted the way it is for demonstration purposes
# to emphasize the required variables and their values
#
# Required WSGI variables
env['wsgi.version'] = (1, 0)
env['wsgi.url_scheme'] = 'http'
env['wsgi.input'] = StringIO.StringIO(self.request_data)
env['wsgi.errors'] = sys.stderr
env['wsgi.multithread'] = False
env['wsgi.multiprocess'] = False
env['wsgi.run_once'] = False
# Required CGI variables
env['REQUEST_METHOD'] = self.request_method # GET
env['PATH_INFO'] = self.path # /hello
env['SERVER_NAME'] = self.server_name # localhost
env['SERVER_PORT'] = str(self.server_port) # 8888
return env
def <|fim_middle|>(self, status, response_headers, exc_info=None):
# Add necessary server headers
server_headers = [
('Date', 'Tue, 31 Mar 2015 12:54:48 GMT'),
('Server', 'WSGIServer 0.2'),
]
self.headers_set = [status, response_headers + server_headers]
# To adhere to WSGI specification the start_response must return
        # a 'write' callable. For simplicity's sake we'll ignore that detail
# for now.
# return self.finish_response
def finish_response(self, result):
try:
status, response_headers = self.headers_set
response = 'HTTP/1.1 {status}\r\n'.format(status=status)
for header in response_headers:
response += '{0}: {1}\r\n'.format(*header)
response += '\r\n'
for data in result:
response += data
# Print formatted response data a la 'curl -v'
print(''.join(
'> {line}\n'.format(line=line)
for line in response.splitlines()
))
self.client_connection.sendall(response)
finally:
self.client_connection.close()
SERVER_ADDRESS = (HOST, PORT) = '', 8888
def make_server(server_address, application):
signal.signal(signal.SIGCHLD, grim_reaper)
server = WSGIServer(server_address)
server.set_app(application)
return server
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.exit('Provide a WSGI application object as module:callable')
app_path = sys.argv[1]
module, application = app_path.split(':')
module = __import__(module)
application = getattr(module, application)
httpd = make_server(SERVER_ADDRESS, application)
print('WSGIServer: Serving HTTP on port {port} ...\n'.format(port=PORT))
httpd.serve_forever()
<|fim▁end|> | start_response |
<|file_name|>server-again.py<|end_file_name|><|fim▁begin|>###########################################################################
# Concurrent WSGI server - webserver3h.py #
# #
# Tested with Python 2.7.9 on Ubuntu 14.04 & Mac OS X #
###########################################################################
import errno
import os
import signal
import socket
import StringIO
import sys
def grim_reaper(signum, frame):
while True:
try:
pid, status = os.waitpid(
-1, # Wait for any child process
                os.WNOHANG   # Return immediately instead of blocking if no child has exited
)
print(
'Child {pid} terminated with status {status}'
'\n'.format(pid=pid, status=status)
)
except OSError:
return
if pid == 0: # no more zombies
return
class WSGIServer(object):
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 1024
def __init__(self, server_address):
# Create a listening socket
self.listen_socket = listen_socket = socket.socket(
self.address_family,
self.socket_type
)
        # Allow reuse of the same address
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind
listen_socket.bind(server_address)
# Activate
listen_socket.listen(self.request_queue_size)
# Get server host name and port
host, port = self.listen_socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
# Return headers set by Web framework/Web application
self.headers_set = []
def set_app(self, application):
self.application = application
def serve_forever(self):
listen_socket = self.listen_socket
while True:
try:
self.client_connection, client_address = listen_socket.accept()
except IOError as e:
code, msg = e.args
# restart 'accept' if it was interrupted
if code == errno.EINTR:
continue
else:
raise
pid = os.fork()
if pid == 0: # child
listen_socket.close() # close child copy
# Handle one request and close the client connection.
self.handle_one_request()
os._exit(0)
else: # parent
self.client_connection.close() # close parent copy
def handle_one_request(self):
self.request_data = request_data = self.client_connection.recv(1024)
# Print formatted request data a la 'curl -v'
print(''.join(
'< {line}\n'.format(line=line)
for line in request_data.splitlines()
))
self.parse_request(request_data)
# Construct environment dictionary using request data
env = self.get_environ()
# It's time to call our application callable and get
# back a result that will become HTTP response body
result = self.application(env, self.start_response)
# Construct a response and send it back to the client
self.finish_response(result)
def parse_request(self, text):
request_line = text.splitlines()[0]
request_line = request_line.rstrip('\r\n')
# Break down the request line into components
(self.request_method, # GET
self.path, # /hello
self.request_version # HTTP/1.1
) = request_line.split()
def get_environ(self):
env = {}
# The following code snippet does not follow PEP8 conventions
# but it's formatted the way it is for demonstration purposes
# to emphasize the required variables and their values
#
# Required WSGI variables
env['wsgi.version'] = (1, 0)
env['wsgi.url_scheme'] = 'http'
env['wsgi.input'] = StringIO.StringIO(self.request_data)
env['wsgi.errors'] = sys.stderr
env['wsgi.multithread'] = False
env['wsgi.multiprocess'] = False
env['wsgi.run_once'] = False
# Required CGI variables
env['REQUEST_METHOD'] = self.request_method # GET
env['PATH_INFO'] = self.path # /hello
env['SERVER_NAME'] = self.server_name # localhost
env['SERVER_PORT'] = str(self.server_port) # 8888
return env
def start_response(self, status, response_headers, exc_info=None):
# Add necessary server headers
server_headers = [
('Date', 'Tue, 31 Mar 2015 12:54:48 GMT'),
('Server', 'WSGIServer 0.2'),
]
self.headers_set = [status, response_headers + server_headers]
# To adhere to WSGI specification the start_response must return
        # a 'write' callable. For simplicity's sake we'll ignore that detail
# for now.
# return self.finish_response
def <|fim_middle|>(self, result):
try:
status, response_headers = self.headers_set
response = 'HTTP/1.1 {status}\r\n'.format(status=status)
for header in response_headers:
response += '{0}: {1}\r\n'.format(*header)
response += '\r\n'
for data in result:
response += data
# Print formatted response data a la 'curl -v'
print(''.join(
'> {line}\n'.format(line=line)
for line in response.splitlines()
))
self.client_connection.sendall(response)
finally:
self.client_connection.close()
SERVER_ADDRESS = (HOST, PORT) = '', 8888
def make_server(server_address, application):
signal.signal(signal.SIGCHLD, grim_reaper)
server = WSGIServer(server_address)
server.set_app(application)
return server
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.exit('Provide a WSGI application object as module:callable')
app_path = sys.argv[1]
module, application = app_path.split(':')
module = __import__(module)
application = getattr(module, application)
httpd = make_server(SERVER_ADDRESS, application)
print('WSGIServer: Serving HTTP on port {port} ...\n'.format(port=PORT))
httpd.serve_forever()
<|fim▁end|> | finish_response |
<|file_name|>server-again.py<|end_file_name|><|fim▁begin|>###########################################################################
# Concurrent WSGI server - webserver3h.py #
# #
# Tested with Python 2.7.9 on Ubuntu 14.04 & Mac OS X #
###########################################################################
import errno
import os
import signal
import socket
import StringIO
import sys
def grim_reaper(signum, frame):
while True:
try:
pid, status = os.waitpid(
-1, # Wait for any child process
                os.WNOHANG   # Return immediately instead of blocking if no child has exited
)
print(
'Child {pid} terminated with status {status}'
'\n'.format(pid=pid, status=status)
)
except OSError:
return
if pid == 0: # no more zombies
return
class WSGIServer(object):
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 1024
def __init__(self, server_address):
# Create a listening socket
self.listen_socket = listen_socket = socket.socket(
self.address_family,
self.socket_type
)
        # Allow reuse of the same address
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind
listen_socket.bind(server_address)
# Activate
listen_socket.listen(self.request_queue_size)
# Get server host name and port
host, port = self.listen_socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
# Return headers set by Web framework/Web application
self.headers_set = []
def set_app(self, application):
self.application = application
def serve_forever(self):
listen_socket = self.listen_socket
while True:
try:
self.client_connection, client_address = listen_socket.accept()
except IOError as e:
code, msg = e.args
# restart 'accept' if it was interrupted
if code == errno.EINTR:
continue
else:
raise
pid = os.fork()
if pid == 0: # child
listen_socket.close() # close child copy
# Handle one request and close the client connection.
self.handle_one_request()
os._exit(0)
else: # parent
self.client_connection.close() # close parent copy
def handle_one_request(self):
self.request_data = request_data = self.client_connection.recv(1024)
# Print formatted request data a la 'curl -v'
print(''.join(
'< {line}\n'.format(line=line)
for line in request_data.splitlines()
))
self.parse_request(request_data)
# Construct environment dictionary using request data
env = self.get_environ()
# It's time to call our application callable and get
# back a result that will become HTTP response body
result = self.application(env, self.start_response)
# Construct a response and send it back to the client
self.finish_response(result)
def parse_request(self, text):
request_line = text.splitlines()[0]
request_line = request_line.rstrip('\r\n')
# Break down the request line into components
(self.request_method, # GET
self.path, # /hello
self.request_version # HTTP/1.1
) = request_line.split()
def get_environ(self):
env = {}
# The following code snippet does not follow PEP8 conventions
# but it's formatted the way it is for demonstration purposes
# to emphasize the required variables and their values
#
# Required WSGI variables
env['wsgi.version'] = (1, 0)
env['wsgi.url_scheme'] = 'http'
env['wsgi.input'] = StringIO.StringIO(self.request_data)
env['wsgi.errors'] = sys.stderr
env['wsgi.multithread'] = False
env['wsgi.multiprocess'] = False
env['wsgi.run_once'] = False
# Required CGI variables
env['REQUEST_METHOD'] = self.request_method # GET
env['PATH_INFO'] = self.path # /hello
env['SERVER_NAME'] = self.server_name # localhost
env['SERVER_PORT'] = str(self.server_port) # 8888
return env
def start_response(self, status, response_headers, exc_info=None):
# Add necessary server headers
server_headers = [
('Date', 'Tue, 31 Mar 2015 12:54:48 GMT'),
('Server', 'WSGIServer 0.2'),
]
self.headers_set = [status, response_headers + server_headers]
# To adhere to WSGI specification the start_response must return
        # a 'write' callable. For simplicity's sake we'll ignore that detail
# for now.
# return self.finish_response
def finish_response(self, result):
try:
status, response_headers = self.headers_set
response = 'HTTP/1.1 {status}\r\n'.format(status=status)
for header in response_headers:
response += '{0}: {1}\r\n'.format(*header)
response += '\r\n'
for data in result:
response += data
# Print formatted response data a la 'curl -v'
print(''.join(
'> {line}\n'.format(line=line)
for line in response.splitlines()
))
self.client_connection.sendall(response)
finally:
self.client_connection.close()
SERVER_ADDRESS = (HOST, PORT) = '', 8888
def <|fim_middle|>(server_address, application):
signal.signal(signal.SIGCHLD, grim_reaper)
server = WSGIServer(server_address)
server.set_app(application)
return server
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.exit('Provide a WSGI application object as module:callable')
app_path = sys.argv[1]
module, application = app_path.split(':')
module = __import__(module)
application = getattr(module, application)
httpd = make_server(SERVER_ADDRESS, application)
print('WSGIServer: Serving HTTP on port {port} ...\n'.format(port=PORT))
httpd.serve_forever()
<|fim▁end|> | make_server |
<|file_name|>horrorkino.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Covenant Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import dom_parser
from resources.lib.modules import source_utils
class source:
def __init__(self):
self.priority = 1
self.language = ['de']
self.genre_filter = ['horror']
self.domains = ['horrorkino.do.am']
self.base_link = 'http://horrorkino.do.am/'
self.search_link = 'video/shv'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = self.__search([localtitle] + source_utils.aliases_to_array(aliases), year)
if not url and title != localtitle: url = self.__search([title] + source_utils.aliases_to_array(aliases), year)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
sources = []
try:
if not url:
return sources<|fim▁hole|> r = [i.attrs['src'] for i in r]
for i in r:
valid, host = source_utils.is_host_valid(i, hostDict)
if not valid: continue
sources.append({'source': host, 'quality': 'SD', 'language': 'de', 'url': i, 'direct': False, 'debridonly': False, 'checkquality': True})
return sources
except:
return sources
def resolve(self, url):
return url
def __search(self, titles, year):
try:
t = [cleantitle.get(i) for i in set(titles) if i]
y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
r = client.request(urlparse.urljoin(self.base_link, self.search_link), post={'query': cleantitle.query(titles[0])})
r = dom_parser.parse_dom(r, 'li', attrs={'class': 'entTd'})
r = dom_parser.parse_dom(r, 'div', attrs={'class': 've-screen'}, req='title')
r = [(dom_parser.parse_dom(i, 'a', req='href'), i.attrs['title'].split(' - ')[0]) for i in r]
r = [(i[0][0].attrs['href'], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
r = sorted(r, key=lambda i: int(i[2]), reverse=True) # with year > no year
r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]
return source_utils.strip_domain(r)
except:
return<|fim▁end|> |
r = client.request(urlparse.urljoin(self.base_link, url))
r = re.findall('''vicode\s*=\s*["'](.*?)["'];''', r)[0].decode('string_escape')
r = dom_parser.parse_dom(r, 'iframe', req='src') |
<|file_name|>horrorkino.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Covenant Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import dom_parser
from resources.lib.modules import source_utils
class source:
<|fim_middle|>
<|fim▁end|> | def __init__(self):
self.priority = 1
self.language = ['de']
self.genre_filter = ['horror']
self.domains = ['horrorkino.do.am']
self.base_link = 'http://horrorkino.do.am/'
self.search_link = 'video/shv'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = self.__search([localtitle] + source_utils.aliases_to_array(aliases), year)
if not url and title != localtitle: url = self.__search([title] + source_utils.aliases_to_array(aliases), year)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
sources = []
try:
if not url:
return sources
r = client.request(urlparse.urljoin(self.base_link, url))
r = re.findall('''vicode\s*=\s*["'](.*?)["'];''', r)[0].decode('string_escape')
r = dom_parser.parse_dom(r, 'iframe', req='src')
r = [i.attrs['src'] for i in r]
for i in r:
valid, host = source_utils.is_host_valid(i, hostDict)
if not valid: continue
sources.append({'source': host, 'quality': 'SD', 'language': 'de', 'url': i, 'direct': False, 'debridonly': False, 'checkquality': True})
return sources
except:
return sources
def resolve(self, url):
return url
def __search(self, titles, year):
try:
t = [cleantitle.get(i) for i in set(titles) if i]
y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
r = client.request(urlparse.urljoin(self.base_link, self.search_link), post={'query': cleantitle.query(titles[0])})
r = dom_parser.parse_dom(r, 'li', attrs={'class': 'entTd'})
r = dom_parser.parse_dom(r, 'div', attrs={'class': 've-screen'}, req='title')
r = [(dom_parser.parse_dom(i, 'a', req='href'), i.attrs['title'].split(' - ')[0]) for i in r]
r = [(i[0][0].attrs['href'], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
r = sorted(r, key=lambda i: int(i[2]), reverse=True) # with year > no year
r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]
return source_utils.strip_domain(r)
except:
return |
<|file_name|>horrorkino.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Covenant Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import dom_parser
from resources.lib.modules import source_utils
class source:
def __init__(self):
<|fim_middle|>
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = self.__search([localtitle] + source_utils.aliases_to_array(aliases), year)
if not url and title != localtitle: url = self.__search([title] + source_utils.aliases_to_array(aliases), year)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
sources = []
try:
if not url:
return sources
r = client.request(urlparse.urljoin(self.base_link, url))
r = re.findall('''vicode\s*=\s*["'](.*?)["'];''', r)[0].decode('string_escape')
r = dom_parser.parse_dom(r, 'iframe', req='src')
r = [i.attrs['src'] for i in r]
for i in r:
valid, host = source_utils.is_host_valid(i, hostDict)
if not valid: continue
sources.append({'source': host, 'quality': 'SD', 'language': 'de', 'url': i, 'direct': False, 'debridonly': False, 'checkquality': True})
return sources
except:
return sources
def resolve(self, url):
return url
def __search(self, titles, year):
try:
t = [cleantitle.get(i) for i in set(titles) if i]
y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
r = client.request(urlparse.urljoin(self.base_link, self.search_link), post={'query': cleantitle.query(titles[0])})
r = dom_parser.parse_dom(r, 'li', attrs={'class': 'entTd'})
r = dom_parser.parse_dom(r, 'div', attrs={'class': 've-screen'}, req='title')
r = [(dom_parser.parse_dom(i, 'a', req='href'), i.attrs['title'].split(' - ')[0]) for i in r]
r = [(i[0][0].attrs['href'], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
r = sorted(r, key=lambda i: int(i[2]), reverse=True) # with year > no year
r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]
return source_utils.strip_domain(r)
except:
return
<|fim▁end|> | self.priority = 1
self.language = ['de']
self.genre_filter = ['horror']
self.domains = ['horrorkino.do.am']
self.base_link = 'http://horrorkino.do.am/'
self.search_link = 'video/shv' |
<|file_name|>horrorkino.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Covenant Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import dom_parser
from resources.lib.modules import source_utils
class source:
def __init__(self):
self.priority = 1
self.language = ['de']
self.genre_filter = ['horror']
self.domains = ['horrorkino.do.am']
self.base_link = 'http://horrorkino.do.am/'
self.search_link = 'video/shv'
def movie(self, imdb, title, localtitle, aliases, year):
<|fim_middle|>
def sources(self, url, hostDict, hostprDict):
sources = []
try:
if not url:
return sources
r = client.request(urlparse.urljoin(self.base_link, url))
r = re.findall('''vicode\s*=\s*["'](.*?)["'];''', r)[0].decode('string_escape')
r = dom_parser.parse_dom(r, 'iframe', req='src')
r = [i.attrs['src'] for i in r]
for i in r:
valid, host = source_utils.is_host_valid(i, hostDict)
if not valid: continue
sources.append({'source': host, 'quality': 'SD', 'language': 'de', 'url': i, 'direct': False, 'debridonly': False, 'checkquality': True})
return sources
except:
return sources
def resolve(self, url):
return url
def __search(self, titles, year):
try:
t = [cleantitle.get(i) for i in set(titles) if i]
y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
r = client.request(urlparse.urljoin(self.base_link, self.search_link), post={'query': cleantitle.query(titles[0])})
r = dom_parser.parse_dom(r, 'li', attrs={'class': 'entTd'})
r = dom_parser.parse_dom(r, 'div', attrs={'class': 've-screen'}, req='title')
r = [(dom_parser.parse_dom(i, 'a', req='href'), i.attrs['title'].split(' - ')[0]) for i in r]
r = [(i[0][0].attrs['href'], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
r = sorted(r, key=lambda i: int(i[2]), reverse=True) # with year > no year
r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]
return source_utils.strip_domain(r)
except:
return
<|fim▁end|> | try:
url = self.__search([localtitle] + source_utils.aliases_to_array(aliases), year)
if not url and title != localtitle: url = self.__search([title] + source_utils.aliases_to_array(aliases), year)
return url
except:
return |
<|file_name|>horrorkino.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Covenant Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import dom_parser
from resources.lib.modules import source_utils
class source:
def __init__(self):
self.priority = 1
self.language = ['de']
self.genre_filter = ['horror']
self.domains = ['horrorkino.do.am']
self.base_link = 'http://horrorkino.do.am/'
self.search_link = 'video/shv'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = self.__search([localtitle] + source_utils.aliases_to_array(aliases), year)
if not url and title != localtitle: url = self.__search([title] + source_utils.aliases_to_array(aliases), year)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
<|fim_middle|>
def resolve(self, url):
return url
def __search(self, titles, year):
try:
t = [cleantitle.get(i) for i in set(titles) if i]
y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
r = client.request(urlparse.urljoin(self.base_link, self.search_link), post={'query': cleantitle.query(titles[0])})
r = dom_parser.parse_dom(r, 'li', attrs={'class': 'entTd'})
r = dom_parser.parse_dom(r, 'div', attrs={'class': 've-screen'}, req='title')
r = [(dom_parser.parse_dom(i, 'a', req='href'), i.attrs['title'].split(' - ')[0]) for i in r]
r = [(i[0][0].attrs['href'], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
r = sorted(r, key=lambda i: int(i[2]), reverse=True) # with year > no year
r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]
return source_utils.strip_domain(r)
except:
return
<|fim▁end|> | sources = []
try:
if not url:
return sources
r = client.request(urlparse.urljoin(self.base_link, url))
r = re.findall('''vicode\s*=\s*["'](.*?)["'];''', r)[0].decode('string_escape')
r = dom_parser.parse_dom(r, 'iframe', req='src')
r = [i.attrs['src'] for i in r]
for i in r:
valid, host = source_utils.is_host_valid(i, hostDict)
if not valid: continue
sources.append({'source': host, 'quality': 'SD', 'language': 'de', 'url': i, 'direct': False, 'debridonly': False, 'checkquality': True})
return sources
except:
return sources |
<|file_name|>horrorkino.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Covenant Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import dom_parser
from resources.lib.modules import source_utils
class source:
def __init__(self):
self.priority = 1
self.language = ['de']
self.genre_filter = ['horror']
self.domains = ['horrorkino.do.am']
self.base_link = 'http://horrorkino.do.am/'
self.search_link = 'video/shv'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = self.__search([localtitle] + source_utils.aliases_to_array(aliases), year)
if not url and title != localtitle: url = self.__search([title] + source_utils.aliases_to_array(aliases), year)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
sources = []
try:
if not url:
return sources
r = client.request(urlparse.urljoin(self.base_link, url))
r = re.findall('''vicode\s*=\s*["'](.*?)["'];''', r)[0].decode('string_escape')
r = dom_parser.parse_dom(r, 'iframe', req='src')
r = [i.attrs['src'] for i in r]
for i in r:
valid, host = source_utils.is_host_valid(i, hostDict)
if not valid: continue
sources.append({'source': host, 'quality': 'SD', 'language': 'de', 'url': i, 'direct': False, 'debridonly': False, 'checkquality': True})
return sources
except:
return sources
def resolve(self, url):
<|fim_middle|>
def __search(self, titles, year):
try:
t = [cleantitle.get(i) for i in set(titles) if i]
y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
r = client.request(urlparse.urljoin(self.base_link, self.search_link), post={'query': cleantitle.query(titles[0])})
r = dom_parser.parse_dom(r, 'li', attrs={'class': 'entTd'})
r = dom_parser.parse_dom(r, 'div', attrs={'class': 've-screen'}, req='title')
r = [(dom_parser.parse_dom(i, 'a', req='href'), i.attrs['title'].split(' - ')[0]) for i in r]
r = [(i[0][0].attrs['href'], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
r = sorted(r, key=lambda i: int(i[2]), reverse=True) # with year > no year
r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]
return source_utils.strip_domain(r)
except:
return
<|fim▁end|> | return url |
<|file_name|>horrorkino.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Covenant Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import dom_parser
from resources.lib.modules import source_utils
class source:
def __init__(self):
self.priority = 1
self.language = ['de']
self.genre_filter = ['horror']
self.domains = ['horrorkino.do.am']
self.base_link = 'http://horrorkino.do.am/'
self.search_link = 'video/shv'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = self.__search([localtitle] + source_utils.aliases_to_array(aliases), year)
if not url and title != localtitle: url = self.__search([title] + source_utils.aliases_to_array(aliases), year)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
sources = []
try:
if not url:
return sources
r = client.request(urlparse.urljoin(self.base_link, url))
r = re.findall('''vicode\s*=\s*["'](.*?)["'];''', r)[0].decode('string_escape')
r = dom_parser.parse_dom(r, 'iframe', req='src')
r = [i.attrs['src'] for i in r]
for i in r:
valid, host = source_utils.is_host_valid(i, hostDict)
if not valid: continue
sources.append({'source': host, 'quality': 'SD', 'language': 'de', 'url': i, 'direct': False, 'debridonly': False, 'checkquality': True})
return sources
except:
return sources
def resolve(self, url):
return url
def __search(self, titles, year):
<|fim_middle|>
<|fim▁end|> | try:
t = [cleantitle.get(i) for i in set(titles) if i]
y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
r = client.request(urlparse.urljoin(self.base_link, self.search_link), post={'query': cleantitle.query(titles[0])})
r = dom_parser.parse_dom(r, 'li', attrs={'class': 'entTd'})
r = dom_parser.parse_dom(r, 'div', attrs={'class': 've-screen'}, req='title')
r = [(dom_parser.parse_dom(i, 'a', req='href'), i.attrs['title'].split(' - ')[0]) for i in r]
r = [(i[0][0].attrs['href'], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
r = sorted(r, key=lambda i: int(i[2]), reverse=True) # with year > no year
r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]
return source_utils.strip_domain(r)
except:
return |
<|file_name|>horrorkino.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Covenant Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import dom_parser
from resources.lib.modules import source_utils
class source:
def __init__(self):
self.priority = 1
self.language = ['de']
self.genre_filter = ['horror']
self.domains = ['horrorkino.do.am']
self.base_link = 'http://horrorkino.do.am/'
self.search_link = 'video/shv'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = self.__search([localtitle] + source_utils.aliases_to_array(aliases), year)
if not url and title != localtitle: <|fim_middle|>
return url
except:
return
def sources(self, url, hostDict, hostprDict):
sources = []
try:
if not url:
return sources
r = client.request(urlparse.urljoin(self.base_link, url))
r = re.findall('''vicode\s*=\s*["'](.*?)["'];''', r)[0].decode('string_escape')
r = dom_parser.parse_dom(r, 'iframe', req='src')
r = [i.attrs['src'] for i in r]
for i in r:
valid, host = source_utils.is_host_valid(i, hostDict)
if not valid: continue
sources.append({'source': host, 'quality': 'SD', 'language': 'de', 'url': i, 'direct': False, 'debridonly': False, 'checkquality': True})
return sources
except:
return sources
def resolve(self, url):
return url
def __search(self, titles, year):
try:
t = [cleantitle.get(i) for i in set(titles) if i]
y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
r = client.request(urlparse.urljoin(self.base_link, self.search_link), post={'query': cleantitle.query(titles[0])})
r = dom_parser.parse_dom(r, 'li', attrs={'class': 'entTd'})
r = dom_parser.parse_dom(r, 'div', attrs={'class': 've-screen'}, req='title')
r = [(dom_parser.parse_dom(i, 'a', req='href'), i.attrs['title'].split(' - ')[0]) for i in r]
r = [(i[0][0].attrs['href'], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
r = sorted(r, key=lambda i: int(i[2]), reverse=True) # with year > no year
r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]
return source_utils.strip_domain(r)
except:
return
<|fim▁end|> | url = self.__search([title] + source_utils.aliases_to_array(aliases), year) |
<|file_name|>horrorkino.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Covenant Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import dom_parser
from resources.lib.modules import source_utils
class source:
def __init__(self):
self.priority = 1
self.language = ['de']
self.genre_filter = ['horror']
self.domains = ['horrorkino.do.am']
self.base_link = 'http://horrorkino.do.am/'
self.search_link = 'video/shv'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = self.__search([localtitle] + source_utils.aliases_to_array(aliases), year)
if not url and title != localtitle: url = self.__search([title] + source_utils.aliases_to_array(aliases), year)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
sources = []
try:
if not url:
<|fim_middle|>
r = client.request(urlparse.urljoin(self.base_link, url))
r = re.findall('''vicode\s*=\s*["'](.*?)["'];''', r)[0].decode('string_escape')
r = dom_parser.parse_dom(r, 'iframe', req='src')
r = [i.attrs['src'] for i in r]
for i in r:
valid, host = source_utils.is_host_valid(i, hostDict)
if not valid: continue
sources.append({'source': host, 'quality': 'SD', 'language': 'de', 'url': i, 'direct': False, 'debridonly': False, 'checkquality': True})
return sources
except:
return sources
def resolve(self, url):
return url
def __search(self, titles, year):
try:
t = [cleantitle.get(i) for i in set(titles) if i]
y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
r = client.request(urlparse.urljoin(self.base_link, self.search_link), post={'query': cleantitle.query(titles[0])})
r = dom_parser.parse_dom(r, 'li', attrs={'class': 'entTd'})
r = dom_parser.parse_dom(r, 'div', attrs={'class': 've-screen'}, req='title')
r = [(dom_parser.parse_dom(i, 'a', req='href'), i.attrs['title'].split(' - ')[0]) for i in r]
r = [(i[0][0].attrs['href'], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
r = sorted(r, key=lambda i: int(i[2]), reverse=True) # with year > no year
r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]
return source_utils.strip_domain(r)
except:
return
<|fim▁end|> | return sources |
<|file_name|>horrorkino.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Covenant Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import dom_parser
from resources.lib.modules import source_utils
class source:
def __init__(self):
self.priority = 1
self.language = ['de']
self.genre_filter = ['horror']
self.domains = ['horrorkino.do.am']
self.base_link = 'http://horrorkino.do.am/'
self.search_link = 'video/shv'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = self.__search([localtitle] + source_utils.aliases_to_array(aliases), year)
if not url and title != localtitle: url = self.__search([title] + source_utils.aliases_to_array(aliases), year)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
sources = []
try:
if not url:
return sources
r = client.request(urlparse.urljoin(self.base_link, url))
r = re.findall('''vicode\s*=\s*["'](.*?)["'];''', r)[0].decode('string_escape')
r = dom_parser.parse_dom(r, 'iframe', req='src')
r = [i.attrs['src'] for i in r]
for i in r:
valid, host = source_utils.is_host_valid(i, hostDict)
if not valid: <|fim_middle|>
sources.append({'source': host, 'quality': 'SD', 'language': 'de', 'url': i, 'direct': False, 'debridonly': False, 'checkquality': True})
return sources
except:
return sources
def resolve(self, url):
return url
def __search(self, titles, year):
try:
t = [cleantitle.get(i) for i in set(titles) if i]
y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
r = client.request(urlparse.urljoin(self.base_link, self.search_link), post={'query': cleantitle.query(titles[0])})
r = dom_parser.parse_dom(r, 'li', attrs={'class': 'entTd'})
r = dom_parser.parse_dom(r, 'div', attrs={'class': 've-screen'}, req='title')
r = [(dom_parser.parse_dom(i, 'a', req='href'), i.attrs['title'].split(' - ')[0]) for i in r]
r = [(i[0][0].attrs['href'], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
r = sorted(r, key=lambda i: int(i[2]), reverse=True) # with year > no year
r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]
return source_utils.strip_domain(r)
except:
return
<|fim▁end|> | continue |
<|file_name|>horrorkino.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Covenant Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import dom_parser
from resources.lib.modules import source_utils
class source:
def <|fim_middle|>(self):
self.priority = 1
self.language = ['de']
self.genre_filter = ['horror']
self.domains = ['horrorkino.do.am']
self.base_link = 'http://horrorkino.do.am/'
self.search_link = 'video/shv'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = self.__search([localtitle] + source_utils.aliases_to_array(aliases), year)
if not url and title != localtitle: url = self.__search([title] + source_utils.aliases_to_array(aliases), year)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
sources = []
try:
if not url:
return sources
r = client.request(urlparse.urljoin(self.base_link, url))
r = re.findall('''vicode\s*=\s*["'](.*?)["'];''', r)[0].decode('string_escape')
r = dom_parser.parse_dom(r, 'iframe', req='src')
r = [i.attrs['src'] for i in r]
for i in r:
valid, host = source_utils.is_host_valid(i, hostDict)
if not valid: continue
sources.append({'source': host, 'quality': 'SD', 'language': 'de', 'url': i, 'direct': False, 'debridonly': False, 'checkquality': True})
return sources
except:
return sources
def resolve(self, url):
return url
def __search(self, titles, year):
try:
t = [cleantitle.get(i) for i in set(titles) if i]
y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
r = client.request(urlparse.urljoin(self.base_link, self.search_link), post={'query': cleantitle.query(titles[0])})
r = dom_parser.parse_dom(r, 'li', attrs={'class': 'entTd'})
r = dom_parser.parse_dom(r, 'div', attrs={'class': 've-screen'}, req='title')
r = [(dom_parser.parse_dom(i, 'a', req='href'), i.attrs['title'].split(' - ')[0]) for i in r]
r = [(i[0][0].attrs['href'], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
r = sorted(r, key=lambda i: int(i[2]), reverse=True) # with year > no year
r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]
return source_utils.strip_domain(r)
except:
return
<|fim▁end|> | __init__ |
<|file_name|>horrorkino.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Covenant Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import dom_parser
from resources.lib.modules import source_utils
class source:
def __init__(self):
self.priority = 1
self.language = ['de']
self.genre_filter = ['horror']
self.domains = ['horrorkino.do.am']
self.base_link = 'http://horrorkino.do.am/'
self.search_link = 'video/shv'
def <|fim_middle|>(self, imdb, title, localtitle, aliases, year):
try:
url = self.__search([localtitle] + source_utils.aliases_to_array(aliases), year)
if not url and title != localtitle: url = self.__search([title] + source_utils.aliases_to_array(aliases), year)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
sources = []
try:
if not url:
return sources
r = client.request(urlparse.urljoin(self.base_link, url))
r = re.findall('''vicode\s*=\s*["'](.*?)["'];''', r)[0].decode('string_escape')
r = dom_parser.parse_dom(r, 'iframe', req='src')
r = [i.attrs['src'] for i in r]
for i in r:
valid, host = source_utils.is_host_valid(i, hostDict)
if not valid: continue
sources.append({'source': host, 'quality': 'SD', 'language': 'de', 'url': i, 'direct': False, 'debridonly': False, 'checkquality': True})
return sources
except:
return sources
def resolve(self, url):
return url
def __search(self, titles, year):
try:
t = [cleantitle.get(i) for i in set(titles) if i]
y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
r = client.request(urlparse.urljoin(self.base_link, self.search_link), post={'query': cleantitle.query(titles[0])})
r = dom_parser.parse_dom(r, 'li', attrs={'class': 'entTd'})
r = dom_parser.parse_dom(r, 'div', attrs={'class': 've-screen'}, req='title')
r = [(dom_parser.parse_dom(i, 'a', req='href'), i.attrs['title'].split(' - ')[0]) for i in r]
r = [(i[0][0].attrs['href'], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
r = sorted(r, key=lambda i: int(i[2]), reverse=True) # with year > no year
r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]
return source_utils.strip_domain(r)
except:
return
<|fim▁end|> | movie |
<|file_name|>horrorkino.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Covenant Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import dom_parser
from resources.lib.modules import source_utils
class source:
def __init__(self):
self.priority = 1
self.language = ['de']
self.genre_filter = ['horror']
self.domains = ['horrorkino.do.am']
self.base_link = 'http://horrorkino.do.am/'
self.search_link = 'video/shv'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = self.__search([localtitle] + source_utils.aliases_to_array(aliases), year)
if not url and title != localtitle: url = self.__search([title] + source_utils.aliases_to_array(aliases), year)
return url
except:
return
def <|fim_middle|>(self, url, hostDict, hostprDict):
sources = []
try:
if not url:
return sources
r = client.request(urlparse.urljoin(self.base_link, url))
r = re.findall('''vicode\s*=\s*["'](.*?)["'];''', r)[0].decode('string_escape')
r = dom_parser.parse_dom(r, 'iframe', req='src')
r = [i.attrs['src'] for i in r]
for i in r:
valid, host = source_utils.is_host_valid(i, hostDict)
if not valid: continue
sources.append({'source': host, 'quality': 'SD', 'language': 'de', 'url': i, 'direct': False, 'debridonly': False, 'checkquality': True})
return sources
except:
return sources
def resolve(self, url):
return url
def __search(self, titles, year):
try:
t = [cleantitle.get(i) for i in set(titles) if i]
y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
r = client.request(urlparse.urljoin(self.base_link, self.search_link), post={'query': cleantitle.query(titles[0])})
r = dom_parser.parse_dom(r, 'li', attrs={'class': 'entTd'})
r = dom_parser.parse_dom(r, 'div', attrs={'class': 've-screen'}, req='title')
r = [(dom_parser.parse_dom(i, 'a', req='href'), i.attrs['title'].split(' - ')[0]) for i in r]
r = [(i[0][0].attrs['href'], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
r = sorted(r, key=lambda i: int(i[2]), reverse=True) # with year > no year
r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]
return source_utils.strip_domain(r)
except:
return
<|fim▁end|> | sources |
<|file_name|>horrorkino.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Covenant Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import dom_parser
from resources.lib.modules import source_utils
class source:
def __init__(self):
self.priority = 1
self.language = ['de']
self.genre_filter = ['horror']
self.domains = ['horrorkino.do.am']
self.base_link = 'http://horrorkino.do.am/'
self.search_link = 'video/shv'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = self.__search([localtitle] + source_utils.aliases_to_array(aliases), year)
if not url and title != localtitle: url = self.__search([title] + source_utils.aliases_to_array(aliases), year)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
sources = []
try:
if not url:
return sources
r = client.request(urlparse.urljoin(self.base_link, url))
r = re.findall('''vicode\s*=\s*["'](.*?)["'];''', r)[0].decode('string_escape')
r = dom_parser.parse_dom(r, 'iframe', req='src')
r = [i.attrs['src'] for i in r]
for i in r:
valid, host = source_utils.is_host_valid(i, hostDict)
if not valid: continue
sources.append({'source': host, 'quality': 'SD', 'language': 'de', 'url': i, 'direct': False, 'debridonly': False, 'checkquality': True})
return sources
except:
return sources
def <|fim_middle|>(self, url):
return url
def __search(self, titles, year):
try:
t = [cleantitle.get(i) for i in set(titles) if i]
y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
r = client.request(urlparse.urljoin(self.base_link, self.search_link), post={'query': cleantitle.query(titles[0])})
r = dom_parser.parse_dom(r, 'li', attrs={'class': 'entTd'})
r = dom_parser.parse_dom(r, 'div', attrs={'class': 've-screen'}, req='title')
r = [(dom_parser.parse_dom(i, 'a', req='href'), i.attrs['title'].split(' - ')[0]) for i in r]
r = [(i[0][0].attrs['href'], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
r = sorted(r, key=lambda i: int(i[2]), reverse=True) # with year > no year
r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]
return source_utils.strip_domain(r)
except:
return
<|fim▁end|> | resolve |
<|file_name|>horrorkino.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Covenant Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import dom_parser
from resources.lib.modules import source_utils
class source:
def __init__(self):
self.priority = 1
self.language = ['de']
self.genre_filter = ['horror']
self.domains = ['horrorkino.do.am']
self.base_link = 'http://horrorkino.do.am/'
self.search_link = 'video/shv'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = self.__search([localtitle] + source_utils.aliases_to_array(aliases), year)
if not url and title != localtitle: url = self.__search([title] + source_utils.aliases_to_array(aliases), year)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
sources = []
try:
if not url:
return sources
r = client.request(urlparse.urljoin(self.base_link, url))
r = re.findall('''vicode\s*=\s*["'](.*?)["'];''', r)[0].decode('string_escape')
r = dom_parser.parse_dom(r, 'iframe', req='src')
r = [i.attrs['src'] for i in r]
for i in r:
valid, host = source_utils.is_host_valid(i, hostDict)
if not valid: continue
sources.append({'source': host, 'quality': 'SD', 'language': 'de', 'url': i, 'direct': False, 'debridonly': False, 'checkquality': True})
return sources
except:
return sources
def resolve(self, url):
return url
def <|fim_middle|>(self, titles, year):
try:
t = [cleantitle.get(i) for i in set(titles) if i]
y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
r = client.request(urlparse.urljoin(self.base_link, self.search_link), post={'query': cleantitle.query(titles[0])})
r = dom_parser.parse_dom(r, 'li', attrs={'class': 'entTd'})
r = dom_parser.parse_dom(r, 'div', attrs={'class': 've-screen'}, req='title')
r = [(dom_parser.parse_dom(i, 'a', req='href'), i.attrs['title'].split(' - ')[0]) for i in r]
r = [(i[0][0].attrs['href'], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
r = sorted(r, key=lambda i: int(i[2]), reverse=True) # with year > no year
r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]
return source_utils.strip_domain(r)
except:
return
<|fim▁end|> | __search |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>"""
Tools and data structures for working with genomic intervals (or sets of
regions on a line in general) efficiently.
"""
# For compatibility with existing stuff<|fim▁hole|>from bx.intervals.intersection import *<|fim▁end|>
<|file_name|>pipeline.py<|end_file_name|><|fim▁begin|>from __future__ import absolute_import, print_function
import logging
from django.contrib import messages
from django.core.urlresolvers import reverse<|fim▁hole|>from sentry.pipeline import Pipeline
from sentry.models import Identity, IdentityStatus, IdentityProvider
from . import default_manager
IDENTITY_LINKED = _("Your {identity_provider} account has been associated with your Sentry account")
logger = logging.getLogger('sentry.identity')
class IdentityProviderPipeline(Pipeline):
logger = logger
pipeline_name = 'identity_provider'
provider_manager = default_manager
provider_model_cls = IdentityProvider
def redirect_url(self):
associate_url = reverse('sentry-extension-setup', kwargs={
# TODO(adhiraj): Remove provider_id from the callback URL, it's unused.
'provider_id': 'default',
})
# Use configured redirect_url if specified for the pipeline if available
return self.config.get('redirect_url', associate_url)
def finish_pipeline(self):
identity = self.provider.build_identity(self.state.data)
defaults = {
'status': IdentityStatus.VALID,
'scopes': identity.get('scopes', []),
'data': identity.get('data', {}),
'date_verified': timezone.now(),
}
identity, created = Identity.objects.get_or_create(
idp=self.provider_model,
user=self.request.user,
external_id=identity['id'],
defaults=defaults,
)
if not created:
identity.update(**defaults)
messages.add_message(self.request, messages.SUCCESS, IDENTITY_LINKED.format(
identity_provider=self.provider.name,
))
self.state.clear()
# TODO(epurkhiser): When we have more identities and have built out an
# identity management page that supports these new identities (not
# social-auth ones), redirect to the identities page.
return HttpResponseRedirect(reverse('sentry-account-settings'))<|fim▁end|> | from django.http import HttpResponseRedirect
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
|
<|file_name|>pipeline.py<|end_file_name|><|fim▁begin|>from __future__ import absolute_import, print_function
import logging
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from sentry.pipeline import Pipeline
from sentry.models import Identity, IdentityStatus, IdentityProvider
from . import default_manager
IDENTITY_LINKED = _("Your {identity_provider} account has been associated with your Sentry account")
logger = logging.getLogger('sentry.identity')
class IdentityProviderPipeline(Pipeline):
<|fim_middle|>
<|fim▁end|> | logger = logger
pipeline_name = 'identity_provider'
provider_manager = default_manager
provider_model_cls = IdentityProvider
def redirect_url(self):
associate_url = reverse('sentry-extension-setup', kwargs={
# TODO(adhiraj): Remove provider_id from the callback URL, it's unused.
'provider_id': 'default',
})
# Use configured redirect_url if specified for the pipeline if available
return self.config.get('redirect_url', associate_url)
def finish_pipeline(self):
identity = self.provider.build_identity(self.state.data)
defaults = {
'status': IdentityStatus.VALID,
'scopes': identity.get('scopes', []),
'data': identity.get('data', {}),
'date_verified': timezone.now(),
}
identity, created = Identity.objects.get_or_create(
idp=self.provider_model,
user=self.request.user,
external_id=identity['id'],
defaults=defaults,
)
if not created:
identity.update(**defaults)
messages.add_message(self.request, messages.SUCCESS, IDENTITY_LINKED.format(
identity_provider=self.provider.name,
))
self.state.clear()
# TODO(epurkhiser): When we have more identities and have built out an
# identity management page that supports these new identities (not
# social-auth ones), redirect to the identities page.
return HttpResponseRedirect(reverse('sentry-account-settings')) |
<|file_name|>pipeline.py<|end_file_name|><|fim▁begin|>from __future__ import absolute_import, print_function
import logging
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from sentry.pipeline import Pipeline
from sentry.models import Identity, IdentityStatus, IdentityProvider
from . import default_manager
IDENTITY_LINKED = _("Your {identity_provider} account has been associated with your Sentry account")
logger = logging.getLogger('sentry.identity')
class IdentityProviderPipeline(Pipeline):
logger = logger
pipeline_name = 'identity_provider'
provider_manager = default_manager
provider_model_cls = IdentityProvider
def redirect_url(self):
<|fim_middle|>
def finish_pipeline(self):
identity = self.provider.build_identity(self.state.data)
defaults = {
'status': IdentityStatus.VALID,
'scopes': identity.get('scopes', []),
'data': identity.get('data', {}),
'date_verified': timezone.now(),
}
identity, created = Identity.objects.get_or_create(
idp=self.provider_model,
user=self.request.user,
external_id=identity['id'],
defaults=defaults,
)
if not created:
identity.update(**defaults)
messages.add_message(self.request, messages.SUCCESS, IDENTITY_LINKED.format(
identity_provider=self.provider.name,
))
self.state.clear()
# TODO(epurkhiser): When we have more identities and have built out an
# identity management page that supports these new identities (not
# social-auth ones), redirect to the identities page.
return HttpResponseRedirect(reverse('sentry-account-settings'))
<|fim▁end|> | associate_url = reverse('sentry-extension-setup', kwargs={
# TODO(adhiraj): Remove provider_id from the callback URL, it's unused.
'provider_id': 'default',
})
# Use configured redirect_url if specified for the pipeline if available
return self.config.get('redirect_url', associate_url) |
<|file_name|>pipeline.py<|end_file_name|><|fim▁begin|>from __future__ import absolute_import, print_function
import logging
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from sentry.pipeline import Pipeline
from sentry.models import Identity, IdentityStatus, IdentityProvider
from . import default_manager
IDENTITY_LINKED = _("Your {identity_provider} account has been associated with your Sentry account")
logger = logging.getLogger('sentry.identity')
class IdentityProviderPipeline(Pipeline):
logger = logger
pipeline_name = 'identity_provider'
provider_manager = default_manager
provider_model_cls = IdentityProvider
def redirect_url(self):
associate_url = reverse('sentry-extension-setup', kwargs={
# TODO(adhiraj): Remove provider_id from the callback URL, it's unused.
'provider_id': 'default',
})
# Use configured redirect_url if specified for the pipeline if available
return self.config.get('redirect_url', associate_url)
def finish_pipeline(self):
<|fim_middle|>
<|fim▁end|> | identity = self.provider.build_identity(self.state.data)
defaults = {
'status': IdentityStatus.VALID,
'scopes': identity.get('scopes', []),
'data': identity.get('data', {}),
'date_verified': timezone.now(),
}
identity, created = Identity.objects.get_or_create(
idp=self.provider_model,
user=self.request.user,
external_id=identity['id'],
defaults=defaults,
)
if not created:
identity.update(**defaults)
messages.add_message(self.request, messages.SUCCESS, IDENTITY_LINKED.format(
identity_provider=self.provider.name,
))
self.state.clear()
# TODO(epurkhiser): When we have more identities and have built out an
# identity management page that supports these new identities (not
# social-auth ones), redirect to the identities page.
return HttpResponseRedirect(reverse('sentry-account-settings')) |
<|file_name|>pipeline.py<|end_file_name|><|fim▁begin|>from __future__ import absolute_import, print_function
import logging
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from sentry.pipeline import Pipeline
from sentry.models import Identity, IdentityStatus, IdentityProvider
from . import default_manager
IDENTITY_LINKED = _("Your {identity_provider} account has been associated with your Sentry account")
logger = logging.getLogger('sentry.identity')
class IdentityProviderPipeline(Pipeline):
logger = logger
pipeline_name = 'identity_provider'
provider_manager = default_manager
provider_model_cls = IdentityProvider
def redirect_url(self):
associate_url = reverse('sentry-extension-setup', kwargs={
# TODO(adhiraj): Remove provider_id from the callback URL, it's unused.
'provider_id': 'default',
})
# Use configured redirect_url if specified for the pipeline if available
return self.config.get('redirect_url', associate_url)
def finish_pipeline(self):
identity = self.provider.build_identity(self.state.data)
defaults = {
'status': IdentityStatus.VALID,
'scopes': identity.get('scopes', []),
'data': identity.get('data', {}),
'date_verified': timezone.now(),
}
identity, created = Identity.objects.get_or_create(
idp=self.provider_model,
user=self.request.user,
external_id=identity['id'],
defaults=defaults,
)
if not created:
<|fim_middle|>
messages.add_message(self.request, messages.SUCCESS, IDENTITY_LINKED.format(
identity_provider=self.provider.name,
))
self.state.clear()
# TODO(epurkhiser): When we have more identities and have built out an
# identity management page that supports these new identities (not
# social-auth ones), redirect to the identities page.
return HttpResponseRedirect(reverse('sentry-account-settings'))
<|fim▁end|> | identity.update(**defaults) |
<|file_name|>pipeline.py<|end_file_name|><|fim▁begin|>from __future__ import absolute_import, print_function
import logging
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from sentry.pipeline import Pipeline
from sentry.models import Identity, IdentityStatus, IdentityProvider
from . import default_manager
IDENTITY_LINKED = _("Your {identity_provider} account has been associated with your Sentry account")
logger = logging.getLogger('sentry.identity')
class IdentityProviderPipeline(Pipeline):
logger = logger
pipeline_name = 'identity_provider'
provider_manager = default_manager
provider_model_cls = IdentityProvider
def <|fim_middle|>(self):
associate_url = reverse('sentry-extension-setup', kwargs={
# TODO(adhiraj): Remove provider_id from the callback URL, it's unused.
'provider_id': 'default',
})
# Use configured redirect_url if specified for the pipeline if available
return self.config.get('redirect_url', associate_url)
def finish_pipeline(self):
identity = self.provider.build_identity(self.state.data)
defaults = {
'status': IdentityStatus.VALID,
'scopes': identity.get('scopes', []),
'data': identity.get('data', {}),
'date_verified': timezone.now(),
}
identity, created = Identity.objects.get_or_create(
idp=self.provider_model,
user=self.request.user,
external_id=identity['id'],
defaults=defaults,
)
if not created:
identity.update(**defaults)
messages.add_message(self.request, messages.SUCCESS, IDENTITY_LINKED.format(
identity_provider=self.provider.name,
))
self.state.clear()
# TODO(epurkhiser): When we have more identities and have built out an
# identity management page that supports these new identities (not
# social-auth ones), redirect to the identities page.
return HttpResponseRedirect(reverse('sentry-account-settings'))
<|fim▁end|> | redirect_url |
<|file_name|>pipeline.py<|end_file_name|><|fim▁begin|>from __future__ import absolute_import, print_function
import logging
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from sentry.pipeline import Pipeline
from sentry.models import Identity, IdentityStatus, IdentityProvider
from . import default_manager
IDENTITY_LINKED = _("Your {identity_provider} account has been associated with your Sentry account")
logger = logging.getLogger('sentry.identity')
class IdentityProviderPipeline(Pipeline):
logger = logger
pipeline_name = 'identity_provider'
provider_manager = default_manager
provider_model_cls = IdentityProvider
def redirect_url(self):
associate_url = reverse('sentry-extension-setup', kwargs={
# TODO(adhiraj): Remove provider_id from the callback URL, it's unused.
'provider_id': 'default',
})
# Use configured redirect_url if specified for the pipeline if available
return self.config.get('redirect_url', associate_url)
def <|fim_middle|>(self):
identity = self.provider.build_identity(self.state.data)
defaults = {
'status': IdentityStatus.VALID,
'scopes': identity.get('scopes', []),
'data': identity.get('data', {}),
'date_verified': timezone.now(),
}
identity, created = Identity.objects.get_or_create(
idp=self.provider_model,
user=self.request.user,
external_id=identity['id'],
defaults=defaults,
)
if not created:
identity.update(**defaults)
messages.add_message(self.request, messages.SUCCESS, IDENTITY_LINKED.format(
identity_provider=self.provider.name,
))
self.state.clear()
# TODO(epurkhiser): When we have more identities and have built out an
# identity management page that supports these new identities (not
# social-auth ones), redirect to the identities page.
return HttpResponseRedirect(reverse('sentry-account-settings'))
<|fim▁end|> | finish_pipeline |