index (int64, 0–731k) | package (string, 2–98 chars, ⌀) | name (string, 1–76 chars) | docstring (string, 0–281k chars, ⌀) | code (string, 4–1.07M chars, ⌀) | signature (string, 2–42.8k chars, ⌀) |
---|---|---|---|---|---|
68,508 |
mltable.mltable
|
to_datetime
|
Configure conversion to datetime.
:param formats: Formats to try for datetime conversion. For example `%d-%m-%Y` for data in "day-month-year",
and `%Y-%m-%dT%H:%M:%S.%f` for "combined date and time representation" according to ISO 8601.
* %Y: Year with 4 digits
* %y: Year with 2 digits
* %m: Month in digits
* %b: Month represented by its abbreviated name in 3 letters, like Aug
* %B: Month represented by its full name, like August
* %d: Day in digits
* %H: Hour as represented in 24-hour clock time
* %I: Hour as represented in 12-hour clock time
* %M: Minute in 2 digits
* %S: Second in 2 digits
* %f: Microsecond
* %p: AM/PM designator
* %z: Timezone, for example: -0700
:type formats: str or builtin.list[str]
:param date_constant: If the column contains only time values, a date to apply to the resulting DateTime.
:type date_constant: Optional[str]
|
@staticmethod
def to_datetime(formats: Union[str, List[str]], date_constant: Optional[str] = None):
"""
Configure conversion to datetime.
:param formats: Formats to try for datetime conversion. For example `%d-%m-%Y` for data in "day-month-year",
and `%Y-%m-%dT%H:%M:%S.%f` for "combined date and time representation" according to ISO 8601.
* %Y: Year with 4 digits
* %y: Year with 2 digits
* %m: Month in digits
* %b: Month represented by its abbreviated name in 3 letters, like Aug
* %B: Month represented by its full name, like August
* %d: Day in digits
* %H: Hour as represented in 24-hour clock time
* %I: Hour as represented in 12-hour clock time
* %M: Minute in 2 digits
* %S: Second in 2 digits
* %f: Microsecond
* %p: AM/PM designator
* %z: Timezone, for example: -0700
:type formats: str or builtin.list[str]
:param date_constant: If the column contains only time values, a date to apply to the resulting DateTime.
:type date_constant: Optional[str]
"""
dt = DataType._create(FieldType.DATE)
type_name = _SIMPLE_TYPES.get(FieldType.DATE)
if isinstance(formats, str):
formats = [formats]
elif not (isinstance(formats, (list, tuple)) and all(isinstance(x, str) for x in formats)):
raise UserErrorException(
'Expect `formats` to be a single string, a list of strings, or a tuple of strings')
dt._arguments = {type_name: {'formats': formats}}
if date_constant is not None:
dt._arguments[type_name]['date_constant'] = date_constant
return dt
|
(formats: Union[str, List[str]], date_constant: Optional[str] = None)
|
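Illustration (not part of the dataset record above): a minimal, hedged sketch of how DataType.to_datetime might be combined with MLTable.convert_column_types. The MLTable folder path and the 'Date'/'Count' column names are hypothetical.

from mltable import load, DataType

tbl = load('./samples/mltable_sample')  # hypothetical MLTable folder
tbl = tbl.convert_column_types({
    # try day-month-year first, then the ISO 8601 combined date and time representation
    'Date': DataType.to_datetime(['%d-%m-%Y', '%Y-%m-%dT%H:%M:%S.%f']),
    'Count': DataType.to_int(),
})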
68,509 |
mltable.mltable
|
to_float
|
Configure conversion to 64-bit float.
|
@staticmethod
def to_float():
"""Configure conversion to 64-bit float."""
return DataType._create(FieldType.DECIMAL)
|
()
|
68,510 |
mltable.mltable
|
to_int
|
Configure conversion to 64-bit integer.
|
@staticmethod
def to_int():
"""Configure conversion to 64-bit integer."""
return DataType._create(FieldType.INTEGER)
|
()
|
68,511 |
mltable.mltable
|
to_stream
|
Configure conversion to stream.
|
@staticmethod
def to_stream():
"""Configure conversion to stream."""
return DataType._create(FieldType.STREAM)
|
()
|
68,512 |
mltable.mltable
|
to_string
|
Configure conversion to string.
|
@staticmethod
def to_string():
"""Configure conversion to string."""
return DataType._create(FieldType.STRING)
|
()
|
68,513 |
mltable.mltable
|
MLTable
|
Represents an MLTable.
An MLTable defines a series of lazily-evaluated, immutable operations to
load data from the data source. Data is not loaded from the source until
the MLTable is asked to deliver data.
|
class MLTable:
"""
Represents an MLTable.
An MLTable defines a series of lazily-evaluated, immutable operations to
load data from the data source. Data is not loaded from the source until
the MLTable is asked to deliver data.
"""
def __init__(self):
"""
Initialize a new MLTable.
This constructor is not supposed to be invoked directly. MLTable is
intended to be created using :func:`mltable.load`.
"""
self._loaded = False
@track(_get_logger, custom_dimensions={'app_name': _APP_NAME})
def _to_yaml_dict(self):
"""
Returns all the information associated with MLTable as a YAML-style dictionary.
:return: dict representation of this MLTable
:rtype: dict
"""
return yaml.safe_load(str(self))
@track(_get_logger, custom_dimensions={'app_name': _APP_NAME})
def __repr__(self):
"""
Returns all the information associated with MLTable as a YAML-style
string representation.
:return: string representation of this MLTable
:rtype: str
"""
self._check_loaded()
# _dataflow.to_yaml_string() serializes Serde units (anonymous value containing no data) as nulls
# this results in nested fields with empty values being serialized with nulls as value.
mltable_yaml_str = _wrap_rslex_function_call(self._dataflow.to_yaml_string)
mltable_yaml_dict = yaml.safe_load(mltable_yaml_str)
mltable_yaml_helper = MLTableYamlCleaner(mltable_yaml_dict=mltable_yaml_dict)
return str(mltable_yaml_helper)
@track(_get_logger, custom_dimensions={'app_name': _APP_NAME})
def __str__(self):
"""
Returns all the information associated with MLTable as a YAML-style
string representation.
:return: string representation of this MLTable
:rtype: str
"""
return self.__repr__()
def __eq__(self, other):
"""
Returns whether the given object equals this MLTable.
:param other: given object to compare
:type other: Any
:return: whether the given object equals this MLTable
:rtype: bool
"""
if not isinstance(other, MLTable):
return False
self_yaml = self._to_yaml_dict()
other_yaml = other._to_yaml_dict()
def have_same_key(key):
return self_yaml.get(key) == other_yaml.get(key)
return have_same_key(_TRANSFORMATIONS_SCHEMA_KEY) \
and have_same_key(_METADATA_SCHEMA_NAME) \
and have_same_key(_TRAITS_SECTION_KEY) \
and self.paths == other.paths # want to compare using original paths
@track(_get_logger, custom_dimensions={'app_name': _APP_NAME})
def _get_pyrecords(self, traceparent):
return _wrap_rslex_execute_func(func=lambda span_traceparent, _: get_rslex_executor().to_pyrecords(script=self._dataflow, traceparent=span_traceparent),
og_traceparent=traceparent,
fallback_func=lambda span_traceparent, span_context: _execute(activity=traceparent, dataflow=str(self), force_preppy=True, convert_preppy_to_pyrecords=True, span_context=span_context, traceparent=span_traceparent))
@track(_get_logger, custom_dimensions={'app_name': _APP_NAME})
def validate(self):
"""
Validates that this MLTable's data can be loaded; requires the MLTable's
data source(s) to be accessible from the current compute.
:return: None
:rtype: None
"""
take_mltable = self.take(1)
try:
records = take_mltable._get_pyrecords('MLTable.validate')
except Exception as e: # this is broad, but dataprepreader throws different errors
raise RuntimeError(_VALIDATE_ERR_MSG) from e
if len(records) != 1:
raise RuntimeError(_VALIDATE_ERR_MSG)
@track(_get_logger, custom_dimensions={'app_name': _APP_NAME})
def _ensure_random_seed(self, seed):
"""
If the given seed is not an integer or None, raises a UserErrorException. If
None, selects a random seed between 1 and 1000.
:param seed: possible value for random seed
:type seed: object
:return: valid random seed
:rtype: int
"""
if seed is None:
return random.randint(1, 1000)
if not isinstance(seed, int):
raise UserErrorException('A random seed must be an integer')
return seed
@track(_get_logger, custom_dimensions={'app_name': _APP_NAME})
def _check_loaded(self):
if not self._loaded:
raise UserErrorException('MLTable does not appear to be loaded correctly. Please use MLTable.load() to '
'load a MLTable YAML file into memory.')
@track(_get_logger, custom_dimensions={'app_name': _APP_NAME})
def _add_transformation_step(self, step, args, index=None):
"""
Adds the given transformation step and its associated arguments to
this MLTable's PyRsDataflow at the given index in the list of all
added transformation steps. Returns a new MLTable whose PyRsDataflow
is the PyRsDataflow resulting from the prior addition.
:param step: transformation step
:type step: str
:param args: arguments for given transformation step
:type args: object
:param index: optional argument to indicate at which index to add the step
:type index: int
:return: MLTable with resulting PyRsDataflow
:rtype: mltable.MLTable
"""
new_dataflow = _wrap_rslex_function_call(lambda: self._dataflow.add_transformation(step, args, index))
return MLTable._create_from_dataflow(new_dataflow, self._path_pairs, self._load_uri)
@track(_get_logger, custom_dimensions={'app_name': _APP_NAME})
def _get_columns_in_traits(self):
"""
Gets all the columns that are set in this MLTable's Traits.
:return: set of all Traits
:rtype: set[str]
"""
columns_in_traits = set()
timestamp_col = self.traits._check_and_get_trait(_TIMESTAMP_COLUMN_KEY)
if timestamp_col is not None:
columns_in_traits.add(timestamp_col)
index_cols = self.traits._check_and_get_trait(_INDEX_COLUMNS_KEY)
if index_cols is not None:
columns_in_traits.update(index_cols)
return columns_in_traits
@track(_get_logger, custom_dimensions={'app_name': _APP_NAME})
def _download(self, stream_column=None, target_path=None, ignore_not_found=False, storage_options=None):
if target_path is None:
target_path = tempfile.mkdtemp()
if stream_column is None:
stream_column = 'Path'
if stream_column != 'Path':
new_mltable = self._add_transformation_step('rename_columns', {stream_column: 'Path'})
else:
new_mltable = MLTable._create_from_dataflow(self._dataflow, None, None)
function_source_str = '{"r":["Function",[[],{"r":[]},{"r":["Function",[["row"],{"r":[]},' \
'{"r":["Invoke",[{"r":["Identifier","GetPortablePath"]},' \
'[{"r":["RecordField",[{"r":["Identifier","row"]},"Path"]]},""]]]}]]}]]}'
new_mltable = new_mltable._add_transformation_step('add_columns',
{
'language': 'Native',
'expressions':
[
{
'new_column': 'Portable Path',
'prior_column': 'Path',
'function_source': function_source_str
}
]
})
new_mltable = new_mltable._add_transformation_step('write_streams_to_files',
{
'streams_column': 'Path',
'destination':
{
'directory': str(target_path),
'handler': 'Local'
},
'file_names_column': 'Portable Path'
})
# append workspace information for the stream_column for backwards support
# AmlDatastore://workspaceblobstore/data/images/animals folder/1d.jpg
workspace_info = _try_resolve_workspace_info(storage_options)
if _has_sufficient_workspace_info(workspace_info):
new_mltable = \
MLTable._append_workspace_to_stream_info_conversion(new_mltable, workspace_info, stream_column)
download_records = new_mltable._get_pyrecords('MLTable._download')
return _validate_downloads(download_records, ignore_not_found, _get_logger())
@track(_get_logger, custom_dimensions={'app_name': _APP_NAME})
def _with_partition_size(self, partition_size, partition_size_unit='b'):
"""
Updates delimited files and JSON lines files to utilize the given partition size.
:param partition_size: Minimum batch size the data will be partitioned against.
:type partition_size: int
:param partition_size_unit: The memory unit the given partition_size is in, defaults to bytes. Supported options are
a :class:`mltable.MLTablePartitionSizeUnit` or a string as one of 'byte' ('b'), 'kilobyte' ('kb'),
'megabyte' ('mb'), or 'gigabyte' ('gb').
:type partition_size_unit: Union[str, mltable.MLTablePartitionSizeUnit]
:return: MLTable with updated partition size
:rtype: mltable.MLTable
"""
self._check_loaded()
partition_size = MLTablePartitionSize._parse(partition_size, partition_size_unit)
try:
new_py_rs_dataflow = self._dataflow.set_partition_size(partition_size)
return MLTable._create_from_dataflow(new_py_rs_dataflow, self._path_pairs, self._load_uri)
except AttributeError:  # TODO (nathof) remove fallback after dataprep release. expect error to occur when inserting PyRsDataflow
mltable_yaml_dict = self._to_yaml_dict()
for key in [_READ_DELIMITED_KEY, _READ_JSON_KEY]:
if key in mltable_yaml_dict[_TRANSFORMATIONS_SCHEMA_KEY][0]:
mltable_yaml_dict[_TRANSFORMATIONS_SCHEMA_KEY][0][key]['partition_size'] = partition_size
return MLTable._create_from_dict(mltable_yaml_dict, self._path_pairs, self._load_uri)
raise UserErrorException(
'transformation step read_delimited or read_json_lines is required to update partition_size')
except Exception as e:
_reclassify_rslex_error(e)
@track(_get_logger, activity_type=_PUBLIC_API, custom_dimensions={'app_name': _APP_NAME})
def to_pandas_dataframe(self):
"""
Load all records from the paths specified in the MLTable file into a Pandas DataFrame.
.. remarks::
The following code snippet shows how to use the
to_pandas_dataframe api to obtain a pandas dataframe corresponding
to the provided MLTable.
.. code-block:: python
from mltable import load
tbl = load('.\\samples\\mltable_sample')
pdf = tbl.to_pandas_dataframe()
print(pdf.shape)
:return: Pandas Dataframe containing the records from paths in this MLTable
:rtype: pandas.DataFrame
"""
self._check_loaded()
custom_dimensions = {'app_name': _APP_NAME}
if self._workspace_context:
custom_dimensions.update(self._workspace_context)
with _LoggerFactory.track_activity(_get_logger(), 'to_pandas_dataframe', _PUBLIC_API, custom_dimensions) \
as activity_logger:
try:
try:
return get_dataframe_reader().to_pandas_dataframe(self._dataflow)
except Exception as e:
# TODO (nathof) remove fallback after dprep release
# released version of dprep doesn't have check to disable clex execution if dataflow is PyRsDataflow
# so that is check given to signal unreleased version of dprep is used
if isinstance(e, AttributeError) \
and "'builtins.PyRsDataflow' object has no attribute 'add_step'" in e.args[0]:
return _wrap_rslex_function_call(lambda: get_dataframe_reader().to_pandas_dataframe(str(self)))
raise _reclassify_rslex_error(e)
except Exception as e:
_log_exception(activity_logger, e)
raise e
@track(_get_logger, custom_dimensions={'app_name': _APP_NAME})
def get_partition_count(self) -> int:
"""
Returns the number of data partitions underlying the data associated with this MLTable.
:return: data partitions in this MLTable
:rtype: int
"""
return _wrap_rslex_execute_func(func=lambda span_traceparent, _: get_rslex_executor().get_partition_count(self._dataflow, span_traceparent),
og_traceparent='MLTable.get_partition_count',
fallback_func=lambda _, span_context: get_partition_count_with_rslex(str(self), span_context))
@track(_get_logger, custom_dimensions={'app_name': _APP_NAME})
def select_partitions(self, partition_index_list):
"""
Adds a transformation step to select the given partitions.
.. remarks::
The following code snippet shows how to use the select_partitions api to select partitions
from the provided MLTable.
.. code-block:: python
partition_index_list = [1, 2]
mltable = mltable.select_partitions(partition_index_list)
:param partition_index_list: list of partition indices
:type partition_index_list: list of int
:return: MLTable with added transformation step
:rtype: mltable.MLTable
"""
self._check_loaded()
if isinstance(partition_index_list, int):
partition_index_list = [partition_index_list]
elif not (isinstance(partition_index_list, list) \
and len(partition_index_list) > 0 \
and all(map(lambda x: isinstance(x, int), partition_index_list))):
raise UserErrorException('partition_index_list should be an int or a list of ints with at least one element')
return self._add_transformation_step('select_partitions', partition_index_list)
@track(_get_logger, custom_dimensions={'app_name': _APP_NAME})
def extract_columns_from_partition_format(self, partition_format):
"""
Adds a transformation step to use the partition information of each path and extract them into columns
based on the specified partition format.
Format part '{column_name}' creates string column, and '{column_name:yyyy/MM/dd/HH/mm/ss}' creates
datetime column, where 'yyyy', 'MM', 'dd', 'HH', 'mm' and 'ss' are used to extract year, month, day,
hour, minute and second for the datetime type.
The format should start from the position of the first partition key and continue to the end of the file path.
For example, given the path '/Accounts/2019/01/01/data.csv' where the partition is by department name
and time, partition_format='/{Department}/{PartitionDate:yyyy/MM/dd}/data.csv'
creates a string column 'Department' with the value 'Accounts' and a datetime column 'PartitionDate'
with the value '2019-01-01'.
:param partition_format: Partition format to use to extract data into columns
:type partition_format: str
:return: MLTable whose partition format is set to given format
:rtype: mltable.MLTable
"""
self._check_loaded()
return self._add_transformation_step('extract_columns_from_partition_format',
{_PARTITION_FORMAT_KEY: partition_format},
0)
@track(_get_logger, custom_dimensions={'app_name': _APP_NAME})
def _get_partition_key_values(self, partition_keys=None):
"""Return unique key values of partition_keys.
Validates that partition_keys is a valid subset of the full set of partition keys and returns the unique key
values of partition_keys. Defaults to the unique key combinations over the full set of partition keys of this
dataset if partition_keys is None.
.. code-block:: python
# get all partition key value pairs
partitions = mltable.get_partition_key_values()
# Return [{'country': 'US', 'state': 'WA', 'partition_date': datetime('2020-1-1')}]
partitions = mltable.get_partition_key_values(['country'])
# Return [{'country': 'US'}]
:param partition_keys: partition keys
:type partition_keys: builtin.list[str]
"""
self._check_loaded()
if not partition_keys:
partition_keys = self.partition_keys
if not self.partition_keys:
raise UserErrorException("cannot retrieve partition key values for a mltable that has no partition keys")
invalid_keys = [
x for x in partition_keys if x not in self.partition_keys]
if len(invalid_keys) != 0:
raise UserErrorException(f"{invalid_keys} are invalid partition keys")
# currently use summarize to find the distinct result
mltable = self.take(count=1)
pd = mltable.to_pandas_dataframe()
no_partition_key_columns = [
x for x in pd.columns if x not in partition_keys]
mltable = self
if len(no_partition_key_columns) > 0:
mltable = mltable._add_transformation_step('summarize',
{"aggregates":
[{"source_column": no_partition_key_columns[0],
"aggregate": "count",
"new_column": "new_count"}],
"group_by": partition_keys})
mltable = mltable.keep_columns(partition_keys)
# need to implement distinct from rlex https://msdata.visualstudio.com/Vienna/_workitems/edit/1749317
# mltable = self.distinct_rows()
pd = mltable.to_pandas_dataframe()
pd = pd.drop_duplicates()
partition_key_values = pd.to_dict(
orient='records') if pd.shape[0] != 0 else []
return partition_key_values
@track(_get_logger, custom_dimensions={'app_name': _APP_NAME})
def filter(self, expression):
"""
Filter the data, leaving only the records that match the specified expression.
.. remarks::
Expressions are started by indexing the mltable with the name of a column. They support a variety of
functions and operators and can be combined using logical operators. The resulting expression will be
lazily evaluated for each record when a data pull occurs and not where it is defined.
.. code-block:: python
filtered_mltable = mltable.filter('feature_1 == \"5\" and target > \"0.5\"')
filtered_mltable = mltable.filter('col("FBI Code") == \"11\"')
:param expression: The expression to evaluate.
:type expression: string
:return: MLTable after filter
:rtype: mltable.MLTable
"""
self._check_loaded()
return self._add_transformation_step('filter', expression)
@property
@track(_get_logger, custom_dimensions={'app_name': _APP_NAME})
def paths(self):
"""
Returns a list of dictionaries containing the original paths given to this MLTable. Relative local file paths
are assumed to be relative to the directory where the MLTable YAML file this MLTable instance was loaded from.
:return: list of dicts containing paths specified in the MLTable
:rtype: list[dict[str, str]]
"""
self._check_loaded()
return list(map(lambda x: x[0], self._path_pairs))
@property
@track(_get_logger, custom_dimensions={'app_name': _APP_NAME})
def partition_keys(self):
"""Return the partition keys.
:return: the partition keys
:rtype: builtin.list[str]
"""
self._check_loaded()
def parse_partition_format(partition_format):
date_parts = ['yyyy', 'MM', 'dd', 'HH', 'mm', 'ss']
date_part_map = {d: '_sys_{}'.format(d) for d in date_parts}
defined_date_parts = []
date_column = None
columns = []
i = 0
pattern = ''
while i < len(partition_format):
c = partition_format[i]
if c == '/':
pattern += '\\/'
elif partition_format[i:i + 2] in ['{{', '}}']:
pattern += c
i += 1
elif c == '{':
close = i + 1
while close < len(partition_format) and partition_format[close] != '}':
close += 1
key = partition_format[i + 1:close]
if ':' in key:
date_column, date_format = key.split(':')
for date_part in date_parts:
date_format = date_format.replace(
date_part, '{' + date_part_map[date_part] + '}')
partition_format = partition_format[:i] + \
date_format + partition_format[close + 1:]
continue
else:
found_date = False
for k, v in date_part_map.items():
if partition_format.startswith(v, i + 1):
pattern_to_add = '(?<{}>\\d{{{}}})'.format(
v, len(k))
if pattern_to_add in pattern:
pattern += '(\\d{{{}}})'.format(len(k))
else:
pattern += pattern_to_add
defined_date_parts.append(k)
found_date = True
break
if not found_date:
pattern_to_add = '(?<{}>[^\\.\\/\\\\]+)'.format(key)
if pattern_to_add in pattern:
pattern += '([^\\.\\/\\\\]+)'
else:
columns.append(key)
pattern += pattern_to_add
i = close
elif c == '*':
pattern += '(.*?)'
elif c == '.':
pattern += '\\.'
else:
pattern += c
i += 1
if date_column is not None:
columns.append(date_column)
if defined_date_parts and 'yyyy' not in defined_date_parts:
raise UserErrorException(f'Invalid partition_format "{partition_format}". '
f'{validation_error["NO_YEAR"]}')
return pattern, defined_date_parts, columns
if len(self._partition_keys) > 0:
return self._partition_keys
mltable_dict = self._to_yaml_dict()
if _TRANSFORMATIONS_SCHEMA_KEY in mltable_dict:
for mltable_transformation in mltable_dict[_TRANSFORMATIONS_SCHEMA_KEY]:
if _EXTRACT_PARTITION_FORMAT_KEY in mltable_transformation:
parsed_result = parse_partition_format(
mltable_transformation[_EXTRACT_PARTITION_FORMAT_KEY][_PARTITION_FORMAT_KEY])
if len(parsed_result) == 3 and parsed_result[2]:
self._partition_keys = parsed_result[2]
return parsed_result[2]
return []
@property
@track(_get_logger, custom_dimensions={'app_name': _APP_NAME})
def _is_tabular(self):
"""
Check if this MLTable is tabular using its YAML.
"""
self._check_loaded()
mltable_yaml = self._to_yaml_dict()
return _is_tabular(mltable_yaml)
@staticmethod
@track(_get_logger, custom_dimensions={'app_name': _APP_NAME})
def _create_from_dict(mltable_yaml_dict, path_pairs, load_uri):
"""
Creates a new MLTable from a YAML dictionary containing information from a MLTable file.
:param mltable_yaml_dict: MLTable dict to read from
:type mltable_yaml_dict: dict
:param path_pairs: pairings from original given data file paths to transformed data paths, usually relative
file paths made absolute
:type path_pairs: list[tuple[dict[str, str], dict[str, str]]]
:param load_uri: directory path where MLTable was originally loaded from, or intended to be but doesn't
actually exist yet if created with `MLTable.from_*`
:type load_uri: str
:return: MLTable from given dict
:rtype: mltable.MLTable
"""
mltable_yaml_string = yaml.safe_dump(mltable_yaml_dict)
dataflow = _wrap_rslex_function_call(lambda: PyRsDataflow(mltable_yaml_string))
return MLTable._create_from_dataflow(dataflow, path_pairs, load_uri)
@staticmethod
@track(_get_logger, custom_dimensions={'app_name': _APP_NAME})
def _create_from_dataflow(dataflow, path_pairs, load_uri):
"""
Creates a new MLTable from a PyRsDataflow.
:param new_dataflow: PyRsDataflow to read from
:type new_dataflow: PyRsDataflow
:param path_pairs: pairings from original given data file paths to transformed data paths, usually relative
file paths made absolute
:type path_pairs: list[tuple[dict[str, str], dict[str, str]]]
:param load_uri: directory path where MLTable was originally loaded from, or if created with `MLTable.from_*`,
where directory is intended to be
:type load_uri: str
:return: MLTable from given PyRsDataflow
:rtype: mltable.MLTable
"""
new_mltable = MLTable()
new_mltable._dataflow = dataflow
new_mltable._loaded = True
new_mltable._path_pairs = path_pairs
new_mltable._partition_keys = []
new_mltable.traits = Traits._create(new_mltable)
new_mltable.metadata = Metadata._create(new_mltable)
new_mltable._workspace_context = None
new_mltable._load_uri = load_uri
return new_mltable
@staticmethod
@track(_get_logger, custom_dimensions={'app_name': _APP_NAME})
def _append_workspace_to_stream_info_conversion(mltable, workspace_info, stream_column):
def _is_stream_column_in_column_conversion(columns_item):
return 'stream_info' == columns_item['column_type'] \
and ((isinstance(columns_item['columns'], str) and columns_item['columns'] == stream_column)
or (isinstance(columns_item['columns'], list) and stream_column in columns_item['columns']))
mltable_dict = mltable._to_yaml_dict()
if _TRANSFORMATIONS_SCHEMA_KEY in mltable_dict:
columns_conversion_list = [columns_item for t in mltable_dict[_TRANSFORMATIONS_SCHEMA_KEY]
for k, v in t.items()
if k == 'convert_column_types'
for columns_item in v
if _is_stream_column_in_column_conversion(columns_item)]
if len(columns_conversion_list) == 0:
return mltable
for columns in columns_conversion_list:
columns['column_type'] = {
'stream_info': {
'subscription': workspace_info[STORAGE_OPTION_KEY_AZUREML_SUBSCRIPTION],
'resource_group': workspace_info[STORAGE_OPTION_KEY_AZUREML_RESOURCEGROUP],
'workspace_name': workspace_info[STORAGE_OPTION_KEY_AZUREML_WORKSPACE],
'escaped': False
}
}
return MLTable._create_from_dict(mltable_dict, mltable._path_pairs, mltable._load_uri)
# else skip update
return mltable
@track(_get_logger, custom_dimensions={'app_name': _APP_NAME})
def take(self, count=20):
"""
Adds a transformation step to select the first `count` rows of this
MLTable.
:param count: number of rows from top of table to select
:type count: int
:return: MLTable with added "take" transformation step
:rtype: mltable.MLTable
"""
self._check_loaded()
if not (isinstance(count, int) and count > 0):
raise UserErrorException('Number of rows must be a positive integer')
return self._add_transformation_step('take', count)
@track(_get_logger, custom_dimensions={'app_name': _APP_NAME})
def show(self, count=20):
"""
Retrieves the first `count` rows of this MLTable as a Pandas Dataframe.
:param count: number of rows from top of table to select
:type count: int
:return: first `count` rows of the MLTable
:rtype: Pandas Dataframe
"""
return self.take(count).to_pandas_dataframe()
@track(_get_logger, custom_dimensions={'app_name': _APP_NAME})
def take_random_sample(self, probability, seed=None):
"""
Adds a transformation step to randomly select each row of this MLTable
with `probability` chance. Probability must be in the range (0, 1), exclusive. May
optionally set a random seed.
:param probability: chance that each row is selected
:type probability: float
:param seed: optional random seed
:type seed: Optional[int]
:return: MLTable with added transformation step
:rtype: mltable.MLTable
"""
self._check_loaded()
if not (isinstance(probability, float) and 0 < probability < 1):
raise UserErrorException('Probability should be a float greater than 0 and less than 1')
seed = self._ensure_random_seed(seed)
return self._add_transformation_step('take_random_sample',
{"probability": probability,
"seed": seed})
def _check_column_names(self, columns):
if isinstance(columns, str):
return columns
if not (isinstance(columns, (list, tuple, set)) and all(map(lambda x: isinstance(x, str), columns))):
raise UserErrorException(
'Expect column names to be a string, a list of strings, a tuple of strings, or a set of strings')
unique_cols = set()
dup_cols = set()
for col in columns:
if col in unique_cols:
dup_cols.add(col)
unique_cols.add(col)
if dup_cols:
raise UserErrorException(f'Found duplicate columns in given column names: {dup_cols}')
return list(columns)
@track(_get_logger, custom_dimensions={'app_name': _APP_NAME})
def drop_columns(self, columns: Union[str, List[str], Tuple[str], Set[str]]):
"""
Adds a transformation step to drop the given columns from the dataset. If an empty list, tuple, or set is given
nothing is dropped. Duplicate columns will raise a UserErrorException.
Attempting to drop a column that is MLTable.traits.timestamp_column or in MLTable.traits.index_columns will
raise a UserErrorException.
:param columns: column(s) to drop from this MLTable
:type columns: Union[str, builtin.list[str], builtin.tuple[str], builtin.set[str]]
:return: MLTable with added transformation step
:rtype: mltable.MLTable
"""
self._check_loaded()
columns = self._check_column_names(columns)
# columns can't contain traits
columns_in_traits = self._get_columns_in_traits()
if (isinstance(columns, str) and columns in columns_in_traits) or columns_in_traits.intersection(columns):
raise UserErrorException('Columns in traits must be kept and cannot be dropped')
return self._add_transformation_step('drop_columns', columns)
@track(_get_logger, custom_dimensions={'app_name': _APP_NAME})
def keep_columns(self, columns: Union[str, List[str], Tuple[str], Set[str]]):
"""
Adds a transformation step to keep the specified columns and drop all others from the dataset. If an empty list,
tuple, or set is given, nothing is dropped. Duplicate columns will raise a UserErrorException.
If the column in MLTable.traits.timestamp_column or the columns in MLTable.traits.index_columns are not
explicitly kept, a UserErrorException is raised.
:param columns: columns(s) in this MLTable to keep
:type columns: Union[str, builtin.list[str], builtin.tuple[str], builtin.set[str]]
:return: MLTable with added transformation step
:rtype: mltable.MLTable
"""
self._check_loaded()
columns = self._check_column_names(columns)
# traits must be in columns
columns_in_traits = self._get_columns_in_traits()
if (isinstance(columns, str) and len(columns_in_traits) != 0 and {columns, } != columns_in_traits) or any(
x not in columns for x in columns_in_traits):
raise UserErrorException('Columns in traits must be kept and cannot be dropped')
return self._add_transformation_step('keep_columns', columns)
@track(_get_logger, custom_dimensions={'app_name': _APP_NAME})
def random_split(self, percent=.5, seed=None):
"""
Randomly splits this MLTable into two MLTables, one having
approximately "percent"% of the original MLTable's data and the other
having the remainder (1-"percent"%).
:param percent: percent of the MLTable to split between
:type percent: Union[int, float]
:param seed: optional random seed
:type seed: Optional[int]
:return: two MLTables with this MLTable's data split between them by
"percent"
:rtype: Tuple[mltable.MLTable, mltable.MLTable]
"""
if not (isinstance(percent, float) and 0 < percent < 1):
raise UserErrorException('Percent should be a float greater than 0 and less than 1')
seed = self._ensure_random_seed(seed)
split_a = self._add_transformation_step('sample', {"sampler": "random_percent",
"sampler_arguments": {
"probability": percent,
"probability_lower_bound": 0.0,
"seed": seed}})
split_b = self._add_transformation_step('sample', {"sampler": "random_percent",
"sampler_arguments": {
"probability": 1.0,
"probability_lower_bound": percent,
"seed": seed}})
return split_a, split_b
def _parse_uri_dirc(self, uri):
"""
Attempts to parse out the directory component of a given URI. Current supported URIs are local filesystems,
AML datastores, Azure Data Lake Gen 1, Azure Data Lake Gen 2, and Azure Blob Storage.
:param uri: URI to parse
:type uri: str
:return: directory component of `uri`
:rtype: str
"""
global _AML_DATASTORE_URI_PATTERN, _AZURE_BLOB_STORAGE_URI_PATTERN, _AZURE_DATA_LAKE_GEN_1_URI_PATTERN, \
_AZURE_DATA_LAKE_GEN_2_URI_PATTERN, _HTTPS_URI_PATTERN
# local path
if _is_local_path(uri):
return os.path.normpath(uri)
_AML_DATASTORE_URI_PATTERN = _AML_DATASTORE_URI_PATTERN or re.compile(r'^azureml:\/\/.*datastores\/(.*)\/paths\/(.*)$')
matches = _AML_DATASTORE_URI_PATTERN.match(uri)
if matches:
# join datastore with relative path properly
return pathlib.Path(matches.group(1), matches.group(2)).as_posix()
_AZURE_BLOB_STORAGE_URI_PATTERN = _AZURE_BLOB_STORAGE_URI_PATTERN or re.compile(r'^wasbs:\/\/.*@.*.blob.core.windows.net\/(.*)$')
matches = _AZURE_BLOB_STORAGE_URI_PATTERN.match(uri)
if matches:
return matches.group(1)
_AZURE_DATA_LAKE_GEN_2_URI_PATTERN = _AZURE_DATA_LAKE_GEN_2_URI_PATTERN or re.compile(r'^abfss:\/\/.*@.*.dfs.core.windows.net\/(.*)$')
matches = _AZURE_DATA_LAKE_GEN_2_URI_PATTERN.match(uri)
if matches:
return matches.group(1)
_AZURE_DATA_LAKE_GEN_1_URI_PATTERN = _AZURE_DATA_LAKE_GEN_1_URI_PATTERN or re.compile(r'^adl:\/\/.*.azuredatalakestore.net\/(.*)$')
matches = _AZURE_DATA_LAKE_GEN_1_URI_PATTERN.match(uri)
if matches:
return matches.group(1)
_HTTPS_URI_PATTERN = _HTTPS_URI_PATTERN or re.compile(r'^https:\/\/.*\..*\/(.*)$')
matches = _HTTPS_URI_PATTERN.match(uri)
if matches:
return matches.group(1)
raise UserErrorException(f'MLTable was loaded from {uri} which is not supported for saving')
@track(_get_logger, custom_dimensions={'app_name': _APP_NAME})
def save(self, path=None, overwrite=True, colocated=False, show_progress=False, if_err_remove_files=True):
"""
Save this MLTable as an MLTable YAML file & its associated paths to the given directory path.
If `path` is not given, defaults to the current working directory. If `path` does not exist, it is created.
If `path` is remote, the underlying data store must already exist. If `path` is a local directory & is not
absolute, it is made absolute.
If `path` points to a file, a UserErrorException is raised. If `path` is a directory path that already contains
one or more of the files being saved (including the MLTable YAML file) and `overwrite` is set to False or 'fail', a
UserErrorException is raised. If `path` is remote, any local file paths not given as a colocated path
(file path relative to the directory that the MLTable was loaded from) will raise a UserErrorException.
`colocated` controls how associated paths are saved to `path`. If True, files are copied to `path` alongside
the MLTable YAML file as relative file paths. Otherwise associated files are not copied, remote paths remain as
given and local file paths are made relative with path redirection if needed. Note that False may result in
non-colocated MLTable YAML files, which is not recommended; furthermore, if `path` is remote this will result in
a UserErrorException, as relative path redirection is not supported for remote URIs.
Note that if the MLTable is created programmatically with methods like `from_paths()` or
`from_read_delimited_files()` with local relative paths, the MLTable directory path is assumed to be the
current working directory.
Be mindful when saving a new MLTable & associated data files to a directory with an existing MLTable file &
associated data files that the directory is not cleared of existing files before saving the new files. It is
possible for already existing data files to persist after saving the new files, especially if existing data
files do not have names matching any new data files. If the new MLTable contains a pattern designator under its
paths, this may unintentionally alter the MLTable by associating existing data files with the new MLTable.
If file paths in this MLTable point to an existing file in `path` but have a different URI, and overwrite is
'fail' or 'skip', the existing file will not be overwritten (i.e. skipped).
:param path: directory path to save to, default to current working directory
:type path: str
:param colocated: If True, saves copies of local & remote file paths in this MLTable under `path` as
relative paths. Otherwise no file copying occurs and remote file paths are saved as given to the saved
MLTable YAML file and local file paths as relative file paths with path redirection. If `path` is remote
& this MLTable contains local file paths, a UserErrorException will be raised.
:type colocated: bool
:param overwrite: How an existing MLTable YAML file and associated files that may already exist under `path` are
handled. Options are 'overwrite' (or True) to replace any existing files, 'fail' (or False) to raise an
error if a file already exists, or 'skip' to leave existing files as is. May also set with
:class:`mltable.MLTableSaveOverwriteOptions`.
:type overwrite: Union[bool, str, :class:`mltable.MLTableSaveOverwriteOptions`]
:param show_progress: displays copying progress to stdout
:type show_progress: bool
:param if_err_remove_files: if any error occurs during saving, remove any successfully saved files to make
the operation atomic
:type if_err_remove_files: bool
:return: this MLTable instance
:rtype: mltable.MLTable
"""
self._check_loaded()
save_path_dirc = path or os.getcwd()
is_save_path_dirc_local = _is_local_path(save_path_dirc)
if is_save_path_dirc_local:
save_path_dirc = _prepend_file_handler(os.path.normpath(os.path.abspath(save_path_dirc)))
from azureml.dataprep.rslex import Copier, PyLocationInfo
_wrap_rslex_function_call(ensure_rslex_environment)
overwrite = MLTableSaveOverwriteOption._parse(overwrite)
mltable_yaml_dict = self._to_yaml_dict()
saved = []
def save(from_path, to_path, base):
def execute_save():
source_info = PyLocationInfo.from_uri(from_path)
dest_info = PyLocationInfo.from_uri(to_path)
try:
# skip saving if overwrite == 'fail' or == 'skip' and to_path & from_path point to same data storage (regardless of actual URI)
overwrite_source = PyIfDestinationExists.APPEND \
if (dest_info.is_same_location(source_info) and \
overwrite in (PyIfDestinationExists.FAIL_ON_FILE_CONFLICT, PyIfDestinationExists.SKIP)) \
else overwrite
except AttributeError: # TODO (nathof) remove fallback after dprep release
overwrite_source = overwrite
copier = Copier(dest_info, base, overwrite_source)
# source_info, traceparent, show_progress, break_on_first_err
copier.copy_volume(source_info, 'MLTable.save', show_progress, True)
saved.append(dest_info) # note successfully saved items
_wrap_rslex_function_call(execute_save)
def make_non_colocated_local_path_relative(file_path):
abs_dirc_path = _remove_file_handler(save_path_dirc)
file_path = _remove_file_handler(file_path)
# finds the shortest path from this file path to the save directory, if they are on different
# mounts / drives leaves path as is
# ex: file_path = D:\home\user\tmp\file.csv, abs_dirc_path = C:\system\tmp --> file_path stays the same
if os.path.splitdrive(file_path)[0] != os.path.splitdrive(abs_dirc_path)[0]:
return file_path
rel_path = os.path.normpath(os.path.relpath(file_path, abs_dirc_path))
# `file_path` is absolute so if `rel_path` has parent directory shifts ('../') just keep `file_path`,
# should only trigger on systems that use Posix paths
# ex: rel_path could end up as ../../home/user/files/data.csv when /home/user/files/data.csv will suffice
return file_path if rel_path.endswith(file_path) else rel_path
try:
if self._path_pairs:
load_uri_is_remote = _is_remote_path(self._load_uri)
load_uri = self._parse_uri_dirc(self._load_uri)
def save_path_pair(og_path_dict, processed_path_dict):
path_type, og_path = list(og_path_dict.items())[0]
# processing only occurs on relative local paths
_, processed_path = list(processed_path_dict.items())[0] # file location to save from
if not colocated:
if _is_local_path(processed_path):
if not is_save_path_dirc_local:
raise UserErrorException(
'Local paths can not be uploaded to remote storage if `colocated` is False. This may '
'result in non-colocated file paths which are not supported with remote URIs.')
return {path_type: make_non_colocated_local_path_relative(processed_path)}
# if was given as remote path
if _is_remote_path(og_path):
return og_path_dict
# if a relative path but loaded from remote URI
if load_uri_is_remote:
return processed_path_dict
# for remote paths that don't exist under load_uri
# TODO (nathof) follow up for how this works with local absolute paths in other mounts
if _is_remote_path(processed_path) and not processed_path.startswith(load_uri):
base_dirc = self._parse_uri_dirc(processed_path)
# edge case of local, non-colocated path
if _is_local_path(og_path) and (os.path.isabs(og_path) or '..' in og_path):
# make base_dirc point to directory file is loaded from vs MLTable was loaded from
# add one more directory as next level are both children of common_path
headless_processed_path = _remove_file_handler(processed_path)
common_path = os.path.commonpath([load_uri, headless_processed_path])
rel_path = os.path.relpath(headless_processed_path, common_path)
child_dirc, remainder = rel_path.split(os.path.sep, maxsplit=1)
base_dirc = os.path.join(common_path, child_dirc)
save_path_dict = {path_type: remainder}
else:
base_dirc = load_uri
save_path_dict = og_path_dict
save(processed_path, save_path_dirc, base_dirc)
return save_path_dict
mltable_yaml_dict[_PATHS_KEY] = [save_path_pair(og_path_dict, processed_path_dict)
for og_path_dict, processed_path_dict in self._path_pairs]
with tempfile.TemporaryDirectory() as temp_dirc:
mltable_path = os.path.join(temp_dirc, 'MLTable')
with open(mltable_path, 'w') as f:
yaml.safe_dump(mltable_yaml_dict, f)
save(_prepend_file_handler(mltable_path), save_path_dirc, temp_dirc)
except Exception:
if if_err_remove_files:
try:
from azureml.dataprep.rslex import PyDestination, PyIfDestinationExists
for save_info in saved:
PyDestination(save_info, PyIfDestinationExists.MERGE_WITH_OVERWRITE).remove()
except ImportError: # TODO (nathof) remove fallback after dprep release
pass
except Exception as save_error:
_LoggerFactory.trace(_get_logger(), 'error occurred while removing successfully saved files')
_reclassify_rslex_error(save_error)
raise
return self
@track(_get_logger, custom_dimensions={'app_name': _APP_NAME})
def skip(self, count):
"""
Adds a transformation step to skip the first `count` rows of this
MLTable.
:param count: number of rows to skip
:type count: int
:return: MLTable with added transformation step
:rtype: mltable.MLTable
"""
self._check_loaded()
if not isinstance(count, int) or count < 1:
raise UserErrorException('Count must be an integer > 0.')
return self._add_transformation_step('skip', count)
@track(_get_logger, custom_dimensions={'app_name': _APP_NAME})
def convert_column_types(self, column_types):
"""
Adds a transformation step to convert the specified columns into their respective specified new types.
:param column_types: Dictionary mapping column name(s) to the DataType to convert them to
:type column_types: dict[typing.Union[typing.Tuple[str], str], mltable.DataType]
:return: MLTable with added transformation step
:rtype: mltable.MLTable
.. code-block:: python
from mltable import DataType
data_types = {
'ID': DataType.to_string(),
'Date': DataType.to_datetime('%d/%m/%Y %I:%M:%S %p'),
'Count': DataType.to_int(),
'Latitude': DataType.to_float(),
'Found': DataType.to_bool(),
'Stream': DataType.to_stream()
}
"""
self._check_loaded()
column_types = _process_column_to_type_mappings(column_types)
return self._add_transformation_step('convert_column_types', column_types)
@track(_get_logger, custom_dimensions={'app_name': _APP_NAME})
def _mount(self, stream_column="Path", mount_point=None, **kwargs):
"""Create a context manager for mounting file streams defined by the mltable as local files.
.. remarks::
A context manager will be returned to manage the lifecycle of the mount. To mount, you will need to
enter the context manager and to unmount, exit from the context manager.
Mount is only supported on Unix or Unix-like operating systems with the native package libfuse installed.
If you are running inside a docker container, the docker container must be started with the `--privileged`
flag or started with `--cap-add SYS_ADMIN --device /dev/fuse`.
.. code-block:: python
exp_path_1 = os.path.normpath(os.path.join(cwd, '../dataset/data/crime-spring.csv'))
paths = [{'file': exp_path_1}]
mltable = from_paths(paths)
with mltable._mount() as mount_context:
# list top level mounted files and folders in the mltable
os.listdir(mount_context.mount_point)
# You can also use the start and stop methods
mount_context = mltable._mount()
mount_context.start() # this will mount the file streams
mount_context.stop() # this will unmount the file streams
If mount_point starts with a /, then it will be treated as an absolute path. If it doesn't start
with a /, then it will be treated as a relative path relative to the current working directory.
:param stream_column: The stream column to mount.
:type stream_column: str
:param mount_point: The local directory to mount the files to. If None, the data will be mounted into a
temporary directory, which you can find by calling the `MountContext.mount_point` instance method.
:type mount_point: str
:return: Returns a context manager for managing the lifecycle of the mount.
:rtype: MountContext: the context manager. Upon entering the context manager, the dataflow will be
mounted to the mount_point. Upon exit, it will remove the mount point and clean up the daemon process
used to mount the dataflow.
"""
def _ensure_path(path):
if not path or path.isspace():
return (tempfile.mkdtemp(), True)
if not os.path.exists(path):
try:
os.makedirs(path)
return (os.path.abspath(path), True)
except FileExistsError:
# There is a chance that the directory may be created after we check for existence and
# before we create it. In this case, we can no-op as though the directory already existed.
pass
is_empty = not any(files or dirnames for _,
dirnames, files in os.walk(path))
return (os.path.abspath(path), is_empty)
mltable_yaml_str = str(self)
hash_object = hashlib.md5(str(self).encode()).hexdigest()
dataflow_in_memory_uri = f'inmemory://dataflow/{hash_object}'
_wrap_rslex_function_call(ensure_rslex_environment)
from azureml.dataprep.rslex import add_in_memory_stream
_wrap_rslex_function_call(lambda: add_in_memory_stream(dataflow_in_memory_uri, mltable_yaml_str))
dataflow_in_memory_uri_encoded = urllib.parse.quote(dataflow_in_memory_uri.encode('utf8'), safe='')
stream_column_encode = urllib.parse.quote(stream_column.encode('utf8'), safe='')
dataflow_uri = f"rsdf://dataflowfs/{dataflow_in_memory_uri_encoded}/{stream_column_encode}/"
mount_point, is_empty = _ensure_path(mount_point)
if os.path.ismount(mount_point):
raise UserErrorException(
f'"{mount_point}" is already mounted. Run `sudo umount "{mount_point}"` to unmount it.')
if not is_empty:
raise UserErrorException(
'mltable mount point must be empty, mounting to non-empty folder is not supported.')
from azureml.dataprep.fuse.dprepfuse import rslex_uri_volume_mount, MountOptions
mount_options = kwargs.get('mount_options', None)
# this can be removed after the default permission set for MountOptions is ready
if not mount_options:
mount_options = MountOptions(data_dir_suffix=None)
return _wrap_rslex_function_call(
lambda: rslex_uri_volume_mount(uri=dataflow_uri, mount_point=mount_point, options=mount_options))
@track(_get_logger, custom_dimensions={'app_name': _APP_NAME})
def _execute(self) -> None:
"""
Executes the current MLTable using the local execution runtime.
:return: None
:rtype: None
"""
return _wrap_rslex_execute_func(func=lambda span_traceparent, _: get_rslex_executor().execute_dataflow(script=self._dataflow, traceparent=span_traceparent, collect_results=False, fail_on_error=False, fail_on_mixed_types=False, fail_on_out_of_range_datetime=False, partition_ids=None),
og_traceparent='MLTable._execute',
fallback_func=lambda span_traceparent, span_context: _execute('mltable._execute', dataflow=str(self), span_context=span_context, traceparent=span_traceparent))
|
()
|
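Illustration (not part of the dataset record above): a hedged usage sketch of the lazily-evaluated pipeline the MLTable class describes. Each call below only adds a transformation step; data is read only when to_pandas_dataframe() pulls it. The folder path and column name are hypothetical.

from mltable import load

tbl = load('./samples/mltable_sample')       # hypothetical MLTable folder
tbl = tbl.filter('col("FBI Code") == "11"')  # adds a filter step, nothing executes yet
tbl = tbl.take(100)                          # adds a take step
pdf = tbl.to_pandas_dataframe()              # data is loaded from the source only here
print(pdf.shape)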
68,514 |
mltable.mltable
|
__eq__
|
Returns whether the given object equals this MLTable.
:param other: given object to compare
:type other: Any
:return: whether the given object equals this MLTable
:rtype: bool
|
def __eq__(self, other):
"""
Returns whether the given object equals this MLTable.
:param other: given object to compare
:type other: Any
:return: whether the given object equals this MLTable
:rtype: bool
"""
if not isinstance(other, MLTable):
return False
self_yaml = self._to_yaml_dict()
other_yaml = other._to_yaml_dict()
def have_same_key(key):
return self_yaml.get(key) == other_yaml.get(key)
return have_same_key(_TRANSFORMATIONS_SCHEMA_KEY) \
and have_same_key(_METADATA_SCHEMA_NAME) \
and have_same_key(_TRAITS_SECTION_KEY) \
and self.paths == other.paths # want to compare using original paths
|
(self, other)
|
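Illustration (not part of the dataset record above): a small sketch of the equality semantics shown in __eq__, which compares transformations, metadata, traits, and the original paths. The folder path is hypothetical.

from mltable import load

a = load('./samples/mltable_sample')  # hypothetical MLTable folder
b = load('./samples/mltable_sample')
assert a == b            # same YAML and same original paths compare equal
assert a != b.take(5)    # an extra transformation step breaks equality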
68,515 |
mltable.mltable
|
__init__
|
Initialize a new MLTable.
This constructor is not supposed to be invoked directly. MLTable is
intended to be created using :func:`mltable.load`.
|
def __init__(self):
"""
Initialize a new MLTable.
This constructor is not supposed to be invoked directly. MLTable is
intended to be created using :func:`mltable.load`.
"""
self._loaded = False
|
(self)
|
68,516 |
mltable.mltable
|
__repr__
|
Returns all the information associated with MLTable as a YAML-style
string representation.
:return: string representation of this MLTable
:rtype: str
|
@staticmethod
def _parse(encoding):
if isinstance(encoding, MLTableFileEncoding):
return encoding
if not isinstance(encoding, str):
raise UserErrorException('The encoding should be a string or an MLTableFileEncoding enum')
if encoding in ("utf8", "utf-8", "utf-8 bom"):
return MLTableFileEncoding.utf8
if encoding in ("iso88591", "iso-8859-1"):
return MLTableFileEncoding.iso88591
if encoding in ("latin1", "latin-1"):
return MLTableFileEncoding.latin1
if encoding == "ascii":
return MLTableFileEncoding.ascii
if encoding in ("windows1252", "windows-1252"):
return MLTableFileEncoding.windows1252
raise UserErrorException(f"""Given invalid encoding '{encoding}', supported encodings are:
- utf8 as "utf8", "utf-8", "utf-8 bom"
- iso88591 as "iso88591" or "iso-8859-1"
- latin1 as "latin1" or "latin-1"
- utf16 as "utf16" or "utf-16"
- windows1252 as "windows1252" or "windows-1252\"""")
|
(self)
|
68,518 |
mltable.mltable
|
_add_transformation_step
|
Adds the given transformation step and its associated arguments to
this MLTable's PyRsDataflow at the given index in the list of all
added transformation steps. Returns a new MLTable whose PyRsDataflow
is the PyRsDataflow resulting from the prior addition.
:param step: transformation step
:type step: str
:param args: arguments for given transformation step
:type args: object
:param index: optional argument to indicate at which index to add the step
:type index: int
:return: MLTable with resulting PyRsDataflow
:rtype: mltable.MLTable
|
@staticmethod
def _parse(encoding):
if isinstance(encoding, MLTableFileEncoding):
return encoding
if not isinstance(encoding, str):
raise UserErrorException('The encoding should be a string or an MLTableFileEncoding enum')
if encoding in ("utf8", "utf-8", "utf-8 bom"):
return MLTableFileEncoding.utf8
if encoding in ("iso88591", "iso-8859-1"):
return MLTableFileEncoding.iso88591
if encoding in ("latin1", "latin-1"):
return MLTableFileEncoding.latin1
if encoding == "ascii":
return MLTableFileEncoding.ascii
if encoding in ("windows1252", "windows-1252"):
return MLTableFileEncoding.windows1252
raise UserErrorException(f"""Given invalid encoding '{encoding}', supported encodings are:
- utf8 as "utf8", "utf-8", "utf-8 bom"
- iso88591 as "iso88591" or "iso-8859-1"
- latin1 as "latin1" or "latin-1"
- utf16 as "utf16" or "utf-16"
- windows1252 as "windows1252" or "windows-1252\"""")
|
(self, step, args, index=None)
|
68,519 |
mltable.mltable
|
_append_workspace_to_stream_info_conversion
| null |
@staticmethod
def _parse(encoding):
if isinstance(encoding, MLTableFileEncoding):
return encoding
if not isinstance(encoding, str):
raise UserErrorException('The encoding should be a string or an MLTableFileEncoding enum')
if encoding in ("utf8", "utf-8", "utf-8 bom"):
return MLTableFileEncoding.utf8
if encoding in ("iso88591", "iso-8859-1"):
return MLTableFileEncoding.iso88591
if encoding in ("latin1", "latin-1"):
return MLTableFileEncoding.latin1
if encoding == "ascii":
return MLTableFileEncoding.ascii
if encoding in ("windows1252", "windows-1252"):
return MLTableFileEncoding.windows1252
raise UserErrorException(f"""Given invalid encoding '{encoding}', supported encodings are:
- utf8 as "utf8", "utf-8", "utf-8 bom"
- iso88591 as "iso88591" or "iso-8859-1"
- latin1 as "latin1" or "latin-1"
- utf16 as "utf16" or "utf-16"
- windows1252 as "windows1252" or "windows-1252\"""")
|
(mltable, workspace_info, stream_column)
|
68,520 |
mltable.mltable
|
_check_column_names
| null |
def _check_column_names(self, columns):
if isinstance(columns, str):
return columns
if not (isinstance(columns, (list, tuple, set)) and all(map(lambda x: isinstance(x, str), columns))):
raise UserErrorException(
'Expect column names to be a string, a list of strings, a tuple of strings, or a set of strings')
unique_cols = set()
dup_cols = set()
for col in columns:
if col in unique_cols:
dup_cols.add(col)
unique_cols.add(col)
if dup_cols:
raise UserErrorException(f'Found duplicate columns in given column names: {dup_cols}')
return list(columns)
|
(self, columns)
|
68,522 |
mltable.mltable
|
_create_from_dataflow
|
Creates a new MLTable from a PyRsDataflow.
:param new_dataflow: PyRsDataflow to read from
:type new_dataflow: PyRsDataflow
:param path_pairs: pairings from original given data file paths to transformed data paths, usually relative
file paths made absolute
:type path_pairs: list[tuple[dict[str, str], dict[str, str]]]
:param load_uri: directory path where MLTable was originally loaded from, or if created with `MLTable.from_*`,
where directory is intended to be
:type load_uri: str
:return: MLTable from given PyRsDataflow
:rtype: mltable.MLTable
|
@staticmethod
def _parse(encoding):
if isinstance(encoding, MLTableFileEncoding):
return encoding
if not isinstance(encoding, str):
raise UserErrorException('The encoding should be a string or an MLTableFileEncoding enum')
if encoding in ("utf8", "utf-8", "utf-8 bom"):
return MLTableFileEncoding.utf8
if encoding in ("iso88591", "iso-8859-1"):
return MLTableFileEncoding.iso88591
if encoding in ("latin1", "latin-1"):
return MLTableFileEncoding.latin1
if encoding == "ascii":
return MLTableFileEncoding.ascii
if encoding in ("windows1252", "windows-1252"):
return MLTableFileEncoding.windows1252
raise UserErrorException(f"""Given invalid encoding '{encoding}', supported encodings are:
- utf8 as "utf8", "utf-8", "utf-8 bom"
- iso88591 as "iso88591" or "iso-8859-1"
- latin1 as "latin1" or "latin-1"
- utf16 as "utf16" or "utf-16"
- windows1252 as "windows1252" or "windows-1252\"""")
|
(dataflow, path_pairs, load_uri)
|
68,523 |
mltable.mltable
|
_create_from_dict
|
Creates a new MLTable from a YAML dictionary containing information from a MLTable file.
:param mltable_yaml_dict: MLTable dict to read from
:type mltable_yaml_dict: dict
:param path_pairs: pairings from original given data file paths to transformed data paths, usually relative
file paths made absolute
:type path_pairs: list[tuple[dict[str, str], dict[str, str]]]
:param load_uri: directory path where MLTable was originally loaded from, or intended to be but doesn't
actually exist yet if created with `MLTable.from_*`
:type load_uri: str
:return: MLTable from given dict
:rtype: mltable.MLTable
|
@staticmethod
def _parse(encoding):
if isinstance(encoding, MLTableFileEncoding):
return encoding
if not isinstance(encoding, str):
raise UserErrorException('The encoding should be a string or an MLTableFileEncoding enum')
if encoding in ("utf8", "utf-8", "utf-8 bom"):
return MLTableFileEncoding.utf8
if encoding in ("iso88591", "iso-8859-1"):
return MLTableFileEncoding.iso88591
if encoding in ("latin1", "latin-1"):
return MLTableFileEncoding.latin1
if encoding == "ascii":
return MLTableFileEncoding.ascii
if encoding in ("windows1252", "windows-1252"):
return MLTableFileEncoding.windows1252
raise UserErrorException(f"""Given invalid encoding '{encoding}', supported encodings are:
- utf8 as "utf8", "utf-8", "utf-8 bom"
- iso88591 as "iso88591" or "iso-8859-1"
- latin1 as "latin1" or "latin-1"
- utf16 as "utf16" or "utf-16"
- windows1252 as "windows1252" or "windows-1252\"""")
|
(mltable_yaml_dict, path_pairs, load_uri)
|
68,525 |
mltable.mltable
|
_ensure_random_seed
|
If the given seed is not an integer or None, raises a UserErrorException. If
None, selects a random seed between 1 and 1000.
:param seed: possible value for random seed
:type seed: object
:return: valid random seed
:rtype: int
|
@staticmethod
def _parse(encoding):
if isinstance(encoding, MLTableFileEncoding):
return encoding
if not isinstance(encoding, str):
raise UserErrorException('The encoding should be a string or an MLTableFileEncoding enum')
if encoding in ("utf8", "utf-8", "utf-8 bom"):
return MLTableFileEncoding.utf8
if encoding in ("iso88591", "iso-8859-1"):
return MLTableFileEncoding.iso88591
if encoding in ("latin1", "latin-1"):
return MLTableFileEncoding.latin1
if encoding == "ascii":
return MLTableFileEncoding.ascii
if encoding in ("windows1252", "windows-1252"):
return MLTableFileEncoding.windows1252
raise UserErrorException(f"""Given invalid encoding '{encoding}', supported encodings are:
- utf8 as "utf8", "utf-8", "utf-8 bom"
- iso88591 as "iso88591" or "iso-8859-1"
- latin1 as "latin1" or "latin-1"
- utf16 as "utf16" or "utf-16"
- windows1252 as "windows1252" or "windows-1252\"""")
|
(self, seed)
|
68,526 |
mltable.mltable
|
_execute
|
Executes the current MLTable using the local execution runtime.
:return: None
:rtype: None
|
@staticmethod
def _parse(encoding):
if isinstance(encoding, MLTableFileEncoding):
return encoding
if not isinstance(encoding, str):
raise UserErrorException('The encoding should be a string or an MLTableFileEncoding enum')
if encoding in ("utf8", "utf-8", "utf-8 bom"):
return MLTableFileEncoding.utf8
if encoding in ("iso88591", "iso-8859-1"):
return MLTableFileEncoding.iso88591
if encoding in ("latin1", "latin-1"):
return MLTableFileEncoding.latin1
if encoding == "ascii":
return MLTableFileEncoding.ascii
if encoding in ("windows1252", "windows-1252"):
return MLTableFileEncoding.windows1252
raise UserErrorException(f"""Given invalid encoding '{encoding}', supported encodings are:
- utf8 as "utf8", "utf-8", "utf-8 bom"
- iso88591 as "iso88591" or "iso-8859-1"
- latin1 as "latin1" or "latin-1"
- utf16 as "utf16" or "utf-16"
- windows1252 as "windows1252" or "windows-1252\"""")
|
(self) -> NoneType
|
68,527 |
mltable.mltable
|
_get_columns_in_traits
|
Gets all the columns that are set in this MLTable's Traits.
:return: set of all Traits
:rtype: set[str]
|
@staticmethod
def _parse(encoding):
if isinstance(encoding, MLTableFileEncoding):
return encoding
if not isinstance(encoding, str):
raise UserErrorException('The encoding should be a string or an MLTableFileEncoding enum')
if encoding in ("utf8", "utf-8", "utf-8 bom"):
return MLTableFileEncoding.utf8
if encoding in ("iso88591", "iso-8859-1"):
return MLTableFileEncoding.iso88591
if encoding in ("latin1", "latin-1"):
return MLTableFileEncoding.latin1
if encoding == "ascii":
return MLTableFileEncoding.ascii
if encoding in ("windows1252", "windows-1252"):
return MLTableFileEncoding.windows1252
raise UserErrorException(f"""Given invalid encoding '{encoding}', supported encodings are:
- utf8 as "utf8", "utf-8", "utf-8 bom"
- iso88591 as "iso88591" or "iso-8859-1"
- latin1 as "latin1" or "latin-1"
- utf16 as "utf16" or "utf-16"
- windows1252 as "windows1252" or "windows-1252\"""")
|
(self)
|
68,528 |
mltable.mltable
|
_get_partition_key_values
|
Return the unique key values of partition_keys.
Validates that partition_keys is a valid subset of the full set of partition keys and returns the unique key
values of partition_keys; defaults to returning the unique key combinations over the full set of partition keys
of this dataset if partition_keys is None.
.. code-block:: python
# get all partition key value pairs
partitions = mltable.get_partition_key_values()
# Return [{'country': 'US', 'state': 'WA', 'partition_date': datetime('2020-1-1')}]
partitions = mltable.get_partition_key_values(['country'])
# Return [{'country': 'US'}]
:param partition_keys: partition keys
:type partition_keys: builtin.list[str]
|
@staticmethod
def _parse(encoding):
if isinstance(encoding, MLTableFileEncoding):
return encoding
if not isinstance(encoding, str):
raise UserErrorException('The encoding should be a string or an MLTableFileEncoding enum')
if encoding in ("utf8", "utf-8", "utf-8 bom"):
return MLTableFileEncoding.utf8
if encoding in ("iso88591", "iso-8859-1"):
return MLTableFileEncoding.iso88591
if encoding in ("latin1", "latin-1"):
return MLTableFileEncoding.latin1
if encoding == "ascii":
return MLTableFileEncoding.ascii
if encoding in ("windows1252", "windows-1252"):
return MLTableFileEncoding.windows1252
raise UserErrorException(f"""Given invalid encoding '{encoding}', supported encodings are:
- utf8 as "utf8", "utf-8", "utf-8 bom"
- iso88591 as "iso88591" or "iso-8859-1"
- latin1 as "latin1" or "latin-1"
- utf16 as "utf16" or "utf-16"
- windows1252 as "windows1252" or "windows-1252\"""")
|
(self, partition_keys=None)
|
68,530 |
mltable.mltable
|
_mount
|
Create a context manager for mounting file streams defined by the mltable as local files.
.. remarks::
A context manager will be returned to manage the lifecycle of the mount. To mount, you will need to
enter the context manager and to unmount, exit from the context manager.
Mount is only supported on Unix or Unix-like operating systems with the native package libfuse installed.
If you are running inside a docker container, the docker container must be started with the `--privileged`
flag or started with `--cap-add SYS_ADMIN --device /dev/fuse`.
.. code-block:: python
exp_path_1 = os.path.normpath(os.path.join(cwd, '../dataset/data/crime-spring.csv'))
paths = [{'file': exp_path_1}]
mltable = from_paths(paths)
with mltable._mount() as mount_context:
# list top level mounted files and folders in the mltable
os.listdir(mount_context.mount_point)
# You can also use the start and stop methods
mount_context = mltable._mount()
mount_context.start() # this will mount the file streams
mount_context.stop() # this will unmount the file streams
If target_path starts with a /, then it will be treated as an absolute path. If it doesn't start
with a /, then it will be treated as a relative path relative to the current working directory.
:param stream_column: The stream column to mount.
:type stream_column: str
:param mount_point: The local directory to mount the files to. If None, the data will be mounted into a
temporary directory, which you can find by calling the `MountContext.mount_point` instance method.
:type mount_point: str
:return: Returns a context manager for managing the lifecycle of the mount.
:rtype: MountContext: the context manager. Upon entering the context manager, the dataflow will be
mounted to the mount_point. Upon exit, it will remove the mount point and clean up the daemon process
used to mount the dataflow.
|
@staticmethod
def _parse(encoding):
if isinstance(encoding, MLTableFileEncoding):
return encoding
if not isinstance(encoding, str):
raise UserErrorException('The encoding should be a string or an MLTableFileEncoding enum')
if encoding in ("utf8", "utf-8", "utf-8 bom"):
return MLTableFileEncoding.utf8
if encoding in ("iso88591", "iso-8859-1"):
return MLTableFileEncoding.iso88591
if encoding in ("latin1", "latin-1"):
return MLTableFileEncoding.latin1
if encoding == "ascii":
return MLTableFileEncoding.ascii
if encoding in ("windows1252", "windows-1252"):
return MLTableFileEncoding.windows1252
raise UserErrorException(f"""Given invalid encoding '{encoding}', supported encodings are:
- utf8 as "utf8", "utf-8", "utf-8 bom"
- iso88591 as "iso88591" or "iso-8859-1"
- latin1 as "latin1" or "latin-1"
- utf16 as "utf16" or "utf-16"
- windows1252 as "windows1252" or "windows-1252\"""")
|
(self, stream_column='Path', mount_point=None, **kwargs)
|
68,531 |
mltable.mltable
|
_parse_uri_dirc
|
Attempts to parse out the directory component of a given URI. Currently supported URIs are local filesystems,
AML datastores, Azure Data Lake Gen 1, Azure Data Lake Gen 2, and Azure Blob Storage.
:param uri: URI to parse
:type uri: str
:return: directory component of `uri`
:rtype: str
|
def _parse_uri_dirc(self, uri):
"""
Attempts to parse out the directory component of a given URI. Currently supported URIs are local filesystems,
AML datastores, Azure Data Lake Gen 1, Azure Data Lake Gen 2, and Azure Blob Storage.
:param uri: URI to parse
:type uri: str
:return: directory component of `uri`
:rtype: str
"""
global _AML_DATASTORE_URI_PATTERN, _AZURE_BLOB_STORAGE_URI_PATTERN, _AZURE_DATA_LAKE_GEN_1_URI_PATTERN, \
_AZURE_DATA_LAKE_GEN_2_URI_PATTERN, _HTTPS_URI_PATTERN
# local path
if _is_local_path(uri):
return os.path.normpath(uri)
_AML_DATASTORE_URI_PATTERN = _AML_DATASTORE_URI_PATTERN or re.compile(r'^azureml:\/\/.*datastores\/(.*)\/paths\/(.*)$')
matches = _AML_DATASTORE_URI_PATTERN.match(uri)
if matches:
# join datastore with relative path properly
return pathlib.Path(matches.group(1), matches.group(2)).as_posix()
_AZURE_BLOB_STORAGE_URI_PATTERN = _AZURE_BLOB_STORAGE_URI_PATTERN or re.compile(r'^wasbs:\/\/.*@.*.blob.core.windows.net\/(.*)$')
matches = _AZURE_BLOB_STORAGE_URI_PATTERN.match(uri)
if matches:
return matches.group(1)
_AZURE_DATA_LAKE_GEN_2_URI_PATTERN = _AZURE_DATA_LAKE_GEN_2_URI_PATTERN or re.compile(r'^abfss:\/\/.*@.*.dfs.core.windows.net\/(.*)$')
matches = _AZURE_DATA_LAKE_GEN_2_URI_PATTERN.match(uri)
if matches:
return matches.group(1)
_AZURE_DATA_LAKE_GEN_1_URI_PATTERN = _AZURE_DATA_LAKE_GEN_1_URI_PATTERN or re.compile(r'^adl:\/\/.*.azuredatalakestore.net\/(.*)$')
matches = _AZURE_DATA_LAKE_GEN_1_URI_PATTERN.match(uri)
if matches:
return matches.group(1)
_HTTPS_URI_PATTERN = _HTTPS_URI_PATTERN or re.compile(r'^https:\/\/.*\..*\/(.*)$')
matches = _HTTPS_URI_PATTERN.match(uri)
if matches:
return matches.group(1)
raise UserErrorException(f'MLTable was loaded from {uri} which is not supported for saving')
|
(self, uri)
|
68,532 |
mltable.mltable
|
_to_yaml_dict
|
Returns all the information associated with MLTable as a YAML-style dictionary.
:return: dict representation of this MLTable
:rtype: dict
|
@staticmethod
def _parse(encoding):
if isinstance(encoding, MLTableFileEncoding):
return encoding
if not isinstance(encoding, str):
raise UserErrorException('The encoding should be a string or an MLTableFileEncoding enum')
if encoding in ("utf8", "utf-8", "utf-8 bom"):
return MLTableFileEncoding.utf8
if encoding in ("iso88591", "iso-8859-1"):
return MLTableFileEncoding.iso88591
if encoding in ("latin1", "latin-1"):
return MLTableFileEncoding.latin1
if encoding == "ascii":
return MLTableFileEncoding.ascii
if encoding in ("windows1252", "windows-1252"):
return MLTableFileEncoding.windows1252
raise UserErrorException(f"""Given invalid encoding '{encoding}', supported encodings are:
- utf8 as "utf8", "utf-8", "utf-8 bom"
- iso88591 as "iso88591" or "iso-8859-1"
- latin1 as "latin1" or "latin-1"
- utf16 as "utf16" or "utf-16"
- windows1252 as "windows1252" or "windows-1252\"""")
|
(self)
|
68,533 |
mltable.mltable
|
_with_partition_size
|
Updates delimited files and JSON lines files to use the given partition size.
:param partition_size: Minimum batch size that data will be partitioned against.
:type partition_size: int
:param partition_size_unit: The memory unit give partition_size is in, default to bytes. Supported options are
a :class:`mltable.MLTablePartitionSizeUnit` or a string as one of 'byte' ('b'), 'kilobyte' ('kb'),
'megabyte' ('mb'), or 'gigabyte' ('gb').
:type partition_size_unit: Union[str, mltable.MLTablePartitionSizeUnit]
:return: MLTable with updated partition size
:rtype: mltable.MLTable
|
@staticmethod
def _parse(encoding):
if isinstance(encoding, MLTableFileEncoding):
return encoding
if not isinstance(encoding, str):
raise UserErrorException('The encoding should be a string or an MLTableFileEncoding enum')
if encoding in ("utf8", "utf-8", "utf-8 bom"):
return MLTableFileEncoding.utf8
if encoding in ("iso88591", "iso-8859-1"):
return MLTableFileEncoding.iso88591
if encoding in ("latin1", "latin-1"):
return MLTableFileEncoding.latin1
if encoding == "ascii":
return MLTableFileEncoding.ascii
if encoding in ("windows1252", "windows-1252"):
return MLTableFileEncoding.windows1252
raise UserErrorException(f"""Given invalid encoding '{encoding}', supported encodings are:
- utf8 as "utf8", "utf-8", "utf-8 bom"
- iso88591 as "iso88591" or "iso-8859-1"
- latin1 as "latin1" or "latin-1"
- utf16 as "utf16" or "utf-16"
- windows1252 as "windows1252" or "windows-1252\"""")
|
(self, partition_size, partition_size_unit='b')
|
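A minimal sketch of how the private `_with_partition_size` helper described above might be used, assuming `tbl` is an MLTable created from delimited or JSON lines files (the variable name and the 200 KB value are illustrative):
.. code-block:: python

    tbl = tbl._with_partition_size(partition_size=200, partition_size_unit='kb')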
68,534 |
mltable.mltable
|
convert_column_types
|
Adds a transformation step to convert the specified columns into their respective specified new types.
:param column_types: Dictionary of column: types the user desires to convert
:type column_types: dict[typing.Union[typing.Tuple[str], str], mltable.DataType]
:return: MLTable with added transformation step
:rtype: mltable.MLTable
.. code-block:: python
from mltable import DataType
data_types = {
'ID': DataType.to_string(),
'Date': DataType.to_datetime('%d/%m/%Y %I:%M:%S %p'),
'Count': DataType.to_int(),
'Latitude': DataType.to_float(),
'Found': DataType.to_bool(),
'Stream': DataType.to_stream()
}
|
@staticmethod
def _parse(encoding):
if isinstance(encoding, MLTableFileEncoding):
return encoding
if not isinstance(encoding, str):
raise UserErrorException('The encoding should be a string or an MLTableFileEncoding enum')
if encoding in ("utf8", "utf-8", "utf-8 bom"):
return MLTableFileEncoding.utf8
if encoding in ("iso88591", "iso-8859-1"):
return MLTableFileEncoding.iso88591
if encoding in ("latin1", "latin-1"):
return MLTableFileEncoding.latin1
if encoding == "ascii":
return MLTableFileEncoding.ascii
if encoding in ("windows1252", "windows-1252"):
return MLTableFileEncoding.windows1252
raise UserErrorException(f"""Given invalid encoding '{encoding}', supported encodings are:
- utf8 as "utf8", "utf-8", "utf-8 bom"
- iso88591 as "iso88591" or "iso-8859-1"
- latin1 as "latin1" or "latin-1"
- utf16 as "utf16" or "utf-16"
- windows1252 as "windows1252" or "windows-1252\"""")
|
(self, column_types)
|
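Continuing the `data_types` example above, a short sketch of applying the conversions; `tbl` is an assumed existing MLTable, and the tuple-keyed column names are illustrative:
.. code-block:: python

    tbl = tbl.convert_column_types(data_types)
    # a tuple of column names may share a single DataType
    tbl = tbl.convert_column_types({('Latitude', 'Longitude'): DataType.to_float()})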
68,535 |
mltable.mltable
|
drop_columns
|
Adds a transformation step to drop the given columns from the dataset. If an empty list, tuple, or set is given
nothing is dropped. Duplicate columns will raise a UserErrorException.
Attempting to drop a column that is MLTable.traits.timestamp_column or in MLTable.traits.index_columns will
raise a UserErrorException.
:param columns: column(s) to drop from this MLTable
:type columns: Union[str, builtin.list[str], builtin.tuple[str], builtin.set[str]]
:return: MLTable with added transformation step
:rtype: mltable.MLTable
|
@staticmethod
def _parse(encoding):
if isinstance(encoding, MLTableFileEncoding):
return encoding
if not isinstance(encoding, str):
raise UserErrorException('The encoding should be a string or an MLTableFileEncoding enum')
if encoding in ("utf8", "utf-8", "utf-8 bom"):
return MLTableFileEncoding.utf8
if encoding in ("iso88591", "iso-8859-1"):
return MLTableFileEncoding.iso88591
if encoding in ("latin1", "latin-1"):
return MLTableFileEncoding.latin1
if encoding == "ascii":
return MLTableFileEncoding.ascii
if encoding in ("windows1252", "windows-1252"):
return MLTableFileEncoding.windows1252
raise UserErrorException(f"""Given invalid encoding '{encoding}', supported encodings are:
- utf8 as "utf8", "utf-8", "utf-8 bom"
- iso88591 as "iso88591" or "iso-8859-1"
- latin1 as "latin1" or "latin-1"
- utf16 as "utf16" or "utf-16"
- windows1252 as "windows1252" or "windows-1252\"""")
|
(self, columns: Union[str, List[str], Tuple[str], Set[str]])
|
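A brief usage sketch for `drop_columns`, assuming an MLTable `tbl` with columns 'colA' and 'colB' (both names are illustrative):
.. code-block:: python

    # a single column name or a list/tuple/set of names is accepted
    tbl = tbl.drop_columns('colA')
    tbl = tbl.drop_columns(['colA', 'colB'])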
68,536 |
mltable.mltable
|
extract_columns_from_partition_format
|
Adds a transformation step to use the partition information of each path and extract them into columns
based on the specified partition format.
Format part '{column_name}' creates string column, and '{column_name:yyyy/MM/dd/HH/mm/ss}' creates
datetime column, where 'yyyy', 'MM', 'dd', 'HH', 'mm' and 'ss' are used to extract year, month, day,
hour, minute and second for the datetime type.
The format should start from the position of first partition key until the end of file path.
For example, given the path '/Accounts/2019/01/01/data.csv' where the partition is by department name
and time, partition_format='/{Department}/{PartitionDate:yyyy/MM/dd}/data.csv'
creates a string column 'Department' with the value 'Accounts' and a datetime column 'PartitionDate'
with the value '2019-01-01'.
:param partition_format: Partition format to use to extract data into columns
:type partition_format: str
:return: MLTable whose partition format is set to given format
:rtype: mltable.MLTable
|
@staticmethod
def _parse(encoding):
if isinstance(encoding, MLTableFileEncoding):
return encoding
if not isinstance(encoding, str):
raise UserErrorException('The encoding should be a string or an MLTableFileEncoding enum')
if encoding in ("utf8", "utf-8", "utf-8 bom"):
return MLTableFileEncoding.utf8
if encoding in ("iso88591", "iso-8859-1"):
return MLTableFileEncoding.iso88591
if encoding in ("latin1", "latin-1"):
return MLTableFileEncoding.latin1
if encoding == "ascii":
return MLTableFileEncoding.ascii
if encoding in ("windows1252", "windows-1252"):
return MLTableFileEncoding.windows1252
raise UserErrorException(f"""Given invalid encoding '{encoding}', supported encodings are:
- utf8 as "utf8", "utf-8", "utf-8 bom"
- iso88591 as "iso88591" or "iso-8859-1"
- latin1 as "latin1" or "latin-1"
- utf16 as "utf16" or "utf-16"
- windows1252 as "windows1252" or "windows-1252\"""")
|
(self, partition_format)
|
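A sketch of the 'Accounts' example from the docstring above, assuming `tbl` was created over paths shaped like '/Accounts/2019/01/01/data.csv':
.. code-block:: python

    tbl = tbl.extract_columns_from_partition_format(
        '/{Department}/{PartitionDate:yyyy/MM/dd}/data.csv')
    # adds a string column 'Department' and a datetime column 'PartitionDate'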
68,537 |
mltable.mltable
|
filter
|
Filter the data, leaving only the records that match the specified expression.
.. remarks::
Expressions are started by indexing the mltable with the name of a column. They support a variety of
functions and operators and can be combined using logical operators. The resulting expression will be
lazily evaluated for each record when a data pull occurs and not where it is defined.
.. code-block:: python
filtered_mltable = mltable.filter('feature_1 == "5" and target > "0.5"')
filtered_mltable = mltable.filter('col("FBI Code") == "11"')
:param expression: The expression to evaluate.
:type expression: string
:return: MLTable after filter
:rtype: mltable.MLTable
|
@staticmethod
def _parse(encoding):
if isinstance(encoding, MLTableFileEncoding):
return encoding
if not isinstance(encoding, str):
raise UserErrorException('The encoding should be a string or an MLTableFileEncoding enum')
if encoding in ("utf8", "utf-8", "utf-8 bom"):
return MLTableFileEncoding.utf8
if encoding in ("iso88591", "iso-8859-1"):
return MLTableFileEncoding.iso88591
if encoding in ("latin1", "latin-1"):
return MLTableFileEncoding.latin1
if encoding == "ascii":
return MLTableFileEncoding.ascii
if encoding in ("windows1252", "windows-1252"):
return MLTableFileEncoding.windows1252
raise UserErrorException(f"""Given invalid encoding '{encoding}', supported encodings are:
- utf8 as "utf8", "utf-8", "utf-8 bom"
- iso88591 as "iso88591" or "iso-8859-1"
- latin1 as "latin1" or "latin-1"
- utf16 as "utf16" or "utf-16"
- windows1252 as "windows1252" or "windows-1252\"""")
|
(self, expression)
|
68,538 |
mltable.mltable
|
get_partition_count
|
Returns the number of data partitions underlying the data associated with this MLTable.
:return: data partitions in this MLTable
:rtype: int
|
@staticmethod
def _parse(encoding):
if isinstance(encoding, MLTableFileEncoding):
return encoding
if not isinstance(encoding, str):
raise UserErrorException('The encoding should be a string or an MLTableFileEncoding enum')
if encoding in ("utf8", "utf-8", "utf-8 bom"):
return MLTableFileEncoding.utf8
if encoding in ("iso88591", "iso-8859-1"):
return MLTableFileEncoding.iso88591
if encoding in ("latin1", "latin-1"):
return MLTableFileEncoding.latin1
if encoding == "ascii":
return MLTableFileEncoding.ascii
if encoding in ("windows1252", "windows-1252"):
return MLTableFileEncoding.windows1252
raise UserErrorException(f"""Given invalid encoding '{encoding}', supported encodings are:
- utf8 as "utf8", "utf-8", "utf-8 bom"
- iso88591 as "iso88591" or "iso-8859-1"
- latin1 as "latin1" or "latin-1"
- utf16 as "utf16" or "utf-16"
- windows1252 as "windows1252" or "windows-1252\"""")
|
(self) -> int
|
68,539 |
mltable.mltable
|
keep_columns
|
Adds a transformation step to keep the specified columns and drop all others from the dataset. If an empty list,
tuple, or set is given nothing is dropped. Duplicate columns will raise a UserErrorException.
If the column in MLTable.traits.timestamp_column or the columns in MLTable.traits.index_columns are not
explicitly kept, a UserErrorException is raised.
:param columns: columns(s) in this MLTable to keep
:type columns: Union[str, builtin.list[str], builtin.tuple[str], builtin.set[str]]
:return: MLTable with added transformation step
:rtype: mltable.MLTable
|
@staticmethod
def _parse(encoding):
if isinstance(encoding, MLTableFileEncoding):
return encoding
if not isinstance(encoding, str):
raise UserErrorException('The encoding should be a string or an MLTableFileEncoding enum')
if encoding in ("utf8", "utf-8", "utf-8 bom"):
return MLTableFileEncoding.utf8
if encoding in ("iso88591", "iso-8859-1"):
return MLTableFileEncoding.iso88591
if encoding in ("latin1", "latin-1"):
return MLTableFileEncoding.latin1
if encoding == "ascii":
return MLTableFileEncoding.ascii
if encoding in ("windows1252", "windows-1252"):
return MLTableFileEncoding.windows1252
raise UserErrorException(f"""Given invalid encoding '{encoding}', supported encodings are:
- utf8 as "utf8", "utf-8", "utf-8 bom"
- iso88591 as "iso88591" or "iso-8859-1"
- latin1 as "latin1" or "latin-1"
- utf16 as "utf16" or "utf-16"
- windows1252 as "windows1252" or "windows-1252\"""")
|
(self, columns: Union[str, List[str], Tuple[str], Set[str]])
|
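A usage sketch for `keep_columns`, assuming `tbl` has columns 'id' and 'label' (names are illustrative); note that any timestamp or index columns set in traits must be among those kept:
.. code-block:: python

    tbl = tbl.keep_columns(['id', 'label'])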
68,540 |
mltable.mltable
|
random_split
|
Randomly splits this MLTable into two MLTables, one having
approximately "percent"% of the original MLTable's data and the other
having the remainder (1-"percent"%).
:param percent: percent of the MLTable to split between
:type percent: Union[int, float]
:param seed: optional random seed
:type seed: Optional[int]
:return: two MLTables with this MLTable's data split between them by
"percent"
:rtype: Tuple[mltable.MLTable, mltable.MLTable]
|
@staticmethod
def _parse(encoding):
if isinstance(encoding, MLTableFileEncoding):
return encoding
if not isinstance(encoding, str):
raise UserErrorException('The encoding should be a string or an MLTableFileEncoding enum')
if encoding in ("utf8", "utf-8", "utf-8 bom"):
return MLTableFileEncoding.utf8
if encoding in ("iso88591", "iso-8859-1"):
return MLTableFileEncoding.iso88591
if encoding in ("latin1", "latin-1"):
return MLTableFileEncoding.latin1
if encoding == "ascii":
return MLTableFileEncoding.ascii
if encoding in ("windows1252", "windows-1252"):
return MLTableFileEncoding.windows1252
raise UserErrorException(f"""Given invalid encoding '{encoding}', supported encodings are:
- utf8 as "utf8", "utf-8", "utf-8 bom"
- iso88591 as "iso88591" or "iso-8859-1"
- latin1 as "latin1" or "latin-1"
- utf16 as "utf16" or "utf-16"
- windows1252 as "windows1252" or "windows-1252\"""")
|
(self, percent=0.5, seed=None)
|
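A sketch of splitting an MLTable into train and test tables with `random_split`; the 0.8 split and the seed are illustrative values:
.. code-block:: python

    train_tbl, test_tbl = tbl.random_split(percent=0.8, seed=42)
    train_df = train_tbl.to_pandas_dataframe()
    test_df = test_tbl.to_pandas_dataframe()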
68,541 |
mltable.mltable
|
save
|
Save this MLTable as an MLTable YAML file & its associated paths to the given directory path.
If `path` is not given, defaults to the current working directory. If `path` does not exist, it is created.
If `path` is remote, the underlying data store must already exist. If `path` is a local directory & is not
absolute, it is made absolute.
If `path` points to a file, a UserErrorException is raised. If `path` is a directory path that already contains
one or more files being saved (including the MLTable YAML file) and `overwrite` is set to False or 'fail', a
UserErrorException is raised. If `path` is remote, any local file paths not given as a colocated path
(file path relative to the directory that MLTable was loaded from) will raise a UserErrorException.
`colocated` controls how associated paths are saved to `path`. If True, files are copied to `path` alongside
the MLTable YAML file as relative file paths. Otherwise associated files are not copied, remote paths remain as
given and local file paths are made relative with path redirection if needed. Note that False may result in
non-colocated MLTable YAML files, which is not recommended; furthermore, if `path` is remote this will result in
a UserErrorException, as relative path redirection is not supported for remote URIs.
Note that if the MLTable is created programmatically with methods like `from_paths()` or
`from_delimited_files()` with local relative paths, the MLTable directory path is assumed to be the
current working directory.
Be mindful when saving a new MLTable & associated data files to a directory with an existing MLTable file &
associated data files that the directory is not cleared of existing files before saving the new files. It is
possible for already existing data files to persist after saving the new files, especially if existing data
files do not have names matching any new data files. If the new MLTable contains a pattern designator under its
paths, this may unintentionally alter the MLTable by associating existing data files with the new MLTable.
If file paths in this MLTable point to an existing file in `path` but have a different URI, and `overwrite` is
'fail' or 'skip', the existing file will not be overwritten (i.e. it is skipped).
:param path: directory path to save to, defaults to the current working directory
:type path: str
:param colocated: If True, saves copies of local & remote file paths in this MLTable under `path` as
relative paths. Otherwise no file copying occurs and remote file paths are saved as given to the saved
MLTable YAML file and local file paths as relative file paths with path redirection. If `path` is remote
& this MLTable contains local file paths, a UserErrorException will be raised.
:type colocated: bool
:param overwrite: How an MLTable YAML file and associated files that may already exist under `path` are
handled. Options are 'overwrite' (or True) to replace any existing files, 'fail' (or False) to raise an
error if a file already exists, or 'skip' to leave existing files as is. May also be set with
:class:`mltable.MLTableSaveOverwriteOptions`.
:type overwrite: Union[bool, str, :class:`mltable.MLTableSaveOverwriteOptions`]
:param show_progress: displays copying progress to stdout
:type show_progress: bool
:param if_err_remove_files: if any error occurs during saving, remove any successfully saved files to make
the operation atomic
:type if_err_remove_files: bool
:return: this MLTable instance
:rtype: mltable.MLTable
|
@staticmethod
def _parse(encoding):
if isinstance(encoding, MLTableFileEncoding):
return encoding
if not isinstance(encoding, str):
raise UserErrorException('The encoding should be a string or an MLTableFileEncoding enum')
if encoding in ("utf8", "utf-8", "utf-8 bom"):
return MLTableFileEncoding.utf8
if encoding in ("iso88591", "iso-8859-1"):
return MLTableFileEncoding.iso88591
if encoding in ("latin1", "latin-1"):
return MLTableFileEncoding.latin1
if encoding == "ascii":
return MLTableFileEncoding.ascii
if encoding in ("windows1252", "windows-1252"):
return MLTableFileEncoding.windows1252
raise UserErrorException(f"""Given invalid encoding '{encoding}', supported encodings are:
- utf8 as "utf8", "utf-8", "utf-8 bom"
- iso88591 as "iso88591" or "iso-8859-1"
- latin1 as "latin1" or "latin-1"
- utf16 as "utf16" or "utf-16"
- windows1252 as "windows1252" or "windows-1252\"""")
|
(self, path=None, overwrite=True, colocated=False, show_progress=False, if_err_remove_files=True)
|
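A hedged sketch of saving an MLTable and its files to a local directory; the './saved_mltable' path is illustrative:
.. code-block:: python

    # writes an 'MLTable' YAML file and, with colocated=True, copies the data files next to it
    tbl.save(path='./saved_mltable', colocated=True, overwrite=True, show_progress=True)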
68,542 |
mltable.mltable
|
select_partitions
|
Adds a transformation step to select the partition.
.. remarks::
The following code snippet shows how to use the select_partitions api to select partitions
from the provided MLTable.
.. code-block:: python
partition_index_list = [1, 2]
mltable = mltable.select_partitions(partition_index_list)
:param partition_index_list: list of partition index
:type partition_index_list: list of int
:return: MLTable with the selected partitions
:rtype: mltable.MLTable
|
@staticmethod
def _parse(encoding):
if isinstance(encoding, MLTableFileEncoding):
return encoding
if not isinstance(encoding, str):
raise UserErrorException('The encoding should be a string or an MLTableFileEncoding enum')
if encoding in ("utf8", "utf-8", "utf-8 bom"):
return MLTableFileEncoding.utf8
if encoding in ("iso88591", "iso-8859-1"):
return MLTableFileEncoding.iso88591
if encoding in ("latin1", "latin-1"):
return MLTableFileEncoding.latin1
if encoding == "ascii":
return MLTableFileEncoding.ascii
if encoding in ("windows1252", "windows-1252"):
return MLTableFileEncoding.windows1252
raise UserErrorException(f"""Given invalid encoding '{encoding}', supported encodings are:
- utf8 as "utf8", "utf-8", "utf-8 bom"
- iso88591 as "iso88591" or "iso-8859-1"
- latin1 as "latin1" or "latin-1"
- utf16 as "utf16" or "utf-16"
- windows1252 as "windows1252" or "windows-1252\"""")
|
(self, partition_index_list)
|
68,543 |
mltable.mltable
|
show
|
Retrieves the first `count` rows of this MLTable as a Pandas Dataframe.
:param count: number of rows from top of table to select
:type count: int
:return: first `count` rows of the MLTable
:rtype: Pandas Dataframe
|
@staticmethod
def _parse(encoding):
if isinstance(encoding, MLTableFileEncoding):
return encoding
if not isinstance(encoding, str):
raise UserErrorException('The encoding should be a string or an MLTableFileEncoding enum')
if encoding in ("utf8", "utf-8", "utf-8 bom"):
return MLTableFileEncoding.utf8
if encoding in ("iso88591", "iso-8859-1"):
return MLTableFileEncoding.iso88591
if encoding in ("latin1", "latin-1"):
return MLTableFileEncoding.latin1
if encoding == "ascii":
return MLTableFileEncoding.ascii
if encoding in ("windows1252", "windows-1252"):
return MLTableFileEncoding.windows1252
raise UserErrorException(f"""Given invalid encoding '{encoding}', supported encodings are:
- utf8 as "utf8", "utf-8", "utf-8 bom"
- iso88591 as "iso88591" or "iso-8859-1"
- latin1 as "latin1" or "latin-1"
- utf16 as "utf16" or "utf-16"
- windows1252 as "windows1252" or "windows-1252\"""")
|
(self, count=20)
|
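A small sketch of previewing data with `show`; `tbl` is an assumed existing MLTable:
.. code-block:: python

    preview = tbl.show(count=5)  # first 5 rows as a Pandas DataFrame
    print(preview)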
68,544 |
mltable.mltable
|
skip
|
Adds a transformation step to skip the first `count` rows of this
MLTable.
:param count: number of rows to skip
:type count: int
:return: MLTable with added transformation step
:rtype: mltable.MLTable
|
@staticmethod
def _parse(encoding):
if isinstance(encoding, MLTableFileEncoding):
return encoding
if not isinstance(encoding, str):
raise UserErrorException('The encoding should be a string or an MLTableFileEncoding enum')
if encoding in ("utf8", "utf-8", "utf-8 bom"):
return MLTableFileEncoding.utf8
if encoding in ("iso88591", "iso-8859-1"):
return MLTableFileEncoding.iso88591
if encoding in ("latin1", "latin-1"):
return MLTableFileEncoding.latin1
if encoding == "ascii":
return MLTableFileEncoding.ascii
if encoding in ("windows1252", "windows-1252"):
return MLTableFileEncoding.windows1252
raise UserErrorException(f"""Given invalid encoding '{encoding}', supported encodings are:
- utf8 as "utf8", "utf-8", "utf-8 bom"
- iso88591 as "iso88591" or "iso-8859-1"
- latin1 as "latin1" or "latin-1"
- utf16 as "utf16" or "utf-16"
- windows1252 as "windows1252" or "windows-1252\"""")
|
(self, count)
|
68,545 |
mltable.mltable
|
take
|
Adds a transformation step to select the first `count` rows of this
MLTable.
:param count: number of rows from top of table to select
:type count: int
:return: MLTable with added "take" transformation step
:rtype: mltable.MLTable
|
@staticmethod
def _parse(encoding):
if isinstance(encoding, MLTableFileEncoding):
return encoding
if not isinstance(encoding, str):
raise UserErrorException('The encoding should be a string or an MLTableFileEncoding enum')
if encoding in ("utf8", "utf-8", "utf-8 bom"):
return MLTableFileEncoding.utf8
if encoding in ("iso88591", "iso-8859-1"):
return MLTableFileEncoding.iso88591
if encoding in ("latin1", "latin-1"):
return MLTableFileEncoding.latin1
if encoding == "ascii":
return MLTableFileEncoding.ascii
if encoding in ("windows1252", "windows-1252"):
return MLTableFileEncoding.windows1252
raise UserErrorException(f"""Given invalid encoding '{encoding}', supported encodings are:
- utf8 as "utf8", "utf-8", "utf-8 bom"
- iso88591 as "iso88591" or "iso-8859-1"
- latin1 as "latin1" or "latin-1"
- utf16 as "utf16" or "utf-16"
- windows1252 as "windows1252" or "windows-1252\"""")
|
(self, count=20)
|
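A sketch combining the `skip` entry above with `take` to page through rows; the counts are illustrative:
.. code-block:: python

    # skip the first 100 rows, then keep the next 20
    page = tbl.skip(100).take(20)
    df = page.to_pandas_dataframe()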
68,546 |
mltable.mltable
|
take_random_sample
|
Adds a transformation step to randomly select each row of this MLTable
with `probability` chance. Probability must be in range [0, 1]. May
optionally set a random seed.
:param probability: chance that each row is selected
:type probability: float
:param seed: optional random seed
:type seed: Optional[int]
:return: MLTable with added transformation step
:rtype: mltable.MLTable
|
@staticmethod
def _parse(encoding):
if isinstance(encoding, MLTableFileEncoding):
return encoding
if not isinstance(encoding, str):
raise UserErrorException('The encoding should be a string or an MLTableFileEncoding enum')
if encoding in ("utf8", "utf-8", "utf-8 bom"):
return MLTableFileEncoding.utf8
if encoding in ("iso88591", "iso-8859-1"):
return MLTableFileEncoding.iso88591
if encoding in ("latin1", "latin-1"):
return MLTableFileEncoding.latin1
if encoding == "ascii":
return MLTableFileEncoding.ascii
if encoding in ("windows1252", "windows-1252"):
return MLTableFileEncoding.windows1252
raise UserErrorException(f"""Given invalid encoding '{encoding}', supported encodings are:
- utf8 as "utf8", "utf-8", "utf-8 bom"
- iso88591 as "iso88591" or "iso-8859-1"
- latin1 as "latin1" or "latin-1"
- utf16 as "utf16" or "utf-16"
- windows1252 as "windows1252" or "windows-1252\"""")
|
(self, probability, seed=None)
|
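A sketch of `take_random_sample`, keeping each row with roughly 10% probability under a fixed seed (both values are illustrative):
.. code-block:: python

    sampled = tbl.take_random_sample(probability=0.1, seed=7)
    df = sampled.to_pandas_dataframe()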
68,547 |
mltable.mltable
|
to_pandas_dataframe
|
Load all records from the paths specified in the MLTable file into a Pandas DataFrame.
.. remarks::
The following code snippet shows how to use the
to_pandas_dataframe api to obtain a pandas dataframe corresponding
to the provided MLTable.
.. code-block:: python
from mltable import load
tbl = load('.\samples\mltable_sample')
pdf = tbl.to_pandas_dataframe()
print(pdf.shape)
:return: Pandas Dataframe containing the records from paths in this MLTable
:rtype: pandas.DataFrame
|
@staticmethod
def _parse(encoding):
if isinstance(encoding, MLTableFileEncoding):
return encoding
if not isinstance(encoding, str):
raise UserErrorException('The encoding should be a string or an MLTableFileEncoding enum')
if encoding in ("utf8", "utf-8", "utf-8 bom"):
return MLTableFileEncoding.utf8
if encoding in ("iso88591", "iso-8859-1"):
return MLTableFileEncoding.iso88591
if encoding in ("latin1", "latin-1"):
return MLTableFileEncoding.latin1
if encoding == "ascii":
return MLTableFileEncoding.ascii
if encoding in ("windows1252", "windows-1252"):
return MLTableFileEncoding.windows1252
raise UserErrorException(f"""Given invalid encoding '{encoding}', supported encodings are:
- utf8 as "utf8", "utf-8", "utf-8 bom"
- iso88591 as "iso88591" or "iso-8859-1"
- latin1 as "latin1" or "latin-1"
- utf16 as "utf16" or "utf-16"
- windows1252 as "windows1252" or "windows-1252\"""")
|
(self)
|
68,548 |
mltable.mltable
|
validate
|
Validates if this MLTable's data can be loaded, requires the MLTable's
data source(s) to be accessible from the current compute.
:return: None
:rtype: None
|
@staticmethod
def _parse(encoding):
if isinstance(encoding, MLTableFileEncoding):
return encoding
if not isinstance(encoding, str):
raise UserErrorException('The encoding should be a string or an MLTableFileEncoding enum')
if encoding in ("utf8", "utf-8", "utf-8 bom"):
return MLTableFileEncoding.utf8
if encoding in ("iso88591", "iso-8859-1"):
return MLTableFileEncoding.iso88591
if encoding in ("latin1", "latin-1"):
return MLTableFileEncoding.latin1
if encoding == "ascii":
return MLTableFileEncoding.ascii
if encoding in ("windows1252", "windows-1252"):
return MLTableFileEncoding.windows1252
raise UserErrorException(f"""Given invalid encoding '{encoding}', supported encodings are:
- utf8 as "utf8", "utf-8", "utf-8 bom"
- iso88591 as "iso88591" or "iso-8859-1"
- latin1 as "latin1" or "latin-1"
- utf16 as "utf16" or "utf-16"
- windows1252 as "windows1252" or "windows-1252\"""")
|
(self)
|
68,549 |
mltable.mltable
|
MLTableFileEncoding
|
Defines options for how encodings are processed when reading data from
files to create a MLTable.
These enumeration values are used in the MLTable class.
|
class MLTableFileEncoding(Enum):
"""
Defines options for how encodings are processed when reading data from
files to create a MLTable.
These enumeration values are used in the MLTable class.
"""
utf8 = auto()
iso88591 = auto()
latin1 = auto()
ascii = auto()
utf16 = auto()
utf8bom = auto()
windows1252 = auto()
@staticmethod
def _parse(encoding):
if isinstance(encoding, MLTableFileEncoding):
return encoding
if not isinstance(encoding, str):
raise UserErrorException('The encoding should be a string or an MLTableFileEncoding enum')
if encoding in ("utf8", "utf-8", "utf-8 bom"):
return MLTableFileEncoding.utf8
if encoding in ("iso88591", "iso-8859-1"):
return MLTableFileEncoding.iso88591
if encoding in ("latin1", "latin-1"):
return MLTableFileEncoding.latin1
if encoding == "ascii":
return MLTableFileEncoding.ascii
if encoding in ("windows1252", "windows-1252"):
return MLTableFileEncoding.windows1252
raise UserErrorException(f"""Given invalid encoding '{encoding}', supported encodings are:
- utf8 as "utf8", "utf-8", "utf-8 bom"
- iso88591 as "iso88591" or "iso-8859-1"
- latin1 as "latin1" or "latin-1"
- utf16 as "utf16" or "utf-16"
- windows1252 as "windows1252" or "windows-1252\"""")
|
(value, names=None, *, module=None, qualname=None, type=None, start=1)
|
68,550 |
mltable.mltable
|
MLTableHeaders
|
Defines options for how column headers are processed when reading data
from files to create a MLTable.
These enumeration values are used in the MLTable class.
|
class MLTableHeaders(Enum):
"""
Defines options for how column headers are processed when reading data
from files to create a MLTable.
These enumeration values are used in the MLTable class.
"""
#: No column headers are read
no_header = auto()
#: Read headers only from first row of first file, everything else is data.
from_first_file = auto()
#: Read headers from first row of each file, combining named columns.
all_files_different_headers = auto()
#: Read headers from first row of first file, drops first row from other files.
all_files_same_headers = auto()
@staticmethod
def _parse(header):
if isinstance(header, MLTableHeaders):
return header
if not isinstance(header, str):
raise UserErrorException('The header should be a string or an MLTableHeader enum')
try:
return MLTableHeaders[header.lower()]
except KeyError:
raise UserErrorException(f"Given invalid header {str(header)}, supported headers are: 'no_header', "
"'from_first_file', 'all_files_different_headers', and 'all_files_same_headers'.")
|
(value, names=None, *, module=None, qualname=None, type=None, start=1)
|
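A sketch showing that the header options above (and the encodings from the previous entry) can be passed to `from_delimited_files` either as enum members or as their string forms; the path is illustrative:
.. code-block:: python

    from mltable import from_delimited_files, MLTableHeaders, MLTableFileEncoding

    paths = [{'file': './samples/mltable_sample/sample_data.csv'}]
    tbl = from_delimited_files(paths, header=MLTableHeaders.all_files_same_headers,
                               encoding=MLTableFileEncoding.utf8)
    # equivalent using plain strings
    tbl = from_delimited_files(paths, header='all_files_same_headers', encoding='utf8')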
68,555 |
mltable.mltable
|
from_delimited_files
|
Creates a MLTable from the given list of delimited files.
.. remarks::
There must be a valid paths string.
.. code-block:: python
# load mltable from local delimited file
from mltable import from_delimited_files
paths = [{"file": "./samples/mltable_sample/sample_data.csv"}]
mltable = from_delimited_files(paths)
:param paths: Supports files or folders with local or cloud paths. Relative local file paths are assumed to be
relative to the current working directory. If a local file path is relative to a directory other than the
current working directory, it is recommended to pass that path as an absolute file path.
:type paths: list[dict[str, str]]
:param header: How column headers are handled when reading from files. Options specified using the enum
:class:`mltable.MLTableHeaders`. Supported headers are 'no_header', 'from_first_file',
'all_files_different_headers', and 'all_files_same_headers'.
:type header: typing.Union[str, mltable.MLTableHeaders]
:param delimiter: separator used to split columns
:type delimiter: str
:param support_multi_line: If False, all line breaks, including those in quoted field values, will be interpreted
as a record break. Reading data this way is faster and more optimized for parallel execution on multiple CPU
cores. However, it may result in silently producing more records with misaligned field values. This should be
set to True when the delimited files are known to contain quoted line breaks.
.. remarks::
Given this csv file as example, the data will be read differently
based on support_multi_line.
A,B,C
A1,B1,C1
A2,"B
2",C2
.. code-block:: python
from mltable import from_delimited_files
# default behavior: support_multi_line=False
mltable = from_delimited_files(path)
print(mltable.to_pandas_dataframe())
# A B C
# 0 A1 B1 C1
# 1 A2 B None
# 2 2" C2 None
# to handle quoted line breaks
mltable = from_delimited_files(path, support_multi_line=True)
print(mltable.to_pandas_dataframe())
# A B C
# 0 A1 B1 C1
# 1 A2 B\r\n2 C2
:type support_multi_line: bool
:param empty_as_string: How empty fields should be handled. If True will read empty fields as empty strings, else
read as nulls. If True and column contains datetime or numeric data, empty fields still read as nulls.
:type empty_as_string: bool
:param encoding: Specifies the file encoding using the enum :class:`mltable.MLTableFileEncoding`. Supported
encodings are:
- utf8 as "utf8", "utf-8", "utf-8 bom"
- iso88591 as "iso88591" or "iso-8859-1"
- latin1 as "latin1" or "latin-1"
- utf16 as "utf16" or "utf-16"
- windows1252 as "windows1252" or "windows-1252"
:type encoding: typing.Union[str, mltable.MLTableFileEncoding]
:param include_path_column: Keep path information as a column in the MLTable; useful when reading multiple files
and you want to know which file a particular record came from, or to keep useful information that may be stored
in a file path.
:type include_path_column: bool
:param infer_column_types: If True, automatically infers all column types. If False, leaves columns as strings. If
a dictionary, represents columns whose types are to be set to given types (with all other columns being
inferred). The dictionary may contain a key named `sample_size` mapped to a positive integer number,
representing the number of rows to use for inferring column types. The dictionary may also contain a key named
'column_type_overrides'. Each key in the dictionary is either a string representing a column name or a tuple
of strings representing a group of column names. Each value is either a string (one of 'boolean', 'string',
'float', or 'int') or a :class:`mltable.DataType`. mltable.DataType.to_stream() is not supported. If an empty
dictionary is given, assumed to be True. Defaults to True.
.. remarks::
An example of how to format `infer_column_types`.
.. code-block:: python
from mltable import from_delimited_files
# default behavior: support_multi_line=False
mltable = from_delimited_files(paths, infer_column_types={
'sample_size': 100,
'column_type_overrides': {
'colA': 'boolean',
('colB', 'colC'): DataType.to_int()
}
})
:type infer_column_types:
typing.Union[bool, dict[str, typing.Union[str, dict[typing.Union[typing.Tuple[str], str], mltable.DataType]]]
:return: MLTable
:rtype: mltable.MLTable
|
@staticmethod
def _parse(encoding):
if isinstance(encoding, MLTableFileEncoding):
return encoding
if not isinstance(encoding, str):
raise UserErrorException('The encoding should be a string or an MLTableFileEncoding enum')
if encoding in ("utf8", "utf-8", "utf-8 bom"):
return MLTableFileEncoding.utf8
if encoding in ("iso88591", "iso-8859-1"):
return MLTableFileEncoding.iso88591
if encoding in ("latin1", "latin-1"):
return MLTableFileEncoding.latin1
if encoding == "ascii":
return MLTableFileEncoding.ascii
if encoding in ("windows1252", "windows-1252"):
return MLTableFileEncoding.windows1252
raise UserErrorException(f"""Given invalid encoding '{encoding}', supported encodings are:
- utf8 as "utf8", "utf-8", "utf-8 bom"
- iso88591 as "iso88591" or "iso-8859-1"
- latin1 as "latin1" or "latin-1"
- utf16 as "utf16" or "utf-16"
- windows1252 as "windows1252" or "windows-1252\"""")
|
(paths, header='all_files_same_headers', delimiter=',', support_multi_line=False, empty_as_string=False, encoding='utf8', include_path_column=False, infer_column_types=True)
|
68,556 |
mltable.mltable
|
from_delta_lake
|
Creates an MLTable object to read in Parquet files from delta lake table.
.. remarks::
**from_delta_lake** creates an MLTable object which defines the operations to
load data from delta lake folder into tabular representation.
For the data to be accessible by Azure Machine Learning, `delta_table_uri` must point to the delta table directory
and the delta lake files that are referenced must be accessible by AzureML services or behind public web urls.
**from_delta_lake** supports reading delta lake data from a uri
pointing to: local path, Blob, ADLS Gen1 and ADLS Gen2
Users are able to read in and materialize the data by calling `to_pandas_dataframe()` on the returned MLTable
.. code-block:: python
# create an MLTable object from a delta lake using timestamp versioning and materialize the data
from mltable import from_delta_lake
mltable_ts = from_delta_lake(delta_table_uri="./data/delta-01", timestamp_as_of="2021-05-24T00:00:00Z")
pd = mltable_ts.to_pandas_dataframe()
# create an MLTable object from a delta lake using integer versioning and materialize the data
from mltable import from_delta_lake
mltable_version = from_delta_lake(delta_table_uri="./data/delta-02", version_as_of=1)
pd = mltable_version.to_pandas_dataframe()
:param delta_table_uri: URI pointing to the delta table directory containing the delta lake parquet files to read.
Supported URI types are: local path URI, storage URI, long-form datastore URI, or data asset uri.
:type delta_table_uri: str
:param timestamp_as_of: datetime string in RFC-3339/ISO-8601 format to use to read in matching parquet files
from a specific point in time.
ex) "2022-10-01T00:00:00Z", "2022-10-01T00:00:00+08:00", "2022-10-01T01:30:00-08:00"
:type timestamp_as_of: string
:param version_as_of: integer version to use to read in a specific version of parquet files.
:type version_as_of: int
:param include_path_column: Keep path information as a column, useful when reading multiple files and you want
to know which file a particular record came from, or to keep useful information that may be stored in a file
path.
:type include_path_column: bool
:return: MLTable instance
:rtype: mltable.MLTable
|
@staticmethod
def _parse(encoding):
if isinstance(encoding, MLTableFileEncoding):
return encoding
if not isinstance(encoding, str):
raise UserErrorException('The encoding should be a string or an MLTableFileEncoding enum')
if encoding in ("utf8", "utf-8", "utf-8 bom"):
return MLTableFileEncoding.utf8
if encoding in ("iso88591", "iso-8859-1"):
return MLTableFileEncoding.iso88591
if encoding in ("latin1", "latin-1"):
return MLTableFileEncoding.latin1
if encoding == "ascii":
return MLTableFileEncoding.ascii
if encoding in ("windows1252", "windows-1252"):
return MLTableFileEncoding.windows1252
raise UserErrorException(f"""Given invalid encoding '{encoding}', supported encodings are:
- utf8 as "utf8", "utf-8", "utf-8 bom"
- iso88591 as "iso88591" or "iso-8859-1"
- latin1 as "latin1" or "latin-1"
- utf16 as "utf16" or "utf-16"
- windows1252 as "windows1252" or "windows-1252\"""")
|
(delta_table_uri, timestamp_as_of=None, version_as_of=None, include_path_column=False)
|
68,557 |
mltable.mltable
|
from_json_lines_files
|
Create a MLTable from the given list of JSON file paths.
.. remarks::
There must be a valid paths dictionary
.. code-block:: python
# load mltable from local JSON paths
from mltable import from_json_lines_files
paths = [{'file': './samples/mltable_sample/sample_data.jsonl'}]
mltable = from_json_lines_files(paths)
:param paths: Supports files or folders with local or cloud paths. Relative local file paths are assumed to be
relative to the current working directory. If a local file path is relative to a directory other than the
current working directory, it is recommended to pass that path as an absolute file path.
:type paths: list[dict[str, str]]
:param invalid_lines: How to handle lines that are invalid JSON; can be 'drop' or 'error'. If 'drop', invalid
lines are dropped, otherwise an error is raised.
:type invalid_lines: str
:param encoding: Specifies the file encoding using the enum :class:`mltable.MLTableFileEncoding`. Supported file
encodings:
- utf8 as "utf8", "utf-8", "utf-8 bom"
- iso88591 as "iso88591" or "iso-8859-1"
- latin1 as "latin1" or "latin-1"
- utf16 as "utf16" or "utf-16"
- windows1252 as "windows1252" or "windows-1252"
:type encoding: typing.Union[str, mltable.MLTableFileEncoding]
:param include_path_column: Keep path information as a column, useful when reading multiple files and you want
to know which file a particular record came from, or to keep useful information that may be stored in a file
path.
:type include_path_column: bool
:return: MLTable
:rtype: mltable.MLTable
|
@staticmethod
def _parse(encoding):
if isinstance(encoding, MLTableFileEncoding):
return encoding
if not isinstance(encoding, str):
raise UserErrorException('The encoding should be a string or an MLTableFileEncoding enum')
if encoding in ("utf8", "utf-8", "utf-8 bom"):
return MLTableFileEncoding.utf8
if encoding in ("iso88591", "iso-8859-1"):
return MLTableFileEncoding.iso88591
if encoding in ("latin1", "latin-1"):
return MLTableFileEncoding.latin1
if encoding == "ascii":
return MLTableFileEncoding.ascii
if encoding in ("windows1252", "windows-1252"):
return MLTableFileEncoding.windows1252
raise UserErrorException(f"""Given invalid encoding '{encoding}', supported encodings are:
- utf8 as "utf8", "utf-8", "utf-8 bom"
- iso88591 as "iso88591" or "iso-8859-1"
- latin1 as "latin1" or "latin-1"
- utf16 as "utf16" or "utf-16"
- windows1252 as "windows1252" or "windows-1252\"""")
|
(paths, invalid_lines='error', encoding='utf8', include_path_column=False)
|
68,558 |
mltable.mltable
|
from_parquet_files
|
Create the MLTable from the given list of parquet files.
.. remarks::
There must be a valid paths dictionary
.. code-block:: python
# load mltable from local parquet paths
from mltable import from_parquet_files
paths = [{'file': './samples/mltable_sample/sample.parquet'}]
mltable = from_parquet_files(paths)
:param paths: Supports files or folders with local or cloud paths. Relative local file paths are assumed to be
relative to the current working directory. If a local file path is relative to a directory other than the
current working directory, it is recommended to pass that path as an absolute file path.
:type paths: list[dict[str, str]]
:param include_path_column: Keep path information as a column, useful when reading multiple files and you want
to know which file a particular record came from, or to keep useful information that may be stored in a file
path.
:type include_path_column: bool
:return: MLTable instance
:rtype: mltable.MLTable
|
@staticmethod
def _parse(encoding):
if isinstance(encoding, MLTableFileEncoding):
return encoding
if not isinstance(encoding, str):
raise UserErrorException('The encoding should be a string or an MLTableFileEncoding enum')
if encoding in ("utf8", "utf-8", "utf-8 bom"):
return MLTableFileEncoding.utf8
if encoding in ("iso88591", "iso-8859-1"):
return MLTableFileEncoding.iso88591
if encoding in ("latin1", "latin-1"):
return MLTableFileEncoding.latin1
if encoding == "ascii":
return MLTableFileEncoding.ascii
if encoding in ("windows1252", "windows-1252"):
return MLTableFileEncoding.windows1252
raise UserErrorException(f"""Given invalid encoding '{encoding}', supported encodings are:
- utf8 as "utf8", "utf-8", "utf-8 bom"
- iso88591 as "iso88591" or "iso-8859-1"
- latin1 as "latin1" or "latin-1"
- utf16 as "utf16" or "utf-16"
- windows1252 as "windows1252" or "windows-1252\"""")
|
(paths, include_path_column=False)
|
68,559 |
mltable.mltable
|
from_paths
|
Create the MLTable from the given paths.
.. remarks::
There must be a valid paths dictionary
.. code-block:: python
# load mltable from local paths
from mltable import from_paths
tbl = from_paths([{'file': "./samples/mltable_sample"}])
# load mltable from cloud paths
from mltable import load
tbl = from_paths(
[{'file': "https://<blob-storage-name>.blob.core.windows.net/<path>/sample_file"}])
:param paths: Supports files or folders with local or cloud paths. Relative local file paths are assumed to be
relative to the current working directory. If a local file path is relative to a directory other than the
current working directory, it is recommended to pass that path as an absolute file path.
:type paths: list[dict[str, str]]
:return: MLTable instance
:rtype: mltable.MLTable
|
@staticmethod
def _parse(encoding):
if isinstance(encoding, MLTableFileEncoding):
return encoding
if not isinstance(encoding, str):
raise UserErrorException('The encoding should be a string or an MLTableFileEncoding enum')
if encoding in ("utf8", "utf-8", "utf-8 bom"):
return MLTableFileEncoding.utf8
if encoding in ("iso88591", "iso-8859-1"):
return MLTableFileEncoding.iso88591
if encoding in ("latin1", "latin-1"):
return MLTableFileEncoding.latin1
if encoding == "ascii":
return MLTableFileEncoding.ascii
if encoding in ("windows1252", "windows-1252"):
return MLTableFileEncoding.windows1252
raise UserErrorException(f"""Given invalid encoding '{encoding}', supported encodings are:
- utf8 as "utf8", "utf-8", "utf-8 bom"
- iso88591 as "iso88591" or "iso-8859-1"
- latin1 as "latin1" or "latin-1"
- utf16 as "utf16" or "utf-16"
- windows1252 as "windows1252" or "windows-1252\"""")
|
(paths)
|
68,560 |
mltable.mltable
|
load
|
Loads the MLTable file (YAML) present at the given uri.
.. remarks::
There must be a valid MLTable YAML file named 'MLTable' present at
the given uri.
.. code-block:: python
# load mltable from local folder
from mltable import load
tbl = load('.\samples\mltable_sample')
# load mltable from azureml datastore uri
from mltable import load
tbl = load(
'azureml://subscriptions/<subscription-id>/
resourcegroups/<resourcegroup-name>/workspaces/<workspace-name>/
datastores/<datastore-name>/paths/<mltable-path-on-datastore>/')
# load mltable from azureml data asset uri
from mltable import load
tbl = load(
'azureml://subscriptions/<subscription-id>/
resourcegroups/<resourcegroup-name>/providers/Microsoft.MachineLearningServices/
workspaces/<workspace-name>/data/<data-asset-name>/versions/<data-asset-version>/')
# load mltable from azureml data asset short uri
from mltable import load
from azure.ai.ml import MLClient
from azure.identity import DefaultAzureCredential
credential = DefaultAzureCredential()
ml_client = MLClient(credential, <subscription_id>, <resourcegroup-name>, <workspace-name>)
tbl = load('azureml:<data-asset-name>:<version>', ml_client=ml_client)
`storage_options` supports keys of 'subscription', 'resource_group',
'workspace', or 'location'. All must locate an Azure machine learning
workspace.
:param uri: uri supports long-form datastore uri, storage uri, local path,
or data asset uri or data asset short uri
:type uri: str
:param storage_options: AML workspace info when URI is an AML asset
:type storage_options: dict[str, str]
:param ml_client: MLClient instance. To learn more, see https://learn.microsoft.com/en-us/python/api/azure-ai-ml/azure.ai.ml.mlclient?view=azure-python
:type ml_client: azure.ai.ml.MLClient
:return: MLTable
:rtype: mltable.MLTable
|
@staticmethod
def _parse(encoding):
if isinstance(encoding, MLTableFileEncoding):
return encoding
if not isinstance(encoding, str):
raise UserErrorException('The encoding should be a string or an MLTableFileEncoding enum')
if encoding in ("utf8", "utf-8", "utf-8 bom"):
return MLTableFileEncoding.utf8
if encoding in ("iso88591", "iso-8859-1"):
return MLTableFileEncoding.iso88591
if encoding in ("latin1", "latin-1"):
return MLTableFileEncoding.latin1
if encoding == "ascii":
return MLTableFileEncoding.ascii
if encoding in ("windows1252", "windows-1252"):
return MLTableFileEncoding.windows1252
raise UserErrorException(f"""Given invalid encoding '{encoding}', supported encodings are:
- utf8 as "utf8", "utf-8", "utf-8 bom"
- iso88591 as "iso88591" or "iso-8859-1"
- latin1 as "latin1" or "latin-1"
- utf16 as "utf16" or "utf-16"
- windows1252 as "windows1252" or "windows-1252\"""")
|
(uri, storage_options: dict = None, ml_client=None)
|
68,562 |
icetk.ice_tokenizer
|
IceTokenizer
| null |
class IceTokenizer:
def __init__(self, path='~/.icetk_models', device='cuda', fp16=True):
self.configure(path, device, fp16)
def configure(self, path=None, device=None, fp16=None):
if path is not None:
self.path = os.path.expanduser(path)
if device is not None:
self.device = device
if fp16 is not None:
self.fp16 = fp16
@property
def text_tokenizer(self):
if not hasattr(self, '_text_tokenizer'):
fp = os.path.join(self.path, 'ice_text.model')
auto_create(fp)
self._text_tokenizer = TextTokenizer(fp)
return self._text_tokenizer
@property
def image_tokenizer(self):
if not hasattr(self, '_image_tokenizer'):
fp = os.path.join(self.path, 'ice_image.pt')
auto_create(fp)
self._image_tokenizer = ImageTokenizer(fp, device=self.device, fp16=self.fp16)
return self._image_tokenizer
@property
def num_image_tokens(self):
return 20000 # self.image_tokenizer.num_tokens # allow not load
@property
def num_text_tokens(self):
return self.text_tokenizer.num_tokens
@property
def num_tokens(self):
return self.num_image_tokens + self.num_text_tokens
def add_special_tokens(self, special_tokens: List[str]):
self.text_tokenizer.add_special_tokens(special_tokens)
def encode(self, text=None,
image_path=None, image_pil=None, image_torch=None,
image_size: int=None, compress_rate=8, ignore_linebreak=True):
assert (text is None) + (image_path is None) + (image_pil is None) + (image_torch is None) == 3
assert int(compress_rate) in [4, 8, 16]
if text is not None:
if not ignore_linebreak:
text = text.replace('\n', '<n>')
tmp = self.text_tokenizer.encode(text)
return [x + self.num_image_tokens for x in tmp]
else:
need_norm_to_1 = False
if image_path is not None:
image_pil = Image.open(image_path)
if image_torch is None:
image_torch = pil_to_tensor(image_pil)
need_norm_to_1 = True
if image_size is not None:
# for speed in large-scale preprocessing, set this to None and transform in Dataloader.
# TODO: test speed
tr = transforms.Compose([
transforms.Resize(image_size),
transforms.CenterCrop(image_size),
])
image_torch = tr(image_torch)
image_torch = image_torch.to(self.image_tokenizer.device).float()
if need_norm_to_1:
image_torch /= 255.
return self.image_tokenizer.encode(image_torch, l=int(math.log2(compress_rate))-2)
def decode(self, text_ids: List[int]=None, image_ids: Union[List[int], torch.LongTensor]=None, compress_rate=8):
assert (text_ids is None) + (image_ids is None) == 1
if text_ids is not None:
ids = [int(_id) - self.num_image_tokens for _id in text_ids]
return self.text_tokenizer.decode(ids).replace('<n>', '\n')
else:
return self.image_tokenizer.decode(image_ids, l=int(math.log2(compress_rate))-2)
def tokenize(self, text):
return self.text_tokenizer.tokenize(text)
def __getitem__(self, x):
if isinstance(x, int):
if x < self.num_image_tokens:
return '<image_{}>'.format(x)
else:
return self.text_tokenizer.convert_id_to_token(x - self.num_image_tokens)
elif isinstance(x, str):
if x.startswith('<image_') and x.endswith('>') and x[7:-1].isdigit():
return int(x[7:-1])
else:
return self.text_tokenizer.convert_token_to_id(x) + self.num_image_tokens
else:
raise ValueError('The key should be str or int.')
|
(path='~/.icetk_models', device='cuda', fp16=True)
|
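A minimal text-only sketch of the tokenizer above, assuming the `ice_text.model` file can be fetched into `~/.icetk_models` by `auto_create`; text ids are offset by the 20000 reserved image tokens and `decode` reverses that offset.

from icetk.ice_tokenizer import IceTokenizer

tokenizer = IceTokenizer(path='~/.icetk_models', device='cpu', fp16=False)
ids = tokenizer.encode(text='Hello world')   # each id >= tokenizer.num_image_tokens (20000)
print(tokenizer.decode(text_ids=ids))        # round-trips back to the original text
print(tokenizer.tokenize('Hello world'))     # underlying text tokens, without the image offset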
68,563 |
icetk.ice_tokenizer
|
__getitem__
| null |
def __getitem__(self, x):
if isinstance(x, int):
if x < self.num_image_tokens:
return '<image_{}>'.format(x)
else:
return self.text_tokenizer.convert_id_to_token(x - self.num_image_tokens)
elif isinstance(x, str):
if x.startswith('<image_') and x.endswith('>') and x[7:-1].isdigit():
return int(x[7:-1])
else:
return self.text_tokenizer.convert_token_to_id(x) + self.num_image_tokens
else:
raise ValueError('The key should be str or int.')
|
(self, x)
|
68,564 |
icetk.ice_tokenizer
|
__init__
| null |
def __init__(self, path='~/.icetk_models', device='cuda', fp16=True):
self.configure(path, device, fp16)
|
(self, path='~/.icetk_models', device='cuda', fp16=True)
|
68,565 |
icetk.ice_tokenizer
|
add_special_tokens
| null |
def add_special_tokens(self, special_tokens: List[str]):
self.text_tokenizer.add_special_tokens(special_tokens)
|
(self, special_tokens: List[str])
|
68,566 |
icetk.ice_tokenizer
|
configure
| null |
def configure(self, path=None, device=None, fp16=None):
if path is not None:
self.path = os.path.expanduser(path)
if device is not None:
self.device = device
if fp16 is not None:
self.fp16 = fp16
|
(self, path=None, device=None, fp16=None)
|
68,567 |
icetk.ice_tokenizer
|
decode
| null |
def decode(self, text_ids: List[int]=None, image_ids: Union[List[int], torch.LongTensor]=None, compress_rate=8):
assert (text_ids is None) + (image_ids is None) == 1
if text_ids is not None:
ids = [int(_id) - self.num_image_tokens for _id in text_ids]
return self.text_tokenizer.decode(ids).replace('<n>', '\n')
else:
return self.image_tokenizer.decode(image_ids, l=int(math.log2(compress_rate))-2)
|
(self, text_ids: Optional[List[int]] = None, image_ids: Union[List[int], torch.LongTensor, NoneType] = None, compress_rate=8)
|
68,568 |
icetk.ice_tokenizer
|
encode
| null |
def encode(self, text=None,
image_path=None, image_pil=None, image_torch=None,
image_size: int=None, compress_rate=8, ignore_linebreak=True):
assert (text is None) + (image_path is None) + (image_pil is None) + (image_torch is None) == 3
assert int(compress_rate) in [4, 8, 16]
if text is not None:
if not ignore_linebreak:
text = text.replace('\n', '<n>')
tmp = self.text_tokenizer.encode(text)
return [x + self.num_image_tokens for x in tmp]
else:
need_norm_to_1 = False
if image_path is not None:
image_pil = Image.open(image_path)
if image_torch is None:
image_torch = pil_to_tensor(image_pil)
need_norm_to_1 = True
if image_size is not None:
# for speed in large-scale preprocessing, set this to None and transform in Dataloader.
# TODO: test speed
tr = transforms.Compose([
transforms.Resize(image_size),
transforms.CenterCrop(image_size),
])
image_torch = tr(image_torch)
image_torch = image_torch.to(self.image_tokenizer.device).float()
if need_norm_to_1:
image_torch /= 255.
return self.image_tokenizer.encode(image_torch, l=int(math.log2(compress_rate))-2)
|
(self, text=None, image_path=None, image_pil=None, image_torch=None, image_size: Optional[int] = None, compress_rate=8, ignore_linebreak=True)
|
68,569 |
icetk.ice_tokenizer
|
tokenize
| null |
def tokenize(self, text):
return self.text_tokenizer.tokenize(text)
|
(self, text)
|
68,578 |
times
|
format
|
Formats the given universal time for display in the given time zone.
|
def format(dt, timezone, fmt=None):
"""Formats the given universal time for display in the given time zone."""
local = to_local(dt, timezone)
if fmt is None:
return local.isoformat()
else:
return local.strftime(fmt)
|
(dt, timezone, fmt=None)
|
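A short sketch of `format` in use: the first argument must be a naive datetime interpreted as UTC (as `to_local` enforces), the timezone is an IANA name, and `fmt` falls back to ISO 8601 when omitted.

import datetime
import times

utc_dt = datetime.datetime(2023, 5, 1, 12, 0, 0)           # naive, treated as UTC
print(times.format(utc_dt, 'Europe/Amsterdam'))             # ISO 8601 in local time
print(times.format(utc_dt, 'Europe/Amsterdam', '%H:%M'))    # custom strftime pattern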
68,579 |
times
|
from_local
|
Converts the given local datetime to a universal datetime.
|
def from_local(local_dt, timezone=None):
"""Converts the given local datetime to a universal datetime."""
if not isinstance(local_dt, datetime.datetime):
raise TypeError('Expected a datetime object')
if timezone is None:
a = arrow.get(local_dt)
else:
a = arrow.get(local_dt, timezone)
return a.to('UTC').naive
|
(local_dt, timezone=None)
|
68,580 |
times
|
from_unix
|
Converts a UNIX timestamp, as returned by `time.time()`, to universal
time. Assumes the input is in UTC, as `time.time()` does.
|
def from_unix(ut):
"""
Converts a UNIX timestamp, as returned by `time.time()`, to universal
time. Assumes the input is in UTC, as `time.time()` does.
"""
if not isinstance(ut, (int, float)):
raise TypeError('Expected an int or float value')
return arrow.get(ut).naive
|
(ut)
|
68,583 |
times
|
to_local
|
Converts universal datetime to a local representation in given timezone.
|
def to_local(dt, timezone):
"""Converts universal datetime to a local representation in given timezone."""
if dt.tzinfo is not None:
raise ValueError(
'First argument to to_local() should be a universal time.'
)
if not isinstance(timezone, string_types):
raise TypeError('expected a timezone name (string), but got {} instead'.format(type(timezone)))
return arrow.get(dt).to(timezone).datetime
|
(dt, timezone)
|
68,584 |
times
|
to_universal
|
Converts the given local datetime or UNIX timestamp to a universal
datetime.
|
def to_universal(local_dt, timezone=None):
"""
Converts the given local datetime or UNIX timestamp to a universal
datetime.
"""
if isinstance(local_dt, (int, float)):
if timezone is not None:
raise ValueError('Timezone argument illegal when using UNIX timestamps.')
return from_unix(local_dt)
elif isinstance(local_dt, string_types):
local_dt = arrow.get(local_dt).to('UTC').naive
return from_local(local_dt, timezone)
|
(local_dt, timezone=None)
|
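The sketch below exercises the three input kinds `to_universal` dispatches on: a UNIX timestamp (handled by `from_unix`), an ISO string (parsed with arrow), and a naive local datetime paired with a timezone name (handled by `from_local`).

import datetime
import time
import times

times.to_universal(time.time())                      # float timestamp -> naive UTC datetime
times.to_universal('2023-05-01T14:00:00+02:00')      # string -> parsed, converted to UTC
local = datetime.datetime(2023, 5, 1, 14, 0, 0)
times.to_universal(local, 'Europe/Amsterdam')        # naive local time + timezone name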
68,585 |
times
|
to_unix
|
Converts a datetime object to unixtime
|
def to_unix(dt):
"""Converts a datetime object to unixtime"""
if not isinstance(dt, datetime.datetime):
raise TypeError('Expected a datetime object')
return arrow.get(dt).timestamp
|
(dt)
|
68,587 |
arx
|
ArxError
|
An arx command exception
|
class ArxError(Exception):
"""An arx command exception"""
pass
| null |
68,655 |
arx
|
RSync
|
rsync process context manager
Used for transferring files from src to dst. Use the `add` method
of the returned RSync object to add file paths to transfer.
|
class RSync:
"""rsync process context manager
Used for transferring files from src to dst. Use the `add` method
of the returned RSync object to add file paths to transfer.
"""
def __init__(self, src, dst):
self.cmd = [
'rsync',
'--verbose',
'--compress',
'--progress',
'--ignore-existing',
'--files-from=-',
'--recursive',
'--chmod=F-w',
str(src),
str(dst),
]
logging.debug(' '.join(self.cmd))
def __enter__(self):
self.proc = subprocess.Popen(
self.cmd,
stdin=subprocess.PIPE,
# stdout=subprocess.PIPE,
# stderr=subprocess.PIPE,
text=True,
)
return self
def add(self, path):
logging.debug(f" rsync: {path}")
self.proc.stdin.write(str(path) + '\n')
def __exit__(self, etype, value, traceback):
self.proc.stdin.close()
ret = self.proc.wait()
# FIXME: this should be a connection error, should we throw a
# different kind of error here?
if ret == 255:
raise ArxError(self.proc.stderr.read().strip())
elif ret != 0:
raise RuntimeError(self.proc.stderr.read().strip())
|
(src, dst)
|
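A usage sketch for the context manager above, assuming `RSync` is importable from the `arx` module, rsync is installed, and SSH access to the destination is configured; paths passed to `add` are fed to rsync's `--files-from=-` and are relative to the source root.

from arx import RSync

with RSync('/data/project', 'user@archive:/srv/archive') as rs:
    rs.add('results/run1.csv')   # relative to /data/project
    rs.add('results/run2.csv')
# leaving the block closes stdin, waits for rsync, and raises on a non-zero exit code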
68,656 |
arx
|
__enter__
| null |
def __enter__(self):
self.proc = subprocess.Popen(
self.cmd,
stdin=subprocess.PIPE,
# stdout=subprocess.PIPE,
# stderr=subprocess.PIPE,
text=True,
)
return self
|
(self)
|
68,657 |
arx
|
__exit__
| null |
def __exit__(self, etype, value, traceback):
self.proc.stdin.close()
ret = self.proc.wait()
# FIXME: this should be a connection error, should we throw a
# different kind of error here?
if ret == 255:
raise ArxError(self.proc.stderr.read().strip())
elif ret != 0:
raise RuntimeError(self.proc.stderr.read().strip())
|
(self, etype, value, traceback)
|
68,658 |
arx
|
__init__
| null |
def __init__(self, src, dst):
self.cmd = [
'rsync',
'--verbose',
'--compress',
'--progress',
'--ignore-existing',
'--files-from=-',
'--recursive',
'--chmod=F-w',
str(src),
str(dst),
]
logging.debug(' '.join(self.cmd))
|
(self, src, dst)
|
68,659 |
arx
|
add
| null |
def add(self, path):
logging.debug(f" rsync: {path}")
self.proc.stdin.write(str(path) + '\n')
|
(self, path)
|
68,660 |
arx
|
Remote
|
class representing an archive remote, accessible via SSH
|
class Remote:
"""class representing an archive remote, accessible via SSH
"""
def __init__(self, remote):
"""initialize remote
The `remote` argument should be of the form:
path
:path
[user@]host:path
[user@]host:
If the path is not specified it is assumed to be '.'.
"""
hostpath = remote.split(':')
if len(hostpath) == 1:
host = ''
path = hostpath[0]
else:
host, path = hostpath
if path == '':
path = '.'
self.host = host
self.path = Path(path)
logging.debug(self)
@property
def remote(self):
if self.host:
return f'{self.host}:{self.path}'
else:
return f'{self.path}'
def __str__(self):
return '<Remote {}>'.format(self.remote)
def test(self):
"""test validity of remote
Will raise ArxError if the remote is not configured correctly.
Will also resolve the remote path to be absolute.
"""
if self.host:
cmd = ['ssh', '-T', self.host]
else:
cmd = ['sh', '-c']
cmd += [f'cd {self.path} && pwd']
logging.debug("remote command: " + ' '.join(cmd))
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
stdout, stderr = proc.communicate()
if proc.returncode == 255:
raise ArxError(f"Could not connect to remote '{self.host}'.")
elif proc.returncode == 1:
raise ArxError(f"Remote path '{self.path}' not found. Please check remote configuration and try again.")
elif proc.returncode != 0:
err = str(stderr).strip()
raise ArxError(f"Unknown ssh connection error, return code {proc.returncode}: {err}")
self.path = Path(stdout.strip())
|
(remote)
|
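The forms accepted by `Remote` map onto the split on ':' shown above; this sketch (assuming `Remote` is importable from `arx`) shows a host-plus-path remote, a host-only remote whose path defaults to '.', and a purely local one. `test()` additionally needs working SSH access.

from arx import Remote

r1 = Remote('user@archive:/srv/archive')   # host and absolute path
r2 = Remote('archive:')                    # host only, path becomes '.'
r3 = Remote('/srv/archive')                # local path, host stays ''
print(r1.remote, r2.remote, r3.remote)
r1.test()                                  # runs `cd <path> && pwd` over ssh and absolutizes the path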
68,661 |
arx
|
__init__
|
initialize remote
The `remote` argument should be of the form:
path
:path
[user@]host:path
[user@]host:
If the path is not specified it is assumed to be '.'.
|
def __init__(self, remote):
"""initialize remote
The `remote` argument should be of the form:
path
:path
[user@]host:path
[user@]host:
If the path is not specified it is assumed to be '.'.
"""
hostpath = remote.split(':')
if len(hostpath) == 1:
host = ''
path = hostpath[0]
else:
host, path = hostpath
if path == '':
path = '.'
self.host = host
self.path = Path(path)
logging.debug(self)
|
(self, remote)
|
68,662 |
arx
|
__str__
| null |
def __str__(self):
return '<Remote {}>'.format(self.remote)
|
(self)
|
68,663 |
arx
|
test
|
test validity of remote
Will raise ArxError if the remote is not configured correctly.
Will also resolve the remote path to be absolute.
|
def test(self):
"""test validity of remote
Will raise ArxError if the remote is not configured correctly.
Will also resolve the remote path to be absolute.
"""
if self.host:
cmd = ['ssh', '-T', self.host]
else:
cmd = ['sh', '-c']
cmd += [f'cd {self.path} && pwd']
logging.debug("remote command: " + ' '.join(cmd))
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
stdout, stderr = proc.communicate()
if proc.returncode == 255:
raise ArxError(f"Could not connect to remote '{self.host}'.")
elif proc.returncode == 1:
raise ArxError(f"Remote path '{self.path}' not found. Please check remote configuration and try again.")
elif proc.returncode != 0:
err = str(stderr).strip()
raise ArxError(f"Unknown ssh connection error, return code {proc.returncode}: {err}")
self.path = Path(stdout.strip())
|
(self)
|
68,664 |
arx
|
Repo
|
class representing a local version of a remote archive
|
class Repo:
"""class representing a local version of a remote archive
"""
CDIR = f'.{PROG}'
def __init__(self, path):
"""initialize repo at path
Config file will be loaded and remote will be extracted.
"""
self.root = Path(path).resolve()
cfile = self.root.joinpath(self.CDIR, 'config')
with open(cfile) as f:
self.config = yaml.safe_load(f)
logging.debug(self)
self.remote = Remote(self.config['remote'])
def __str__(self):
return '<Repo {}>'.format(self.root)
@classmethod
def init(cls, remote, path, force=False):
"""initialize a repository at path for remote
The `remote` argument should be of the form:
[user@]host[:path]
If `force` is True the repo will be initialized even if a repo
has already been initialized for the path.
"""
cpath = Path(path).joinpath(cls.CDIR)
if cpath.exists() and not force:
raise ArxError(f"Repo path already initialized: {cpath}")
# configure and test the remote
remote = Remote(remote)
remote.test()
# make config directory
try:
cpath.mkdir(parents=True)
except FileExistsError:
pass
# write config file
with open(cpath.joinpath('config'), 'w') as f:
f.write(yaml.dump({
'remote': remote.remote,
}))
logging.info(f"initialized {PROG} repo at {cpath}")
return cls(path)
@classmethod
def find(cls, path=None):
"""find the repo for a given path
"""
if not path:
path = Path(os.getcwd())
for root in [path] + list(path.parents):
logging.debug(f"checking {root}...")
if root.joinpath(cls.CDIR).exists():
break
else:
raise ArxError(f"Directory '{path}' does not appear to be a {PROG} repo (sub)directory. Try running 'init' first.")
return cls(root)
##########
def _resolve_path(self, path):
"""resolve path relative to repo root
"""
try:
return Path(path).relative_to(self.root)
except ValueError:
return Path(path)
def commit(self, *paths):
"""commit paths to the archive
Any file paths that already exist in the archive will not be
committed, and a warning will be issued.
"""
# find all files and check that they exist (will throw an
# ArxError if not)
files, errors = _find_files(*paths)
if errors:
raise ArxError("The following errors were encountered:\n"+errors)
files = [f.relative_to(self.root) for f in files]
try:
with RSync(self.root, self.remote.remote) as rs:
for path in files:
rs.add(path)
except subprocess.CalledProcessError:
raise ArxError("Failed to transfer some paths to archive.")
# FIXME: make this configurable
# lock all local files committed to the remote
# for f in files:
# _read_only(f)
def checkout(self, *paths):
"""checkout paths from the archive
Files are copied into the local directory from the remote
repo.
"""
paths = [self._resolve_path(path) for path in paths]
try:
with RSync(self.remote.remote, self.root) as rs:
for path in paths:
rs.add(path)
except subprocess.CalledProcessError:
raise ArxError("Failed to retrieve some paths from archive.")
def list(self, *paths, remote=False, depth=None):
"""list of files in the repo
If `remote` is True files on the remote will be listed.
If `depth` is a positive integer, the depth of the listing
will be limited to that number of directory levels.
Returns a tuple of (files, errors) where `files` is the list of
files and `errors` is any errors encountered.
"""
if remote:
root = self.remote.path
remote = self.remote.host
else:
root = self.root
remote = None
if paths:
# resolve all paths to be relative to the root paths,
# which should be absolute
paths = [root.joinpath(self._resolve_path(path)) for path in paths]
else:
paths = [root]
files, errors = _find_files(*paths, remote=remote, depth=depth)
files = [f.relative_to(root) for f in files]
return files, errors
|
(path)
|
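A hedged end-to-end sketch of the `Repo` workflow, assuming `Repo` is importable from `arx`, the config directory name resolves to something like `.arx`, and the archive host is reachable over SSH.

from arx import Repo

repo = Repo.init('user@archive:/srv/archive', '/data/project')   # writes the config file
repo = Repo.find()                                 # from a working directory inside /data/project
repo.commit('results/run1.csv')                    # rsync local -> remote, skipping existing files
repo.checkout('results/run0.csv')                  # rsync remote -> local
files, errors = repo.list(remote=True, depth=2)    # shallow listing of the archive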
68,665 |
arx
|
__init__
|
initialize repo at path
Config file will be loaded and remote will be extracted.
|
def __init__(self, path):
"""initialize repo at path
Config file will be loaded and remote will be extracted.
"""
self.root = Path(path).resolve()
cfile = self.root.joinpath(self.CDIR, 'config')
with open(cfile) as f:
self.config = yaml.safe_load(f)
logging.debug(self)
self.remote = Remote(self.config['remote'])
|
(self, path)
|
68,666 |
arx
|
__str__
| null |
def __str__(self):
return '<Repo {}>'.format(self.root)
|
(self)
|
68,667 |
arx
|
_resolve_path
|
resolve path relative to repo root
|
def _resolve_path(self, path):
"""resolve path relative to repo root
"""
try:
return Path(path).relative_to(self.root)
except ValueError:
return Path(path)
|
(self, path)
|
68,668 |
arx
|
checkout
|
checkout paths from the archive
Files are copied into the local directory from the remote
repo.
|
def checkout(self, *paths):
"""checkout paths from the archive
Files are copied into the local directory from the remote
repo.
"""
paths = [self._resolve_path(path) for path in paths]
try:
with RSync(self.remote.remote, self.root) as rs:
for path in paths:
rs.add(path)
except subprocess.CalledProcessError:
raise ArxError("Failed to retrieve some paths from archive.")
|
(self, *paths)
|
68,669 |
arx
|
commit
|
commit paths to the archive
Any file paths that already exist in the archive will not be
committed, and a warning will be issued.
|
def commit(self, *paths):
"""commit paths to the archive
Any file paths that already exist in the archive will not be
committed, and a warning will be issued.
"""
# find all files and check that they exist (will throw an
# ArxError if not)
files, errors = _find_files(*paths)
if errors:
raise ArxError("The following errors were encountered:\n"+errors)
files = [f.relative_to(self.root) for f in files]
try:
with RSync(self.root, self.remote.remote) as rs:
for path in files:
rs.add(path)
except subprocess.CalledProcessError:
raise ArxError("Failed to transfer some paths to archive.")
# FIXME: make this configurable
# lock all local files committed to the remote
# for f in files:
# _read_only(f)
|
(self, *paths)
|
68,670 |
arx
|
list
|
list of files in the repo
If `remote` is True files on the remote will be listed.
If `depth` is a positive integer, the depth of the listing
will be limited to that number of directory levels.
Returns a tuple of (files, errors) where `files` is the list of
files and `errors` is any errors encountered.
|
def list(self, *paths, remote=False, depth=None):
"""list of files in the repo
If `remote` is True files on the remote will be listed.
If `depth` is a positive integer, the depth of the listing
will be limited to that number of directory levels.
Returns a tuple of (files, errors) where `files` is the list of
files and `errors` is any errors encountered.
"""
if remote:
root = self.remote.path
remote = self.remote.host
else:
root = self.root
remote = None
if paths:
# resolve all paths to be relative to the root paths,
# which should be absolute
paths = [root.joinpath(self._resolve_path(path)) for path in paths]
else:
paths = [root]
files, errors = _find_files(*paths, remote=remote, depth=depth)
files = [f.relative_to(root) for f in files]
return files, errors
|
(self, *paths, remote=False, depth=None)
|
68,671 |
arx
|
_find_files
|
list all files in the specified paths
If remote is specified the command is executed on the specified
remote host over ssh.
If `depth` is a positive integer, the depth of the listing will be
limited to that number of directory levels.
Returns a tuple of (files, errors), where errors is the error
string if any errors are encountered.
|
def _find_files(*paths, remote=None, depth=None):
"""list all files in the specified paths
If remote is specified the command is executed on the specified
remote host over ssh.
If `depth` is a positive integer, the depth of the listing will be
limited to that number of directory levels.
Returns a tuple of (files, errors), where errors is the error
string if any errors are encountered.
"""
# FIXME: rsync has a --list-only option that may be useful instead
# of find?
cmd = []
if remote:
cmd += [
'ssh', '-T', remote,
]
cmd += [
'find',
# follow symbolic links on the command line
'-H',
]
cmd += list(map(str, paths))
# if not using the depth option, filter for only files
if depth is None:
cmd += [
# find only files
'-type', 'f',
]
if depth is not None:
if not isinstance(depth, int):
raise ArxError("list depth must be an int greater than 0")
cmd += ['-maxdepth', str(int(depth))]
logging.debug(' '.join(cmd))
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
files = []
errors = None
while True:
line = proc.stdout.readline().strip()
if not line:
break
# skip hidden files
if '/.' in line:
continue
files.append(Path(line))
proc.stdout.close()
ret = proc.wait()
# FIXME: should we throw a different kind of error here?
if remote and ret == 255:
raise ArxError(proc.stderr.read().strip())
if ret != 0:
errors = proc.stderr.read().strip()
logging.debug(f"errors: {errors}")
return files, errors
|
(*paths, remote=None, depth=None)
|
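A small sketch of the helper above: with `depth` set, the `-type f` filter is skipped, so directories are listed too and `-maxdepth` bounds the walk; with `remote` set, the same `find` runs over SSH.

entries, errors = _find_files('/srv/archive/results', remote='user@archive', depth=2)
for path in entries:
    print(path)            # pathlib.Path objects, hidden paths already skipped
if errors:
    print('find reported:', errors)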
68,672 |
arx
|
_read_only
|
remove write permission from path
|
def _read_only(path):
"""remove write permission from path"""
cur = stat.S_IMODE(path.stat().st_mode)
# ugo-w
path.chmod(cur & ~stat.S_IWUSR & ~stat.S_IWGRP & ~stat.S_IWOTH)
|
(path)
|
68,679 |
path_add_jlussier.add_parent_path
|
add_parent_path_to_sys_path
| null |
def add_parent_path_to_sys_path(target_dir:str, search_dir:str):
targetDirectoryName = target_dir
currentLocation = os.path.dirname(search_dir)
testResults = currentLocation.find(targetDirectoryName)
targetPath = (currentLocation[0:(testResults + len(targetDirectoryName))])
sys.path.append(targetPath)
|
(target_dir: str, search_dir: str)
|
68,680 |
flake8_deprecated
|
Flake8Deprecated
| null |
class Flake8Deprecated:
name = 'flake8_deprecated'
version = '1.2'
message = 'D001 found {0:s} replace it with {1:s}'
deprecations = {
'assertEqual': (
'failUnlessEqual',
'assertEquals',
),
'assertNotEqual': ('failIfEqual',),
'assertTrue': (
'failUnless',
'assert_',
),
'assertFalse': ('failIf',),
'assertRaises': ('failUnlessRaises',),
'assertAlmostEqual': ('failUnlessAlmostEqual',),
'assertNotAlmostEqual': ('failIfAlmostEqual',),
'AccessControl.ClassSecurityInfo.protected': ('declareProtected',),
'AccessControl.ClassSecurityInfo.private': ('declarePrivate',),
'AccessControl.ClassSecurityInfo.public': ('declarePublic',),
'zope.interface.provider': ('directlyProvides',),
'zope.interface.implementer': (
'classImplements',
'implements',
),
'self.loadZCML(': ('xmlconfig.file',),
'zope.component.adapter': ('adapts',),
}
def __init__(self, tree):
self.old_aliases = self._reverse_data()
self.tree = tree
def run(self):
for node in ast.walk(self.tree):
value = None
if isinstance(node, ast.Call):
value = self.check_calls(node)
elif isinstance(node, ast.FunctionDef):
value = self.check_decorators(node)
if value:
yield from value
def check_calls(self, node):
function_name = getattr(node.func, 'id', '')
if function_name:
value = self.check_function_call(node)
else:
value = self.check_method_call(node)
if value:
yield from value
def check_function_call(self, node):
function_name = node.func.id
for old_alias in self.old_aliases:
if function_name == old_alias:
yield self.error(node, old_alias)
def check_method_call(self, node):
"""Check method calls, i.e. self.SOME_CALL()
Note that this can be endlessly nested, i.e. self.obj.another.more.SOME_CALL()
"""
method_name = getattr(node.func, 'attr', None)
if not method_name:
return
is_obj = getattr(node.func, 'value', False)
for old_alias in self.old_aliases:
if method_name == old_alias:
yield self.error(node, old_alias)
elif '.' in old_alias and is_obj:
obj_name = getattr(node.func.value, 'attr', False)
obj_id = getattr(node.func.value, 'id', False)
for name in (obj_name, obj_id):
if f'{name}.{method_name}' == old_alias:
yield self.error(node, old_alias)
def check_decorators(self, node):
"""Check decorators names for deprecated aliases
Check for function-style decorators, i.e @my_deprecated_decorator()
as well as for alias-like decorators, i.e @my_deprecated_decorator
"""
for decorator in node.decorator_list:
name = None
if isinstance(decorator, ast.Attribute):
name = decorator.attr
elif isinstance(decorator, ast.Name):
name = decorator.id
if not name:
continue
for old_alias in self.old_aliases:
if name == old_alias:
yield self.error(node, old_alias)
def _reverse_data(self):
"""Reverse the deprecation dictionary
This way, we can more easily loop through the deprecated snippets.
We only care about the new version at error reporting time.
"""
return {
old_alias: new_version
for new_version, alias_list in self.deprecations.items()
for old_alias in alias_list
}
def error(self, statement, old_alias):
return (
statement.lineno,
statement.col_offset,
self.message.format(old_alias, self.old_aliases[old_alias]),
type(self),
)
|
(tree)
|
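Outside of flake8 the checker can be driven directly from an `ast` tree; the sketch below (assuming the class is importable from the `flake8_deprecated` module) flags a deprecated `assertEquals` call.

import ast
from flake8_deprecated import Flake8Deprecated

source = '''
class TestFoo(unittest.TestCase):
    def test_it(self):
        self.assertEquals(1, 1)
'''
checker = Flake8Deprecated(ast.parse(source))
for lineno, col, message, _ in checker.run():
    print(lineno, col, message)   # D001 found assertEquals replace it with assertEqual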
68,681 |
flake8_deprecated
|
__init__
| null |
def __init__(self, tree):
self.old_aliases = self._reverse_data()
self.tree = tree
|
(self, tree)
|
68,682 |
flake8_deprecated
|
_reverse_data
|
Reverse the deprecation dictionary
This way, we can more easily loop through the deprecated snippets.
We only care about the new version at error reporting time.
|
def _reverse_data(self):
"""Reverse the deprecation dictionary
This way, we can more easily loop through the deprecated snippets.
We only care about the new version at error reporting time.
"""
return {
old_alias: new_version
for new_version, alias_list in self.deprecations.items()
for old_alias in alias_list
}
|
(self)
|
68,683 |
flake8_deprecated
|
check_calls
| null |
def check_calls(self, node):
function_name = getattr(node.func, 'id', '')
if function_name:
value = self.check_function_call(node)
else:
value = self.check_method_call(node)
if value:
yield from value
|
(self, node)
|
68,684 |
flake8_deprecated
|
check_decorators
|
Check decorator names for deprecated aliases
Check for function-style decorators, i.e. @my_deprecated_decorator()
as well as for alias-like decorators, i.e. @my_deprecated_decorator
|
def check_decorators(self, node):
"""Check decorators names for deprecated aliases
Check for function-style decorators, i.e @my_deprecated_decorator()
as well as for alias-like decorators, i.e @my_deprecated_decorator
"""
for decorator in node.decorator_list:
name = None
if isinstance(decorator, ast.Attribute):
name = decorator.attr
elif isinstance(decorator, ast.Name):
name = decorator.id
if not name:
continue
for old_alias in self.old_aliases:
if name == old_alias:
yield self.error(node, old_alias)
|
(self, node)
|
68,685 |
flake8_deprecated
|
check_function_call
| null |
def check_function_call(self, node):
function_name = node.func.id
for old_alias in self.old_aliases:
if function_name == old_alias:
yield self.error(node, old_alias)
|
(self, node)
|
68,686 |
flake8_deprecated
|
check_method_call
|
Check method calls, i.e. self.SOME_CALL()
Note that this can be endlessly nested, i.e. self.obj.another.more.SOME_CALL()
|
def check_method_call(self, node):
"""Check method calls, i.e. self.SOME_CALL()
Note that this can be endlessly nested, i.e. self.obj.another.more.SOME_CALL()
"""
method_name = getattr(node.func, 'attr', None)
if not method_name:
return
is_obj = getattr(node.func, 'value', False)
for old_alias in self.old_aliases:
if method_name == old_alias:
yield self.error(node, old_alias)
elif '.' in old_alias and is_obj:
obj_name = getattr(node.func.value, 'attr', False)
obj_id = getattr(node.func.value, 'id', False)
for name in (obj_name, obj_id):
if f'{name}.{method_name}' == old_alias:
yield self.error(node, old_alias)
|
(self, node)
|
68,687 |
flake8_deprecated
|
error
| null |
def error(self, statement, old_alias):
return (
statement.lineno,
statement.col_offset,
self.message.format(old_alias, self.old_aliases[old_alias]),
type(self),
)
|
(self, statement, old_alias)
|
68,688 |
flake8_deprecated
|
run
| null |
def run(self):
for node in ast.walk(self.tree):
value = None
if isinstance(node, ast.Call):
value = self.check_calls(node)
elif isinstance(node, ast.FunctionDef):
value = self.check_decorators(node)
if value:
yield from value
|
(self)
|
68,690 |
triad.collections.fs
|
FileSystem
|
A unified filesystem based on PyFileSystem2. The special requirement
for this class is that all paths must be absolute paths with a scheme.
To customize different file systems, you should override `create_fs`
to provide your own configured file systems.
:param auto_close: If `True` (the default), the child filesystems
will be closed when `MountFS` is closed.
.. admonition:: Examples
.. code-block:: python
fs = FileSystem()
fs.writetext("mem://from/a.txt", "hello")
fs.copy("mem://from/a.txt", "mem://to/a.txt")
.. note::
If a path is not a local path, it must include the scheme and `netloc`
(the first element after `://`)
|
class FileSystem(MountFS):
"""A unified filesystem based on PyFileSystem2. The special requirement
for this class is that all paths must be absolute paths with a scheme.
To customize different file systems, you should override `create_fs`
to provide your own configured file systems.
:param auto_close: If `True` (the default), the child filesystems
will be closed when `MountFS` is closed.
.. admonition:: Examples
.. code-block:: python
fs = FileSystem()
fs.writetext("mem://from/a.txt", "hello")
fs.copy("mem://from/a.txt", "mem://to/a.txt")
.. note::
If a path is not a local path, it must include the scheme and `netloc`
(the first element after `://`)
"""
def __init__(self, auto_close: bool = True):
super().__init__(auto_close)
self._fs_store: Dict[str, FSBase] = {}
self._in_create = False
self._fs_lock = RLock()
def create_fs(self, root: str) -> FSBase:
"""create a PyFileSystem instance from `root`. `root` is in the
format of `/` if local path, else `<scheme>://<netloc>`.
You should override this method to provide custom instances, for
example, if you want to create an S3FS with certain parameters.
:param root: `/` if local path, else `<scheme>://<netloc>`
"""
if root.startswith("temp://"):
fs = tempfs.TempFS(root[len("temp://") :])
return fs
if root.startswith("mem://"):
fs = memoryfs.MemoryFS()
return fs
return open_fs(root)
@property
def glob(self):
"""A globber object"""
return _BoundGlobber(self)
def _delegate(self, path) -> Tuple[FSBase, str]:
with self._fs_lock:
if self._in_create: # pragma: no cover
return super()._delegate(path)
self._in_create = True
fp = _FSPath(path)
if fp.root not in self._fs_store:
self._fs_store[fp.root] = self.create_fs(fp.root)
self.mount(to_uuid(fp.root), self._fs_store[fp.root])
self._in_create = False
m_path = to_uuid(fp.root) + "/" + fp.relative_path
return super()._delegate(m_path)
def makedirs(
self, path: str, permissions: Any = None, recreate: bool = False
) -> SubFS:
"""Make a directory, and any missing intermediate directories.
.. note::
This overrides the base ``makedirs``
:param path: path to directory from root.
:param permissions: initial permissions, or `None` to use defaults.
:param recreate: if `False` (the default), attempting to
create an existing directory will raise an error. Set
to `True` to ignore existing directories.
:return: a sub-directory filesystem.
:raises fs.errors.DirectoryExists: if the path is already
a directory, and ``recreate`` is `False`.
:raises fs.errors.DirectoryExpected: if one of the ancestors
in the path is not a directory.
"""
self.check()
fs, _path = self._delegate(path)
return fs.makedirs(_path, permissions=permissions, recreate=recreate)
|
(auto_close: bool = True)
|
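Building on the class's own `mem://` example, the sketch below shows the scheme-based routing in `_delegate`: each distinct root gets its own child filesystem created by `create_fs`, and `makedirs`/`copy` then work across schemes. The paths and scheme choices here are purely illustrative.

from triad.collections.fs import FileSystem

fs = FileSystem()
fs.makedirs('mem://data/raw', recreate=True)               # backed by MemoryFS
fs.writetext('mem://data/raw/a.txt', 'hello')
fs.copy('mem://data/raw/a.txt', 'temp://scratch/a.txt')    # cross-filesystem copy into a TempFS
print(fs.readtext('temp://scratch/a.txt'))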
68,694 |
triad.collections.fs
|
__init__
| null |
def __init__(self, auto_close: bool = True):
super().__init__(auto_close)
self._fs_store: Dict[str, FSBase] = {}
self._in_create = False
self._fs_lock = RLock()
|
(self, auto_close: bool = True)
|
68,695 |
fs.mountfs
|
__repr__
| null |
def __repr__(self):
# type: () -> str
return "MountFS(auto_close={!r})".format(self.auto_close)
|
(self)
|
68,696 |
fs.mountfs
|
__str__
| null |
def __str__(self):
# type: () -> str
return "<mountfs>"
|
(self)
|
68,697 |
triad.collections.fs
|
_delegate
| null |
def _delegate(self, path) -> Tuple[FSBase, str]:
with self._fs_lock:
if self._in_create: # pragma: no cover
return super()._delegate(path)
self._in_create = True
fp = _FSPath(path)
if fp.root not in self._fs_store:
self._fs_store[fp.root] = self.create_fs(fp.root)
self.mount(to_uuid(fp.root), self._fs_store[fp.root])
self._in_create = False
m_path = to_uuid(fp.root) + "/" + fp.relative_path
return super()._delegate(m_path)
|
(self, path) -> Tuple[fs.base.FS, str]
|
68,701 |
fs.mountfs
|
close
| null |
def close(self):
# type: () -> None
# Explicitly closes children if requested
if self.auto_close:
for _path, fs in self.mounts:
fs.close()
del self.mounts[:]
self.default_fs.close()
super(MountFS, self).close()
|
(self)
|
68,702 |
fs.base
|
copy
|
Copy file contents from ``src_path`` to ``dst_path``.
Arguments:
src_path (str): Path of source file.
dst_path (str): Path to destination file.
overwrite (bool): If `True`, overwrite the destination file
if it exists (defaults to `False`).
preserve_time (bool): If `True`, try to preserve mtime of the
resource (defaults to `False`).
Raises:
fs.errors.DestinationExists: If ``dst_path`` exists,
and ``overwrite`` is `False`.
fs.errors.ResourceNotFound: If a parent directory of
``dst_path`` does not exist.
fs.errors.FileExpected: If ``src_path`` is not a file.
|
def copy(
self,
src_path, # type: Text
dst_path, # type: Text
overwrite=False, # type: bool
preserve_time=False, # type: bool
):
# type: (...) -> None
"""Copy file contents from ``src_path`` to ``dst_path``.
Arguments:
src_path (str): Path of source file.
dst_path (str): Path to destination file.
overwrite (bool): If `True`, overwrite the destination file
if it exists (defaults to `False`).
preserve_time (bool): If `True`, try to preserve mtime of the
resource (defaults to `False`).
Raises:
fs.errors.DestinationExists: If ``dst_path`` exists,
and ``overwrite`` is `False`.
fs.errors.ResourceNotFound: If a parent directory of
``dst_path`` does not exist.
fs.errors.FileExpected: If ``src_path`` is not a file.
"""
with self._lock:
if not overwrite and self.exists(dst_path):
raise errors.DestinationExists(dst_path)
with closing(self.open(src_path, "rb")) as read_file:
# FIXME(@althonos): typing complains because open return IO
self.upload(dst_path, read_file) # type: ignore
if preserve_time:
copy_modified_time(self, src_path, self, dst_path)
|
(self, src_path, dst_path, overwrite=False, preserve_time=False)
|
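The overwrite behaviour documented above can be seen with a throwaway in-memory filesystem; this is a small sketch using PyFilesystem's `open_fs` and `fs.errors`.

from fs import errors, open_fs

mem = open_fs('mem://')
mem.writetext('a.txt', 'hello')
mem.copy('a.txt', 'b.txt')                    # destination does not exist yet
try:
    mem.copy('a.txt', 'b.txt')                # raises DestinationExists
except errors.DestinationExists:
    mem.copy('a.txt', 'b.txt', overwrite=True)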
68,705 |
triad.collections.fs
|
create_fs
|
create a PyFileSystem instance from `root`. `root` is in the
format of `/` if local path, else `<scheme>://<netloc>`.
You should override this method to provide custom instances, for
example, if you want to create an S3FS with certain parameters.
:param root: `/` if local path, else `<scheme>://<netloc>`
|
def create_fs(self, root: str) -> FSBase:
"""create a PyFileSystem instance from `root`. `root` is in the
format of `/` if local path, else `<scheme>://<netloc>`.
You should override this method to provide custom instances, for
example, if you want to create an S3FS with certain parameters.
:param root: `/` if local path, else `<scheme>://<netloc>`
"""
if root.startswith("temp://"):
fs = tempfs.TempFS(root[len("temp://") :])
return fs
if root.startswith("mem://"):
fs = memoryfs.MemoryFS()
return fs
return open_fs(root)
|
(self, root: str) -> fs.base.FS
|
68,706 |
fs.mountfs
|
desc
| null |
def desc(self, path):
# type: (Text) -> Text
if not self.exists(path):
raise errors.ResourceNotFound(path)
fs, delegate_path = self._delegate(path)
if fs is self.default_fs:
fs = self
return "{path} on {fs}".format(fs=fs, path=delegate_path)
|
(self, path)
|
68,707 |
fs.mountfs
|
download
| null |
def download(self, path, file, chunk_size=None, **options):
# type: (Text, BinaryIO, Optional[int], **Any) -> None
fs, _path = self._delegate(path)
return fs.download(_path, file, chunk_size=chunk_size, **options)
|
(self, path, file, chunk_size=None, **options)
|
68,708 |
fs.base
|
exists
|
Check if a path maps to a resource.
Arguments:
path (str): Path to a resource.
Returns:
bool: `True` if a resource exists at the given path.
|
def exists(self, path):
# type: (Text) -> bool
"""Check if a path maps to a resource.
Arguments:
path (str): Path to a resource.
Returns:
bool: `True` if a resource exists at the given path.
"""
try:
self.getinfo(path)
except errors.ResourceNotFound:
return False
else:
return True
|
(self, path)
|
68,714 |
fs.mountfs
|
getinfo
| null |
def getinfo(self, path, namespaces=None):
# type: (Text, Optional[Collection[Text]]) -> Info
self.check()
fs, _path = self._delegate(path)
return fs.getinfo(_path, namespaces=namespaces)
|
(self, path, namespaces=None)
|