code | docstring | func_name | language | repo | path | url | license |
---|---|---|---|---|---|---|---|
def __init__(self, app, *, options=None):
"""Initialize a new standalone application.
Args:
app: A WSGI Python application.
options (dict): the configuration.
"""
self.options = options or {}
self.application = app
super().__init__() | Initialize a new standalone application.
Args:
app: A WSGI Python application.
options (dict): the configuration.
| __init__ | python | bigchaindb/bigchaindb | bigchaindb/web/server.py | https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/web/server.py | Apache-2.0 |
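The row above only shows the initializer. For context, here is a minimal sketch of how such a standalone application is typically completed, assuming (as gunicorn's documented embedding pattern suggests) that `StandaloneApplication` subclasses `gunicorn.app.base.BaseApplication`; the `load_config`/`load` hooks below belong to that base class.

```python
# Minimal sketch, assuming StandaloneApplication extends gunicorn's
# BaseApplication; load_config() and load() are the documented hooks
# that BaseApplication invokes during __init__/run().
from gunicorn.app.base import BaseApplication


class StandaloneApplication(BaseApplication):
    def __init__(self, app, *, options=None):
        self.options = options or {}
        self.application = app
        super().__init__()

    def load_config(self):
        # Forward only the options gunicorn actually recognizes.
        for key, value in self.options.items():
            if key in self.cfg.settings and value is not None:
                self.cfg.set(key.lower(), value)

    def load(self):
        return self.application
```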
def create_app(*, debug=False, threads=1, bigchaindb_factory=None):
"""Return an instance of the Flask application.
Args:
debug (bool): a flag to activate the debug mode for the app
(default: False).
threads (int): number of threads to use
Return:
an instance of the Flask application.
"""
if not bigchaindb_factory:
bigchaindb_factory = BigchainDB
app = Flask(__name__)
app.wsgi_app = StripContentTypeMiddleware(app.wsgi_app)
CORS(app)
app.debug = debug
app.config['bigchain_pool'] = utils.pool(bigchaindb_factory, size=threads)
add_routes(app)
return app | Return an instance of the Flask application.
Args:
debug (bool): a flag to activate the debug mode for the app
(default: False).
threads (int): number of threads to use
Return:
an instance of the Flask application.
| create_app | python | bigchaindb/bigchaindb | bigchaindb/web/server.py | https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/web/server.py | Apache-2.0 |
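A hypothetical usage sketch of the factory above; the port number is an assumption based on BigchainDB's default HTTP API port.

```python
# Hypothetical usage: build the app with a pool of 4 BigchainDB
# instances and serve it with Flask's development server.
app = create_app(debug=True, threads=4)
app.run(host='127.0.0.1', port=9984)  # 9984: assumed BigchainDB default API port
```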
def create_server(settings, log_config=None, bigchaindb_factory=None):
"""Wrap and return an application ready to be run.
Args:
settings (dict): a dictionary containing the settings, more info
here http://docs.gunicorn.org/en/latest/settings.html
Return:
an initialized instance of the application.
"""
settings = copy.deepcopy(settings)
if not settings.get('workers'):
settings['workers'] = (multiprocessing.cpu_count() * 2) + 1
if not settings.get('threads'):
# Note: Threading is not recommended currently, as the frontend workload
# is largely CPU bound and parallelisation across Python threads makes it
# slower.
settings['threads'] = 1
settings['custom_log_config'] = log_config
app = create_app(debug=settings.get('debug', False),
threads=settings['threads'],
bigchaindb_factory=bigchaindb_factory)
standalone = StandaloneApplication(app, options=settings)
return standalone | Wrap and return an application ready to be run.
Args:
settings (dict): a dictionary containing the settings, more info
here http://docs.gunicorn.org/en/latest/settings.html
Return:
an initialized instance of the application.
| create_server | python | bigchaindb/bigchaindb | bigchaindb/web/server.py | https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/web/server.py | Apache-2.0 |
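A sketch of calling `create_server` with gunicorn-style settings; the keys mirror http://docs.gunicorn.org/en/latest/settings.html, and an unset `workers` falls back to `(2 * CPU count) + 1` as shown above. The bind address is an assumption.

```python
# Sketch: gunicorn-style settings dict for create_server().
settings = {
    'bind': '0.0.0.0:9984',  # assumed address/port
    'workers': 4,            # omit to get (cpu_count * 2) + 1
    'threads': 1,            # threading is discouraged here, see the comment above
}
server = create_server(settings)
server.run()  # blocks; gunicorn forks the configured workers
```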
def __call__(self, environ, start_response):
"""Run the middleware and then call the original WSGI application."""
if environ['REQUEST_METHOD'] == 'GET':
try:
del environ['CONTENT_TYPE']
except KeyError:
pass
else:
logger.debug('Remove header "Content-Type" from GET request')
return self.app(environ, start_response) | Run the middleware and then call the original WSGI application. | __call__ | python | bigchaindb/bigchaindb | bigchaindb/web/strip_content_type_middleware.py | https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/web/strip_content_type_middleware.py | Apache-2.0 |
def _multiprocessing_to_asyncio(in_queue, out_queue, loop):
"""Bridge between a synchronous multiprocessing queue
and an asynchronous asyncio queue.
Args:
in_queue (multiprocessing.Queue): input queue
out_queue (asyncio.Queue): output queue
"""
while True:
value = in_queue.get()
loop.call_soon_threadsafe(out_queue.put_nowait, value) | Bridge between a synchronous multiprocessing queue
and an asynchronous asyncio queue.
Args:
in_queue (multiprocessing.Queue): input queue
out_queue (asyncio.Queue): output queue
| _multiprocessing_to_asyncio | python | bigchaindb/bigchaindb | bigchaindb/web/websocket_server.py | https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/web/websocket_server.py | Apache-2.0 |
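A usage sketch paralleling `start()` further down: the bridge runs in a daemon thread so the blocking `in_queue.get()` never stalls the event loop, and `call_soon_threadsafe` hands each value over safely.

```python
import asyncio
import multiprocessing
import threading

loop = asyncio.get_event_loop()
sync_queue = multiprocessing.Queue()    # filled by another process
async_queue = asyncio.Queue(loop=loop)  # consumed by coroutines (loop kwarg as used above)

threading.Thread(
    target=_multiprocessing_to_asyncio,
    args=(sync_queue, async_queue, loop),
    daemon=True,                        # dies with the main thread
).start()
```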
def __init__(self, event_source):
"""Create a new instance.
Args:
event_source: a source of events. Elements in the queue
should be strings.
"""
self.event_source = event_source
self.subscribers = {} | Create a new instance.
Args:
event_source: a source of events. Elements in the queue
should be strings.
| __init__ | python | bigchaindb/bigchaindb | bigchaindb/web/websocket_server.py | https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/web/websocket_server.py | Apache-2.0 |
async def publish(self):
"""Publish new events to the subscribers."""
while True:
event = await self.event_source.get()
str_buffer = []
if event == POISON_PILL:
return
if isinstance(event, str):
str_buffer.append(event)
elif event.type == EventTypes.BLOCK_VALID:
str_buffer = map(json.dumps, eventify_block(event.data))
for str_item in str_buffer:
for _, websocket in self.subscribers.items():
await websocket.send_str(str_item) | Publish new events to the subscribers. | publish | python | bigchaindb/bigchaindb | bigchaindb/web/websocket_server.py | https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/web/websocket_server.py | Apache-2.0 |
def init_app(event_source, *, loop=None):
"""Init the application server.
Return:
An aiohttp application.
"""
dispatcher = Dispatcher(event_source)
# Schedule the dispatcher
loop.create_task(dispatcher.publish())
app = web.Application(loop=loop)
app['dispatcher'] = dispatcher
app.router.add_get(EVENTS_ENDPOINT, websocket_handler)
return app | Init the application server.
Return:
An aiohttp application.
| init_app | python | bigchaindb/bigchaindb | bigchaindb/web/websocket_server.py | https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/web/websocket_server.py | Apache-2.0 |
def start(sync_event_source, loop=None):
"""Create and start the WebSocket server."""
if not loop:
loop = asyncio.get_event_loop()
event_source = asyncio.Queue(loop=loop)
bridge = threading.Thread(target=_multiprocessing_to_asyncio,
args=(sync_event_source, event_source, loop),
daemon=True)
bridge.start()
app = init_app(event_source, loop=loop)
aiohttp.web.run_app(app,
host=config['wsserver']['host'],
port=config['wsserver']['port']) | Create and start the WebSocket server. | start | python | bigchaindb/bigchaindb | bigchaindb/web/websocket_server.py | https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/web/websocket_server.py | Apache-2.0 |
def base_ws_uri():
"""Base websocket URL that is advertised to external clients.
Useful when the websocket URL advertised to the clients needs to be
customized (typically when running behind NAT, firewall, etc.)
"""
config_wsserver = config['wsserver']
scheme = config_wsserver['advertised_scheme']
host = config_wsserver['advertised_host']
port = config_wsserver['advertised_port']
return '{}://{}:{}'.format(scheme, host, port) | Base websocket URL that is advertised to external clients.
Useful when the websocket URL advertised to the clients needs to be
customized (typically when running behind NAT, firewall, etc.)
| base_ws_uri | python | bigchaindb/bigchaindb | bigchaindb/web/views/base.py | https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/web/views/base.py | Apache-2.0 |
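For illustration, a sketch of the config shape this function reads and the URL it produces; the concrete values are assumptions.

```python
# Assumed config shape; base_ws_uri() formats it into a single URL.
config = {'wsserver': {'advertised_scheme': 'ws',
                       'advertised_host': 'example.com',
                       'advertised_port': 9985}}
# base_ws_uri() -> 'ws://example.com:9985'
```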
def get(self, block_id):
"""API endpoint to get details about a block.
Args:
block_id (str): the id of the block.
Return:
A JSON string containing the data about the block.
"""
pool = current_app.config['bigchain_pool']
with pool() as bigchain:
block = bigchain.get_block(block_id=block_id)
if not block:
return make_error(404)
return block | API endpoint to get details about a block.
Args:
block_id (str): the id of the block.
Return:
A JSON string containing the data about the block.
| get | python | bigchaindb/bigchaindb | bigchaindb/web/views/blocks.py | https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/web/views/blocks.py | Apache-2.0 |
def get(self):
"""API endpoint to get the related blocks for a transaction.
Return:
A ``list`` of ``block_id``s that contain the given transaction. The
list may be filtered by providing a status query parameter:
"valid", "invalid", "undecided".
"""
parser = reqparse.RequestParser()
parser.add_argument('transaction_id', type=str, required=True)
args = parser.parse_args(strict=True)
tx_id = args['transaction_id']
pool = current_app.config['bigchain_pool']
with pool() as bigchain:
blocks = bigchain.get_block_containing_tx(tx_id)
return blocks | API endpoint to get the related blocks for a transaction.
Return:
A ``list`` of ``block_id``s that contain the given transaction. The
list may be filtered by providing a status query parameter:
"valid", "invalid", "undecided".
| get | python | bigchaindb/bigchaindb | bigchaindb/web/views/blocks.py | https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/web/views/blocks.py | Apache-2.0 |
def get_api_v1_info(api_prefix):
"""Return a dict with all the information specific for the v1 of the
api.
"""
websocket_root = base_ws_uri() + EVENTS_ENDPOINT
docs_url = [
'https://docs.bigchaindb.com/projects/server/en/v',
version.__version__,
'/http-client-server-api.html',
]
return {
'docs': ''.join(docs_url),
'transactions': '{}transactions/'.format(api_prefix),
'blocks': '{}blocks/'.format(api_prefix),
'assets': '{}assets/'.format(api_prefix),
'outputs': '{}outputs/'.format(api_prefix),
'streams': websocket_root,
'metadata': '{}metadata/'.format(api_prefix),
'validators': '{}validators'.format(api_prefix),
} | Return a dict with all the information specific to v1 of the
API.
| get_api_v1_info | python | bigchaindb/bigchaindb | bigchaindb/web/views/info.py | https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/web/views/info.py | Apache-2.0 |
def get(self):
"""API endpoint to retrieve a list of links to transaction
outputs.
Returns:
A :obj:`list` of :cls:`str` of links to outputs.
"""
parser = reqparse.RequestParser()
parser.add_argument('public_key', type=parameters.valid_ed25519,
required=True)
parser.add_argument('spent', type=parameters.valid_bool)
args = parser.parse_args(strict=True)
pool = current_app.config['bigchain_pool']
with pool() as bigchain:
outputs = bigchain.get_outputs_filtered(args['public_key'],
args['spent'])
return [{'transaction_id': output.txid, 'output_index': output.output}
for output in outputs] | API endpoint to retrieve a list of links to transaction
outputs.
Returns:
A :obj:`list` of :cls:`str` of links to outputs.
| get | python | bigchaindb/bigchaindb | bigchaindb/web/views/outputs.py | https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/web/views/outputs.py | Apache-2.0 |
def get(self, tx_id):
"""API endpoint to get details about a transaction.
Args:
tx_id (str): the id of the transaction.
Return:
A JSON string containing the data about the transaction.
"""
pool = current_app.config['bigchain_pool']
with pool() as bigchain:
tx = bigchain.get_transaction(tx_id)
if not tx:
return make_error(404)
return tx.to_dict() | API endpoint to get details about a transaction.
Args:
tx_id (str): the id of the transaction.
Return:
A JSON string containing the data about the transaction.
| get | python | bigchaindb/bigchaindb | bigchaindb/web/views/transactions.py | https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/web/views/transactions.py | Apache-2.0 |
def post(self):
"""API endpoint to push transactions to the Federation.
Return:
A ``dict`` containing the data about the transaction.
"""
parser = reqparse.RequestParser()
parser.add_argument('mode', type=parameters.valid_mode,
default=BROADCAST_TX_ASYNC)
args = parser.parse_args()
mode = str(args['mode'])
pool = current_app.config['bigchain_pool']
# `force` will try to format the body of the POST request even if the
# `content-type` header is not set to `application/json`
tx = request.get_json(force=True)
try:
tx_obj = Transaction.from_dict(tx)
except SchemaValidationError as e:
return make_error(
400,
message='Invalid transaction schema: {}'.format(
e.__cause__.message)
)
except ValidationError as e:
return make_error(
400,
'Invalid transaction ({}): {}'.format(type(e).__name__, e)
)
with pool() as bigchain:
try:
bigchain.validate_transaction(tx_obj)
except ValidationError as e:
return make_error(
400,
'Invalid transaction ({}): {}'.format(type(e).__name__, e)
)
else:
status_code, message = bigchain.write_transaction(tx_obj, mode)
if status_code == 202:
response = jsonify(tx)
response.status_code = 202
return response
else:
return make_error(status_code, message) | API endpoint to push transactions to the Federation.
Return:
A ``dict`` containing the data about the transaction.
| post | python | bigchaindb/bigchaindb | bigchaindb/web/views/transactions.py | https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/web/views/transactions.py | Apache-2.0 |
def get(self):
"""API endpoint to get validators set.
Return:
A JSON string containing the validator set of the current node.
"""
pool = current_app.config['bigchain_pool']
with pool() as bigchain:
validators = bigchain.get_validators()
return validators | API endpoint to get the validator set.
Return:
A JSON string containing the validator set of the current node.
| get | python | bigchaindb/bigchaindb | bigchaindb/web/views/validators.py | https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/web/views/validators.py | Apache-2.0 |
def generate_validators(powers):
"""Generates an arbitrary number of validators with random public keys.
The object under the `storage` key is in the format expected by DB.
The object under the `election` key is in the format expected by
the upsert validator election.
`public_key`, `private_key` are in the format used for signing transactions.
Args:
powers: A list of integers representing the voting power to
assign to the corresponding validators.
"""
validators = []
for power in powers:
kp = crypto.generate_key_pair()
validators.append({
'storage': {
'public_key': {
'value': key_to_base64(base58.b58decode(kp.public_key).hex()),
'type': 'ed25519-base64',
},
'voting_power': power,
},
'election': {
'node_id': f'node-{random.choice(range(100))}',
'power': power,
'public_key': {
'value': base64.b16encode(base58.b58decode(kp.public_key)).decode('utf-8'),
'type': 'ed25519-base16',
},
},
'public_key': kp.public_key,
'private_key': kp.private_key,
})
return validators | Generates an arbitrary number of validators with random public keys.
The object under the `storage` key is in the format expected by DB.
The object under the `election` key is in the format expected by
the upsert validator election.
`public_key`, `private_key` are in the format used for signing transactions.
Args:
powers: A list of integers representing the voting power to
assign to the corresponding validators.
| generate_validators | python | bigchaindb/bigchaindb | tests/utils.py | https://github.com/bigchaindb/bigchaindb/blob/master/tests/utils.py | Apache-2.0 |
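A short usage sketch of the helper above, grounded in the keys it builds:

```python
# Two validators with voting powers 10 and 5.
validators = generate_validators([10, 5])
v = validators[0]
assert v['storage']['voting_power'] == 10
assert v['storage']['public_key']['type'] == 'ed25519-base64'
assert v['election']['public_key']['type'] == 'ed25519-base16'
```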
def _test_additionalproperties(node, path=''):
"""Validate that each object node has additionalProperties set, so that
objects with junk keys do not pass as valid.
"""
if isinstance(node, list):
for i, nnode in enumerate(node):
_test_additionalproperties(nnode, path + str(i) + '.')
if isinstance(node, dict):
if node.get('type') == 'object':
assert 'additionalProperties' in node, \
('additionalProperties not set at path:' + path)
for name, val in node.items():
_test_additionalproperties(val, path + name + '.') | Validate that each object node has additionalProperties set, so that
objects with junk keys do not pass as valid.
| _test_additionalproperties | python | bigchaindb/bigchaindb | tests/common/test_schema.py | https://github.com/bigchaindb/bigchaindb/blob/master/tests/common/test_schema.py | Apache-2.0 |
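A toy schema illustrating what the walker catches: the nested object node lacks `additionalProperties`, so the assertion fires with the offending path.

```python
schema = {
    'type': 'object',
    'additionalProperties': False,
    'properties': {
        'inputs': {'type': 'object'},  # missing additionalProperties
    },
}
_test_additionalproperties(schema)
# AssertionError: additionalProperties not set at path:properties.inputs.
```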
def test_cant_spend_same_input_twice_in_tx(b, alice):
"""Recreate duplicated fulfillments bug
https://github.com/bigchaindb/bigchaindb/issues/1099
"""
from bigchaindb.models import Transaction
from bigchaindb.common.exceptions import DoubleSpend
# create a divisible asset
tx_create = Transaction.create([alice.public_key], [([alice.public_key], 100)])
tx_create_signed = tx_create.sign([alice.private_key])
assert b.validate_transaction(tx_create_signed) == tx_create_signed
b.store_bulk_transactions([tx_create_signed])
# Create a transfer transaction with duplicated fulfillments
dup_inputs = tx_create.to_inputs() + tx_create.to_inputs()
tx_transfer = Transaction.transfer(dup_inputs, [([alice.public_key], 200)],
asset_id=tx_create.id)
tx_transfer_signed = tx_transfer.sign([alice.private_key])
with pytest.raises(DoubleSpend):
tx_transfer_signed.validate(b) | Recreate duplicated fulfillments bug
https://github.com/bigchaindb/bigchaindb/issues/1099
| test_cant_spend_same_input_twice_in_tx | python | bigchaindb/bigchaindb | tests/db/test_bigchain_api.py | https://github.com/bigchaindb/bigchaindb/blob/master/tests/db/test_bigchain_api.py | Apache-2.0 |
def get_txs_patched(conn, **args):
"""Patch `get_transactions_filtered` so that rather than return an array
of transactions it returns an array of shims with a to_dict() method
that reports one of the arguments passed to `get_transactions_filtered`.
"""
return [type('', (), {'to_dict': partial(lambda a: a, arg)})
for arg in sorted(args.items())] | Patch `get_transactions_filtered` so that, rather than returning an array
of transactions it returns an array of shims with a to_dict() method
that reports one of the arguments passed to `get_transactions_filtered`.
| get_txs_patched | python | bigchaindb/bigchaindb | tests/web/test_transactions.py | https://github.com/bigchaindb/bigchaindb/blob/master/tests/web/test_transactions.py | Apache-2.0 |
def call(self, x, training=None):
"""
Apply random channel-swap augmentation to `x`.
Args:
x (`Tensor`): A batch tensor of 1D (signals) or 2D (spectrograms) data
"""
if training in (None, False):
return x
# figure out input data format
if K.ndim(x) not in (3, 4):
raise ValueError(
'ndim of input tensor x should be 3 (batch signal) or 4 (batch spectrogram), '
'but it is %d' % K.ndim(x)
)
if self.data_format == _CH_LAST_STR:
ch_axis = 3 if K.ndim(x) == 4 else 2
else:
ch_axis = 1
# get swap indices
n_ch = K.int_shape(x)[ch_axis]
if n_ch == 1:
return x
swap_indices = np.random.permutation(n_ch).tolist()
# swap and return
return tf.gather(x, indices=swap_indices, axis=ch_axis) |
Apply random channel-swap augmentation to `x`.
Args:
x (`Tensor`): A batch tensor of 1D (signals) or 2D (spectrograms) data
| call | python | keunwoochoi/kapre | kapre/augmentation.py | https://github.com/keunwoochoi/kapre/blob/master/kapre/augmentation.py | MIT |
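The core of the augmentation is a single `tf.gather` along the channel axis; here is a minimal standalone sketch of that mechanic (channels_last assumed).

```python
import numpy as np
import tensorflow as tf

x = tf.reshape(tf.range(2 * 4 * 3, dtype=tf.float32), (2, 4, 3))  # (batch, time, ch)
swap_indices = np.random.permutation(3).tolist()                  # e.g. [2, 0, 1]
x_swapped = tf.gather(x, indices=swap_indices, axis=2)            # reorder channels
assert x_swapped.shape == x.shape
```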
def _apply_masks_to_axis(self, x, axis, mask_param, n_masks):
"""
Applies a number of masks (defined by the parameter n_masks) to the spectrogram
along the axis provided.
Args:
x (float `Tensor`): A spectrogram. Its shape is (time, freq, ch) or (ch, time, freq)
depending on data_format.
axis (int): The axis where the masks will be applied
mask_param (int): The mask param as defined in the original paper, which is the max width of the mask
applied to the specified axis.
n_masks (int): The number of masks to be applied
Returns:
(float `Tensor`): The masked spectrogram. Its shape is (time, freq, ch) or (ch, time, freq)
depending on x shape (that is, the input spectrogram).
"""
axis_limit = K.int_shape(x)[axis]
axis_indices = tf.range(axis_limit)
if axis == 0:
axis_indices = tf.reshape(axis_indices, (-1, 1, 1))
elif axis == 1:
axis_indices = tf.reshape(axis_indices, (1, -1, 1))
elif axis == 2:
axis_indices = tf.reshape(axis_indices, (1, 1, -1))
else:
raise NotImplementedError("Axis parameter must be one of the following: 0, 1, 2")
# Check if mask_param is greater than axis_limit
if axis_limit < mask_param:
raise ValueError(
"Time and freq axis shapes must be greater than time_mask_param "
"and freq_mask_param respectively"
)
x_repeated = tf.repeat(tf.expand_dims(x, 0), n_masks, axis=0)
axis_limit_repeated = tf.repeat(axis_limit, n_masks, axis=0)
axis_indices_repeated = tf.repeat(tf.expand_dims(axis_indices, 0), n_masks, axis=0)
mask_param_repeated = tf.repeat(mask_param, n_masks, axis=0)
masks = tf.map_fn(
elems=(x_repeated, axis_limit_repeated, axis_indices_repeated, mask_param_repeated),
fn=self._generate_axis_mask,
dtype=(tf.float32, tf.int32, tf.int32, tf.int32),
fn_output_signature=tf.bool,
)
mask = tf.math.reduce_any(masks, 0)
return tf.where(mask, self.mask_value, x) |
Applies a number of masks (defined by the parameter n_masks) to the spectrogram
along the axis provided.
Args:
x (float `Tensor`): A spectrogram. Its shape is (time, freq, ch) or (ch, time, freq)
depending on data_format.
axis (int): The axis where the masks will be applied
mask_param (int): The mask param as defined in the original paper, which is the max width of the mask
applied to the specified axis.
n_masks (int): The number of masks to be applied
Returns:
(float `Tensor`): The masked spectrogram. Its shape is (time, freq, ch) or (ch, time, freq)
depending on x shape (that is, the input spectrogram).
| _apply_masks_to_axis | python | keunwoochoi/kapre | kapre/augmentation.py | https://github.com/keunwoochoi/kapre/blob/master/kapre/augmentation.py | MIT |
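`_generate_axis_mask` itself is not shown in this snippet. Below is a plausible sketch of one such mask, following the SpecAugment paper (the helper name and body are hypothetical, not the library's actual implementation): draw a width `t < mask_param` and an offset `t0`, then mask `[t0, t0 + t)` along the axis.

```python
import tensorflow as tf

def _toy_axis_mask(axis_limit, axis_indices, mask_param):
    # Hypothetical stand-in for _generate_axis_mask (SpecAugment-style).
    t = tf.random.uniform((), maxval=mask_param, dtype=tf.int32)       # mask width
    t0 = tf.random.uniform((), maxval=axis_limit - t, dtype=tf.int32)  # mask offset
    return tf.logical_and(axis_indices >= t0, axis_indices < t0 + t)   # boolean mask
```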
def _apply_spec_augment(self, x):
"""
Main method that applies the SpecAugment technique along both the
frequency and time axes.
Args:
x (float `Tensor`) : A spectrogram. Its shape is (time, freq, ch) or (ch, time, freq)
depending on data_format.
Returns:
(float `Tensor`): The spectrogram masked by time and frequency axis. Its shape is (time, freq, ch)
or (ch, time, freq) depending on x shape (that is, the input spectrogram).
"""
if self.data_format == _CH_LAST_STR:
time_axis, freq_axis = 0, 1
else:
time_axis, freq_axis = 1, 2
if self.n_time_masks >= 1:
x = self._apply_masks_to_axis(
x, axis=time_axis, mask_param=self.time_mask_param, n_masks=self.n_time_masks
)
if self.n_freq_masks >= 1:
x = self._apply_masks_to_axis(
x, axis=freq_axis, mask_param=self.freq_mask_param, n_masks=self.n_freq_masks
)
return x |
Main method that applies the SpecAugment technique along both the
frequency and time axes.
Args:
x (float `Tensor`) : A spectrogram. Its shape is (time, freq, ch) or (ch, time, freq)
depending on data_format.
Returns:
(float `Tensor`): The spectrogram masked by time and frequency axis. Its shape is (time, freq, ch)
or (ch, time, freq) depending on x shape (that is, the input spectrogram).
| _apply_spec_augment | python | keunwoochoi/kapre | kapre/augmentation.py | https://github.com/keunwoochoi/kapre/blob/master/kapre/augmentation.py | MIT |
def get_window_fn(window_name=None):
"""Return a window function given its name.
This function is used inside layers such as `STFT` to get a window function.
Args:
window_name (None or str): name of window function. On Tensorflow 2.3, there are five windows available in
`tf.signal` (`hamming_window`, `hann_window`, `kaiser_bessel_derived_window`, `kaiser_window`, `vorbis_window`).
"""
if window_name is None:
return tf.signal.hann_window
available_windows = {
'hamming_window': tf.signal.hamming_window,
'hann_window': tf.signal.hann_window,
}
if hasattr(tf.signal, 'kaiser_bessel_derived_window'):
available_windows['kaiser_bessel_derived_window'] = tf.signal.kaiser_bessel_derived_window
if hasattr(tf.signal, 'kaiser_window'):
available_windows['kaiser_window'] = tf.signal.kaiser_window
if hasattr(tf.signal, 'vorbis_window'):
available_windows['vorbis_window'] = tf.signal.vorbis_window
if window_name not in available_windows:
raise NotImplementedError(
'Window name %s is not supported now. Currently, %d windows are '
'supported - %s'
% (
window_name,
len(available_windows),
', '.join([k for k in available_windows.keys()]),
)
)
return available_windows[window_name] | Return a window function given its name.
This function is used inside layers such as `STFT` to get a window function.
Args:
window_name (None or str): name of window function. On Tensorflow 2.3, there are five windows available in
`tf.signal` (`hamming_window`, `hann_window`, `kaiser_bessel_derived_window`, `kaiser_window`, `vorbis_window`).
| get_window_fn | python | keunwoochoi/kapre | kapre/backend.py | https://github.com/keunwoochoi/kapre/blob/master/kapre/backend.py | MIT |
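Usage sketch: resolve a window function by name and evaluate it for a 1024-sample frame, as an analysis layer would.

```python
import tensorflow as tf

window_fn = get_window_fn('hamming_window')
window = window_fn(1024, dtype=tf.float32)  # 1D tensor of shape (1024,)
```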
def validate_data_format_str(data_format):
"""A function that validates the data format string."""
if data_format not in (_CH_DEFAULT_STR, _CH_FIRST_STR, _CH_LAST_STR):
raise ValueError(
'data_format should be one of {}'.format(
str([_CH_FIRST_STR, _CH_LAST_STR, _CH_DEFAULT_STR])
)
+ ' but we received {}'.format(data_format)
) | A function that validates the data format string. | validate_data_format_str | python | keunwoochoi/kapre | kapre/backend.py | https://github.com/keunwoochoi/kapre/blob/master/kapre/backend.py | MIT |
def magnitude_to_decibel(x, ref_value=1.0, amin=1e-5, dynamic_range=80.0):
"""A function that converts magnitude to decibel scaling.
In essence, it runs `10 * log10(x)`, but with some other utility operations.
Similar to `librosa.power_to_db` with `ref=1.0` and `top_db=dynamic_range`
Args:
x (`Tensor`): float tensor. Can be batch or not. Something like magnitude of STFT.
ref_value (`float`): an input value that would become 0 dB in the result.
For spectrogram magnitudes, ref_value=1.0 usually make the decibel-scaled output to be around zero
if the input audio was in [-1, 1].
amin (`float`): the noise floor of the input. An input that is smaller than `amin`, it's converted to `amin`.
dynamic_range (`float`): range of the resulting value. E.g., if the maximum magnitude is 30 dB,
the noise floor of the output would become (30 - dynamic_range) dB
Returns:
log_spec (`Tensor`): a decibel-scaled version of `x`.
Note:
In many deep learning based application, the input spectrogram magnitudes (e.g., abs(STFT)) are decibel-scaled
(=logarithmically mapped) for a better performance.
Example:
::
x = tf.abs(tf.random.normal((2, 1024)))  # a batch of magnitudes
x_db = magnitude_to_decibel(x)
# x_db is decibel-scaled; per item, values below (max - 80) dB are clipped
"""
def _log10(x):
return tf.math.log(x) / tf.math.log(tf.constant(10, dtype=x.dtype))
if K.ndim(x) > 1: # we assume x is batch in this case
max_axis = tuple(range(K.ndim(x))[1:])
else:
max_axis = None
if amin is None:
amin = 1e-5
amin = tf.cast(amin, dtype=x.dtype)
log_spec = 10.0 * _log10(tf.math.maximum(x, amin))
log_spec = log_spec - 10.0 * _log10(tf.math.maximum(amin, ref_value))
log_spec = tf.math.maximum(
log_spec, tf.math.reduce_max(log_spec, axis=max_axis, keepdims=True) - dynamic_range
)
return log_spec | A function that converts magnitude to decibel scaling.
In essence, it runs `10 * log10(x)`, but with some other utility operations.
Similar to `librosa.power_to_db` with `ref=1.0` and `top_db=dynamic_range`
Args:
x (`Tensor`): float tensor. Can be batch or not. Something like magnitude of STFT.
ref_value (`float`): an input value that would become 0 dB in the result.
For spectrogram magnitudes, ref_value=1.0 usually make the decibel-scaled output to be around zero
if the input audio was in [-1, 1].
amin (`float`): the noise floor of the input. An input that is smaller than `amin`, it's converted to `amin`.
dynamic_range (`float`): range of the resulting value. E.g., if the maximum magnitude is 30 dB,
the noise floor of the output would become (30 - dynamic_range) dB
Returns:
log_spec (`Tensor`): a decibel-scaled version of `x`.
Note:
In many deep learning based application, the input spectrogram magnitudes (e.g., abs(STFT)) are decibel-scaled
(=logarithmically mapped) for a better performance.
Example:
::
x = tf.abs(tf.random.normal((2, 1024)))  # a batch of magnitudes
x_db = magnitude_to_decibel(x)
# x_db is decibel-scaled; per item, values below (max - 80) dB are clipped
| magnitude_to_decibel | python | keunwoochoi/kapre | kapre/backend.py | https://github.com/keunwoochoi/kapre/blob/master/kapre/backend.py | MIT |
def filterbank_mel(
sample_rate, n_freq, n_mels=128, f_min=0.0, f_max=None, htk=False, norm='slaney'
):
"""A wrapper for librosa.filters.mel that additionally does transpose and tensor conversion
Args:
sample_rate (`int`): sample rate of the input audio
n_freq (`int`): number of frequency bins in the input STFT magnitude.
n_mels (`int`): the number of mel bands
f_min (`float`): lowest frequency that is going to be included in the mel filterbank (Hertz)
f_max (`float`): highest frequency that is going to be included in the mel filterbank (Hertz)
htk (bool): whether to use `htk` formula or not
norm: The default, 'slaney', would normalize the mel weights by the width of the mel band.
Returns:
(`Tensor`): mel filterbanks. Shape=`(n_freq, n_mels)`
"""
filterbank = librosa.filters.mel(
sr=sample_rate,
n_fft=(n_freq - 1) * 2,
n_mels=n_mels,
fmin=f_min,
fmax=f_max,
htk=htk,
norm=norm,
).astype(K.floatx())
return tf.convert_to_tensor(filterbank.T) | A wrapper for librosa.filters.mel that additionally does transpose and tensor conversion
Args:
sample_rate (`int`): sample rate of the input audio
n_freq (`int`): number of frequency bins in the input STFT magnitude.
n_mels (`int`): the number of mel bands
f_min (`float`): lowest frequency that is going to be included in the mel filterbank (Hertz)
f_max (`float`): highest frequency that is going to be included in the mel filterbank (Hertz)
htk (bool): whether to use `htk` formula or not
norm: The default, 'slaney', would normalize the mel weights by the width of the mel band.
Returns:
(`Tensor`): mel filterbanks. Shape=`(n_freq, n_mels)`
| filterbank_mel | python | keunwoochoi/kapre | kapre/backend.py | https://github.com/keunwoochoi/kapre/blob/master/kapre/backend.py | MIT |
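Usage sketch: a mel filterbank for `n_fft=2048` (hence 1025 frequency bins) applied to an STFT magnitude by contraction over the frequency axis.

```python
import tensorflow as tf

mel_fb = filterbank_mel(sample_rate=22050, n_freq=1025, n_mels=128)  # (1025, 128)
signal = tf.random.normal((1, 22050))                                # 1 second of noise
stft_mag = tf.abs(tf.signal.stft(signal, frame_length=2048, frame_step=512))
melgram = tf.tensordot(stft_mag, mel_fb, axes=1)                     # (1, n_frame, 128)
```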
def get_stft_magnitude_layer(
input_shape=None,
n_fft=2048,
win_length=None,
hop_length=None,
window_name=None,
pad_begin=False,
pad_end=False,
return_decibel=False,
db_amin=1e-5,
db_ref_value=1.0,
db_dynamic_range=80.0,
input_data_format='default',
output_data_format='default',
name='stft_magnitude',
):
"""A function that returns a stft magnitude layer.
The layer is a `keras.Sequential` model consists of `STFT`, `Magnitude`, and optionally `MagnitudeToDecibel`.
Args:
input_shape (None or tuple of integers): input shape of the model. Necessary only if this melspectrogram layer is
is the first layer of your model (see `keras.model.Sequential()` for more details)
n_fft (int): number of FFT points in `STFT`
win_length (int): window length of `STFT`
hop_length (int): hop length of `STFT`
window_name (str or None): *Name* of `tf.signal` function that returns a 1D tensor window that is used in analysis.
Defaults to `hann_window` which uses `tf.signal.hann_window`.
Window availability depends on Tensorflow version. More details are at `kapre.backend.get_window()`.
pad_begin (bool): Whether to pad with zeros along time axis (length: win_length - hop_length). Defaults to `False`.
pad_end (bool): whether to pad the input signal at the end in `STFT`.
return_decibel (bool): whether to apply decibel scaling at the end
db_amin (float): noise floor of decibel scaling input. See `MagnitudeToDecibel` for more details.
db_ref_value (float): reference value of decibel scaling. See `MagnitudeToDecibel` for more details.
db_dynamic_range (float): dynamic range of the decibel scaling result.
input_data_format (str): the audio data format of input waveform batch.
`'channels_last'` if it's `(batch, time, channels)`
`'channels_first'` if it's `(batch, channels, time)`
Defaults to the setting of your Keras configuration. (tf.keras.backend.image_data_format())
output_data_format (str): the data format of output melspectrogram.
`'channels_last'` if you want `(batch, time, frequency, channels)`
`'channels_first'` if you want `(batch, channels, time, frequency)`
Defaults to the setting of your Keras configuration. (tf.keras.backend.image_data_format())
name (str): name of the returned layer
Note:
STFT magnitude represents a linear-frequency spectrum of an audio signal and is probably the most popular choice
when it comes to audio analysis in general. By using magnitude, this layer discards the phase information,
which is generally known to be irrelevant to human auditory perception.
Note:
For audio analysis (when the output is tag/label/etc), we'd like to recommend to set `return_decibel=True`.
Decibel scaling is perceptually plausible and numerically stable
(related paper: `A Comparison of Audio Signal Preprocessing Methods for Deep Neural Networks on Music Tagging <https://arxiv.org/abs/1709.01922>`_)
Many music, speech, and audio applications have used this log-magnitude STFT, e.g.,
`Learning to Pinpoint Singing Voice from Weakly Labeled Examples <https://wp.nyu.edu/ismir2016/wp-content/uploads/sites/2294/2016/07/315_Paper.pdf>`_,
`Joint Beat and Downbeat Tracking with Recurrent Neural Networks <https://archives.ismir.net/ismir2016/paper/000186.pdf>`_,
and many more.
For audio processing (when the output is audio signal), it might be better to use STFT as it is (`return_decibel=False`).
Example: `Singing voice separation with deep U-Net convolutional networks <https://openaccess.city.ac.uk/id/eprint/19289/>`_.
This is because decibel scaling has some clipping at the noise floor, which is irreversible.
One may use `log(1+X)` instead of `log(X)` to avoid the clipping but it is not included in Kapre at the moment.
Example:
::
input_shape = (2048, 1) # mono signal, audio is channels_last
stft_mag = get_stft_magnitude_layer(input_shape=input_shape, n_fft=1024, return_decibel=True,
input_data_format='channels_last', output_data_format='channels_first')
model = Sequential()
model.add(stft_mag)
# now the shape is (batch, ch=1, n_frame=3, n_freq=513) because output_data_format is 'channels_first'
# and the dtype is float
"""
backend.validate_data_format_str(input_data_format)
backend.validate_data_format_str(output_data_format)
stft_kwargs = {}
if input_shape is not None:
stft_kwargs['input_shape'] = input_shape
waveform_to_stft = STFT(
**stft_kwargs,
n_fft=n_fft,
win_length=win_length,
hop_length=hop_length,
window_name=window_name,
pad_begin=pad_begin,
pad_end=pad_end,
input_data_format=input_data_format,
output_data_format=output_data_format,
)
stft_to_stftm = Magnitude()
layers = [waveform_to_stft, stft_to_stftm]
if return_decibel:
mag_to_decibel = MagnitudeToDecibel(
ref_value=db_ref_value, amin=db_amin, dynamic_range=db_dynamic_range
)
layers.append(mag_to_decibel)
return Sequential(layers, name=name) | A function that returns an STFT magnitude layer.
The layer is a `keras.Sequential` model that consists of `STFT`, `Magnitude`, and optionally `MagnitudeToDecibel`.
Args:
input_shape (None or tuple of integers): input shape of the model. Necessary only if this layer
is the first layer of your model (see `keras.model.Sequential()` for more details)
n_fft (int): number of FFT points in `STFT`
win_length (int): window length of `STFT`
hop_length (int): hop length of `STFT`
window_name (str or None): *Name* of `tf.signal` function that returns a 1D tensor window that is used in analysis.
Defaults to `hann_window` which uses `tf.signal.hann_window`.
Window availability depends on Tensorflow version. More details are at `kapre.backend.get_window()`.
pad_begin (bool): Whether to pad with zeros along time axis (length: win_length - hop_length). Defaults to `False`.
pad_end (bool): whether to pad the input signal at the end in `STFT`.
return_decibel (bool): whether to apply decibel scaling at the end
db_amin (float): noise floor of decibel scaling input. See `MagnitudeToDecibel` for more details.
db_ref_value (float): reference value of decibel scaling. See `MagnitudeToDecibel` for more details.
db_dynamic_range (float): dynamic range of the decibel scaling result.
input_data_format (str): the audio data format of input waveform batch.
`'channels_last'` if it's `(batch, time, channels)`
`'channels_first'` if it's `(batch, channels, time)`
Defaults to the setting of your Keras configuration. (tf.keras.backend.image_data_format())
output_data_format (str): the data format of output melspectrogram.
`'channels_last'` if you want `(batch, time, frequency, channels)`
`'channels_first'` if you want `(batch, channels, time, frequency)`
Defaults to the setting of your Keras configuration. (tf.keras.backend.image_data_format())
name (str): name of the returned layer
Note:
STFT magnitude represents a linear-frequency spectrum of an audio signal and is probably the most popular choice
when it comes to audio analysis in general. By using magnitude, this layer discards the phase information,
which is generally known to be irrelevant to human auditory perception.
Note:
For audio analysis (when the output is tag/label/etc), we'd like to recommend to set `return_decibel=True`.
Decibel scaling is perceptually plausible and numerically stable
(related paper: `A Comparison of Audio Signal Preprocessing Methods for Deep Neural Networks on Music Tagging <https://arxiv.org/abs/1709.01922>`_)
Many music, speech, and audio applications have used this log-magnitude STFT, e.g.,
`Learning to Pinpoint Singing Voice from Weakly Labeled Examples <https://wp.nyu.edu/ismir2016/wp-content/uploads/sites/2294/2016/07/315_Paper.pdf>`_,
`Joint Beat and Downbeat Tracking with Recurrent Neural Networks <https://archives.ismir.net/ismir2016/paper/000186.pdf>`_,
and many more.
For audio processing (when the output is audio signal), it might be better to use STFT as it is (`return_decibel=False`).
Example: `Singing voice separation with deep U-Net convolutional networks <https://openaccess.city.ac.uk/id/eprint/19289/>`_.
This is because decibel scaling has some clipping at the noise floor, which is irreversible.
One may use `log(1+X)` instead of `log(X)` to avoid the clipping but it is not included in Kapre at the moment.
Example:
::
input_shape = (2048, 1) # mono signal, audio is channels_last
stft_mag = get_stft_magnitude_layer(input_shape=input_shape, n_fft=1024, return_decibel=True,
input_data_format='channels_last', output_data_format='channels_first')
model = Sequential()
model.add(stft_mag)
# now the shape is (batch, ch=1, n_frame=3, n_freq=513) because output_data_format is 'channels_first'
# and the dtype is float
| get_stft_magnitude_layer | python | keunwoochoi/kapre | kapre/composed.py | https://github.com/keunwoochoi/kapre/blob/master/kapre/composed.py | MIT |
def get_melspectrogram_layer(
input_shape=None,
n_fft=2048,
win_length=None,
hop_length=None,
window_name=None,
pad_begin=False,
pad_end=False,
sample_rate=22050,
n_mels=128,
mel_f_min=0.0,
mel_f_max=None,
mel_htk=False,
mel_norm='slaney',
return_decibel=False,
db_amin=1e-5,
db_ref_value=1.0,
db_dynamic_range=80.0,
input_data_format='default',
output_data_format='default',
name='melspectrogram',
):
"""A function that returns a melspectrogram layer, which is a `keras.Sequential` model consists of
`STFT`, `Magnitude`, `ApplyFilterbank(_mel_filterbank)`, and optionally `MagnitudeToDecibel`.
Args:
input_shape (None or tuple of integers): input shape of the model. Necessary only if this melspectrogram layer
is the first layer of your model (see `keras.model.Sequential()` for more details)
n_fft (int): number of FFT points in `STFT`
win_length (int): window length of `STFT`
hop_length (int): hop length of `STFT`
window_name (str or None): *Name* of `tf.signal` function that returns a 1D tensor window that is used in analysis.
Defaults to `hann_window` which uses `tf.signal.hann_window`.
Window availability depends on Tensorflow version. More details are at `kapre.backend.get_window()`.
pad_begin (bool): Whether to pad with zeros along time axis (length: win_length - hop_length). Defaults to `False`.
pad_end (bool): whether to pad the input signal at the end in `STFT`.
sample_rate (int): sample rate of the input audio
n_mels (int): number of mel bins in the mel filterbank
mel_f_min (float): lowest frequency of the mel filterbank
mel_f_max (float): highest frequency of the mel filterbank
mel_htk (bool): whether to follow the htk mel filterbank formula or not
mel_norm ('slaney' or int): normalization policy of the mel filterbank triangles
return_decibel (bool): whether to apply decibel scaling at the end
db_amin (float): noise floor of decibel scaling input. See `MagnitudeToDecibel` for more details.
db_ref_value (float): reference value of decibel scaling. See `MagnitudeToDecibel` for more details.
db_dynamic_range (float): dynamic range of the decibel scaling result.
input_data_format (str): the audio data format of input waveform batch.
`'channels_last'` if it's `(batch, time, channels)`
`'channels_first'` if it's `(batch, channels, time)`
Defaults to the setting of your Keras configuration. (tf.keras.backend.image_data_format())
output_data_format (str): the data format of output melspectrogram.
`'channels_last'` if you want `(batch, time, frequency, channels)`
`'channels_first'` if you want `(batch, channels, time, frequency)`
Defaults to the setting of your Keras configuration. (tf.keras.backend.image_data_format())
name (str): name of the returned layer
Note:
Melspectrogram was originally developed for speech applications and has been *very* widely used for audio signal
analysis including music information retrieval. As its mel-axis is a non-linear compression of (linear)
frequency axis, a melspectrogram can be an efficient choice as an input of a machine learning model.
We recommend to set `return_decibel=True`.
**References**:
`Automatic tagging using deep convolutional neural networks <https://arxiv.org/abs/1606.00298>`_,
`Deep content-based music recommendation <http://papers.nips.cc/paper/5004-deep-content-based-music-recommen>`_,
`CNN Architectures for Large-Scale Audio Classification <https://arxiv.org/abs/1609.09430>`_,
`Multi-label vs. combined single-label sound event detection with deep neural networks <http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.711.74&rep=rep1&type=pdf>`_,
`Deep Convolutional Neural Networks and Data Augmentation for Environmental Sound Classification <https://arxiv.org/pdf/1608.04363.pdf>`_,
and way too many speech applications.
Example:
::
input_shape = (2, 2048) # stereo signal, audio is channels_first
melgram = get_melspectrogram_layer(input_shape=input_shape, n_fft=1024, return_decibel=True,
n_mels=96, input_data_format='channels_first', output_data_format='channels_last')
model = Sequential()
model.add(melgram)
# now the shape is (batch, n_frame=3, n_mels=96, n_ch=2) because output_data_format is 'channels_last'
# and the dtype is float
"""
backend.validate_data_format_str(input_data_format)
backend.validate_data_format_str(output_data_format)
stft_kwargs = {}
if input_shape is not None:
stft_kwargs['input_shape'] = input_shape
waveform_to_stft = STFT(
**stft_kwargs,
n_fft=n_fft,
win_length=win_length,
hop_length=hop_length,
window_name=window_name,
pad_begin=pad_begin,
pad_end=pad_end,
input_data_format=input_data_format,
output_data_format=output_data_format,
)
stft_to_stftm = Magnitude()
kwargs = {
'sample_rate': sample_rate,
'n_freq': n_fft // 2 + 1,
'n_mels': n_mels,
'f_min': mel_f_min,
'f_max': mel_f_max,
'htk': mel_htk,
'norm': mel_norm,
}
stftm_to_melgram = ApplyFilterbank(
type='mel', filterbank_kwargs=kwargs, data_format=output_data_format
)
layers = [waveform_to_stft, stft_to_stftm, stftm_to_melgram]
if return_decibel:
mag_to_decibel = MagnitudeToDecibel(
ref_value=db_ref_value, amin=db_amin, dynamic_range=db_dynamic_range
)
layers.append(mag_to_decibel)
return Sequential(layers, name=name) | A function that returns a melspectrogram layer, which is a `keras.Sequential` model that consists of
`STFT`, `Magnitude`, `ApplyFilterbank(_mel_filterbank)`, and optionally `MagnitudeToDecibel`.
Args:
input_shape (None or tuple of integers): input shape of the model. Necessary only if this melspectrogram layer
is the first layer of your model (see `keras.model.Sequential()` for more details)
n_fft (int): number of FFT points in `STFT`
win_length (int): window length of `STFT`
hop_length (int): hop length of `STFT`
window_name (str or None): *Name* of `tf.signal` function that returns a 1D tensor window that is used in analysis.
Defaults to `hann_window` which uses `tf.signal.hann_window`.
Window availability depends on Tensorflow version. More details are at `kapre.backend.get_window()`.
pad_begin (bool): Whether to pad with zeros along time axis (length: win_length - hop_length). Defaults to `False`.
pad_end (bool): whether to pad the input signal at the end in `STFT`.
sample_rate (int): sample rate of the input audio
n_mels (int): number of mel bins in the mel filterbank
mel_f_min (float): lowest frequency of the mel filterbank
mel_f_max (float): highest frequency of the mel filterbank
mel_htk (bool): whether to follow the htk mel filterbank formula or not
mel_norm ('slaney' or int): normalization policy of the mel filterbank triangles
return_decibel (bool): whether to apply decibel scaling at the end
db_amin (float): noise floor of decibel scaling input. See `MagnitudeToDecibel` for more details.
db_ref_value (float): reference value of decibel scaling. See `MagnitudeToDecibel` for more details.
db_dynamic_range (float): dynamic range of the decibel scaling result.
input_data_format (str): the audio data format of input waveform batch.
`'channels_last'` if it's `(batch, time, channels)`
`'channels_first'` if it's `(batch, channels, time)`
Defaults to the setting of your Keras configuration. (tf.keras.backend.image_data_format())
output_data_format (str): the data format of output melspectrogram.
`'channels_last'` if you want `(batch, time, frequency, channels)`
`'channels_first'` if you want `(batch, channels, time, frequency)`
Defaults to the setting of your Keras configuration. (tf.keras.backend.image_data_format())
name (str): name of the returned layer
Note:
Melspectrogram was originally developed for speech applications and has been *very* widely used for audio signal
analysis including music information retrieval. As its mel-axis is a non-linear compression of (linear)
frequency axis, a melspectrogram can be an efficient choice as an input of a machine learning model.
We recommend to set `return_decibel=True`.
**References**:
`Automatic tagging using deep convolutional neural networks <https://arxiv.org/abs/1606.00298>`_,
`Deep content-based music recommendation <http://papers.nips.cc/paper/5004-deep-content-based-music-recommen>`_,
`CNN Architectures for Large-Scale Audio Classification <https://arxiv.org/abs/1609.09430>`_,
`Multi-label vs. combined single-label sound event detection with deep neural networks <http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.711.74&rep=rep1&type=pdf>`_,
`Deep Convolutional Neural Networks and Data Augmentation for Environmental Sound Classification <https://arxiv.org/pdf/1608.04363.pdf>`_,
and way too many speech applications.
Example:
::
input_shape = (2, 2048) # stereo signal, audio is channels_first
melgram = get_melspectrogram_layer(input_shape=input_shape, n_fft=1024, return_decibel=True,
n_mels=96, input_data_format='channels_first', output_data_format='channels_last')
model = Sequential()
model.add(melgram)
# now the shape is (batch, n_frame=3, n_mels=96, n_ch=2) because output_data_format is 'channels_last'
# and the dtype is float
| get_melspectrogram_layer | python | keunwoochoi/kapre | kapre/composed.py | https://github.com/keunwoochoi/kapre/blob/master/kapre/composed.py | MIT |
def get_log_frequency_spectrogram_layer(
input_shape=None,
n_fft=2048,
win_length=None,
hop_length=None,
window_name=None,
pad_begin=False,
pad_end=False,
sample_rate=22050,
log_n_bins=84,
log_f_min=None,
log_bins_per_octave=12,
log_spread=0.125,
return_decibel=False,
db_amin=1e-5,
db_ref_value=1.0,
db_dynamic_range=80.0,
input_data_format='default',
output_data_format='default',
name='log_frequency_spectrogram',
):
"""A function that returns a log-frequency STFT layer, which is a `keras.Sequential` model consists of
`STFT`, `Magnitude`, `ApplyFilterbank(_log_filterbank)`, and optionally `MagnitudeToDecibel`.
Args:
input_shape (None or tuple of integers): input shape of the model if this layer
is the first layer of your model (see `keras.model.Sequential()` for more details)
n_fft (int): number of FFT points in `STFT`
win_length (int): window length of `STFT`
hop_length (int): hop length of `STFT`
window_name (str or None): *Name* of `tf.signal` function that returns a 1D tensor window that is used in analysis.
Defaults to `hann_window` which uses `tf.signal.hann_window`.
Window availability depends on Tensorflow version. More details are at `kapre.backend.get_window()`.
pad_begin(bool): Whether to pad with zeros along time axis (length: win_length - hop_length). Defaults to `False`.
pad_end (bool): whether to pad the input signal at the end in `STFT`.
sample_rate (int): sample rate of the input audio
log_n_bins (int): number of the bins in the log-frequency filterbank
log_f_min (float): lowest frequency of the filterbank
log_bins_per_octave (int): number of bins in each octave in the filterbank
log_spread (float): spread constant (Q value) in the log filterbank.
return_decibel (bool): whether to apply decibel scaling at the end
db_amin (float): noise floor of decibel scaling input. See `MagnitudeToDecibel` for more details.
db_ref_value (float): reference value of decibel scaling. See `MagnitudeToDecibel` for more details.
db_dynamic_range (float): dynamic range of the decibel scaling result.
input_data_format (str): the audio data format of input waveform batch.
`'channels_last'` if it's `(batch, time, channels)`
`'channels_first'` if it's `(batch, channels, time)`
Defaults to the setting of your Keras configuration. (tf.keras.backend.image_data_format())
output_data_format (str): the data format of output mel spectrogram.
`'channels_last'` if you want `(batch, time, frequency, channels)`
`'channels_first'` if you want `(batch, channels, time, frequency)`
Defaults to the setting of your Keras configuration. (tf.keras.backend.image_data_format())
name (str): name of the returned layer
Note:
Log-frequency spectrogram is similar to melspectrogram, but its frequency axis is perfectly linear on the octave scale.
For some pitch-related applications, a log-frequency spectrogram can be a good choice.
Example:
::
input_shape = (2048, 2) # stereo signal, audio is channels_last
logfreq_stft_mag = get_log_frequency_spectrogram_layer(
input_shape=input_shape, n_fft=1024, return_decibel=True,
log_n_bins=84, input_data_format='channels_last', output_data_format='channels_last')
model = Sequential()
model.add(logfreq_stft_mag)
# now the shape is (batch, n_frame=3, n_bins=84, n_ch=2) because output_data_format is 'channels_last'
# and the dtype is float
"""
backend.validate_data_format_str(input_data_format)
backend.validate_data_format_str(output_data_format)
stft_kwargs = {}
if input_shape is not None:
stft_kwargs['input_shape'] = input_shape
waveform_to_stft = STFT(
**stft_kwargs,
n_fft=n_fft,
win_length=win_length,
hop_length=hop_length,
window_name=window_name,
pad_begin=pad_begin,
pad_end=pad_end,
input_data_format=input_data_format,
output_data_format=output_data_format,
)
stft_to_stftm = Magnitude()
_log_filterbank = backend.filterbank_log(
sample_rate=sample_rate,
n_freq=n_fft // 2 + 1,
n_bins=log_n_bins,
bins_per_octave=log_bins_per_octave,
f_min=log_f_min,
spread=log_spread,
)
kwargs = {
'sample_rate': sample_rate,
'n_freq': n_fft // 2 + 1,
'n_bins': log_n_bins,
'bins_per_octave': log_bins_per_octave,
'f_min': log_f_min,
'spread': log_spread,
}
stftm_to_loggram = ApplyFilterbank(
type='log', filterbank_kwargs=kwargs, data_format=output_data_format
)
layers = [waveform_to_stft, stft_to_stftm, stftm_to_loggram]
if return_decibel:
mag_to_decibel = MagnitudeToDecibel(
ref_value=db_ref_value, amin=db_amin, dynamic_range=db_dynamic_range
)
layers.append(mag_to_decibel)
return Sequential(layers, name=name) | A function that returns a log-frequency STFT layer, which is a `keras.Sequential` model that consists of
`STFT`, `Magnitude`, `ApplyFilterbank(_log_filterbank)`, and optionally `MagnitudeToDecibel`.
Args:
input_shape (None or tuple of integers): input shape of the model if this layer
is the first layer of your model (see `keras.model.Sequential()` for more details)
n_fft (int): number of FFT points in `STFT`
win_length (int): window length of `STFT`
hop_length (int): hop length of `STFT`
window_name (str or None): *Name* of `tf.signal` function that returns a 1D tensor window that is used in analysis.
Defaults to `hann_window` which uses `tf.signal.hann_window`.
Window availability depends on Tensorflow version. More details are at `kapre.backend.get_window()`.
pad_begin(bool): Whether to pad with zeros along time axis (length: win_length - hop_length). Defaults to `False`.
pad_end (bool): whether to pad the input signal at the end in `STFT`.
sample_rate (int): sample rate of the input audio
log_n_bins (int): number of the bins in the log-frequency filterbank
log_f_min (float): lowest frequency of the filterbank
log_bins_per_octave (int): number of bins in each octave in the filterbank
log_spread (float): spread constant (Q value) in the log filterbank.
return_decibel (bool): whether to apply decibel scaling at the end
db_amin (float): noise floor of decibel scaling input. See `MagnitudeToDecibel` for more details.
db_ref_value (float): reference value of decibel scaling. See `MagnitudeToDecibel` for more details.
db_dynamic_range (float): dynamic range of the decibel scaling result.
input_data_format (str): the audio data format of input waveform batch.
`'channels_last'` if it's `(batch, time, channels)`
`'channels_first'` if it's `(batch, channels, time)`
Defaults to the setting of your Keras configuration. (tf.keras.backend.image_data_format())
output_data_format (str): the data format of output mel spectrogram.
`'channels_last'` if you want `(batch, time, frequency, channels)`
`'channels_first'` if you want `(batch, channels, time, frequency)`
Defaults to the setting of your Keras configuration. (tf.keras.backend.image_data_format())
name (str): name of the returned layer
Note:
Log-frequency spectrogram is similar to melspectrogram, but its frequency axis is perfectly linear on the octave scale.
For some pitch-related applications, a log-frequency spectrogram can be a good choice.
Example:
::
input_shape = (2048, 2) # stereo signal, audio is channels_last
logfreq_stft_mag = get_log_frequency_spectrogram_layer(
input_shape=input_shape, n_fft=1024, return_decibel=True,
log_n_bins=84, input_data_format='channels_last', output_data_format='channels_last')
model = Sequential()
model.add(logfreq_stft_mag)
# now the shape is (batch, n_frame=3, n_bins=84, n_ch=2) because output_data_format is 'channels_last'
# and the dtype is float
| get_log_frequency_spectrogram_layer | python | keunwoochoi/kapre | kapre/composed.py | https://github.com/keunwoochoi/kapre/blob/master/kapre/composed.py | MIT |
def get_perfectly_reconstructing_stft_istft(
stft_input_shape=None,
istft_input_shape=None,
n_fft=2048,
win_length=None,
hop_length=None,
forward_window_name=None,
waveform_data_format='default',
stft_data_format='default',
stft_name='stft',
istft_name='istft',
):
"""A function that returns two layers, stft and inverse stft, which would be perfectly reconstructing pair.
Args:
stft_input_shape (tuple): Input shape of single waveform.
Must specify this if the returned stft layer is going to be used as first layer of a Sequential model.
istft_input_shape (tuple): Input shape of single STFT.
Must specify this if the returned istft layer is going to be used as first layer of a Sequential model.
n_fft (int): Number of FFTs. Defaults to `2048`
win_length (`int` or `None`): Window length in sample. Defaults to `n_fft`.
hop_length (`int` or `None`): Hop length in sample between analysis windows. Defaults to `n_fft // 4` following librosa.
forward_window_name (function or `None`): *Name* of `tf.signal` function that returns a 1D tensor window that is used.
Defaults to `hann_window` which uses `tf.signal.hann_window`.
Window availability depends on Tensorflow version. More details are at `kapre.backend.get_window()`.
waveform_data_format (str): The audio data format of waveform batch.
`'channels_last'` if it's `(batch, time, channels)`
`'channels_first'` if it's `(batch, channels, time)`
Defaults to the setting of your Keras configuration. (tf.keras.backend.image_data_format())
stft_data_format (str): The data format of STFT.
`'channels_last'` if you want `(batch, time, frequency, channels)`
`'channels_first'` if you want `(batch, channels, time, frequency)`
Defaults to the setting of your Keras configuration. (tf.keras.backend.image_data_format())
stft_name (str): name of the returned STFT layer
istft_name (str): name of the returned ISTFT layer
Note:
Without a careful setting, `tf.signal.stft` and `tf.signal.istft` is not perfectly reconstructing.
Note:
Imagine `x` --> `STFT` --> `InverseSTFT` --> `y`.
The length of `x` will be longer than `y` due to the padding at the beginning and the end.
To compare them, you would need to trim `y` along time axis.
The formula: if `trim_begin = win_length - hop_length` and `len_signal` is length of `x`,
`y_trimmed = y[trim_begin: trim_begin + len_signal, :]` (in the case of `channels_last`).
Example:
::
stft_input_shape = (2048, 2) # stereo and channels_last
stft_layer, istft_layer = get_perfectly_reconstructing_stft_istft(
stft_input_shape=stft_input_shape
)
unet = get_unet()  # input: stft (complex value), output: stft (complex value)
model = Sequential()
model.add(stft_layer) # input is waveform
model.add(unet)
model.add(istft_layer) # output is also waveform
"""
backend.validate_data_format_str(waveform_data_format)
backend.validate_data_format_str(stft_data_format)
if win_length is None:
win_length = n_fft
if hop_length is None:
hop_length = win_length // 4
if (win_length / hop_length) % 2 != 0:
raise RuntimeError(
'The ratio of win_length to hop_length must be an even integer to get a '
'perfectly reconstructing stft-istft pair.'
)
stft_kwargs = {}
if stft_input_shape is not None:
stft_kwargs['input_shape'] = stft_input_shape
istft_kwargs = {}
if istft_input_shape is not None:
istft_kwargs['input_shape'] = istft_input_shape
waveform_to_stft = STFT(
**stft_kwargs,
n_fft=n_fft,
win_length=win_length,
hop_length=hop_length,
window_name=forward_window_name,
pad_begin=True,
pad_end=True,
input_data_format=waveform_data_format,
output_data_format=stft_data_format,
name=stft_name,
)
stft_to_waveform = InverseSTFT(
**istft_kwargs,
n_fft=n_fft,
win_length=win_length,
hop_length=hop_length,
forward_window_name=forward_window_name,
input_data_format=stft_data_format,
output_data_format=waveform_data_format,
name=istft_name,
)
return waveform_to_stft, stft_to_waveform | A function that returns two layers, stft and inverse stft, which would be perfectly reconstructing pair.
Args:
stft_input_shape (tuple): Input shape of single waveform.
Must specify this if the returned stft layer is going to be used as first layer of a Sequential model.
istft_input_shape (tuple): Input shape of single STFT.
Must specify this if the returned istft layer is going to be used as first layer of a Sequential model.
n_fft (int): Number of FFTs. Defaults to `2048`
win_length (`int` or `None`): Window length in sample. Defaults to `n_fft`.
hop_length (`int` or `None`): Hop length in sample between analysis windows. Defaults to `n_fft // 4` following librosa.
forward_window_name (str or `None`): *Name* of `tf.signal` function that returns a 1D tensor window that is used.
Defaults to `hann_window` which uses `tf.signal.hann_window`.
Window availability depends on Tensorflow version. More details are at `kapre.backend.get_window()`.
waveform_data_format (str): The audio data format of waveform batch.
`'channels_last'` if it's `(batch, time, channels)`
`'channels_first'` if it's `(batch, channels, time)`
Defaults to the setting of your Keras configuration. (tf.keras.backend.image_data_format())
stft_data_format (str): The data format of STFT.
`'channels_last'` if you want `(batch, time, frequency, channels)`
`'channels_first'` if you want `(batch, channels, time, frequency)`
Defaults to the setting of your Keras configuration. (tf.keras.backend.image_data_format())
stft_name (str): name of the returned STFT layer
istft_name (str): name of the returned ISTFT layer
Note:
Without a careful setting, `tf.signal.stft` and `tf.signal.istft` is not perfectly reconstructing.
Note:
Imagine `x` --> `STFT` --> `InverseSTFT` --> `y`.
The length of `x` will be longer than `y` due to the padding at the beginning and the end.
To compare them, you would need to trim `y` along time axis.
The formula: if `trim_begin = win_length - hop_length` and `len_signal` is length of `x`,
`y_trimmed = y[trim_begin: trim_begin + len_signal, :]` (in the case of `channels_last`).
Example:
::
stft_input_shape = (2048, 2) # stereo and channels_last
stft_layer, istft_layer = get_perfectly_reconstructing_stft_istft(
stft_input_shape=stft_input_shape
)
unet = get_unet()  # input: stft (complex value), output: stft (complex value)
model = Sequential()
model.add(stft_layer) # input is waveform
model.add(unet)
model.add(istft_layer) # output is also waveform
| get_perfectly_reconstructing_stft_istft | python | keunwoochoi/kapre | kapre/composed.py | https://github.com/keunwoochoi/kapre/blob/master/kapre/composed.py | MIT |
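A runnable sketch of the trimming recipe from the Note above; the import path kapre.composed is assumed, and the exact error floor depends on float32 rounding:

import numpy as np
from kapre.composed import get_perfectly_reconstructing_stft_istft  # import path assumed

n_fft = win_length = 2048
hop_length = 512  # win_length / hop_length == 4, which passes the ratio check
stft_layer, istft_layer = get_perfectly_reconstructing_stft_istft(
    n_fft=n_fft, win_length=win_length, hop_length=hop_length
)
x = np.random.randn(1, 4096, 1).astype(np.float32)  # (batch, time, ch), channels_last
y = istft_layer(stft_layer(x)).numpy()
trim_begin = win_length - hop_length  # the trimming formula from the Note
y_trimmed = y[:, trim_begin: trim_begin + x.shape[1], :]
print(np.max(np.abs(x - y_trimmed)))  # ~0 up to float32 rounding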
def get_stft_mag_phase(
input_shape,
n_fft=2048,
win_length=None,
hop_length=None,
window_name=None,
pad_begin=False,
pad_end=False,
return_decibel=False,
db_amin=1e-5,
db_ref_value=1.0,
db_dynamic_range=80.0,
input_data_format='default',
output_data_format='default',
name='stft_mag_phase',
):
"""A function that returns magnitude and phase of input audio.
Args:
input_shape (None or tuple of integers): input shape of the stft layer.
Because this composed layer is based on a Keras Functional model, the input shape must be specified.
E.g., (44100, 2) for 44100-sample stereo audio with `input_data_format=='channels_last'`.
n_fft (int): number of FFT points in `STFT`
win_length (int): window length of `STFT`
hop_length (int): hop length of `STFT`
window_name (str or None): *Name* of `tf.signal` function that returns a 1D tensor window that is used in analysis.
Defaults to `hann_window` which uses `tf.signal.hann_window`.
Window availability depends on Tensorflow version. More details are at `kapre.backend.get_window()`
pad_begin (bool): Whether to pad with zeros along time axis (length: win_length - hop_length). Defaults to `False`.
pad_end (bool): whether to pad the input signal at the end in `STFT`.
return_decibel (bool): whether to apply decibel scaling at the end
db_amin (float): noise floor of decibel scaling input. See `MagnitudeToDecibel` for more details.
db_ref_value (float): reference value of decibel scaling. See `MagnitudeToDecibel` for more details.
db_dynamic_range (float): dynamic range of the decibel scaling result.
input_data_format (str): the audio data format of input waveform batch.
`'channels_last'` if it's `(batch, time, channels)`
`'channels_first'` if it's `(batch, channels, time)`
Defaults to the setting of your Keras configuration. (tf.keras.backend.image_data_format())
output_data_format (str): the data format of output mel spectrogram.
`'channels_last'` if you want `(batch, time, frequency, channels)`
`'channels_first'` if you want `(batch, channels, time, frequency)`
Defaults to the setting of your Keras configuration. (tf.keras.backend.image_data_format())
name (str): name of the returned layer
Example:
::
input_shape = (2048, 3) # 3-channel audio, channels_last
model = Sequential()
model.add(
get_stft_mag_phase(input_shape=input_shape, return_decibel=True, n_fft=1024)
)
# now output shape is (batch, n_frame=3, freq=513, ch=6). 6 channels = [3 mag ch; 3 phase ch]
"""
backend.validate_data_format_str(input_data_format)
backend.validate_data_format_str(output_data_format)
waveform_to_stft = STFT(
n_fft=n_fft,
win_length=win_length,
hop_length=hop_length,
window_name=window_name,
pad_begin=pad_begin,
pad_end=pad_end,
input_data_format=input_data_format,
output_data_format=output_data_format,
)
stft_to_stftm = Magnitude()
stft_to_stftp = Phase()
waveforms = keras.Input(shape=input_shape)
stfts = waveform_to_stft(waveforms)
mag_stfts = stft_to_stftm(stfts) # magnitude
phase_stfts = stft_to_stftp(stfts) # phase
if return_decibel:
mag_to_decibel = MagnitudeToDecibel(
ref_value=db_ref_value, amin=db_amin, dynamic_range=db_dynamic_range
)
mag_stfts = mag_to_decibel(mag_stfts)
ch_axis = 1 if output_data_format == _CH_FIRST_STR else 3
concat_layer = keras.layers.Concatenate(axis=ch_axis)
stfts_mag_phase = concat_layer([mag_stfts, phase_stfts])
model = Model(inputs=waveforms, outputs=stfts_mag_phase, name=name)
return model | A function that returns magnitude and phase of input audio.
Args:
input_shape (None or tuple of integers): input shape of the stft layer.
Because this composed layer is based on a Keras Functional model, the input shape must be specified.
E.g., (44100, 2) for 44100-sample stereo audio with `input_data_format=='channels_last'`.
n_fft (int): number of FFT points in `STFT`
win_length (int): window length of `STFT`
hop_length (int): hop length of `STFT`
window_name (str or None): *Name* of `tf.signal` function that returns a 1D tensor window that is used in analysis.
Defaults to `hann_window` which uses `tf.signal.hann_window`.
Window availability depends on Tensorflow version. More details are at `kapre.backend.get_window()`
pad_begin (bool): Whether to pad with zeros along time axis (length: win_length - hop_length). Defaults to `False`.
pad_end (bool): whether to pad the input signal at the end in `STFT`.
return_decibel (bool): whether to apply decibel scaling at the end
db_amin (float): noise floor of decibel scaling input. See `MagnitudeToDecibel` for more details.
db_ref_value (float): reference value of decibel scaling. See `MagnitudeToDecibel` for more details.
db_dynamic_range (float): dynamic range of the decibel scaling result.
input_data_format (str): the audio data format of input waveform batch.
`'channels_last'` if it's `(batch, time, channels)`
`'channels_first'` if it's `(batch, channels, time)`
Defaults to the setting of your Keras configuration. (tf.keras.backend.image_data_format())
output_data_format (str): the data format of output mel spectrogram.
`'channels_last'` if you want `(batch, time, frequency, channels)`
`'channels_first'` if you want `(batch, channels, time, frequency)`
Defaults to the setting of your Keras configuration. (tf.keras.backend.image_data_format())
name (str): name of the returned layer
Example:
::
input_shape = (2048, 3) # 3-channel audio, channels_last
model = Sequential()
model.add(
get_stft_mag_phase(input_shape=input_shape, return_decibel=True, n_fft=1024)
)
# now output shape is (batch, n_frame=3, freq=513, ch=6). 6 channels = [3 mag ch; 3 phase ch]
| get_stft_mag_phase | python | keunwoochoi/kapre | kapre/composed.py | https://github.com/keunwoochoi/kapre/blob/master/kapre/composed.py | MIT |
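A sketch of splitting the concatenated output back into magnitude and phase, assuming channels_last defaults and the kapre.composed import path:

import numpy as np
from kapre.composed import get_stft_mag_phase  # import path assumed

n_ch = 2
model = get_stft_mag_phase(input_shape=(2048, n_ch), n_fft=1024)
batch = np.random.randn(3, 2048, n_ch).astype(np.float32)
out = model.predict(batch)  # (3, n_frame, 513, 2 * n_ch): [mag channels; phase channels]
mag, phase = out[..., :n_ch], out[..., n_ch:]  # split along the channel axis
print(mag.shape, phase.shape)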
def get_frequency_aware_conv2d(
data_format='default', freq_aware_name='frequency_aware_conv2d', *args, **kwargs
):
"""Returns a frequency-aware conv2d layer.
Args:
data_format (str): specifies the data format of batch input/output.
freq_aware_name (str): name of the returned layer
*args: position args for `keras.layers.Conv2D`.
**kwargs: keyword args for `keras.layers.Conv2D`.
Returns:
A sequential model of ConcatenateFrequencyMap and Conv2D.
References:
Koutini, K., Eghbal-zadeh, H., & Widmer, G. (2019).
`Receptive-Field-Regularized CNN Variants for Acoustic Scene Classification <https://arxiv.org/abs/1909.02859>`_.
In Proceedings of the Detection and Classification of Acoustic Scenes and Events 2019 Workshop (DCASE2019).
"""
if ('groups' in kwargs and kwargs.get('groups') > 1) or (len(args) >= 7 and args[6] > 1):  # groups is Conv2D's 7th positional parameter
raise ValueError(
'Group convolution is not supported with the frequency_aware layer because only the last group '
'would be frequency-aware, which might not be expected.'
)
freq_map_concat_layer = ConcatenateFrequencyMap(data_format=data_format)
if data_format != _CH_DEFAULT_STR:
kwargs['data_format'] = data_format
conv2d = keras.layers.Conv2D(*args, **kwargs)
return Sequential([freq_map_concat_layer, conv2d], name=freq_aware_name) | Returns a frequency-aware conv2d layer.
Args:
data_format (str): specifies the data format of batch input/output.
freq_aware_name (str): name of the returned layer
*args: position args for `keras.layers.Conv2D`.
**kwargs: keyword args for `keras.layers.Conv2D`.
Returns:
A sequential model of ConcatenateFrequencyMap and Conv2D.
References:
Koutini, K., Eghbal-zadeh, H., & Widmer, G. (2019).
`Receptive-Field-Regularized CNN Variants for Acoustic Scene Classification <https://arxiv.org/abs/1909.02859>`_.
In Proceedings of the Detection and Classification of Acoustic Scenes and Events 2019 Workshop (DCASE2019).
| get_frequency_aware_conv2d | python | keunwoochoi/kapre | kapre/composed.py | https://github.com/keunwoochoi/kapre/blob/master/kapre/composed.py | MIT |
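A usage sketch, assuming a channels_last Keras configuration; ConcatenateFrequencyMap appends one extra channel of frequency-position information before the convolution, so the conv sees ch + 1 input channels:

import numpy as np
from kapre.composed import get_frequency_aware_conv2d  # import path assumed

specs = np.random.randn(2, 40, 128, 1).astype(np.float32)  # (batch, time, freq, ch)
fa_conv = get_frequency_aware_conv2d(filters=16, kernel_size=(3, 3), padding='same')
out = fa_conv(specs)  # the conv itself receives 2 input channels (1 + frequency map)
print(out.shape)  # (2, 40, 128, 16)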
def call(self, x):
"""
Args:
x (`Tensor`): batch audio signal in the specified 1D format in initiation.
Returns:
(`Tensor`): A framed tensor. The shape is (batch, time (frames), frame_length, channel) if `channels_last`,
or (batch, channel, time (frames), frame_length) if `channels_first`.
"""
return tf.signal.frame(
x,
frame_length=self.frame_length,
frame_step=self.hop_length,
pad_end=self.pad_end,
pad_value=self.pad_value,
axis=self.time_axis,
) |
Args:
x (`Tensor`): batch audio signal in the specified 1D format in initiation.
Returns:
(`Tensor`): A framed tensor. The shape is (batch, time (frames), frame_length, channel) if `channels_last`,
or (batch, channel, time (frames), frame_length) if `channels_first`.
| call | python | keunwoochoi/kapre | kapre/signal.py | https://github.com/keunwoochoi/kapre/blob/master/kapre/signal.py | MIT |
def call(self, x):
"""
Args:
x (`Tensor`): batch audio signal in the specified 1D format in initiation.
Returns:
(`Tensor`): A framed tensor. The shape is (batch, time (frames), channel) if `channels_last`, or
(batch, channel, time (frames)) if `channels_first`.
"""
frames = tf.signal.frame(
x,
frame_length=self.frame_length,
frame_step=self.hop_length,
pad_end=self.pad_end,
pad_value=self.pad_value,
axis=self.time_axis,
)
frames = tf.math.square(frames) # batch, ndim=4
frame_axis = 2 if self.data_format == _CH_LAST_STR else 3
energies = tf.math.reduce_sum(
frames, axis=frame_axis
) # batch, ndim=3. (b, t, ch) or (b, ch, t)
# normalize it to self.ref_duration
nor_coeff = self.ref_duration / (self.frame_length / self.sample_rate)
return nor_coeff * energies |
Args:
x (`Tensor`): batch audio signal in the specified 1D format in initiation.
Returns:
(`Tensor`): A framed tensor. The shape is (batch, time (frames), channel) if `channels_last`, or
(batch, channel, time (frames)) if `channels_first`.
| call | python | keunwoochoi/kapre | kapre/signal.py | https://github.com/keunwoochoi/kapre/blob/master/kapre/signal.py | MIT |
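A worked instance of the normalization coefficient above, with illustrative numbers (not kapre defaults): frame energies are rescaled as if each frame lasted ref_duration seconds.

sample_rate, frame_length, ref_duration = 22050, 4410, 0.1
frame_seconds = frame_length / sample_rate  # 4410 / 22050 = 0.2 s per frame
nor_coeff = ref_duration / frame_seconds    # 0.1 / 0.2 = 0.5
print(nor_coeff)  # each frame's summed squared samples is halved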
def call(self, log_melgrams):
"""
Args:
log_melgrams (float `Tensor`): a batch of log_melgrams. `(b, time, mel, ch)` if `channels_last`
and `(b, ch, time, mel)` if `channels_first`.
Returns:
(float `Tensor`):
MFCCs. `(batch, time, n_mfccs, ch)` if `channels_last`, `(batch, ch, time, n_mfccs)` if `channels_first`.
"""
if self.permutation is not None:  # permute so that the last axis == mel
log_melgrams = K.permute_dimensions(log_melgrams, pattern=self.permutation)
mfccs = tf.signal.mfccs_from_log_mel_spectrograms(log_melgrams)
mfccs = mfccs[..., : self.n_mfccs]
if self.permutation is not None:
mfccs = K.permute_dimensions(mfccs, pattern=self.permutation)
return mfccs |
Args:
log_melgrams (float `Tensor`): a batch of log_melgrams. `(b, time, mel, ch)` if `channels_last`
and `(b, ch, time, mel)` if `channels_first`.
Returns:
(float `Tensor`):
MFCCs. `(batch, time, n_mfccs, ch)` if `channels_last`, `(batch, ch, time, n_mfccs)` if `channels_first`.
| call | python | keunwoochoi/kapre | kapre/signal.py | https://github.com/keunwoochoi/kapre/blob/master/kapre/signal.py | MIT |
def _rdft(signal, dft_length):
"""DFT for real signals.
Calculates the one-sided DFT; since a real signal implies complex-conjugate symmetry,
only the one-sided DFT is returned.
Args:
signal (tensor) signal to transform, assumes that the last dimension is the time dimension
signal can be framed, e.g. (1, 40, 1024) for a single batch of 40 frames of
length 1024
dft_length (int) - DFT length
Returns:
spectrogram (float32 tensor) - stacked real and imaginary parts of the spectrum,
e.g. (1, 40, 513, 2) for a 1024-point DFT
"""
# calculate the positive frequency atoms, and tell tensorflow this is a constant.
rdft_mat = _rdft_matrix(dft_length)
# tflite doesn't support complex types so split into real and imaginary:
rdft_mat_real = tf.constant(np.real(rdft_mat))
rdft_mat_imag = tf.constant(np.imag(rdft_mat))
frame_length = tf.shape(signal)[-1]
# Right-padding: in case the frame length and DFT length are different,
# pad the signal on the right hand side of the frame
pad_values = tf.concat(
[tf.zeros([tf.rank(signal) - 1, 2], tf.int32), [[0, dft_length - frame_length]]], axis=0
)
signal_padded = tf.pad(signal, pad_values)
# matrix multiplying real and imag separately is faster than using complex types.
spec_real = tf.matmul(signal_padded, rdft_mat_real)
spec_imag = tf.matmul(signal_padded, rdft_mat_imag)
spectrogram = tf.stack([spec_real, spec_imag], axis=-1)
return spectrogram | DFT for real signals.
Calculates the one-sided DFT; since a real signal implies complex-conjugate symmetry,
only the one-sided DFT is returned.
Args:
signal (tensor) signal to transform, assumes that the last dimension is the time dimension
signal can be framed, e.g. (1, 40, 1024) for a single batch of 40 frames of
length 1024
dft_length (int) - DFT length
Returns:
spectrogram (float32 tensor) - stacked real and imaginary parts of the spectrum,
e.g. (1, 40, 513, 2) for a 1024-point DFT
| _rdft | python | keunwoochoi/kapre | kapre/tflite_compatible_stft.py | https://github.com/keunwoochoi/kapre/blob/master/kapre/tflite_compatible_stft.py | MIT |
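A verification sketch against numpy's real FFT; magnitudes are compared so the check stays agnostic to the sign convention of _rdft_matrix, which is not shown here. The import path is assumed:

import numpy as np
import tensorflow as tf
from kapre.tflite_compatible_stft import _rdft  # private helper; import path assumed

frames = np.random.randn(1, 4, 64).astype(np.float32)     # (batch, n_frames, frame_length)
spec = _rdft(tf.constant(frames), dft_length=64).numpy()  # (1, 4, 33, 2): stacked [real, imag]
mag = np.sqrt(spec[..., 0] ** 2 + spec[..., 1] ** 2)
ref = np.abs(np.fft.rfft(frames, n=64, axis=-1))
np.testing.assert_allclose(mag, ref, atol=1e-3)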
def fixed_frame(signal, frame_length, frame_step):
"""tflite-compatible tf.signal.frame for fixed-size input.
Args:
signal: Tensor containing signal(s).
frame_length: Number of samples to put in each frame.
frame_step: Sample advance between successive frames.
Returns:
A new tensor where the last axis of the input signal has been replaced by a
(num_frames, frame_length) array of individual frames, where each frame starts
frame_step samples after the previous one.
Raises:
ValueError: if signal has an undefined axis length. This routine only
supports framing of signals whose shape is fixed at graph-build time.
"""
signal_shape = list(signal.shape)
length_samples = signal_shape[-1]
if length_samples <= 0:
raise ValueError("fixed framing requires predefined constant signal length")
# the number of whole frames
num_frames = max(0, 1 + (length_samples - frame_length) // frame_step)
# define the output_shape, if we receive a None dimension, replace with 1
outer_dimensions = [dim if dim else 1 for dim in signal_shape[:-1]]
# outer_dimensions = signal_shape[:-1]
output_shape = outer_dimensions + [num_frames, frame_length]
# Currently tflite's gather only supports axis==0, but that may still
# work if we want the last of 1 axes.
gather_axis = len(outer_dimensions)
# subframe length is the largest int that as a common divisor of the frame
# length and hop length. We will slice the signal up into these subframes
# in order to then construct the frames.
subframe_length = math.gcd(frame_length, frame_step)
subframes_per_frame = frame_length // subframe_length
subframes_per_hop = frame_step // subframe_length
num_subframes = length_samples // subframe_length
# define the subframe shape and the trimmed audio length, removing any unused
# excess audio so the subframes fit exactly.
subframe_shape = outer_dimensions + [num_subframes, subframe_length]
trimmed_input_size = outer_dimensions + [num_subframes * subframe_length]
# slice up the audio into subframes
subframes = tf.reshape(
tf.slice(signal, begin=np.zeros(len(signal_shape), np.int32), size=trimmed_input_size),
subframe_shape,
)
# frame_selector is a [num_frames, subframes_per_frame] tensor
# that indexes into the appropriate frame in subframes. For example:
# [[0, 0, 0, 0], [2, 2, 2, 2], [4, 4, 4, 4]]
frame_selector = np.reshape(np.arange(num_frames) * subframes_per_hop, [num_frames, 1])
# subframe_selector is a [num_frames, subframes_per_frame] tensor
# that indexes into the appropriate subframe within a frame. For example:
# [[0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3]]
subframe_selector = np.reshape(np.arange(subframes_per_frame), [1, subframes_per_frame])
# Adding the 2 selector tensors together produces a [num_frames,
# subframes_per_frame] tensor of indices to use with tf.gather to select
# subframes from subframes. We then reshape the inner-most subframes_per_frame
# dimension to stitch the subframes together into frames. For example:
# [[0, 1, 2, 3], [2, 3, 4, 5], [4, 5, 6, 7]].
selector = frame_selector + subframe_selector
frames = tf.reshape(
tf.gather(subframes, selector.astype(np.int32), axis=gather_axis), output_shape
)
return frames | tflite-compatible tf.signal.frame for fixed-size input.
Args:
signal: Tensor containing signal(s).
frame_length: Number of samples to put in each frame.
frame_step: Sample advance between successive frames.
Returns:
A new tensor where the last axis of the input signal has been replaced by a
(num_frames, frame_length) array of individual frames, where each frame starts
frame_step samples after the previous one.
Raises:
ValueError: if signal has an undefined axis length. This routine only
supports framing of signals whose shape is fixed at graph-build time.
| fixed_frame | python | keunwoochoi/kapre | kapre/tflite_compatible_stft.py | https://github.com/keunwoochoi/kapre/blob/master/kapre/tflite_compatible_stft.py | MIT |
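A quick equivalence check against tf.signal.frame for a fixed-shape input; import path assumed:

import numpy as np
import tensorflow as tf
from kapre.tflite_compatible_stft import fixed_frame  # import path assumed

signal = tf.constant(np.random.randn(1, 1, 1000).astype(np.float32))  # fixed (batch, ch, time)
frames = fixed_frame(signal, frame_length=400, frame_step=160)
print(frames.shape)  # (1, 1, 4, 400): 1 + (1000 - 400) // 160 = 4 whole frames
ref = tf.signal.frame(signal, frame_length=400, frame_step=160)
np.testing.assert_allclose(frames.numpy(), ref.numpy())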
def stft_tflite(signal, frame_length, frame_step, fft_length, window_fn, pad_end):
"""tflite-compatible implementation of tf.signal.stft.
Compute the short-time Fourier transform of a 1D input while avoiding tf ops
that are not currently supported in tflite (Rfft, Range, SplitV).
fft_length must be fixed. A window of length frame_length, given by window_fn,
is always applied.
Since fixed (precomputed) framing must be used, signal.shape[-1] must be a
specific value (so "?"/None is not supported).
Args:
signal: 1D tensor containing the time-domain waveform to be transformed.
frame_length: int, the number of points in each Fourier frame.
frame_step: int, the number of samples to advance between successive frames.
fft_length: int, the size of the Fourier transform to apply.
window_fn: tf.signal.window, the return of backend.get_window_fn(window_name)
pad_end: bool, if true pads the end with zeros so that signal contains
an integer number of frames
Returns:
spectrogram: A tensor of shape (..., num_frames, fft_length // 2 + 1, 2) stacking the
real and imaginary parts of the short-time Fourier transform of the input signal.
"""
signal = tf.cast(signal, tf.float32)
if pad_end:
# the number of whole frames
# (NOTE: kenders2000), padding is pre-calculated and thus fixed in graph
length_samples = signal.shape[-1]
num_steps_round_up = int(np.ceil(length_samples / frame_step))
pad_amount = (num_steps_round_up * frame_step + frame_length - frame_step) - length_samples
signal = tf.pad(signal, tf.constant([[0, 0], [0, 0], [0, pad_amount]]))
# Make the window be shape (1, frame_length) instead of just frame_length
# in an effort to help the tflite broadcast logic.
window = tf.reshape(window_fn(frame_length), [1, frame_length])
framed_signal = fixed_frame(signal, frame_length, frame_step)
framed_signal *= window
spectrogram = _rdft(framed_signal, fft_length)
return spectrogram | tflite-compatible implementation of tf.signal.stft.
Compute the short-time Fourier transform of a 1D input while avoiding tf ops
that are not currently supported in tflite (Rfft, Range, SplitV).
fft_length must be fixed. A window of length frame_length, given by window_fn,
is always applied.
Since fixed (precomputed) framing must be used, signal.shape[-1] must be a
specific value (so "?"/None is not supported).
Args:
signal: 1D tensor containing the time-domain waveform to be transformed.
frame_length: int, the number of points in each Fourier frame.
frame_step: int, the number of samples to advance between successive frames.
fft_length: int, the size of the Fourier transform to apply.
window_fn: tf.signal.window, the return of backend.get_window_fn(window_name)
pad_end: bool, if true pads the end with zeros so that signal contains
an integer number of frames
Returns:
spectrogram: A tensor of shape (..., num_frames, fft_length // 2 + 1, 2) stacking the
real and imaginary parts of the short-time Fourier transform of the input signal.
| stft_tflite | python | keunwoochoi/kapre | kapre/tflite_compatible_stft.py | https://github.com/keunwoochoi/kapre/blob/master/kapre/tflite_compatible_stft.py | MIT |
def continued_fraction_arctan(x, n=100, dtype=tf.float32):
"""Continued fraction Approximation to the arctan function
Approximate solution to arctan(x); atan is not a natively supported tflite
op (or a flex op). n is the number of iterations; the higher n, the more accurate.
Accuracy is poor when the argument is large.
https://functions.wolfram.com/ElementaryFunctions/ArcTan/10/
Args:
x (tensor) - argument tensor to calculate arctan of
n (int) - The number of iterations, large means arctan is more accurate
dtype (tf.dtype) - tf.float32, or tf.float64
Returns
arctan(x) (tensor) - approx value of arctan(x)
"""
x = tf.cast(x, dtype)
x2 = x * x
d = tf.zeros(tf.shape(x), dtype) + tf.cast(n * 2 + 1, dtype)
for k in tf.range(n, 0.0, -1.0, dtype):
f = k * 2.0 - 1.0
d = f + k * k * x2 / d
return x / d | Continued fraction Approximation to the arctan function
Approximate solution to arctan(x); atan is not a natively supported tflite
op (or a flex op). n is the number of iterations; the higher n, the more accurate.
Accuracy is poor when the argument is large.
https://functions.wolfram.com/ElementaryFunctions/ArcTan/10/
Args:
x (tensor) - argument tensor to calculate arctan of
n (int) - The number of iterations, large means arctan is more accurate
dtype (tf.dtype) - tf.float32, or tf.float64
Returns
arctan(x) (tensor) - approx value of arctan(x)
| continued_fraction_arctan | python | keunwoochoi/kapre | kapre/tflite_compatible_stft.py | https://github.com/keunwoochoi/kapre/blob/master/kapre/tflite_compatible_stft.py | MIT |
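A quick accuracy check against numpy over a moderate range; import path assumed:

import numpy as np
import tensorflow as tf
from kapre.tflite_compatible_stft import continued_fraction_arctan  # import path assumed

x = np.linspace(-5.0, 5.0, 101).astype(np.float32)
approx = continued_fraction_arctan(tf.constant(x), n=100).numpy()
print(np.max(np.abs(approx - np.arctan(x))))  # small here; grows for large |x|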
def atan2_tflite(y, x, n=100, dtype=tf.float32):
"""Approximation to the atan2 function
atan is not a tflite supported op or flex op, thus this uses an Approximation
Poor accuracy when either x is very small or y is very large.
https://en.wikipedia.org/wiki/Atan2
Args:
y (tensor) - vertical component of tangent (or imaginary part of number for phase)
x (tensor) - horizontal component of tangent (or real part of number for phase)
n (int) - The number of iterations to use for atan approximations,
larger means arctan is more accurate
dtype (tf.dtype) - tf.float32, or tf.float64
Returns
atan2(x) (tensor) - approx value of atan2(x)
"""
pi = tf.zeros(tf.shape(x), dtype) + tf.cast(np.pi, dtype)
zeros = tf.zeros(tf.shape(x), dtype)
atan2 = continued_fraction_arctan(y / x, n, dtype)
atan2 = tf.where(x > 0, atan2, atan2)  # x > 0: arctan(y / x) is already correct (kept for clarity)
atan2 = tf.where(tf.logical_and(x < 0.0, y >= 0.0), atan2 + pi, atan2)
atan2 = tf.where(tf.logical_and(x < 0.0, y < 0.0), atan2 - pi, atan2)
atan2 = tf.where(tf.logical_and(tf.equal(x, 0.0), y > 0.0), pi / 2, atan2)
atan2 = tf.where(tf.logical_and(tf.equal(x, 0.0), y < 0.0), -pi / 2, atan2)
# undefined (return 0)
atan2 = tf.where(tf.logical_and(tf.equal(x, 0.0), tf.equal(y, 0.0)), zeros, atan2)
return atan2 | Approximation to the atan2 function
atan is not a tflite supported op or flex op, thus this uses an Approximation
Poor accuracy when either x is very small or y is very large.
https://en.wikipedia.org/wiki/Atan2
Args:
y (tensor) - vertical component of tangent (or imaginary part of number for phase)
x (tensor) - horizontal component of tangent (or real part of number for phase)
n (int) - The number of iterations to use for atan approximations,
larger means arctan is more accurate
dtype (tf.dtype) - tf.float32, or tf.float64
Returns
atan2(x) (tensor) - approx value of atan2(x)
| atan2_tflite | python | keunwoochoi/kapre | kapre/tflite_compatible_stft.py | https://github.com/keunwoochoi/kapre/blob/master/kapre/tflite_compatible_stft.py | MIT |
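The same kind of spot check for atan2_tflite, avoiding the x == 0 edge cases; import path assumed:

import numpy as np
import tensorflow as tf
from kapre.tflite_compatible_stft import atan2_tflite  # import path assumed

y = tf.constant([1.0, -1.0, 0.5, 2.0], dtype=tf.float32)
x = tf.constant([1.0, -1.0, -2.0, 3.0], dtype=tf.float32)
approx = atan2_tflite(y, x, n=200).numpy()
print(np.max(np.abs(approx - np.arctan2(y.numpy(), x.numpy()))))  # approximation error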
def _shape_spectrum_output(spectrums, data_format):
"""Shape batch spectrograms into the right format.
Args:
spectrums (`Tensor`): result of tf.signal.stft or similar, i.e., (..., time, freq).
data_format (`str`): 'channels_first' or 'channels_last'
Returns:
spectrums (`Tensor`): a transposed version of input `spectrums`
"""
if data_format == _CH_FIRST_STR:
pass # probably it's already (batch, channel, time, freq)
else:
spectrums = tf.transpose(spectrums, perm=(0, 2, 3, 1)) # (batch, time, freq, channel)
return spectrums | Shape batch spectrograms into the right format.
Args:
spectrums (`Tensor`): result of tf.signal.stft or similar, i.e., (..., time, freq).
data_format (`str`): 'channels_first' or 'channels_last'
Returns:
spectrums (`Tensor`): a transposed version of input `spectrums`
| _shape_spectrum_output | python | keunwoochoi/kapre | kapre/time_frequency.py | https://github.com/keunwoochoi/kapre/blob/master/kapre/time_frequency.py | MIT |
def call(self, x):
"""
Compute STFT of the input signal. If the `time` axis is not the last axis of `x`, it should be transposed first.
Args:
x (float `Tensor`): batch of audio signals, (batch, ch, time) or (batch, time, ch) based on input_data_format
Return:
(complex `Tensor`): A STFT representation of x in a 2D batch shape.
`complex64` if `x` is `float32`, `complex128` if `x` is `float64`.
Its shape is (batch, time, freq, ch) or (batch, ch, time, freq) depending on `output_data_format` and
`time` is the number of frames, which is `ceil(len_src / hop_length)` if `pad_end` is `True`.
`freq` is the number of fft unique bins, which is `n_fft // 2 + 1` (the unique components of the FFT).
"""
waveforms = x # (batch, ch, time) if input_data_format == 'channels_first'.
# (batch, time, ch) if input_data_format == 'channels_last'.
# this is needed because tf.signal.stft lives in channels_first land.
if self.input_data_format == _CH_LAST_STR:
waveforms = tf.transpose(
waveforms, perm=(0, 2, 1)
) # always (batch, ch, time) from here
if self.pad_begin:
waveforms = tf.pad(
waveforms, tf.constant([[0, 0], [0, 0], [int(self.n_fft - self.hop_length), 0]])
)
stfts = tf.signal.stft(
signals=waveforms,
frame_length=self.win_length,
frame_step=self.hop_length,
fft_length=self.n_fft,
window_fn=self.window_fn,
pad_end=self.pad_end,
name='%s_tf.signal.stft' % self.name,
) # (batch, ch, time, freq)
if self.output_data_format == _CH_LAST_STR:
stfts = tf.transpose(stfts, perm=(0, 2, 3, 1)) # (batch, t, f, ch)
return stfts |
Compute STFT of the input signal. If the `time` axis is not the last axis of `x`, it should be transposed first.
Args:
x (float `Tensor`): batch of audio signals, (batch, ch, time) or (batch, time, ch) based on input_data_format
Return:
(complex `Tensor`): A STFT representation of x in a 2D batch shape.
`complex64` if `x` is `float32`, `complex128` if `x` is `float64`.
Its shape is (batch, time, freq, ch) or (batch, ch, time, freq) depending on `output_data_format` and
`time` is the number of frames, which is `ceil(len_src / hop_length)` if `pad_end` is `True`.
`freq` is the number of fft unique bins, which is `n_fft // 2 + 1` (the unique components of the FFT).
| call | python | keunwoochoi/kapre | kapre/time_frequency.py | https://github.com/keunwoochoi/kapre/blob/master/kapre/time_frequency.py | MIT |
def call(self, x):
"""
Compute inverse STFT of the input STFT.
Args:
x (complex `Tensor`): batch of STFTs, (batch, ch, time, freq) or (batch, time, freq, ch) depending on `input_data_format`
Return:
(`float`): audio signals of x. Shape: 1D batch shape. I.e., (batch, time, ch) or (batch, ch, time) depending on `output_data_format`
"""
stfts = x # (batch, ch, time, freq) if input_data_format == 'channels_first'.
# (batch, time, freq, ch) if input_data_format == 'channels_last'.
# this is needed because tf.signal.stft lives in channels_first land.
if self.input_data_format == _CH_LAST_STR:
stfts = tf.transpose(stfts, perm=(0, 3, 1, 2)) # now always (b, ch, t, f)
waveforms = tf.signal.inverse_stft(
stfts=stfts,
frame_length=self.win_length,
frame_step=self.hop_length,
fft_length=self.n_fft,
window_fn=self.window_fn,
name='%s_tf.signal.istft' % self.name,
) # (batch, ch, time)
if self.output_data_format == _CH_LAST_STR:
waveforms = tf.transpose(waveforms, perm=(0, 2, 1)) # (batch, time, ch)
return waveforms |
Compute inverse STFT of the input STFT.
Args:
x (complex `Tensor`): batch of STFTs, (batch, ch, time, freq) or (batch, time, freq, ch) depending on `input_data_format`
Return:
(`float`): audio signals of x. Shape: 1D batch shape. I.e., (batch, time, ch) or (batch, ch, time) depending on `output_data_format`
| call | python | keunwoochoi/kapre | kapre/time_frequency.py | https://github.com/keunwoochoi/kapre/blob/master/kapre/time_frequency.py | MIT |
def call(self, x):
"""
Args:
x (complex `Tensor`): input complex tensor
Returns:
(float `Tensor`): phase of `x` (Radian)
"""
if self.approx_atan_accuracy:
return atan2_tflite(tf.math.imag(x), tf.math.real(x), n=self.approx_atan_accuracy)
return tf.math.angle(x) |
Args:
x (complex `Tensor`): input complex tensor
Returns:
(float `Tensor`): phase of `x` (Radian)
| call | python | keunwoochoi/kapre | kapre/time_frequency.py | https://github.com/keunwoochoi/kapre/blob/master/kapre/time_frequency.py | MIT |
def call(self, x):
"""
Args:
x (`Tensor`): float tensor. Can be batch or not. Something like magnitude of STFT.
Returns:
(`Tensor`): decibel-scaled float tensor of `x`.
"""
return backend.magnitude_to_decibel(
x, ref_value=self.ref_value, amin=self.amin, dynamic_range=self.dynamic_range
) |
Args:
x (`Tensor`): float tensor. Can be batch or not. Something like magnitude of STFT.
Returns:
(`Tensor`): decibel-scaled float tensor of `x`.
| call | python | keunwoochoi/kapre | kapre/time_frequency.py | https://github.com/keunwoochoi/kapre/blob/master/kapre/time_frequency.py | MIT |
def call(self, x):
"""
Apply filterbank to `x`.
Args:
x (`Tensor`): float tensor in 2D batch shape.
"""
# x: 2d batch input. (b, t, fr, ch) or (b, ch, t, fr)
output = tf.tensordot(x, self.filterbank, axes=(self.freq_axis, 0))
# ch_last -> (b, t, ch, new_fr). ch_first -> (b, ch, t, new_fr)
if self.data_format == _CH_LAST_STR:
output = tf.transpose(output, (0, 1, 3, 2))
return output |
Apply filterbank to `x`.
Args:
x (`Tensor`): float tensor in 2D batch shape.
| call | python | keunwoochoi/kapre | kapre/time_frequency.py | https://github.com/keunwoochoi/kapre/blob/master/kapre/time_frequency.py | MIT |
def call(self, x):
"""
Args:
x (`Tensor`): a 2d batch (b, t, f, ch) or (b, ch, t, f)
Returns:
(`Tensor`): A tensor with the same shape as input data.
"""
if self.data_format == 'channels_first':
x = K.permute_dimensions(x, (0, 2, 3, 1))
x = tf.pad(
x, tf.constant([[0, 0], [self.n, self.n], [0, 0], [0, 0]]), mode=self.mode
) # pad over time
kernel = K.arange(-self.n, self.n + 1, 1, dtype=K.floatx())
kernel = K.reshape(kernel, (-1, 1, 1, 1)) # time, freq, in_ch, out_ch
x = K.conv2d(x, kernel, data_format=_CH_LAST_STR) / self.denom
if self.data_format == _CH_FIRST_STR:
x = K.permute_dimensions(x, (0, 3, 1, 2))
return x |
Args:
x (`Tensor`): a 2d batch (b, t, f, ch) or (b, ch, t, f)
Returns:
(`Tensor`): A tensor with the same shape as input data.
| call | python | keunwoochoi/kapre | kapre/time_frequency.py | https://github.com/keunwoochoi/kapre/blob/master/kapre/time_frequency.py | MIT |
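The conv kernel above is just the integer window [-n, ..., n] divided by a normalizer; a worked numpy instance of the same arithmetic for n = 2, assuming the layer's denom follows the standard 2 * sum(i**2) = 10:

import numpy as np

n = 2
kernel = np.arange(-n, n + 1, dtype=np.float32)           # [-2, -1, 0, 1, 2]
denom = 2 * sum(i ** 2 for i in range(1, n + 1))          # 10 (assumed normalizer)
frames = np.array([0.0, 1.0, 2.0, 3.0, 4.0], np.float32)  # a linear ramp along time
delta = np.correlate(frames, kernel, mode='valid') / denom
print(delta)  # [1.]: the ramp's slope, as a first-order delta should report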
def call(self, x):
"""
Compute STFT of the input signal. If the `time` axis is not the last axis of `x`, it should be transposed first.
Args:
x (float `Tensor`): batch of audio signals, (batch, ch, time) or (batch, time, ch) based on input_data_format
Return:
(real `Tensor`): A STFT representation of x in a 2D batch shape. The last dimension is size two and contains
the real and imaginary parts of the stft.
Its shape is (batch, time, freq, ch, 2) or (batch, ch, time, freq, 2) depending on `output_data_format` and
`time` is the number of frames, which is `ceil(len_src / hop_length)`
if `pad_end` is `True`. `freq` is the number of fft unique bins, which is `n_fft // 2 + 1` (the unique components of the FFT).
"""
waveforms = x # (batch, ch, time) if input_data_format == 'channels_first'.
# (batch, time, ch) if input_data_format == 'channels_last'.
# this is needed because tf.signal.stft lives in channels_first land.
if self.input_data_format == _CH_LAST_STR:
waveforms = tf.transpose(
waveforms, perm=(0, 2, 1)
) # always (batch, ch, time) from here
if self.pad_begin:
waveforms = tf.pad(
waveforms, tf.constant([[0, 0], [0, 0], [int(self.n_fft - self.hop_length), 0]])
)
stfts = stft_tflite(
waveforms,
frame_length=self.win_length,
frame_step=self.hop_length,
fft_length=self.n_fft,
window_fn=self.window_fn,
pad_end=self.pad_end,
) # (batch, ch, time, freq, re/imag)
if self.output_data_format == _CH_LAST_STR:
# tflite compatible stft produces real and imag in 1st dim
stfts = tf.transpose(stfts, perm=(0, 2, 3, 1, 4)) # (batch, t, f, ch, re/im)
return stfts |
Compute STFT of the input signal. If the `time` axis is not the last axis of `x`, it should be transposed first.
Args:
x (float `Tensor`): batch of audio signals, (batch, ch, time) or (batch, time, ch) based on input_data_format
Return:
(real `Tensor`): A STFT representation of x in a 2D batch shape. The last dimension is size two and contains
the real and imaginary parts of the stft.
Its shape is (batch, time, freq, ch, 2) or (batch, ch, time, freq, 2) depending on `output_data_format` and
`time` is the number of frames, which is `ceil(len_src / hop_length)`
if `pad_end` is `True`. `freq` is the number of fft unique bins, which is `n_fft // 2 + 1` (the unique components of the FFT).
| call | python | keunwoochoi/kapre | kapre/time_frequency_tflite.py | https://github.com/keunwoochoi/kapre/blob/master/kapre/time_frequency_tflite.py | MIT |
def test_spec_augment_apply_masks_to_axis(inputs):
"""
Tests the method _apply_masks_to_axis to check that the shape is preserved and
that the expected exceptions are raised
"""
data_format, axis, mask_param, n_masks = inputs
batch_src, input_shape = get_spectrogram(data_format)
spec_augment = SpecAugment(
input_shape=input_shape,
freq_mask_param=5,
time_mask_param=10,
n_freq_masks=4,
n_time_masks=3,
mask_value=0.0,
data_format=data_format,
)
# We force axis that will trigger NotImplementedError
if axis not in [0, 1, 2]:
# Check axis error
with pytest.raises(NotImplementedError):
# We use batch_src instead of batch_src[0] to simulate a 4D spectrogram
inputs = (batch_src, axis, mask_param, n_masks)
spec_augment._apply_masks_to_axis(*inputs)
# We force mask_params that will trigger the ValueError. If it is not triggered, then
# inputs are ok, so we must only test if the shapes are kept during transformation
elif mask_param != 5:
# Check mask_param error
with pytest.raises(ValueError):
inputs = (batch_src[0], axis, mask_param, n_masks)
spec_augment._apply_masks_to_axis(*inputs)
else:
# Check that transformation keeps shape
inputs = (batch_src[0], axis, mask_param, n_masks)
mask = spec_augment._apply_masks_to_axis(*inputs)
np.testing.assert_equal(mask.shape[axis], input_shape[axis]) |
Tests the method _apply_masks_to_axis to check that the shape is preserved and
that the expected exceptions are raised
| test_spec_augment_apply_masks_to_axis | python | keunwoochoi/kapre | tests/test_augmentation.py | https://github.com/keunwoochoi/kapre/blob/master/tests/test_augmentation.py | MIT |
def test_spec_augment_depth_exception():
"""
Checks that SpecAugments fails if Spectrogram has depth greater than 1.
"""
data_format = "default"
with pytest.raises(RuntimeError):
batch_src, input_shape = get_spectrogram(data_format=data_format, n_ch=4)
model = tf.keras.Sequential()
spec_augment = SpecAugment(
input_shape=input_shape, freq_mask_param=5, time_mask_param=10, data_format=data_format
)
model.add(spec_augment)
_ = model(batch_src, training=True)[0] |
Checks that SpecAugments fails if Spectrogram has depth greater than 1.
| test_spec_augment_depth_exception | python | keunwoochoi/kapre | tests/test_augmentation.py | https://github.com/keunwoochoi/kapre/blob/master/tests/test_augmentation.py | MIT |
def test_spec_augment_layer(data_format, atol=1e-4):
"""
Tests the complete layer, checking if the parameter `training` has the expected behaviour.
"""
batch_src, input_shape = get_spectrogram(data_format)
model = tf.keras.Sequential()
spec_augment = SpecAugment(
input_shape=input_shape,
freq_mask_param=5,
time_mask_param=10,
n_freq_masks=4,
n_time_masks=3,
mask_value=0.0,
data_format=data_format,
)
model.add(spec_augment)
# First, enforce training to True and check the shapes
spec_augmented = model(batch_src, training=True)
np.testing.assert_equal(model.layers[0].output_shape[1:], spec_augmented[0].shape)
# Second, check that it doesn't change anything by default (training=False)
spec_augmented = model(batch_src)
np.testing.assert_allclose(spec_augmented, batch_src, atol=atol)  # atol by keyword; the third positional argument is rtol
Tests the complete layer, checking if the parameter `training` has the expected behaviour.
| test_spec_augment_layer | python | keunwoochoi/kapre | tests/test_augmentation.py | https://github.com/keunwoochoi/kapre/blob/master/tests/test_augmentation.py | MIT |
def test_filterbank_log(sample_rate, n_freq, n_bins, bins_per_octave, f_min, spread):
"""It only tests if the function is a valid wrapper"""
log_fb = KPB.filterbank_log(
sample_rate=sample_rate,
n_freq=n_freq,
n_bins=n_bins,
bins_per_octave=bins_per_octave,
f_min=f_min,
spread=spread,
)
assert log_fb.dtype == K.floatx()
assert log_fb.shape == (n_freq, n_bins) | It only tests if the function is a valid wrapper | test_filterbank_log | python | keunwoochoi/kapre | tests/test_backend.py | https://github.com/keunwoochoi/kapre/blob/master/tests/test_backend.py | MIT |
def allclose_phase(a, b, atol=1e-3):
"""Testing phase.
Remember that a small error in complex value may lead to a large phase difference
if the norm is very small.
Therefore, it makes more sense to test it on the complex value itself rather than breaking it down to phase.
"""
np.testing.assert_allclose(np.sin(a), np.sin(b), atol=atol)
np.testing.assert_allclose(np.cos(a), np.cos(b), atol=atol) | Testing phase.
Remember that a small error in complex value may lead to a large phase difference
if the norm is very small.
Therefore, it makes more sense to test it on the complex value itself rather than breaking it down to phase.
| allclose_phase | python | keunwoochoi/kapre | tests/test_time_frequency.py | https://github.com/keunwoochoi/kapre/blob/master/tests/test_time_frequency.py | MIT |
def assert_approx_phase(a, b, atol=1e-2, acceptable_fail_ratio=0.01):
"""Testing approximate phase.
TFLite phase is approximate and some values will always have a large error,
so it makes more sense to count how many values fall within tolerance.
"""
count_failed = np.sum(np.abs(a - b) > atol)
assert (
count_failed / a.size < acceptable_fail_ratio
), "too many inaccurate phase bins: {} out of {} incorrect".format(count_failed, a.size)
TFLite phase is approximate and some values will always have a large error,
so it makes more sense to count how many values fall within tolerance.
| assert_approx_phase | python | keunwoochoi/kapre | tests/test_time_frequency.py | https://github.com/keunwoochoi/kapre/blob/master/tests/test_time_frequency.py | MIT |
def test_melspectrogram_correctness(
n_fft, sr, hop_length, n_ch, data_format, amin, dynamic_range, n_mels, mel_f_min, mel_f_max
):
"""Test the correctness of melspectrogram.
Note that mel filterbank is tested separated
"""
def _get_melgram_model(return_decibel, amin, dynamic_range, input_shape=None):
# compute with kapre
melgram_model = get_melspectrogram_layer(
n_fft=n_fft,
sample_rate=sr,
n_mels=n_mels,
mel_f_min=mel_f_min,
mel_f_max=mel_f_max,
win_length=win_length,
hop_length=hop_length,
input_data_format=data_format,
output_data_format=data_format,
return_decibel=return_decibel,
input_shape=input_shape,
db_amin=amin,
db_dynamic_range=dynamic_range,
)
return melgram_model
src_mono, batch_src, input_shape = get_audio(data_format=data_format, n_ch=n_ch)
win_length = n_fft  # use a window length equal to n_fft
# compute with librosa
S_ref = librosa.feature.melspectrogram(
src_mono,
sr=sr,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
center=False,
power=1.0,
n_mels=n_mels,
fmin=mel_f_min,
fmax=mel_f_max,
).T
S_ref = np.expand_dims(S_ref, axis=2) # time, freq, ch=1
S_ref = np.tile(S_ref, [1, 1, n_ch]) # time, freq, ch=n_ch
if data_format == 'channels_first':
S_ref = np.transpose(S_ref, (2, 0, 1)) # ch, time, freq
# melgram
melgram_model = _get_melgram_model(
return_decibel=False, input_shape=input_shape, amin=None, dynamic_range=120.0
)
S = melgram_model.predict(batch_src)[0] # 3d representation
np.testing.assert_allclose(S_ref, S, atol=1e-4)
# log melgram
melgram_model = _get_melgram_model(
return_decibel=True, input_shape=input_shape, amin=amin, dynamic_range=dynamic_range
)
S = melgram_model.predict(batch_src)[0] # 3d representation
S_ref_db = librosa.power_to_db(S_ref, ref=1.0, amin=amin, top_db=dynamic_range)
np.testing.assert_allclose(
S_ref_db, S, rtol=3e-3
) # decibel is evaluated with relative tolerance | Test the correctness of melspectrogram.
Note that mel filterbank is tested separated
| test_melspectrogram_correctness | python | keunwoochoi/kapre | tests/test_time_frequency.py | https://github.com/keunwoochoi/kapre/blob/master/tests/test_time_frequency.py | MIT |
def test_log_spectrogram_runnable(data_format):
"""test if log spectrogram layer works well"""
src_mono, batch_src, input_shape = get_audio(data_format=data_format, n_ch=1)
_ = get_log_frequency_spectrogram_layer(input_shape, return_decibel=True)
_ = get_log_frequency_spectrogram_layer(input_shape, return_decibel=False) | test if log spectrogram layer works well | test_log_spectrogram_runnable | python | keunwoochoi/kapre | tests/test_time_frequency.py | https://github.com/keunwoochoi/kapre/blob/master/tests/test_time_frequency.py | MIT |
def test_log_spectrogram_fail():
"""test that the log frequency spectrogram layer fails when log_n_bins is too large"""
src_mono, batch_src, input_shape = get_audio(data_format='channels_last', n_ch=1)
_ = get_log_frequency_spectrogram_layer(input_shape, return_decibel=True, log_n_bins=200) | test that the log frequency spectrogram layer fails when log_n_bins is too large | test_log_spectrogram_fail | python | keunwoochoi/kapre | tests/test_time_frequency.py | https://github.com/keunwoochoi/kapre/blob/master/tests/test_time_frequency.py | MIT |
def test_save_load(save_format):
"""test saving/loading of models that has stft, melspectorgrma, and log frequency."""
src_mono, batch_src, input_shape = get_audio(data_format='channels_last', n_ch=1)
# test STFT save/load
save_load_compare(
STFT(input_shape=input_shape, pad_begin=True),
batch_src,
allclose_complex_numbers,
save_format,
STFT,
)
# test ConcatenateFrequencyMap
specs_batch = np.random.randn(2, 3, 5, 4).astype(np.float32)
save_load_compare(
ConcatenateFrequencyMap(input_shape=specs_batch.shape[1:]),
specs_batch,
np.testing.assert_allclose,
save_format,
ConcatenateFrequencyMap,
)
if save_format == 'tf':
# test melspectrogram save/load
save_load_compare(
get_melspectrogram_layer(input_shape=input_shape, return_decibel=True),
batch_src,
np.testing.assert_allclose,
save_format,
)
# test log frequency spectrogram save/load
save_load_compare(
get_log_frequency_spectrogram_layer(input_shape=input_shape, return_decibel=True),
batch_src,
np.testing.assert_allclose,
save_format,
)
# test stft_mag_phase
save_load_compare(
get_stft_mag_phase(input_shape=input_shape, return_decibel=True),
batch_src,
np.testing.assert_allclose,
save_format,
)
# test stft mag
save_load_compare(
get_stft_magnitude_layer(input_shape=input_shape),
batch_src,
np.testing.assert_allclose,
save_format,
) | test saving/loading of models that has stft, melspectorgrma, and log frequency. | test_save_load | python | keunwoochoi/kapre | tests/test_time_frequency.py | https://github.com/keunwoochoi/kapre/blob/master/tests/test_time_frequency.py | MIT |
def save_load_compare(
layer, input_batch, allclose_func, save_format, layer_class=None, training=None, atol=1e-4
):
"""test a model with `layer` with the given `input_batch`.
The model prediction result is compared using `allclose_func` which may depend on the
data type of the model output (e.g., float or complex).
"""
model = tf.keras.models.Sequential()
model.add(layer)
result_ref = model(input_batch, training=training)
os_temp_dir = tempfile.gettempdir()
model_temp_dir = tempfile.TemporaryDirectory(dir=os_temp_dir)
if save_format == 'tf':
model_path = model_temp_dir.name
elif save_format == 'h5':
model_path = os.path.join(model_temp_dir.name, 'model.h5')
else:
raise ValueError
model.save(filepath=model_path, save_format=save_format)
if save_format == 'h5':
new_model = tf.keras.models.load_model(
model_path, custom_objects={layer.__class__.__name__: layer_class}
)
else:
new_model = tf.keras.models.load_model(model_path)
result_new = new_model(input_batch)
allclose_func(result_ref, result_new, atol=atol)  # pass atol by keyword so np.testing.assert_allclose doesn't read it as rtol
model_temp_dir.cleanup()
return model | test a model with `layer` with the given `input_batch`.
The model prediction result is compared using `allclose_func` which may depend on the
data type of the model output (e.g., float or complex).
| save_load_compare | python | keunwoochoi/kapre | tests/utils.py | https://github.com/keunwoochoi/kapre/blob/master/tests/utils.py | MIT |
def predict_using_tflite(model, batch_src):
"""Convert a keras model to tflite and infer on batch_src
Attempts to convert a keras model to a tflite model, load the tflite model,
then infer on the data in batch_src
Args:
model (keras model)
batch_src (numpy array) - audio to test model
Returns:
pred_tflite (numpy array) - array of predictions.
"""
############################################################################
# TF lite conversion
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.target_spec.supported_ops = [
tf.lite.OpsSet.SELECT_TF_OPS,
tf.lite.OpsSet.TFLITE_BUILTINS,
]
tflite_model = converter.convert()
model_name = 'test_tflite'
path = Path("/tmp/tflite_tests/")
# make a temporary location
if path.exists():
shutil.rmtree(path)
os.makedirs(path)
tflite_file = path / Path(model_name + ".tflite")
tflite_file.write_bytes(tflite_model)  # write the converted flatbuffer to disk
############################################################################
# Make sure we can load and infer on the TFLITE model
interpreter = tf.lite.Interpreter(tflite_file.as_posix())
# infer on each input separately and collect the predictions
pred_tflite = []
for x in batch_src:
# set batch size for tflite
interpreter.allocate_tensors()
# Get input and output tensors.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# apply input tensors, expand first dimension to create batch dimension
interpreter.set_tensor(input_details[0]["index"], np.expand_dims(x, 0))
# infer
interpreter.invoke()
tflite_results = interpreter.get_tensor(output_details[0]["index"])
pred_tflite.append(tflite_results)
return np.concatenate(pred_tflite, axis=0) | Convert a keras model to tflite and infer on batch_src
Attempts to convert a keras model to a tflite model, load the tflite model,
then infer on the data in batch_src
Args:
model (keras model)
batch_src (numpy array) - audio to test model
Returns:
pred_tflite (numpy array) - array of predictions.
| predict_using_tflite | python | keunwoochoi/kapre | tests/utils.py | https://github.com/keunwoochoi/kapre/blob/master/tests/utils.py | MIT |
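A minimal smoke test of the helper above; the helper is model-agnostic, and a plain Dense layer keeps the TFLite conversion trivial:

import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(4, input_shape=(8,))])
batch_src = np.random.randn(3, 8).astype(np.float32)
preds = predict_using_tflite(model, batch_src)  # the helper defined above
print(preds.shape)  # (3, 4): per-input predictions concatenated back into a batch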
def add(ctx, task, priority, tags, extra, category, labels):
"""Add a new task to the to-do list.
Note:
Control the output of this using the verbosity option.
"""
if ctx.obj["verbose"] >= 2:
click.echo(f"Adding task: {task}")
click.echo(f"Priority: {priority}")
click.echo(f'Tags: {", ".join(tags)}')
click.echo(f"Extra data: {extra}")
elif ctx.obj["verbose"] >= 1:
click.echo(f"Adding task: {task}")
else:
pass
# Implement the task adding functionality here | Add a new task to the to-do list.
Note:
Control the output of this using the verbosity option.
| add | python | Textualize/trogon | examples/demo.py | https://github.com/Textualize/trogon/blob/master/examples/demo.py | MIT |
def remove(ctx, task_id):
"""Remove a task from the to-do list by its ID."""
if ctx.obj["verbose"] >= 1:
click.echo(f"Removing task with ID: {task_id}")
# Implement the task removal functionality here | Remove a task from the to-do list by its ID. | remove | python | Textualize/trogon | examples/demo.py | https://github.com/Textualize/trogon/blob/master/examples/demo.py | MIT |
def list_tasks(ctx, all, completed):
"""List tasks from the to-do list."""
if ctx.obj["verbose"] >= 1:
click.echo(f"Listing tasks:")
# Implement the task listing functionality here | List tasks from the to-do list. | list_tasks | python | Textualize/trogon | examples/demo.py | https://github.com/Textualize/trogon/blob/master/examples/demo.py | MIT |
def add(verbose, task, priority, tags, extra, category, labels):
"""Add a new task to the to-do list."""
if verbose >= 2:
click.echo(f"Adding task: {task}")
click.echo(f"Priority: {priority}")
click.echo(f'Tags: {", ".join(tags)}')
click.echo(f"Extra data: {extra}")
click.echo(f"Category: {category}")
click.echo(f'Labels: {", ".join(labels)}')
elif verbose >= 1:
click.echo(f"Adding task: {task}")
else:
pass
# Implement the task adding functionality here | Add a new task to the to-do list. | add | python | Textualize/trogon | examples/nogroup_demo.py | https://github.com/Textualize/trogon/blob/master/examples/nogroup_demo.py | MIT |
def detect_run_string(_main: ModuleType = sys.modules["__main__"]) -> str:
"""This is a slightly modified version of a function from Click."""
path = sys.argv[0]
# The value of __package__ indicates how Python was called. It may
# not exist if a setuptools script is installed as an egg. It may be
# set incorrectly for entry points created with pip on Windows.
if getattr(_main, "__package__", None) is None or (
os.name == "nt"
and _main.__package__ == ""
and not os.path.exists(path)
and os.path.exists(f"{path}.exe")
):
# Executed a file, like "python app.py".
file_path = shlex.quote(os.path.basename(path))
argv = get_orig_argv()
if argv[0] == "python":
prefix = f"{argv[0]} "
else:
prefix = ""
return f"{prefix}{file_path}"
# Executed a module, like "python -m example".
# Rewritten by Python from "-m script" to "/path/to/script.py".
# Need to look at main module to determine how it was executed.
py_module = _main.__package__
name = os.path.splitext(os.path.basename(path))[0]
# A submodule like "example.cli".
if name != "__main__":
py_module = f"{py_module}.{name}"
return f"python -m {py_module.lstrip('.')}" | This is a slightly modified version of a function from Click. | detect_run_string | python | Textualize/trogon | trogon/detect_run_string.py | https://github.com/Textualize/trogon/blob/master/trogon/detect_run_string.py | MIT |
def introspect_click_app(app: BaseCommand) -> dict[CommandName, CommandSchema]:
"""
Introspect a Click application and build a data structure containing
information about all commands, options, arguments, and subcommands,
including the docstrings and command function references.
This function recursively processes each command and its subcommands
(if any), creating a nested dictionary that includes details about
options, arguments, and subcommands, as well as the docstrings and
command function references.
Args:
app (click.BaseCommand): The Click application's top-level group or command instance.
Returns:
Dict[CommandName, CommandSchema]: A nested dictionary containing the Click application's
structure. The structure is defined by the CommandSchema dataclass and its related
schemas (OptionSchema and ArgumentSchema).
"""
def process_command(
cmd_name: CommandName, cmd_obj: click.Command, parent=None
) -> CommandSchema:
cmd_data = CommandSchema(
name=cmd_name,
docstring=cmd_obj.help,
function=cmd_obj.callback,
options=[],
arguments=[],
subcommands={},
parent=parent,
is_group=isinstance(cmd_obj, click.Group),
)
for param in cmd_obj.params:
default = MultiValueParamData.process_cli_option(param.default)
if isinstance(param, (click.Option, click.core.Group)):
option_data = OptionSchema(
name=param.opts,
type=param.type,
is_flag=param.is_flag,
is_boolean_flag=param.is_bool_flag,
flag_value=param.flag_value,
counting=param.count,
opts=param.opts,
secondary_opts=param.secondary_opts,
required=param.required,
default=default,
help=param.help,
multiple=param.multiple,
nargs=param.nargs,
)
if isinstance(param.type, click.Choice):
option_data.choices = param.type.choices
cmd_data.options.append(option_data)
elif isinstance(param, click.Argument):
argument_data = ArgumentSchema(
name=param.name,
type=param.type,
required=param.required,
multiple=param.multiple,
default=default,
nargs=param.nargs,
)
if isinstance(param.type, click.Choice):
argument_data.choices = param.type.choices
cmd_data.arguments.append(argument_data)
if isinstance(cmd_obj, click.core.Group):
for subcmd_name, subcmd_obj in cmd_obj.commands.items():
cmd_data.subcommands[CommandName(subcmd_name)] = process_command(
CommandName(subcmd_name), subcmd_obj, parent=cmd_data
)
return cmd_data
data: dict[CommandName, CommandSchema] = {}
# Special case for the root group
if isinstance(app, click.Group):
root_cmd_name = CommandName("root")
data[root_cmd_name] = process_command(root_cmd_name, app)
app = data[root_cmd_name]
if isinstance(app, click.Group):
for cmd_name, cmd_obj in app.commands.items():
data[CommandName(cmd_name)] = process_command(
CommandName(cmd_name), cmd_obj
)
elif isinstance(app, click.Command):
cmd_name = CommandName(app.name)
data[cmd_name] = process_command(cmd_name, app)
return data |
Introspect a Click application and build a data structure containing
information about all commands, options, arguments, and subcommands,
including the docstrings and command function references.
This function recursively processes each command and its subcommands
(if any), creating a nested dictionary that includes details about
options, arguments, and subcommands, as well as the docstrings and
command function references.
Args:
app (click.BaseCommand): The Click application's top-level group or command instance.
Returns:
Dict[str, CommandData]: A nested dictionary containing the Click application's
structure. The structure is defined by the CommandData TypedDict and its related
TypedDicts (OptionData and ArgumentData).
| introspect_click_app | python | Textualize/trogon | trogon/introspect.py | https://github.com/Textualize/trogon/blob/master/trogon/introspect.py | MIT |
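A sketch of driving introspect_click_app with a toy Click group, assuming trogon is installed (the import path is the row's path column); the root group is special-cased under the key "root":

import click
from trogon.introspect import introspect_click_app

@click.group()
def cli():
    """Toy root group."""

@cli.command()
@click.option("--count", type=int, default=1)
def hello(count):
    """Say hello."""

schemas = introspect_click_app(cli)
root = schemas["root"]
print(list(root.subcommands))  # -> ['hello']
print([opt.name for opt in root.subcommands["hello"].options])  # -> [['--count']]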
def to_cli_args(self, include_root_command: bool = False) -> list[str]:
"""
Generates a list of strings representing the CLI invocation based on the user input data.
Returns:
A list of strings that can be passed to subprocess.run to execute the command.
"""
cli_args = self._to_cli_args()
if not include_root_command:
cli_args = cli_args[1:]
return cli_args |
Generates a list of strings representing the CLI invocation based on the user input data.
Returns:
A list of strings that can be passed to subprocess.run to execute the command.
| to_cli_args | python | Textualize/trogon | trogon/run_command.py | https://github.com/Textualize/trogon/blob/master/trogon/run_command.py | MIT |
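A sketch of consuming to_cli_args() as the docstring suggests; command_data is a hypothetical, already-populated UserCommandData, and the program name must be prepended because include_root_command=False drops it:

import subprocess

def run_user_command(program: str, command_data) -> int:
    # Coerce to str: the list may contain non-string parameter values.
    args = [str(a) for a in command_data.to_cli_args(include_root_command=False)]
    completed = subprocess.run([program, *args])
    return completed.returncode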
def to_cli_string(self, include_root_command: bool = False) -> Text:
"""
Generates a string representing the CLI invocation as if typed directly into the
command line.
Returns:
A string representing the command invocation.
"""
args = self.to_cli_args(include_root_command=include_root_command)
text_renderables: list[Text] = []
for arg in args:
text_renderables.append(
Text(shlex.quote(str(arg)))
if arg != ValueNotSupplied()
else Text("???", style="bold black on red")
)
return Text(" ").join(text_renderables) |
Generates a string representing the CLI invocation as if typed directly into the
command line.
Returns:
A string representing the command invocation.
| to_cli_string | python | Textualize/trogon | trogon/run_command.py | https://github.com/Textualize/trogon/blob/master/trogon/run_command.py | MIT |
async def selected_command_changed(
self, event: Tree.NodeHighlighted[CommandSchema]
) -> None:
"""When we highlight a node in the CommandTree, the main body of the home page updates
to display a form specific to the highlighted command."""
await self._refresh_command_form(event.node) | When we highlight a node in the CommandTree, the main body of the home page updates
to display a form specific to the highlighted command. | selected_command_changed | python | Textualize/trogon | trogon/trogon.py | https://github.com/Textualize/trogon/blob/master/trogon/trogon.py | MIT |
def _update_command_description(self, command: CommandSchema) -> None:
"""Update the description of the command at the bottom of the sidebar
based on the currently selected node in the command tree."""
description_box = self.query_one("#home-command-description", Static)
description_text = command.docstring or ""
description_text = description_text.lstrip()
description_text = f"[b]{command.name}[/]\n{description_text}"
description_box.update(description_text) | Update the description of the command at the bottom of the sidebar
based on the currently selected node in the command tree. | _update_command_description | python | Textualize/trogon | trogon/trogon.py | https://github.com/Textualize/trogon/blob/master/trogon/trogon.py | MIT |
def _update_execution_string_preview(self) -> None:
"""Update the preview box showing the command string to be executed"""
command_name_syntax_style = self.get_component_rich_style("command-name-syntax")
prefix = Text(f"{self.click_app_name} ", command_name_syntax_style)
new_value = self.command_data.to_cli_string(include_root_command=False)
highlighted_new_value = Text.assemble(prefix, self.highlighter(new_value))
prompt_style = self.get_component_rich_style("prompt")
preview_string = Text.assemble(("$ ", prompt_style), highlighted_new_value)
self.query_one("#home-exec-preview-static", Static).update(preview_string) | Update the preview box showing the command string to be executed | _update_execution_string_preview | python | Textualize/trogon | trogon/trogon.py | https://github.com/Textualize/trogon/blob/master/trogon/trogon.py | MIT |
def __init__(self, title: TextType, message: TextType) -> None:
"""Initialise the dialog.
Args:
title: The title for the dialog.
message: The message to show.
"""
super().__init__()
self._title = title
self._message = message | Initialise the dialog.
Args:
title: The title for the dialog.
message: The message to show.
| __init__ | python | Textualize/trogon | trogon/widgets/about.py | https://github.com/Textualize/trogon/blob/master/trogon/widgets/about.py | MIT |
def compose(self) -> ComposeResult:
"""Compose the content of the modal dialog."""
with Vertical():
with Center():
yield Static(self._title, classes="spaced")
yield Static(self._message, id="message", classes="spaced")
with Center(classes="spaced"):
yield Button("OK", variant=self.button_style) | Compose the content of the modal dialog. | compose | python | Textualize/trogon | trogon/widgets/about.py | https://github.com/Textualize/trogon/blob/master/trogon/widgets/about.py | MIT |
def _form_changed(self) -> None:
"""Take the current state of the form and build a UserCommandData from it,
then post a FormChanged message"""
command_schema = self.command_schema
path_from_root = command_schema.path_from_root
# Sentinel root value to make constructing the tree a little easier.
parent_command_data = UserCommandData(
name=CommandName("_"), options=[], arguments=[]
)
root_command_data = parent_command_data
for command in path_from_root:
option_datas = []
# For each of the options in the schema for this command,
# lets grab the values the user has supplied for them in the form.
for option in command.options:
parameter_control = self.query_one(f"#{option.key}", ParameterControls)
value = parameter_control.get_values()
for v in value.values:
assert isinstance(v, tuple)
option_data = UserOptionData(option.name, v, option)
option_datas.append(option_data)
# Now do the same for the arguments
argument_datas = []
for argument in command.arguments:
form_control_widget = self.query_one(
f"#{argument.key}", ParameterControls
)
value = form_control_widget.get_values()
# This should only ever loop once since arguments can be multi-value but not multiple=True.
for v in value.values:
assert isinstance(v, tuple)
argument_data = UserArgumentData(argument.name, v, argument)
argument_datas.append(argument_data)
assert all(isinstance(option.value, tuple) for option in option_datas)
assert all(isinstance(argument.value, tuple) for argument in argument_datas)
command_data = UserCommandData(
name=command.name,
options=option_datas,
arguments=argument_datas,
parent=parent_command_data,
command_schema=command,
)
parent_command_data.subcommand = command_data
parent_command_data = command_data
# Trim the sentinel
root_command_data = root_command_data.subcommand
root_command_data.parent = None
self.post_message(self.Changed(root_command_data)) | Take the current state of the form and build a UserCommandData from it,
then post a FormChanged message | _form_changed | python | Textualize/trogon | trogon/widgets/form.py | https://github.com/Textualize/trogon/blob/master/trogon/widgets/form.py | MIT |
def apply_filter(self, filter_query: str) -> bool:
"""Show or hide this ParameterControls depending on whether it matches the filter query or not.
Args:
filter_query: The string to filter on.
Returns:
True if the filter matched (and the widget is visible).
"""
help_text = getattr(self.schema, "help", "") or ""
if not filter_query:
should_be_visible = True
self.display = should_be_visible
else:
name = self.schema.name
if isinstance(name, str):
# Argument names are strings; there's only one name
name_contains_query = filter_query in name.casefold()
should_be_visible = name_contains_query
else:
# Option names are lists since they can have multiple names (e.g. -v and --verbose)
name_contains_query = any(
filter_query in name.casefold() for name in self.schema.name
)
help_contains_query = filter_query in help_text.casefold()
should_be_visible = name_contains_query or help_contains_query
self.display = should_be_visible
# Update the highlighting of the help text
if help_text:
try:
help_label = self.query_one(".command-form-control-help-text", Static)
new_help_text = Text(help_text)
new_help_text.highlight_words(
filter_query.split(), "black on yellow", case_sensitive=False
)
help_label.update(new_help_text)
except NoMatches:
pass
return should_be_visible | Show or hide this ParameterControls depending on whether it matches the filter query or not.
Args:
filter_query: The string to filter on.
Returns:
True if the filter matched (and the widget is visible).
| apply_filter | python | Textualize/trogon | trogon/widgets/parameter_controls.py | https://github.com/Textualize/trogon/blob/master/trogon/widgets/parameter_controls.py | MIT |
def compose(self) -> ComposeResult:
"""Takes the schemas for each parameter of the current command, and converts it into a
form consisting of Textual widgets."""
schema = self.schema
name = schema.name
argument_type = schema.type
default = schema.default
help_text = getattr(schema, "help", "") or ""
multiple = schema.multiple
is_option = isinstance(schema, OptionSchema)
nargs = schema.nargs
label = self._make_command_form_control_label(
name, argument_type, is_option, schema.required, multiple=multiple
)
first_focus_control: Widget | None = (
None # The widget that will be focused when the form is focused.
)
# If there are N defaults, we render the "group" N times.
# Each group will contain `nargs` widgets.
with ControlGroupsContainer():
if not argument_type == click.BOOL:
yield Label(label, classes="command-form-label")
if isinstance(argument_type, click.Choice) and multiple:
# Display a MultipleChoice widget
# There's a special case where we have a Choice with multiple=True,
# in this case, we can just render a single MultipleChoice widget
# instead of multiple radio-sets.
control_method = self.get_control_method(argument_type)
multiple_choice_widget = control_method(
default=default,
label=label,
multiple=multiple,
schema=schema,
control_id=schema.key,
)
yield from multiple_choice_widget
else:
# For other widgets, we'll render as normal...
# If required, we'll generate widgets containing the defaults
for default_value_tuple in default.values:
widget_group = list(self.make_widget_group())
with ControlGroup() as control_group:
if len(widget_group) == 1:
control_group.add_class("single-item")
# Parameter types can be of length 1, but there could still
# be multiple defaults. We need to render a widget for each
# of those defaults. Extend the widget group such that
# there's a slot available for each default...
for default_value, control_widget in zip(
default_value_tuple, widget_group
):
self._apply_default_value(control_widget, default_value)
yield control_widget
# Keep track of the first control we render, for easy focus
if first_focus_control is None:
first_focus_control = control_widget
# We always need to display the original group of controls,
# regardless of whether there are defaults
if multiple or not default.values:
widget_group = list(self.make_widget_group())
with ControlGroup() as control_group:
if len(widget_group) == 1:
control_group.add_class("single-item")
# No need to apply defaults to this group
for control_widget in widget_group:
yield control_widget
if first_focus_control is None:
first_focus_control = control_widget
# Take note of the first form control, so we can easily focus it
if self.first_control is None:
self.first_control = first_focus_control
# If it's a multiple, and it's a Choice parameter, then we display
# our special case MultiChoice widget, and so there's no need for this
# button.
if (multiple or nargs == -1) and not isinstance(argument_type, click.Choice):
with Horizontal(classes="add-another-button-container"):
yield Button("+ value", variant="success", classes="add-another-button")
# Render the dim help text below the form controls
if help_text:
yield Static(help_text, classes="command-form-control-help-text") | Takes the schemas for each parameter of the current command and converts them into a
form consisting of Textual widgets. | compose | python | Textualize/trogon | trogon/widgets/parameter_controls.py | https://github.com/Textualize/trogon/blob/master/trogon/widgets/parameter_controls.py | MIT |
def make_widget_group(self) -> Iterable[ControlWidgetType]:
"""For this option, yield a single set of widgets required to receive user input for it."""
schema = self.schema
default = schema.default
parameter_type = schema.type
name = schema.name
multiple = schema.multiple
required = schema.required
is_option = isinstance(schema, OptionSchema)
label = self._make_command_form_control_label(
name, parameter_type, is_option, required, multiple
)
# Get the types of the parameter. We can map these types on to widgets that will be rendered.
parameter_types = (
parameter_type.types
if isinstance(parameter_type, click.Tuple)
else [parameter_type]
)
# For each of these parameters, render the corresponding widget for it.
# At this point we don't care about filling in the default values.
for _type in parameter_types:
control_method = self.get_control_method(_type)
control_widgets = control_method(
default, label, multiple, schema, schema.key
)
yield from control_widgets | For this option, yield a single set of widgets required to receive user input for it. | make_widget_group | python | Textualize/trogon | trogon/widgets/parameter_controls.py | https://github.com/Textualize/trogon/blob/master/trogon/widgets/parameter_controls.py | MIT |
def _apply_default_value(
control_widget: ControlWidgetType, default_value: Any
) -> None:
"""Set the default value of a parameter-handling widget."""
if isinstance(control_widget, Input):
control_widget.value = str(default_value)
control_widget.placeholder = f"{default_value} (default)"
elif isinstance(control_widget, Select):
control_widget.value = str(default_value)
control_widget.prompt = f"{default_value} (default)" | Set the default value of a parameter-handling widget. | _apply_default_value | python | Textualize/trogon | trogon/widgets/parameter_controls.py | https://github.com/Textualize/trogon/blob/master/trogon/widgets/parameter_controls.py | MIT |
def actions(self, state):
'actions are the indexes where we can make a move'
actions = []
for index, char in enumerate(state):
if char == '_':
actions.append(index)
return actions | actions are the indexes where we can make a move | actions | python | simpleai-team/simpleai | samples/machine_learning/tic_tac_toe.py | https://github.com/simpleai-team/simpleai/blob/master/samples/machine_learning/tic_tac_toe.py | MIT |
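A worked example of the encoding above: the game state is a 9-character string with '_' marking empty cells, so the available actions are just their indexes:

state = "x_o_x___o"  # board rows concatenated left to right, top to bottom
empty = [index for index, char in enumerate(state) if char == "_"]
print(empty)  # -> [1, 3, 5, 6, 7]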
def find_location(rows, element_to_find):
'''Find the location of a piece in the puzzle.
Returns a tuple: row, column'''
for ir, row in enumerate(rows):
for ic, element in enumerate(row):
if element == element_to_find:
return ir, ic | Find the location of a piece in the puzzle.
Returns a tuple: row, column | find_location | python | simpleai-team/simpleai | samples/search/eight_puzzle.py | https://github.com/simpleai-team/simpleai/blob/master/samples/search/eight_puzzle.py | MIT |
def actions(self, state):
'''Returns a list of the pieces we can move to the empty space.'''
rows = string_to_list(state)
row_e, col_e = find_location(rows, 'e')
actions = []
if row_e > 0:
actions.append(rows[row_e - 1][col_e])
if row_e < 2:
actions.append(rows[row_e + 1][col_e])
if col_e > 0:
actions.append(rows[row_e][col_e - 1])
if col_e < 2:
actions.append(rows[row_e][col_e + 1])
return actions | Returns a list of the pieces we can move to the empty space. | actions | python | simpleai-team/simpleai | samples/search/eight_puzzle.py | https://github.com/simpleai-team/simpleai/blob/master/samples/search/eight_puzzle.py | MIT |
def result(self, state, action):
'''Return the resulting state after moving a piece to the empty space.
(the "action" parameter contains the piece to move)
'''
rows = string_to_list(state)
row_e, col_e = find_location(rows, 'e')
row_n, col_n = find_location(rows, action)
rows[row_e][col_e], rows[row_n][col_n] = rows[row_n][col_n], rows[row_e][col_e]
return list_to_string(rows) | Return the resulting state after moving a piece to the empty space.
(the "action" parameter contains the piece to move)
| result | python | simpleai-team/simpleai | samples/search/eight_puzzle.py | https://github.com/simpleai-team/simpleai/blob/master/samples/search/eight_puzzle.py | MIT |
def heuristic(self, state):
'''Returns an *estimation* of the distance from a state to the goal.
We are using the Manhattan distance.
'''
rows = string_to_list(state)
distance = 0
for number in '12345678e':
row_n, col_n = find_location(rows, number)
row_n_goal, col_n_goal = goal_positions[number]
distance += abs(row_n - row_n_goal) + abs(col_n - col_n_goal)
return distance | Returns an *estimation* of the distance from a state to the goal.
We are using the Manhattan distance.
| heuristic | python | simpleai-team/simpleai | samples/search/eight_puzzle.py | https://github.com/simpleai-team/simpleai/blob/master/samples/search/eight_puzzle.py | MIT |
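A worked example of the Manhattan-distance heuristic above. string_to_list and goal_positions live elsewhere in the sample, so minimal stand-ins are defined here under that assumption (find_location is copied from the row above):

GOAL = '''1-2-3
4-5-6
7-8-e'''

def string_to_list(state):
    return [row.split('-') for row in state.split('\n')]

def find_location(rows, element_to_find):
    for ir, row in enumerate(rows):
        for ic, element in enumerate(row):
            if element == element_to_find:
                return ir, ic

goal_positions = {}
for ir, row in enumerate(string_to_list(GOAL)):
    for ic, element in enumerate(row):
        goal_positions[element] = (ir, ic)

state = '''1-2-3
4-5-6
7-e-8'''  # only '8' and 'e' are displaced
rows = string_to_list(state)
distance = 0
for number in '12345678e':
    row_n, col_n = find_location(rows, number)
    row_g, col_g = goal_positions[number]
    distance += abs(row_n - row_g) + abs(col_n - col_g)
print(distance)  # -> 2: each of '8' and 'e' is one step from home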
def result(self, s, a):
'''Result of applying an action to a state.'''
# result: boat on opposite side, and numbers of missionaries and
# cannibals updated according to the move
if s[2] == 0:
return (s[0] - a[1][0], s[1] - a[1][1], 1)
else:
return (s[0] + a[1][0], s[1] + a[1][1], 0) | Result of applying an action to a state. | result | python | simpleai-team/simpleai | samples/search/missioners.py | https://github.com/simpleai-team/simpleai/blob/master/samples/search/missioners.py | MIT |
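A worked example of the transition above, assuming the sample's state encoding (missionaries on the start bank, cannibals on the start bank, boat side) with 0 meaning the boat is on the start bank; the action payload a[1] is the (missionaries, cannibals) pair the boat carries:

state = (3, 3, 0)          # everyone and the boat on the start bank
action = ('1m1c', (1, 1))  # hypothetical action label; only a[1] matters here
# Boat on start bank (s[2] == 0): the crossing removes people from this bank.
new_state = (state[0] - action[1][0], state[1] - action[1][1], 1)
print(new_state)  # -> (2, 2, 1)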
def mkconstraints():
"""
Make constraint list for binary constraint problem.
"""
constraints = []
for j in range(1, 10):
vars = ["%s%d" % (i, j) for i in uppercase[:9]]
constraints.extend((c, const_different) for c in combinations(vars, 2))
for i in uppercase[:9]:
vars = ["%s%d" % (i, j) for j in range(1, 10)]
constraints.extend((c, const_different) for c in combinations(vars, 2))
for b0 in ['ABC', 'DEF', 'GHI']:
for b1 in [[1, 2, 3], [4, 5, 6], [7, 8, 9]]:
vars = ["%s%d" % (i, j) for i in b0 for j in b1]
l = list((c, const_different) for c in combinations(vars, 2))
constraints.extend(l)
return constraints |
Make constraint list for binary constraint problem.
| mkconstraints | python | simpleai-team/simpleai | samples/search/sudoku.py | https://github.com/simpleai-team/simpleai/blob/master/samples/search/sudoku.py | MIT |
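A small illustration of the pairwise all-different expansion used above; the sample's imports and the const_different predicate are assumed to be roughly as follows:

from itertools import combinations
from string import ascii_uppercase as uppercase

def const_different(variables, values):
    return values[0] != values[1]  # binary constraint over a pair of variables

row1 = ["%s%d" % (i, 1) for i in uppercase[:9]]  # A1..I1
pairs = list(combinations(row1, 2))
print(len(pairs))  # -> 36 binary constraints per unit (9 choose 2)
print(pairs[0])    # -> ('A1', 'B1')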
def step(self, viewer=None):
"This method evolves one step in time"
if not self.is_completed(self.state):
for agent in self.agents:
action = agent.program(self.percept(agent, self.state))
next_state = self.do_action(self.state, action, agent)
if viewer:
viewer.event(self.state, action, next_state, agent)
self.state = next_state
if self.is_completed(self.state):
return | This method evolves one step in time | step | python | simpleai-team/simpleai | simpleai/environments.py | https://github.com/simpleai-team/simpleai/blob/master/simpleai/environments.py | MIT |
def learn(self, examples, attributes, parent_examples):
"""
A decision tree learner that *strictly* follows the pseudocode given in
AIMA. In 3rd edition, see Figure 18.5, page 702.
"""
if not examples:
return self.plurality_value(parent_examples)
elif len(set(map(self.target, examples))) == 1:
return self.plurality_value(examples)
elif not attributes:
return self.plurality_value(examples)
A = max(attributes, key=lambda a: self.importance(a, examples))
tree = DecisionTreeNode(attribute=A)
for value in set(map(A, examples)):
exs = [e for e in examples if A(e) == value]
subtree = self.learn(exs, attributes - set([A]), examples)
tree.add_branch(value, subtree)
return tree |
A decision tree learner that *strictly* follows the pseudocode given in
AIMA. In 3rd edition, see Figure 18.5, page 702.
| learn | python | simpleai-team/simpleai | simpleai/machine_learning/classifiers.py | https://github.com/simpleai-team/simpleai/blob/master/simpleai/machine_learning/classifiers.py | MIT |
def importance(self, attribute, examples):
"""
AIMA implies that importance should be information gain.
Since AIMA only defines it for binary features, this implementation
was based on the Wikipedia article:
http://en.wikipedia.org/wiki/Information_gain_in_decision_trees
"""
gain_counter = OnlineInformationGain(attribute, self.target)
for example in examples:
gain_counter.add(example)
return gain_counter.get_gain() |
AIMA implies that importance should be information gain.
Since AIMA only defines it for binary features, this implementation
was based on the Wikipedia article:
http://en.wikipedia.org/wiki/Information_gain_in_decision_trees
| importance | python | simpleai-team/simpleai | simpleai/machine_learning/classifiers.py | https://github.com/simpleai-team/simpleai/blob/master/simpleai/machine_learning/classifiers.py | MIT |
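A short sketch of the information-gain computation referenced above, following the standard definition (gain = H(target) - sum over values v of p(v) * H(target | v)); OnlineInformationGain computes this incrementally, while this version is batch:

import math
from collections import Counter, defaultdict

def entropy(labels):
    total = len(labels)
    return -sum((n / total) * math.log2(n / total)
                for n in Counter(labels).values())

def information_gain(examples, attribute, target):
    by_value = defaultdict(list)
    for example in examples:
        by_value[attribute(example)].append(target(example))
    total = len(examples)
    remainder = sum(len(part) / total * entropy(part)
                    for part in by_value.values())
    return entropy([target(e) for e in examples]) - remainder

# Toy data: the first field perfectly predicts the second (the target).
data = [(0, 'a'), (0, 'a'), (1, 'b'), (1, 'b')]
print(information_gain(data, lambda e: e[0], lambda e: e[1]))  # -> 1.0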
def save(self, filepath):
"""
Saves the classifier to `filepath`.
Because this classifier needs to save the dataset, it must
be something that can be pickled and not something like an
iterator.
"""
if not filepath or not isinstance(filepath, str):
raise ValueError("Invalid filepath")
with open(filepath, "wb") as filehandler:
pickle.dump(self, filehandler) |
Saves the classifier to `filepath`.
Because this classifier needs to save the dataset, it must
be something that can be pickled and not something like an
iterator.
| save | python | simpleai-team/simpleai | simpleai/machine_learning/classifiers.py | https://github.com/simpleai-team/simpleai/blob/master/simpleai/machine_learning/classifiers.py | MIT |
def tree_to_str(root):
"""
Returns a string representation of a decision tree with
root node `root`.
"""
xs = []
for value, node, depth in iter_tree(root):
template = "{indent}"
if node is not root:
template += "case={value}\t"
if node.attribute is None:
template += "result={result} -- P={prob:.2}"
else:
template += "split by {split}:\t" +\
"(partial result={result} -- P={prob:.2})"
line = template.format(indent=" " * depth,
value=value,
result=node.result[0],
prob=node.result[1],
split=str(node.attribute))
xs.append(line)
return "\n".join(xs) |
Returns a string representation of a decision tree with
root node `root`.
| tree_to_str | python | simpleai-team/simpleai | simpleai/machine_learning/classifiers.py | https://github.com/simpleai-team/simpleai/blob/master/simpleai/machine_learning/classifiers.py | MIT |
def take_branch(self, example):
"""
Returns a `DecisionTreeNode` instance that can better classify
`example` based on the selector's value.
If there are no more branches (i.e., this node is a leaf) or the
attribute gives a value for a nonexistent branch, this method
returns None.
"""
if self.attribute is None:
return None
value = self.attribute(example)
return self.branches.get(value, None) |
Returns a `DecisionTreeNode` instance that can better classify
`example` based on the selector's value.
If there are no more branches (i.e., this node is a leaf) or the
attribute gives a value for a nonexistent branch, this method
returns None.
| take_branch | python | simpleai-team/simpleai | simpleai/machine_learning/classifiers.py | https://github.com/simpleai-team/simpleai/blob/master/simpleai/machine_learning/classifiers.py | MIT |
def _max_gain_split(self, examples):
"""
Returns an OnlineInformationGain of the attribute with
max gain based on `examples`.
"""
gains = self._new_set_of_gain_counters()
for example in examples:
for gain in gains:
gain.add(example)
winner = max(gains, key=lambda gain: gain.get_gain())
if not winner.get_target_class_counts():
raise ValueError("Dataset is empty")
return winner |
Returns an OnlineInformationGain of the attribute with
max gain based on `examples`.
| _max_gain_split | python | simpleai-team/simpleai | simpleai/machine_learning/classifiers.py | https://github.com/simpleai-team/simpleai/blob/master/simpleai/machine_learning/classifiers.py | MIT |
def _new_set_of_gain_counters(self):
"""
Creates a new set of OnlineInformationGain objects
for each attribute.
"""
return [OnlineInformationGain(attribute, self.target)
for attribute in self.attributes] |
Creates a new set of OnlineInformationGain objects
for each attribute.
| _new_set_of_gain_counters | python | simpleai-team/simpleai | simpleai/machine_learning/classifiers.py | https://github.com/simpleai-team/simpleai/blob/master/simpleai/machine_learning/classifiers.py | MIT |
def precision(classifier, testset):
"""
Runs the classifier for each example in `testset`
and verifies that the classification is correct
using the `target`.
Returns a number between 0.0 and 1.0 with the
precision of classification for this test set.
"""
hit = 0
total = 0
for example in testset:
if classifier.classify(example)[0] == classifier.target(example):
hit += 1
total += 1
if total == 0:
raise ValueError("Empty testset!")
return hit / float(total) |
Runs the classifier for each example in `testset`
and verifies that the classification is correct
using the `target`.
Returns a number between 0.0 and 1.0 with the
precision of classification for this test set.
| precision | python | simpleai-team/simpleai | simpleai/machine_learning/evaluation.py | https://github.com/simpleai-team/simpleai/blob/master/simpleai/machine_learning/evaluation.py | MIT |
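Note that the metric above is what is usually called accuracy (hits / total). A tiny usage example against the precision function above, with a deliberately simple stub classifier (hypothetical names):

class AlwaysYes:
    target = staticmethod(lambda example: example[-1])
    def classify(self, example):
        return ('yes', 1.0)  # (label, certainty), matching the API above

testset = [(1, 'yes'), (2, 'yes'), (3, 'no'), (4, 'yes')]
print(precision(AlwaysYes(), testset))  # -> 0.75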
def kfold(dataset, problem, method, k=10):
"""
Does a k-fold on `dataset` with `method`.
That is, it randomly creates k partitions of the dataset and, k times,
trains the method on k-1 parts and tests it on the partition left out.
After all this, returns the overall success ratio.
"""
if k <= 1:
raise ValueError("k argument must be at least 2")
dataset = list(dataset)
random.shuffle(dataset)
trials = 0
positive = 0
for i in range(k):
train = [x for j, x in enumerate(dataset) if j % k != i]
test = [x for j, x in enumerate(dataset) if j % k == i]
classifier = method(train, problem)
for data in test:
trials += 1
result = classifier.classify(data)
if result is not None and result[0] == problem.target(data):
positive += 1
return float(positive) / float(trials) |
Does a k-fold on `dataset` with `method`.
That is, it randomly creates k partitions of the dataset and, k times,
trains the method on k-1 parts and tests it on the partition left out.
After all this, returns the overall success ratio.
| kfold | python | simpleai-team/simpleai | simpleai/machine_learning/evaluation.py | https://github.com/simpleai-team/simpleai/blob/master/simpleai/machine_learning/evaluation.py | MIT |
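An illustration of the j % k partitioning used by kfold above: across the k rounds, every example lands in the test fold exactly once:

dataset = list(range(10))
k = 5
for i in range(k):
    test = [x for j, x in enumerate(dataset) if j % k == i]
    print(i, test)
# round 0 tests [0, 5], round 1 tests [1, 6], ..., covering each example once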
def save(self, filepath):
"""
Pickles the tree and saves it into `filepath`
"""
if not filepath or not isinstance(filepath, str):
raise ValueError("Invalid filepath")
# Removes dataset so is not saved in the pickle
self.dataset = None
with open(filepath, "wb") as filehandler:
pickle.dump(self, filehandler) |
Pickles the tree and saves it into `filepath`
| save | python | simpleai-team/simpleai | simpleai/machine_learning/models.py | https://github.com/simpleai-team/simpleai/blob/master/simpleai/machine_learning/models.py | MIT |
def load(cls, filepath):
"""
Loads a pickled version of the classifier saved in `filepath`
"""
with open(filepath, "rb") as filehandler:
classifier = pickle.load(filehandler)
if not isinstance(classifier, Classifier):
raise ValueError("Pickled object is not a Classifier")
return classifier |
Loads a pickled version of the classifier saved in `filepath`
| load | python | simpleai-team/simpleai | simpleai/machine_learning/models.py | https://github.com/simpleai-team/simpleai/blob/master/simpleai/machine_learning/models.py | MIT |
def __init__(self, dataset, target_index):
"""
`dataset` should be an iterable, *not* an iterator.
`target_index` is the index in the vector where the classification
of an example is defined.
"""
super(VectorDataClassificationProblem, self).__init__()
try:
example = next(iter(dataset))
except StopIteration:
raise ValueError("Dataset is empty")
self.target_index = target_index
N = len(example)
if self.target_index < 0: # Negative number allowed, counts in reverse
self.target_index = N + self.target_index
if self.target_index < 0 or N <= self.target_index:
raise ValueError("Target index is out of range")
for i in range(N):
if i == self.target_index:
continue
attribute = VectorIndexAttribute(i, "data at index {}".format(i))
self.attributes.append(attribute) |
`dataset` should be an iterable, *not* an iterator.
`target_index` is the index in the vector where the classification
of an example is defined.
| __init__ | python | simpleai-team/simpleai | simpleai/machine_learning/models.py | https://github.com/simpleai-team/simpleai/blob/master/simpleai/machine_learning/models.py | MIT |