# NOTE(review): the lines below were a dataset-export artifact (a table header
# reading "python_code | repo_name | file_path" with column-width metadata),
# not part of the original source file. Preserved here as a comment so the
# module remains valid Python.
# python_code stringlengths 0..679k | repo_name stringlengths 9..41
# | file_path stringlengths 6..149 |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia/clara/platform/common.proto
import sys
# Py2/Py3 shim: on Python 2 `_b` is the identity (str literals are already
# bytes); on Python 3 it latin-1-encodes the literal so every \xNN escape
# maps to the same byte value and the serialized descriptor round-trips.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# File-level descriptor for nvidia/clara/platform/common.proto.
# `serialized_pb` is the wire-format FileDescriptorProto; the per-message
# serialized_start/serialized_end offsets below index into this byte string,
# so none of these literals may be altered by hand (generated code).
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia/clara/platform/common.proto',
package='nvidia.clara.platform',
syntax='proto3',
serialized_options=_b('\n\031com.nvidia.clara.platformZ\004apis\252\002\032Nvidia.Clara.Platform.Grpc'),
serialized_pb=_b('\n\"nvidia/clara/platform/common.proto\x12\x15nvidia.clara.platform\"\x1b\n\nIdentifier\x12\r\n\x05value\x18\x01 \x01(\t\"E\n\x07Version\x12\r\n\x05major\x18\x01 \x01(\x05\x12\r\n\x05minor\x18\x02 \x01(\x05\x12\r\n\x05patch\x18\x03 \x01(\x05\x12\r\n\x05label\x18\x04 \x01(\t\"X\n\rRequestHeader\x12\x33\n\x0b\x61pi_version\x18\x01 \x01(\x0b\x32\x1e.nvidia.clara.platform.Version\x12\x12\n\nuser_agent\x18\x02 \x01(\t\"0\n\x0eResponseHeader\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x11\x12\x10\n\x08messages\x18\x02 \x03(\t\"\x1a\n\tTimestamp\x12\r\n\x05value\x18\x01 \x01(\x12\x42>\n\x19\x63om.nvidia.clara.platformZ\x04\x61pis\xaa\x02\x1aNvidia.Clara.Platform.Grpcb\x06proto3')
)
# Descriptor for message `Identifier`: a single `value` field
# (field number 1, type 9 = TYPE_STRING, label 1 = optional).
# serialized_start/end are byte offsets into DESCRIPTOR.serialized_pb.
_IDENTIFIER = _descriptor.Descriptor(
name='Identifier',
full_name='nvidia.clara.platform.Identifier',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='nvidia.clara.platform.Identifier.value', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=61,
serialized_end=88,
)
# Descriptor for message `Version`: three int32 fields (type 5 = TYPE_INT32)
# `major`/`minor`/`patch` plus a string `label` (type 9), fields 1-4.
_VERSION = _descriptor.Descriptor(
name='Version',
full_name='nvidia.clara.platform.Version',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='major', full_name='nvidia.clara.platform.Version.major', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='minor', full_name='nvidia.clara.platform.Version.minor', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='patch', full_name='nvidia.clara.platform.Version.patch', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='label', full_name='nvidia.clara.platform.Version.label', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=90,
serialized_end=159,
)
# Descriptor for message `RequestHeader`: `api_version` (field 1,
# type 11 = TYPE_MESSAGE; its message_type is patched to _VERSION after all
# descriptors exist) and `user_agent` (field 2, string).
_REQUESTHEADER = _descriptor.Descriptor(
name='RequestHeader',
full_name='nvidia.clara.platform.RequestHeader',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='api_version', full_name='nvidia.clara.platform.RequestHeader.api_version', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='user_agent', full_name='nvidia.clara.platform.RequestHeader.user_agent', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=161,
serialized_end=249,
)
# Descriptor for message `ResponseHeader`: `code` (field 1,
# type 17 = TYPE_SINT32) and repeated string `messages` (field 2,
# label 3 = repeated).
_RESPONSEHEADER = _descriptor.Descriptor(
name='ResponseHeader',
full_name='nvidia.clara.platform.ResponseHeader',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='nvidia.clara.platform.ResponseHeader.code', index=0,
number=1, type=17, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='messages', full_name='nvidia.clara.platform.ResponseHeader.messages', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=251,
serialized_end=299,
)
# Descriptor for message `Timestamp`: a single `value` field
# (field 1, type 18 = TYPE_SINT64).
_TIMESTAMP = _descriptor.Descriptor(
name='Timestamp',
full_name='nvidia.clara.platform.Timestamp',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='nvidia.clara.platform.Timestamp.value', index=0,
number=1, type=18, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=301,
serialized_end=327,
)
# Resolve the cross-reference the flat constructors above could not express
# (RequestHeader.api_version is a Version message), attach every message
# descriptor to the file descriptor, and register the file with the default
# symbol database so it is discoverable by full name.
_REQUESTHEADER.fields_by_name['api_version'].message_type = _VERSION
DESCRIPTOR.message_types_by_name['Identifier'] = _IDENTIFIER
DESCRIPTOR.message_types_by_name['Version'] = _VERSION
DESCRIPTOR.message_types_by_name['RequestHeader'] = _REQUESTHEADER
DESCRIPTOR.message_types_by_name['ResponseHeader'] = _RESPONSEHEADER
DESCRIPTOR.message_types_by_name['Timestamp'] = _TIMESTAMP
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Concrete message classes, synthesized at import time from the descriptors
# by the GeneratedProtocolMessageType metaclass, then registered with the
# symbol database. These are the public names of this module.
Identifier = _reflection.GeneratedProtocolMessageType('Identifier', (_message.Message,), dict(
DESCRIPTOR = _IDENTIFIER,
__module__ = 'nvidia.clara.platform.common_pb2'
# @@protoc_insertion_point(class_scope:nvidia.clara.platform.Identifier)
))
_sym_db.RegisterMessage(Identifier)
Version = _reflection.GeneratedProtocolMessageType('Version', (_message.Message,), dict(
DESCRIPTOR = _VERSION,
__module__ = 'nvidia.clara.platform.common_pb2'
# @@protoc_insertion_point(class_scope:nvidia.clara.platform.Version)
))
_sym_db.RegisterMessage(Version)
RequestHeader = _reflection.GeneratedProtocolMessageType('RequestHeader', (_message.Message,), dict(
DESCRIPTOR = _REQUESTHEADER,
__module__ = 'nvidia.clara.platform.common_pb2'
# @@protoc_insertion_point(class_scope:nvidia.clara.platform.RequestHeader)
))
_sym_db.RegisterMessage(RequestHeader)
ResponseHeader = _reflection.GeneratedProtocolMessageType('ResponseHeader', (_message.Message,), dict(
DESCRIPTOR = _RESPONSEHEADER,
__module__ = 'nvidia.clara.platform.common_pb2'
# @@protoc_insertion_point(class_scope:nvidia.clara.platform.ResponseHeader)
))
_sym_db.RegisterMessage(ResponseHeader)
Timestamp = _reflection.GeneratedProtocolMessageType('Timestamp', (_message.Message,), dict(
DESCRIPTOR = _TIMESTAMP,
__module__ = 'nvidia.clara.platform.common_pb2'
# @@protoc_insertion_point(class_scope:nvidia.clara.platform.Timestamp)
))
_sym_db.RegisterMessage(Timestamp)
# Clear parsed file options; consumers read serialized_options instead.
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
# ---- dataset-export separator: end of nvidia_clara/grpc/common_pb2.py ----
# ---- (repo: clara-platform-python-client-main); the code below is the  ----
# ---- next file of the export: nvidia_clara/grpc/pipelines_pb2.py       ----
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia/clara/platform/pipelines.proto
import sys
# Py2/Py3 shim (same as common_pb2): latin-1-encode string literals on
# Python 3 so the \xNN escapes map one-to-one onto bytes.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# pipelines.proto publicly imports common.proto, so its generated symbols
# are re-exported from this module via the star import below.
from nvidia_clara.grpc import common_pb2 as nvidia_dot_clara_dot_platform_dot_common__pb2
from nvidia_clara.grpc.common_pb2 import *
# File-level descriptor for nvidia/clara/platform/pipelines.proto.
# FIX: the `serialized_pb` byte-string literal had been split across several
# physical lines (a syntax error introduced when this file was exported); the
# fragments are rejoined here byte-for-byte using Python's implicit adjacent
# string-literal concatenation, so the serialized FileDescriptorProto — and
# every serialized_start/serialized_end offset below that indexes into it —
# is unchanged.
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia/clara/platform/pipelines.proto',
package='nvidia.clara.platform',
syntax='proto3',
serialized_options=_b('\n\031com.nvidia.clara.platformZ\004apis\252\002\032Nvidia.Clara.Platform.Grpc'),
serialized_pb=_b(
'\n%nvidia/clara/platform/pipelines.proto\x12\x15nvidia.clara.platform\x1a\"nvidia/clara/platform/common.proto\"7\n\x16PipelineDefinitionFile\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\x0f\n\x07\x63ontent\x18\x02 \x01(\t\"\x90\x02\n\x1bPipelinesAddMetadataRequest\x12\x34\n\x06header\x18\x01 \x01(\x0b\x32$.nvidia.clara.platform.RequestHeader\x12\x36\n\x0bpipeline_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12R\n\x08metadata\x18\x03 \x03(\x0b\x32@.nvidia.clara.platform.PipelinesAddMetadataRequest.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x93\x02\n\x1cPipelinesAddMetadataResponse\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.nvidia.clara.platform.ResponseHeader\x12\x36\n\x0bpipeline_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12S\n\x08metadata\x18\x03 \x03(\x0b\x32\x41.nvidia.clara.platform.PipelinesAddMetadataResponse.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xc9\x02\n\x16PipelinesCreateRequest\x12\x34\n\x06header\x18\x01 \x01(\x0b\x32$.nvidia.clara.platform.RequestHeader\x12\x41\n\ndefinition\x18\x02 \x01(\x0b\x32-.nvidia.clara.platform.PipelineDefinitionFile\x12\x36\n\x0bpipeline_id\x18\x03 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12M\n\x08metadata\x18\x04 \x03(\x0b\x32;.nvidia.clara.platform.PipelinesCreateRequest.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x88\x01\n\x17PipelinesCreateResponse\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.nvidia.clara.platform.ResponseHeader\x12\x36\n\x0bpipeline_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\"\x87\x01\n\x17PipelinesDetailsRequest\x12\x34\n\x06header\x18\x01 \x01(\x0b\x32$.nvidia.clara.platform.RequestHeader\x12\x36\n\x0bpipeline_id\x18\x02 '
'\x01(\x0b\x32!.nvidia.clara.platform.Identifier\"\x9a\x04\n\x18PipelinesDetailsResponse\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.nvidia.clara.platform.ResponseHeader\x12\x36\n\x0bpipeline_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12\x0c\n\x04name\x18\x03 \x01(\t\x12\x41\n\ndefinition\x18\x04 \x01(\x0b\x32-.nvidia.clara.platform.PipelineDefinitionFile\x12L\n\x03\x64\x61g\x18\x05 \x03(\x0b\x32?.nvidia.clara.platform.PipelinesDetailsResponse.PipelineDagNode\x12O\n\x08metadata\x18\x06 \x03(\x0b\x32=.nvidia.clara.platform.PipelinesDetailsResponse.MetadataEntry\x1an\n\x0fPipelineDagNode\x12\x0c\n\x04name\x18\x01 \x01(\t\x12M\n\x04next\x18\x02 \x03(\x0b\x32?.nvidia.clara.platform.PipelinesDetailsResponse.PipelineDagNode\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"L\n\x14PipelinesListRequest\x12\x34\n\x06header\x18\x01 \x01(\x0b\x32$.nvidia.clara.platform.RequestHeader\"\x86\x03\n\x15PipelinesListResponse\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.nvidia.clara.platform.ResponseHeader\x12M\n\x07\x64\x65tails\x18\x02 \x01(\x0b\x32<.nvidia.clara.platform.PipelinesListResponse.PipelineDetails\x1a\xe6\x01\n\x0fPipelineDetails\x12\x36\n\x0bpipeline_id\x18\x01 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\\\n\x08metadata\x18\x03 \x03(\x0b\x32J.nvidia.clara.platform.PipelinesListResponse.PipelineDetails.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x9c\x01\n\x1ePipelinesRemoveMetadataRequest\x12\x34\n\x06header\x18\x01 \x01(\x0b\x32$.nvidia.clara.platform.RequestHeader\x12\x36\n\x0bpipeline_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12\x0c\n\x04keys\x18\x03 \x03(\t\"\x99\x02\n\x1fPipelinesRemoveMetadataResponse\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.nvidia.clara.platform.ResponseHeader\x12\x36\n\x0bpipeline_id\x18\x02 '
'\x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12V\n\x08metadata\x18\x03 \x03(\x0b\x32\x44.nvidia.clara.platform.PipelinesRemoveMetadataResponse.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x86\x01\n\x16PipelinesRemoveRequest\x12\x34\n\x06header\x18\x01 \x01(\x0b\x32$.nvidia.clara.platform.RequestHeader\x12\x36\n\x0bpipeline_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\"P\n\x17PipelinesRemoveResponse\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.nvidia.clara.platform.ResponseHeader\"\xc9\x01\n\x16PipelinesUpdateRequest\x12\x34\n\x06header\x18\x01 \x01(\x0b\x32$.nvidia.clara.platform.RequestHeader\x12\x36\n\x0bpipeline_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12\x41\n\ndefinition\x18\x03 \x01(\x0b\x32-.nvidia.clara.platform.PipelineDefinitionFile\"P\n\x17PipelinesUpdateResponse\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.nvidia.clara.platform.ResponseHeader2\x96\x06\n\tPipelines\x12v\n\x0b\x41\x64\x64Metadata\x12\x32.nvidia.clara.platform.PipelinesAddMetadataRequest\x1a\x33.nvidia.clara.platform.PipelinesAddMetadataResponse\x12i\n\x06\x43reate\x12-.nvidia.clara.platform.PipelinesCreateRequest\x1a..nvidia.clara.platform.PipelinesCreateResponse(\x01\x12l\n\x07\x44\x65tails\x12..nvidia.clara.platform.PipelinesDetailsRequest\x1a/.nvidia.clara.platform.PipelinesDetailsResponse0\x01\x12\x63\n\x04List\x12+.nvidia.clara.platform.PipelinesListRequest\x1a,.nvidia.clara.platform.PipelinesListResponse0\x01\x12g\n\x06Remove\x12-.nvidia.clara.platform.PipelinesRemoveRequest\x1a..nvidia.clara.platform.PipelinesRemoveResponse\x12\x7f\n\x0eRemoveMetadata\x12\x35.nvidia.clara.platform.PipelinesRemoveMetadataRequest\x1a\x36.nvidia.clara.platform.PipelinesRemoveMetadataResponse\x12i\n\x06Update\x12-.nvidia.clara.platform.PipelinesUpdateRequest\x1a..nvidia.clara.platform.PipelinesUpdateResponse(\x01\x42>\n\x19\x63om.nvidia.clara.platformZ\x04\x61pis\xaa\x02\x1aNvidia.Clara.Platform.Grpc'
'P\x00\x62\x06proto3')
,
dependencies=[nvidia_dot_clara_dot_platform_dot_common__pb2.DESCRIPTOR,],
public_dependencies=[nvidia_dot_clara_dot_platform_dot_common__pb2.DESCRIPTOR,])
# Descriptor for message `PipelineDefinitionFile`: two string fields,
# `path` (field 1) and `content` (field 2).
_PIPELINEDEFINITIONFILE = _descriptor.Descriptor(
name='PipelineDefinitionFile',
full_name='nvidia.clara.platform.PipelineDefinitionFile',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='path', full_name='nvidia.clara.platform.PipelineDefinitionFile.path', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='content', full_name='nvidia.clara.platform.PipelineDefinitionFile.content', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=100,
serialized_end=155,
)
# Synthetic map-entry descriptor for PipelinesAddMetadataRequest.metadata
# (map<string, string>); serialized_options _b('8\001') is map_entry=true.
_PIPELINESADDMETADATAREQUEST_METADATAENTRY = _descriptor.Descriptor(
name='MetadataEntry',
full_name='nvidia.clara.platform.PipelinesAddMetadataRequest.MetadataEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='nvidia.clara.platform.PipelinesAddMetadataRequest.MetadataEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='nvidia.clara.platform.PipelinesAddMetadataRequest.MetadataEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=383,
serialized_end=430,
)
# Descriptor for message `PipelinesAddMetadataRequest`: `header` (field 1,
# RequestHeader), `pipeline_id` (field 2, Identifier) and the repeated
# `metadata` map entries (field 3); message_type links are patched later.
_PIPELINESADDMETADATAREQUEST = _descriptor.Descriptor(
name='PipelinesAddMetadataRequest',
full_name='nvidia.clara.platform.PipelinesAddMetadataRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.PipelinesAddMetadataRequest.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pipeline_id', full_name='nvidia.clara.platform.PipelinesAddMetadataRequest.pipeline_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metadata', full_name='nvidia.clara.platform.PipelinesAddMetadataRequest.metadata', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_PIPELINESADDMETADATAREQUEST_METADATAENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=158,
serialized_end=430,
)
# Synthetic map-entry descriptor for PipelinesAddMetadataResponse.metadata.
# NOTE(review): serialized_start/end (383/430) duplicate the AddMetadata-
# Request entry's range even though this entry lives inside a message at
# 433-708 — looks like an export artifact; confirm against a fresh protoc run.
_PIPELINESADDMETADATARESPONSE_METADATAENTRY = _descriptor.Descriptor(
name='MetadataEntry',
full_name='nvidia.clara.platform.PipelinesAddMetadataResponse.MetadataEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='nvidia.clara.platform.PipelinesAddMetadataResponse.MetadataEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='nvidia.clara.platform.PipelinesAddMetadataResponse.MetadataEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=383,
serialized_end=430,
)
# Descriptor for message `PipelinesAddMetadataResponse`: `header` (field 1,
# ResponseHeader), `pipeline_id` (field 2, Identifier) and the repeated
# `metadata` map entries (field 3).
_PIPELINESADDMETADATARESPONSE = _descriptor.Descriptor(
name='PipelinesAddMetadataResponse',
full_name='nvidia.clara.platform.PipelinesAddMetadataResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.PipelinesAddMetadataResponse.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pipeline_id', full_name='nvidia.clara.platform.PipelinesAddMetadataResponse.pipeline_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metadata', full_name='nvidia.clara.platform.PipelinesAddMetadataResponse.metadata', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_PIPELINESADDMETADATARESPONSE_METADATAENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=433,
serialized_end=708,
)
# Synthetic map-entry descriptor for PipelinesCreateRequest.metadata.
# NOTE(review): serialized_start/end duplicate the first entry's 383/430
# range — likely an export artifact; confirm against a fresh protoc run.
_PIPELINESCREATEREQUEST_METADATAENTRY = _descriptor.Descriptor(
name='MetadataEntry',
full_name='nvidia.clara.platform.PipelinesCreateRequest.MetadataEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='nvidia.clara.platform.PipelinesCreateRequest.MetadataEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='nvidia.clara.platform.PipelinesCreateRequest.MetadataEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=383,
serialized_end=430,
)
# Descriptor for message `PipelinesCreateRequest`: `header` (field 1),
# `definition` (field 2, PipelineDefinitionFile), `pipeline_id` (field 3,
# Identifier) and repeated `metadata` map entries (field 4).
_PIPELINESCREATEREQUEST = _descriptor.Descriptor(
name='PipelinesCreateRequest',
full_name='nvidia.clara.platform.PipelinesCreateRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.PipelinesCreateRequest.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='definition', full_name='nvidia.clara.platform.PipelinesCreateRequest.definition', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pipeline_id', full_name='nvidia.clara.platform.PipelinesCreateRequest.pipeline_id', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metadata', full_name='nvidia.clara.platform.PipelinesCreateRequest.metadata', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_PIPELINESCREATEREQUEST_METADATAENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=711,
serialized_end=1040,
)
# Descriptor for message `PipelinesCreateResponse`: `header` (field 1,
# ResponseHeader) and `pipeline_id` (field 2, Identifier).
_PIPELINESCREATERESPONSE = _descriptor.Descriptor(
name='PipelinesCreateResponse',
full_name='nvidia.clara.platform.PipelinesCreateResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.PipelinesCreateResponse.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pipeline_id', full_name='nvidia.clara.platform.PipelinesCreateResponse.pipeline_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1043,
serialized_end=1179,
)
# Descriptor for message `PipelinesDetailsRequest`: `header` (field 1,
# RequestHeader) and `pipeline_id` (field 2, Identifier).
_PIPELINESDETAILSREQUEST = _descriptor.Descriptor(
name='PipelinesDetailsRequest',
full_name='nvidia.clara.platform.PipelinesDetailsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.PipelinesDetailsRequest.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pipeline_id', full_name='nvidia.clara.platform.PipelinesDetailsRequest.pipeline_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1182,
serialized_end=1317,
)
# Nested descriptor for PipelinesDetailsResponse.PipelineDagNode:
# `name` (field 1, string) and the self-referential repeated `next`
# (field 2, message) — its message_type is patched to this same
# descriptor after construction, forming the DAG edges.
_PIPELINESDETAILSRESPONSE_PIPELINEDAGNODE = _descriptor.Descriptor(
name='PipelineDagNode',
full_name='nvidia.clara.platform.PipelinesDetailsResponse.PipelineDagNode',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='nvidia.clara.platform.PipelinesDetailsResponse.PipelineDagNode.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='next', full_name='nvidia.clara.platform.PipelinesDetailsResponse.PipelineDagNode.next', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1699,
serialized_end=1809,
)
# Synthetic map-entry descriptor for PipelinesDetailsResponse.metadata.
# NOTE(review): serialized_start/end duplicate the first entry's 383/430
# range — likely an export artifact; confirm against a fresh protoc run.
_PIPELINESDETAILSRESPONSE_METADATAENTRY = _descriptor.Descriptor(
name='MetadataEntry',
full_name='nvidia.clara.platform.PipelinesDetailsResponse.MetadataEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='nvidia.clara.platform.PipelinesDetailsResponse.MetadataEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='nvidia.clara.platform.PipelinesDetailsResponse.MetadataEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=383,
serialized_end=430,
)
# Descriptor for message `PipelinesDetailsResponse`: `header` (field 1),
# `pipeline_id` (field 2), `name` (field 3, string), `definition` (field 4,
# PipelineDefinitionFile), repeated `dag` nodes (field 5) and repeated
# `metadata` map entries (field 6). Nested types: PipelineDagNode and
# MetadataEntry, declared above.
_PIPELINESDETAILSRESPONSE = _descriptor.Descriptor(
name='PipelinesDetailsResponse',
full_name='nvidia.clara.platform.PipelinesDetailsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.PipelinesDetailsResponse.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pipeline_id', full_name='nvidia.clara.platform.PipelinesDetailsResponse.pipeline_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='nvidia.clara.platform.PipelinesDetailsResponse.name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='definition', full_name='nvidia.clara.platform.PipelinesDetailsResponse.definition', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dag', full_name='nvidia.clara.platform.PipelinesDetailsResponse.dag', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metadata', full_name='nvidia.clara.platform.PipelinesDetailsResponse.metadata', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_PIPELINESDETAILSRESPONSE_PIPELINEDAGNODE, _PIPELINESDETAILSRESPONSE_METADATAENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1320,
serialized_end=1858,
)
_PIPELINESLISTREQUEST = _descriptor.Descriptor(
name='PipelinesListRequest',
full_name='nvidia.clara.platform.PipelinesListRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.PipelinesListRequest.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1860,
serialized_end=1936,
)
_PIPELINESLISTRESPONSE_PIPELINEDETAILS_METADATAENTRY = _descriptor.Descriptor(
name='MetadataEntry',
full_name='nvidia.clara.platform.PipelinesListResponse.PipelineDetails.MetadataEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='nvidia.clara.platform.PipelinesListResponse.PipelineDetails.MetadataEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='nvidia.clara.platform.PipelinesListResponse.PipelineDetails.MetadataEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=383,
serialized_end=430,
)
_PIPELINESLISTRESPONSE_PIPELINEDETAILS = _descriptor.Descriptor(
name='PipelineDetails',
full_name='nvidia.clara.platform.PipelinesListResponse.PipelineDetails',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pipeline_id', full_name='nvidia.clara.platform.PipelinesListResponse.PipelineDetails.pipeline_id', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='nvidia.clara.platform.PipelinesListResponse.PipelineDetails.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metadata', full_name='nvidia.clara.platform.PipelinesListResponse.PipelineDetails.metadata', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_PIPELINESLISTRESPONSE_PIPELINEDETAILS_METADATAENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2099,
serialized_end=2329,
)
_PIPELINESLISTRESPONSE = _descriptor.Descriptor(
name='PipelinesListResponse',
full_name='nvidia.clara.platform.PipelinesListResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.PipelinesListResponse.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='details', full_name='nvidia.clara.platform.PipelinesListResponse.details', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_PIPELINESLISTRESPONSE_PIPELINEDETAILS, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1939,
serialized_end=2329,
)
_PIPELINESREMOVEMETADATAREQUEST = _descriptor.Descriptor(
name='PipelinesRemoveMetadataRequest',
full_name='nvidia.clara.platform.PipelinesRemoveMetadataRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.PipelinesRemoveMetadataRequest.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pipeline_id', full_name='nvidia.clara.platform.PipelinesRemoveMetadataRequest.pipeline_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='keys', full_name='nvidia.clara.platform.PipelinesRemoveMetadataRequest.keys', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2332,
serialized_end=2488,
)
_PIPELINESREMOVEMETADATARESPONSE_METADATAENTRY = _descriptor.Descriptor(
name='MetadataEntry',
full_name='nvidia.clara.platform.PipelinesRemoveMetadataResponse.MetadataEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='nvidia.clara.platform.PipelinesRemoveMetadataResponse.MetadataEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='nvidia.clara.platform.PipelinesRemoveMetadataResponse.MetadataEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=383,
serialized_end=430,
)
_PIPELINESREMOVEMETADATARESPONSE = _descriptor.Descriptor(
name='PipelinesRemoveMetadataResponse',
full_name='nvidia.clara.platform.PipelinesRemoveMetadataResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.PipelinesRemoveMetadataResponse.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pipeline_id', full_name='nvidia.clara.platform.PipelinesRemoveMetadataResponse.pipeline_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metadata', full_name='nvidia.clara.platform.PipelinesRemoveMetadataResponse.metadata', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_PIPELINESREMOVEMETADATARESPONSE_METADATAENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2491,
serialized_end=2772,
)
_PIPELINESREMOVEREQUEST = _descriptor.Descriptor(
name='PipelinesRemoveRequest',
full_name='nvidia.clara.platform.PipelinesRemoveRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.PipelinesRemoveRequest.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pipeline_id', full_name='nvidia.clara.platform.PipelinesRemoveRequest.pipeline_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2775,
serialized_end=2909,
)
_PIPELINESREMOVERESPONSE = _descriptor.Descriptor(
name='PipelinesRemoveResponse',
full_name='nvidia.clara.platform.PipelinesRemoveResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.PipelinesRemoveResponse.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2911,
serialized_end=2991,
)
_PIPELINESUPDATEREQUEST = _descriptor.Descriptor(
name='PipelinesUpdateRequest',
full_name='nvidia.clara.platform.PipelinesUpdateRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.PipelinesUpdateRequest.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pipeline_id', full_name='nvidia.clara.platform.PipelinesUpdateRequest.pipeline_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='definition', full_name='nvidia.clara.platform.PipelinesUpdateRequest.definition', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2994,
serialized_end=3195,
)
_PIPELINESUPDATERESPONSE = _descriptor.Descriptor(
name='PipelinesUpdateResponse',
full_name='nvidia.clara.platform.PipelinesUpdateResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.PipelinesUpdateResponse.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3197,
serialized_end=3277,
)
# --- Cross-reference resolution (generated) ---
# Field descriptors above were built with message_type=None; here protoc's
# output wires each message-typed field to its target Descriptor (including
# RequestHeader / ResponseHeader / Identifier from the imported common proto),
# sets containing_type on nested types, then registers every message with the
# file DESCRIPTOR and the symbol database.
_PIPELINESADDMETADATAREQUEST_METADATAENTRY.containing_type = _PIPELINESADDMETADATAREQUEST
_PIPELINESADDMETADATAREQUEST.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._REQUESTHEADER
_PIPELINESADDMETADATAREQUEST.fields_by_name['pipeline_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_PIPELINESADDMETADATAREQUEST.fields_by_name['metadata'].message_type = _PIPELINESADDMETADATAREQUEST_METADATAENTRY
_PIPELINESADDMETADATARESPONSE_METADATAENTRY.containing_type = _PIPELINESADDMETADATARESPONSE
_PIPELINESADDMETADATARESPONSE.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._RESPONSEHEADER
_PIPELINESADDMETADATARESPONSE.fields_by_name['pipeline_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_PIPELINESADDMETADATARESPONSE.fields_by_name['metadata'].message_type = _PIPELINESADDMETADATARESPONSE_METADATAENTRY
_PIPELINESCREATEREQUEST_METADATAENTRY.containing_type = _PIPELINESCREATEREQUEST
_PIPELINESCREATEREQUEST.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._REQUESTHEADER
_PIPELINESCREATEREQUEST.fields_by_name['definition'].message_type = _PIPELINEDEFINITIONFILE
_PIPELINESCREATEREQUEST.fields_by_name['pipeline_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_PIPELINESCREATEREQUEST.fields_by_name['metadata'].message_type = _PIPELINESCREATEREQUEST_METADATAENTRY
_PIPELINESCREATERESPONSE.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._RESPONSEHEADER
_PIPELINESCREATERESPONSE.fields_by_name['pipeline_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_PIPELINESDETAILSREQUEST.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._REQUESTHEADER
_PIPELINESDETAILSREQUEST.fields_by_name['pipeline_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
# PipelineDagNode.next is self-referential (a DAG node lists its successors).
_PIPELINESDETAILSRESPONSE_PIPELINEDAGNODE.fields_by_name['next'].message_type = _PIPELINESDETAILSRESPONSE_PIPELINEDAGNODE
_PIPELINESDETAILSRESPONSE_PIPELINEDAGNODE.containing_type = _PIPELINESDETAILSRESPONSE
_PIPELINESDETAILSRESPONSE_METADATAENTRY.containing_type = _PIPELINESDETAILSRESPONSE
_PIPELINESDETAILSRESPONSE.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._RESPONSEHEADER
_PIPELINESDETAILSRESPONSE.fields_by_name['pipeline_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_PIPELINESDETAILSRESPONSE.fields_by_name['definition'].message_type = _PIPELINEDEFINITIONFILE
_PIPELINESDETAILSRESPONSE.fields_by_name['dag'].message_type = _PIPELINESDETAILSRESPONSE_PIPELINEDAGNODE
_PIPELINESDETAILSRESPONSE.fields_by_name['metadata'].message_type = _PIPELINESDETAILSRESPONSE_METADATAENTRY
_PIPELINESLISTREQUEST.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._REQUESTHEADER
_PIPELINESLISTRESPONSE_PIPELINEDETAILS_METADATAENTRY.containing_type = _PIPELINESLISTRESPONSE_PIPELINEDETAILS
_PIPELINESLISTRESPONSE_PIPELINEDETAILS.fields_by_name['pipeline_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_PIPELINESLISTRESPONSE_PIPELINEDETAILS.fields_by_name['metadata'].message_type = _PIPELINESLISTRESPONSE_PIPELINEDETAILS_METADATAENTRY
_PIPELINESLISTRESPONSE_PIPELINEDETAILS.containing_type = _PIPELINESLISTRESPONSE
_PIPELINESLISTRESPONSE.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._RESPONSEHEADER
_PIPELINESLISTRESPONSE.fields_by_name['details'].message_type = _PIPELINESLISTRESPONSE_PIPELINEDETAILS
_PIPELINESREMOVEMETADATAREQUEST.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._REQUESTHEADER
_PIPELINESREMOVEMETADATAREQUEST.fields_by_name['pipeline_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_PIPELINESREMOVEMETADATARESPONSE_METADATAENTRY.containing_type = _PIPELINESREMOVEMETADATARESPONSE
_PIPELINESREMOVEMETADATARESPONSE.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._RESPONSEHEADER
_PIPELINESREMOVEMETADATARESPONSE.fields_by_name['pipeline_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_PIPELINESREMOVEMETADATARESPONSE.fields_by_name['metadata'].message_type = _PIPELINESREMOVEMETADATARESPONSE_METADATAENTRY
_PIPELINESREMOVEREQUEST.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._REQUESTHEADER
_PIPELINESREMOVEREQUEST.fields_by_name['pipeline_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_PIPELINESREMOVERESPONSE.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._RESPONSEHEADER
_PIPELINESUPDATEREQUEST.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._REQUESTHEADER
_PIPELINESUPDATEREQUEST.fields_by_name['pipeline_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_PIPELINESUPDATEREQUEST.fields_by_name['definition'].message_type = _PIPELINEDEFINITIONFILE
_PIPELINESUPDATERESPONSE.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._RESPONSEHEADER
# Register every top-level message type with the file descriptor.
DESCRIPTOR.message_types_by_name['PipelineDefinitionFile'] = _PIPELINEDEFINITIONFILE
DESCRIPTOR.message_types_by_name['PipelinesAddMetadataRequest'] = _PIPELINESADDMETADATAREQUEST
DESCRIPTOR.message_types_by_name['PipelinesAddMetadataResponse'] = _PIPELINESADDMETADATARESPONSE
DESCRIPTOR.message_types_by_name['PipelinesCreateRequest'] = _PIPELINESCREATEREQUEST
DESCRIPTOR.message_types_by_name['PipelinesCreateResponse'] = _PIPELINESCREATERESPONSE
DESCRIPTOR.message_types_by_name['PipelinesDetailsRequest'] = _PIPELINESDETAILSREQUEST
DESCRIPTOR.message_types_by_name['PipelinesDetailsResponse'] = _PIPELINESDETAILSRESPONSE
DESCRIPTOR.message_types_by_name['PipelinesListRequest'] = _PIPELINESLISTREQUEST
DESCRIPTOR.message_types_by_name['PipelinesListResponse'] = _PIPELINESLISTRESPONSE
DESCRIPTOR.message_types_by_name['PipelinesRemoveMetadataRequest'] = _PIPELINESREMOVEMETADATAREQUEST
DESCRIPTOR.message_types_by_name['PipelinesRemoveMetadataResponse'] = _PIPELINESREMOVEMETADATARESPONSE
DESCRIPTOR.message_types_by_name['PipelinesRemoveRequest'] = _PIPELINESREMOVEREQUEST
DESCRIPTOR.message_types_by_name['PipelinesRemoveResponse'] = _PIPELINESREMOVERESPONSE
DESCRIPTOR.message_types_by_name['PipelinesUpdateRequest'] = _PIPELINESUPDATEREQUEST
DESCRIPTOR.message_types_by_name['PipelinesUpdateResponse'] = _PIPELINESUPDATERESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# --- Concrete message classes (generated) ---
# GeneratedProtocolMessageType is a metaclass call that synthesizes a usable
# Python Message subclass from each Descriptor; nested classes (MetadataEntry,
# PipelineDagNode, PipelineDetails) are created first and attached as class
# attributes of their parent. Every class is registered with the symbol
# database so it can be looked up by full proto name.
PipelineDefinitionFile = _reflection.GeneratedProtocolMessageType('PipelineDefinitionFile', (_message.Message,), dict(
  DESCRIPTOR = _PIPELINEDEFINITIONFILE,
  __module__ = 'nvidia.clara.platform.pipelines_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.PipelineDefinitionFile)
  ))
_sym_db.RegisterMessage(PipelineDefinitionFile)
PipelinesAddMetadataRequest = _reflection.GeneratedProtocolMessageType('PipelinesAddMetadataRequest', (_message.Message,), dict(
  MetadataEntry = _reflection.GeneratedProtocolMessageType('MetadataEntry', (_message.Message,), dict(
    DESCRIPTOR = _PIPELINESADDMETADATAREQUEST_METADATAENTRY,
    __module__ = 'nvidia.clara.platform.pipelines_pb2'
    # @@protoc_insertion_point(class_scope:nvidia.clara.platform.PipelinesAddMetadataRequest.MetadataEntry)
    ))
  ,
  DESCRIPTOR = _PIPELINESADDMETADATAREQUEST,
  __module__ = 'nvidia.clara.platform.pipelines_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.PipelinesAddMetadataRequest)
  ))
_sym_db.RegisterMessage(PipelinesAddMetadataRequest)
_sym_db.RegisterMessage(PipelinesAddMetadataRequest.MetadataEntry)
PipelinesAddMetadataResponse = _reflection.GeneratedProtocolMessageType('PipelinesAddMetadataResponse', (_message.Message,), dict(
  MetadataEntry = _reflection.GeneratedProtocolMessageType('MetadataEntry', (_message.Message,), dict(
    DESCRIPTOR = _PIPELINESADDMETADATARESPONSE_METADATAENTRY,
    __module__ = 'nvidia.clara.platform.pipelines_pb2'
    # @@protoc_insertion_point(class_scope:nvidia.clara.platform.PipelinesAddMetadataResponse.MetadataEntry)
    ))
  ,
  DESCRIPTOR = _PIPELINESADDMETADATARESPONSE,
  __module__ = 'nvidia.clara.platform.pipelines_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.PipelinesAddMetadataResponse)
  ))
_sym_db.RegisterMessage(PipelinesAddMetadataResponse)
_sym_db.RegisterMessage(PipelinesAddMetadataResponse.MetadataEntry)
PipelinesCreateRequest = _reflection.GeneratedProtocolMessageType('PipelinesCreateRequest', (_message.Message,), dict(
  MetadataEntry = _reflection.GeneratedProtocolMessageType('MetadataEntry', (_message.Message,), dict(
    DESCRIPTOR = _PIPELINESCREATEREQUEST_METADATAENTRY,
    __module__ = 'nvidia.clara.platform.pipelines_pb2'
    # @@protoc_insertion_point(class_scope:nvidia.clara.platform.PipelinesCreateRequest.MetadataEntry)
    ))
  ,
  DESCRIPTOR = _PIPELINESCREATEREQUEST,
  __module__ = 'nvidia.clara.platform.pipelines_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.PipelinesCreateRequest)
  ))
_sym_db.RegisterMessage(PipelinesCreateRequest)
_sym_db.RegisterMessage(PipelinesCreateRequest.MetadataEntry)
PipelinesCreateResponse = _reflection.GeneratedProtocolMessageType('PipelinesCreateResponse', (_message.Message,), dict(
  DESCRIPTOR = _PIPELINESCREATERESPONSE,
  __module__ = 'nvidia.clara.platform.pipelines_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.PipelinesCreateResponse)
  ))
_sym_db.RegisterMessage(PipelinesCreateResponse)
PipelinesDetailsRequest = _reflection.GeneratedProtocolMessageType('PipelinesDetailsRequest', (_message.Message,), dict(
  DESCRIPTOR = _PIPELINESDETAILSREQUEST,
  __module__ = 'nvidia.clara.platform.pipelines_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.PipelinesDetailsRequest)
  ))
_sym_db.RegisterMessage(PipelinesDetailsRequest)
PipelinesDetailsResponse = _reflection.GeneratedProtocolMessageType('PipelinesDetailsResponse', (_message.Message,), dict(
  PipelineDagNode = _reflection.GeneratedProtocolMessageType('PipelineDagNode', (_message.Message,), dict(
    DESCRIPTOR = _PIPELINESDETAILSRESPONSE_PIPELINEDAGNODE,
    __module__ = 'nvidia.clara.platform.pipelines_pb2'
    # @@protoc_insertion_point(class_scope:nvidia.clara.platform.PipelinesDetailsResponse.PipelineDagNode)
    ))
  ,
  MetadataEntry = _reflection.GeneratedProtocolMessageType('MetadataEntry', (_message.Message,), dict(
    DESCRIPTOR = _PIPELINESDETAILSRESPONSE_METADATAENTRY,
    __module__ = 'nvidia.clara.platform.pipelines_pb2'
    # @@protoc_insertion_point(class_scope:nvidia.clara.platform.PipelinesDetailsResponse.MetadataEntry)
    ))
  ,
  DESCRIPTOR = _PIPELINESDETAILSRESPONSE,
  __module__ = 'nvidia.clara.platform.pipelines_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.PipelinesDetailsResponse)
  ))
_sym_db.RegisterMessage(PipelinesDetailsResponse)
_sym_db.RegisterMessage(PipelinesDetailsResponse.PipelineDagNode)
_sym_db.RegisterMessage(PipelinesDetailsResponse.MetadataEntry)
PipelinesListRequest = _reflection.GeneratedProtocolMessageType('PipelinesListRequest', (_message.Message,), dict(
  DESCRIPTOR = _PIPELINESLISTREQUEST,
  __module__ = 'nvidia.clara.platform.pipelines_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.PipelinesListRequest)
  ))
_sym_db.RegisterMessage(PipelinesListRequest)
PipelinesListResponse = _reflection.GeneratedProtocolMessageType('PipelinesListResponse', (_message.Message,), dict(
  PipelineDetails = _reflection.GeneratedProtocolMessageType('PipelineDetails', (_message.Message,), dict(
    MetadataEntry = _reflection.GeneratedProtocolMessageType('MetadataEntry', (_message.Message,), dict(
      DESCRIPTOR = _PIPELINESLISTRESPONSE_PIPELINEDETAILS_METADATAENTRY,
      __module__ = 'nvidia.clara.platform.pipelines_pb2'
      # @@protoc_insertion_point(class_scope:nvidia.clara.platform.PipelinesListResponse.PipelineDetails.MetadataEntry)
      ))
    ,
    DESCRIPTOR = _PIPELINESLISTRESPONSE_PIPELINEDETAILS,
    __module__ = 'nvidia.clara.platform.pipelines_pb2'
    # @@protoc_insertion_point(class_scope:nvidia.clara.platform.PipelinesListResponse.PipelineDetails)
    ))
  ,
  DESCRIPTOR = _PIPELINESLISTRESPONSE,
  __module__ = 'nvidia.clara.platform.pipelines_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.PipelinesListResponse)
  ))
_sym_db.RegisterMessage(PipelinesListResponse)
_sym_db.RegisterMessage(PipelinesListResponse.PipelineDetails)
_sym_db.RegisterMessage(PipelinesListResponse.PipelineDetails.MetadataEntry)
PipelinesRemoveMetadataRequest = _reflection.GeneratedProtocolMessageType('PipelinesRemoveMetadataRequest', (_message.Message,), dict(
  DESCRIPTOR = _PIPELINESREMOVEMETADATAREQUEST,
  __module__ = 'nvidia.clara.platform.pipelines_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.PipelinesRemoveMetadataRequest)
  ))
_sym_db.RegisterMessage(PipelinesRemoveMetadataRequest)
PipelinesRemoveMetadataResponse = _reflection.GeneratedProtocolMessageType('PipelinesRemoveMetadataResponse', (_message.Message,), dict(
  MetadataEntry = _reflection.GeneratedProtocolMessageType('MetadataEntry', (_message.Message,), dict(
    DESCRIPTOR = _PIPELINESREMOVEMETADATARESPONSE_METADATAENTRY,
    __module__ = 'nvidia.clara.platform.pipelines_pb2'
    # @@protoc_insertion_point(class_scope:nvidia.clara.platform.PipelinesRemoveMetadataResponse.MetadataEntry)
    ))
  ,
  DESCRIPTOR = _PIPELINESREMOVEMETADATARESPONSE,
  __module__ = 'nvidia.clara.platform.pipelines_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.PipelinesRemoveMetadataResponse)
  ))
_sym_db.RegisterMessage(PipelinesRemoveMetadataResponse)
_sym_db.RegisterMessage(PipelinesRemoveMetadataResponse.MetadataEntry)
PipelinesRemoveRequest = _reflection.GeneratedProtocolMessageType('PipelinesRemoveRequest', (_message.Message,), dict(
  DESCRIPTOR = _PIPELINESREMOVEREQUEST,
  __module__ = 'nvidia.clara.platform.pipelines_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.PipelinesRemoveRequest)
  ))
_sym_db.RegisterMessage(PipelinesRemoveRequest)
PipelinesRemoveResponse = _reflection.GeneratedProtocolMessageType('PipelinesRemoveResponse', (_message.Message,), dict(
  DESCRIPTOR = _PIPELINESREMOVERESPONSE,
  __module__ = 'nvidia.clara.platform.pipelines_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.PipelinesRemoveResponse)
  ))
_sym_db.RegisterMessage(PipelinesRemoveResponse)
PipelinesUpdateRequest = _reflection.GeneratedProtocolMessageType('PipelinesUpdateRequest', (_message.Message,), dict(
  DESCRIPTOR = _PIPELINESUPDATEREQUEST,
  __module__ = 'nvidia.clara.platform.pipelines_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.PipelinesUpdateRequest)
  ))
_sym_db.RegisterMessage(PipelinesUpdateRequest)
PipelinesUpdateResponse = _reflection.GeneratedProtocolMessageType('PipelinesUpdateResponse', (_message.Message,), dict(
  DESCRIPTOR = _PIPELINESUPDATERESPONSE,
  __module__ = 'nvidia.clara.platform.pipelines_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.PipelinesUpdateResponse)
  ))
_sym_db.RegisterMessage(PipelinesUpdateResponse)
# Clear cached _options so option values are lazily re-derived from
# serialized_options on first access (standard protoc-generated boilerplate).
DESCRIPTOR._options = None
_PIPELINESADDMETADATAREQUEST_METADATAENTRY._options = None
_PIPELINESADDMETADATARESPONSE_METADATAENTRY._options = None
_PIPELINESCREATEREQUEST_METADATAENTRY._options = None
_PIPELINESDETAILSRESPONSE_METADATAENTRY._options = None
_PIPELINESLISTRESPONSE_PIPELINEDETAILS_METADATAENTRY._options = None
_PIPELINESREMOVEMETADATARESPONSE_METADATAENTRY._options = None
# --- Pipelines gRPC service descriptor (generated) ---
# Declares the seven RPC methods of nvidia.clara.platform.Pipelines and the
# request/response message type of each; registered with the symbol database
# and the file descriptor so stub/servicer modules can resolve it by name.
_PIPELINES = _descriptor.ServiceDescriptor(
  name='Pipelines',
  full_name='nvidia.clara.platform.Pipelines',
  file=DESCRIPTOR,
  index=0,
  serialized_options=None,
  serialized_start=3280,
  serialized_end=4070,
  methods=[
  _descriptor.MethodDescriptor(
    name='AddMetadata',
    full_name='nvidia.clara.platform.Pipelines.AddMetadata',
    index=0,
    containing_service=None,
    input_type=_PIPELINESADDMETADATAREQUEST,
    output_type=_PIPELINESADDMETADATARESPONSE,
    serialized_options=None,
  ),
  _descriptor.MethodDescriptor(
    name='Create',
    full_name='nvidia.clara.platform.Pipelines.Create',
    index=1,
    containing_service=None,
    input_type=_PIPELINESCREATEREQUEST,
    output_type=_PIPELINESCREATERESPONSE,
    serialized_options=None,
  ),
  _descriptor.MethodDescriptor(
    name='Details',
    full_name='nvidia.clara.platform.Pipelines.Details',
    index=2,
    containing_service=None,
    input_type=_PIPELINESDETAILSREQUEST,
    output_type=_PIPELINESDETAILSRESPONSE,
    serialized_options=None,
  ),
  _descriptor.MethodDescriptor(
    name='List',
    full_name='nvidia.clara.platform.Pipelines.List',
    index=3,
    containing_service=None,
    input_type=_PIPELINESLISTREQUEST,
    output_type=_PIPELINESLISTRESPONSE,
    serialized_options=None,
  ),
  _descriptor.MethodDescriptor(
    name='Remove',
    full_name='nvidia.clara.platform.Pipelines.Remove',
    index=4,
    containing_service=None,
    input_type=_PIPELINESREMOVEREQUEST,
    output_type=_PIPELINESREMOVERESPONSE,
    serialized_options=None,
  ),
  _descriptor.MethodDescriptor(
    name='RemoveMetadata',
    full_name='nvidia.clara.platform.Pipelines.RemoveMetadata',
    index=5,
    containing_service=None,
    input_type=_PIPELINESREMOVEMETADATAREQUEST,
    output_type=_PIPELINESREMOVEMETADATARESPONSE,
    serialized_options=None,
  ),
  _descriptor.MethodDescriptor(
    name='Update',
    full_name='nvidia.clara.platform.Pipelines.Update',
    index=6,
    containing_service=None,
    input_type=_PIPELINESUPDATEREQUEST,
    output_type=_PIPELINESUPDATERESPONSE,
    serialized_options=None,
  ),
])
_sym_db.RegisterServiceDescriptor(_PIPELINES)
DESCRIPTOR.services_by_name['Pipelines'] = _PIPELINES
# @@protoc_insertion_point(module_scope)
| clara-platform-python-client-main | nvidia_clara/grpc/pipelines_pb2.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia/clara/platform/node-monitor/metrics.proto
import sys
# On Python 2 the serialized descriptor literals below are already byte
# strings; on Python 3 they must be encoded to bytes (latin1 preserves the
# escaped byte values one-to-one).
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

# Default symbol database: every descriptor and generated message type in
# this module is registered here so it can be looked up by full name.
_sym_db = _symbol_database.Default()

# common.proto is both a dependency and a public dependency (see the
# public_dependencies list on DESCRIPTOR below), hence the wildcard re-export.
from nvidia_clara.grpc import common_pb2 as nvidia_dot_clara_dot_platform_dot_common__pb2
from nvidia_clara.grpc.common_pb2 import *
# File-level descriptor for nvidia/clara/platform/node-monitor/metrics.proto.
# serialized_pb is the wire-format FileDescriptorProto emitted by protoc; the
# serialized_start/serialized_end offsets used by the message and service
# descriptors below index into this blob, so none of it may be edited by hand.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='nvidia/clara/platform/node-monitor/metrics.proto',
  package='nvidia.clara.platform.node_monitor',
  syntax='proto3',
  serialized_options=_b('\n%com.nvidia.clara.platform.nodemonitorZ\004apis\252\002&Nvidia.Clara.Platform.NodeMonitor.Grpc'),
  serialized_pb=_b('\n0nvidia/clara/platform/node-monitor/metrics.proto\x12\"nvidia.clara.platform.node_monitor\x1a\"nvidia/clara/platform/common.proto\"\xbb\x02\n\nGpuDetails\x12\x11\n\tdevice_id\x18\x01 \x01(\x05\x12G\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x39.nvidia.clara.platform.node_monitor.GpuDetails.GpuMetrics\x12\x33\n\ttimestamp\x18\x03 \x01(\x0b\x32 .nvidia.clara.platform.Timestamp\x1a\x9b\x01\n\nGpuMetrics\x12\x1a\n\x12memory_utilization\x18\x01 \x01(\x02\x12\x17\n\x0fgpu_utilization\x18\x02 \x01(\x02\x12\x12\n\nfree_bar_1\x18\x03 \x01(\x03\x12\x12\n\nused_bar_1\x18\x04 \x01(\x03\x12\x17\n\x0f\x66ree_gpu_memory\x18\x05 \x01(\x03\x12\x17\n\x0fused_gpu_memory\x18\x06 \x01(\x03\"P\n\x18MonitorGpuMetricsRequest\x12\x34\n\x06header\x18\x01 \x01(\x0b\x32$.nvidia.clara.platform.RequestHeader\"\x97\x01\n\x19MonitorGpuMetricsResponse\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.nvidia.clara.platform.ResponseHeader\x12\x43\n\x0bgpu_details\x18\x02 \x03(\x0b\x32..nvidia.clara.platform.node_monitor.GpuDetails2\x97\x01\n\x07Monitor\x12\x8b\x01\n\nGpuMetrics\x12<.nvidia.clara.platform.node_monitor.MonitorGpuMetricsRequest\x1a=.nvidia.clara.platform.node_monitor.MonitorGpuMetricsResponse0\x01\x42V\n%com.nvidia.clara.platform.nodemonitorZ\x04\x61pis\xaa\x02&Nvidia.Clara.Platform.NodeMonitor.GrpcP\x00\x62\x06proto3')
  ,
  dependencies=[nvidia_dot_clara_dot_platform_dot_common__pb2.DESCRIPTOR,],
  public_dependencies=[nvidia_dot_clara_dot_platform_dot_common__pb2.DESCRIPTOR,])
# Descriptor for the nested message GpuDetails.GpuMetrics (six scalar fields:
# two floats for utilization, four int64 memory counters).  Field numbers and
# the serialized_start/serialized_end offsets mirror serialized_pb above.
_GPUDETAILS_GPUMETRICS = _descriptor.Descriptor(
  name='GpuMetrics',
  full_name='nvidia.clara.platform.node_monitor.GpuDetails.GpuMetrics',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='memory_utilization', full_name='nvidia.clara.platform.node_monitor.GpuDetails.GpuMetrics.memory_utilization', index=0,
      number=1, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='gpu_utilization', full_name='nvidia.clara.platform.node_monitor.GpuDetails.GpuMetrics.gpu_utilization', index=1,
      number=2, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='free_bar_1', full_name='nvidia.clara.platform.node_monitor.GpuDetails.GpuMetrics.free_bar_1', index=2,
      number=3, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='used_bar_1', full_name='nvidia.clara.platform.node_monitor.GpuDetails.GpuMetrics.used_bar_1', index=3,
      number=4, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='free_gpu_memory', full_name='nvidia.clara.platform.node_monitor.GpuDetails.GpuMetrics.free_gpu_memory', index=4,
      number=5, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='used_gpu_memory', full_name='nvidia.clara.platform.node_monitor.GpuDetails.GpuMetrics.used_gpu_memory', index=5,
      number=6, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=285,
  serialized_end=440,
  )
# Descriptor for GpuDetails: a device_id plus two message-typed fields
# (data, timestamp) whose message_type links are wired up after all
# descriptors exist (see the cross-reference assignments below).
_GPUDETAILS = _descriptor.Descriptor(
  name='GpuDetails',
  full_name='nvidia.clara.platform.node_monitor.GpuDetails',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='device_id', full_name='nvidia.clara.platform.node_monitor.GpuDetails.device_id', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='data', full_name='nvidia.clara.platform.node_monitor.GpuDetails.data', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='timestamp', full_name='nvidia.clara.platform.node_monitor.GpuDetails.timestamp', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[_GPUDETAILS_GPUMETRICS, ],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=125,
  serialized_end=440,
  )
# Descriptor for MonitorGpuMetricsRequest: a single message-typed 'header'
# field (RequestHeader from common.proto, linked below).
_MONITORGPUMETRICSREQUEST = _descriptor.Descriptor(
  name='MonitorGpuMetricsRequest',
  full_name='nvidia.clara.platform.node_monitor.MonitorGpuMetricsRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='header', full_name='nvidia.clara.platform.node_monitor.MonitorGpuMetricsRequest.header', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=442,
  serialized_end=522,
  )
# Descriptor for MonitorGpuMetricsResponse: a 'header' (ResponseHeader from
# common.proto) plus a repeated (label=3) message field 'gpu_details'.
_MONITORGPUMETRICSRESPONSE = _descriptor.Descriptor(
  name='MonitorGpuMetricsResponse',
  full_name='nvidia.clara.platform.node_monitor.MonitorGpuMetricsResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='header', full_name='nvidia.clara.platform.node_monitor.MonitorGpuMetricsResponse.header', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='gpu_details', full_name='nvidia.clara.platform.node_monitor.MonitorGpuMetricsResponse.gpu_details', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=525,
  serialized_end=676,
  )
# Resolve cross-references that the descriptor constructors above could not
# express inline (nesting, and message_type links for message-typed fields).
_GPUDETAILS_GPUMETRICS.containing_type = _GPUDETAILS
_GPUDETAILS.fields_by_name['data'].message_type = _GPUDETAILS_GPUMETRICS
# These fields reference message types declared in common.proto.
_GPUDETAILS.fields_by_name['timestamp'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._TIMESTAMP
_MONITORGPUMETRICSREQUEST.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._REQUESTHEADER
_MONITORGPUMETRICSRESPONSE.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._RESPONSEHEADER
_MONITORGPUMETRICSRESPONSE.fields_by_name['gpu_details'].message_type = _GPUDETAILS
# Expose the top-level message types on the file descriptor, then register
# the completed file descriptor with the symbol database.
DESCRIPTOR.message_types_by_name['GpuDetails'] = _GPUDETAILS
DESCRIPTOR.message_types_by_name['MonitorGpuMetricsRequest'] = _MONITORGPUMETRICSREQUEST
DESCRIPTOR.message_types_by_name['MonitorGpuMetricsResponse'] = _MONITORGPUMETRICSRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Build the concrete message classes from their descriptors.  The nested
# GpuMetrics class is created first and attached as a class attribute of
# GpuDetails, mirroring the proto nesting.
GpuDetails = _reflection.GeneratedProtocolMessageType('GpuDetails', (_message.Message,), dict(
  GpuMetrics = _reflection.GeneratedProtocolMessageType('GpuMetrics', (_message.Message,), dict(
    DESCRIPTOR = _GPUDETAILS_GPUMETRICS,
    __module__ = 'nvidia.clara.platform.node_monitor.metrics_pb2'
    # @@protoc_insertion_point(class_scope:nvidia.clara.platform.node_monitor.GpuDetails.GpuMetrics)
    ))
  ,
  DESCRIPTOR = _GPUDETAILS,
  __module__ = 'nvidia.clara.platform.node_monitor.metrics_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.node_monitor.GpuDetails)
  ))
_sym_db.RegisterMessage(GpuDetails)
_sym_db.RegisterMessage(GpuDetails.GpuMetrics)
MonitorGpuMetricsRequest = _reflection.GeneratedProtocolMessageType('MonitorGpuMetricsRequest', (_message.Message,), dict(
  DESCRIPTOR = _MONITORGPUMETRICSREQUEST,
  __module__ = 'nvidia.clara.platform.node_monitor.metrics_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.node_monitor.MonitorGpuMetricsRequest)
  ))
_sym_db.RegisterMessage(MonitorGpuMetricsRequest)
MonitorGpuMetricsResponse = _reflection.GeneratedProtocolMessageType('MonitorGpuMetricsResponse', (_message.Message,), dict(
  DESCRIPTOR = _MONITORGPUMETRICSRESPONSE,
  __module__ = 'nvidia.clara.platform.node_monitor.metrics_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.node_monitor.MonitorGpuMetricsResponse)
  ))
_sym_db.RegisterMessage(MonitorGpuMetricsResponse)
# NOTE(review): standard protoc boilerplate — clears the raw options attribute
# on the file descriptor after construction.
DESCRIPTOR._options = None
# Service descriptor for the Monitor service, exposing the single GpuMetrics
# RPC, then registered with the symbol database and the file descriptor.
_MONITOR = _descriptor.ServiceDescriptor(
  name='Monitor',
  full_name='nvidia.clara.platform.node_monitor.Monitor',
  file=DESCRIPTOR,
  index=0,
  serialized_options=None,
  serialized_start=679,
  serialized_end=830,
  methods=[
    _descriptor.MethodDescriptor(
      name='GpuMetrics',
      full_name='nvidia.clara.platform.node_monitor.Monitor.GpuMetrics',
      index=0,
      containing_service=None,
      input_type=_MONITORGPUMETRICSREQUEST,
      output_type=_MONITORGPUMETRICSRESPONSE,
      serialized_options=None,
      ),
    ])
_sym_db.RegisterServiceDescriptor(_MONITOR)
DESCRIPTOR.services_by_name['Monitor'] = _MONITOR
# @@protoc_insertion_point(module_scope)
| clara-platform-python-client-main | nvidia_clara/grpc/metrics_pb2.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from nvidia_clara.grpc import clara_pb2 as nvidia_dot_clara_dot_platform_dot_clara__pb2
class ClaraStub(object):
    """Client-side stub for the nvidia.clara.platform.Clara gRPC service.

    Each attribute set in __init__ is a callable bound to one RPC method
    on the supplied channel.
    """
    # missing associated documentation comment in .proto file
    pass

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # Unary request -> unary response.
        self.Stop = channel.unary_unary(
            '/nvidia.clara.platform.Clara/Stop',
            request_serializer=nvidia_dot_clara_dot_platform_dot_clara__pb2.ClaraStopRequest.SerializeToString,
            response_deserializer=nvidia_dot_clara_dot_platform_dot_clara__pb2.ClaraStopResponse.FromString,
        )
        # Unary request -> server-streaming response.
        self.Utilization = channel.unary_stream(
            '/nvidia.clara.platform.Clara/Utilization',
            request_serializer=nvidia_dot_clara_dot_platform_dot_clara__pb2.ClaraUtilizationRequest.SerializeToString,
            response_deserializer=nvidia_dot_clara_dot_platform_dot_clara__pb2.ClaraUtilizationResponse.FromString,
        )
        # Unary request -> unary response.
        self.Version = channel.unary_unary(
            '/nvidia.clara.platform.Clara/Version',
            request_serializer=nvidia_dot_clara_dot_platform_dot_clara__pb2.ClaraVersionRequest.SerializeToString,
            response_deserializer=nvidia_dot_clara_dot_platform_dot_clara__pb2.ClaraVersionResponse.FromString,
        )
class ClaraServicer(object):
    """Server-side base class for the nvidia.clara.platform.Clara service.

    Subclass and override the methods below; each default implementation
    reports UNIMPLEMENTED to the client and raises.
    """
    # missing associated documentation comment in .proto file
    pass

    def Stop(self, request, context):
        """Requests the termination of Clara Platform Server and associated resource cleanup.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Utilization(self, request, context):
        """Requests utilization data for all Clara Platform managed GPUs.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Version(self, request, context):
        """Requests version information from Clara Platform Server.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_ClaraServicer_to_server(servicer, server):
    """Register *servicer*'s handlers on *server* under the
    'nvidia.clara.platform.Clara' service name.

    Handler cardinalities (unary/stream) must match the ClaraStub
    definitions above.
    """
    rpc_method_handlers = {
        'Stop': grpc.unary_unary_rpc_method_handler(
            servicer.Stop,
            request_deserializer=nvidia_dot_clara_dot_platform_dot_clara__pb2.ClaraStopRequest.FromString,
            response_serializer=nvidia_dot_clara_dot_platform_dot_clara__pb2.ClaraStopResponse.SerializeToString,
        ),
        'Utilization': grpc.unary_stream_rpc_method_handler(
            servicer.Utilization,
            request_deserializer=nvidia_dot_clara_dot_platform_dot_clara__pb2.ClaraUtilizationRequest.FromString,
            response_serializer=nvidia_dot_clara_dot_platform_dot_clara__pb2.ClaraUtilizationResponse.SerializeToString,
        ),
        'Version': grpc.unary_unary_rpc_method_handler(
            servicer.Version,
            request_deserializer=nvidia_dot_clara_dot_platform_dot_clara__pb2.ClaraVersionRequest.FromString,
            response_serializer=nvidia_dot_clara_dot_platform_dot_clara__pb2.ClaraVersionResponse.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'nvidia.clara.platform.Clara', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
| clara-platform-python-client-main | nvidia_clara/grpc/clara_pb2_grpc.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| clara-platform-python-client-main | nvidia_clara/grpc/__init__.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia/clara/platform/models.proto
import sys
# Py2/Py3 shim used by protoc-generated code: on Python 3 the serialized
# descriptor data is stored as a str literal and must be encoded to bytes
# (latin1 maps code points 0-255 one-to-one onto byte values); on Python 2
# the literal is already a byte string and passes through unchanged.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
# Process-wide symbol database; every generated message/enum below is
# registered here so it can be resolved by name at runtime.
_sym_db = _symbol_database.Default()
# models.proto imports common.proto publicly, so common_pb2's symbols are
# re-exported from this module via the star import below.
from nvidia_clara.grpc import common_pb2 as nvidia_dot_clara_dot_platform_dot_common__pb2
from nvidia_clara.grpc.common_pb2 import *
# File-level descriptor for nvidia/clara/platform/models.proto.
# serialized_pb is the wire-format FileDescriptorProto emitted by protoc;
# all message/enum descriptors below index into it via serialized_start/end.
# NOTE(review): in genuine protoc output serialized_pb is a single one-line
# string literal; here it appears to have been wrapped across several physical
# lines by an extraction/formatting step. Left byte-for-byte as found — do not
# hand-edit; regenerate from the .proto instead.
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia/clara/platform/models.proto',
package='nvidia.clara.platform',
syntax='proto3',
serialized_options=_b('\n\031com.nvidia.clara.platformZ\004apis\252\002\032Nvidia.Clara.Platform.Grpc'),
serialized_pb=_b('\n\"nvidia/clara/platform/models.proto\x12\x15nvidia.clara.platform\x1a\"nvidia/clara/platform/common.proto\"\x81\x01\n\x13ModelCatalogDetails\x12\x35\n\ncatalog_id\x18\x01 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12\x33\n\x06models\x18\x02 \x03(\x0b\x32#.nvidia.clara.platform.ModelDetails\"\xf7\x01\n\x0cModelDetails\x12\x33\n\x08model_id\x18\x01 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12\x0c\n\x04name\x18\x02 \x01(\t\x12.\n\x04type\x18\x04 \x01(\x0e\x32 .nvidia.clara.platform.ModelType\x12\x43\n\x08metadata\x18\x08 \x03(\x0b\x32\x31.nvidia.clara.platform.ModelDetails.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x87\x02\n\x18ModelsAddMetadataRequest\x12\x34\n\x06header\x18\x01 \x01(\x0b\x32$.nvidia.clara.platform.RequestHeader\x12\x33\n\x08model_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12O\n\x08metadata\x18\x03 \x03(\x0b\x32=.nvidia.clara.platform.ModelsAddMetadataRequest.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x8a\x02\n\x19ModelsAddMetadataResponse\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.nvidia.clara.platform.ResponseHeader\x12\x33\n\x08model_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12P\n\x08metadata\x18\x03 \x03(\x0b\x32>.nvidia.clara.platform.ModelsAddMetadataResponse.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"R\n\x1aModelsCreateCatalogRequest\x12\x34\n\x06header\x18\x01 \x01(\x0b\x32$.nvidia.clara.platform.RequestHeader\"\x8b\x01\n\x1bModelsCreateCatalogResponse\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.nvidia.clara.platform.ResponseHeader\x12\x35\n\ncatalog_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\"S\n\x1bModelsCreateInstanceRequest\x12\x34\n\x06header\x18\x01 
\x01(\x0b\x32$.nvidia.clara.platform.RequestHeader\"\x8d\x01\n\x1cModelsCreateInstanceResponse\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.nvidia.clara.platform.ResponseHeader\x12\x36\n\x0binstance_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\"\x89\x01\n\x1aModelsDeleteCatalogRequest\x12\x34\n\x06header\x18\x01 \x01(\x0b\x32$.nvidia.clara.platform.RequestHeader\x12\x35\n\ncatalog_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\"\x8b\x01\n\x1bModelsDeleteCatalogResponse\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.nvidia.clara.platform.ResponseHeader\x12\x35\n\ncatalog_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\"\x8b\x01\n\x1bModelsDeleteInstanceRequest\x12\x34\n\x06header\x18\x01 \x01(\x0b\x32$.nvidia.clara.platform.RequestHeader\x12\x36\n\x0binstance_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\"\x8d\x01\n\x1cModelsDeleteInstanceResponse\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.nvidia.clara.platform.ResponseHeader\x12\x36\n\x0binstance_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\"\x85\x01\n\x18ModelsDeleteModelRequest\x12\x34\n\x06header\x18\x01 \x01(\x0b\x32$.nvidia.clara.platform.RequestHeader\x12\x33\n\x08model_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\"\x87\x01\n\x19ModelsDeleteModelResponse\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.nvidia.clara.platform.ResponseHeader\x12\x33\n\x08model_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\"\x87\x01\n\x1aModelsDownloadModelRequest\x12\x34\n\x06header\x18\x01 \x01(\x0b\x32$.nvidia.clara.platform.RequestHeader\x12\x33\n\x08model_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\"\x98\x01\n\x1bModelsDownloadModelResponse\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.nvidia.clara.platform.ResponseHeader\x12\x34\n\x07\x64\x65tails\x18\x02 \x01(\x0b\x32#.nvidia.clara.platform.ModelDetails\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\"Q\n\x19ModelsListCatalogsRequest\x12\x34\n\x06header\x18\x01 
\x01(\x0b\x32$.nvidia.clara.platform.RequestHeader\"\x91\x01\n\x1aModelsListCatalogsResponse\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.nvidia.clara.platform.ResponseHeader\x12<\n\x08\x63\x61talogs\x18\x02 \x03(\x0b\x32*.nvidia.clara.platform.ModelCatalogDetails\"R\n\x1aModelsListInstancesRequest\x12\x34\n\x06header\x18\x01 \x01(\x0b\x32$.nvidia.clara.platform.RequestHeader\"\x93\x01\n\x1bModelsListInstancesResponse\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.nvidia.clara.platform.ResponseHeader\x12=\n\tinstances\x18\x02 \x03(\x0b\x32*.nvidia.clara.platform.ModelCatalogDetails\"O\n\x17ModelsListModelsRequest\x12\x34\n\x06header\x18\x01 \x01(\x0b\x32$.nvidia.clara.platform.RequestHeader\"\x86\x01\n\x18ModelsListModelsResponse\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.nvidia.clara.platform.ResponseHeader\x12\x33\n\x06models\x18\x02 \x03(\x0b\x32#.nvidia.clara.platform.ModelDetails\"\x87\x01\n\x18ModelsReadCatalogRequest\x12\x34\n\x06header\x18\x01 \x01(\x0b\x32$.nvidia.clara.platform.RequestHeader\x12\x35\n\ncatalog_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\"\xbe\x01\n\x19ModelsReadCatalogResponse\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.nvidia.clara.platform.ResponseHeader\x12\x35\n\ncatalog_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12\x33\n\x06models\x18\x04 \x03(\x0b\x32#.nvidia.clara.platform.ModelDetails\"\x89\x01\n\x19ModelsReadInstanceRequest\x12\x34\n\x06header\x18\x01 \x01(\x0b\x32$.nvidia.clara.platform.RequestHeader\x12\x36\n\x0binstance_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\"\xc0\x01\n\x1aModelsReadInstanceResponse\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.nvidia.clara.platform.ResponseHeader\x12\x36\n\x0binstance_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12\x33\n\x06models\x18\x04 \x03(\x0b\x32#.nvidia.clara.platform.ModelDetails\"\x96\x01\n\x1bModelsRemoveMetadataRequest\x12\x34\n\x06header\x18\x01 \x01(\x0b\x32$.nvidia.clara.platform.RequestHeader\x12\x33\n\x08model_id\x18\x02 
\x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12\x0c\n\x04keys\x18\x03 \x03(\t\"\x90\x02\n\x1cModelsRemoveMetadataResponse\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.nvidia.clara.platform.ResponseHeader\x12\x33\n\x08model_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12S\n\x08metadata\x18\x03 \x03(\x0b\x32\x41.nvidia.clara.platform.ModelsRemoveMetadataResponse.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xbf\x01\n\x1aModelsUpdateCatalogRequest\x12\x34\n\x06header\x18\x01 \x01(\x0b\x32$.nvidia.clara.platform.RequestHeader\x12\x35\n\ncatalog_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12\x34\n\tmodel_ids\x18\x04 \x03(\x0b\x32!.nvidia.clara.platform.Identifier\"\x8b\x01\n\x1bModelsUpdateCatalogResponse\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.nvidia.clara.platform.ResponseHeader\x12\x35\n\ncatalog_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\"\xc1\x01\n\x1bModelsUpdateInstanceRequest\x12\x34\n\x06header\x18\x01 \x01(\x0b\x32$.nvidia.clara.platform.RequestHeader\x12\x36\n\x0binstance_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12\x34\n\tmodel_ids\x18\x04 \x03(\x0b\x32!.nvidia.clara.platform.Identifier\"\x8d\x01\n\x1cModelsUpdateInstanceResponse\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.nvidia.clara.platform.ResponseHeader\x12\x36\n\x0binstance_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\"\x94\x01\n\x18ModelsUploadModelRequest\x12\x34\n\x06header\x18\x01 \x01(\x0b\x32$.nvidia.clara.platform.RequestHeader\x12\x34\n\x07\x64\x65tails\x18\x02 \x01(\x0b\x32#.nvidia.clara.platform.ModelDetails\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\"\x88\x01\n\x19ModelsUploadModelResponse\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.nvidia.clara.platform.ResponseHeader\x12\x34\n\x07\x64\x65tails\x18\x02 
\x01(\x0b\x32#.nvidia.clara.platform.ModelDetails*r\n\tModelType\x12\x16\n\x12MODEL_TYPE_UNKNOWN\x10\x00\x12\x1a\n\x16MODEL_TYPE_TENSOR_FLOW\x10\x01\x12\x18\n\x14MODEL_TYPE_TENSOR_RT\x10\x02\x12\x17\n\x13MODEL_TYPE_PY_TORCH\x10\x03\x32\xff\x0e\n\x06Models\x12p\n\x0b\x41\x64\x64Metadata\x12/.nvidia.clara.platform.ModelsAddMetadataRequest\x1a\x30.nvidia.clara.platform.ModelsAddMetadataResponse\x12v\n\rCreateCatalog\x12\x31.nvidia.clara.platform.ModelsCreateCatalogRequest\x1a\x32.nvidia.clara.platform.ModelsCreateCatalogResponse\x12y\n\x0e\x43reateInstance\x12\x32.nvidia.clara.platform.ModelsCreateInstanceRequest\x1a\x33.nvidia.clara.platform.ModelsCreateInstanceResponse\x12v\n\rDeleteCatalog\x12\x31.nvidia.clara.platform.ModelsDeleteCatalogRequest\x1a\x32.nvidia.clara.platform.ModelsDeleteCatalogResponse\x12y\n\x0e\x44\x65leteInstance\x12\x32.nvidia.clara.platform.ModelsDeleteInstanceRequest\x1a\x33.nvidia.clara.platform.ModelsDeleteInstanceResponse\x12p\n\x0b\x44\x65leteModel\x12/.nvidia.clara.platform.ModelsDeleteModelRequest\x1a\x30.nvidia.clara.platform.ModelsDeleteModelResponse\x12x\n\rDownloadModel\x12\x31.nvidia.clara.platform.ModelsDownloadModelRequest\x1a\x32.nvidia.clara.platform.ModelsDownloadModelResponse0\x01\x12u\n\x0cListCatalogs\x12\x30.nvidia.clara.platform.ModelsListCatalogsRequest\x1a\x31.nvidia.clara.platform.ModelsListCatalogsResponse0\x01\x12x\n\rListInstances\x12\x31.nvidia.clara.platform.ModelsListInstancesRequest\x1a\x32.nvidia.clara.platform.ModelsListInstancesResponse0\x01\x12o\n\nListModels\x12..nvidia.clara.platform.ModelsListModelsRequest\x1a/.nvidia.clara.platform.ModelsListModelsResponse0\x01\x12r\n\x0bReadCatalog\x12/.nvidia.clara.platform.ModelsReadCatalogRequest\x1a\x30.nvidia.clara.platform.ModelsReadCatalogResponse0\x01\x12u\n\x0cReadInstance\x12\x30.nvidia.clara.platform.ModelsReadInstanceRequest\x1a\x31.nvidia.clara.platform.ModelsReadInstanceResponse0\x01\x12y\n\x0eRemoveMetadata\x12\x32.nvidia.clara.platform.ModelsRemoveMetadat
aRequest\x1a\x33.nvidia.clara.platform.ModelsRemoveMetadataResponse\x12x\n\rUpdateCatalog\x12\x31.nvidia.clara.platform.ModelsUpdateCatalogRequest\x1a\x32.nvidia.clara.platform.ModelsUpdateCatalogResponse(\x01\x12{\n\x0eUpdateInstance\x12\x32.nvidia.clara.platform.ModelsUpdateInstanceRequest\x1a\x33.nvidia.clara.platform.ModelsUpdateInstanceResponse(\x01\x12r\n\x0bUploadModel\x12/.nvidia.clara.platform.ModelsUploadModelRequest\x1a\x30.nvidia.clara.platform.ModelsUploadModelResponse(\x01\x42>\n\x19\x63om.nvidia.clara.platformZ\x04\x61pis\xaa\x02\x1aNvidia.Clara.Platform.GrpcP\x00\x62\x06proto3')
,
dependencies=[nvidia_dot_clara_dot_platform_dot_common__pb2.DESCRIPTOR,],
public_dependencies=[nvidia_dot_clara_dot_platform_dot_common__pb2.DESCRIPTOR,])
# Descriptor for the proto3 enum nvidia.clara.platform.ModelType.
# serialized_start/end are byte offsets into DESCRIPTOR's serialized_pb.
_MODELTYPE = _descriptor.EnumDescriptor(
name='ModelType',
full_name='nvidia.clara.platform.ModelType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='MODEL_TYPE_UNKNOWN', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_TYPE_TENSOR_FLOW', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_TYPE_TENSOR_RT', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_TYPE_PY_TORCH', index=3, number=3,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=5347,
serialized_end=5461,
)
_sym_db.RegisterEnumDescriptor(_MODELTYPE)
# Public enum wrapper plus each enum value mirrored as a module-level
# constant, matching the names declared in models.proto.
ModelType = enum_type_wrapper.EnumTypeWrapper(_MODELTYPE)
MODEL_TYPE_UNKNOWN = 0
MODEL_TYPE_TENSOR_FLOW = 1
MODEL_TYPE_TENSOR_RT = 2
MODEL_TYPE_PY_TORCH = 3
# Message descriptor: ModelCatalogDetails { catalog_id: Identifier, models: repeated ModelDetails }.
_MODELCATALOGDETAILS = _descriptor.Descriptor(
name='ModelCatalogDetails',
full_name='nvidia.clara.platform.ModelCatalogDetails',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='catalog_id', full_name='nvidia.clara.platform.ModelCatalogDetails.catalog_id', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='models', full_name='nvidia.clara.platform.ModelCatalogDetails.models', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=98,
serialized_end=227,
)
# Synthetic map-entry message for the ModelDetails.metadata map<string, string>
# field (serialized_options '8\001' marks it as a map entry).
_MODELDETAILS_METADATAENTRY = _descriptor.Descriptor(
name='MetadataEntry',
full_name='nvidia.clara.platform.ModelDetails.MetadataEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='nvidia.clara.platform.ModelDetails.MetadataEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='nvidia.clara.platform.ModelDetails.MetadataEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=430,
serialized_end=477,
)
# Message descriptor: ModelDetails { model_id, name, type: ModelType, metadata: map<string,string> }.
_MODELDETAILS = _descriptor.Descriptor(
name='ModelDetails',
full_name='nvidia.clara.platform.ModelDetails',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='model_id', full_name='nvidia.clara.platform.ModelDetails.model_id', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='nvidia.clara.platform.ModelDetails.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='nvidia.clara.platform.ModelDetails.type', index=2,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metadata', full_name='nvidia.clara.platform.ModelDetails.metadata', index=3,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_MODELDETAILS_METADATAENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=230,
serialized_end=477,
)
# Map-entry descriptor for ModelsAddMetadataRequest.metadata (map<string, string>).
_MODELSADDMETADATAREQUEST_METADATAENTRY = _descriptor.Descriptor(
name='MetadataEntry',
full_name='nvidia.clara.platform.ModelsAddMetadataRequest.MetadataEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='nvidia.clara.platform.ModelsAddMetadataRequest.MetadataEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='nvidia.clara.platform.ModelsAddMetadataRequest.MetadataEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=430,
serialized_end=477,
)
# Message descriptor: ModelsAddMetadataRequest { header, model_id, metadata }.
_MODELSADDMETADATAREQUEST = _descriptor.Descriptor(
name='ModelsAddMetadataRequest',
full_name='nvidia.clara.platform.ModelsAddMetadataRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.ModelsAddMetadataRequest.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='model_id', full_name='nvidia.clara.platform.ModelsAddMetadataRequest.model_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metadata', full_name='nvidia.clara.platform.ModelsAddMetadataRequest.metadata', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_MODELSADDMETADATAREQUEST_METADATAENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=480,
serialized_end=743,
)
# Map-entry descriptor for ModelsAddMetadataResponse.metadata (map<string, string>).
_MODELSADDMETADATARESPONSE_METADATAENTRY = _descriptor.Descriptor(
name='MetadataEntry',
full_name='nvidia.clara.platform.ModelsAddMetadataResponse.MetadataEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='nvidia.clara.platform.ModelsAddMetadataResponse.MetadataEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='nvidia.clara.platform.ModelsAddMetadataResponse.MetadataEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=430,
serialized_end=477,
)
# Message descriptor: ModelsAddMetadataResponse { header, model_id, metadata }.
_MODELSADDMETADATARESPONSE = _descriptor.Descriptor(
name='ModelsAddMetadataResponse',
full_name='nvidia.clara.platform.ModelsAddMetadataResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.ModelsAddMetadataResponse.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='model_id', full_name='nvidia.clara.platform.ModelsAddMetadataResponse.model_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metadata', full_name='nvidia.clara.platform.ModelsAddMetadataResponse.metadata', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_MODELSADDMETADATARESPONSE_METADATAENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=746,
serialized_end=1012,
)
# Message descriptor: ModelsCreateCatalogRequest { header }.
_MODELSCREATECATALOGREQUEST = _descriptor.Descriptor(
name='ModelsCreateCatalogRequest',
full_name='nvidia.clara.platform.ModelsCreateCatalogRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.ModelsCreateCatalogRequest.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1014,
serialized_end=1096,
)
# Message descriptor: ModelsCreateCatalogResponse { header, catalog_id }.
_MODELSCREATECATALOGRESPONSE = _descriptor.Descriptor(
name='ModelsCreateCatalogResponse',
full_name='nvidia.clara.platform.ModelsCreateCatalogResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.ModelsCreateCatalogResponse.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='catalog_id', full_name='nvidia.clara.platform.ModelsCreateCatalogResponse.catalog_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1099,
serialized_end=1238,
)
# Message descriptor: ModelsCreateInstanceRequest { header }.
_MODELSCREATEINSTANCEREQUEST = _descriptor.Descriptor(
name='ModelsCreateInstanceRequest',
full_name='nvidia.clara.platform.ModelsCreateInstanceRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.ModelsCreateInstanceRequest.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1240,
serialized_end=1323,
)
# Message descriptor: ModelsCreateInstanceResponse { header, instance_id }.
_MODELSCREATEINSTANCERESPONSE = _descriptor.Descriptor(
name='ModelsCreateInstanceResponse',
full_name='nvidia.clara.platform.ModelsCreateInstanceResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.ModelsCreateInstanceResponse.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='instance_id', full_name='nvidia.clara.platform.ModelsCreateInstanceResponse.instance_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1326,
serialized_end=1467,
)
# Message descriptor: ModelsDeleteCatalogRequest { header, catalog_id }.
_MODELSDELETECATALOGREQUEST = _descriptor.Descriptor(
name='ModelsDeleteCatalogRequest',
full_name='nvidia.clara.platform.ModelsDeleteCatalogRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.ModelsDeleteCatalogRequest.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='catalog_id', full_name='nvidia.clara.platform.ModelsDeleteCatalogRequest.catalog_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1470,
serialized_end=1607,
)
# Message descriptor: ModelsDeleteCatalogResponse { header, catalog_id }.
_MODELSDELETECATALOGRESPONSE = _descriptor.Descriptor(
name='ModelsDeleteCatalogResponse',
full_name='nvidia.clara.platform.ModelsDeleteCatalogResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.ModelsDeleteCatalogResponse.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='catalog_id', full_name='nvidia.clara.platform.ModelsDeleteCatalogResponse.catalog_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1610,
serialized_end=1749,
)
# Message descriptor: ModelsDeleteInstanceRequest { header, instance_id }.
_MODELSDELETEINSTANCEREQUEST = _descriptor.Descriptor(
name='ModelsDeleteInstanceRequest',
full_name='nvidia.clara.platform.ModelsDeleteInstanceRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.ModelsDeleteInstanceRequest.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='instance_id', full_name='nvidia.clara.platform.ModelsDeleteInstanceRequest.instance_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1752,
serialized_end=1891,
)
# Message descriptor: ModelsDeleteInstanceResponse { header, instance_id }.
_MODELSDELETEINSTANCERESPONSE = _descriptor.Descriptor(
name='ModelsDeleteInstanceResponse',
full_name='nvidia.clara.platform.ModelsDeleteInstanceResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.ModelsDeleteInstanceResponse.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='instance_id', full_name='nvidia.clara.platform.ModelsDeleteInstanceResponse.instance_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1894,
serialized_end=2035,
)
# Message descriptor: ModelsDeleteModelRequest { header, model_id }.
_MODELSDELETEMODELREQUEST = _descriptor.Descriptor(
name='ModelsDeleteModelRequest',
full_name='nvidia.clara.platform.ModelsDeleteModelRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.ModelsDeleteModelRequest.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='model_id', full_name='nvidia.clara.platform.ModelsDeleteModelRequest.model_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2038,
serialized_end=2171,
)
# Message descriptor: ModelsDeleteModelResponse { header, model_id }.
_MODELSDELETEMODELRESPONSE = _descriptor.Descriptor(
name='ModelsDeleteModelResponse',
full_name='nvidia.clara.platform.ModelsDeleteModelResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.ModelsDeleteModelResponse.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='model_id', full_name='nvidia.clara.platform.ModelsDeleteModelResponse.model_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2174,
serialized_end=2309,
)
# Message descriptor: ModelsDownloadModelRequest { header, model_id }.
_MODELSDOWNLOADMODELREQUEST = _descriptor.Descriptor(
name='ModelsDownloadModelRequest',
full_name='nvidia.clara.platform.ModelsDownloadModelRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.ModelsDownloadModelRequest.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='model_id', full_name='nvidia.clara.platform.ModelsDownloadModelRequest.model_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2312,
serialized_end=2447,
)
_MODELSDOWNLOADMODELRESPONSE = _descriptor.Descriptor(
name='ModelsDownloadModelResponse',
full_name='nvidia.clara.platform.ModelsDownloadModelResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.ModelsDownloadModelResponse.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='details', full_name='nvidia.clara.platform.ModelsDownloadModelResponse.details', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='nvidia.clara.platform.ModelsDownloadModelResponse.data', index=2,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2450,
serialized_end=2602,
)
# Descriptor for message ModelsListCatalogsRequest: single message-typed
# header field (#1). Generated by protoc; do not edit by hand.
_MODELSLISTCATALOGSREQUEST = _descriptor.Descriptor(
  name='ModelsListCatalogsRequest',
  full_name='nvidia.clara.platform.ModelsListCatalogsRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='header', full_name='nvidia.clara.platform.ModelsListCatalogsRequest.header', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2604,
  serialized_end=2685,
  )
# Descriptor for message ModelsListCatalogsResponse:
# header (#1) plus repeated (label=3) message-typed catalogs (#2).
_MODELSLISTCATALOGSRESPONSE = _descriptor.Descriptor(
  name='ModelsListCatalogsResponse',
  full_name='nvidia.clara.platform.ModelsListCatalogsResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='header', full_name='nvidia.clara.platform.ModelsListCatalogsResponse.header', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='catalogs', full_name='nvidia.clara.platform.ModelsListCatalogsResponse.catalogs', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2688,
  serialized_end=2833,
  )
# Descriptor for message ModelsListInstancesRequest: single header field (#1).
_MODELSLISTINSTANCESREQUEST = _descriptor.Descriptor(
  name='ModelsListInstancesRequest',
  full_name='nvidia.clara.platform.ModelsListInstancesRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='header', full_name='nvidia.clara.platform.ModelsListInstancesRequest.header', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2835,
  serialized_end=2917,
  )
# Descriptor for message ModelsListInstancesResponse:
# header (#1) plus repeated message-typed instances (#2).
_MODELSLISTINSTANCESRESPONSE = _descriptor.Descriptor(
  name='ModelsListInstancesResponse',
  full_name='nvidia.clara.platform.ModelsListInstancesResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='header', full_name='nvidia.clara.platform.ModelsListInstancesResponse.header', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='instances', full_name='nvidia.clara.platform.ModelsListInstancesResponse.instances', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2920,
  serialized_end=3067,
  )
# Descriptor for message ModelsListModelsRequest: single header field (#1).
# Generated by protoc; do not edit by hand.
_MODELSLISTMODELSREQUEST = _descriptor.Descriptor(
  name='ModelsListModelsRequest',
  full_name='nvidia.clara.platform.ModelsListModelsRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='header', full_name='nvidia.clara.platform.ModelsListModelsRequest.header', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3069,
  serialized_end=3148,
  )
# Descriptor for message ModelsListModelsResponse:
# header (#1) plus repeated message-typed models (#2).
_MODELSLISTMODELSRESPONSE = _descriptor.Descriptor(
  name='ModelsListModelsResponse',
  full_name='nvidia.clara.platform.ModelsListModelsResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='header', full_name='nvidia.clara.platform.ModelsListModelsResponse.header', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='models', full_name='nvidia.clara.platform.ModelsListModelsResponse.models', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3151,
  serialized_end=3285,
  )
# Descriptor for message ModelsReadCatalogRequest:
# header (#1) and catalog_id (#2), both message-typed.
_MODELSREADCATALOGREQUEST = _descriptor.Descriptor(
  name='ModelsReadCatalogRequest',
  full_name='nvidia.clara.platform.ModelsReadCatalogRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='header', full_name='nvidia.clara.platform.ModelsReadCatalogRequest.header', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='catalog_id', full_name='nvidia.clara.platform.ModelsReadCatalogRequest.catalog_id', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3288,
  serialized_end=3423,
  )
# Descriptor for message ModelsReadCatalogResponse:
# header (#1), catalog_id (#2), repeated message-typed models (#4).
_MODELSREADCATALOGRESPONSE = _descriptor.Descriptor(
  name='ModelsReadCatalogResponse',
  full_name='nvidia.clara.platform.ModelsReadCatalogResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='header', full_name='nvidia.clara.platform.ModelsReadCatalogResponse.header', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='catalog_id', full_name='nvidia.clara.platform.ModelsReadCatalogResponse.catalog_id', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='models', full_name='nvidia.clara.platform.ModelsReadCatalogResponse.models', index=2,
      number=4, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3426,
  serialized_end=3616,
  )
# Descriptor for message ModelsReadInstanceRequest:
# header (#1) and instance_id (#2), both message-typed.
# Generated by protoc; do not edit by hand.
_MODELSREADINSTANCEREQUEST = _descriptor.Descriptor(
  name='ModelsReadInstanceRequest',
  full_name='nvidia.clara.platform.ModelsReadInstanceRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='header', full_name='nvidia.clara.platform.ModelsReadInstanceRequest.header', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='instance_id', full_name='nvidia.clara.platform.ModelsReadInstanceRequest.instance_id', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3619,
  serialized_end=3756,
  )
# Descriptor for message ModelsReadInstanceResponse:
# header (#1), instance_id (#2), repeated message-typed models (#4).
_MODELSREADINSTANCERESPONSE = _descriptor.Descriptor(
  name='ModelsReadInstanceResponse',
  full_name='nvidia.clara.platform.ModelsReadInstanceResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='header', full_name='nvidia.clara.platform.ModelsReadInstanceResponse.header', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='instance_id', full_name='nvidia.clara.platform.ModelsReadInstanceResponse.instance_id', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='models', full_name='nvidia.clara.platform.ModelsReadInstanceResponse.models', index=2,
      number=4, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3759,
  serialized_end=3951,
  )
# Descriptor for message ModelsRemoveMetadataRequest:
# header (#1), model_id (#2), and repeated string keys (#3, type=9).
_MODELSREMOVEMETADATAREQUEST = _descriptor.Descriptor(
  name='ModelsRemoveMetadataRequest',
  full_name='nvidia.clara.platform.ModelsRemoveMetadataRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='header', full_name='nvidia.clara.platform.ModelsRemoveMetadataRequest.header', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='model_id', full_name='nvidia.clara.platform.ModelsRemoveMetadataRequest.model_id', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='keys', full_name='nvidia.clara.platform.ModelsRemoveMetadataRequest.keys', index=2,
      number=3, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3954,
  serialized_end=4104,
  )
# Descriptor for the synthetic MetadataEntry nested message that protoc emits
# for the ModelsRemoveMetadataResponse.metadata map<string, string> field.
# serialized_options=_b('8\001') is the map_entry=true option; key/value are
# both strings (type=9). Generated code -- do not edit by hand.
_MODELSREMOVEMETADATARESPONSE_METADATAENTRY = _descriptor.Descriptor(
  name='MetadataEntry',
  full_name='nvidia.clara.platform.ModelsRemoveMetadataResponse.MetadataEntry',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='key', full_name='nvidia.clara.platform.ModelsRemoveMetadataResponse.MetadataEntry.key', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='value', full_name='nvidia.clara.platform.ModelsRemoveMetadataResponse.MetadataEntry.value', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=_b('8\001'),
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=430,
  serialized_end=477,
  )
# Descriptor for message ModelsRemoveMetadataResponse:
# header (#1), model_id (#2), and repeated metadata map entries (#3) backed
# by the MetadataEntry descriptor nested above.
_MODELSREMOVEMETADATARESPONSE = _descriptor.Descriptor(
  name='ModelsRemoveMetadataResponse',
  full_name='nvidia.clara.platform.ModelsRemoveMetadataResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='header', full_name='nvidia.clara.platform.ModelsRemoveMetadataResponse.header', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='model_id', full_name='nvidia.clara.platform.ModelsRemoveMetadataResponse.model_id', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='metadata', full_name='nvidia.clara.platform.ModelsRemoveMetadataResponse.metadata', index=2,
      number=3, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[_MODELSREMOVEMETADATARESPONSE_METADATAENTRY, ],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=4107,
  serialized_end=4379,
  )
# Descriptor for message ModelsUpdateCatalogRequest:
# header (#1), catalog_id (#2), repeated message-typed model_ids (#4).
# Generated by protoc; do not edit by hand.
_MODELSUPDATECATALOGREQUEST = _descriptor.Descriptor(
  name='ModelsUpdateCatalogRequest',
  full_name='nvidia.clara.platform.ModelsUpdateCatalogRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='header', full_name='nvidia.clara.platform.ModelsUpdateCatalogRequest.header', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='catalog_id', full_name='nvidia.clara.platform.ModelsUpdateCatalogRequest.catalog_id', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='model_ids', full_name='nvidia.clara.platform.ModelsUpdateCatalogRequest.model_ids', index=2,
      number=4, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=4382,
  serialized_end=4573,
  )
# Descriptor for message ModelsUpdateCatalogResponse:
# header (#1) and catalog_id (#2), both message-typed.
_MODELSUPDATECATALOGRESPONSE = _descriptor.Descriptor(
  name='ModelsUpdateCatalogResponse',
  full_name='nvidia.clara.platform.ModelsUpdateCatalogResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='header', full_name='nvidia.clara.platform.ModelsUpdateCatalogResponse.header', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='catalog_id', full_name='nvidia.clara.platform.ModelsUpdateCatalogResponse.catalog_id', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=4576,
  serialized_end=4715,
  )
# Descriptor for message ModelsUpdateInstanceRequest:
# header (#1), instance_id (#2), repeated message-typed model_ids (#4).
_MODELSUPDATEINSTANCEREQUEST = _descriptor.Descriptor(
  name='ModelsUpdateInstanceRequest',
  full_name='nvidia.clara.platform.ModelsUpdateInstanceRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='header', full_name='nvidia.clara.platform.ModelsUpdateInstanceRequest.header', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='instance_id', full_name='nvidia.clara.platform.ModelsUpdateInstanceRequest.instance_id', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='model_ids', full_name='nvidia.clara.platform.ModelsUpdateInstanceRequest.model_ids', index=2,
      number=4, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=4718,
  serialized_end=4911,
  )
# Descriptor for message ModelsUpdateInstanceResponse:
# header (#1) and instance_id (#2), both message-typed.
_MODELSUPDATEINSTANCERESPONSE = _descriptor.Descriptor(
  name='ModelsUpdateInstanceResponse',
  full_name='nvidia.clara.platform.ModelsUpdateInstanceResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='header', full_name='nvidia.clara.platform.ModelsUpdateInstanceResponse.header', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='instance_id', full_name='nvidia.clara.platform.ModelsUpdateInstanceResponse.instance_id', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=4914,
  serialized_end=5055,
  )
# Descriptor for message ModelsUploadModelRequest:
# header (#1) and details (#2) are message-typed; data (#4) is bytes
# (type=12) with an empty-bytes default. Generated by protoc; do not edit.
_MODELSUPLOADMODELREQUEST = _descriptor.Descriptor(
  name='ModelsUploadModelRequest',
  full_name='nvidia.clara.platform.ModelsUploadModelRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='header', full_name='nvidia.clara.platform.ModelsUploadModelRequest.header', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='details', full_name='nvidia.clara.platform.ModelsUploadModelRequest.details', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='data', full_name='nvidia.clara.platform.ModelsUploadModelRequest.data', index=2,
      number=4, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=5058,
  serialized_end=5206,
  )
# Descriptor for message ModelsUploadModelResponse:
# header (#1) and details (#2), both message-typed.
_MODELSUPLOADMODELRESPONSE = _descriptor.Descriptor(
  name='ModelsUploadModelResponse',
  full_name='nvidia.clara.platform.ModelsUploadModelResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='header', full_name='nvidia.clara.platform.ModelsUploadModelResponse.header', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='details', full_name='nvidia.clara.platform.ModelsUploadModelResponse.details', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=5209,
  serialized_end=5345,
  )
# --- Cross-reference wiring (protoc-generated; do not edit by hand) ---
# Descriptors above are constructed with message_type/enum_type=None because
# the referenced descriptors may not exist yet at construction time. These
# statements patch in the real references afterwards: local message types
# (e.g. _MODELDETAILS), the local _MODELTYPE enum, nested map-entry
# containing_type links, and types imported from
# nvidia/clara/platform/common.proto (common__pb2: _IDENTIFIER,
# _REQUESTHEADER, _RESPONSEHEADER).
_MODELCATALOGDETAILS.fields_by_name['catalog_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_MODELCATALOGDETAILS.fields_by_name['models'].message_type = _MODELDETAILS
_MODELDETAILS_METADATAENTRY.containing_type = _MODELDETAILS
_MODELDETAILS.fields_by_name['model_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_MODELDETAILS.fields_by_name['type'].enum_type = _MODELTYPE
_MODELDETAILS.fields_by_name['metadata'].message_type = _MODELDETAILS_METADATAENTRY
_MODELSADDMETADATAREQUEST_METADATAENTRY.containing_type = _MODELSADDMETADATAREQUEST
_MODELSADDMETADATAREQUEST.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._REQUESTHEADER
_MODELSADDMETADATAREQUEST.fields_by_name['model_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_MODELSADDMETADATAREQUEST.fields_by_name['metadata'].message_type = _MODELSADDMETADATAREQUEST_METADATAENTRY
_MODELSADDMETADATARESPONSE_METADATAENTRY.containing_type = _MODELSADDMETADATARESPONSE
_MODELSADDMETADATARESPONSE.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._RESPONSEHEADER
_MODELSADDMETADATARESPONSE.fields_by_name['model_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_MODELSADDMETADATARESPONSE.fields_by_name['metadata'].message_type = _MODELSADDMETADATARESPONSE_METADATAENTRY
_MODELSCREATECATALOGREQUEST.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._REQUESTHEADER
_MODELSCREATECATALOGRESPONSE.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._RESPONSEHEADER
_MODELSCREATECATALOGRESPONSE.fields_by_name['catalog_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_MODELSCREATEINSTANCEREQUEST.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._REQUESTHEADER
_MODELSCREATEINSTANCERESPONSE.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._RESPONSEHEADER
_MODELSCREATEINSTANCERESPONSE.fields_by_name['instance_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_MODELSDELETECATALOGREQUEST.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._REQUESTHEADER
_MODELSDELETECATALOGREQUEST.fields_by_name['catalog_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_MODELSDELETECATALOGRESPONSE.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._RESPONSEHEADER
_MODELSDELETECATALOGRESPONSE.fields_by_name['catalog_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_MODELSDELETEINSTANCEREQUEST.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._REQUESTHEADER
_MODELSDELETEINSTANCEREQUEST.fields_by_name['instance_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_MODELSDELETEINSTANCERESPONSE.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._RESPONSEHEADER
_MODELSDELETEINSTANCERESPONSE.fields_by_name['instance_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_MODELSDELETEMODELREQUEST.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._REQUESTHEADER
_MODELSDELETEMODELREQUEST.fields_by_name['model_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_MODELSDELETEMODELRESPONSE.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._RESPONSEHEADER
_MODELSDELETEMODELRESPONSE.fields_by_name['model_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_MODELSDOWNLOADMODELREQUEST.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._REQUESTHEADER
_MODELSDOWNLOADMODELREQUEST.fields_by_name['model_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_MODELSDOWNLOADMODELRESPONSE.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._RESPONSEHEADER
_MODELSDOWNLOADMODELRESPONSE.fields_by_name['details'].message_type = _MODELDETAILS
_MODELSLISTCATALOGSREQUEST.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._REQUESTHEADER
_MODELSLISTCATALOGSRESPONSE.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._RESPONSEHEADER
_MODELSLISTCATALOGSRESPONSE.fields_by_name['catalogs'].message_type = _MODELCATALOGDETAILS
_MODELSLISTINSTANCESREQUEST.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._REQUESTHEADER
_MODELSLISTINSTANCESRESPONSE.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._RESPONSEHEADER
_MODELSLISTINSTANCESRESPONSE.fields_by_name['instances'].message_type = _MODELCATALOGDETAILS
_MODELSLISTMODELSREQUEST.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._REQUESTHEADER
_MODELSLISTMODELSRESPONSE.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._RESPONSEHEADER
_MODELSLISTMODELSRESPONSE.fields_by_name['models'].message_type = _MODELDETAILS
_MODELSREADCATALOGREQUEST.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._REQUESTHEADER
_MODELSREADCATALOGREQUEST.fields_by_name['catalog_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_MODELSREADCATALOGRESPONSE.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._RESPONSEHEADER
_MODELSREADCATALOGRESPONSE.fields_by_name['catalog_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_MODELSREADCATALOGRESPONSE.fields_by_name['models'].message_type = _MODELDETAILS
_MODELSREADINSTANCEREQUEST.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._REQUESTHEADER
_MODELSREADINSTANCEREQUEST.fields_by_name['instance_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_MODELSREADINSTANCERESPONSE.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._RESPONSEHEADER
_MODELSREADINSTANCERESPONSE.fields_by_name['instance_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_MODELSREADINSTANCERESPONSE.fields_by_name['models'].message_type = _MODELDETAILS
_MODELSREMOVEMETADATAREQUEST.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._REQUESTHEADER
_MODELSREMOVEMETADATAREQUEST.fields_by_name['model_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_MODELSREMOVEMETADATARESPONSE_METADATAENTRY.containing_type = _MODELSREMOVEMETADATARESPONSE
_MODELSREMOVEMETADATARESPONSE.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._RESPONSEHEADER
_MODELSREMOVEMETADATARESPONSE.fields_by_name['model_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_MODELSREMOVEMETADATARESPONSE.fields_by_name['metadata'].message_type = _MODELSREMOVEMETADATARESPONSE_METADATAENTRY
_MODELSUPDATECATALOGREQUEST.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._REQUESTHEADER
_MODELSUPDATECATALOGREQUEST.fields_by_name['catalog_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_MODELSUPDATECATALOGREQUEST.fields_by_name['model_ids'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_MODELSUPDATECATALOGRESPONSE.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._RESPONSEHEADER
_MODELSUPDATECATALOGRESPONSE.fields_by_name['catalog_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_MODELSUPDATEINSTANCEREQUEST.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._REQUESTHEADER
_MODELSUPDATEINSTANCEREQUEST.fields_by_name['instance_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_MODELSUPDATEINSTANCEREQUEST.fields_by_name['model_ids'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_MODELSUPDATEINSTANCERESPONSE.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._RESPONSEHEADER
_MODELSUPDATEINSTANCERESPONSE.fields_by_name['instance_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_MODELSUPLOADMODELREQUEST.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._REQUESTHEADER
_MODELSUPLOADMODELREQUEST.fields_by_name['details'].message_type = _MODELDETAILS
_MODELSUPLOADMODELRESPONSE.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._RESPONSEHEADER
_MODELSUPLOADMODELRESPONSE.fields_by_name['details'].message_type = _MODELDETAILS
DESCRIPTOR.message_types_by_name['ModelCatalogDetails'] = _MODELCATALOGDETAILS
DESCRIPTOR.message_types_by_name['ModelDetails'] = _MODELDETAILS
DESCRIPTOR.message_types_by_name['ModelsAddMetadataRequest'] = _MODELSADDMETADATAREQUEST
DESCRIPTOR.message_types_by_name['ModelsAddMetadataResponse'] = _MODELSADDMETADATARESPONSE
DESCRIPTOR.message_types_by_name['ModelsCreateCatalogRequest'] = _MODELSCREATECATALOGREQUEST
DESCRIPTOR.message_types_by_name['ModelsCreateCatalogResponse'] = _MODELSCREATECATALOGRESPONSE
DESCRIPTOR.message_types_by_name['ModelsCreateInstanceRequest'] = _MODELSCREATEINSTANCEREQUEST
DESCRIPTOR.message_types_by_name['ModelsCreateInstanceResponse'] = _MODELSCREATEINSTANCERESPONSE
DESCRIPTOR.message_types_by_name['ModelsDeleteCatalogRequest'] = _MODELSDELETECATALOGREQUEST
DESCRIPTOR.message_types_by_name['ModelsDeleteCatalogResponse'] = _MODELSDELETECATALOGRESPONSE
DESCRIPTOR.message_types_by_name['ModelsDeleteInstanceRequest'] = _MODELSDELETEINSTANCEREQUEST
DESCRIPTOR.message_types_by_name['ModelsDeleteInstanceResponse'] = _MODELSDELETEINSTANCERESPONSE
DESCRIPTOR.message_types_by_name['ModelsDeleteModelRequest'] = _MODELSDELETEMODELREQUEST
DESCRIPTOR.message_types_by_name['ModelsDeleteModelResponse'] = _MODELSDELETEMODELRESPONSE
DESCRIPTOR.message_types_by_name['ModelsDownloadModelRequest'] = _MODELSDOWNLOADMODELREQUEST
DESCRIPTOR.message_types_by_name['ModelsDownloadModelResponse'] = _MODELSDOWNLOADMODELRESPONSE
DESCRIPTOR.message_types_by_name['ModelsListCatalogsRequest'] = _MODELSLISTCATALOGSREQUEST
DESCRIPTOR.message_types_by_name['ModelsListCatalogsResponse'] = _MODELSLISTCATALOGSRESPONSE
DESCRIPTOR.message_types_by_name['ModelsListInstancesRequest'] = _MODELSLISTINSTANCESREQUEST
DESCRIPTOR.message_types_by_name['ModelsListInstancesResponse'] = _MODELSLISTINSTANCESRESPONSE
DESCRIPTOR.message_types_by_name['ModelsListModelsRequest'] = _MODELSLISTMODELSREQUEST
DESCRIPTOR.message_types_by_name['ModelsListModelsResponse'] = _MODELSLISTMODELSRESPONSE
DESCRIPTOR.message_types_by_name['ModelsReadCatalogRequest'] = _MODELSREADCATALOGREQUEST
DESCRIPTOR.message_types_by_name['ModelsReadCatalogResponse'] = _MODELSREADCATALOGRESPONSE
DESCRIPTOR.message_types_by_name['ModelsReadInstanceRequest'] = _MODELSREADINSTANCEREQUEST
DESCRIPTOR.message_types_by_name['ModelsReadInstanceResponse'] = _MODELSREADINSTANCERESPONSE
DESCRIPTOR.message_types_by_name['ModelsRemoveMetadataRequest'] = _MODELSREMOVEMETADATAREQUEST
DESCRIPTOR.message_types_by_name['ModelsRemoveMetadataResponse'] = _MODELSREMOVEMETADATARESPONSE
DESCRIPTOR.message_types_by_name['ModelsUpdateCatalogRequest'] = _MODELSUPDATECATALOGREQUEST
DESCRIPTOR.message_types_by_name['ModelsUpdateCatalogResponse'] = _MODELSUPDATECATALOGRESPONSE
DESCRIPTOR.message_types_by_name['ModelsUpdateInstanceRequest'] = _MODELSUPDATEINSTANCEREQUEST
DESCRIPTOR.message_types_by_name['ModelsUpdateInstanceResponse'] = _MODELSUPDATEINSTANCERESPONSE
DESCRIPTOR.message_types_by_name['ModelsUploadModelRequest'] = _MODELSUPLOADMODELREQUEST
DESCRIPTOR.message_types_by_name['ModelsUploadModelResponse'] = _MODELSUPLOADMODELRESPONSE
DESCRIPTOR.enum_types_by_name['ModelType'] = _MODELTYPE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# ---------------------------------------------------------------------------
# Concrete message classes (generated — do not hand-edit).
# _reflection.GeneratedProtocolMessageType builds each Python class at import
# time from its descriptor; RegisterMessage adds it to the default symbol
# database.  Messages with map fields get a nested MetadataEntry class built
# from the synthetic map-entry descriptor and registered separately.
# ---------------------------------------------------------------------------
ModelCatalogDetails = _reflection.GeneratedProtocolMessageType('ModelCatalogDetails', (_message.Message,), dict(
  DESCRIPTOR = _MODELCATALOGDETAILS,
  __module__ = 'nvidia.clara.platform.models_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ModelCatalogDetails)
  ))
_sym_db.RegisterMessage(ModelCatalogDetails)
ModelDetails = _reflection.GeneratedProtocolMessageType('ModelDetails', (_message.Message,), dict(
  MetadataEntry = _reflection.GeneratedProtocolMessageType('MetadataEntry', (_message.Message,), dict(
    DESCRIPTOR = _MODELDETAILS_METADATAENTRY,
    __module__ = 'nvidia.clara.platform.models_pb2'
    # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ModelDetails.MetadataEntry)
    ))
  ,
  DESCRIPTOR = _MODELDETAILS,
  __module__ = 'nvidia.clara.platform.models_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ModelDetails)
  ))
_sym_db.RegisterMessage(ModelDetails)
_sym_db.RegisterMessage(ModelDetails.MetadataEntry)
ModelsAddMetadataRequest = _reflection.GeneratedProtocolMessageType('ModelsAddMetadataRequest', (_message.Message,), dict(
  MetadataEntry = _reflection.GeneratedProtocolMessageType('MetadataEntry', (_message.Message,), dict(
    DESCRIPTOR = _MODELSADDMETADATAREQUEST_METADATAENTRY,
    __module__ = 'nvidia.clara.platform.models_pb2'
    # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ModelsAddMetadataRequest.MetadataEntry)
    ))
  ,
  DESCRIPTOR = _MODELSADDMETADATAREQUEST,
  __module__ = 'nvidia.clara.platform.models_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ModelsAddMetadataRequest)
  ))
_sym_db.RegisterMessage(ModelsAddMetadataRequest)
_sym_db.RegisterMessage(ModelsAddMetadataRequest.MetadataEntry)
ModelsAddMetadataResponse = _reflection.GeneratedProtocolMessageType('ModelsAddMetadataResponse', (_message.Message,), dict(
  MetadataEntry = _reflection.GeneratedProtocolMessageType('MetadataEntry', (_message.Message,), dict(
    DESCRIPTOR = _MODELSADDMETADATARESPONSE_METADATAENTRY,
    __module__ = 'nvidia.clara.platform.models_pb2'
    # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ModelsAddMetadataResponse.MetadataEntry)
    ))
  ,
  DESCRIPTOR = _MODELSADDMETADATARESPONSE,
  __module__ = 'nvidia.clara.platform.models_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ModelsAddMetadataResponse)
  ))
_sym_db.RegisterMessage(ModelsAddMetadataResponse)
_sym_db.RegisterMessage(ModelsAddMetadataResponse.MetadataEntry)
# Plain request/response messages (no map fields) follow the same pattern.
ModelsCreateCatalogRequest = _reflection.GeneratedProtocolMessageType('ModelsCreateCatalogRequest', (_message.Message,), dict(
  DESCRIPTOR = _MODELSCREATECATALOGREQUEST,
  __module__ = 'nvidia.clara.platform.models_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ModelsCreateCatalogRequest)
  ))
_sym_db.RegisterMessage(ModelsCreateCatalogRequest)
ModelsCreateCatalogResponse = _reflection.GeneratedProtocolMessageType('ModelsCreateCatalogResponse', (_message.Message,), dict(
  DESCRIPTOR = _MODELSCREATECATALOGRESPONSE,
  __module__ = 'nvidia.clara.platform.models_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ModelsCreateCatalogResponse)
  ))
_sym_db.RegisterMessage(ModelsCreateCatalogResponse)
ModelsCreateInstanceRequest = _reflection.GeneratedProtocolMessageType('ModelsCreateInstanceRequest', (_message.Message,), dict(
  DESCRIPTOR = _MODELSCREATEINSTANCEREQUEST,
  __module__ = 'nvidia.clara.platform.models_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ModelsCreateInstanceRequest)
  ))
_sym_db.RegisterMessage(ModelsCreateInstanceRequest)
ModelsCreateInstanceResponse = _reflection.GeneratedProtocolMessageType('ModelsCreateInstanceResponse', (_message.Message,), dict(
  DESCRIPTOR = _MODELSCREATEINSTANCERESPONSE,
  __module__ = 'nvidia.clara.platform.models_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ModelsCreateInstanceResponse)
  ))
_sym_db.RegisterMessage(ModelsCreateInstanceResponse)
ModelsDeleteCatalogRequest = _reflection.GeneratedProtocolMessageType('ModelsDeleteCatalogRequest', (_message.Message,), dict(
  DESCRIPTOR = _MODELSDELETECATALOGREQUEST,
  __module__ = 'nvidia.clara.platform.models_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ModelsDeleteCatalogRequest)
  ))
_sym_db.RegisterMessage(ModelsDeleteCatalogRequest)
ModelsDeleteCatalogResponse = _reflection.GeneratedProtocolMessageType('ModelsDeleteCatalogResponse', (_message.Message,), dict(
  DESCRIPTOR = _MODELSDELETECATALOGRESPONSE,
  __module__ = 'nvidia.clara.platform.models_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ModelsDeleteCatalogResponse)
  ))
_sym_db.RegisterMessage(ModelsDeleteCatalogResponse)
ModelsDeleteInstanceRequest = _reflection.GeneratedProtocolMessageType('ModelsDeleteInstanceRequest', (_message.Message,), dict(
  DESCRIPTOR = _MODELSDELETEINSTANCEREQUEST,
  __module__ = 'nvidia.clara.platform.models_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ModelsDeleteInstanceRequest)
  ))
_sym_db.RegisterMessage(ModelsDeleteInstanceRequest)
ModelsDeleteInstanceResponse = _reflection.GeneratedProtocolMessageType('ModelsDeleteInstanceResponse', (_message.Message,), dict(
  DESCRIPTOR = _MODELSDELETEINSTANCERESPONSE,
  __module__ = 'nvidia.clara.platform.models_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ModelsDeleteInstanceResponse)
  ))
_sym_db.RegisterMessage(ModelsDeleteInstanceResponse)
ModelsDeleteModelRequest = _reflection.GeneratedProtocolMessageType('ModelsDeleteModelRequest', (_message.Message,), dict(
  DESCRIPTOR = _MODELSDELETEMODELREQUEST,
  __module__ = 'nvidia.clara.platform.models_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ModelsDeleteModelRequest)
  ))
_sym_db.RegisterMessage(ModelsDeleteModelRequest)
ModelsDeleteModelResponse = _reflection.GeneratedProtocolMessageType('ModelsDeleteModelResponse', (_message.Message,), dict(
  DESCRIPTOR = _MODELSDELETEMODELRESPONSE,
  __module__ = 'nvidia.clara.platform.models_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ModelsDeleteModelResponse)
  ))
_sym_db.RegisterMessage(ModelsDeleteModelResponse)
ModelsDownloadModelRequest = _reflection.GeneratedProtocolMessageType('ModelsDownloadModelRequest', (_message.Message,), dict(
  DESCRIPTOR = _MODELSDOWNLOADMODELREQUEST,
  __module__ = 'nvidia.clara.platform.models_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ModelsDownloadModelRequest)
  ))
_sym_db.RegisterMessage(ModelsDownloadModelRequest)
ModelsDownloadModelResponse = _reflection.GeneratedProtocolMessageType('ModelsDownloadModelResponse', (_message.Message,), dict(
  DESCRIPTOR = _MODELSDOWNLOADMODELRESPONSE,
  __module__ = 'nvidia.clara.platform.models_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ModelsDownloadModelResponse)
  ))
_sym_db.RegisterMessage(ModelsDownloadModelResponse)
ModelsListCatalogsRequest = _reflection.GeneratedProtocolMessageType('ModelsListCatalogsRequest', (_message.Message,), dict(
  DESCRIPTOR = _MODELSLISTCATALOGSREQUEST,
  __module__ = 'nvidia.clara.platform.models_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ModelsListCatalogsRequest)
  ))
_sym_db.RegisterMessage(ModelsListCatalogsRequest)
ModelsListCatalogsResponse = _reflection.GeneratedProtocolMessageType('ModelsListCatalogsResponse', (_message.Message,), dict(
  DESCRIPTOR = _MODELSLISTCATALOGSRESPONSE,
  __module__ = 'nvidia.clara.platform.models_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ModelsListCatalogsResponse)
  ))
_sym_db.RegisterMessage(ModelsListCatalogsResponse)
ModelsListInstancesRequest = _reflection.GeneratedProtocolMessageType('ModelsListInstancesRequest', (_message.Message,), dict(
  DESCRIPTOR = _MODELSLISTINSTANCESREQUEST,
  __module__ = 'nvidia.clara.platform.models_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ModelsListInstancesRequest)
  ))
_sym_db.RegisterMessage(ModelsListInstancesRequest)
ModelsListInstancesResponse = _reflection.GeneratedProtocolMessageType('ModelsListInstancesResponse', (_message.Message,), dict(
  DESCRIPTOR = _MODELSLISTINSTANCESRESPONSE,
  __module__ = 'nvidia.clara.platform.models_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ModelsListInstancesResponse)
  ))
_sym_db.RegisterMessage(ModelsListInstancesResponse)
ModelsListModelsRequest = _reflection.GeneratedProtocolMessageType('ModelsListModelsRequest', (_message.Message,), dict(
  DESCRIPTOR = _MODELSLISTMODELSREQUEST,
  __module__ = 'nvidia.clara.platform.models_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ModelsListModelsRequest)
  ))
_sym_db.RegisterMessage(ModelsListModelsRequest)
ModelsListModelsResponse = _reflection.GeneratedProtocolMessageType('ModelsListModelsResponse', (_message.Message,), dict(
  DESCRIPTOR = _MODELSLISTMODELSRESPONSE,
  __module__ = 'nvidia.clara.platform.models_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ModelsListModelsResponse)
  ))
_sym_db.RegisterMessage(ModelsListModelsResponse)
ModelsReadCatalogRequest = _reflection.GeneratedProtocolMessageType('ModelsReadCatalogRequest', (_message.Message,), dict(
  DESCRIPTOR = _MODELSREADCATALOGREQUEST,
  __module__ = 'nvidia.clara.platform.models_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ModelsReadCatalogRequest)
  ))
_sym_db.RegisterMessage(ModelsReadCatalogRequest)
ModelsReadCatalogResponse = _reflection.GeneratedProtocolMessageType('ModelsReadCatalogResponse', (_message.Message,), dict(
  DESCRIPTOR = _MODELSREADCATALOGRESPONSE,
  __module__ = 'nvidia.clara.platform.models_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ModelsReadCatalogResponse)
  ))
_sym_db.RegisterMessage(ModelsReadCatalogResponse)
ModelsReadInstanceRequest = _reflection.GeneratedProtocolMessageType('ModelsReadInstanceRequest', (_message.Message,), dict(
  DESCRIPTOR = _MODELSREADINSTANCEREQUEST,
  __module__ = 'nvidia.clara.platform.models_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ModelsReadInstanceRequest)
  ))
_sym_db.RegisterMessage(ModelsReadInstanceRequest)
ModelsReadInstanceResponse = _reflection.GeneratedProtocolMessageType('ModelsReadInstanceResponse', (_message.Message,), dict(
  DESCRIPTOR = _MODELSREADINSTANCERESPONSE,
  __module__ = 'nvidia.clara.platform.models_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ModelsReadInstanceResponse)
  ))
_sym_db.RegisterMessage(ModelsReadInstanceResponse)
ModelsRemoveMetadataRequest = _reflection.GeneratedProtocolMessageType('ModelsRemoveMetadataRequest', (_message.Message,), dict(
  DESCRIPTOR = _MODELSREMOVEMETADATAREQUEST,
  __module__ = 'nvidia.clara.platform.models_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ModelsRemoveMetadataRequest)
  ))
_sym_db.RegisterMessage(ModelsRemoveMetadataRequest)
ModelsRemoveMetadataResponse = _reflection.GeneratedProtocolMessageType('ModelsRemoveMetadataResponse', (_message.Message,), dict(
  MetadataEntry = _reflection.GeneratedProtocolMessageType('MetadataEntry', (_message.Message,), dict(
    DESCRIPTOR = _MODELSREMOVEMETADATARESPONSE_METADATAENTRY,
    __module__ = 'nvidia.clara.platform.models_pb2'
    # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ModelsRemoveMetadataResponse.MetadataEntry)
    ))
  ,
  DESCRIPTOR = _MODELSREMOVEMETADATARESPONSE,
  __module__ = 'nvidia.clara.platform.models_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ModelsRemoveMetadataResponse)
  ))
_sym_db.RegisterMessage(ModelsRemoveMetadataResponse)
_sym_db.RegisterMessage(ModelsRemoveMetadataResponse.MetadataEntry)
ModelsUpdateCatalogRequest = _reflection.GeneratedProtocolMessageType('ModelsUpdateCatalogRequest', (_message.Message,), dict(
  DESCRIPTOR = _MODELSUPDATECATALOGREQUEST,
  __module__ = 'nvidia.clara.platform.models_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ModelsUpdateCatalogRequest)
  ))
_sym_db.RegisterMessage(ModelsUpdateCatalogRequest)
ModelsUpdateCatalogResponse = _reflection.GeneratedProtocolMessageType('ModelsUpdateCatalogResponse', (_message.Message,), dict(
  DESCRIPTOR = _MODELSUPDATECATALOGRESPONSE,
  __module__ = 'nvidia.clara.platform.models_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ModelsUpdateCatalogResponse)
  ))
_sym_db.RegisterMessage(ModelsUpdateCatalogResponse)
ModelsUpdateInstanceRequest = _reflection.GeneratedProtocolMessageType('ModelsUpdateInstanceRequest', (_message.Message,), dict(
  DESCRIPTOR = _MODELSUPDATEINSTANCEREQUEST,
  __module__ = 'nvidia.clara.platform.models_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ModelsUpdateInstanceRequest)
  ))
_sym_db.RegisterMessage(ModelsUpdateInstanceRequest)
ModelsUpdateInstanceResponse = _reflection.GeneratedProtocolMessageType('ModelsUpdateInstanceResponse', (_message.Message,), dict(
  DESCRIPTOR = _MODELSUPDATEINSTANCERESPONSE,
  __module__ = 'nvidia.clara.platform.models_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ModelsUpdateInstanceResponse)
  ))
_sym_db.RegisterMessage(ModelsUpdateInstanceResponse)
ModelsUploadModelRequest = _reflection.GeneratedProtocolMessageType('ModelsUploadModelRequest', (_message.Message,), dict(
  DESCRIPTOR = _MODELSUPLOADMODELREQUEST,
  __module__ = 'nvidia.clara.platform.models_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ModelsUploadModelRequest)
  ))
_sym_db.RegisterMessage(ModelsUploadModelRequest)
ModelsUploadModelResponse = _reflection.GeneratedProtocolMessageType('ModelsUploadModelResponse', (_message.Message,), dict(
  DESCRIPTOR = _MODELSUPLOADMODELRESPONSE,
  __module__ = 'nvidia.clara.platform.models_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ModelsUploadModelResponse)
  ))
_sym_db.RegisterMessage(ModelsUploadModelResponse)
# Drop the eagerly-set options on the file descriptor and on the synthetic
# map-entry types; presumably this defers option parsing to the serialized
# form (standard protoc output — TODO confirm against the protobuf runtime
# version in use).
DESCRIPTOR._options = None
_MODELDETAILS_METADATAENTRY._options = None
_MODELSADDMETADATAREQUEST_METADATAENTRY._options = None
_MODELSADDMETADATARESPONSE_METADATAENTRY._options = None
_MODELSREMOVEMETADATARESPONSE_METADATAENTRY._options = None
# ---------------------------------------------------------------------------
# Service descriptor for nvidia.clara.platform.Models: one MethodDescriptor
# per RPC, indexed in declaration order.  serialized_start/end locate the
# service definition inside the serialized FileDescriptorProto.
# ---------------------------------------------------------------------------
_MODELS = _descriptor.ServiceDescriptor(
  name='Models',
  full_name='nvidia.clara.platform.Models',
  file=DESCRIPTOR,
  index=0,
  serialized_options=None,
  serialized_start=5464,
  serialized_end=7383,
  methods=[
  _descriptor.MethodDescriptor(
    name='AddMetadata',
    full_name='nvidia.clara.platform.Models.AddMetadata',
    index=0,
    containing_service=None,
    input_type=_MODELSADDMETADATAREQUEST,
    output_type=_MODELSADDMETADATARESPONSE,
    serialized_options=None,
  ),
  _descriptor.MethodDescriptor(
    name='CreateCatalog',
    full_name='nvidia.clara.platform.Models.CreateCatalog',
    index=1,
    containing_service=None,
    input_type=_MODELSCREATECATALOGREQUEST,
    output_type=_MODELSCREATECATALOGRESPONSE,
    serialized_options=None,
  ),
  _descriptor.MethodDescriptor(
    name='CreateInstance',
    full_name='nvidia.clara.platform.Models.CreateInstance',
    index=2,
    containing_service=None,
    input_type=_MODELSCREATEINSTANCEREQUEST,
    output_type=_MODELSCREATEINSTANCERESPONSE,
    serialized_options=None,
  ),
  _descriptor.MethodDescriptor(
    name='DeleteCatalog',
    full_name='nvidia.clara.platform.Models.DeleteCatalog',
    index=3,
    containing_service=None,
    input_type=_MODELSDELETECATALOGREQUEST,
    output_type=_MODELSDELETECATALOGRESPONSE,
    serialized_options=None,
  ),
  _descriptor.MethodDescriptor(
    name='DeleteInstance',
    full_name='nvidia.clara.platform.Models.DeleteInstance',
    index=4,
    containing_service=None,
    input_type=_MODELSDELETEINSTANCEREQUEST,
    output_type=_MODELSDELETEINSTANCERESPONSE,
    serialized_options=None,
  ),
  _descriptor.MethodDescriptor(
    name='DeleteModel',
    full_name='nvidia.clara.platform.Models.DeleteModel',
    index=5,
    containing_service=None,
    input_type=_MODELSDELETEMODELREQUEST,
    output_type=_MODELSDELETEMODELRESPONSE,
    serialized_options=None,
  ),
  _descriptor.MethodDescriptor(
    name='DownloadModel',
    full_name='nvidia.clara.platform.Models.DownloadModel',
    index=6,
    containing_service=None,
    input_type=_MODELSDOWNLOADMODELREQUEST,
    output_type=_MODELSDOWNLOADMODELRESPONSE,
    serialized_options=None,
  ),
  _descriptor.MethodDescriptor(
    name='ListCatalogs',
    full_name='nvidia.clara.platform.Models.ListCatalogs',
    index=7,
    containing_service=None,
    input_type=_MODELSLISTCATALOGSREQUEST,
    output_type=_MODELSLISTCATALOGSRESPONSE,
    serialized_options=None,
  ),
  _descriptor.MethodDescriptor(
    name='ListInstances',
    full_name='nvidia.clara.platform.Models.ListInstances',
    index=8,
    containing_service=None,
    input_type=_MODELSLISTINSTANCESREQUEST,
    output_type=_MODELSLISTINSTANCESRESPONSE,
    serialized_options=None,
  ),
  _descriptor.MethodDescriptor(
    name='ListModels',
    full_name='nvidia.clara.platform.Models.ListModels',
    index=9,
    containing_service=None,
    input_type=_MODELSLISTMODELSREQUEST,
    output_type=_MODELSLISTMODELSRESPONSE,
    serialized_options=None,
  ),
  _descriptor.MethodDescriptor(
    name='ReadCatalog',
    full_name='nvidia.clara.platform.Models.ReadCatalog',
    index=10,
    containing_service=None,
    input_type=_MODELSREADCATALOGREQUEST,
    output_type=_MODELSREADCATALOGRESPONSE,
    serialized_options=None,
  ),
  _descriptor.MethodDescriptor(
    name='ReadInstance',
    full_name='nvidia.clara.platform.Models.ReadInstance',
    index=11,
    containing_service=None,
    input_type=_MODELSREADINSTANCEREQUEST,
    output_type=_MODELSREADINSTANCERESPONSE,
    serialized_options=None,
  ),
  _descriptor.MethodDescriptor(
    name='RemoveMetadata',
    full_name='nvidia.clara.platform.Models.RemoveMetadata',
    index=12,
    containing_service=None,
    input_type=_MODELSREMOVEMETADATAREQUEST,
    output_type=_MODELSREMOVEMETADATARESPONSE,
    serialized_options=None,
  ),
  _descriptor.MethodDescriptor(
    name='UpdateCatalog',
    full_name='nvidia.clara.platform.Models.UpdateCatalog',
    index=13,
    containing_service=None,
    input_type=_MODELSUPDATECATALOGREQUEST,
    output_type=_MODELSUPDATECATALOGRESPONSE,
    serialized_options=None,
  ),
  _descriptor.MethodDescriptor(
    name='UpdateInstance',
    full_name='nvidia.clara.platform.Models.UpdateInstance',
    index=14,
    containing_service=None,
    input_type=_MODELSUPDATEINSTANCEREQUEST,
    output_type=_MODELSUPDATEINSTANCERESPONSE,
    serialized_options=None,
  ),
  _descriptor.MethodDescriptor(
    name='UploadModel',
    full_name='nvidia.clara.platform.Models.UploadModel',
    index=15,
    containing_service=None,
    input_type=_MODELSUPLOADMODELREQUEST,
    output_type=_MODELSUPLOADMODELRESPONSE,
    serialized_options=None,
  ),
])
# Register the service and expose it on the file descriptor.
_sym_db.RegisterServiceDescriptor(_MODELS)
DESCRIPTOR.services_by_name['Models'] = _MODELS
# @@protoc_insertion_point(module_scope)
| clara-platform-python-client-main | nvidia_clara/grpc/models_pb2.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia/clara/platform/clara.proto
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from nvidia_clara.grpc import pipelines_pb2 as nvidia_dot_clara_dot_platform_dot_pipelines__pb2
class PipelinesStub(object):
  # Client-side stub for the nvidia.clara.platform.Pipelines gRPC service.
  # (The source .proto carries no service-level documentation comment.)
  pass

  def __init__(self, channel):
    """Constructor.

    Args:
      channel: A grpc.Channel.

    Each attribute below is a callable RPC handle bound to one service
    method; serialization uses the generated pipelines_pb2 message classes.
    """
    # Unary request -> unary response.
    self.AddMetadata = channel.unary_unary(
        '/nvidia.clara.platform.Pipelines/AddMetadata',
        request_serializer=nvidia_dot_clara_dot_platform_dot_pipelines__pb2.PipelinesAddMetadataRequest.SerializeToString,
        response_deserializer=nvidia_dot_clara_dot_platform_dot_pipelines__pb2.PipelinesAddMetadataResponse.FromString,
        )
    # Client-streaming request -> unary response.
    self.Create = channel.stream_unary(
        '/nvidia.clara.platform.Pipelines/Create',
        request_serializer=nvidia_dot_clara_dot_platform_dot_pipelines__pb2.PipelinesCreateRequest.SerializeToString,
        response_deserializer=nvidia_dot_clara_dot_platform_dot_pipelines__pb2.PipelinesCreateResponse.FromString,
        )
    # Unary request -> server-streaming response.
    self.Details = channel.unary_stream(
        '/nvidia.clara.platform.Pipelines/Details',
        request_serializer=nvidia_dot_clara_dot_platform_dot_pipelines__pb2.PipelinesDetailsRequest.SerializeToString,
        response_deserializer=nvidia_dot_clara_dot_platform_dot_pipelines__pb2.PipelinesDetailsResponse.FromString,
        )
    # Unary request -> server-streaming response.
    self.List = channel.unary_stream(
        '/nvidia.clara.platform.Pipelines/List',
        request_serializer=nvidia_dot_clara_dot_platform_dot_pipelines__pb2.PipelinesListRequest.SerializeToString,
        response_deserializer=nvidia_dot_clara_dot_platform_dot_pipelines__pb2.PipelinesListResponse.FromString,
        )
    # Unary request -> unary response.
    self.Remove = channel.unary_unary(
        '/nvidia.clara.platform.Pipelines/Remove',
        request_serializer=nvidia_dot_clara_dot_platform_dot_pipelines__pb2.PipelinesRemoveRequest.SerializeToString,
        response_deserializer=nvidia_dot_clara_dot_platform_dot_pipelines__pb2.PipelinesRemoveResponse.FromString,
        )
    # Unary request -> unary response.
    self.RemoveMetadata = channel.unary_unary(
        '/nvidia.clara.platform.Pipelines/RemoveMetadata',
        request_serializer=nvidia_dot_clara_dot_platform_dot_pipelines__pb2.PipelinesRemoveMetadataRequest.SerializeToString,
        response_deserializer=nvidia_dot_clara_dot_platform_dot_pipelines__pb2.PipelinesRemoveMetadataResponse.FromString,
        )
    # Client-streaming request -> unary response.
    self.Update = channel.stream_unary(
        '/nvidia.clara.platform.Pipelines/Update',
        request_serializer=nvidia_dot_clara_dot_platform_dot_pipelines__pb2.PipelinesUpdateRequest.SerializeToString,
        response_deserializer=nvidia_dot_clara_dot_platform_dot_pipelines__pb2.PipelinesUpdateResponse.FromString,
        )
class PipelinesServicer(object):
  # Server-side base class for the nvidia.clara.platform.Pipelines service.
  # Subclass and override the methods below; each default implementation
  # reports UNIMPLEMENTED to the client and raises.
  pass

  def AddMetadata(self, request, context):
    """Requests the addition of metadata to a pipeline.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def Create(self, request_iterator, context):
    """Requests the creation of a new pipeline.

    Client-streaming: *request_iterator* yields the request messages.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def Details(self, request, context):
    """Requests details of a pipeline.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def List(self, request, context):
    """Requests a listing of all pipelines known by the service.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def Remove(self, request, context):
    """Requests the removal of a pipeline definition from the service.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def RemoveMetadata(self, request, context):
    """Requests the removal of specified metadata of a pipeline.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def Update(self, request_iterator, context):
    """Requests an update to a known pipeline definition.

    Client-streaming: *request_iterator* yields the request messages.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
def add_PipelinesServicer_to_server(servicer, server):
  """Register *servicer*'s RPC handlers with *server*.

  Builds one rpc_method_handler per service method (matching each method's
  streaming arity) and attaches them under the fully-qualified service name
  'nvidia.clara.platform.Pipelines'.
  """
  rpc_method_handlers = {
      'AddMetadata': grpc.unary_unary_rpc_method_handler(
          servicer.AddMetadata,
          request_deserializer=nvidia_dot_clara_dot_platform_dot_pipelines__pb2.PipelinesAddMetadataRequest.FromString,
          response_serializer=nvidia_dot_clara_dot_platform_dot_pipelines__pb2.PipelinesAddMetadataResponse.SerializeToString,
      ),
      'Create': grpc.stream_unary_rpc_method_handler(
          servicer.Create,
          request_deserializer=nvidia_dot_clara_dot_platform_dot_pipelines__pb2.PipelinesCreateRequest.FromString,
          response_serializer=nvidia_dot_clara_dot_platform_dot_pipelines__pb2.PipelinesCreateResponse.SerializeToString,
      ),
      'Details': grpc.unary_stream_rpc_method_handler(
          servicer.Details,
          request_deserializer=nvidia_dot_clara_dot_platform_dot_pipelines__pb2.PipelinesDetailsRequest.FromString,
          response_serializer=nvidia_dot_clara_dot_platform_dot_pipelines__pb2.PipelinesDetailsResponse.SerializeToString,
      ),
      'List': grpc.unary_stream_rpc_method_handler(
          servicer.List,
          request_deserializer=nvidia_dot_clara_dot_platform_dot_pipelines__pb2.PipelinesListRequest.FromString,
          response_serializer=nvidia_dot_clara_dot_platform_dot_pipelines__pb2.PipelinesListResponse.SerializeToString,
      ),
      'Remove': grpc.unary_unary_rpc_method_handler(
          servicer.Remove,
          request_deserializer=nvidia_dot_clara_dot_platform_dot_pipelines__pb2.PipelinesRemoveRequest.FromString,
          response_serializer=nvidia_dot_clara_dot_platform_dot_pipelines__pb2.PipelinesRemoveResponse.SerializeToString,
      ),
      'RemoveMetadata': grpc.unary_unary_rpc_method_handler(
          servicer.RemoveMetadata,
          request_deserializer=nvidia_dot_clara_dot_platform_dot_pipelines__pb2.PipelinesRemoveMetadataRequest.FromString,
          response_serializer=nvidia_dot_clara_dot_platform_dot_pipelines__pb2.PipelinesRemoveMetadataResponse.SerializeToString,
      ),
      'Update': grpc.stream_unary_rpc_method_handler(
          servicer.Update,
          request_deserializer=nvidia_dot_clara_dot_platform_dot_pipelines__pb2.PipelinesUpdateRequest.FromString,
          response_serializer=nvidia_dot_clara_dot_platform_dot_pipelines__pb2.PipelinesUpdateResponse.SerializeToString,
      ),
  }
  generic_handler = grpc.method_handlers_generic_handler(
      'nvidia.clara.platform.Pipelines', rpc_method_handlers)
  server.add_generic_rpc_handlers((generic_handler,))
| clara-platform-python-client-main | nvidia_clara/grpc/pipelines_pb2_grpc.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia/clara/platform/clara.proto
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from nvidia_clara.grpc import payloads_pb2 as nvidia_dot_clara_dot_platform_dot_payloads__pb2
class PayloadsStub(object):
    """Client-side stub for the nvidia.clara.platform.Payloads gRPC service.

    Each public attribute is a callable RPC bound to the supplied channel.
    (No documentation comment was generated for the service in the .proto file.)
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        messages = nvidia_dot_clara_dot_platform_dot_payloads__pb2
        # (attribute name, channel factory, request message, response message);
        # the factory choice encodes each RPC's streaming arity from the .proto.
        rpc_specs = (
            ('AddMetadata', channel.unary_unary,
             messages.PayloadsAddMetadataRequest, messages.PayloadsAddMetadataResponse),
            ('Create', channel.unary_unary,
             messages.PayloadsCreateRequest, messages.PayloadsCreateResponse),
            ('Delete', channel.unary_unary,
             messages.PayloadsDeleteRequest, messages.PayloadsDeleteResponse),
            ('Details', channel.unary_stream,
             messages.PayloadsDetailsRequest, messages.PayloadsDetailsResponse),
            ('Download', channel.unary_stream,
             messages.PayloadsDownloadRequest, messages.PayloadsDownloadResponse),
            ('Remove', channel.unary_unary,
             messages.PayloadsRemoveRequest, messages.PayloadsRemoveResponse),
            ('RemoveMetadata', channel.unary_unary,
             messages.PayloadsRemoveMetadataRequest, messages.PayloadsRemoveMetadataResponse),
            ('Upload', channel.stream_unary,
             messages.PayloadsUploadRequest, messages.PayloadsUploadResponse),
        )
        for rpc_name, make_callable, request_cls, response_cls in rpc_specs:
            setattr(self, rpc_name, make_callable(
                '/nvidia.clara.platform.Payloads/' + rpc_name,
                request_serializer=request_cls.SerializeToString,
                response_deserializer=response_cls.FromString,
            ))
class PayloadsServicer(object):
    """Server-side skeleton for the nvidia.clara.platform.Payloads service.

    Every handler below is a placeholder that reports UNIMPLEMENTED; subclass
    and override the methods to provide a real implementation.
    (No documentation comment was generated for the service in the .proto file.)
    """

    def _unimplemented(self, context):
        # Shared failure path: flag the RPC as unimplemented on the gRPC
        # context, then raise so non-RPC callers also fail loudly.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def AddMetadata(self, request, context):
        """Requests the addition of metadata to a payload."""
        self._unimplemented(context)

    def Create(self, request, context):
        """Requests the creation of a new payload."""
        self._unimplemented(context)

    def Delete(self, request, context):
        """Requests the deletion of a known payload."""
        self._unimplemented(context)

    def Details(self, request, context):
        """Requests the details (file listing) of a known payload."""
        self._unimplemented(context)

    def Download(self, request, context):
        """Requests the download of a blob (file) from a known payload."""
        self._unimplemented(context)

    def Remove(self, request, context):
        """Requests the removal, or deletion, of a blob from a known payload."""
        self._unimplemented(context)

    def RemoveMetadata(self, request, context):
        """Requests the removal of metadata from a payload."""
        self._unimplemented(context)

    def Upload(self, request_iterator, context):
        """Requests the upload of a blob (file) to a known payload.

        When payload type is PAYLOAD_TYPE_PIPELINE, uploads are written to the
        ~/input/ folder of the payload.
        """
        self._unimplemented(context)
def add_PayloadsServicer_to_server(servicer, server):
    """Registers a PayloadsServicer implementation with a grpc.Server.

    Args:
        servicer: An object implementing the PayloadsServicer handlers.
        server: The grpc.Server to attach the generic handler to.
    """
    messages = nvidia_dot_clara_dot_platform_dot_payloads__pb2
    # (method name, handler factory, request message, response message);
    # the factory choice encodes each RPC's streaming arity from the .proto.
    method_specs = (
        ('AddMetadata', grpc.unary_unary_rpc_method_handler,
         messages.PayloadsAddMetadataRequest, messages.PayloadsAddMetadataResponse),
        ('Create', grpc.unary_unary_rpc_method_handler,
         messages.PayloadsCreateRequest, messages.PayloadsCreateResponse),
        ('Delete', grpc.unary_unary_rpc_method_handler,
         messages.PayloadsDeleteRequest, messages.PayloadsDeleteResponse),
        ('Details', grpc.unary_stream_rpc_method_handler,
         messages.PayloadsDetailsRequest, messages.PayloadsDetailsResponse),
        ('Download', grpc.unary_stream_rpc_method_handler,
         messages.PayloadsDownloadRequest, messages.PayloadsDownloadResponse),
        ('Remove', grpc.unary_unary_rpc_method_handler,
         messages.PayloadsRemoveRequest, messages.PayloadsRemoveResponse),
        ('RemoveMetadata', grpc.unary_unary_rpc_method_handler,
         messages.PayloadsRemoveMetadataRequest, messages.PayloadsRemoveMetadataResponse),
        ('Upload', grpc.stream_unary_rpc_method_handler,
         messages.PayloadsUploadRequest, messages.PayloadsUploadResponse),
    )
    rpc_method_handlers = {
        name: factory(
            getattr(servicer, name),
            request_deserializer=request_cls.FromString,
            response_serializer=response_cls.SerializeToString,
        )
        for name, factory, request_cls, response_cls in method_specs
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'nvidia.clara.platform.Payloads', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
| clara-platform-python-client-main | nvidia_clara/grpc/payloads_pb2_grpc.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia/clara/platform/clara.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from nvidia_clara.grpc import common_pb2 as nvidia_dot_clara_dot_platform_dot_common__pb2
from nvidia_clara.grpc.common_pb2 import *
# ---------------------------------------------------------------------------
# Generated descriptor metadata for nvidia/clara/platform/clara.proto.
# serialized_pb is the compiled FileDescriptorProto; the serialized_start /
# serialized_end values on the descriptors below are byte offsets into it.
# Statement order matters: descriptors are created first, cross-linked next,
# then message classes and the service descriptor are built from them.
# ---------------------------------------------------------------------------
DESCRIPTOR = _descriptor.FileDescriptor(
  name='nvidia/clara/platform/clara.proto',
  package='nvidia.clara.platform',
  syntax='proto3',
  serialized_options=_b('\n\031com.nvidia.clara.platformZ\004apis\252\002\032Nvidia.Clara.Platform.Grpc'),
  serialized_pb=_b('\n!nvidia/clara/platform/clara.proto\x12\x15nvidia.clara.platform\x1a\"nvidia/clara/platform/common.proto\"H\n\x10\x43laraStopRequest\x12\x34\n\x06header\x18\x01 \x01(\x0b\x32$.nvidia.clara.platform.RequestHeader\"J\n\x11\x43laraStopResponse\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.nvidia.clara.platform.ResponseHeader\"^\n\x17\x43laraUtilizationRequest\x12\x34\n\x06header\x18\x01 \x01(\x0b\x32$.nvidia.clara.platform.RequestHeader\x12\r\n\x05watch\x18\x02 \x01(\x08\"\xae\x04\n\x18\x43laraUtilizationResponse\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.nvidia.clara.platform.ResponseHeader\x12S\n\x0bgpu_metrics\x18\x02 \x03(\x0b\x32>.nvidia.clara.platform.ClaraUtilizationResponse.GpuUtilization\x1a\x85\x03\n\x0eGpuUtilization\x12\x0f\n\x07node_id\x18\x01 \x01(\t\x12\x0f\n\x07pcie_id\x18\x02 \x01(\r\x12\x1b\n\x13\x63ompute_utilization\x18\x06 \x01(\x02\x12\x13\n\x0bmemory_free\x18\x07 \x01(\x04\x12\x13\n\x0bmemory_used\x18\x08 \x01(\x04\x12\x1a\n\x12memory_utilization\x18\t \x01(\x02\x12\x33\n\ttimestamp\x18\x0b \x01(\x0b\x32 .nvidia.clara.platform.Timestamp\x12\x66\n\x0fprocess_details\x18\x0c \x03(\x0b\x32M.nvidia.clara.platform.ClaraUtilizationResponse.GpuUtilization.ProcessDetails\x1aQ\n\x0eProcessDetails\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x31\n\x06job_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\"K\n\x13\x43laraVersionRequest\x12\x34\n\x06header\x18\x01 \x01(\x0b\x32$.nvidia.clara.platform.RequestHeader\"~\n\x14\x43laraVersionResponse\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.nvidia.clara.platform.ResponseHeader\x12/\n\x07version\x18\x02 \x01(\x0b\x32\x1e.nvidia.clara.platform.Version2\xb8\x02\n\x05\x43lara\x12Y\n\x04Stop\x12\'.nvidia.clara.platform.ClaraStopRequest\x1a(.nvidia.clara.platform.ClaraStopResponse\x12p\n\x0bUtilization\x12..nvidia.clara.platform.ClaraUtilizationRequest\x1a/.nvidia.clara.platform.ClaraUtilizationResponse0\x01\x12\x62\n\x07Version\x12*.nvidia.clara.platform.ClaraVersionRequest\x1a+.nvidia.clara.platform.ClaraVersionResponseB>\n\x19\x63om.nvidia.clara.platformZ\x04\x61pis\xaa\x02\x1aNvidia.Clara.Platform.GrpcP\x00\x62\x06proto3')
  ,
  # common.proto is both a dependency and a public dependency: its symbols
  # are re-exported from this module (see the star-import above).
  dependencies=[nvidia_dot_clara_dot_platform_dot_common__pb2.DESCRIPTOR,],
  public_dependencies=[nvidia_dot_clara_dot_platform_dot_common__pb2.DESCRIPTOR,])
# Message: ClaraStopRequest — single 'header' field (message type wired below).
_CLARASTOPREQUEST = _descriptor.Descriptor(
  name='ClaraStopRequest',
  full_name='nvidia.clara.platform.ClaraStopRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='header', full_name='nvidia.clara.platform.ClaraStopRequest.header', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=96,
  serialized_end=168,
)
# Message: ClaraStopResponse — single 'header' field (message type wired below).
_CLARASTOPRESPONSE = _descriptor.Descriptor(
  name='ClaraStopResponse',
  full_name='nvidia.clara.platform.ClaraStopResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='header', full_name='nvidia.clara.platform.ClaraStopResponse.header', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=170,
  serialized_end=244,
)
# Message: ClaraUtilizationRequest — 'header' plus a bool 'watch' flag.
_CLARAUTILIZATIONREQUEST = _descriptor.Descriptor(
  name='ClaraUtilizationRequest',
  full_name='nvidia.clara.platform.ClaraUtilizationRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='header', full_name='nvidia.clara.platform.ClaraUtilizationRequest.header', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='watch', full_name='nvidia.clara.platform.ClaraUtilizationRequest.watch', index=1,
      number=2, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=246,
  serialized_end=340,
)
# Nested message: ClaraUtilizationResponse.GpuUtilization.ProcessDetails —
# 'name' string plus a 'job_id' Identifier (wired below).
_CLARAUTILIZATIONRESPONSE_GPUUTILIZATION_PROCESSDETAILS = _descriptor.Descriptor(
  name='ProcessDetails',
  full_name='nvidia.clara.platform.ClaraUtilizationResponse.GpuUtilization.ProcessDetails',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='name', full_name='nvidia.clara.platform.ClaraUtilizationResponse.GpuUtilization.ProcessDetails.name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='job_id', full_name='nvidia.clara.platform.ClaraUtilizationResponse.GpuUtilization.ProcessDetails.job_id', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=820,
  serialized_end=901,
)
# Nested message: ClaraUtilizationResponse.GpuUtilization — per-GPU metrics
# (note field numbers 3-5 and 10 are unused in the .proto).
_CLARAUTILIZATIONRESPONSE_GPUUTILIZATION = _descriptor.Descriptor(
  name='GpuUtilization',
  full_name='nvidia.clara.platform.ClaraUtilizationResponse.GpuUtilization',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='node_id', full_name='nvidia.clara.platform.ClaraUtilizationResponse.GpuUtilization.node_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='pcie_id', full_name='nvidia.clara.platform.ClaraUtilizationResponse.GpuUtilization.pcie_id', index=1,
      number=2, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='compute_utilization', full_name='nvidia.clara.platform.ClaraUtilizationResponse.GpuUtilization.compute_utilization', index=2,
      number=6, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='memory_free', full_name='nvidia.clara.platform.ClaraUtilizationResponse.GpuUtilization.memory_free', index=3,
      number=7, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='memory_used', full_name='nvidia.clara.platform.ClaraUtilizationResponse.GpuUtilization.memory_used', index=4,
      number=8, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='memory_utilization', full_name='nvidia.clara.platform.ClaraUtilizationResponse.GpuUtilization.memory_utilization', index=5,
      number=9, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='timestamp', full_name='nvidia.clara.platform.ClaraUtilizationResponse.GpuUtilization.timestamp', index=6,
      number=11, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='process_details', full_name='nvidia.clara.platform.ClaraUtilizationResponse.GpuUtilization.process_details', index=7,
      number=12, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[_CLARAUTILIZATIONRESPONSE_GPUUTILIZATION_PROCESSDETAILS, ],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=512,
  serialized_end=901,
)
# Message: ClaraUtilizationResponse — 'header' plus repeated 'gpu_metrics'.
_CLARAUTILIZATIONRESPONSE = _descriptor.Descriptor(
  name='ClaraUtilizationResponse',
  full_name='nvidia.clara.platform.ClaraUtilizationResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='header', full_name='nvidia.clara.platform.ClaraUtilizationResponse.header', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='gpu_metrics', full_name='nvidia.clara.platform.ClaraUtilizationResponse.gpu_metrics', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[_CLARAUTILIZATIONRESPONSE_GPUUTILIZATION, ],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=343,
  serialized_end=901,
)
# Message: ClaraVersionRequest — single 'header' field.
_CLARAVERSIONREQUEST = _descriptor.Descriptor(
  name='ClaraVersionRequest',
  full_name='nvidia.clara.platform.ClaraVersionRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='header', full_name='nvidia.clara.platform.ClaraVersionRequest.header', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=903,
  serialized_end=978,
)
# Message: ClaraVersionResponse — 'header' plus a 'version' message field.
_CLARAVERSIONRESPONSE = _descriptor.Descriptor(
  name='ClaraVersionResponse',
  full_name='nvidia.clara.platform.ClaraVersionResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='header', full_name='nvidia.clara.platform.ClaraVersionResponse.header', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='version', full_name='nvidia.clara.platform.ClaraVersionResponse.version', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=980,
  serialized_end=1106,
)
# Resolve message-typed fields now that all descriptors exist: header/version
# fields point at types from common.proto; nested types point at each other.
_CLARASTOPREQUEST.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._REQUESTHEADER
_CLARASTOPRESPONSE.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._RESPONSEHEADER
_CLARAUTILIZATIONREQUEST.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._REQUESTHEADER
_CLARAUTILIZATIONRESPONSE_GPUUTILIZATION_PROCESSDETAILS.fields_by_name['job_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_CLARAUTILIZATIONRESPONSE_GPUUTILIZATION_PROCESSDETAILS.containing_type = _CLARAUTILIZATIONRESPONSE_GPUUTILIZATION
_CLARAUTILIZATIONRESPONSE_GPUUTILIZATION.fields_by_name['timestamp'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._TIMESTAMP
_CLARAUTILIZATIONRESPONSE_GPUUTILIZATION.fields_by_name['process_details'].message_type = _CLARAUTILIZATIONRESPONSE_GPUUTILIZATION_PROCESSDETAILS
_CLARAUTILIZATIONRESPONSE_GPUUTILIZATION.containing_type = _CLARAUTILIZATIONRESPONSE
_CLARAUTILIZATIONRESPONSE.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._RESPONSEHEADER
_CLARAUTILIZATIONRESPONSE.fields_by_name['gpu_metrics'].message_type = _CLARAUTILIZATIONRESPONSE_GPUUTILIZATION
_CLARAVERSIONREQUEST.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._REQUESTHEADER
_CLARAVERSIONRESPONSE.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._RESPONSEHEADER
_CLARAVERSIONRESPONSE.fields_by_name['version'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._VERSION
# Register the top-level message descriptors and the file with the default
# symbol database.
DESCRIPTOR.message_types_by_name['ClaraStopRequest'] = _CLARASTOPREQUEST
DESCRIPTOR.message_types_by_name['ClaraStopResponse'] = _CLARASTOPRESPONSE
DESCRIPTOR.message_types_by_name['ClaraUtilizationRequest'] = _CLARAUTILIZATIONREQUEST
DESCRIPTOR.message_types_by_name['ClaraUtilizationResponse'] = _CLARAUTILIZATIONRESPONSE
DESCRIPTOR.message_types_by_name['ClaraVersionRequest'] = _CLARAVERSIONREQUEST
DESCRIPTOR.message_types_by_name['ClaraVersionResponse'] = _CLARAVERSIONRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Build the concrete message classes from the descriptors via the reflection
# metaclass; nested classes are created inline as dict entries.
ClaraStopRequest = _reflection.GeneratedProtocolMessageType('ClaraStopRequest', (_message.Message,), dict(
  DESCRIPTOR = _CLARASTOPREQUEST,
  __module__ = 'nvidia.clara.platform.clara_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ClaraStopRequest)
  ))
_sym_db.RegisterMessage(ClaraStopRequest)
ClaraStopResponse = _reflection.GeneratedProtocolMessageType('ClaraStopResponse', (_message.Message,), dict(
  DESCRIPTOR = _CLARASTOPRESPONSE,
  __module__ = 'nvidia.clara.platform.clara_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ClaraStopResponse)
  ))
_sym_db.RegisterMessage(ClaraStopResponse)
ClaraUtilizationRequest = _reflection.GeneratedProtocolMessageType('ClaraUtilizationRequest', (_message.Message,), dict(
  DESCRIPTOR = _CLARAUTILIZATIONREQUEST,
  __module__ = 'nvidia.clara.platform.clara_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ClaraUtilizationRequest)
  ))
_sym_db.RegisterMessage(ClaraUtilizationRequest)
ClaraUtilizationResponse = _reflection.GeneratedProtocolMessageType('ClaraUtilizationResponse', (_message.Message,), dict(
  GpuUtilization = _reflection.GeneratedProtocolMessageType('GpuUtilization', (_message.Message,), dict(
    ProcessDetails = _reflection.GeneratedProtocolMessageType('ProcessDetails', (_message.Message,), dict(
      DESCRIPTOR = _CLARAUTILIZATIONRESPONSE_GPUUTILIZATION_PROCESSDETAILS,
      __module__ = 'nvidia.clara.platform.clara_pb2'
      # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ClaraUtilizationResponse.GpuUtilization.ProcessDetails)
      ))
    ,
    DESCRIPTOR = _CLARAUTILIZATIONRESPONSE_GPUUTILIZATION,
    __module__ = 'nvidia.clara.platform.clara_pb2'
    # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ClaraUtilizationResponse.GpuUtilization)
    ))
  ,
  DESCRIPTOR = _CLARAUTILIZATIONRESPONSE,
  __module__ = 'nvidia.clara.platform.clara_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ClaraUtilizationResponse)
  ))
_sym_db.RegisterMessage(ClaraUtilizationResponse)
_sym_db.RegisterMessage(ClaraUtilizationResponse.GpuUtilization)
_sym_db.RegisterMessage(ClaraUtilizationResponse.GpuUtilization.ProcessDetails)
ClaraVersionRequest = _reflection.GeneratedProtocolMessageType('ClaraVersionRequest', (_message.Message,), dict(
  DESCRIPTOR = _CLARAVERSIONREQUEST,
  __module__ = 'nvidia.clara.platform.clara_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ClaraVersionRequest)
  ))
_sym_db.RegisterMessage(ClaraVersionRequest)
ClaraVersionResponse = _reflection.GeneratedProtocolMessageType('ClaraVersionResponse', (_message.Message,), dict(
  DESCRIPTOR = _CLARAVERSIONRESPONSE,
  __module__ = 'nvidia.clara.platform.clara_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ClaraVersionResponse)
  ))
_sym_db.RegisterMessage(ClaraVersionResponse)
DESCRIPTOR._options = None
# Service descriptor for nvidia.clara.platform.Clara: Stop and Version are
# unary-unary; Utilization is server-streaming (per the serialized proto).
_CLARA = _descriptor.ServiceDescriptor(
  name='Clara',
  full_name='nvidia.clara.platform.Clara',
  file=DESCRIPTOR,
  index=0,
  serialized_options=None,
  serialized_start=1109,
  serialized_end=1421,
  methods=[
  _descriptor.MethodDescriptor(
    name='Stop',
    full_name='nvidia.clara.platform.Clara.Stop',
    index=0,
    containing_service=None,
    input_type=_CLARASTOPREQUEST,
    output_type=_CLARASTOPRESPONSE,
    serialized_options=None,
  ),
  _descriptor.MethodDescriptor(
    name='Utilization',
    full_name='nvidia.clara.platform.Clara.Utilization',
    index=1,
    containing_service=None,
    input_type=_CLARAUTILIZATIONREQUEST,
    output_type=_CLARAUTILIZATIONRESPONSE,
    serialized_options=None,
  ),
  _descriptor.MethodDescriptor(
    name='Version',
    full_name='nvidia.clara.platform.Clara.Version',
    index=2,
    containing_service=None,
    input_type=_CLARAVERSIONREQUEST,
    output_type=_CLARAVERSIONRESPONSE,
    serialized_options=None,
  ),
])
_sym_db.RegisterServiceDescriptor(_CLARA)
DESCRIPTOR.services_by_name['Clara'] = _CLARA
# @@protoc_insertion_point(module_scope)
| clara-platform-python-client-main | nvidia_clara/grpc/clara_pb2.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia/clara/platform/jobs.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from nvidia_clara.grpc import common_pb2 as nvidia_dot_clara_dot_platform_dot_common__pb2
from nvidia_clara.grpc import clara_pb2 as nvidia_dot_clara_dot_platform_dot_clara__pb2
# Re-bind the common_pb2 alias to the instance reachable through clara_pb2 so
# both dependency paths resolve to the same loaded module; older generated
# modules expose it under the dotted attribute path instead, hence the
# AttributeError fallback.
try:
  nvidia_dot_clara_dot_platform_dot_common__pb2 = nvidia_dot_clara_dot_platform_dot_clara__pb2.nvidia_dot_clara_dot_platform_dot_common__pb2
except AttributeError:
  nvidia_dot_clara_dot_platform_dot_common__pb2 = nvidia_dot_clara_dot_platform_dot_clara__pb2.nvidia.clara.platform.common_pb2
# jobs.proto declares common.proto and clara.proto as *public* imports (see
# public_dependencies below), so their names are re-exported from this module.
from nvidia_clara.grpc.common_pb2 import *
from nvidia_clara.grpc.clara_pb2 import *
# File descriptor for nvidia/clara/platform/jobs.proto. serialized_pb is the
# protoc-compiled FileDescriptorProto for the whole file; every
# serialized_start/serialized_end offset in the descriptors below indexes into
# this blob, so it must not be modified by hand.
# NOTE(review): the _b('...') literal below appears to have been line-wrapped
# by an external tool (a plain quoted literal cannot span physical lines) —
# confirm against the originally generated file.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='nvidia/clara/platform/jobs.proto',
  package='nvidia.clara.platform',
  syntax='proto3',
  serialized_options=_b('\n\031com.nvidia.clara.platformZ\004apis\252\002\032Nvidia.Clara.Platform.Grpc'),
  serialized_pb=_b('\n nvidia/clara/platform/jobs.proto\x12\x15nvidia.clara.platform\x1a\"nvidia/clara/platform/common.proto\x1a!nvidia/clara/platform/clara.proto\"\x81\x02\n\x16JobsAddMetadataRequest\x12\x34\n\x06header\x18\x01 \x01(\x0b\x32$.nvidia.clara.platform.RequestHeader\x12\x31\n\x06job_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12M\n\x08metadata\x18\x03 \x03(\x0b\x32;.nvidia.clara.platform.JobsAddMetadataRequest.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x84\x02\n\x17JobsAddMetadataResponse\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.nvidia.clara.platform.ResponseHeader\x12\x31\n\x06job_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12N\n\x08metadata\x18\x03 \x03(\x0b\x32<.nvidia.clara.platform.JobsAddMetadataResponse.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x8c\x01\n\x11JobsCancelRequest\x12\x34\n\x06header\x18\x01 \x01(\x0b\x32$.nvidia.clara.platform.RequestHeader\x12\x31\n\x06job_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12\x0e\n\x06reason\x18\x03 \x01(\t\"\xe8\x01\n\x12JobsCancelResponse\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.nvidia.clara.platform.ResponseHeader\x12\x31\n\x06job_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12\x32\n\tjob_state\x18\x03 \x01(\x0e\x32\x1f.nvidia.clara.platform.JobState\x12\x34\n\njob_status\x18\x04 \x01(\x0e\x32 .nvidia.clara.platform.JobStatus\"\xfb\x02\n\x11JobsCreateRequest\x12\x34\n\x06header\x18\x01 \x01(\x0b\x32$.nvidia.clara.platform.RequestHeader\x12\x36\n\x0bpipeline_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12\x0c\n\x04name\x18\x03 \x01(\t\x12\x34\n\x08priority\x18\x04 \x01(\x0e\x32\".nvidia.clara.platform.JobPriority\x12\x39\n\x0einput_payloads\x18\x05 \x03(\x0b\x32!.nvidia.clara.platform.Identifier\x12H\n\x08metadata\x18\x06 
\x03(\x0b\x32\x36.nvidia.clara.platform.JobsCreateRequest.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xb5\x01\n\x12JobsCreateResponse\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.nvidia.clara.platform.ResponseHeader\x12\x31\n\x06job_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12\x35\n\npayload_id\x18\x03 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\"\xae\x03\n\x0fJobsListRequest\x12\x34\n\x06header\x18\x01 \x01(\x0b\x32$.nvidia.clara.platform.RequestHeader\x12@\n\x06\x66ilter\x18\x02 \x01(\x0b\x32\x30.nvidia.clara.platform.JobsListRequest.JobFilter\x1a\xa2\x02\n\tJobFilter\x12:\n\x10\x63ompleted_before\x18\x01 \x01(\x0b\x32 .nvidia.clara.platform.Timestamp\x12\x37\n\rcreated_after\x18\x02 \x01(\x0b\x32 .nvidia.clara.platform.Timestamp\x12\x32\n\thas_state\x18\x03 \x03(\x0e\x32\x1f.nvidia.clara.platform.JobState\x12\x34\n\nhas_status\x18\x04 \x03(\x0e\x32 .nvidia.clara.platform.JobStatus\x12\x36\n\x0bpipeline_id\x18\x05 \x03(\x0b\x32!.nvidia.clara.platform.Identifier\"\xe8\x06\n\x10JobsListResponse\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.nvidia.clara.platform.ResponseHeader\x12G\n\x0bjob_details\x18\x02 \x01(\x0b\x32\x32.nvidia.clara.platform.JobsListResponse.JobDetails\x1a\xd3\x05\n\nJobDetails\x12\x31\n\x06job_id\x18\x01 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12\x35\n\npayload_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12\x36\n\x0bpipeline_id\x18\x03 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12\x10\n\x08job_name\x18\x04 \x01(\t\x12.\n\x05state\x18\x05 \x01(\x0e\x32\x1f.nvidia.clara.platform.JobState\x12\x30\n\x06status\x18\x06 \x01(\x0e\x32 .nvidia.clara.platform.JobStatus\x12\x34\n\x08priority\x18\x07 \x01(\x0e\x32\".nvidia.clara.platform.JobPriority\x12\x31\n\x07\x63reated\x18\r \x01(\x0b\x32 .nvidia.clara.platform.Timestamp\x12\x31\n\x07started\x18\x0e \x01(\x0b\x32 .nvidia.clara.platform.Timestamp\x12\x31\n\x07stopped\x18\x0f 
\x01(\x0b\x32 .nvidia.clara.platform.Timestamp\x12R\n\x08metadata\x18\x10 \x03(\x0b\x32@.nvidia.clara.platform.JobsListResponse.JobDetails.MetadataEntry\x12\x1d\n\x11timestamp_created\x18\n \x01(\tB\x02\x18\x01\x12\x1d\n\x11timestamp_started\x18\x0b \x01(\tB\x02\x18\x01\x12\x1d\n\x11timestamp_stopped\x18\x0c \x01(\tB\x02\x18\x01\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x95\x01\n\x13JobsReadLogsRequest\x12\x34\n\x06header\x18\x01 \x01(\x0b\x32$.nvidia.clara.platform.RequestHeader\x12\x31\n\x06job_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12\x15\n\roperator_name\x18\x03 \x01(\t\"\xa5\x01\n\x14JobsReadLogsResponse\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.nvidia.clara.platform.ResponseHeader\x12\x31\n\x06job_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12\x15\n\roperator_name\x18\x03 \x01(\t\x12\x0c\n\x04logs\x18\x04 \x03(\t\"\x92\x01\n\x19JobsRemoveMetadataRequest\x12\x34\n\x06header\x18\x01 \x01(\x0b\x32$.nvidia.clara.platform.RequestHeader\x12\x31\n\x06job_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12\x0c\n\x04keys\x18\x03 \x03(\t\"\x8a\x02\n\x1aJobsRemoveMetadataResponse\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.nvidia.clara.platform.ResponseHeader\x12\x31\n\x06job_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12Q\n\x08metadata\x18\x03 \x03(\x0b\x32?.nvidia.clara.platform.JobsRemoveMetadataResponse.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xed\x01\n\x10JobsStartRequest\x12\x34\n\x06header\x18\x01 \x01(\x0b\x32$.nvidia.clara.platform.RequestHeader\x12\x31\n\x06job_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12\x45\n\tVariables\x18\x03 \x03(\x0b\x32\x32.nvidia.clara.platform.JobsStartRequest.NamedValue\x1a)\n\nNamedValue\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"\xe2\x01\n\x11JobsStartResponse\x12\x35\n\x06header\x18\x01 
\x01(\x0b\x32%.nvidia.clara.platform.ResponseHeader\x12\x34\n\x08priority\x18\x04 \x01(\x0e\x32\".nvidia.clara.platform.JobPriority\x12.\n\x05state\x18\x02 \x01(\x0e\x32\x1f.nvidia.clara.platform.JobState\x12\x30\n\x06status\x18\x03 \x01(\x0e\x32 .nvidia.clara.platform.JobStatus\"|\n\x11JobsStatusRequest\x12\x34\n\x06header\x18\x01 \x01(\x0b\x32$.nvidia.clara.platform.RequestHeader\x12\x31\n\x06job_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\"\xeb\n\n\x12JobsStatusResponse\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.nvidia.clara.platform.ResponseHeader\x12\x31\n\x06job_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12\x36\n\x0bpipeline_id\x18\x03 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12\x35\n\npayload_id\x18\x04 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12.\n\x05state\x18\x05 \x01(\x0e\x32\x1f.nvidia.clara.platform.JobState\x12\x30\n\x06status\x18\x06 \x01(\x0e\x32 .nvidia.clara.platform.JobStatus\x12\x0c\n\x04name\x18\x07 \x01(\t\x12\x34\n\x08priority\x18\t \x01(\x0e\x32\".nvidia.clara.platform.JobPriority\x12\x31\n\x07\x63reated\x18\r \x01(\x0b\x32 .nvidia.clara.platform.Timestamp\x12\x31\n\x07started\x18\x0e \x01(\x0b\x32 .nvidia.clara.platform.Timestamp\x12\x31\n\x07stopped\x18\x0f \x01(\x0b\x32 .nvidia.clara.platform.Timestamp\x12V\n\x10operator_details\x18\x10 \x03(\x0b\x32<.nvidia.clara.platform.JobsStatusResponse.JobOperatorDetails\x12I\n\x08metadata\x18\x11 \x03(\x0b\x32\x37.nvidia.clara.platform.JobsStatusResponse.MetadataEntry\x12\x41\n\x03\x64\x61g\x18\x12 \x03(\x0b\x32\x34.nvidia.clara.platform.JobsStatusResponse.JobDagNode\x12\x10\n\x08messages\x18\x08 \x03(\t\x12\x1d\n\x11timestamp_created\x18\n \x01(\tB\x02\x18\x01\x12\x1d\n\x11timestamp_started\x18\x0b \x01(\tB\x02\x18\x01\x12\x1d\n\x11timestamp_stopped\x18\x0c \x01(\tB\x02\x18\x01\x1a\xf5\x01\n\x12JobOperatorDetails\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x38\n\x06status\x18\x02 
\x01(\x0e\x32(.nvidia.clara.platform.JobOperatorStatus\x12\x31\n\x07\x63reated\x18\x03 \x01(\x0b\x32 .nvidia.clara.platform.Timestamp\x12\x31\n\x07started\x18\x04 \x01(\x0b\x32 .nvidia.clara.platform.Timestamp\x12\x31\n\x07stopped\x18\x05 \x01(\x0b\x32 .nvidia.clara.platform.Timestamp\x1a\xbe\x01\n\nJobDagNode\x12\x0c\n\x04name\x18\x01 \x01(\t\x12P\n\x12input_dependencies\x18\x02 \x03(\x0b\x32\x34.nvidia.clara.platform.JobsStatusResponse.JobDagNode\x12P\n\x12order_dependencies\x18\x03 \x03(\x0b\x32\x34.nvidia.clara.platform.JobsStatusResponse.JobDagNode\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01*\xba\x01\n\x11JobOperatorStatus\x12\x1f\n\x1bJOB_OPERATOR_STATUS_UNKNOWN\x10\x00\x12\x1f\n\x1bJOB_OPERATOR_STATUS_PENDING\x10\x01\x12\x1f\n\x1bJOB_OPERATOR_STATUS_RUNNING\x10\x02\x12!\n\x1dJOB_OPERATOR_STATUS_COMPLETED\x10\x03\x12\x1f\n\x1bJOB_OPERATOR_STATUS_FAULTED\x10\x04*\x8d\x01\n\x0bJobPriority\x12\x18\n\x14JOB_PRIORITY_UNKNOWN\x10\x00\x12\x16\n\x12JOB_PRIORITY_LOWER\x10\x01\x12\x17\n\x13JOB_PRIORITY_NORMAL\x10\x02\x12\x17\n\x13JOB_PRIORITY_HIGHER\x10\x03\x12\x1a\n\x16JOB_PRIORITY_IMMEDIATE\x10\x04*f\n\x08JobState\x12\x15\n\x11JOB_STATE_UNKNOWN\x10\x00\x12\x15\n\x11JOB_STATE_PENDING\x10\x01\x12\x15\n\x11JOB_STATE_RUNNING\x10\x02\x12\x15\n\x11JOB_STATE_STOPPED\x10\x03*\x9f\x01\n\tJobStatus\x12\x16\n\x12JOB_STATUS_UNKNOWN\x10\x00\x12\x16\n\x12JOB_STATUS_HEALTHY\x10\x01\x12\x16\n\x12JOB_STATUS_FAULTED\x10\x02\x12\x17\n\x13JOB_STATUS_CANCELED\x10\x03\x12\x16\n\x12JOB_STATUS_EVICTED\x10\x04\x12\x19\n\x15JOB_STATUS_TERMINATED\x10\x05\x32\xa6\x06\n\x04Jobs\x12l\n\x0b\x41\x64\x64Metadata\x12-.nvidia.clara.platform.JobsAddMetadataRequest\x1a..nvidia.clara.platform.JobsAddMetadataResponse\x12]\n\x06\x43\x61ncel\x12(.nvidia.clara.platform.JobsCancelRequest\x1a).nvidia.clara.platform.JobsCancelResponse\x12]\n\x06\x43reate\x12(.nvidia.clara.platform.JobsCreateRequest\x1a).nvidia.clara.platform.JobsCreateResponse\x12Y\n\x04Li
st\x12&.nvidia.clara.platform.JobsListRequest\x1a\'.nvidia.clara.platform.JobsListResponse0\x01\x12\x65\n\x08ReadLogs\x12*.nvidia.clara.platform.JobsReadLogsRequest\x1a+.nvidia.clara.platform.JobsReadLogsResponse0\x01\x12u\n\x0eRemoveMetadata\x12\x30.nvidia.clara.platform.JobsRemoveMetadataRequest\x1a\x31.nvidia.clara.platform.JobsRemoveMetadataResponse\x12Z\n\x05Start\x12\'.nvidia.clara.platform.JobsStartRequest\x1a(.nvidia.clara.platform.JobsStartResponse\x12]\n\x06Status\x12(.nvidia.clara.platform.JobsStatusRequest\x1a).nvidia.clara.platform.JobsStatusResponseB>\n\x19\x63om.nvidia.clara.platformZ\x04\x61pis\xaa\x02\x1aNvidia.Clara.Platform.GrpcP\x00P\x01\x62\x06proto3')
  ,
  dependencies=[nvidia_dot_clara_dot_platform_dot_common__pb2.DESCRIPTOR,nvidia_dot_clara_dot_platform_dot_clara__pb2.DESCRIPTOR,],
  public_dependencies=[nvidia_dot_clara_dot_platform_dot_common__pb2.DESCRIPTOR,nvidia_dot_clara_dot_platform_dot_clara__pb2.DESCRIPTOR,])
# Enum descriptor for JobOperatorStatus (per-operator execution status:
# UNKNOWN/PENDING/RUNNING/COMPLETED/FAULTED, values 0-4). Registered with the
# symbol database and wrapped so JobOperatorStatus.Name()/Value() work.
_JOBOPERATORSTATUS = _descriptor.EnumDescriptor(
  name='JobOperatorStatus',
  full_name='nvidia.clara.platform.JobOperatorStatus',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='JOB_OPERATOR_STATUS_UNKNOWN', index=0, number=0,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='JOB_OPERATOR_STATUS_PENDING', index=1, number=1,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='JOB_OPERATOR_STATUS_RUNNING', index=2, number=2,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='JOB_OPERATOR_STATUS_COMPLETED', index=3, number=3,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='JOB_OPERATOR_STATUS_FAULTED', index=4, number=4,
      serialized_options=None,
      type=None),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=5629,  # offsets into DESCRIPTOR's serialized_pb blob
  serialized_end=5815,
)
_sym_db.RegisterEnumDescriptor(_JOBOPERATORSTATUS)
JobOperatorStatus = enum_type_wrapper.EnumTypeWrapper(_JOBOPERATORSTATUS)
# Enum descriptor for JobPriority (UNKNOWN/LOWER/NORMAL/HIGHER/IMMEDIATE,
# values 0-4), registered and wrapped for module-level access.
_JOBPRIORITY = _descriptor.EnumDescriptor(
  name='JobPriority',
  full_name='nvidia.clara.platform.JobPriority',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='JOB_PRIORITY_UNKNOWN', index=0, number=0,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='JOB_PRIORITY_LOWER', index=1, number=1,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='JOB_PRIORITY_NORMAL', index=2, number=2,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='JOB_PRIORITY_HIGHER', index=3, number=3,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='JOB_PRIORITY_IMMEDIATE', index=4, number=4,
      serialized_options=None,
      type=None),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=5818,  # offsets into DESCRIPTOR's serialized_pb blob
  serialized_end=5959,
)
_sym_db.RegisterEnumDescriptor(_JOBPRIORITY)
JobPriority = enum_type_wrapper.EnumTypeWrapper(_JOBPRIORITY)
# Enum descriptor for JobState (lifecycle: UNKNOWN/PENDING/RUNNING/STOPPED,
# values 0-3), registered and wrapped for module-level access.
_JOBSTATE = _descriptor.EnumDescriptor(
  name='JobState',
  full_name='nvidia.clara.platform.JobState',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='JOB_STATE_UNKNOWN', index=0, number=0,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='JOB_STATE_PENDING', index=1, number=1,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='JOB_STATE_RUNNING', index=2, number=2,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='JOB_STATE_STOPPED', index=3, number=3,
      serialized_options=None,
      type=None),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=5961,  # offsets into DESCRIPTOR's serialized_pb blob
  serialized_end=6063,
)
_sym_db.RegisterEnumDescriptor(_JOBSTATE)
JobState = enum_type_wrapper.EnumTypeWrapper(_JOBSTATE)
# Enum descriptor for JobStatus (outcome/health: UNKNOWN/HEALTHY/FAULTED/
# CANCELED/EVICTED/TERMINATED, values 0-5), registered and wrapped.
_JOBSTATUS = _descriptor.EnumDescriptor(
  name='JobStatus',
  full_name='nvidia.clara.platform.JobStatus',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='JOB_STATUS_UNKNOWN', index=0, number=0,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='JOB_STATUS_HEALTHY', index=1, number=1,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='JOB_STATUS_FAULTED', index=2, number=2,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='JOB_STATUS_CANCELED', index=3, number=3,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='JOB_STATUS_EVICTED', index=4, number=4,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='JOB_STATUS_TERMINATED', index=5, number=5,
      serialized_options=None,
      type=None),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=6066,  # offsets into DESCRIPTOR's serialized_pb blob
  serialized_end=6225,
)
_sym_db.RegisterEnumDescriptor(_JOBSTATUS)
JobStatus = enum_type_wrapper.EnumTypeWrapper(_JOBSTATUS)
# Module-level integer aliases for every enum value above, mirroring the
# proto file's top-level enum scope so callers can reference e.g.
# jobs_pb2.JOB_STATE_RUNNING directly.
JOB_OPERATOR_STATUS_UNKNOWN = 0
JOB_OPERATOR_STATUS_PENDING = 1
JOB_OPERATOR_STATUS_RUNNING = 2
JOB_OPERATOR_STATUS_COMPLETED = 3
JOB_OPERATOR_STATUS_FAULTED = 4
JOB_PRIORITY_UNKNOWN = 0
JOB_PRIORITY_LOWER = 1
JOB_PRIORITY_NORMAL = 2
JOB_PRIORITY_HIGHER = 3
JOB_PRIORITY_IMMEDIATE = 4
JOB_STATE_UNKNOWN = 0
JOB_STATE_PENDING = 1
JOB_STATE_RUNNING = 2
JOB_STATE_STOPPED = 3
JOB_STATUS_UNKNOWN = 0
JOB_STATUS_HEALTHY = 1
JOB_STATUS_FAULTED = 2
JOB_STATUS_CANCELED = 3
JOB_STATUS_EVICTED = 4
JOB_STATUS_TERMINATED = 5
# Synthetic map-entry message (key:string, value:string) backing the
# map<string,string> 'metadata' field of JobsAddMetadataRequest; the
# '8\001' option marks it as a map entry.
_JOBSADDMETADATAREQUEST_METADATAENTRY = _descriptor.Descriptor(
  name='MetadataEntry',
  full_name='nvidia.clara.platform.JobsAddMetadataRequest.MetadataEntry',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='key', full_name='nvidia.clara.platform.JobsAddMetadataRequest.MetadataEntry.key', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='value', full_name='nvidia.clara.platform.JobsAddMetadataRequest.MetadataEntry.value', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=_b('8\001'),  # map_entry=true
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=341,
  serialized_end=388,
)
# Descriptor for JobsAddMetadataRequest: header (RequestHeader), job_id
# (Identifier) and a repeated map-entry 'metadata' field.
_JOBSADDMETADATAREQUEST = _descriptor.Descriptor(
  name='JobsAddMetadataRequest',
  full_name='nvidia.clara.platform.JobsAddMetadataRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='header', full_name='nvidia.clara.platform.JobsAddMetadataRequest.header', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='job_id', full_name='nvidia.clara.platform.JobsAddMetadataRequest.job_id', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='metadata', full_name='nvidia.clara.platform.JobsAddMetadataRequest.metadata', index=2,
      number=3, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[_JOBSADDMETADATAREQUEST_METADATAENTRY, ],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=131,
  serialized_end=388,
)
# Synthetic map-entry message (key:string, value:string) backing the
# map<string,string> 'metadata' field of JobsAddMetadataResponse.
_JOBSADDMETADATARESPONSE_METADATAENTRY = _descriptor.Descriptor(
  name='MetadataEntry',
  full_name='nvidia.clara.platform.JobsAddMetadataResponse.MetadataEntry',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='key', full_name='nvidia.clara.platform.JobsAddMetadataResponse.MetadataEntry.key', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='value', full_name='nvidia.clara.platform.JobsAddMetadataResponse.MetadataEntry.value', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=_b('8\001'),  # map_entry=true
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=341,
  serialized_end=388,
)
# Descriptor for JobsAddMetadataResponse: header (ResponseHeader), job_id
# (Identifier) and the resulting 'metadata' map after the add.
_JOBSADDMETADATARESPONSE = _descriptor.Descriptor(
  name='JobsAddMetadataResponse',
  full_name='nvidia.clara.platform.JobsAddMetadataResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='header', full_name='nvidia.clara.platform.JobsAddMetadataResponse.header', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='job_id', full_name='nvidia.clara.platform.JobsAddMetadataResponse.job_id', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='metadata', full_name='nvidia.clara.platform.JobsAddMetadataResponse.metadata', index=2,
      number=3, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[_JOBSADDMETADATARESPONSE_METADATAENTRY, ],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=391,
  serialized_end=651,
)
# Descriptor for JobsCancelRequest: header (RequestHeader), job_id
# (Identifier) and a free-text cancellation 'reason' string.
_JOBSCANCELREQUEST = _descriptor.Descriptor(
  name='JobsCancelRequest',
  full_name='nvidia.clara.platform.JobsCancelRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='header', full_name='nvidia.clara.platform.JobsCancelRequest.header', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='job_id', full_name='nvidia.clara.platform.JobsCancelRequest.job_id', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='reason', full_name='nvidia.clara.platform.JobsCancelRequest.reason', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=654,
  serialized_end=794,
)
# Descriptor for JobsCancelResponse: header (ResponseHeader), job_id
# (Identifier), plus the job's post-cancel JobState and JobStatus enums.
_JOBSCANCELRESPONSE = _descriptor.Descriptor(
  name='JobsCancelResponse',
  full_name='nvidia.clara.platform.JobsCancelResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='header', full_name='nvidia.clara.platform.JobsCancelResponse.header', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='job_id', full_name='nvidia.clara.platform.JobsCancelResponse.job_id', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='job_state', full_name='nvidia.clara.platform.JobsCancelResponse.job_state', index=2,
      number=3, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='job_status', full_name='nvidia.clara.platform.JobsCancelResponse.job_status', index=3,
      number=4, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=797,
  serialized_end=1029,
)
# Synthetic map-entry message (key:string, value:string) backing the
# map<string,string> 'metadata' field of JobsCreateRequest.
_JOBSCREATEREQUEST_METADATAENTRY = _descriptor.Descriptor(
  name='MetadataEntry',
  full_name='nvidia.clara.platform.JobsCreateRequest.MetadataEntry',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='key', full_name='nvidia.clara.platform.JobsCreateRequest.MetadataEntry.key', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='value', full_name='nvidia.clara.platform.JobsCreateRequest.MetadataEntry.value', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=_b('8\001'),  # map_entry=true
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=341,
  serialized_end=388,
)
# Descriptor for JobsCreateRequest: header, pipeline_id (Identifier), job
# name, JobPriority enum, repeated input_payloads (Identifier) and a
# 'metadata' map.
_JOBSCREATEREQUEST = _descriptor.Descriptor(
  name='JobsCreateRequest',
  full_name='nvidia.clara.platform.JobsCreateRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='header', full_name='nvidia.clara.platform.JobsCreateRequest.header', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='pipeline_id', full_name='nvidia.clara.platform.JobsCreateRequest.pipeline_id', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='name', full_name='nvidia.clara.platform.JobsCreateRequest.name', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='priority', full_name='nvidia.clara.platform.JobsCreateRequest.priority', index=3,
      number=4, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='input_payloads', full_name='nvidia.clara.platform.JobsCreateRequest.input_payloads', index=4,
      number=5, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='metadata', full_name='nvidia.clara.platform.JobsCreateRequest.metadata', index=5,
      number=6, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[_JOBSCREATEREQUEST_METADATAENTRY, ],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1032,
  serialized_end=1411,
)
# Descriptor for JobsCreateResponse: header (ResponseHeader) plus the
# Identifiers of the newly created job and its payload.
_JOBSCREATERESPONSE = _descriptor.Descriptor(
  name='JobsCreateResponse',
  full_name='nvidia.clara.platform.JobsCreateResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='header', full_name='nvidia.clara.platform.JobsCreateResponse.header', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='job_id', full_name='nvidia.clara.platform.JobsCreateResponse.job_id', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='payload_id', full_name='nvidia.clara.platform.JobsCreateResponse.payload_id', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1414,
  serialized_end=1595,
)
# Nested filter message for JobsListRequest: completed_before/created_after
# Timestamps, repeated JobState/JobStatus enums and repeated pipeline_id
# Identifiers.
_JOBSLISTREQUEST_JOBFILTER = _descriptor.Descriptor(
  name='JobFilter',
  full_name='nvidia.clara.platform.JobsListRequest.JobFilter',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='completed_before', full_name='nvidia.clara.platform.JobsListRequest.JobFilter.completed_before', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='created_after', full_name='nvidia.clara.platform.JobsListRequest.JobFilter.created_after', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='has_state', full_name='nvidia.clara.platform.JobsListRequest.JobFilter.has_state', index=2,
      number=3, type=14, cpp_type=8, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='has_status', full_name='nvidia.clara.platform.JobsListRequest.JobFilter.has_status', index=3,
      number=4, type=14, cpp_type=8, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='pipeline_id', full_name='nvidia.clara.platform.JobsListRequest.JobFilter.pipeline_id', index=4,
      number=5, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1738,
  serialized_end=2028,
)
# Descriptor for JobsListRequest: header (RequestHeader) and the nested
# JobFilter message declared above.
_JOBSLISTREQUEST = _descriptor.Descriptor(
  name='JobsListRequest',
  full_name='nvidia.clara.platform.JobsListRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='header', full_name='nvidia.clara.platform.JobsListRequest.header', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='filter', full_name='nvidia.clara.platform.JobsListRequest.filter', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[_JOBSLISTREQUEST_JOBFILTER, ],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1598,
  serialized_end=2028,
)
_JOBSLISTRESPONSE_JOBDETAILS_METADATAENTRY = _descriptor.Descriptor(
name='MetadataEntry',
full_name='nvidia.clara.platform.JobsListResponse.JobDetails.MetadataEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='nvidia.clara.platform.JobsListResponse.JobDetails.MetadataEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='nvidia.clara.platform.JobsListResponse.JobDetails.MetadataEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=341,
serialized_end=388,
)
_JOBSLISTRESPONSE_JOBDETAILS = _descriptor.Descriptor(
name='JobDetails',
full_name='nvidia.clara.platform.JobsListResponse.JobDetails',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='job_id', full_name='nvidia.clara.platform.JobsListResponse.JobDetails.job_id', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='payload_id', full_name='nvidia.clara.platform.JobsListResponse.JobDetails.payload_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pipeline_id', full_name='nvidia.clara.platform.JobsListResponse.JobDetails.pipeline_id', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='job_name', full_name='nvidia.clara.platform.JobsListResponse.JobDetails.job_name', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='state', full_name='nvidia.clara.platform.JobsListResponse.JobDetails.state', index=4,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status', full_name='nvidia.clara.platform.JobsListResponse.JobDetails.status', index=5,
number=6, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='priority', full_name='nvidia.clara.platform.JobsListResponse.JobDetails.priority', index=6,
number=7, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='created', full_name='nvidia.clara.platform.JobsListResponse.JobDetails.created', index=7,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='started', full_name='nvidia.clara.platform.JobsListResponse.JobDetails.started', index=8,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stopped', full_name='nvidia.clara.platform.JobsListResponse.JobDetails.stopped', index=9,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metadata', full_name='nvidia.clara.platform.JobsListResponse.JobDetails.metadata', index=10,
number=16, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='timestamp_created', full_name='nvidia.clara.platform.JobsListResponse.JobDetails.timestamp_created', index=11,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\030\001'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='timestamp_started', full_name='nvidia.clara.platform.JobsListResponse.JobDetails.timestamp_started', index=12,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\030\001'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='timestamp_stopped', full_name='nvidia.clara.platform.JobsListResponse.JobDetails.timestamp_stopped', index=13,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\030\001'), file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_JOBSLISTRESPONSE_JOBDETAILS_METADATAENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2180,
serialized_end=2903,
)
_JOBSLISTRESPONSE = _descriptor.Descriptor(
name='JobsListResponse',
full_name='nvidia.clara.platform.JobsListResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.JobsListResponse.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='job_details', full_name='nvidia.clara.platform.JobsListResponse.job_details', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_JOBSLISTRESPONSE_JOBDETAILS, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2031,
serialized_end=2903,
)
_JOBSREADLOGSREQUEST = _descriptor.Descriptor(
name='JobsReadLogsRequest',
full_name='nvidia.clara.platform.JobsReadLogsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.JobsReadLogsRequest.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='job_id', full_name='nvidia.clara.platform.JobsReadLogsRequest.job_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='operator_name', full_name='nvidia.clara.platform.JobsReadLogsRequest.operator_name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2906,
serialized_end=3055,
)
_JOBSREADLOGSRESPONSE = _descriptor.Descriptor(
name='JobsReadLogsResponse',
full_name='nvidia.clara.platform.JobsReadLogsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.JobsReadLogsResponse.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='job_id', full_name='nvidia.clara.platform.JobsReadLogsResponse.job_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='operator_name', full_name='nvidia.clara.platform.JobsReadLogsResponse.operator_name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='logs', full_name='nvidia.clara.platform.JobsReadLogsResponse.logs', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3058,
serialized_end=3223,
)
_JOBSREMOVEMETADATAREQUEST = _descriptor.Descriptor(
name='JobsRemoveMetadataRequest',
full_name='nvidia.clara.platform.JobsRemoveMetadataRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.JobsRemoveMetadataRequest.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='job_id', full_name='nvidia.clara.platform.JobsRemoveMetadataRequest.job_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='keys', full_name='nvidia.clara.platform.JobsRemoveMetadataRequest.keys', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3226,
serialized_end=3372,
)
_JOBSREMOVEMETADATARESPONSE_METADATAENTRY = _descriptor.Descriptor(
name='MetadataEntry',
full_name='nvidia.clara.platform.JobsRemoveMetadataResponse.MetadataEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='nvidia.clara.platform.JobsRemoveMetadataResponse.MetadataEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='nvidia.clara.platform.JobsRemoveMetadataResponse.MetadataEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=341,
serialized_end=388,
)
_JOBSREMOVEMETADATARESPONSE = _descriptor.Descriptor(
name='JobsRemoveMetadataResponse',
full_name='nvidia.clara.platform.JobsRemoveMetadataResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.JobsRemoveMetadataResponse.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='job_id', full_name='nvidia.clara.platform.JobsRemoveMetadataResponse.job_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metadata', full_name='nvidia.clara.platform.JobsRemoveMetadataResponse.metadata', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_JOBSREMOVEMETADATARESPONSE_METADATAENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3375,
serialized_end=3641,
)
_JOBSSTARTREQUEST_NAMEDVALUE = _descriptor.Descriptor(
name='NamedValue',
full_name='nvidia.clara.platform.JobsStartRequest.NamedValue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='nvidia.clara.platform.JobsStartRequest.NamedValue.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='nvidia.clara.platform.JobsStartRequest.NamedValue.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3840,
serialized_end=3881,
)
_JOBSSTARTREQUEST = _descriptor.Descriptor(
name='JobsStartRequest',
full_name='nvidia.clara.platform.JobsStartRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.JobsStartRequest.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='job_id', full_name='nvidia.clara.platform.JobsStartRequest.job_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='Variables', full_name='nvidia.clara.platform.JobsStartRequest.Variables', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_JOBSSTARTREQUEST_NAMEDVALUE, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3644,
serialized_end=3881,
)
_JOBSSTARTRESPONSE = _descriptor.Descriptor(
name='JobsStartResponse',
full_name='nvidia.clara.platform.JobsStartResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.JobsStartResponse.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='priority', full_name='nvidia.clara.platform.JobsStartResponse.priority', index=1,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='state', full_name='nvidia.clara.platform.JobsStartResponse.state', index=2,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status', full_name='nvidia.clara.platform.JobsStartResponse.status', index=3,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3884,
serialized_end=4110,
)
_JOBSSTATUSREQUEST = _descriptor.Descriptor(
name='JobsStatusRequest',
full_name='nvidia.clara.platform.JobsStatusRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.JobsStatusRequest.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='job_id', full_name='nvidia.clara.platform.JobsStatusRequest.job_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4112,
serialized_end=4236,
)
_JOBSSTATUSRESPONSE_JOBOPERATORDETAILS = _descriptor.Descriptor(
name='JobOperatorDetails',
full_name='nvidia.clara.platform.JobsStatusResponse.JobOperatorDetails',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='nvidia.clara.platform.JobsStatusResponse.JobOperatorDetails.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status', full_name='nvidia.clara.platform.JobsStatusResponse.JobOperatorDetails.status', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='created', full_name='nvidia.clara.platform.JobsStatusResponse.JobOperatorDetails.created', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='started', full_name='nvidia.clara.platform.JobsStatusResponse.JobOperatorDetails.started', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stopped', full_name='nvidia.clara.platform.JobsStatusResponse.JobOperatorDetails.stopped', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5139,
serialized_end=5384,
)
_JOBSSTATUSRESPONSE_JOBDAGNODE = _descriptor.Descriptor(
name='JobDagNode',
full_name='nvidia.clara.platform.JobsStatusResponse.JobDagNode',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='nvidia.clara.platform.JobsStatusResponse.JobDagNode.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='input_dependencies', full_name='nvidia.clara.platform.JobsStatusResponse.JobDagNode.input_dependencies', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='order_dependencies', full_name='nvidia.clara.platform.JobsStatusResponse.JobDagNode.order_dependencies', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5387,
serialized_end=5577,
)
_JOBSSTATUSRESPONSE_METADATAENTRY = _descriptor.Descriptor(
name='MetadataEntry',
full_name='nvidia.clara.platform.JobsStatusResponse.MetadataEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='nvidia.clara.platform.JobsStatusResponse.MetadataEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='nvidia.clara.platform.JobsStatusResponse.MetadataEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=341,
serialized_end=388,
)
_JOBSSTATUSRESPONSE = _descriptor.Descriptor(
name='JobsStatusResponse',
full_name='nvidia.clara.platform.JobsStatusResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.JobsStatusResponse.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='job_id', full_name='nvidia.clara.platform.JobsStatusResponse.job_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pipeline_id', full_name='nvidia.clara.platform.JobsStatusResponse.pipeline_id', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='payload_id', full_name='nvidia.clara.platform.JobsStatusResponse.payload_id', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='state', full_name='nvidia.clara.platform.JobsStatusResponse.state', index=4,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status', full_name='nvidia.clara.platform.JobsStatusResponse.status', index=5,
number=6, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='nvidia.clara.platform.JobsStatusResponse.name', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='priority', full_name='nvidia.clara.platform.JobsStatusResponse.priority', index=7,
number=9, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='created', full_name='nvidia.clara.platform.JobsStatusResponse.created', index=8,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='started', full_name='nvidia.clara.platform.JobsStatusResponse.started', index=9,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stopped', full_name='nvidia.clara.platform.JobsStatusResponse.stopped', index=10,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='operator_details', full_name='nvidia.clara.platform.JobsStatusResponse.operator_details', index=11,
number=16, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metadata', full_name='nvidia.clara.platform.JobsStatusResponse.metadata', index=12,
number=17, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dag', full_name='nvidia.clara.platform.JobsStatusResponse.dag', index=13,
number=18, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='messages', full_name='nvidia.clara.platform.JobsStatusResponse.messages', index=14,
number=8, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='timestamp_created', full_name='nvidia.clara.platform.JobsStatusResponse.timestamp_created', index=15,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\030\001'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='timestamp_started', full_name='nvidia.clara.platform.JobsStatusResponse.timestamp_started', index=16,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\030\001'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='timestamp_stopped', full_name='nvidia.clara.platform.JobsStatusResponse.timestamp_stopped', index=17,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\030\001'), file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_JOBSSTATUSRESPONSE_JOBOPERATORDETAILS, _JOBSSTATUSRESPONSE_JOBDAGNODE, _JOBSSTATUSRESPONSE_METADATAENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4239,
serialized_end=5626,
)
# --- Descriptor cross-linking (generated) --------------------------------
# protoc emits these post-construction assignments to resolve message and
# enum references between descriptors that could not be expressed inline in
# the Descriptor literals above (e.g. fields typed as messages from
# common.proto, map-entry containing types, and enum-typed fields).
_JOBSADDMETADATAREQUEST_METADATAENTRY.containing_type = _JOBSADDMETADATAREQUEST
_JOBSADDMETADATAREQUEST.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._REQUESTHEADER
_JOBSADDMETADATAREQUEST.fields_by_name['job_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_JOBSADDMETADATAREQUEST.fields_by_name['metadata'].message_type = _JOBSADDMETADATAREQUEST_METADATAENTRY
_JOBSADDMETADATARESPONSE_METADATAENTRY.containing_type = _JOBSADDMETADATARESPONSE
_JOBSADDMETADATARESPONSE.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._RESPONSEHEADER
_JOBSADDMETADATARESPONSE.fields_by_name['job_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_JOBSADDMETADATARESPONSE.fields_by_name['metadata'].message_type = _JOBSADDMETADATARESPONSE_METADATAENTRY
_JOBSCANCELREQUEST.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._REQUESTHEADER
_JOBSCANCELREQUEST.fields_by_name['job_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_JOBSCANCELRESPONSE.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._RESPONSEHEADER
_JOBSCANCELRESPONSE.fields_by_name['job_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_JOBSCANCELRESPONSE.fields_by_name['job_state'].enum_type = _JOBSTATE
_JOBSCANCELRESPONSE.fields_by_name['job_status'].enum_type = _JOBSTATUS
_JOBSCREATEREQUEST_METADATAENTRY.containing_type = _JOBSCREATEREQUEST
_JOBSCREATEREQUEST.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._REQUESTHEADER
_JOBSCREATEREQUEST.fields_by_name['pipeline_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_JOBSCREATEREQUEST.fields_by_name['priority'].enum_type = _JOBPRIORITY
_JOBSCREATEREQUEST.fields_by_name['input_payloads'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_JOBSCREATEREQUEST.fields_by_name['metadata'].message_type = _JOBSCREATEREQUEST_METADATAENTRY
_JOBSCREATERESPONSE.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._RESPONSEHEADER
_JOBSCREATERESPONSE.fields_by_name['job_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_JOBSCREATERESPONSE.fields_by_name['payload_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
# JobsListRequest.JobFilter: timestamp bounds, state/status enums, pipeline id.
_JOBSLISTREQUEST_JOBFILTER.fields_by_name['completed_before'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._TIMESTAMP
_JOBSLISTREQUEST_JOBFILTER.fields_by_name['created_after'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._TIMESTAMP
_JOBSLISTREQUEST_JOBFILTER.fields_by_name['has_state'].enum_type = _JOBSTATE
_JOBSLISTREQUEST_JOBFILTER.fields_by_name['has_status'].enum_type = _JOBSTATUS
_JOBSLISTREQUEST_JOBFILTER.fields_by_name['pipeline_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_JOBSLISTREQUEST_JOBFILTER.containing_type = _JOBSLISTREQUEST
_JOBSLISTREQUEST.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._REQUESTHEADER
_JOBSLISTREQUEST.fields_by_name['filter'].message_type = _JOBSLISTREQUEST_JOBFILTER
_JOBSLISTRESPONSE_JOBDETAILS_METADATAENTRY.containing_type = _JOBSLISTRESPONSE_JOBDETAILS
_JOBSLISTRESPONSE_JOBDETAILS.fields_by_name['job_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_JOBSLISTRESPONSE_JOBDETAILS.fields_by_name['payload_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_JOBSLISTRESPONSE_JOBDETAILS.fields_by_name['pipeline_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_JOBSLISTRESPONSE_JOBDETAILS.fields_by_name['state'].enum_type = _JOBSTATE
_JOBSLISTRESPONSE_JOBDETAILS.fields_by_name['status'].enum_type = _JOBSTATUS
_JOBSLISTRESPONSE_JOBDETAILS.fields_by_name['priority'].enum_type = _JOBPRIORITY
_JOBSLISTRESPONSE_JOBDETAILS.fields_by_name['created'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._TIMESTAMP
_JOBSLISTRESPONSE_JOBDETAILS.fields_by_name['started'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._TIMESTAMP
_JOBSLISTRESPONSE_JOBDETAILS.fields_by_name['stopped'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._TIMESTAMP
_JOBSLISTRESPONSE_JOBDETAILS.fields_by_name['metadata'].message_type = _JOBSLISTRESPONSE_JOBDETAILS_METADATAENTRY
_JOBSLISTRESPONSE_JOBDETAILS.containing_type = _JOBSLISTRESPONSE
_JOBSLISTRESPONSE.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._RESPONSEHEADER
_JOBSLISTRESPONSE.fields_by_name['job_details'].message_type = _JOBSLISTRESPONSE_JOBDETAILS
_JOBSREADLOGSREQUEST.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._REQUESTHEADER
_JOBSREADLOGSREQUEST.fields_by_name['job_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_JOBSREADLOGSRESPONSE.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._RESPONSEHEADER
_JOBSREADLOGSRESPONSE.fields_by_name['job_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_JOBSREMOVEMETADATAREQUEST.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._REQUESTHEADER
_JOBSREMOVEMETADATAREQUEST.fields_by_name['job_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_JOBSREMOVEMETADATARESPONSE_METADATAENTRY.containing_type = _JOBSREMOVEMETADATARESPONSE
_JOBSREMOVEMETADATARESPONSE.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._RESPONSEHEADER
_JOBSREMOVEMETADATARESPONSE.fields_by_name['job_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_JOBSREMOVEMETADATARESPONSE.fields_by_name['metadata'].message_type = _JOBSREMOVEMETADATARESPONSE_METADATAENTRY
_JOBSSTARTREQUEST_NAMEDVALUE.containing_type = _JOBSSTARTREQUEST
_JOBSSTARTREQUEST.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._REQUESTHEADER
_JOBSSTARTREQUEST.fields_by_name['job_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
# NOTE: 'Variables' is capitalized in the .proto field name; keep as-is.
_JOBSSTARTREQUEST.fields_by_name['Variables'].message_type = _JOBSSTARTREQUEST_NAMEDVALUE
_JOBSSTARTRESPONSE.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._RESPONSEHEADER
_JOBSSTARTRESPONSE.fields_by_name['priority'].enum_type = _JOBPRIORITY
_JOBSSTARTRESPONSE.fields_by_name['state'].enum_type = _JOBSTATE
_JOBSSTARTRESPONSE.fields_by_name['status'].enum_type = _JOBSTATUS
_JOBSSTATUSREQUEST.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._REQUESTHEADER
_JOBSSTATUSREQUEST.fields_by_name['job_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_JOBSSTATUSRESPONSE_JOBOPERATORDETAILS.fields_by_name['status'].enum_type = _JOBOPERATORSTATUS
_JOBSSTATUSRESPONSE_JOBOPERATORDETAILS.fields_by_name['created'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._TIMESTAMP
_JOBSSTATUSRESPONSE_JOBOPERATORDETAILS.fields_by_name['started'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._TIMESTAMP
_JOBSSTATUSRESPONSE_JOBOPERATORDETAILS.fields_by_name['stopped'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._TIMESTAMP
_JOBSSTATUSRESPONSE_JOBOPERATORDETAILS.containing_type = _JOBSSTATUSRESPONSE
# JobDagNode is self-referential: its dependency fields are lists of JobDagNode.
_JOBSSTATUSRESPONSE_JOBDAGNODE.fields_by_name['input_dependencies'].message_type = _JOBSSTATUSRESPONSE_JOBDAGNODE
_JOBSSTATUSRESPONSE_JOBDAGNODE.fields_by_name['order_dependencies'].message_type = _JOBSSTATUSRESPONSE_JOBDAGNODE
_JOBSSTATUSRESPONSE_JOBDAGNODE.containing_type = _JOBSSTATUSRESPONSE
_JOBSSTATUSRESPONSE_METADATAENTRY.containing_type = _JOBSSTATUSRESPONSE
_JOBSSTATUSRESPONSE.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._RESPONSEHEADER
_JOBSSTATUSRESPONSE.fields_by_name['job_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_JOBSSTATUSRESPONSE.fields_by_name['pipeline_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_JOBSSTATUSRESPONSE.fields_by_name['payload_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_JOBSSTATUSRESPONSE.fields_by_name['state'].enum_type = _JOBSTATE
_JOBSSTATUSRESPONSE.fields_by_name['status'].enum_type = _JOBSTATUS
_JOBSSTATUSRESPONSE.fields_by_name['priority'].enum_type = _JOBPRIORITY
_JOBSSTATUSRESPONSE.fields_by_name['created'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._TIMESTAMP
_JOBSSTATUSRESPONSE.fields_by_name['started'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._TIMESTAMP
_JOBSSTATUSRESPONSE.fields_by_name['stopped'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._TIMESTAMP
_JOBSSTATUSRESPONSE.fields_by_name['operator_details'].message_type = _JOBSSTATUSRESPONSE_JOBOPERATORDETAILS
_JOBSSTATUSRESPONSE.fields_by_name['metadata'].message_type = _JOBSSTATUSRESPONSE_METADATAENTRY
_JOBSSTATUSRESPONSE.fields_by_name['dag'].message_type = _JOBSSTATUSRESPONSE_JOBDAGNODE
# --- File-descriptor registration (generated) ----------------------------
# Expose every top-level message and enum on the FileDescriptor by proto
# name, then register the file with the default symbol database so the
# messages are discoverable at runtime.
DESCRIPTOR.message_types_by_name['JobsAddMetadataRequest'] = _JOBSADDMETADATAREQUEST
DESCRIPTOR.message_types_by_name['JobsAddMetadataResponse'] = _JOBSADDMETADATARESPONSE
DESCRIPTOR.message_types_by_name['JobsCancelRequest'] = _JOBSCANCELREQUEST
DESCRIPTOR.message_types_by_name['JobsCancelResponse'] = _JOBSCANCELRESPONSE
DESCRIPTOR.message_types_by_name['JobsCreateRequest'] = _JOBSCREATEREQUEST
DESCRIPTOR.message_types_by_name['JobsCreateResponse'] = _JOBSCREATERESPONSE
DESCRIPTOR.message_types_by_name['JobsListRequest'] = _JOBSLISTREQUEST
DESCRIPTOR.message_types_by_name['JobsListResponse'] = _JOBSLISTRESPONSE
DESCRIPTOR.message_types_by_name['JobsReadLogsRequest'] = _JOBSREADLOGSREQUEST
DESCRIPTOR.message_types_by_name['JobsReadLogsResponse'] = _JOBSREADLOGSRESPONSE
DESCRIPTOR.message_types_by_name['JobsRemoveMetadataRequest'] = _JOBSREMOVEMETADATAREQUEST
DESCRIPTOR.message_types_by_name['JobsRemoveMetadataResponse'] = _JOBSREMOVEMETADATARESPONSE
DESCRIPTOR.message_types_by_name['JobsStartRequest'] = _JOBSSTARTREQUEST
DESCRIPTOR.message_types_by_name['JobsStartResponse'] = _JOBSSTARTRESPONSE
DESCRIPTOR.message_types_by_name['JobsStatusRequest'] = _JOBSSTATUSREQUEST
DESCRIPTOR.message_types_by_name['JobsStatusResponse'] = _JOBSSTATUSRESPONSE
DESCRIPTOR.enum_types_by_name['JobOperatorStatus'] = _JOBOPERATORSTATUS
DESCRIPTOR.enum_types_by_name['JobPriority'] = _JOBPRIORITY
DESCRIPTOR.enum_types_by_name['JobState'] = _JOBSTATE
DESCRIPTOR.enum_types_by_name['JobStatus'] = _JOBSTATUS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# --- Concrete message classes (generated) --------------------------------
# Build the public message classes via the reflection metaclass; nested
# message types (map entries, JobFilter, JobDetails, DAG nodes, ...) are
# created as class attributes of their containers. Every class is then
# registered with the default symbol database.
JobsAddMetadataRequest = _reflection.GeneratedProtocolMessageType('JobsAddMetadataRequest', (_message.Message,), dict(
  MetadataEntry = _reflection.GeneratedProtocolMessageType('MetadataEntry', (_message.Message,), dict(
    DESCRIPTOR = _JOBSADDMETADATAREQUEST_METADATAENTRY,
    __module__ = 'nvidia.clara.platform.jobs_pb2'
    # @@protoc_insertion_point(class_scope:nvidia.clara.platform.JobsAddMetadataRequest.MetadataEntry)
    ))
  ,
  DESCRIPTOR = _JOBSADDMETADATAREQUEST,
  __module__ = 'nvidia.clara.platform.jobs_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.JobsAddMetadataRequest)
  ))
_sym_db.RegisterMessage(JobsAddMetadataRequest)
_sym_db.RegisterMessage(JobsAddMetadataRequest.MetadataEntry)
JobsAddMetadataResponse = _reflection.GeneratedProtocolMessageType('JobsAddMetadataResponse', (_message.Message,), dict(
  MetadataEntry = _reflection.GeneratedProtocolMessageType('MetadataEntry', (_message.Message,), dict(
    DESCRIPTOR = _JOBSADDMETADATARESPONSE_METADATAENTRY,
    __module__ = 'nvidia.clara.platform.jobs_pb2'
    # @@protoc_insertion_point(class_scope:nvidia.clara.platform.JobsAddMetadataResponse.MetadataEntry)
    ))
  ,
  DESCRIPTOR = _JOBSADDMETADATARESPONSE,
  __module__ = 'nvidia.clara.platform.jobs_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.JobsAddMetadataResponse)
  ))
_sym_db.RegisterMessage(JobsAddMetadataResponse)
_sym_db.RegisterMessage(JobsAddMetadataResponse.MetadataEntry)
JobsCancelRequest = _reflection.GeneratedProtocolMessageType('JobsCancelRequest', (_message.Message,), dict(
  DESCRIPTOR = _JOBSCANCELREQUEST,
  __module__ = 'nvidia.clara.platform.jobs_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.JobsCancelRequest)
  ))
_sym_db.RegisterMessage(JobsCancelRequest)
JobsCancelResponse = _reflection.GeneratedProtocolMessageType('JobsCancelResponse', (_message.Message,), dict(
  DESCRIPTOR = _JOBSCANCELRESPONSE,
  __module__ = 'nvidia.clara.platform.jobs_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.JobsCancelResponse)
  ))
_sym_db.RegisterMessage(JobsCancelResponse)
JobsCreateRequest = _reflection.GeneratedProtocolMessageType('JobsCreateRequest', (_message.Message,), dict(
  MetadataEntry = _reflection.GeneratedProtocolMessageType('MetadataEntry', (_message.Message,), dict(
    DESCRIPTOR = _JOBSCREATEREQUEST_METADATAENTRY,
    __module__ = 'nvidia.clara.platform.jobs_pb2'
    # @@protoc_insertion_point(class_scope:nvidia.clara.platform.JobsCreateRequest.MetadataEntry)
    ))
  ,
  DESCRIPTOR = _JOBSCREATEREQUEST,
  __module__ = 'nvidia.clara.platform.jobs_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.JobsCreateRequest)
  ))
_sym_db.RegisterMessage(JobsCreateRequest)
_sym_db.RegisterMessage(JobsCreateRequest.MetadataEntry)
JobsCreateResponse = _reflection.GeneratedProtocolMessageType('JobsCreateResponse', (_message.Message,), dict(
  DESCRIPTOR = _JOBSCREATERESPONSE,
  __module__ = 'nvidia.clara.platform.jobs_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.JobsCreateResponse)
  ))
_sym_db.RegisterMessage(JobsCreateResponse)
JobsListRequest = _reflection.GeneratedProtocolMessageType('JobsListRequest', (_message.Message,), dict(
  JobFilter = _reflection.GeneratedProtocolMessageType('JobFilter', (_message.Message,), dict(
    DESCRIPTOR = _JOBSLISTREQUEST_JOBFILTER,
    __module__ = 'nvidia.clara.platform.jobs_pb2'
    # @@protoc_insertion_point(class_scope:nvidia.clara.platform.JobsListRequest.JobFilter)
    ))
  ,
  DESCRIPTOR = _JOBSLISTREQUEST,
  __module__ = 'nvidia.clara.platform.jobs_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.JobsListRequest)
  ))
_sym_db.RegisterMessage(JobsListRequest)
_sym_db.RegisterMessage(JobsListRequest.JobFilter)
# JobsListResponse nests JobDetails, which in turn nests its MetadataEntry.
JobsListResponse = _reflection.GeneratedProtocolMessageType('JobsListResponse', (_message.Message,), dict(
  JobDetails = _reflection.GeneratedProtocolMessageType('JobDetails', (_message.Message,), dict(
    MetadataEntry = _reflection.GeneratedProtocolMessageType('MetadataEntry', (_message.Message,), dict(
      DESCRIPTOR = _JOBSLISTRESPONSE_JOBDETAILS_METADATAENTRY,
      __module__ = 'nvidia.clara.platform.jobs_pb2'
      # @@protoc_insertion_point(class_scope:nvidia.clara.platform.JobsListResponse.JobDetails.MetadataEntry)
      ))
    ,
    DESCRIPTOR = _JOBSLISTRESPONSE_JOBDETAILS,
    __module__ = 'nvidia.clara.platform.jobs_pb2'
    # @@protoc_insertion_point(class_scope:nvidia.clara.platform.JobsListResponse.JobDetails)
    ))
  ,
  DESCRIPTOR = _JOBSLISTRESPONSE,
  __module__ = 'nvidia.clara.platform.jobs_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.JobsListResponse)
  ))
_sym_db.RegisterMessage(JobsListResponse)
_sym_db.RegisterMessage(JobsListResponse.JobDetails)
_sym_db.RegisterMessage(JobsListResponse.JobDetails.MetadataEntry)
JobsReadLogsRequest = _reflection.GeneratedProtocolMessageType('JobsReadLogsRequest', (_message.Message,), dict(
  DESCRIPTOR = _JOBSREADLOGSREQUEST,
  __module__ = 'nvidia.clara.platform.jobs_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.JobsReadLogsRequest)
  ))
_sym_db.RegisterMessage(JobsReadLogsRequest)
JobsReadLogsResponse = _reflection.GeneratedProtocolMessageType('JobsReadLogsResponse', (_message.Message,), dict(
  DESCRIPTOR = _JOBSREADLOGSRESPONSE,
  __module__ = 'nvidia.clara.platform.jobs_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.JobsReadLogsResponse)
  ))
_sym_db.RegisterMessage(JobsReadLogsResponse)
JobsRemoveMetadataRequest = _reflection.GeneratedProtocolMessageType('JobsRemoveMetadataRequest', (_message.Message,), dict(
  DESCRIPTOR = _JOBSREMOVEMETADATAREQUEST,
  __module__ = 'nvidia.clara.platform.jobs_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.JobsRemoveMetadataRequest)
  ))
_sym_db.RegisterMessage(JobsRemoveMetadataRequest)
JobsRemoveMetadataResponse = _reflection.GeneratedProtocolMessageType('JobsRemoveMetadataResponse', (_message.Message,), dict(
  MetadataEntry = _reflection.GeneratedProtocolMessageType('MetadataEntry', (_message.Message,), dict(
    DESCRIPTOR = _JOBSREMOVEMETADATARESPONSE_METADATAENTRY,
    __module__ = 'nvidia.clara.platform.jobs_pb2'
    # @@protoc_insertion_point(class_scope:nvidia.clara.platform.JobsRemoveMetadataResponse.MetadataEntry)
    ))
  ,
  DESCRIPTOR = _JOBSREMOVEMETADATARESPONSE,
  __module__ = 'nvidia.clara.platform.jobs_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.JobsRemoveMetadataResponse)
  ))
_sym_db.RegisterMessage(JobsRemoveMetadataResponse)
_sym_db.RegisterMessage(JobsRemoveMetadataResponse.MetadataEntry)
JobsStartRequest = _reflection.GeneratedProtocolMessageType('JobsStartRequest', (_message.Message,), dict(
  NamedValue = _reflection.GeneratedProtocolMessageType('NamedValue', (_message.Message,), dict(
    DESCRIPTOR = _JOBSSTARTREQUEST_NAMEDVALUE,
    __module__ = 'nvidia.clara.platform.jobs_pb2'
    # @@protoc_insertion_point(class_scope:nvidia.clara.platform.JobsStartRequest.NamedValue)
    ))
  ,
  DESCRIPTOR = _JOBSSTARTREQUEST,
  __module__ = 'nvidia.clara.platform.jobs_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.JobsStartRequest)
  ))
_sym_db.RegisterMessage(JobsStartRequest)
_sym_db.RegisterMessage(JobsStartRequest.NamedValue)
JobsStartResponse = _reflection.GeneratedProtocolMessageType('JobsStartResponse', (_message.Message,), dict(
  DESCRIPTOR = _JOBSSTARTRESPONSE,
  __module__ = 'nvidia.clara.platform.jobs_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.JobsStartResponse)
  ))
_sym_db.RegisterMessage(JobsStartResponse)
JobsStatusRequest = _reflection.GeneratedProtocolMessageType('JobsStatusRequest', (_message.Message,), dict(
  DESCRIPTOR = _JOBSSTATUSREQUEST,
  __module__ = 'nvidia.clara.platform.jobs_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.JobsStatusRequest)
  ))
_sym_db.RegisterMessage(JobsStatusRequest)
JobsStatusResponse = _reflection.GeneratedProtocolMessageType('JobsStatusResponse', (_message.Message,), dict(
  JobOperatorDetails = _reflection.GeneratedProtocolMessageType('JobOperatorDetails', (_message.Message,), dict(
    DESCRIPTOR = _JOBSSTATUSRESPONSE_JOBOPERATORDETAILS,
    __module__ = 'nvidia.clara.platform.jobs_pb2'
    # @@protoc_insertion_point(class_scope:nvidia.clara.platform.JobsStatusResponse.JobOperatorDetails)
    ))
  ,
  JobDagNode = _reflection.GeneratedProtocolMessageType('JobDagNode', (_message.Message,), dict(
    DESCRIPTOR = _JOBSSTATUSRESPONSE_JOBDAGNODE,
    __module__ = 'nvidia.clara.platform.jobs_pb2'
    # @@protoc_insertion_point(class_scope:nvidia.clara.platform.JobsStatusResponse.JobDagNode)
    ))
  ,
  MetadataEntry = _reflection.GeneratedProtocolMessageType('MetadataEntry', (_message.Message,), dict(
    DESCRIPTOR = _JOBSSTATUSRESPONSE_METADATAENTRY,
    __module__ = 'nvidia.clara.platform.jobs_pb2'
    # @@protoc_insertion_point(class_scope:nvidia.clara.platform.JobsStatusResponse.MetadataEntry)
    ))
  ,
  DESCRIPTOR = _JOBSSTATUSRESPONSE,
  __module__ = 'nvidia.clara.platform.jobs_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.JobsStatusResponse)
  ))
_sym_db.RegisterMessage(JobsStatusResponse)
_sym_db.RegisterMessage(JobsStatusResponse.JobOperatorDetails)
_sym_db.RegisterMessage(JobsStatusResponse.JobDagNode)
_sym_db.RegisterMessage(JobsStatusResponse.MetadataEntry)
# --- Option cache reset (generated) --------------------------------------
# protoc clears the cached `_options` attribute on descriptors whose options
# were given as `serialized_options` (map entries, deprecated fields) so the
# runtime re-parses them lazily from the serialized form.
DESCRIPTOR._options = None
_JOBSADDMETADATAREQUEST_METADATAENTRY._options = None
_JOBSADDMETADATARESPONSE_METADATAENTRY._options = None
_JOBSCREATEREQUEST_METADATAENTRY._options = None
_JOBSLISTRESPONSE_JOBDETAILS_METADATAENTRY._options = None
_JOBSLISTRESPONSE_JOBDETAILS.fields_by_name['timestamp_created']._options = None
_JOBSLISTRESPONSE_JOBDETAILS.fields_by_name['timestamp_started']._options = None
_JOBSLISTRESPONSE_JOBDETAILS.fields_by_name['timestamp_stopped']._options = None
_JOBSREMOVEMETADATARESPONSE_METADATAENTRY._options = None
_JOBSSTATUSRESPONSE_METADATAENTRY._options = None
_JOBSSTATUSRESPONSE.fields_by_name['timestamp_created']._options = None
_JOBSSTATUSRESPONSE.fields_by_name['timestamp_started']._options = None
_JOBSSTATUSRESPONSE.fields_by_name['timestamp_stopped']._options = None
# --- Jobs service descriptor (generated) ---------------------------------
# Describes the `nvidia.clara.platform.Jobs` gRPC service: eight methods,
# each referencing the request/response message descriptors defined above.
# The serialized_start/serialized_end offsets index into the file's
# serialized_pb and must not be edited by hand.
_JOBS = _descriptor.ServiceDescriptor(
  name='Jobs',
  full_name='nvidia.clara.platform.Jobs',
  file=DESCRIPTOR,
  index=0,
  serialized_options=None,
  serialized_start=6228,
  serialized_end=7034,
  methods=[
  _descriptor.MethodDescriptor(
    name='AddMetadata',
    full_name='nvidia.clara.platform.Jobs.AddMetadata',
    index=0,
    containing_service=None,
    input_type=_JOBSADDMETADATAREQUEST,
    output_type=_JOBSADDMETADATARESPONSE,
    serialized_options=None,
  ),
  _descriptor.MethodDescriptor(
    name='Cancel',
    full_name='nvidia.clara.platform.Jobs.Cancel',
    index=1,
    containing_service=None,
    input_type=_JOBSCANCELREQUEST,
    output_type=_JOBSCANCELRESPONSE,
    serialized_options=None,
  ),
  _descriptor.MethodDescriptor(
    name='Create',
    full_name='nvidia.clara.platform.Jobs.Create',
    index=2,
    containing_service=None,
    input_type=_JOBSCREATEREQUEST,
    output_type=_JOBSCREATERESPONSE,
    serialized_options=None,
  ),
  _descriptor.MethodDescriptor(
    name='List',
    full_name='nvidia.clara.platform.Jobs.List',
    index=3,
    containing_service=None,
    input_type=_JOBSLISTREQUEST,
    output_type=_JOBSLISTRESPONSE,
    serialized_options=None,
  ),
  _descriptor.MethodDescriptor(
    name='ReadLogs',
    full_name='nvidia.clara.platform.Jobs.ReadLogs',
    index=4,
    containing_service=None,
    input_type=_JOBSREADLOGSREQUEST,
    output_type=_JOBSREADLOGSRESPONSE,
    serialized_options=None,
  ),
  _descriptor.MethodDescriptor(
    name='RemoveMetadata',
    full_name='nvidia.clara.platform.Jobs.RemoveMetadata',
    index=5,
    containing_service=None,
    input_type=_JOBSREMOVEMETADATAREQUEST,
    output_type=_JOBSREMOVEMETADATARESPONSE,
    serialized_options=None,
  ),
  _descriptor.MethodDescriptor(
    name='Start',
    full_name='nvidia.clara.platform.Jobs.Start',
    index=6,
    containing_service=None,
    input_type=_JOBSSTARTREQUEST,
    output_type=_JOBSSTARTRESPONSE,
    serialized_options=None,
  ),
  _descriptor.MethodDescriptor(
    name='Status',
    full_name='nvidia.clara.platform.Jobs.Status',
    index=7,
    containing_service=None,
    input_type=_JOBSSTATUSREQUEST,
    output_type=_JOBSSTATUSRESPONSE,
    serialized_options=None,
  ),
])
# Register the service descriptor and expose it on the file descriptor.
_sym_db.RegisterServiceDescriptor(_JOBS)
DESCRIPTOR.services_by_name['Jobs'] = _JOBS
# @@protoc_insertion_point(module_scope)
| clara-platform-python-client-main | nvidia_clara/grpc/jobs_pb2.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia/clara/platform/clara.proto
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from nvidia_clara.grpc import jobs_pb2 as nvidia_dot_clara_dot_platform_dot_jobs__pb2
class JobsStub(object):
  """Client-side stub for the ``nvidia.clara.platform.Jobs`` gRPC service.

  Each attribute assigned in ``__init__`` is a multi-callable bound to one
  RPC of the service. ``List`` and ``ReadLogs`` are server-streaming
  (``unary_stream``); all other methods are unary-unary.
  """
  # missing associated documentation comment in .proto file
  pass

  def __init__(self, channel):
    """Constructor.

    Args:
      channel: A grpc.Channel.
    """
    self.AddMetadata = channel.unary_unary(
        '/nvidia.clara.platform.Jobs/AddMetadata',
        request_serializer=nvidia_dot_clara_dot_platform_dot_jobs__pb2.JobsAddMetadataRequest.SerializeToString,
        response_deserializer=nvidia_dot_clara_dot_platform_dot_jobs__pb2.JobsAddMetadataResponse.FromString,
        )
    self.Cancel = channel.unary_unary(
        '/nvidia.clara.platform.Jobs/Cancel',
        request_serializer=nvidia_dot_clara_dot_platform_dot_jobs__pb2.JobsCancelRequest.SerializeToString,
        response_deserializer=nvidia_dot_clara_dot_platform_dot_jobs__pb2.JobsCancelResponse.FromString,
        )
    self.Create = channel.unary_unary(
        '/nvidia.clara.platform.Jobs/Create',
        request_serializer=nvidia_dot_clara_dot_platform_dot_jobs__pb2.JobsCreateRequest.SerializeToString,
        response_deserializer=nvidia_dot_clara_dot_platform_dot_jobs__pb2.JobsCreateResponse.FromString,
        )
    # Server-streaming RPC: returns an iterator of JobsListResponse.
    self.List = channel.unary_stream(
        '/nvidia.clara.platform.Jobs/List',
        request_serializer=nvidia_dot_clara_dot_platform_dot_jobs__pb2.JobsListRequest.SerializeToString,
        response_deserializer=nvidia_dot_clara_dot_platform_dot_jobs__pb2.JobsListResponse.FromString,
        )
    # Server-streaming RPC: returns an iterator of JobsReadLogsResponse.
    self.ReadLogs = channel.unary_stream(
        '/nvidia.clara.platform.Jobs/ReadLogs',
        request_serializer=nvidia_dot_clara_dot_platform_dot_jobs__pb2.JobsReadLogsRequest.SerializeToString,
        response_deserializer=nvidia_dot_clara_dot_platform_dot_jobs__pb2.JobsReadLogsResponse.FromString,
        )
    self.RemoveMetadata = channel.unary_unary(
        '/nvidia.clara.platform.Jobs/RemoveMetadata',
        request_serializer=nvidia_dot_clara_dot_platform_dot_jobs__pb2.JobsRemoveMetadataRequest.SerializeToString,
        response_deserializer=nvidia_dot_clara_dot_platform_dot_jobs__pb2.JobsRemoveMetadataResponse.FromString,
        )
    self.Start = channel.unary_unary(
        '/nvidia.clara.platform.Jobs/Start',
        request_serializer=nvidia_dot_clara_dot_platform_dot_jobs__pb2.JobsStartRequest.SerializeToString,
        response_deserializer=nvidia_dot_clara_dot_platform_dot_jobs__pb2.JobsStartResponse.FromString,
        )
    self.Status = channel.unary_unary(
        '/nvidia.clara.platform.Jobs/Status',
        request_serializer=nvidia_dot_clara_dot_platform_dot_jobs__pb2.JobsStatusRequest.SerializeToString,
        response_deserializer=nvidia_dot_clara_dot_platform_dot_jobs__pb2.JobsStatusResponse.FromString,
        )
class JobsServicer(object):
  """Server-side skeleton for the ``nvidia.clara.platform.Jobs`` service.

  Subclass and override the methods below, then register the instance with
  a server via ``add_JobsServicer_to_server``. Every base implementation
  reports UNIMPLEMENTED to the client and raises ``NotImplementedError``.
  """
  # missing associated documentation comment in .proto file
  pass

  def AddMetadata(self, request, context):
    """Requests the addition of metadata to a job.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def Cancel(self, request, context):
    """Request cancellation of a running job.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def Create(self, request, context):
    """Requests creation of a new job based on a known pipeline.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def List(self, request, context):
    """Requests a filtered list of all known jobs, or a list of all running jobs if no filter is provided.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def ReadLogs(self, request, context):
    """Requests the download of logs for an operator of a job.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def RemoveMetadata(self, request, context):
    """Requests the removal of metadata from a job.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def Start(self, request, context):
    """Request starting of a job created by the Create RPC.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def Status(self, request, context):
    """Requests the status of a known job.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
def add_JobsServicer_to_server(servicer, server):
    """Register *servicer*'s Jobs RPC handlers with *server*.

    Builds one rpc method handler per RPC of the
    ``nvidia.clara.platform.Jobs`` service (unary-unary for most methods,
    unary-stream for ``List`` and ``ReadLogs``), wires each to the matching
    method on *servicer* with the generated request deserializer and
    response serializer, and installs them as a generic handler.

    Args:
      servicer: An object implementing the JobsServicer interface.
      server: A grpc.Server to attach the handlers to.
    """
    _pb2 = nvidia_dot_clara_dot_platform_dot_jobs__pb2
    # (method name, handler factory, request message, response message)
    _method_specs = (
        ('AddMetadata', grpc.unary_unary_rpc_method_handler,
         _pb2.JobsAddMetadataRequest, _pb2.JobsAddMetadataResponse),
        ('Cancel', grpc.unary_unary_rpc_method_handler,
         _pb2.JobsCancelRequest, _pb2.JobsCancelResponse),
        ('Create', grpc.unary_unary_rpc_method_handler,
         _pb2.JobsCreateRequest, _pb2.JobsCreateResponse),
        ('List', grpc.unary_stream_rpc_method_handler,
         _pb2.JobsListRequest, _pb2.JobsListResponse),
        ('ReadLogs', grpc.unary_stream_rpc_method_handler,
         _pb2.JobsReadLogsRequest, _pb2.JobsReadLogsResponse),
        ('RemoveMetadata', grpc.unary_unary_rpc_method_handler,
         _pb2.JobsRemoveMetadataRequest, _pb2.JobsRemoveMetadataResponse),
        ('Start', grpc.unary_unary_rpc_method_handler,
         _pb2.JobsStartRequest, _pb2.JobsStartResponse),
        ('Status', grpc.unary_unary_rpc_method_handler,
         _pb2.JobsStatusRequest, _pb2.JobsStatusResponse),
    )
    rpc_method_handlers = {
        rpc_name: make_handler(
            getattr(servicer, rpc_name),
            request_deserializer=request_cls.FromString,
            response_serializer=response_cls.SerializeToString,
        )
        for rpc_name, make_handler, request_cls, response_cls in _method_specs
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'nvidia.clara.platform.Jobs', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
| clara-platform-python-client-main | nvidia_clara/grpc/jobs_pb2_grpc.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia/clara/platform/common.proto
# Generated protobuf module for nvidia/clara/platform/common.proto.
# NOTE(review): the recorded file path ends in `common_pb2_grpc.py`, but this
# content is the message module (`common_pb2`) — confirm the file is saved
# under the right name. Generated by protoc; do not hand-edit the descriptors.
import sys
# Py2/Py3 shim: on Python 2 bytes literals pass through, on Python 3 they are
# re-encoded from latin1 so the serialized descriptor bytes survive intact.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()

# File-level descriptor carrying the full serialized FileDescriptorProto.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='nvidia/clara/platform/common.proto',
  package='nvidia.clara.platform',
  syntax='proto3',
  serialized_options=_b('\n\031com.nvidia.clara.platformZ\004apis\252\002\032Nvidia.Clara.Platform.Grpc'),
  serialized_pb=_b('\n\"nvidia/clara/platform/common.proto\x12\x15nvidia.clara.platform\"\x1b\n\nIdentifier\x12\r\n\x05value\x18\x01 \x01(\t\"E\n\x07Version\x12\r\n\x05major\x18\x01 \x01(\x05\x12\r\n\x05minor\x18\x02 \x01(\x05\x12\r\n\x05patch\x18\x03 \x01(\x05\x12\r\n\x05label\x18\x04 \x01(\t\"X\n\rRequestHeader\x12\x33\n\x0b\x61pi_version\x18\x01 \x01(\x0b\x32\x1e.nvidia.clara.platform.Version\x12\x12\n\nuser_agent\x18\x02 \x01(\t\"0\n\x0eResponseHeader\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x11\x12\x10\n\x08messages\x18\x02 \x03(\t\"\x1a\n\tTimestamp\x12\r\n\x05value\x18\x01 \x01(\x12\x42>\n\x19\x63om.nvidia.clara.platformZ\x04\x61pis\xaa\x02\x1aNvidia.Clara.Platform.Grpcb\x06proto3')
)

# Descriptor for message Identifier: single string field `value`.
_IDENTIFIER = _descriptor.Descriptor(
  name='Identifier',
  full_name='nvidia.clara.platform.Identifier',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='value', full_name='nvidia.clara.platform.Identifier.value', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=61,
  serialized_end=88,
)

# Descriptor for message Version: major/minor/patch ints plus a label string.
_VERSION = _descriptor.Descriptor(
  name='Version',
  full_name='nvidia.clara.platform.Version',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='major', full_name='nvidia.clara.platform.Version.major', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='minor', full_name='nvidia.clara.platform.Version.minor', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='patch', full_name='nvidia.clara.platform.Version.patch', index=2,
      number=3, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='label', full_name='nvidia.clara.platform.Version.label', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=90,
  serialized_end=159,
)

# Descriptor for message RequestHeader: embedded Version + user_agent string.
_REQUESTHEADER = _descriptor.Descriptor(
  name='RequestHeader',
  full_name='nvidia.clara.platform.RequestHeader',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='api_version', full_name='nvidia.clara.platform.RequestHeader.api_version', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='user_agent', full_name='nvidia.clara.platform.RequestHeader.user_agent', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=161,
  serialized_end=249,
)

# Descriptor for message ResponseHeader: sint32 code + repeated message strings.
_RESPONSEHEADER = _descriptor.Descriptor(
  name='ResponseHeader',
  full_name='nvidia.clara.platform.ResponseHeader',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='code', full_name='nvidia.clara.platform.ResponseHeader.code', index=0,
      number=1, type=17, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='messages', full_name='nvidia.clara.platform.ResponseHeader.messages', index=1,
      number=2, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=251,
  serialized_end=299,
)

# Descriptor for message Timestamp: a single sint64 `value`.
_TIMESTAMP = _descriptor.Descriptor(
  name='Timestamp',
  full_name='nvidia.clara.platform.Timestamp',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='value', full_name='nvidia.clara.platform.Timestamp.value', index=0,
      number=1, type=18, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=301,
  serialized_end=327,
)

# Cross-link the message-typed field and register every message on the file.
_REQUESTHEADER.fields_by_name['api_version'].message_type = _VERSION
DESCRIPTOR.message_types_by_name['Identifier'] = _IDENTIFIER
DESCRIPTOR.message_types_by_name['Version'] = _VERSION
DESCRIPTOR.message_types_by_name['RequestHeader'] = _REQUESTHEADER
DESCRIPTOR.message_types_by_name['ResponseHeader'] = _RESPONSEHEADER
DESCRIPTOR.message_types_by_name['Timestamp'] = _TIMESTAMP
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Concrete message classes generated from the descriptors above.
Identifier = _reflection.GeneratedProtocolMessageType('Identifier', (_message.Message,), dict(
  DESCRIPTOR = _IDENTIFIER,
  __module__ = 'nvidia.clara.platform.common_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.Identifier)
  ))
_sym_db.RegisterMessage(Identifier)

Version = _reflection.GeneratedProtocolMessageType('Version', (_message.Message,), dict(
  DESCRIPTOR = _VERSION,
  __module__ = 'nvidia.clara.platform.common_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.Version)
  ))
_sym_db.RegisterMessage(Version)

RequestHeader = _reflection.GeneratedProtocolMessageType('RequestHeader', (_message.Message,), dict(
  DESCRIPTOR = _REQUESTHEADER,
  __module__ = 'nvidia.clara.platform.common_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.RequestHeader)
  ))
_sym_db.RegisterMessage(RequestHeader)

ResponseHeader = _reflection.GeneratedProtocolMessageType('ResponseHeader', (_message.Message,), dict(
  DESCRIPTOR = _RESPONSEHEADER,
  __module__ = 'nvidia.clara.platform.common_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.ResponseHeader)
  ))
_sym_db.RegisterMessage(ResponseHeader)

Timestamp = _reflection.GeneratedProtocolMessageType('Timestamp', (_message.Message,), dict(
  DESCRIPTOR = _TIMESTAMP,
  __module__ = 'nvidia.clara.platform.common_pb2'
  # @@protoc_insertion_point(class_scope:nvidia.clara.platform.Timestamp)
  ))
_sym_db.RegisterMessage(Timestamp)

DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| clara-platform-python-client-main | nvidia_clara/grpc/common_pb2_grpc.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import signal
import time
import grpc
import grpc_testing
from grpc.framework.foundation import logging_pool
import nvidia_clara.grpc.common_pb2 as common_pb2
import nvidia_clara.grpc.jobs_pb2 as jobs_pb2
import nvidia_clara.grpc.jobs_pb2_grpc as jobs_pb2_grpc
import nvidia_clara.grpc.payloads_pb2 as payloads_pb2
import nvidia_clara.grpc.payloads_pb2_grpc as payloads_pb2_grpc
import nvidia_clara.grpc.pipelines_pb2 as pipelines_pb2
import nvidia_clara.grpc.pipelines_pb2_grpc as pipelines_pb2_grpc
# Maps a Clara service name to the `services_by_name` table of its generated
# protobuf FileDescriptor; run_client_test uses these tables both to build the
# fake grpc_testing channel and to look up method descriptors to serve.
SERVICES = {
    'Pipelines': pipelines_pb2.DESCRIPTOR.services_by_name,
    'Jobs': jobs_pb2.DESCRIPTOR.services_by_name,
    'Payloads': payloads_pb2.DESCRIPTOR.services_by_name
}
def get_stubs(service, channel):
    """Return the gRPC client stub for *service* bound to *channel*.

    Args:
        service: one of 'Jobs', 'Payloads' or 'Pipelines'.
        channel: the (possibly fake grpc_testing) channel to bind the stub to.

    Returns:
        The generated *Stub instance for the requested service.

    Raises:
        ValueError: if *service* is not a known service name.  The original
            implementation fell through and returned None, which surfaced
            later as a confusing AttributeError far from the typo.
    """
    stub_factories = {
        'Jobs': jobs_pb2_grpc.JobsStub,
        'Payloads': payloads_pb2_grpc.PayloadsStub,
        'Pipelines': pipelines_pb2_grpc.PipelinesStub,
    }
    try:
        return stub_factories[service](channel)
    except KeyError:
        raise ValueError('Unknown service name: {!r}'.format(service)) from None
class Timeout(Exception):
    """Raised when the fake channel does not observe the expected RPC in time."""
# Reference: https://github.com/grpc/grpc/blob/master/src/python/grpcio_tests/tests/testing/_client_test.py
def verify_request(channel, stub_method, call_sig, expected_requests, responses, timeout=1):
    """Play the server side of one RPC on the fake *channel* and verify the client.

    Args:
        channel: grpc_testing channel the client under test is bound to.
        stub_method: method descriptor (from the service descriptor) to serve.
        call_sig: one of 'stream_unary', 'unary_stream' or 'unary_unary';
            selects which take_* API is used on the channel.
        expected_requests: requests the client is expected to send, compared
            with protobuf equality via ``assert``.
        responses: canned response message(s) used to terminate the RPC.
        timeout: seconds before SIGALRM aborts a blocked take_* call.

    Raises:
        Timeout: if the client never issues the expected RPC within *timeout*.
    """
    def timeout_handler(signum, frame):
        raise Timeout('Timeout while taking requests')
    try:
        # SIGALRM watchdog: grpc_testing's take_xxx_xxx methods block forever
        # and expose no timeout parameter of their own.
        # NOTE(review): signal.alarm is Unix-only and main-thread-only —
        # acceptable for this harness, but worth confirming on all CI platforms.
        signal.signal(signal.SIGALRM, timeout_handler)
        signal.alarm(timeout)
        if call_sig == 'stream_unary':
            invocation_metadata, rpc = channel.take_stream_unary(stub_method)
            rpc.send_initial_metadata(())
            # Drain the client's request stream, comparing item by item.
            for expected_request in expected_requests:
                request = rpc.take_request()
                assert expected_request == request
            rpc.requests_closed()
            # Single response terminates a *-unary call.
            rpc.terminate(next(iter(responses)), (), grpc.StatusCode.OK, '')
        elif call_sig == 'unary_stream':
            invocation_metadata, request, rpc = channel.take_unary_stream(stub_method)
            assert next(iter(expected_requests)) == request
            rpc.send_initial_metadata(())
            # Stream every canned response back before terminating.
            for response in responses:
                rpc.send_response(response)
            rpc.terminate((), grpc.StatusCode.OK, '')
        elif call_sig == 'unary_unary':
            invocation_metadata, request, rpc = channel.take_unary_unary(stub_method)
            assert next(iter(expected_requests)) == request
            rpc.send_initial_metadata(())
            rpc.terminate(next(iter(responses)), (), grpc.StatusCode.OK, '')
    except Timeout:
        # Redundant re-raise kept for explicitness; the caller handles it.
        raise
    finally:
        # Always disarm the alarm so it cannot fire during a later test.
        signal.alarm(0)
def run_client_test(service_name, method_name, test_method, stub_method_handlers, *args, **kwargs):
    """Run *test_method* on a worker thread while this thread serves its RPCs.

    Builds a grpc_testing channel for *service_name*, submits the client call
    to a one-thread pool, answers each expected RPC via verify_request, then
    returns the client's result (or re-raises its exception).

    Args:
        service_name: key into SERVICES ('Jobs', 'Payloads' or 'Pipelines').
        method_name: client method name forwarded to *test_method*.
        test_method: callable(stub, method_name, *args, **kwargs) driving the client.
        stub_method_handlers: iterable of (stub_method_name, call_sig,
            (expected_requests, responses)) tuples, served in order.
        **kwargs: may contain '_test_client_only' (popped here) to skip the
            server simulation when the client is expected to fail before
            issuing any RPC.

    Returns:
        Whatever *test_method* returns.
    """
    fake_time = grpc_testing.strict_fake_time(
        time.time())
    channel = grpc_testing.channel(SERVICES[service_name].values(),
                                   fake_time)
    stub = get_stubs(service_name, channel)
    # NOTE(review): assumes each service is registered under its own name,
    # e.g. SERVICES['Jobs']['Jobs'] — true for the three Clara protos here.
    service = SERVICES[service_name][service_name]
    client_execution_thread_pool = logging_pool.pool(1)
    try:
        test_client_only = kwargs.pop('_test_client_only', None)
        application_future = client_execution_thread_pool.submit(
            test_method,
            stub, method_name, *args, **kwargs)
        # if the client method call is expected to raise exception before grpc call
        if test_client_only:
            pass  # do not simulate grpc response
        else:
            for stub_method_name, call_sig, handlers in stub_method_handlers:
                expected_requests, responses = handlers
                stub_method = service.methods_by_name[stub_method_name]
                verify_request(channel, stub_method, call_sig, expected_requests, responses)
        application_return_value = application_future.result()
        application_exception = application_future.exception()
        if application_exception:
            raise application_exception
        return application_return_value
    except Timeout:
        raise
    finally:
        # Non-blocking shutdown: do not wait on a wedged client thread.
        client_execution_thread_pool.shutdown(False)
        del channel
| clara-platform-python-client-main | tests/test_client_tools.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile

import nvidia_clara.grpc.common_pb2 as common_pb2
import nvidia_clara.grpc.payloads_pb2 as payloads_pb2
from nvidia_clara.base_client import BaseClient
from nvidia_clara.payloads_client import PayloadsClient
import nvidia_clara.payload_types as payload_types
from tests.test_jobs_client import run_client_test
def run_payload_client(stub, method_name, *args, **kwargs):
    """Open a PayloadsClient over *stub* and invoke *method_name* on it."""
    with PayloadsClient(target='10.0.0.1:50051', stub=stub) as client:
        bound_method = getattr(client, method_name)
        return bound_method(*args, **kwargs)
class MockClaraPayloadServiceClient:
    """Test double that drives PayloadsClient calls through the fake channel."""

    # Installed by each test before the client is exercised:
    # list of (stub_method_name, call_sig, (expected_requests, responses)).
    stub_method_handlers = []

    def __init__(self, channel, stub=None, request_header=None, logger=None):
        pass

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        return False

    def _invoke(self, method_name, *args, **kwargs):
        # Route every public method through the shared harness using the
        # handlers currently registered on the class.
        return run_client_test(
            'Payloads',
            method_name,
            run_payload_client,
            stub_method_handlers=MockClaraPayloadServiceClient.stub_method_handlers,
            *args, **kwargs)

    def create_payload(self, *args, **kwargs):
        return self._invoke('create_payload', *args, **kwargs)

    def download_from(self, *args, **kwargs):
        return self._invoke('download_from', *args, **kwargs)

    def upload(self, *args, **kwargs):
        return self._invoke('upload', *args, **kwargs)

    def close(self):
        pass
def test_create_payload():
    """create_payload returns the identifier and type minted by the service."""
    expected_requests = [
        payloads_pb2.PayloadsCreateRequest(header=BaseClient.get_request_header())
    ]
    canned_responses = [
        payloads_pb2.PayloadsCreateResponse(
            header=common_pb2.ResponseHeader(code=0, messages=[]),
            payload_id=common_pb2.Identifier(value='92656d79fa414db6b294069c0e9e6df5'),
            type=payloads_pb2.PAYLOAD_TYPE_REUSABLE,
        )
    ]
    # Register the single unary-unary exchange on the mock before use.
    MockClaraPayloadServiceClient.stub_method_handlers = [
        ('Create', 'unary_unary', (expected_requests, canned_responses))
    ]
    with MockClaraPayloadServiceClient('localhost:50051') as client:
        payload_details = client.create_payload()
    print(payload_details.payload_id)
    print(payload_details.payload_type)
    assert payload_details.payload_id.value == '92656d79fa414db6b294069c0e9e6df5'
    assert payload_details.payload_type == 2
# Sample MetaImage (.mhd) header used as the blob-content fixture for the
# payload download/upload round-trip tests below.
MHD_TEXT = '''ObjectType = Image
NDims = 3
BinaryData = True
BinaryDataByteOrderMSB = False
CompressedData = False
TransformMatrix = -1 0 0 0 1 0 0 0 1
Offset = 0 0 0
CenterOfRotation = 0 0 0
AnatomicalOrientation = RAI
ElementSpacing = 0.98 0.98 1.5
DimSize = 460 286 1182
ElementType = MET_SHORT
ElementDataFile = highResCT.raw
'''
def test_download_file():
    """download_from streams blob content into a writable file object.

    The fake service streams MHD_TEXT back in one chunk; the client must
    report the file details and write the exact bytes to the destination.
    Fix: the original wrote ./highResCT.mhd into the CWD and only removed it
    on full success, so a failing assertion leaked the file. The download now
    targets a TemporaryDirectory that is cleaned up unconditionally.
    """
    fake_payload_id = '7ac5c691e13d4f45894a3a70d9925936'
    fake_request_file_name = '/input/highResCT.mhd'
    requests = [
        payloads_pb2.PayloadsDownloadRequest(
            header=BaseClient.get_request_header(),
            payload_id=common_pb2.Identifier(value=fake_payload_id),
            name=fake_request_file_name)
    ]
    responses = [
        payloads_pb2.PayloadsDownloadResponse(
            header=common_pb2.ResponseHeader(code=0, messages=[]),
            details=payloads_pb2.PayloadFileDetails(
                mode=0, name=fake_request_file_name, size=len(MHD_TEXT)),
            data=MHD_TEXT.encode('utf-8')
        )
    ]
    stub_method_handlers = [('Download', 'unary_stream', (requests, responses))]
    MockClaraPayloadServiceClient.stub_method_handlers = stub_method_handlers
    with MockClaraPayloadServiceClient('localhost:50051') as client:
        with tempfile.TemporaryDirectory() as tmp_dir:
            local_path = os.path.join(tmp_dir, 'highResCT.mhd')
            with open(local_path, 'wb+') as wb:
                file_details = client.download_from(
                    payload_id=payload_types.PayloadId(fake_payload_id),
                    blob_name=fake_request_file_name,
                    dest_obj=wb)
            assert file_details.mode == 0
            assert file_details.name == fake_request_file_name
            assert file_details.size == len(MHD_TEXT)
            # Read back what the client wrote before the directory vanishes.
            with open(local_path, 'r') as file:
                data = file.read()
    print("Data Returned: ")
    print(data)
    assert data == MHD_TEXT
def test_upload(tmp_path):
    """upload streams a local file's bytes to the payload service.

    Fix: the function accepted pytest's ``tmp_path`` fixture but never used
    it — it created and deleted ./image.mhd in the current working directory,
    polluting the CWD and leaking the file on assertion failure. The source
    file now lives under ``tmp_path``, which pytest cleans up automatically.
    """
    fake_payload_id = '7ac5c691e13d4f45894a3a70d9925936'
    fake_response_file_name = './input/image.mhd'
    local_file = tmp_path / 'image.mhd'
    requests = [
        payloads_pb2.PayloadsUploadRequest(
            header=BaseClient.get_request_header(),
            payload_id=common_pb2.Identifier(value=fake_payload_id),
            details=payloads_pb2.PayloadFileDetails(
                mode=0, name=fake_response_file_name, size=len(MHD_TEXT)),
            data=MHD_TEXT.encode('utf-8')
        )
    ]
    responses = [
        payloads_pb2.PayloadsUploadResponse(
            header=common_pb2.ResponseHeader(code=0, messages=[]),
            details=payloads_pb2.PayloadFileDetails(
                mode=0, name=fake_response_file_name, size=len(MHD_TEXT))
        )
    ]
    stub_method_handlers = [('Upload', 'stream_unary', (requests, responses))]
    MockClaraPayloadServiceClient.stub_method_handlers = stub_method_handlers
    with MockClaraPayloadServiceClient('localhost:50051') as client:
        local_file.write_text(MHD_TEXT)
        file_details = None
        with local_file.open('rb+') as fp:
            file_details = client.upload(
                payload_id=payload_types.PayloadId(fake_payload_id),
                blob_name=fake_response_file_name, file_object=fp)
    print(file_details.mode, file_details.name, file_details.size)
    assert file_details.mode == 0
    assert file_details.name == fake_response_file_name
    assert file_details.size == len(MHD_TEXT)
| clara-platform-python-client-main | tests/test_payloads_client.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nvidia_clara.grpc.common_pb2 as common_pb2
import nvidia_clara.grpc.pipelines_pb2 as pipelines_pb2
from nvidia_clara.base_client import BaseClient
from nvidia_clara.pipelines_client import PipelinesClient
import nvidia_clara.pipeline_types as pipeline_types
from tests.test_client_tools import run_client_test
def run_pipeline_client(stub, method_name, *args, **kwargs):
    """Open a PipelinesClient over *stub* and invoke *method_name* on it."""
    with PipelinesClient(target='10.0.0.1:50051', stub=stub) as client:
        bound_method = getattr(client, method_name)
        return bound_method(*args, **kwargs)
class MockClaraPipelineServiceClient:
    """Test double that drives PipelinesClient calls through the fake channel."""

    # Installed by each test before the client is exercised:
    # list of (stub_method_name, call_sig, (expected_requests, responses)).
    stub_method_handlers = []

    def __init__(self, channel, stub=None, request_header=None, logger=None):
        pass

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        return False

    def _invoke(self, method_name, *args, **kwargs):
        # Route every public method through the shared harness using the
        # handlers currently registered on the class.
        return run_client_test(
            'Pipelines',
            method_name,
            run_pipeline_client,
            stub_method_handlers=MockClaraPipelineServiceClient.stub_method_handlers,
            *args, **kwargs)

    def create_pipeline(self, *args, **kwargs):
        return self._invoke('create_pipeline', *args, **kwargs)

    def list_pipelines(self, *args, **kwargs):
        return self._invoke('list_pipelines', *args, **kwargs)

    def close(self):
        pass
PIPELINE_TEXT = '''api-version: 0.2.0
name: sample-pipeline
operators:
- name: producer
import:
path: producer.yaml
- name: consumer
import:
path: consumer.yaml
args:
input-from: producer
'''
def test_create_pipeline():
    """create_pipeline returns the identifier minted by the service."""
    pipeline_yaml = 'pipeline.yaml'
    expected_requests = [
        pipelines_pb2.PipelinesCreateRequest(
            header=BaseClient.get_request_header(),
            definition=pipelines_pb2.PipelineDefinitionFile(
                path='pipeline.yaml',
                content=PIPELINE_TEXT),
        )
    ]
    canned_responses = [
        pipelines_pb2.PipelinesCreateResponse(
            header=common_pb2.ResponseHeader(code=0, messages=[]),
            pipeline_id=common_pb2.Identifier(value='92656d79fa414db6b294069c0e9e6df5'),
        )
    ]
    # Register the single stream-unary exchange on the mock before use.
    MockClaraPipelineServiceClient.stub_method_handlers = [
        ('Create', 'stream_unary', (expected_requests, canned_responses))
    ]
    def_list = [
        pipeline_types.PipelineDefinition(name=pipeline_yaml, content=PIPELINE_TEXT)
    ]
    with MockClaraPipelineServiceClient('localhost:50051') as client:
        pipeline_id = client.create_pipeline(definition=def_list)
    print(pipeline_id)
    assert pipeline_id.value == '92656d79fa414db6b294069c0e9e6df5'
def test_create_pipeline_with_id():
    """create_pipeline forwards a caller-supplied pipeline id and echoes it back."""
    pipeline_yaml = 'pipeline.yaml'
    known_id = '92656d79fa414db6b294069c0e9e6df5'
    expected_requests = [
        pipelines_pb2.PipelinesCreateRequest(
            header=BaseClient.get_request_header(),
            pipeline_id=common_pb2.Identifier(value=known_id),
            definition=pipelines_pb2.PipelineDefinitionFile(
                path='pipeline.yaml',
                content=PIPELINE_TEXT),
        )
    ]
    canned_responses = [
        pipelines_pb2.PipelinesCreateResponse(
            header=common_pb2.ResponseHeader(code=0, messages=[]),
            pipeline_id=common_pb2.Identifier(value=known_id),
        )
    ]
    MockClaraPipelineServiceClient.stub_method_handlers = [
        ('Create', 'stream_unary', (expected_requests, canned_responses))
    ]
    def_list = [
        pipeline_types.PipelineDefinition(name=pipeline_yaml, content=PIPELINE_TEXT)
    ]
    # Distinct names for the requested id and the returned id; the original
    # reused a single `pipeline_id` variable for both roles.
    requested_id = pipeline_types.PipelineId(known_id)
    with MockClaraPipelineServiceClient('localhost:50051') as client:
        returned_id = client.create_pipeline(definition=def_list, pipeline_id=requested_id)
    print(returned_id)
    assert returned_id.value == known_id
def test_list_pipeline():
    """list_pipelines converts every streamed PipelineDetails entry."""
    expected_requests = [
        pipelines_pb2.PipelinesListRequest(header=BaseClient.get_request_header())
    ]
    # (name, pipeline id) pairs the fake service will stream back, in order.
    listed = [
        ('Pipeline_1', '92656d79fa414db6b294069c0e9e6df5'),
        ('Pipeline_2', '21656d79fa414db6b294069c0e9e6r23'),
    ]
    canned_responses = [
        pipelines_pb2.PipelinesListResponse(
            header=common_pb2.ResponseHeader(code=0, messages=[]),
            details=pipelines_pb2.PipelinesListResponse.PipelineDetails(
                name=name,
                pipeline_id=common_pb2.Identifier(value=pipeline_id),
            ),
        )
        for name, pipeline_id in listed
    ]
    MockClaraPipelineServiceClient.stub_method_handlers = [
        ('List', 'unary_stream', (expected_requests, canned_responses))
    ]
    with MockClaraPipelineServiceClient('localhost:50051') as client:
        pipeline_list = client.list_pipelines()
    print(pipeline_list)
    assert len(pipeline_list) == 2
    assert pipeline_list[0].pipeline_id.value == listed[0][1]
    assert pipeline_list[1].pipeline_id.value == listed[1][1]
| clara-platform-python-client-main | tests/test_pipelines_client.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import nvidia_clara.grpc.common_pb2 as common_pb2
import nvidia_clara.grpc.jobs_pb2 as jobs_pb2
from nvidia_clara.base_client import BaseClient
from nvidia_clara.jobs_client import JobsClient
import nvidia_clara.pipeline_types as pipeline_types
import nvidia_clara.job_types as job_types
from tests.test_client_tools import run_client_test
def run_job_client(stub, method_name, *args, **kwargs):
    """Open a JobsClient over *stub* and invoke *method_name* on it."""
    with JobsClient(target='10.0.0.1:50051', stub=stub) as client:
        bound_method = getattr(client, method_name)
        return bound_method(*args, **kwargs)
class MockClaraJobsServiceClient:
    """Test double that drives JobsClient calls through the fake channel."""

    # Installed by each test before the client is exercised:
    # list of (stub_method_name, call_sig, (expected_requests, responses)).
    stub_method_handlers = []

    def __init__(self, channel, stub=None, request_header=None, logger=None):
        pass

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        return False

    def _invoke(self, method_name, *args, **kwargs):
        # Route every public method through the shared harness using the
        # handlers currently registered on the class.
        return run_client_test(
            'Jobs',
            method_name,
            run_job_client,
            stub_method_handlers=MockClaraJobsServiceClient.stub_method_handlers,
            *args, **kwargs)

    def create_job(self, *args, **kwargs):
        return self._invoke('create_job', *args, **kwargs)

    def cancel_job(self, *args, **kwargs):
        return self._invoke('cancel_job', *args, **kwargs)

    def get_status(self, *args, **kwargs):
        return self._invoke('get_status', *args, **kwargs)

    def list_jobs(self, *args, **kwargs):
        return self._invoke('list_jobs', *args, **kwargs)

    def start_job(self, *args, **kwargs):
        return self._invoke('start_job', *args, **kwargs)

    def job_logs(self, *args, **kwargs):
        return self._invoke('job_logs', *args, **kwargs)

    def close(self):
        pass
def test_create_job():
    """create_job returns the job and payload identifiers issued by the service."""
    expected_requests = [
        jobs_pb2.JobsCreateRequest(
            header=BaseClient.get_request_header(),
            name='test job',
            pipeline_id=common_pb2.Identifier(value='92656d79fa414db6b294069c0e9e6df5'),
            priority=jobs_pb2.JOB_PRIORITY_NORMAL,
        )
    ]
    canned_responses = [
        jobs_pb2.JobsCreateResponse(
            header=common_pb2.ResponseHeader(code=0, messages=[]),
            job_id=common_pb2.Identifier(value='432b274a8f754968888807fe1eba237b'),
            payload_id=common_pb2.Identifier(value='7ac5c691e13d4f45894a3a70d9925936'),
        )
    ]
    # Register the single unary-unary exchange on the mock before use.
    MockClaraJobsServiceClient.stub_method_handlers = [
        ('Create', 'unary_unary', (expected_requests, canned_responses))
    ]
    with MockClaraJobsServiceClient('localhost:50051') as client:
        job_info = client.create_job(
            job_name='test job',
            pipeline_id=pipeline_types.PipelineId('92656d79fa414db6b294069c0e9e6df5'))
    print(job_info.job_id.value, job_info.payload_id.value)
    assert job_info.job_id.value == '432b274a8f754968888807fe1eba237b'
    assert job_info.payload_id.value == '7ac5c691e13d4f45894a3a70d9925936'
def test_cancel_job():
    """cancel_job reports the stopped/canceled token for the targeted job."""
    job_uuid = '432b274a8f754968888807fe1eba237b'
    expected_requests = [
        jobs_pb2.JobsCancelRequest(
            header=BaseClient.get_request_header(),
            job_id=common_pb2.Identifier(value=job_uuid),
        )
    ]
    canned_responses = [
        jobs_pb2.JobsCancelResponse(
            header=common_pb2.ResponseHeader(code=0, messages=[]),
            job_id=common_pb2.Identifier(value=job_uuid),
            job_state=jobs_pb2.JOB_STATE_STOPPED,
            job_status=jobs_pb2.JOB_STATUS_CANCELED,
        )
    ]
    MockClaraJobsServiceClient.stub_method_handlers = [
        ('Cancel', 'unary_unary', (expected_requests, canned_responses))
    ]
    with MockClaraJobsServiceClient('10.0.0.1:50051') as client:
        job_token = client.cancel_job(job_id=job_types.JobId(value=job_uuid))
    print(job_token.job_id.value, job_token.job_state, job_token.job_status)
    assert job_token.job_id.value == job_uuid
    assert job_token.job_state == 3
    assert job_token.job_status == 3
def test_get_status():
    """get_status maps the JobsStatusResponse onto the client-side details object."""
    expected_requests_list = [
        jobs_pb2.JobsStatusRequest(
            header=BaseClient.get_request_header(),
            job_id=common_pb2.Identifier(
                value='432b274a8f754968888807fe1eba237b'
            )
        )
    ]
    # NOTE(review): the assertions below subtract 62135596800 s, the offset
    # between 0001-01-01 and the Unix epoch — the Timestamp value is
    # presumably seconds since year 1 (.NET-style); confirm against the
    # service's Timestamp semantics.
    fake_seconds_from_epoch = 63763345820
    responses_list = [
        jobs_pb2.JobsStatusResponse(
            header=common_pb2.ResponseHeader(
                code=0,
                messages=[]),
            name="job_1",
            job_id=common_pb2.Identifier(
                value='432b274a8f754968888807fe1eba237b'
            ),
            pipeline_id=common_pb2.Identifier(
                value='92656d79fa414db6b294069c0e9e6df5'
            ),
            payload_id=common_pb2.Identifier(
                value='7ac5c691e13d4f45894a3a70d9925936'
            ),
            state=jobs_pb2.JOB_STATE_RUNNING,
            status=jobs_pb2.JOB_STATUS_HEALTHY,
            created=common_pb2.Timestamp(value=fake_seconds_from_epoch)
        )
    ]
    stub_method_handlers = [(
        'Status',
        'unary_unary',
        (
            expected_requests_list,
            responses_list
        )
    )]
    MockClaraJobsServiceClient.stub_method_handlers = stub_method_handlers
    with MockClaraJobsServiceClient('10.0.0.1:50051') as client:
        job_details = client.get_status(
            job_id=job_types.JobId(value='432b274a8f754968888807fe1eba237b')
        )
        print(job_details.job_id.value, job_details.job_state, job_details.job_status)
        print(job_details.date_created)
        print(datetime.datetime.fromtimestamp(float(fake_seconds_from_epoch) - 62135596800))
        assert job_details.name == "job_1"
        assert job_details.job_id.value == '432b274a8f754968888807fe1eba237b'
        assert job_details.pipeline_id.value == '92656d79fa414db6b294069c0e9e6df5'
        assert job_details.payload_id.value == '7ac5c691e13d4f45894a3a70d9925936'
        # Enum values: JOB_STATE_RUNNING == 2, JOB_STATUS_HEALTHY == 1.
        assert job_details.job_state == 2
        assert job_details.job_status == 1
        assert job_details.date_created == datetime.datetime.fromtimestamp(
            float(fake_seconds_from_epoch) - 62135596800).astimezone(datetime.timezone.utc)
def test_list_jobs():
    """list_jobs converts every streamed JobDetails entry in order."""
    expected_requests = [
        jobs_pb2.JobsListRequest(header=BaseClient.get_request_header())
    ]
    created_ticks = 63750823591
    # (job name, job id, payload id, pipeline id) for each streamed entry.
    jobs_on_server = [
        ('job_1', '432b274a8f754968888807fe1eba237b',
         '532b274a8f754968888807fe1eba237b', '932b274a8f754968888807fe1eba237b'),
        ('job_2', '212b274a8f754968888807fe1eba237b',
         '212b274a8f754968888807fe1eba237b', '322b274a8f754968888807fe1eba237b'),
    ]
    canned_responses = [
        jobs_pb2.JobsListResponse(
            header=common_pb2.ResponseHeader(code=0, messages=[]),
            job_details=jobs_pb2.JobsListResponse.JobDetails(
                job_name=job_name,
                job_id=common_pb2.Identifier(value=job_id),
                payload_id=common_pb2.Identifier(value=payload_id),
                pipeline_id=common_pb2.Identifier(value=pipeline_id),
                created=common_pb2.Timestamp(value=created_ticks),
            ),
        )
        for job_name, job_id, payload_id, pipeline_id in jobs_on_server
    ]
    MockClaraJobsServiceClient.stub_method_handlers = [
        ('List', 'unary_stream', (expected_requests, canned_responses))
    ]
    with MockClaraJobsServiceClient('10.0.0.1:50051') as client:
        list_jobs = client.list_jobs()
    print("Length of list response: " + str(len(list_jobs)))
    assert len(list_jobs) == 2
    expected_created = datetime.datetime(2021, 3, 8, 18, 6, 31, tzinfo=datetime.timezone.utc)
    for details, (job_name, job_id, payload_id, pipeline_id) in zip(list_jobs, jobs_on_server):
        assert details.name == job_name
        assert details.job_id.value == job_id
        assert details.payload_id.value == payload_id
        assert details.pipeline_id.value == pipeline_id
        assert details.date_created == expected_created
def test_start_job():
    """start_job() should surface the state/status returned by the Jobs service."""
    job_identifier = '432b274a8f754968888807fe1eba237b'
    expected_requests = [
        jobs_pb2.JobsStartRequest(
            header=BaseClient.get_request_header(),
            job_id=common_pb2.Identifier(value=job_identifier),
        )
    ]
    canned_responses = [
        jobs_pb2.JobsStartResponse(
            header=common_pb2.ResponseHeader(code=0, messages=[]),
            state=jobs_pb2.JOB_STATE_RUNNING,
            status=jobs_pb2.JOB_STATUS_HEALTHY,
            priority=jobs_pb2.JOB_PRIORITY_NORMAL,
        )
    ]
    # Wire the canned unary RPC exchange into the mock service client.
    MockClaraJobsServiceClient.stub_method_handlers = [
        ('Start', 'unary_unary', (expected_requests, canned_responses))
    ]
    with MockClaraJobsServiceClient('10.0.0.1:50051') as client:
        job_token = client.start_job(
            job_id=job_types.JobId(value=job_identifier)
        )
        print(job_token.job_id.value, job_token.job_state, job_token.job_status)
        assert job_token.job_id.value == job_identifier
        assert job_token.job_state == 2
        assert job_token.job_status == 1
def test_read_logs():
    """job_logs() should concatenate log lines streamed across multiple responses."""
    job_identifier = '432b274a8f754968888807fe1eba237b'
    expected_requests = [
        jobs_pb2.JobsReadLogsRequest(
            header=BaseClient.get_request_header(),
            job_id=common_pb2.Identifier(value=job_identifier),
            operator_name="dicom-reader",
        )
    ]

    def make_response(lines):
        # Every streamed chunk repeats the header/job/operator and carries a
        # slice of the log lines.
        return jobs_pb2.JobsReadLogsResponse(
            header=common_pb2.ResponseHeader(code=0, messages=[]),
            job_id=common_pb2.Identifier(value=job_identifier),
            operator_name="Dicom Reader",
            logs=lines,
        )

    canned_responses = [
        make_response(["Log_String_0", "Log_String_1"]),
        make_response(["Log_String_2", "Log_String_3"]),
    ]
    MockClaraJobsServiceClient.stub_method_handlers = [
        ('ReadLogs', 'unary_stream', (expected_requests, canned_responses))
    ]
    with MockClaraJobsServiceClient('10.0.0.1:50051') as client:
        job_logs = client.job_logs(
            job_id=job_types.JobId(value=job_identifier),
            operator_name="dicom-reader",
        )
        print(len(job_logs))
        assert len(job_logs) == 4
        for index in range(4):
            assert job_logs[index] == "Log_String_" + str(index)
| clara-platform-python-client-main | tests/test_jobs_client.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia_clara.payloads_client import PayloadsClient
import nvidia_clara.payload_types as payload_types

# Client creation with IP and port of a running Clara instance.
clara_ip_address = "10.0.0.1"
clara_port = "30031"
payload_client = PayloadsClient(target=clara_ip_address, port=clara_port)

# Create a static, re-usable payload; the returned details include the new
# payload's identifier.
payload_details = payload_client.create_payload()

# Delete the payload that was just created.
payload_client.delete_payload(payload_id=payload_details.payload_id)

# Download from an existing payload, e.g. the payload with identifier
# '61a477bf-6bcc-4fdd-abad-ccb8886eb52f' and blob/file name ./input/I114.dcm.
example_payload_identifier = '61a477bf-6bcc-4fdd-abad-ccb8886eb52f'

# Open a writable binary stream and download the named blob into it.
with open('output.dcm', 'wb') as wb:
    payload_client.download_from(payload_id=payload_types.PayloadId(example_payload_identifier),
                                 blob_name='./input/I114.dcm',
                                 dest_obj=wb)

# Upload a readable binary stream to a new blob
# (for the sake of the example: re-reading the file downloaded above).
with open('output.dcm', 'rb') as rb:
    payload_client.upload(payload_id=payload_types.PayloadId(example_payload_identifier),
                          blob_name='./test/new_blob.dcm', file_object=rb)

# Fetch the details of a payload (e.g. to confirm the upload above).
confirming_details = payload_client.get_details(
    payload_id=payload_types.PayloadId(example_payload_identifier))
| clara-platform-python-client-main | examples/payloads_client_example.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia_clara.jobs_client import JobsClient
from nvidia_clara.pipelines_client import PipelinesClient
from nvidia_clara.payloads_client import PayloadsClient
import nvidia_clara.pipeline_types as pipeline_types
import os
from pathlib import Path

# Clients creation: all three clients talk to the same running Clara instance.
clara_ip_address = "10.0.0.1"
clara_port = "30031"
jobs_client = JobsClient(target=clara_ip_address, port=clara_port)
payloads_client = PayloadsClient(target=clara_ip_address, port=clara_port)
pipeline_client = PipelinesClient(target=clara_ip_address, port=clara_port)

# Create a list of pipeline_types.PipelineDefinition from a local pipeline .yaml file.
file_path = "../spleen_pipeline.yaml"
definitions = [pipeline_types.PipelineDefinition(name=file_path, content=Path(file_path).read_text())]

# Create a pipeline from the definition list.
pipeline_id = pipeline_client.create_pipeline(definition=definitions)

# Create a job for the newly created pipeline; the job comes with its own payload.
job_info = jobs_client.create_job(job_name="spleenjob", pipeline_id=pipeline_types.PipelineId(pipeline_id.value))
job_id = job_info.job_id
payload_id = job_info.payload_id

# Local path to the directory of files to upload into the job's payload on the server.
input_path = "../app_spleen-input_v1/dcm"

# Upload every file in the directory into the job's payload.
for file in os.listdir(input_path):
    file_path = input_path + "/" + str(file)
    with open(file_path, 'rb') as fp:
        payloads_client.upload(payload_id=payload_id, blob_name=file, file_object=fp)

# Get a list of the jobs.
job_list = jobs_client.list_jobs()

# Start the job.
job_token = jobs_client.start_job(job_id=job_id)

# Poll until the job completes.
# NOTE(review): tight polling loop with no sleep, and 3 is assumed to be the
# terminal job state -- confirm against job_types and consider throttling.
job_status = jobs_client.get_status(job_id=job_id)
while job_status.job_state != 3:
    job_status = jobs_client.get_status(job_id=job_id)

# Payload details contain the list of files inside the payload.
payload_details = payloads_client.get_details(payload_id=payload_id)

# Download each payload file that lives under the output directory (ex. "/operators").
for file in payload_details.file_details:
    # File path on the server (ex. "/operators/dicom-reader/example_file.raw").
    file_name = file.name
    # Split the path (ex. ['', 'operators', 'dicom-reader', 'example_file.raw']).
    name = file_name.split('/')
    # Keep only files under the output directory (ex. "/operators").
    if name[1] == 'operators':
        # Download into a local results directory using the server-side base name.
        with open("./results/"+name[-1], 'wb+') as wb:
            payloads_client.download_from(payload_id=payload_id, blob_name="."+file_name, dest_obj=wb)

# Fetch the list of log lines emitted by one of the job's operators.
jobs_logs = jobs_client.job_logs(job_id=job_id, operator_name="dicom-reader")
| clara-platform-python-client-main | examples/combined_example.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia_clara.jobs_client import JobsClient
import nvidia_clara.job_types as job_types
import nvidia_clara.pipeline_types as pipeline_types

# Client creation with IP and port of a running Clara instance.
clara_ip_address = "10.0.0.1"
clara_port = "30031"
jobs_client = JobsClient(target=clara_ip_address, port=clara_port)

# Create a filter for healthy jobs; filters can additionally restrict by
# pipeline id, state, completion time, and creation time.
job_filter = job_types.JobFilter(has_job_status=[job_types.JobStatus.Healthy])

# List current jobs, restricted by the optional filter.
job_list = jobs_client.list_jobs(job_filter=job_filter)
print(job_list)

# Identifier of an already created pipeline (ex. colon tumor segmentation).
colon_tumor_pipeline_id = "f9a843935e654a30beb9d1b8352bfaac"

# Create a job for that pipeline.
job_info = jobs_client.create_job(job_name="colontumor", pipeline_id=pipeline_types.PipelineId(colon_tumor_pipeline_id))
print(job_info.job_id.value)

# Start the job.
job_token = jobs_client.start_job(job_id=job_info.job_id)
print(job_token.job_state)
print(job_token.job_status)

# Get the status of the job from its identifier.
job_details = jobs_client.get_status(job_id=job_token.job_id)
print(job_details.job_state)
print(job_details.job_status)

# List the names of the job's operators.
print(job_details.operator_details.keys())

# Try canceling the job (the scheduler rejects the request if the job is no
# longer running).
try:
    job_details = jobs_client.cancel_job(job_id=job_token.job_id)
except Exception:
    # Narrowed from a bare ``except:`` so that KeyboardInterrupt/SystemExit
    # are no longer swallowed.
    print("Scheduler Rejected Request")
| clara-platform-python-client-main | examples/jobs_client_example.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia_clara.clara_client import ClaraClient
import nvidia_clara.clara_types as clara_types

# Client creation with IP and port of a running Clara instance.
clara_ip_address = "10.0.0.1"
clara_port = "30031"
clara_client = ClaraClient(target=clara_ip_address, port=clara_port)

# Query the Clara platform version.
version = clara_client.version()

# Getting GPU utilization:
# Option 1: a list giving a snapshot of the current GPU utilization.
utilization_list = clara_client.list_utilization()

# Option 2: a generator yielding a stream of GPU utilization updates.
utilization_stream = clara_client.stream_utilization()

# Stop the pipeline service and Triton.
clara_client.stop()
| clara-platform-python-client-main | examples/clara_client_example.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from nvidia_clara.pipelines_client import PipelinesClient
import nvidia_clara.pipeline_types as pipeline_types
# Client Creation with IP and Port of running instance of Clara
clara_ip_address = "10.0.0.1"
clara_port = "30031"
pipeline_client = PipelinesClient(target=clara_ip_address, port=clara_port)
# Create list of pipeline_types.PipelineDefinition with local path to pipeline .yaml
file_path = "./liver-tumor-pipeline.yaml"
definitions = [pipeline_types.PipelineDefinition(name=file_path, content=Path(file_path).read_text())]
# Create Pipeline with definition list created
pipeline_id = pipeline_client.create_pipeline(definition=definitions)
print(pipeline_id)
# Get List of Created Pipelines PipelinesClient.list_pipelines()
pipelines = [(pipe_info.pipeline_id.value, pipe_info.name) for pipe_info in pipeline_client.list_pipelines()]
print(pipelines)
# Get Details of Pipeline with PipelinesClient.pipeline_details()
pipeline_details = pipeline_client.pipeline_details(pipeline_id=pipeline_id)
# Remove Pipeline
pipeline_client.remove_pipeline(pipeline_id=pipeline_id)
| clara-platform-python-client-main | examples/pipelines_client_example.py |
# Copyright (c) 2020, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rapidAligner.util import *
from rapidAligner.ED import *
| rapidAligner-main | rapidAligner/__init__.py |
# Copyright (c) 2020, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from numba import cuda
class cudaTimer:
    """Context manager that prints elapsed GPU time (in ms) via CUDA events.

    Timing starts when the instance is constructed (not on ``__enter__``),
    so ``with cudaTimer('label'):`` measures from construction to block exit.
    """

    def __init__(self, label='', gpu=0):
        """
        Arguments:
        -------
        label: str
            optional text appended to the printed timing line
        gpu: int
            index of the CUDA device to select before recording events
        """
        self.label = label
        self.gpu = gpu
        self.start = cuda.event()
        self.end = cuda.event()
        cuda.select_device(self.gpu)
        # Record the start event immediately. (A stray trailing comma here
        # previously wrapped the call's result in a throwaway tuple.)
        self.start.record()

    def __enter__(self):
        # Return the timer so ``with cudaTimer() as t:`` binds the instance
        # (the previous implementation implicitly returned None).
        return self

    def __exit__(self, *args):
        cuda.select_device(self.gpu)
        suffix = 'ms ('+self.label+')' if self.label else 'ms'
        self.end.record()
        # Block until the end event has been reached before measuring.
        self.end.synchronize()
        time = cuda.event_elapsed_time(self.start, self.end)
        print('elapsed time:', int(time), suffix)
| rapidAligner-main | rapidAligner/util/Timer.py |
# Copyright (c) 2020, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rapidAligner.util.Timer import cudaTimer as Timer
from rapidAligner.util.Loader import * | rapidAligner-main | rapidAligner/util/__init__.py |
# Copyright (c) 2020, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ['FakeSeriesGenerator', 'ECGLoader']
import os
import urllib
import zipfile
import cupy as cp
from scipy.io import loadmat
class ECGLoader:
    """Fetch and expose the 22h ECG data set used by the UCR suite benchmarks.

    On construction the zip archive is downloaded (once) into ``root`` and
    extracted; the ``query``/``subject`` properties then read the MATLAB
    files from disk on every access.
    """

    def __init__(self, root='./data/ECG', url=None):
        """
        Arguments:
        -------
        root: str
            local directory used to cache the downloaded archive
        url: str
            download URL of the 22h ECG archive (required)
        """
        # A bare ``import urllib`` does not guarantee the ``request``
        # submodule is loaded; import it explicitly before use.
        import urllib.request
        self.root = root
        assert url is not None, \
            "provide the URL to 22h of ECG data stated on the bottom of https://www.cs.ucr.edu/~eamonn/UCRsuite.html"
        filename = os.path.join(root, 'ECG_one_day.zip')
        if not os.path.isdir(root):
            os.makedirs(root)
        # Download and unpack only once; later constructions reuse the cache.
        if not os.path.isfile(filename):
            urllib.request.urlretrieve(url, filename)
            with zipfile.ZipFile(filename, 'r') as zip_ref:
                zip_ref.extractall(root)

    @property
    def subject(self):
        """The scaled and shifted 22h ECG stream as a flat array."""
        # The previous signature declared unreachable ``alpha``/``beta``
        # parameters; property access cannot pass arguments, so only the
        # defaults below were ever used.
        alpha, beta = 400.0, 50.0
        return alpha*loadmat(os.path.join(self.root, 'ECG_one_day', 'ECG.mat'))['ECG'].flatten()+beta

    @property
    def query(self):
        """The ECG query snippet as a flat array."""
        return loadmat(os.path.join(self.root, 'ECG_one_day', 'ECG_query.mat'))['ecg_query'].flatten()

    @property
    def data(self):
        """Convenience ``(query, subject)`` pair."""
        return self.query, self.subject
class FakeSeriesGenerator:
    """Generate a synthetic subject stream with an embedded query snippet.

    Uniform noise is smoothed by (circularly) convolving it with a
    normalized exponential kernel via FFT; the first ``query_length``
    samples become the query and the remaining ``subject_length`` samples
    the subject.
    """

    def __init__(self, query_length=3600, subject_length=2**20, seed=None, beta=1.0):
        """
        Arguments:
        -------
        query_length: int
            positive length of the query snippet
        subject_length: int
            positive length of the subject stream (>= query_length)
        seed: int or None
            optional seed for the random number generator
        beta: float
            non-negative decay rate of the smoothing kernel
        """
        self.query_length = query_length
        self.subject_length = subject_length
        self.beta = beta
        assert isinstance(query_length, int) and query_length > 0
        assert isinstance(subject_length, int) and subject_length > 0
        assert query_length <= subject_length
        # Accept ints as well as floats for the decay rate (generalized from
        # float-only).
        assert isinstance(beta, (int, float)) and beta >= 0
        if isinstance(seed, int):
            cp.random.seed(seed)
        noise = cp.random.uniform(-1, +1, self.subject_length+self.query_length)
        kernel = cp.exp(-self.beta*cp.linspace(0, 1, self.subject_length+self.query_length))
        # Normalize the kernel to unit energy before convolving.
        kernel /= cp.sqrt(cp.sum(kernel**2))
        # Circular convolution of noise and kernel via the FFT theorem.
        self.signal = cp.fft.irfft(cp.fft.rfft(noise)*cp.fft.rfft(kernel), n=self.subject_length+self.query_length)

    @property
    def subject(self):
        """The subject stream as a host array."""
        return self.signal[self.query_length:].get()

    @property
    def query(self, alpha=2.0, beta=1.20):
        """The rescaled query snippet as a host array."""
        # NOTE: ``alpha``/``beta`` are unreachable through property access
        # (no arguments can be passed); the defaults are the only values
        # ever used. Kept in the signature for byte-compatibility of the
        # descriptor, but documented here.
        signal = self.signal[:self.query_length]
        mean = cp.mean(signal)
        return (alpha*(signal-mean)+beta*mean).get()

    @property
    def data(self):
        """Convenience ``(query, subject)`` pair."""
        return self.query, self.subject
# Copyright (c) 2020, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cupy as cp
###############################################################################
# helpers to avoid redundant code
###############################################################################
def cumsum(x, Kahan: int = 0):
    """
    Wrapper for exclusive prefix sum computation with an optional
    refinement step using an approach similar to Kahan summation.
    This function is not exposed to the user.

    Arguments:
    -------
    x: cupy.core.core.ndarray
        the input array of length n to be scanned with operation +
    Kahan: int
        non-negative number of Kahan summation adjustment rounds

    Returns
    -------
    cupy.core.core.ndarray
        the computed exclusive prefix scan of length n+1
    """
    assert(isinstance(Kahan, int) and Kahan >= 0)

    # allocate an empty array with a leading 0 so that y[k] ends up holding
    # the sum of the first k entries of x (exclusive scan)
    y = cp.empty(len(x)+1, dtype=x.dtype)
    y[0] = 0

    # compute the inclusive prefix sum starting at entry 1
    cp.cumsum(x, out=y[1:])

    # basically exploit that (d/dt int f(t) dt) - f(t) = r = 0 forall f(t)
    # in case delta is non-vanishing due to numeric inaccuracies, we add
    # the prefix scan of r to the final result (inaccuracies might add up);
    # the recursion depth is bounded by the Kahan argument
    if Kahan:
        r = x-cp.diff(y)
        if(cp.max(cp.abs(r))):
            y += cumsum(r, Kahan-1)

    return y
def mnorm(x):
    """
    Mean-adjust a time series so that it has vanishing mean,
    i.e. sum_i x[i] = 0.

    Arguments:
    -------
    x: cupy.core.core.ndarray
        the input array of length n to be normalized

    Returns
    -------
    cupy.core.core.ndarray
        the mean-adjusted array of length n
    """
    mu = cp.mean(x)
    return x - mu
def znorm(x, epsilon):
    """
    Mean- and amplitude-adjust a time series so that it has vanishing
    mean (sum_i x[i] = 0) and unit standard deviation, i.e.
    sum_i x[i]*x[i] = n where n is the length of x.

    Arguments:
    -------
    x: cupy.core.core.ndarray
        the input array of length n to be normalized
    epsilon: float
        lower bound for the standard deviation, guarding against
        division by zero on (near-)constant inputs

    Returns
    -------
    cupy.core.core.ndarray
        the z-normalized array of length n
    """
    mu = cp.mean(x)
    sigma = cp.std(x, ddof=0)
    if sigma < epsilon:
        sigma = epsilon
    return (x - mu) / sigma
| rapidAligner-main | rapidAligner/ED/stream_dists_helpers.py |
# Copyright (c) 2020, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rapidAligner.ED.stream_dists_fft import fft_sdist, fft_mdist, fft_zdist
from rapidAligner.ED.stream_dists_kernels import sdist_kernel, mdist_kernel, zdist_kernel
from rapidAligner.ED.stream_dists_helpers import mnorm, znorm
import cupy as cp
from numba import cuda
__all__ = ["sdist", "mdist", "zdist"]
def sdist(Q, S, mode="fft"):
    """
    Rolling (sliding-window) Euclidean Distance between a query Q and
    every length-m window of a stream S.

    Arguments:
    -------
    Q: cupy.core.core.ndarray or numba.cuda.DeviceNDArray or cudf.Series or numpy.ndarray
        the input query of length m to be aligned
    S: cupy.core.core.ndarray or numba.cuda.DeviceNDArray or cudf.Series or numpy.ndarray
        the input stream of length n>=m to be scanned
    mode: str
        either "naive" or "fft"

    Returns
    -------
    cupy.core.core.ndarray
        the computed distance array of length n-m+1
    """
    # move both inputs onto the device as cupy arrays if necessary
    if not isinstance(Q, cp.core.core.ndarray):
        Q = cp.asarray(Q)
    if not isinstance(S, cp.core.core.ndarray):
        S = cp.asarray(S)

    assert(Q.dtype == S.dtype)
    assert((len(Q.shape) == len(S.shape) == 1 and Q.shape[0] <= S.shape[0]))

    if mode == "fft":
        return fft_sdist(Q, S)

    # naive path: one warp per alignment position
    stream = cuda.stream()
    result = cp.empty(len(S)-len(Q)+1, dtype=Q.dtype)
    sdist_kernel[80*32, 64, stream](Q, S, result)
    stream.synchronize()
    return result
def mdist(Q, S, mode="fft"):
    """
    Rolling mean-adjusted Euclidean Distance between a query Q and every
    length-m window of a stream S (each side is shifted to zero mean).

    Arguments:
    -------
    Q: cupy.core.core.ndarray or numba.cuda.DeviceNDArray or cudf.Series or numpy.ndarray
        the input query of length m to be aligned
    S: cupy.core.core.ndarray or numba.cuda.DeviceNDArray or cudf.Series or numpy.ndarray
        the input stream of length n>=m to be scanned
    mode: str
        either "naive" or "fft"

    Returns
    -------
    cupy.core.core.ndarray
        the computed distance array of length n-m+1
    """
    # move both inputs onto the device as cupy arrays if necessary
    if not isinstance(Q, cp.core.core.ndarray):
        Q = cp.asarray(Q)
    if not isinstance(S, cp.core.core.ndarray):
        S = cp.asarray(S)

    assert(Q.dtype == S.dtype)
    assert((len(Q.shape) == len(S.shape) == 1 and Q.shape[0] <= S.shape[0]))

    if mode == "fft":
        return fft_mdist(Q, S)

    # naive path: the query is mean-adjusted up front, window means are
    # computed inside the kernel
    stream = cuda.stream()
    result = cp.empty(len(S)-len(Q)+1, dtype=Q.dtype)
    mdist_kernel[80*32, 64, stream](mnorm(Q), S, result)
    stream.synchronize()
    return result
def zdist(Q, S, mode="fft", epsilon=1e-6):
    """
    Rolling mean- and amplitude-adjusted (z-normalized) Euclidean Distance
    between a query Q and every length-m window of a stream S.

    Arguments:
    -------
    Q: cupy.core.core.ndarray or numba.cuda.DeviceNDArray or cudf.Series or numpy.ndarray
        the input query of length m to be aligned
    S: cupy.core.core.ndarray or numba.cuda.DeviceNDArray or cudf.Series or numpy.ndarray
        the input stream of length n>=m to be scanned
    epsilon: float
        non-negative number for regularizing zero stdev
    mode: str
        either "naive" or "fft"

    Returns
    -------
    cupy.core.core.ndarray
        the computed distance array of length n-m+1
    """
    # move both inputs onto the device as cupy arrays if necessary
    if not isinstance(Q, cp.core.core.ndarray):
        Q = cp.asarray(Q)
    if not isinstance(S, cp.core.core.ndarray):
        S = cp.asarray(S)

    assert(epsilon > 0)
    assert(Q.dtype == S.dtype)
    assert((len(Q.shape) == len(S.shape) == 1 and Q.shape[0] <= S.shape[0]))
    # a constant query has no meaningful z-normalization
    assert(cp.std(Q, ddof=0) > 0)

    if mode == "fft":
        return fft_zdist(Q, S, epsilon)

    # naive path: the query is z-normalized up front, window statistics are
    # computed inside the kernel
    stream = cuda.stream()
    result = cp.empty(len(S)-len(Q)+1, dtype=Q.dtype)
    zdist_kernel[80*32, 64, stream](znorm(Q, epsilon), S, result, epsilon)
    stream.synchronize()
    return result
| rapidAligner-main | rapidAligner/ED/__init__.py |
# Copyright (c) 2020, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from numba import cuda, float64
from math import sqrt
###############################################################################
# plain rolling Euclidean distance
###############################################################################
@cuda.jit
def sdist_kernel(Q, S, out):
    """Euclidean Distance naive kernel: nothing cached.

    One warp (32 lanes) handles one alignment position: lanes stride over
    the query accumulating squared residuals, a shuffle-based reduction
    combines the partials, and lane 0 writes the result.
    """
    # warps of this grid are strided over all alignment positions
    warpDim = cuda.blockDim.x // 32
    warpIdx = cuda.threadIdx.x // 32
    laneIdx = cuda.threadIdx.x % 32
    lower = cuda.blockIdx.x*warpDim+warpIdx
    stride = cuda.gridDim.x*warpDim
    for position in range(lower, S.shape[0]-Q.shape[0]+1, stride):
        accum = float64(0)
        # each lane accumulates every 32nd squared residual of the window
        for index in range(laneIdx, Q.shape[0], 32):
            value = Q[index]-S[position+index]
            accum += value*value
        # warp-level tree reduction via register shuffles; lane 0 ends up
        # with the full sum
        for delta in [16, 8, 4, 2, 1]:
            value = cuda.shfl_down_sync(0xFFFFFFFF, accum, delta)
            accum += value
        if laneIdx == 0:
            out[position] = accum
###############################################################################
# mean-adjusted rolling Euclidean distance
###############################################################################
@cuda.jit(max_registers=63)
def mdist_kernel(Q, S, out):
    """mean-adjusted Euclidean Distance naive kernel: nothing cached.

    Expects Q to be mean-adjusted already (the caller passes mnorm(Q));
    the mean of each stream window is computed on the fly. One warp per
    alignment position, two passes: window mean, then squared residuals.
    """
    warpDim = cuda.blockDim.x // 32
    warpIdx = cuda.threadIdx.x // 32
    laneIdx = cuda.threadIdx.x % 32
    lower = cuda.blockIdx.x*warpDim+warpIdx
    stride = cuda.gridDim.x*warpDim
    for position in range(lower, S.shape[0]-Q.shape[0]+1, stride):
        # pass 1: all-reduce the window sum across the warp (shfl_xor makes
        # every lane hold the total) to obtain the window mean
        accum = float64(0)
        for index in range(laneIdx, Q.shape[0], 32):
            accum += S[position+index]
        for delta in [16, 8, 4, 2, 1]:
            accum += cuda.shfl_xor_sync(0xFFFFFFFF, accum, delta)
        mean = accum/Q.shape[0]
        # pass 2: accumulate squared residuals of the mean-adjusted window
        # (Q is already mean-adjusted, hence the +mean on the stream side)
        accum = float64(0)
        for index in range(laneIdx, Q.shape[0], 32):
            value = Q[index]-S[position+index]+mean
            accum += value*value
        # tree reduction; lane 0 holds the final sum
        for delta in [16, 8, 4, 2, 1]:
            value = cuda.shfl_down_sync(0xFFFFFFFF, accum, delta)
            accum += value
        if laneIdx == 0:
            out[position] = accum
###############################################################################
# mean- and amplitude-adjusted rolling Euclidean distance
###############################################################################
@cuda.jit(max_registers=63)
def zdist_kernel(Q, S, out, epsilon):
    """z-normalized Euclidean Distance naive kernel: nothing cached.

    Expects Q to be z-normalized already (the caller passes znorm(Q, epsilon));
    mean and standard deviation of each stream window are computed on the
    fly. One warp per alignment position, two passes.
    """
    warpDim = cuda.blockDim.x // 32
    warpIdx = cuda.threadIdx.x // 32
    laneIdx = cuda.threadIdx.x % 32
    lower = cuda.blockIdx.x*warpDim+warpIdx
    stride = cuda.gridDim.x*warpDim
    for position in range(lower, S.shape[0]-Q.shape[0]+1, stride):
        # pass 1: all-reduce window sum and sum of squares across the warp
        accum1 = float64(0)
        accum2 = float64(0)
        for index in range(laneIdx, Q.shape[0], 32):
            value = S[position+index]
            accum1 += value
            accum2 += value*value
        for delta in [16, 8, 4, 2, 1]:
            accum1 += cuda.shfl_xor_sync(0xFFFFFFFF, accum1, delta)
            accum2 += cuda.shfl_xor_sync(0xFFFFFFFF, accum2, delta)
        mean = accum1/Q.shape[0]
        # variance via E[x^2]-E[x]^2; fall back to epsilon for degenerate
        # (constant) windows to avoid division by zero
        sigma = accum2/Q.shape[0]-mean*mean
        sigma = sqrt(sigma) if sigma > 0.0 else epsilon
        # pass 2: squared residuals against the z-normalized window
        accum = float64(0)
        for index in range(laneIdx, Q.shape[0], 32):
            value = Q[index]-(S[position+index]-mean)/sigma
            accum += value*value
        # tree reduction; lane 0 holds the final sum
        for delta in [16, 8, 4, 2, 1]:
            accum += cuda.shfl_down_sync(0xFFFFFFFF, accum, delta)
        if laneIdx == 0:
            out[position] = accum
| rapidAligner-main | rapidAligner/ED/stream_dists_kernels.py |
# Copyright (c) 2020, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rapidAligner.ED.stream_dists_helpers import cumsum
from rapidAligner.ED.stream_dists_helpers import mnorm, znorm
import cupy as cp
import math
def fft_sdist(Q, S, alignment=10000, Kahan=0):
    """
    Rolling Euclidean Distance using FFT to run in loglinear time.

    Equation exploiting the cross-correlation (Fourier) theorem:

    d[k] = sum_i (Q[i] - S[i+k])**2
         = sum_i Q[i]**2 - 2*correlation[k] + sum_i S[i+k]**2

    Arguments:
    -------
    Q: cupy.core.core.ndarray
        the input query of length m to be aligned
    S: cupy.core.core.ndarray
        the input stream of length n>=m to be scanned
    Kahan: int
        non-negative number of Kahan summation adjustment rounds

    Returns
    -------
    cupy.core.core.ndarray
        the computed distance array of length n-m+1
    """
    assert(Q.dtype == S.dtype)

    m = len(Q)
    # pad the stream length up to a multiple of ``alignment`` for the FFT
    n = (len(S)+alignment-1)//alignment*alignment
    padded_S = cp.zeros(n, dtype=S.dtype)
    padded_S[:len(S)] = S

    # rolling window energies sum_i S[i+k]**2 from an exclusive prefix scan
    scan = cumsum(padded_S**2, Kahan)
    window_energy = scan[+m:]-scan[:-m]

    # cross-correlation of Q with S via the Fourier theorem
    padded_Q = cp.zeros(n, dtype=Q.dtype)
    padded_Q[:m] = Q
    corr = cp.fft.irfft(cp.fft.rfft(padded_Q).conj()*cp.fft.rfft(padded_S), n=n)

    dist = cp.sum(cp.square(Q))-2*corr[:-m+1]+window_energy
    return dist[:len(S)-m+1]
def fft_mdist(Q, S, alignment=10000, Kahan=0):
    """
    Rolling mean-adjusted Euclidean Distance using FFT to run in loglinear time

    Equation exploiting cross-correlation (Fourier) theorem:

    d[k] = sum_i (f(Q[i]) - f(S[i+k]))**2
         = sum_i (f(Q[i])**2 - 2*f(Q[i])*f(S[i+k]) + f(S[i+k])**2)
         = sum_i (f(Q[i])**2 - 2*f(Q[i])*S[i+k] + (S[i+k]-mu[k])**2)
           since sum_i f(Q[i]) = 0 by definition
         = sum_i f(Q[i])**2 - 2*correlation(k) + Y[k] - X[k]**2/|Q|
         = sum_i f(Q[i])**2 - 2*correlation(k) + |Q|*variance[k]

    Arguments:
    -------
    Q: cupy.core.core.ndarray
        the input query of length m to be aligned
    S: cupy.core.core.ndarray
        the input stream of length n>=m to be scanned
    Kahan: int
        non-negative number of Kahan summation adjustment rounds

    Returns
    -------
    cupy.core.core.ndarray
        the computed distance array of length n-m+1
    """
    # dtype check for consistency with fft_sdist
    assert(Q.dtype == S.dtype)

    m, Q = len(Q), mnorm(Q)
    # pad the stream length up to a multiple of ``alignment`` for the FFT
    n = (len(S)+alignment-1)//alignment*alignment
    # allocate the padded stream directly in the target dtype
    # (previously cp.zeros(n).astype(...) allocated float64 first and cast)
    iS = cp.zeros(n, dtype=S.dtype)
    iS[:len(S)] = S

    # windowed sums X and sums of squares Y via exclusive prefix scans
    X, Y = cumsum(iS, Kahan), cumsum(iS**2, Kahan)
    X = X[+m:]-X[:-m]
    Y = Y[+m:]-Y[:-m]
    # |Q| times the windowed variance: Y[k] - X[k]**2/m
    Z = Y-X*X/m

    # cross-correlation of the mean-adjusted Q with S via the Fourier theorem
    E = cp.zeros(n, dtype=Q.dtype)
    E[:m] = Q
    R = cp.fft.irfft(cp.fft.rfft(E).conj()*cp.fft.rfft(iS), n=n)

    return (cp.sum(cp.square(Q))-2*R[:-m+1]+Z)[:len(S)-m+1]
def fft_zdist(Q, S, epsilon, alignment=10000, Kahan=0):
    """
    Rolling mean- and amplitude-adjusted Euclidean Distance using FFT to run in loglinear time
    Equation exploiting cross-correlation (Fourier) theorem:
    d[k] = sum_i (f(Q[i]) - f(S[i+k]))**2
         = sum_i (f(Q[i])**2 - 2*f(Q[i])*f(S[i+k]) + f(S[i+k])**2)
         = sum_i (f(Q[i])**2 - 2*f(Q[i])*(S[i+k]-mu[k]) + (S[i+k]-mu[k])**2)
         = sum_i (f(Q[i])**2 - 2*f(Q[i])*S[i+k] + 2*f(Q[i])*mu[k] + (S[i+k]-mu[k])**2)
         = sum_i (f(Q[i])**2 - 2*f(Q[i])*S[i+k] + (S[i+k]-mu[k])**2)
           since sum_i f(Q[i]) = 0 by definition
         = sum_i (f(Q[i])**2 - 2*f(Q[i])*S[i+k] + S[i+k]**2 - 2*S[i+k]*mu[k] + mu[k]**2)
         = sum_i (f(Q[i])**2 - 2*f(Q[i])*S[i+k] + S[i+k]**2 - 2*|Q|*mu[k]*mu[k] + mu[k]**2)
         = sum_i f(Q[i])**2 - 2*correlation(k) + Y[k] - 2*X[k]**2/|Q| + X[k]**2/|Q|
         = sum_i f(Q[i])**2 - 2*correlation(k) + Y[k] - X[k]**2/|Q|
         = sum_i f(Q[i])**2 - 2*correlation(k) + |Q|*variance[k]
    Arguments:
    -------
    Q: cupy.core.core.ndarray
        the input query of length m to be aligned
    S: cupy.core.core.ndarray
        the input stream of length n>=m to be scanned
    epsilon: float
        non-negative number for regularizing zero stdev
    alignment: int
        the stream is zero-padded up to the next multiple of this value
        before the FFT is taken
    Kahan: int
        non-negative number of Kahan summation adjustment rounds
    Returns
    -------
    cupy.core.core.ndarray
        the computed distance array of length n-m+1
    """
    # epsilon regularizes the query's stdev inside znorm; zero would divide by zero.
    assert(epsilon > 0)
    # Z-normalize the query once; the derivation above assumes sum(Q) == 0.
    m, Q = len(Q), znorm(Q, epsilon)
    # Round the stream length up to a multiple of `alignment`.
    n = (len(S)+alignment-1)//alignment*alignment
    iS = cp.zeros(n, dtype=S.dtype)
    iS[:len(S)] = S
    # (removed: unused local `delta = n - len(S)`)
    # Rolling-window sums of S and S**2 via (optionally Kahan-corrected) prefix sums.
    X, Y = cumsum(iS, Kahan), cumsum(iS**2, Kahan)
    X = X[+m:]-X[:-m]
    Y = Y[+m:]-Y[:-m]
    # Per-window standard deviation; clamp tiny negative round-off before sqrt.
    Z = cp.sqrt(cp.maximum(Y/m-cp.square(X/m), 0))
    # Zero-padded query buffer for the FFT-based cross-correlation.
    E = cp.zeros(n, dtype=Q.dtype)
    E[:m] = Q
    R = cp.fft.irfft(cp.fft.rfft(E).conj()*cp.fft.rfft(iS), n=n)
    # Windows with zero stdev (constant signal) get the maximal distance m.
    F = cp.where(Z > 0 , 2*(m-R[:-m+1]/Z), m*cp.ones_like(Z))
    return F[:len(S)-m+1]
| rapidAligner-main | rapidAligner/ED/stream_dists_fft.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | __init__.py |
# ! /usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup for pip package."""
import codecs
import importlib.util
import os
import subprocess
from distutils import cmd as distutils_cmd
from distutils import log as distutils_log
from itertools import chain
import setuptools
# Load package metadata (version, name, contacts, ...) by executing
# package_info.py directly, so setup.py never has to import (and therefore
# fully initialize) the nemo_text_processing package itself.
spec = importlib.util.spec_from_file_location('package_info', 'nemo_text_processing/package_info.py')
package_info = importlib.util.module_from_spec(spec)
spec.loader.exec_module(package_info)
# Re-export the metadata fields consumed by setuptools.setup() below.
__contact_emails__ = package_info.__contact_emails__
__contact_names__ = package_info.__contact_names__
__description__ = package_info.__description__
__download_url__ = package_info.__download_url__
__homepage__ = package_info.__homepage__
__keywords__ = package_info.__keywords__
__license__ = package_info.__license__
__package_name__ = package_info.__package_name__
__repository_url__ = package_info.__repository_url__
__version__ = package_info.__version__
# Long description: prefer README.md, fall back to README.rst, else point at
# the project homepage.
if os.path.exists('README.md'):
    with open("README.md", "r", encoding='utf-8') as fh:
        long_description = fh.read()
    long_description_content_type = "text/markdown"
elif os.path.exists('README.rst'):
    # codec is used for consistent encoding
    long_description = codecs.open(
        os.path.join(os.path.abspath(os.path.dirname(__file__)), 'README.rst'), 'r', encoding='utf-8',
    ).read()
    long_description_content_type = "text/x-rst"
else:
    # NOTE(review): long_description_content_type is left unset in this branch;
    # setuptools then falls back to its default — confirm this is intended.
    long_description = 'See ' + __homepage__
###############################################################################
# Dependency Loading #
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% #
def req_file(filename, folder="requirements"):
    """Return the lines of ``folder/filename`` with surrounding whitespace stripped.

    Each line of the file is treated as one requirement specifier; trailing
    newlines (and any stray spaces) are removed.
    """
    path = os.path.join(folder, filename)
    with open(path, encoding='utf-8') as handle:
        return [line.strip() for line in handle]
# Core runtime requirements.
install_requires = req_file("requirements.txt")
extras_require = {
    # User packages
    'test': req_file("requirements_test.txt")
}
# Flatten every extras group into a combined 'all' group.
# FIX: use chain.from_iterable — the previous chain(extras_require.values())
# iterated the dict_values object itself, yielding the inner lists as elements
# and producing a list of lists instead of a flat list of requirement strings.
extras_require['all'] = list(chain.from_iterable(extras_require.values()))
# Add lightning requirements as needed
# extras_require['nemo_text_processing'] = list(chain([extras_require['nemo_text_processing']]))
# extras_require['test'] = list(
#     chain(
#         [
#             extras_require['nemo_text_processing'],
#         ]
#     )
# )
tests_requirements = extras_require["test"]
###############################################################################
# Code style checkers #
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% #
class StyleCommand(distutils_cmd.Command):
    """Distutils command that checks (or fixes) project style with isort and black."""

    __LINE_WIDTH = 119
    __ISORT_BASE = (
        'isort '
        # These two lines makes isort compatible with black.
        '--multi-line=3 --trailing-comma --force-grid-wrap=0 '
        f'--use-parentheses --line-width={__LINE_WIDTH} -rc -ws'
    )
    __BLACK_BASE = f'black --skip-string-normalization --line-length={__LINE_WIDTH}'
    description = 'Checks overall project code style.'
    user_options = [
        ('scope=', None, 'Folder of file to operate within.'),
        ('fix', None, 'True if tries to fix issues in-place.'),
    ]

    def __call_checker(self, base_command, scope, check):
        # Build: <tool + its args...> <scope> [--check --diff]
        command = [*base_command, scope]
        if check:
            command += ['--check', '--diff']
        self.announce(
            msg='Running command: %s' % str(' '.join(command)), level=distutils_log.INFO,
        )
        return subprocess.call(command)

    def _isort(self, scope, check):
        # isort's command line is the split base string plus the scope.
        return self.__call_checker(self.__ISORT_BASE.split(), scope, check)

    def _black(self, scope, check):
        return self.__call_checker(self.__BLACK_BASE.split(), scope, check)

    def _pass(self):
        # Green "PASS" via ANSI escape codes.
        self.announce(msg='\033[32mPASS\x1b[0m', level=distutils_log.INFO)

    def _fail(self):
        # Red "FAIL" via ANSI escape codes.
        self.announce(msg='\033[31mFAIL\x1b[0m', level=distutils_log.INFO)

    # noinspection PyAttributeOutsideInit
    def initialize_options(self):
        # Defaults: check the whole tree, report-only (no in-place fixes).
        self.scope = '.'
        self.fix = ''

    def run(self):
        # --fix given => run the tools in rewrite mode instead of check mode.
        check = not self.fix
        isort_rc = self._isort(scope=self.scope, check=check)
        black_rc = self._black(scope=self.scope, check=check)
        if isort_rc == 0 and black_rc == 0:
            self._pass()
        else:
            self._fail()
            # Propagate the first non-zero tool exit code.
            exit(isort_rc if isort_rc != 0 else black_rc)

    def finalize_options(self):
        pass
###############################################################################
# Declarative package configuration; all metadata values come from
# package_info.py (loaded above) and the requirements files.
setuptools.setup(
    name=__package_name__,
    # Versions should comply with PEP440. For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/single_source_version.html
    version=__version__,
    description=__description__,
    long_description=long_description,
    long_description_content_type=long_description_content_type,
    # The project's main homepage.
    url=__repository_url__,
    download_url=__download_url__,
    # Author details
    author=__contact_names__,
    author_email=__contact_emails__,
    # maintainer Details
    maintainer=__contact_names__,
    maintainer_email=__contact_emails__,
    # The licence under which the project is released
    license=__license__,
    classifiers=[
        # How mature is this project? Common values are
        #  1 - Planning
        #  2 - Pre-Alpha
        #  3 - Alpha
        #  4 - Beta
        #  5 - Production/Stable
        #  6 - Mature
        #  7 - Inactive
        'Development Status :: 5 - Production/Stable',
        # Indicate who your project is intended for
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'Intended Audience :: Information Technology',
        # Indicate what your project relates to
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Mathematics',
        'Topic :: Scientific/Engineering :: Image Recognition',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Utilities',
        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: Apache Software License',
        # Supported python versions
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        # Additional Setting
        'Environment :: Console',
        'Natural Language :: English',
        'Operating System :: OS Independent',
    ],
    packages=setuptools.find_packages(),
    install_requires=install_requires,
    # setup_requires=['pytest-runner'],
    tests_require=tests_requirements,
    # List additional groups of dependencies here (e.g. development
    # dependencies). You can install these using the following syntax,
    # $ pip install -e ".[all]"
    # $ pip install nemo_toolkit[all]
    extras_require=extras_require,
    # Add in any packaged data.
    include_package_data=True,
    exclude=['tools', 'tests', 'data'],
    # Ship the grammar data files (TSV tables and compiled FST/FAR archives).
    package_data={'': ['*.tsv', '*.far', '*.fst']},
    zip_safe=False,
    # PyPI package information.
    keywords=__keywords__,
    # Custom commands.
    cmdclass={'style': StyleCommand},
)
| NeMo-text-processing-main | setup.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from argparse import ArgumentParser
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import generator_main
# This script exports compiled grammars inside nemo_text_processing into OpenFst finite state archive files
# tokenize_and_classify.far and verbalize.far for production purposes
def itn_grammars(**kwargs):
    """Build the inverse-text-normalization FSTs and bundle them for FAR export.

    Expects ``cache_dir``, ``overwrite_cache``, ``whitelist`` and ``input_case``
    in ``kwargs``; returns a dict with 'classify' and 'verbalize' graph groups.
    """
    classify_fst = ITNClassifyFst(
        cache_dir=kwargs["cache_dir"],
        overwrite_cache=kwargs["overwrite_cache"],
        whitelist=kwargs["whitelist"],
        input_case=kwargs["input_case"],
    ).fst
    return {
        'classify': {'TOKENIZE_AND_CLASSIFY': classify_fst},
        'verbalize': {'ALL': ITNVerbalizeFst().fst, 'REDUP': pynini.accep("REDUP")},
    }
def tn_grammars(**kwargs):
    """Build the (deterministic) text-normalization FSTs and bundle them for FAR export.

    Expects ``input_case``, ``cache_dir``, ``overwrite_cache`` and ``whitelist``
    in ``kwargs``; returns a dict with 'classify' and 'verbalize' graph groups.
    """
    classify_fst = TNClassifyFst(
        input_case=kwargs["input_case"],
        deterministic=True,
        cache_dir=kwargs["cache_dir"],
        overwrite_cache=kwargs["overwrite_cache"],
        whitelist=kwargs["whitelist"],
    ).fst
    return {
        'classify': {'TOKENIZE_AND_CLASSIFY': classify_fst},
        'verbalize': {'ALL': TNVerbalizeFst(deterministic=True).fst, 'REDUP': pynini.accep("REDUP")},
    }
def export_grammars(output_dir, grammars):
    """
    Exports tokenizer_and_classify and verbalize Fsts as OpenFst finite state archive (FAR) files.

    Args:
        output_dir: directory to export FAR files to. Subdirectories will be created for tagger and verbalizer respectively.
        grammars: grammars to be exported
    """
    for category, graphs in grammars.items():
        out_dir = os.path.join(output_dir, category)
        if not os.path.exists(out_dir):
            # exist_ok=True closes the race between the exists() check above
            # and the creation here when multiple exports run concurrently.
            os.makedirs(out_dir, exist_ok=True)
            # NOTE(review): presumably lets the freshly created directory
            # settle (e.g. on networked filesystems) before writing — confirm.
            time.sleep(1)
        if category == "classify":
            # FAR filename convention: the classify group is stored as
            # tokenize_and_classify.far.
            category = "tokenize_and_classify"
        generator_main(f"{out_dir}/{category}.far", graphs)
def parse_args():
    """Build and parse the CLI arguments for exporting TN/ITN grammars to FAR files."""
    parser = ArgumentParser()
    parser.add_argument("--output_dir", help="output directory for grammars", required=True, type=str)
    parser.add_argument(
        "--language",
        help="language",
        choices=["en", "de", "es", "pt", "ru", 'fr', 'hu', 'sv', 'vi', 'zh', 'ar', 'it', 'es_en'],
        type=str,
        default='en',
    )
    parser.add_argument(
        "--grammars", help="grammars to be exported", choices=["tn_grammars", "itn_grammars"], type=str, required=True
    )
    parser.add_argument(
        "--input_case", help="input capitalization", choices=["lower_cased", "cased"], default="cased", type=str
    )
    parser.add_argument(
        "--whitelist",
        # FIX: removed the doubled "with with" typo in the help text.
        help="Path to a file with whitelist replacements,"
        "e.g., for English whitelist files are stored under inverse_normalization/en/data/whitelist. If None,"
        "the default file will be used.",
        default=None,
        # Lets users pass the literal string "None" on the command line.
        type=lambda x: None if x == "None" else x,
    )
    parser.add_argument("--overwrite_cache", help="set to True to re-create .far grammar files", action="store_true")
    parser.add_argument(
        "--cache_dir",
        help="path to a dir with .far grammar file. Set to None to avoid using cache",
        default=None,
        type=str,
    )
    return parser.parse_args()
if __name__ == '__main__':
    args = parse_args()
    # Sparrowhawk deployment only supports ITN grammars for these languages.
    if args.language in ['pt', 'ru', 'vi', 'es_en'] and args.grammars == 'tn_grammars':
        raise ValueError('Only ITN grammars could be deployed in Sparrowhawk for the selected languages.')
    # Import the per-language grammar classes lazily, under the common aliases
    # ITNClassifyFst/ITNVerbalizeFst (inverse TN) and TNClassifyFst/TNVerbalizeFst
    # (TN), so only the requested language's grammars are compiled. Some
    # languages only ship one direction (e.g. ru/pt/vi/es_en: ITN only;
    # hu/it: TN only).
    if args.language == 'en':
        from nemo_text_processing.inverse_text_normalization.en.taggers.tokenize_and_classify import (
            ClassifyFst as ITNClassifyFst,
        )
        from nemo_text_processing.inverse_text_normalization.en.verbalizers.verbalize import (
            VerbalizeFst as ITNVerbalizeFst,
        )
        from nemo_text_processing.text_normalization.en.taggers.tokenize_and_classify import (
            ClassifyFst as TNClassifyFst,
        )
        from nemo_text_processing.text_normalization.en.verbalizers.verbalize import VerbalizeFst as TNVerbalizeFst
    elif args.language == 'de':
        from nemo_text_processing.inverse_text_normalization.de.taggers.tokenize_and_classify import (
            ClassifyFst as ITNClassifyFst,
        )
        from nemo_text_processing.inverse_text_normalization.de.verbalizers.verbalize import (
            VerbalizeFst as ITNVerbalizeFst,
        )
        from nemo_text_processing.text_normalization.de.taggers.tokenize_and_classify import (
            ClassifyFst as TNClassifyFst,
        )
        from nemo_text_processing.text_normalization.de.verbalizers.verbalize import VerbalizeFst as TNVerbalizeFst
    elif args.language == 'ru':
        from nemo_text_processing.inverse_text_normalization.ru.taggers.tokenize_and_classify import (
            ClassifyFst as ITNClassifyFst,
        )
        from nemo_text_processing.inverse_text_normalization.ru.verbalizers.verbalize import (
            VerbalizeFst as ITNVerbalizeFst,
        )
    elif args.language == 'es':
        from nemo_text_processing.inverse_text_normalization.es.taggers.tokenize_and_classify import (
            ClassifyFst as ITNClassifyFst,
        )
        from nemo_text_processing.inverse_text_normalization.es.verbalizers.verbalize import (
            VerbalizeFst as ITNVerbalizeFst,
        )
        from nemo_text_processing.text_normalization.es.taggers.tokenize_and_classify import (
            ClassifyFst as TNClassifyFst,
        )
        from nemo_text_processing.text_normalization.es.verbalizers.verbalize import VerbalizeFst as TNVerbalizeFst
    elif args.language == 'pt':
        from nemo_text_processing.inverse_text_normalization.pt.taggers.tokenize_and_classify import (
            ClassifyFst as ITNClassifyFst,
        )
        from nemo_text_processing.inverse_text_normalization.pt.verbalizers.verbalize import (
            VerbalizeFst as ITNVerbalizeFst,
        )
    elif args.language == 'fr':
        from nemo_text_processing.inverse_text_normalization.fr.taggers.tokenize_and_classify import (
            ClassifyFst as ITNClassifyFst,
        )
        from nemo_text_processing.inverse_text_normalization.fr.verbalizers.verbalize import (
            VerbalizeFst as ITNVerbalizeFst,
        )
        from nemo_text_processing.text_normalization.fr.taggers.tokenize_and_classify import (
            ClassifyFst as TNClassifyFst,
        )
        from nemo_text_processing.text_normalization.fr.verbalizers.verbalize import VerbalizeFst as TNVerbalizeFst
    elif args.language == 'hu':
        from nemo_text_processing.text_normalization.hu.taggers.tokenize_and_classify import (
            ClassifyFst as TNClassifyFst,
        )
        from nemo_text_processing.text_normalization.hu.verbalizers.verbalize import VerbalizeFst as TNVerbalizeFst
    elif args.language == 'sv':
        from nemo_text_processing.inverse_text_normalization.sv.taggers.tokenize_and_classify import (
            ClassifyFst as ITNClassifyFst,
        )
        from nemo_text_processing.inverse_text_normalization.sv.verbalizers.verbalize import (
            VerbalizeFst as ITNVerbalizeFst,
        )
        from nemo_text_processing.text_normalization.sv.taggers.tokenize_and_classify import (
            ClassifyFst as TNClassifyFst,
        )
        from nemo_text_processing.text_normalization.sv.verbalizers.verbalize import VerbalizeFst as TNVerbalizeFst
    elif args.language == 'vi':
        from nemo_text_processing.inverse_text_normalization.vi.taggers.tokenize_and_classify import (
            ClassifyFst as ITNClassifyFst,
        )
        from nemo_text_processing.inverse_text_normalization.vi.verbalizers.verbalize import (
            VerbalizeFst as ITNVerbalizeFst,
        )
    elif args.language == 'zh':
        from nemo_text_processing.inverse_text_normalization.zh.taggers.tokenize_and_classify import (
            ClassifyFst as ITNClassifyFst,
        )
        from nemo_text_processing.inverse_text_normalization.zh.verbalizers.verbalize import (
            VerbalizeFst as ITNVerbalizeFst,
        )
        from nemo_text_processing.text_normalization.zh.taggers.tokenize_and_classify import (
            ClassifyFst as TNClassifyFst,
        )
        from nemo_text_processing.text_normalization.zh.verbalizers.verbalize import VerbalizeFst as TNVerbalizeFst
    elif args.language == 'ar':
        from nemo_text_processing.inverse_text_normalization.ar.taggers.tokenize_and_classify import (
            ClassifyFst as ITNClassifyFst,
        )
        from nemo_text_processing.inverse_text_normalization.ar.verbalizers.verbalize import (
            VerbalizeFst as ITNVerbalizeFst,
        )
        # NOTE(review): 'ar' imports a TN classifier but no TN verbalizer —
        # tn_grammars for Arabic would fail on TNVerbalizeFst; confirm intended.
        from nemo_text_processing.text_normalization.ar.taggers.tokenize_and_classify import (
            ClassifyFst as TNClassifyFst,
        )
    elif args.language == 'it':
        from nemo_text_processing.text_normalization.it.taggers.tokenize_and_classify import (
            ClassifyFst as TNClassifyFst,
        )
        from nemo_text_processing.text_normalization.it.verbalizers.verbalize import VerbalizeFst as TNVerbalizeFst
    elif args.language == 'es_en':
        from nemo_text_processing.inverse_text_normalization.es_en.taggers.tokenize_and_classify import (
            ClassifyFst as ITNClassifyFst,
        )
        from nemo_text_processing.inverse_text_normalization.es_en.verbalizers.verbalize import (
            VerbalizeFst as ITNVerbalizeFst,
        )
    # FARs are written into a per-language subdirectory of --output_dir.
    output_dir = os.path.join(args.output_dir, args.language)
    export_grammars(
        output_dir=output_dir,
        # locals() maps the --grammars string ('tn_grammars'/'itn_grammars')
        # to the module-level factory function of the same name.
        grammars=locals()[args.grammars](
            input_case=args.input_case,
            cache_dir=args.cache_dir,
            overwrite_cache=args.overwrite_cache,
            whitelist=args.whitelist,
        ),
    )
| NeMo-text-processing-main | tools/text_processing_deployment/pynini_export.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Semantic version components; PRE_RELEASE is appended verbatim (e.g. 'rc0').
MAJOR = 0
MINOR = 2
PATCH = 0
PRE_RELEASE = 'rc0'

# Use the following formatting: (major, minor, patch, pre-release)
VERSION = (MAJOR, MINOR, PATCH, PRE_RELEASE)

# '0.2.0' and '0.2.0rc0' respectively.
__shortversion__ = '{}.{}.{}'.format(MAJOR, MINOR, PATCH)
__version__ = __shortversion__ + PRE_RELEASE

# Package metadata consumed by setup.py.
__package_name__ = 'nemo_text_processing'
__contact_names__ = 'NVIDIA'
__contact_emails__ = '[email protected]'
__homepage__ = 'https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/'
__repository_url__ = 'https://github.com/nvidia/nemo-text-processing'
__download_url__ = 'https://github.com/NVIDIA/NeMo-text-processing/releases'
__description__ = 'NeMo text processing for ASR and TTS'
__license__ = 'Apache2'
__keywords__ = ' NeMo, nvidia, tts, asr, text processing, text normalization, inverse text normalization, language'
| NeMo-text-processing-main | nemo_text_processing/package_info.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from argparse import ArgumentParser
from time import perf_counter
from typing import List
from nemo_text_processing.text_normalization.data_loader_utils import load_file, write_file
from nemo_text_processing.text_normalization.en.graph_utils import INPUT_CASED, INPUT_LOWER_CASED
from nemo_text_processing.text_normalization.normalize import Normalizer
from nemo_text_processing.text_normalization.token_parser import TokenParser
class InverseNormalizer(Normalizer):
    """
    Inverse normalizer that converts text from spoken to written form. Useful for ASR postprocessing.
    Input is expected to have no punctuation outside of apostrophe (') and dash (-) and be lower cased.

    Args:
        input_case: Input text capitalization, set to 'cased' if text contains capital letters.
            This flag affects normalization rules applied to the text. Note, `lower_cased` won't lower case input.
        lang: language specifying the ITN
        whitelist: path to a file with whitelist replacements. (each line of the file: written_form\tspoken_form\n),
            e.g. nemo_text_processing/inverse_text_normalization/en/data/whitelist.tsv
        cache_dir: path to a dir with .far grammar file. Set to None to avoid using cache.
        overwrite_cache: set to True to overwrite .far files
        max_number_of_permutations_per_split: a maximum number
            of permutations which can be generated from input sequence of tokens.

    Raises:
        ValueError: if ``input_case`` is not a supported capitalization mode.
        NotImplementedError: if ``lang`` has no inverse normalization grammar.
    """

    def __init__(
        self,
        input_case: str = INPUT_LOWER_CASED,
        lang: str = "en",
        whitelist: str = None,
        cache_dir: str = None,
        overwrite_cache: bool = False,
        max_number_of_permutations_per_split: int = 729,
    ):
        # Validate with a real exception (was a bare `assert`, which is
        # silently stripped when Python runs with -O) and compare against the
        # shared constants instead of duplicated string literals.
        if input_case not in [INPUT_LOWER_CASED, INPUT_CASED]:
            raise ValueError(
                f"input_case must be one of [{INPUT_LOWER_CASED!r}, {INPUT_CASED!r}], got {input_case!r}"
            )
        # Grammar classes are imported lazily so only the requested language's
        # FSTs are compiled.
        if lang == 'en':  # English
            from nemo_text_processing.inverse_text_normalization.en.taggers.tokenize_and_classify import ClassifyFst
            from nemo_text_processing.inverse_text_normalization.en.verbalizers.verbalize_final import (
                VerbalizeFinalFst,
            )
        elif lang == 'es':  # Spanish (Espanol)
            from nemo_text_processing.inverse_text_normalization.es.taggers.tokenize_and_classify import ClassifyFst
            from nemo_text_processing.inverse_text_normalization.es.verbalizers.verbalize_final import (
                VerbalizeFinalFst,
            )
        elif lang == 'pt':  # Portuguese (Português)
            from nemo_text_processing.inverse_text_normalization.pt.taggers.tokenize_and_classify import ClassifyFst
            from nemo_text_processing.inverse_text_normalization.pt.verbalizers.verbalize_final import (
                VerbalizeFinalFst,
            )
        elif lang == 'ru':  # Russian (Russkiy Yazyk)
            from nemo_text_processing.inverse_text_normalization.ru.taggers.tokenize_and_classify import ClassifyFst
            from nemo_text_processing.inverse_text_normalization.ru.verbalizers.verbalize_final import (
                VerbalizeFinalFst,
            )
        elif lang == 'de':  # German (Deutsch)
            from nemo_text_processing.inverse_text_normalization.de.taggers.tokenize_and_classify import ClassifyFst
            from nemo_text_processing.inverse_text_normalization.de.verbalizers.verbalize_final import (
                VerbalizeFinalFst,
            )
        elif lang == 'fr':  # French (Français)
            from nemo_text_processing.inverse_text_normalization.fr.taggers.tokenize_and_classify import ClassifyFst
            from nemo_text_processing.inverse_text_normalization.fr.verbalizers.verbalize_final import (
                VerbalizeFinalFst,
            )
        elif lang == 'sv':  # Swedish (Svenska)
            from nemo_text_processing.inverse_text_normalization.sv.taggers.tokenize_and_classify import ClassifyFst
            from nemo_text_processing.inverse_text_normalization.sv.verbalizers.verbalize_final import (
                VerbalizeFinalFst,
            )
        elif lang == 'vi':  # Vietnamese (Tiếng Việt)
            from nemo_text_processing.inverse_text_normalization.vi.taggers.tokenize_and_classify import ClassifyFst
            from nemo_text_processing.inverse_text_normalization.vi.verbalizers.verbalize_final import (
                VerbalizeFinalFst,
            )
        elif lang == 'ar':  # Arabic
            from nemo_text_processing.inverse_text_normalization.ar.taggers.tokenize_and_classify import ClassifyFst
            from nemo_text_processing.inverse_text_normalization.ar.verbalizers.verbalize_final import (
                VerbalizeFinalFst,
            )
        elif lang == 'es_en':  # code-switched Spanish/English (comment fixed; was mislabeled "Arabic")
            from nemo_text_processing.inverse_text_normalization.es_en.taggers.tokenize_and_classify import ClassifyFst
            from nemo_text_processing.inverse_text_normalization.es_en.verbalizers.verbalize_final import (
                VerbalizeFinalFst,
            )
        elif lang == 'zh':  # Mandarin
            from nemo_text_processing.inverse_text_normalization.zh.taggers.tokenize_and_classify import ClassifyFst
            from nemo_text_processing.inverse_text_normalization.zh.verbalizers.verbalize_final import (
                VerbalizeFinalFst,
            )
        else:
            # Previously an unsupported language fell through and crashed with
            # a confusing NameError on ClassifyFst below; fail clearly instead.
            raise NotImplementedError(f"Inverse text normalization is not available for language: {lang}")
        self.tagger = ClassifyFst(
            cache_dir=cache_dir, whitelist=whitelist, overwrite_cache=overwrite_cache, input_case=input_case
        )
        self.verbalizer = VerbalizeFinalFst()
        self.parser = TokenParser()
        self.lang = lang
        self.max_number_of_permutations_per_split = max_number_of_permutations_per_split

    def inverse_normalize_list(self, texts: List[str], verbose=False) -> List[str]:
        """
        NeMo inverse text normalizer

        Args:
            texts: list of input strings
            verbose: whether to print intermediate meta information

        Returns converted list of input strings
        """
        return self.normalize_list(texts=texts, verbose=verbose)

    def inverse_normalize(self, text: str, verbose: bool) -> str:
        """
        Main function. Inverse normalizes tokens from spoken to written form
            e.g. twelve kilograms -> 12 kg

        Args:
            text: string that may include semiotic classes
            verbose: whether to print intermediate meta information

        Returns: written form
        """
        return self.normalize(text=text, verbose=verbose)
def parse_args():
    """Build and parse the CLI arguments for running inverse text normalization."""
    parser = ArgumentParser()
    # --text and --input_file are mutually exclusive input sources.
    # (renamed from `input`, which shadowed the builtin of the same name)
    input_group = parser.add_mutually_exclusive_group()
    input_group.add_argument("--text", dest="input_string", help="input string", type=str)
    input_group.add_argument("--input_file", dest="input_file", help="input file path", type=str)
    parser.add_argument('--output_file', dest="output_file", help="output file path", type=str)
    parser.add_argument(
        "--language",
        help="language",
        choices=['en', 'de', 'es', 'pt', 'ru', 'fr', 'sv', 'vi', 'ar', 'es_en', 'zh'],
        default="en",
        type=str,
    )
    parser.add_argument(
        "--input_case",
        help="Input text capitalization, set to 'cased' if text contains capital letters."
        "This flag affects normalization rules applied to the text. Note, `lower_cased` won't lower case input.",
        choices=[INPUT_CASED, INPUT_LOWER_CASED],
        default=INPUT_LOWER_CASED,
        type=str,
    )
    parser.add_argument(
        "--whitelist",
        # FIX: removed the doubled "with with" typo in the help text.
        help="Path to a file with whitelist replacements," "e.g., inverse_normalization/en/data/whitelist.tsv",
        default=None,
        type=str,
    )
    parser.add_argument("--verbose", help="print info for debugging", action='store_true')
    parser.add_argument("--overwrite_cache", help="set to True to re-create .far grammar files", action="store_true")
    parser.add_argument(
        "--cache_dir",
        help="path to a dir with .far grammar file. Set to None to avoid using cache",
        default=None,
        type=str,
    )
    return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
whitelist = os.path.abspath(args.whitelist) if args.whitelist else None
start_time = perf_counter()
inverse_normalizer = InverseNormalizer(
input_case=args.input_case,
lang=args.language,
cache_dir=args.cache_dir,
overwrite_cache=args.overwrite_cache,
whitelist=whitelist,
)
print(f'Time to generate graph: {round(perf_counter() - start_time, 2)} sec')
if args.input_string:
print(inverse_normalizer.inverse_normalize(args.input_string, verbose=args.verbose))
elif args.input_file:
print("Loading data: " + args.input_file)
data = load_file(args.input_file)
print("- Data: " + str(len(data)) + " sentences")
prediction = inverse_normalizer.inverse_normalize_list(data, verbose=args.verbose)
if args.output_file:
write_file(args.output_file, prediction)
print(f"- Denormalized. Writing out to {args.output_file}")
else:
print(prediction)
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/inverse_normalize.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
from nemo_text_processing.text_normalization.data_loader_utils import (
evaluate,
known_types,
load_files,
training_data_to_sentences,
training_data_to_tokens,
)
'''
Runs Evaluation on data in the format of : <semiotic class>\t<unnormalized text>\t<`self` if trivial class or normalized text>
like the Google text normalization data https://www.kaggle.com/richardwilliamsproat/text-normalization-for-english-russian-and-polish
'''
def parse_args():
    """Build and parse the CLI arguments for the ITN evaluation script."""
    arg_parser = ArgumentParser()
    arg_parser.add_argument("--input", help="input file path", type=str)
    lang_choices = ['en', 'de', 'es', 'pt', 'ru', 'fr', 'vi']
    arg_parser.add_argument("--lang", help="language", choices=lang_choices, default="en", type=str)
    # --cat restricts evaluation to a single semiotic class.
    cat_help = "focus on class only (" + ", ".join(known_types) + ")"
    arg_parser.add_argument(
        "--cat", dest="category", help=cat_help, type=str, default=None, choices=known_types,
    )
    arg_parser.add_argument("--filter", action='store_true', help="clean data for inverse normalization purposes")
    return arg_parser.parse_args()
if __name__ == "__main__":
    # Example usage:
    # python run_evaluate.py --input=<INPUT> --cat=<CATEGORY> --filter
    args = parse_args()
    if args.lang == 'en':
        from nemo_text_processing.inverse_text_normalization.en.clean_eval_data import filter_loaded_data
    elif args.filter:
        # filter_loaded_data is English-specific and only imported for --lang=en;
        # without this guard the call below would crash with a confusing NameError.
        raise ValueError("--filter is currently only supported together with --lang=en")
    file_path = args.input
    inverse_normalizer = InverseNormalizer(lang=args.lang)
    print("Loading training data: " + file_path)
    training_data = load_files([file_path])
    if args.filter:
        training_data = filter_loaded_data(training_data)
    if args.category is None:
        # Sentence-level evaluation only makes sense when no single class is selected.
        print("Sentence level evaluation...")
        sentences_un_normalized, sentences_normalized, _ = training_data_to_sentences(training_data)
        print("- Data: " + str(len(sentences_normalized)) + " sentences")
        sentences_prediction = inverse_normalizer.inverse_normalize_list(sentences_normalized)
        print("- Denormalized. Evaluating...")
        sentences_accuracy = evaluate(
            preds=sentences_prediction, labels=sentences_un_normalized, input=sentences_normalized
        )
        print("- Accuracy: " + str(sentences_accuracy))
    # Token-level evaluation always runs (optionally restricted to one category).
    print("Token level evaluation...")
    tokens_per_type = training_data_to_tokens(training_data, category=args.category)
    token_accuracy = {}
    for token_type in tokens_per_type:
        print("- Token type: " + token_type)
        tokens_un_normalized, tokens_normalized = tokens_per_type[token_type]
        print(" - Data: " + str(len(tokens_normalized)) + " tokens")
        tokens_prediction = inverse_normalizer.inverse_normalize_list(tokens_normalized)
        print(" - Denormalized. Evaluating...")
        token_accuracy[token_type] = evaluate(tokens_prediction, tokens_un_normalized, input=tokens_normalized)
        print(" - Accuracy: " + str(token_accuracy[token_type]))
    token_count_per_type = {token_type: len(tokens_per_type[token_type][0]) for token_type in tokens_per_type}
    # Weight each class accuracy by its token count to get the overall token-level accuracy.
    token_weighted_accuracy = [
        token_count_per_type[token_type] * accuracy for token_type, accuracy in token_accuracy.items()
    ]
    print("- Accuracy: " + str(sum(token_weighted_accuracy) / sum(token_count_per_type.values())))
    print(" - Total: " + str(sum(token_count_per_type.values())), '\n')
    for token_type in token_accuracy:
        if token_type not in known_types:
            raise ValueError("Unexpected token type: " + token_type)
    if args.category is None:
        # Summary table: class name | token count | denormalization accuracy.
        c1 = ['Class', 'sent level'] + known_types
        c2 = ['Num Tokens', len(sentences_normalized)] + [
            token_count_per_type[known_type] if known_type in tokens_per_type else '0' for known_type in known_types
        ]
        c3 = ["Denormalization", sentences_accuracy] + [
            token_accuracy[known_type] if known_type in token_accuracy else '0' for known_type in known_types
        ]
        for i in range(len(c1)):
            print(f'{str(c1[i]):10s} | {str(c2[i]):10s} | {str(c3[i]):5s}')
    else:
        print(f'numbers\t{token_count_per_type[args.category]}')
        print(f'Denormalization\t{token_accuracy[args.category]}')
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/run_evaluate.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import string
from pathlib import Path
from typing import Dict
import pynini
from pynini import Far
from pynini.export import export
from pynini.lib import byte, pynutil, utf8
# Single-character building blocks shared by all ITN grammars in this package.
NEMO_CHAR = utf8.VALID_UTF8_CHAR  # any single valid UTF-8 character
NEMO_DIGIT = byte.DIGIT  # ASCII digit 0-9
NEMO_LOWER = pynini.union(*string.ascii_lowercase).optimize()
NEMO_UPPER = pynini.union(*string.ascii_uppercase).optimize()
NEMO_ALPHA = pynini.union(NEMO_LOWER, NEMO_UPPER).optimize()
NEMO_ALNUM = pynini.union(NEMO_DIGIT, NEMO_ALPHA).optimize()
NEMO_HEX = pynini.union(*string.hexdigits).optimize()
NEMO_NON_BREAKING_SPACE = "\u00A0"
NEMO_SPACE = " "
NEMO_WHITE_SPACE = pynini.union(" ", "\t", "\n", "\r", "\u00A0").optimize()
NEMO_NOT_SPACE = pynini.difference(NEMO_CHAR, NEMO_WHITE_SPACE).optimize()
NEMO_NOT_QUOTE = pynini.difference(NEMO_CHAR, r'"').optimize()  # any character except a double quote
NEMO_PUNCT = pynini.union(*map(pynini.escape, string.punctuation)).optimize()
NEMO_GRAPH = pynini.union(NEMO_ALNUM, NEMO_PUNCT).optimize()
NEMO_SIGMA = pynini.closure(NEMO_CHAR)  # sigma-star: any string over NEMO_CHAR

# Whitespace helpers used when composing token graphs.
delete_space = pynutil.delete(pynini.closure(NEMO_WHITE_SPACE))  # drop any (possibly empty) run of whitespace
insert_space = pynutil.insert(" ")
delete_extra_space = pynini.cross(pynini.closure(NEMO_WHITE_SPACE, 1), " ")  # collapse 1+ whitespace chars to one space
# French frequently compounds numbers with hyphen.
delete_hyphen = pynutil.delete(pynini.closure("-", 0, 1))
insert_hyphen = pynutil.insert("-")

# Case-conversion transducers over ASCII letters.
TO_LOWER = pynini.union(*[pynini.cross(x, y) for x, y in zip(string.ascii_uppercase, string.ascii_lowercase)])
TO_UPPER = pynini.invert(TO_LOWER)
def generator_main(file_name: str, graphs: Dict[str, pynini.FstLike]):
    """
    Exports graphs as an OpenFst finite state archive (FAR) file.

    Args:
        file_name: exported file name
        graphs: mapping of rule name to the Pynini WFST graph stored under that name
    """
    far_writer = export.Exporter(file_name)
    for rule_name, rule_graph in graphs.items():
        # Optimize each graph before writing it under its rule name.
        far_writer[rule_name] = rule_graph.optimize()
    far_writer.close()
    logging.info(f"Created {file_name}")
def convert_space(fst) -> "pynini.FstLike":
    """
    Converts space to nonbreaking space.
    Used only in tagger grammars for transducing token values within quotes, e.g. name: "hello kitty"
    This is making transducer significantly slower, so only use when there could be potential spaces within quotes, otherwise leave it.

    Args:
        fst: input fst

    Returns output fst where breaking spaces are converted to non breaking spaces
    """
    space_to_nbsp = pynini.cross(NEMO_SPACE, NEMO_NON_BREAKING_SPACE)
    return fst @ pynini.cdrewrite(space_to_nbsp, "", "", NEMO_SIGMA)
class GraphFst:
    """
    Base class for all grammar fsts.

    Args:
        name: name of grammar class
        kind: either 'classify' or 'verbalize'
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, name: str, kind: str, deterministic: bool = True):
        self.name = name
        self.kind = kind
        self._fst = None
        self.deterministic = deterministic

        # Precompiled grammars, if present, live next to this file under grammars/<kind>/<name>.far.
        # Built with pathlib joins (instead of string concatenation) so an empty
        # dirname(__file__) cannot produce a spurious absolute "/grammars/..." path.
        self.far_path = Path(os.path.dirname(__file__)) / "grammars" / kind / f"{name}.far"
        if self.far_exist():
            # Load the precompiled FST instead of rebuilding the grammar from scratch.
            self._fst = Far(self.far_path, mode="r", arc_type="standard", far_type="default").get_fst()

    def far_exist(self) -> bool:
        """
        Returns true if FAR can be loaded
        """
        return self.far_path.exists()

    @property
    def fst(self) -> "pynini.FstLike":
        # None until a grammar is built (via the setter) or restored from a FAR.
        return self._fst

    @fst.setter
    def fst(self, fst):
        self._fst = fst

    def add_tokens(self, fst) -> "pynini.FstLike":
        """
        Wraps class name around to given fst, e.g. X -> "<name> { X }"

        Args:
            fst: input fst

        Returns:
            Fst: fst
        """
        return pynutil.insert(f"{self.name} {{ ") + fst + pynutil.insert(" }")

    def delete_tokens(self, fst) -> "pynini.FstLike":
        """
        Deletes class name wrap around output of given fst

        Args:
            fst: input fst

        Returns:
            Fst: fst
        """
        res = (
            pynutil.delete(f"{self.name}")
            + delete_space
            + pynutil.delete("{")
            + delete_space
            + fst
            + delete_space
            + pynutil.delete("}")
        )
        # Restore non-breaking spaces (inserted by convert_space in taggers) back to plain spaces.
        return res @ pynini.cdrewrite(pynini.cross("\u00A0", " "), "", "", NEMO_SIGMA)
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/vi/graph_utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.inverse_text_normalization.vi.taggers.tokenize_and_classify import ClassifyFst
from nemo_text_processing.inverse_text_normalization.vi.verbalizers.verbalize import VerbalizeFst
from nemo_text_processing.inverse_text_normalization.vi.verbalizers.verbalize_final import VerbalizeFinalFst
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/vi/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
def get_abs_path(rel_path):
    """
    Resolve a path relative to the directory containing this file.

    Args:
        rel_path: relative path to this file

    Returns absolute path as a string
    """
    module_dir = os.path.dirname(os.path.abspath(__file__))
    return f"{module_dir}/{rel_path}"
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/vi/utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.vi.graph_utils import (
GraphFst,
convert_space,
delete_extra_space,
delete_space,
insert_space,
)
from nemo_text_processing.inverse_text_normalization.vi.utils import get_abs_path
from pynini.lib import pynutil
class TimeFst(GraphFst):
    """
    Finite state transducer for classifying time
        e.g. hai rưỡi -> time { hours: "2" minutes: "30" }
        e.g. chín giờ kém hai mươi -> time { hours: "8" minutes: "40" }
        e.g. ba phút hai giây -> time { minutes: "3" seconds: "2" }
        e.g. mười giờ chín phút bốn mươi lăm giây -> time { hours: "10" minutes: "9" seconds: "45" }
    """

    def __init__(self):
        super().__init__(name="time", kind="classify")
        # hours, minutes, seconds, suffix, zone, style, speak_period
        # Mappings for the "kém" (minutes-to-the-hour) construction: hours_to shifts the
        # hour back by one and minutes_to maps N to 60 - N.
        # NOTE(review): inferred from the "chín giờ kém hai mươi -> 8:40" docstring example;
        # confirm against the tsv contents.
        graph_hours_to = pynini.string_file(get_abs_path("data/time/hours_to.tsv"))
        graph_minutes_to = pynini.string_file(get_abs_path("data/time/minutes_to.tsv"))
        graph_hours = pynini.string_file(get_abs_path("data/time/hours.tsv"))
        graph_minutes = pynini.string_file(get_abs_path("data/time/minutes.tsv"))
        time_zone_graph = pynini.invert(pynini.string_file(get_abs_path("data/time/time_zone.tsv")))
        # "rưỡi" (half) denotes 30 minutes past the hour.
        graph_half = pynini.cross("rưỡi", "30")
        # Unit words are consumed from the input and produce no output.
        oclock = pynini.cross("giờ", "")
        minute = pynini.cross("phút", "")
        optional_minute = pynini.closure(delete_space + minute, 0, 1)
        second = pynini.cross("giây", "")
        # hours value followed by the (deleted) hour word.
        final_graph_hour = pynutil.insert('hours: "') + graph_hours + pynutil.insert('"') + delete_space + oclock
        graph_minute = graph_minutes + optional_minute
        graph_second = graph_minutes + delete_space + second
        # Optional trailing time zone; spaces inside the zone value become non-breaking.
        final_time_zone_optional = pynini.closure(
            delete_space
            + insert_space
            + pynutil.insert('zone: "')
            + convert_space(time_zone_graph)
            + pynutil.insert('"'),
            0,
            1,
        )
        # hour + minutes (or bare "rưỡi" for half past).
        graph_hm = (
            final_graph_hour
            + delete_extra_space
            + pynutil.insert('minutes: "')
            + (graph_minute | graph_half)
            + pynutil.insert('"')
        )
        # hour + minutes + seconds.
        graph_hms = (
            final_graph_hour
            + delete_extra_space
            + pynutil.insert('minutes: "')
            + graph_minutes
            + delete_space
            + minute
            + pynutil.insert('"')
            + delete_extra_space
            + pynutil.insert('seconds: "')
            + graph_second
            + pynutil.insert('"')
        )
        # minutes + seconds without an hour part.
        graph_ms = (
            pynutil.insert('minutes: "')
            + graph_minutes
            + delete_space
            + minute
            + pynutil.insert('"')
            + delete_extra_space
            + pynutil.insert('seconds: "')
            + (graph_second | graph_half)
            + pynutil.insert('"')
        )
        # "kém" construction: spoken hour/minutes are rewritten via the *_to mappings,
        # e.g. "chín giờ kém hai mươi" -> hours: "8" minutes: "40".
        graph_hours_to_component = graph_hours @ graph_hours_to
        graph_minutes_to_component = graph_minutes @ graph_minutes_to
        graph_time_to = (
            pynutil.insert('hours: "')
            + graph_hours_to_component
            + pynutil.insert('"')
            + delete_space
            + oclock
            + delete_space
            + pynutil.delete("kém")
            + delete_extra_space
            + pynutil.insert('minutes: "')
            + graph_minutes_to_component
            + pynutil.insert('"')
            + optional_minute
        )
        # Time zone is only allowed after the hour-based variants.
        final_graph = (final_graph_hour | graph_hm | graph_hms) + final_time_zone_optional
        final_graph |= graph_ms
        final_graph |= graph_time_to
        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/vi/taggers/time.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.vi.graph_utils import (
GraphFst,
convert_space,
delete_extra_space,
delete_space,
)
from nemo_text_processing.inverse_text_normalization.vi.utils import get_abs_path
from pynini.lib import pynutil
class MeasureFst(GraphFst):
    """
    Finite state transducer for classifying measure
        e.g. trừ mười hai ki lô gam -> measure { negative: "true" cardinal { integer: "12" } units: "kg" }

    Args:
        cardinal: CardinalFst
        decimal: DecimalFst
    """

    def __init__(self, cardinal: GraphFst, decimal: GraphFst):
        super().__init__(name="measure", kind="classify")
        cardinal_graph = cardinal.graph_no_exception
        graph_digit = pynini.string_file(get_abs_path("data/numbers/digit.tsv"))
        # Alternate spoken forms of trailing digits; "rưỡi" (a half) maps to fractional digit 5.
        graph_four = pynini.cross("tư", "4")
        graph_one = pynini.cross("mốt", "1")
        graph_half = pynini.cross("rưỡi", "5")
        graph_unit = pynini.string_file(get_abs_path("data/measurements.tsv"))
        graph_unit_singular = pynini.invert(graph_unit)  # singular -> abbr
        # Optional leading negation: "âm"/"trừ" -> negative: "true".
        optional_graph_negative = pynini.closure(
            pynutil.insert("negative: ") + pynini.cross(pynini.union("âm", "trừ"), '"true"') + delete_extra_space,
            0,
            1,
        )
        unit_singular = convert_space(graph_unit_singular)
        # "trên" (per) joins two units into a compound like km/h; the compound form
        # carries a small extra weight so the simple unit wins when both match.
        unit_misc = pynutil.insert("/") + pynutil.delete("trên") + delete_space + convert_space(graph_unit_singular)
        unit_singular = (
            pynutil.insert('units: "')
            + (unit_singular | unit_misc | pynutil.add_weight(unit_singular + delete_space + unit_misc, 0.01))
            + pynutil.insert('"')
        )
        # decimal value + unit.
        subgraph_decimal = (
            pynutil.insert("decimal { ")
            + optional_graph_negative
            + decimal.final_graph_wo_negative
            + pynutil.insert(" }")
            + delete_extra_space
            + unit_singular
        )
        # integer value + unit.
        subgraph_cardinal = (
            pynutil.insert("cardinal { ")
            + optional_graph_negative
            + pynutil.insert('integer: "')
            + cardinal_graph
            + pynutil.insert('"')
            + pynutil.insert(" }")
            + delete_extra_space
            + unit_singular
        )
        # Single fractional digit spoken after the unit, e.g. "hai ký rưỡi" (2.5 kg).
        # NOTE(review): inferred from graph_half above — confirm intended usage.
        fraction_graph = (
            delete_extra_space
            + pynutil.insert('fractional_part: "')
            + (graph_digit | graph_half | graph_one | graph_four)
            + pynutil.insert('"')
        )
        subgraph_cardinal |= (
            pynutil.insert("cardinal { ")
            + optional_graph_negative
            + pynutil.insert('integer: "')
            + cardinal_graph
            + pynutil.insert('" }')
            + delete_extra_space
            + unit_singular
            + fraction_graph
        )
        final_graph = subgraph_decimal | subgraph_cardinal
        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/vi/taggers/measure.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.vi.graph_utils import GraphFst, delete_extra_space, delete_space
from pynini.lib import pynutil
class FractionFst(GraphFst):
    """
    Finite state transducer for classifying fraction
        e.g. 2 phần 3 -> tokens { fraction { numerator: "2" denominator: "3" } }
        e.g. 2 trên 3 -> tokens { fraction { numerator: "2" denominator: "3" } }
        e.g. 2 chia 3 -> tokens { fraction { numerator: "2" denominator: "3" } }

    Args:
        cardinal: CardinalFst
    """

    def __init__(self, cardinal: GraphFst):
        super().__init__(name="fraction", kind="classify")
        # integer_part # numerator # denominator
        graph_cardinal = cardinal.graph_no_exception
        # "tư" is an alternate spoken form of 4, accepted only in the denominator here.
        graph_four = pynini.cross("tư", "4")
        numerator = pynutil.insert('numerator: "') + graph_cardinal + pynutil.insert('"')
        # The separator word ("phần"/"trên"/"chia") is consumed, producing no output.
        fraction_component = pynutil.delete(pynini.union("phần", "trên", "chia"))
        denominator = pynutil.insert('denominator: "') + (graph_cardinal | graph_four) + pynutil.insert('"')
        graph_fraction_component = numerator + delete_space + fraction_component + delete_extra_space + denominator
        self.graph_fraction_component = graph_fraction_component
        graph = graph_fraction_component
        graph = graph.optimize()
        # Exposed for reuse by grammars that handle negation themselves.
        self.final_graph_wo_negative = graph
        # Optional leading negation: "âm"/"trừ" -> negative: "true".
        optional_graph_negative = pynini.closure(
            pynutil.insert("negative: ") + pynini.cross(pynini.union("âm", "trừ"), '"true"') + delete_extra_space,
            0,
            1,
        )
        graph = optional_graph_negative + graph
        final_graph = self.add_tokens(graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/vi/taggers/fraction.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.vi.graph_utils import GraphFst, delete_space
from nemo_text_processing.inverse_text_normalization.vi.utils import get_abs_path
from pynini.lib import pynutil
class TelephoneFst(GraphFst):
    """
    Finite state transducer for classifying telephone numbers, e.g.
    một hai ba một hai ba năm sáu bảy tám -> { number_part: "1231235678" }
    """

    def __init__(self):
        super().__init__(name="telephone", kind="classify")
        zero_fst = pynini.string_file(get_abs_path("data/numbers/zero.tsv"))
        digit_fst = pynini.string_file(get_abs_path("data/numbers/digit.tsv"))
        any_digit = digit_fst | zero_fst
        # The final digit also accepts a few alternate spoken variants.
        final_digit = any_digit | pynini.cross("mốt", "1") | pynini.cross("tư", "4") | pynini.cross("lăm", "5")
        # Require at least three spoken digits: two or more, then the last one.
        number_sequence = pynini.closure(any_digit + delete_space, 2) + final_digit
        number_part = pynutil.insert('number_part: "') + number_sequence + pynutil.insert('"')
        self.fst = self.add_tokens(number_part).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/vi/taggers/telephone.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.vi.graph_utils import GraphFst, delete_space
from nemo_text_processing.inverse_text_normalization.vi.utils import get_abs_path
from pynini.lib import pynutil
class OrdinalFst(GraphFst):
    """
    Finite state transducer for classifying ordinal
        e.g. thứ nhất -> ordinal { integer: "1" }
    """

    def __init__(self):
        super().__init__(name="ordinal", kind="classify")
        ordinal_digit = pynini.string_file(get_abs_path("data/ordinals/digit.tsv"))
        # "thứ" marks an ordinal and is deleted from the output.
        delete_marker = pynini.cross("thứ", "")
        self.graph = ordinal_digit
        final_graph = (
            pynutil.insert('integer: "') + delete_marker + delete_space + self.graph + pynutil.insert('"')
        )
        self.fst = self.add_tokens(final_graph).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/vi/taggers/ordinal.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.vi.graph_utils import GraphFst, convert_space
from nemo_text_processing.inverse_text_normalization.vi.utils import get_abs_path
from pynini.lib import pynutil
class WhiteListFst(GraphFst):
    """
    Finite state transducer for classifying whitelisted tokens
        e.g. misses -> tokens { name: "mrs." }
    This class has highest priority among all classifier grammars.
    Whitelisted tokens are defined and loaded from "data/whitelist.tsv" (unless input_file specified).

    Args:
        input_file: path to a file with whitelist replacements (each line of the file: written_form\tspoken_form\n),
            e.g. nemo_text_processing/inverse_text_normalization/en/data/whitelist.tsv
    """

    def __init__(self, input_file: str = None):
        super().__init__(name="whitelist", kind="classify")
        # Fall back to the bundled whitelist when no file is provided.
        source = input_file if input_file else get_abs_path("data/whitelist.tsv")
        # The tsv maps written -> spoken; invert it to map spoken -> written.
        whitelist = pynini.string_file(source).invert()
        graph = pynutil.insert('name: "') + convert_space(whitelist) + pynutil.insert('"')
        self.fst = graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/vi/taggers/whitelist.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import pynini
from nemo_text_processing.inverse_text_normalization.vi.graph_utils import (
GraphFst,
delete_extra_space,
delete_space,
generator_main,
)
from nemo_text_processing.inverse_text_normalization.vi.taggers.cardinal import CardinalFst
from nemo_text_processing.inverse_text_normalization.vi.taggers.date import DateFst
from nemo_text_processing.inverse_text_normalization.vi.taggers.decimal import DecimalFst
from nemo_text_processing.inverse_text_normalization.vi.taggers.electronic import ElectronicFst
from nemo_text_processing.inverse_text_normalization.vi.taggers.fraction import FractionFst
from nemo_text_processing.inverse_text_normalization.vi.taggers.measure import MeasureFst
from nemo_text_processing.inverse_text_normalization.vi.taggers.money import MoneyFst
from nemo_text_processing.inverse_text_normalization.vi.taggers.ordinal import OrdinalFst
from nemo_text_processing.inverse_text_normalization.vi.taggers.punctuation import PunctuationFst
from nemo_text_processing.inverse_text_normalization.vi.taggers.telephone import TelephoneFst
from nemo_text_processing.inverse_text_normalization.vi.taggers.time import TimeFst
from nemo_text_processing.inverse_text_normalization.vi.taggers.whitelist import WhiteListFst
from nemo_text_processing.inverse_text_normalization.vi.taggers.word import WordFst
from nemo_text_processing.text_normalization.en.graph_utils import INPUT_LOWER_CASED
from pynini.lib import pynutil
class ClassifyFst(GraphFst):
    """
    Final class that composes all other classification grammars. This class can process an entire sentence, that is lower cased.
    For deployment, this grammar will be compiled and exported to OpenFst Finite State Archive (FAR) File.
    More details to deployment at NeMo/tools/text_processing_deployment.

    Args:
        cache_dir: path to a dir with .far grammar file. Set to None to avoid using cache.
        overwrite_cache: set to True to overwrite .far files
        whitelist: path to a file with whitelist replacements
        input_case: accepting either "lower_cased" or "cased" input.
    """

    def __init__(
        self,
        cache_dir: str = None,
        overwrite_cache: bool = False,
        whitelist: str = None,
        input_case: str = INPUT_LOWER_CASED,
    ):
        super().__init__(name="tokenize_and_classify", kind="classify")
        far_file = None
        if cache_dir is not None and cache_dir != "None":
            os.makedirs(cache_dir, exist_ok=True)
            far_file = os.path.join(cache_dir, f"vi_itn_{input_case}.far")
        if not overwrite_cache and far_file and os.path.exists(far_file):
            # Restore the precompiled grammar instead of rebuilding it (expensive).
            self.fst = pynini.Far(far_file, mode="r")["tokenize_and_classify"]
            logging.info(f"ClassifyFst.fst was restored from {far_file}.")
        else:
            logging.info(f"Creating ClassifyFst grammars.")
            # Cardinal and decimal grammars are shared by several dependent grammars below.
            cardinal = CardinalFst()
            cardinal_graph = cardinal.fst
            fraction = FractionFst(cardinal)
            fraction_graph = fraction.fst
            ordinal = OrdinalFst()
            ordinal_graph = ordinal.fst
            decimal = DecimalFst(cardinal)
            decimal_graph = decimal.fst
            measure_graph = MeasureFst(cardinal=cardinal, decimal=decimal).fst
            date_graph = DateFst(cardinal=cardinal).fst
            word_graph = WordFst().fst
            time_graph = TimeFst().fst
            money_graph = MoneyFst(cardinal=cardinal, decimal=decimal).fst
            whitelist_graph = WhiteListFst(input_file=whitelist).fst
            punct_graph = PunctuationFst().fst
            electronic_graph = ElectronicFst().fst
            telephone_graph = TelephoneFst().fst
            # Union of all classifiers; lower weight = higher priority. Whitelist wins,
            # plain words are the fallback with a prohibitively high weight.
            classify = (
                pynutil.add_weight(whitelist_graph, 1.01)
                | pynutil.add_weight(time_graph, 1.05)
                | pynutil.add_weight(date_graph, 1.09)
                | pynutil.add_weight(decimal_graph, 1.08)
                | pynutil.add_weight(measure_graph, 1.1)
                | pynutil.add_weight(cardinal_graph, 1.1)
                | pynutil.add_weight(ordinal_graph, 1.1)
                | pynutil.add_weight(fraction_graph, 1.09)
                | pynutil.add_weight(money_graph, 1.07)
                | pynutil.add_weight(telephone_graph, 1.1)
                | pynutil.add_weight(electronic_graph, 1.1)
                | pynutil.add_weight(word_graph, 100)
            )
            # Wrap each classified span (and any punctuation) in "tokens { ... }".
            punct = pynutil.insert("tokens { ") + pynutil.add_weight(punct_graph, weight=1.1) + pynutil.insert(" }")
            token = pynutil.insert("tokens { ") + classify + pynutil.insert(" }")
            token_plus_punct = (
                pynini.closure(punct + pynutil.insert(" ")) + token + pynini.closure(pynutil.insert(" ") + punct)
            )
            # A sentence is one or more tokens separated by whitespace.
            graph = token_plus_punct + pynini.closure(delete_extra_space + token_plus_punct)
            graph = delete_space + graph + delete_space
            self.fst = graph.optimize()
            if far_file:
                # Cache the compiled grammar for subsequent runs.
                generator_main(far_file, {"tokenize_and_classify": self.fst})
                logging.info(f"ClassifyFst grammars are saved to {far_file}.")
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/vi/taggers/tokenize_and_classify.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.vi.graph_utils import GraphFst
from pynini.lib import pynutil
class PunctuationFst(GraphFst):
    """
    Finite state transducer for classifying punctuation
        e.g. a, -> tokens { name: "a" } tokens { name: "," }
    """

    def __init__(self):
        super().__init__(name="punctuation", kind="classify")
        # Accepted punctuation marks (note: no quotes, which delimit token values).
        punct_chars = "!#$%&'()*+,-./:;<=>?@^_`{|}~"
        punct_acceptor = pynini.union(*punct_chars)
        self.fst = (pynutil.insert('name: "') + punct_acceptor + pynutil.insert('"')).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/vi/taggers/punctuation.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/vi/taggers/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.vi.graph_utils import (
NEMO_DIGIT,
GraphFst,
delete_extra_space,
delete_space,
)
from nemo_text_processing.inverse_text_normalization.vi.utils import get_abs_path
from pynini.lib import pynutil
graph_digit = pynini.string_file(get_abs_path("data/numbers/digit.tsv"))
def get_quantity(decimal: "pynini.FstLike", cardinal_up_to_hundred: "pynini.FstLike") -> "pynini.FstLike":
    """
    Returns FST that transforms either a cardinal or decimal followed by a quantity into a numeral,
    e.g. một triệu -> integer_part: "1" quantity: "triệu"
    e.g. một tỷ rưỡi -> integer_part: "1" quantity: "tỷ" fractional_part: "5"
    Args:
        decimal: decimal FST
        cardinal_up_to_hundred: cardinal FST
    """
    # Strip leading zeros so the integer part never starts with "0".
    numbers = cardinal_up_to_hundred @ (
        pynutil.delete(pynini.closure("0")) + pynini.difference(NEMO_DIGIT, "0") + pynini.closure(NEMO_DIGIT)
    )
    # Quantity words accepted after a bare cardinal: triệu (million),
    # tỉ/tỷ (billion, two spellings), vạn (ten thousand).
    suffix = pynini.union("triệu", "tỉ", "tỷ", "vạn")
    # Word-final digit variants: "tư" -> 4, "mốt" -> 1; "rưỡi" ("half") -> 5.
    graph_four = pynini.cross("tư", "4")
    graph_one = pynini.cross("mốt", "1")
    graph_half = pynini.cross("rưỡi", "5")
    # Exclude plain "năm" as a trailing digit — NOTE(review): presumably because
    # "năm" is ambiguous with the word for "year" used by the date grammar; confirm.
    last_digit_exception = pynini.project(pynini.cross("năm", "5"), "input")
    last_digit = pynini.union(
        (pynini.project(graph_digit, "input") - last_digit_exception.arcsort()) @ graph_digit,
        graph_one,
        graph_four,
        graph_half,
    )
    # Optional single fractional digit after the quantity word, e.g. "... tỷ rưỡi".
    optional_fraction_graph = pynini.closure(
        delete_extra_space
        + pynutil.insert('fractional_part: "')
        + (last_digit | graph_half | graph_one | graph_four)
        + pynutil.insert('"'),
        0,
        1,
    )
    # Path 1: cardinal + quantity (+ optional fractional digit).
    res = (
        pynutil.insert('integer_part: "')
        + numbers
        + pynutil.insert('"')
        + delete_extra_space
        + pynutil.insert('quantity: "')
        + suffix
        + pynutil.insert('"')
        + optional_fraction_graph
    )
    # Path 2: decimal + quantity; "ngàn"/"nghìn" (thousand) also allowed here.
    res |= (
        decimal
        + delete_extra_space
        + pynutil.insert('quantity: "')
        + (suffix | "ngàn" | "nghìn")
        + pynutil.insert('"')
    )
    return res
class DecimalFst(GraphFst):
    """
    Finite state transducer for classifying decimal
    e.g. âm hai hai phẩy không năm tư năm tỉ -> decimal { negative: "true" integer_part: "22" fractional_part: "054" quantity: "tỉ" }
    e.g. không chấm ba lăm -> decimal { integer_part: "0" fractional_part: "35" }
    e.g. một triệu rưỡi -> decimal { integer_part: "1" quantity: "triệu" fractional_part: "5" }
    Args:
        cardinal: CardinalFst
    """

    def __init__(self, cardinal: GraphFst):
        super().__init__(name="decimal", kind="classify")
        # Cardinal grammar without the below-ten exception list.
        cardinal_graph = cardinal.graph_no_exception
        graph_decimal = graph_digit | pynini.string_file(get_abs_path("data/numbers/zero.tsv"))
        # Word-final digit variants: "mốt" -> 1, "tư" -> 4, "lăm" -> 5.
        graph_one = pynini.cross("mốt", "1")
        graph_four = pynini.cross("tư", "4")
        graph_five = pynini.cross("lăm", "5")
        # Fractional part: a single digit, or a digit sequence whose final
        # digit may use one of the word-final variants above.
        graph_decimal = pynini.union(
            graph_decimal,
            graph_four,
            pynini.closure(graph_decimal + delete_space, 1) + (graph_decimal | graph_four | graph_five | graph_one),
        )
        self.graph = graph_decimal
        # Decimal separators: "chấm" (point) and "phẩy" (comma).
        point = pynutil.delete("chấm") | pynutil.delete("phẩy")
        # "âm"/"trừ" mark a negative number.
        optional_graph_negative = pynini.closure(
            pynutil.insert("negative: ") + pynini.cross(pynini.union("âm", "trừ"), '"true"') + delete_extra_space,
            0,
            1,
        )
        graph_fractional = pynutil.insert('fractional_part: "') + graph_decimal + pynutil.insert('"')
        graph_integer = pynutil.insert('integer_part: "') + cardinal_graph + pynutil.insert('"')
        # The integer part is optional, e.g. "phẩy năm" -> fractional part only.
        final_graph_wo_sign = (
            pynini.closure(graph_integer + delete_extra_space, 0, 1) + point + delete_extra_space + graph_fractional
        )
        final_graph = optional_graph_negative + final_graph_wo_sign
        # Exposed for reuse by other grammars (e.g. MoneyFst): unsigned decimal,
        # optionally followed by a quantity word via get_quantity().
        self.final_graph_wo_negative = final_graph_wo_sign | get_quantity(
            final_graph_wo_sign, cardinal.graph_hundred_component_at_least_one_none_zero_digit,
        )
        final_graph |= optional_graph_negative + get_quantity(
            final_graph_wo_sign, cardinal.graph_hundred_component_at_least_one_none_zero_digit,
        )
        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/vi/taggers/decimal.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.vi.graph_utils import (
NEMO_DIGIT,
GraphFst,
convert_space,
delete_extra_space,
)
from nemo_text_processing.inverse_text_normalization.vi.utils import get_abs_path
from pynini.lib import pynutil
class MoneyFst(GraphFst):
    """
    Finite state transducer for classifying money
    e.g. mười hai đô la mỹ -> money { integer_part: "12" currency: "$" }
    e.g. mười phẩy chín đồng -> money { integer_part: "10.9" currency: "đ" }
    Args:
        cardinal: CardinalFst
        decimal: DecimalFst
    """

    def __init__(self, cardinal: GraphFst, decimal: GraphFst):
        super().__init__(name="money", kind="classify")
        # quantity, integer_part, fractional_part, currency
        cardinal_graph = cardinal.graph_no_exception
        graph_decimal_final = decimal.final_graph_wo_negative
        # "rưỡi" ("half") written as fractional digit "5".
        graph_half = pynini.cross("rưỡi", "5")
        # NOTE(review): currency.tsv presumably maps written symbol -> spoken
        # name; it is inverted here to parse speech — confirm against the data file.
        unit = pynini.string_file(get_abs_path("data/currency.tsv"))
        unit_singular = pynini.invert(unit)
        graph_unit_singular = pynutil.insert('currency: "') + convert_space(unit_singular) + pynutil.insert('"')
        # Pad a single digit to two digits, e.g. "5" -> "05".
        add_leading_zero_to_double_digit = (NEMO_DIGIT + NEMO_DIGIT) | (pynutil.insert("0") + NEMO_DIGIT)
        # twelve dollars fifty, only after integer
        # (negative weight favors the cents reading over a separate token).
        optional_cents_suffix = pynini.closure(
            delete_extra_space
            + pynutil.insert('fractional_part: "')
            + (pynutil.add_weight(cardinal_graph @ add_leading_zero_to_double_digit, -0.7) | graph_half)
            + pynutil.insert('"'),
            0,
            1,
        )
        graph_integer = (
            pynutil.insert('integer_part: "')
            + cardinal_graph
            + pynutil.insert('"')
            + delete_extra_space
            + graph_unit_singular
            + optional_cents_suffix
        )
        graph_decimal = graph_decimal_final + delete_extra_space + graph_unit_singular + optional_cents_suffix
        final_graph = graph_integer | graph_decimal
        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/vi/taggers/money.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.vi.graph_utils import (
NEMO_DIGIT,
NEMO_SPACE,
GraphFst,
delete_space,
)
from nemo_text_processing.inverse_text_normalization.vi.utils import get_abs_path
from pynini.lib import pynutil
class CardinalFst(GraphFst):
    """
    Finite state transducer for classifying cardinals
    e.g. trừ hai mươi ba -> cardinal { integer: "23" negative: "-" }
    e.g. hai nghìn lẻ chín -> cardinal { integer: "2009" }
    Numbers below ten are not converted.
    """

    def __init__(self):
        super().__init__(name="cardinal", kind="classify")
        # Core lexicons (spoken form -> written digit / digits).
        graph_zero = pynini.string_file(get_abs_path("data/numbers/zero.tsv"))
        graph_digit = pynini.string_file(get_abs_path("data/numbers/digit.tsv"))
        graph_ties = pynini.string_file(get_abs_path("data/numbers/ties.tsv"))
        graph_teen = pynini.string_file(get_abs_path("data/numbers/teen.tsv"))
        # Word-final digit variants used after tens: "mốt" -> 1, "tư" -> 4, "lăm" -> 5.
        graph_one = pynini.cross("mốt", "1")
        graph_four = pynini.cross("tư", "4")
        graph_five = pynini.cross("lăm", "5")
        # "rưỡi" ("half") -> 5 in a trailing slot, e.g. "nghìn rưỡi".
        graph_half = pynini.cross("rưỡi", "5")
        # Scale markers deleted once their value is encoded positionally.
        graph_hundred = pynini.cross("trăm", "")
        graph_ten = pynini.cross("mươi", "")
        # "linh"/"lẻ" fill an empty tens slot, e.g. "một trăm lẻ chín" -> 109.
        zero = pynini.cross(pynini.union("linh", "lẻ"), "0")
        optional_ten = pynini.closure(delete_space + graph_ten, 0, 1)
        # Exclude plain "năm" as a trailing digit — NOTE(review): presumably to
        # avoid ambiguity with "năm" ("year") in the date grammar; confirm.
        last_digit_exception = pynini.project(pynini.cross("năm", "5"), "input")
        last_digit = pynini.union(
            (pynini.project(graph_digit, "input") - last_digit_exception.arcsort()) @ graph_digit,
            graph_one,
            graph_four,
            graph_five,
        )
        # 100-999: hundreds digit + "trăm" + tens/units remainder.
        graph_hundred_ties_component = (graph_digit | graph_zero) + delete_space + graph_hundred
        graph_hundred_ties_component += delete_space
        graph_hundred_ties_component += pynini.union(
            graph_teen,
            (graph_half | graph_four | graph_one) + pynutil.insert("0"),
            graph_ties + optional_ten + ((delete_space + last_digit) | pynutil.insert("0")),
            zero + delete_space + (graph_digit | graph_four),
            pynutil.insert("00"),
        )
        # 10-99 with no spoken hundreds part: pad a leading zero.
        graph_hundred_ties_component |= (
            pynutil.insert("0")
            + delete_space
            + pynini.union(
                graph_teen,
                graph_ties + optional_ten + delete_space + last_digit,
                graph_ties + delete_space + graph_ten + pynutil.insert("0"),
                zero + delete_space + (graph_digit | graph_four),
            )
        )
        # Any three-digit group; a lone digit is padded to "00d".
        graph_hundred_component = graph_hundred_ties_component | (pynutil.insert("00") + delete_space + graph_digit)
        # Same group, constrained to contain at least one non-zero digit.
        graph_hundred_component_at_least_one_none_zero_digit = graph_hundred_component @ (
            pynini.closure(NEMO_DIGIT) + (NEMO_DIGIT - "0") + pynini.closure(NEMO_DIGIT)
        )
        self.graph_hundred_component_at_least_one_none_zero_digit = (
            graph_hundred_component_at_least_one_none_zero_digit
        )
        graph_hundred_ties_zero = graph_hundred_ties_component | pynutil.insert("000")
        # Scale groups; each may be silently absent (small insertion weight
        # so spoken groups are preferred over padded zeros).
        graph_thousands = pynini.union(
            graph_hundred_component_at_least_one_none_zero_digit
            + delete_space
            + pynutil.delete(pynini.union("nghìn", "ngàn")),
            pynutil.insert("000", weight=0.1),
        )
        # "vạn" = ten thousand.
        graph_ten_thousand = pynini.union(
            graph_hundred_component_at_least_one_none_zero_digit + delete_space + pynutil.delete("vạn"),
            pynutil.insert("0000", weight=0.1),
        )
        # Thousands digit following a "vạn" group.
        graph_ten_thousand_suffix = pynini.union(
            graph_digit + delete_space + pynutil.delete(pynini.union("nghìn", "ngàn")),
            pynutil.insert("0", weight=0.1),
        )
        graph_million = pynini.union(
            graph_hundred_component_at_least_one_none_zero_digit + delete_space + pynutil.delete("triệu"),
            pynutil.insert("000", weight=0.1),
        )
        graph_billion = pynini.union(
            graph_hundred_component_at_least_one_none_zero_digit
            + delete_space
            + pynutil.delete(pynini.union("tỉ", "tỷ")),
            pynutil.insert("000", weight=0.1),
        )
        # Full number: billions..units chain, the "vạn"-based form, the
        # "nghìn" + trailing-digit shortcut (e.g. "hai nghìn rưỡi" -> 2500),
        # or a bare digit / zero.
        graph = pynini.union(
            graph_billion
            + delete_space
            + graph_million
            + delete_space
            + graph_thousands
            + delete_space
            + graph_hundred_ties_zero,
            graph_ten_thousand + delete_space + graph_ten_thousand_suffix + delete_space + graph_hundred_ties_zero,
            graph_hundred_component_at_least_one_none_zero_digit
            + delete_space
            + pynutil.delete(pynini.union("nghìn", "ngàn"))
            + delete_space
            + (((last_digit | graph_half) + pynutil.insert("00")) | graph_hundred_ties_zero),
            graph_digit,
            graph_zero,
        )
        # Strip leading zeros, keeping a single "0" for zero itself.
        graph = graph @ pynini.union(
            pynutil.delete(pynini.closure("0")) + pynini.difference(NEMO_DIGIT, "0") + pynini.closure(NEMO_DIGIT), "0",
        )
        # don't convert cardinals from zero to nine inclusive
        graph_exception = pynini.project(pynini.union(graph_digit, graph_zero), "input")
        self.graph_no_exception = graph
        self.graph = (pynini.project(graph, "input") - graph_exception.arcsort()) @ graph
        # "âm"/"trừ" -> leading minus sign.
        optional_minus_graph = pynini.closure(
            pynutil.insert("negative: ") + pynini.cross(pynini.union("âm", "trừ"), '"-"') + NEMO_SPACE, 0, 1,
        )
        final_graph = optional_minus_graph + pynutil.insert('integer: "') + self.graph + pynutil.insert('"')
        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/vi/taggers/cardinal.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.vi.graph_utils import NEMO_ALPHA, GraphFst, insert_space
from nemo_text_processing.inverse_text_normalization.vi.utils import get_abs_path
from pynini.lib import pynutil
class ElectronicFst(GraphFst):
    """
    Finite state transducer for classifying electronic: as URLs, email addresses, etc.
    e.g. c d f một a còng a b c dot e d u -> tokens { electronic { username: "cdf1" domain: "abc.edu" } }
    """

    def __init__(self):
        super().__init__(name="electronic", kind="classify")
        # Local definition (this module does not import the shared helper):
        # deletes exactly one separating space between spoken characters.
        delete_extra_space = pynutil.delete(" ")
        alpha_num = (
            NEMO_ALPHA
            | pynini.string_file(get_abs_path("data/numbers/digit.tsv"))
            | pynini.string_file(get_abs_path("data/numbers/zero.tsv"))
        )
        # symbols.tsv is inverted so the spoken side becomes the input.
        symbols = pynini.string_file(get_abs_path("data/electronic/symbols.tsv")).invert()
        accepted_username = alpha_num | symbols
        # "chấm" ("dot") -> "."
        process_dot = pynini.cross("chấm", ".")
        username = (
            pynutil.insert('username: "')
            + alpha_num
            + pynini.closure(delete_extra_space + accepted_username)
            + pynutil.insert('"')
        )
        single_alphanum = pynini.closure(alpha_num + delete_extra_space) + alpha_num
        server = single_alphanum | pynini.string_file(get_abs_path("data/electronic/server_name.tsv"))
        domain = single_alphanum | pynini.string_file(get_abs_path("data/electronic/domain.tsv"))
        # One or more ".label" segments, e.g. ".edu", ".com.vn".
        multi_domain = (
            pynini.closure(process_dot + delete_extra_space + domain + delete_extra_space)
            + process_dot
            + delete_extra_space
            + domain
        )
        domain_graph = pynutil.insert('domain: "') + server + delete_extra_space + multi_domain + pynutil.insert('"')
        # Email: username + spoken "@" ("a còng"/"a móc"/"a vòng") + domain.
        graph = (
            username
            + delete_extra_space
            + pynutil.delete(pynini.union("a còng", "a móc", "a vòng"))
            + insert_space
            + delete_extra_space
            + domain_graph
        )
        ############# url ###
        protocol_end = pynini.cross(pynini.union("w w w", "www"), "www")
        # "hai chấm sẹc sẹc" is the spoken form of "://".
        protocol_start = (pynini.cross("h t t p", "http") | pynini.cross("h t t p s", "https")) + pynini.cross(
            " hai chấm sẹc sẹc ", "://"
        )
        # .com,
        ending = (
            delete_extra_space
            + symbols
            + delete_extra_space
            + (domain | pynini.closure(accepted_username + delete_extra_space) + accepted_username)
        )
        protocol = (
            pynini.closure(protocol_start, 0, 1)
            + protocol_end
            + delete_extra_space
            + process_dot
            + pynini.closure(delete_extra_space + accepted_username, 1)
            + pynini.closure(ending, 1, 2)
        )
        protocol = pynutil.insert('protocol: "') + protocol + pynutil.insert('"')
        graph |= protocol
        ########
        final_graph = self.add_tokens(graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/vi/taggers/electronic.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.vi.graph_utils import GraphFst, delete_extra_space, delete_space
from nemo_text_processing.inverse_text_normalization.vi.utils import get_abs_path
from pynini.lib import pynutil
# Shared number lexicons (spoken form -> written digits), loaded once at
# module import time and reused by the helper functions and DateFst below.
graph_teen = pynini.string_file(get_abs_path("data/numbers/teen.tsv")).optimize()
graph_digit = pynini.string_file(get_abs_path("data/numbers/digit.tsv")).optimize()
graph_zero = pynini.string_file(get_abs_path("data/numbers/zero.tsv")).optimize()
ties_graph = pynini.string_file(get_abs_path("data/numbers/ties.tsv")).optimize()
def _get_month_graph():
    """
    Return the transducer for month names, e.g. march -> march.
    """
    return pynini.string_file(get_abs_path("data/months.tsv")).optimize()
def _get_ties_graph():
    """
    Transducer for the two-digit range 20-99, e.g.
    hai ba -> 23
    """
    # Word-final unit variants that appear after a tens word.
    one = pynini.cross("mốt", "1")
    four = pynini.cross("tư", "4")
    five = pynini.cross("lăm", "5")
    tens_marker = pynini.cross("mươi", "")
    maybe_tens_marker = pynini.closure(delete_space + tens_marker, 0, 1)
    # Tens word + optional "mươi" + unit digit, e.g. "hai (mươi) ba" -> 23.
    tens_with_unit = ties_graph + maybe_tens_marker + delete_space + (graph_digit | one | four | five)
    # Round tens, e.g. "hai mươi" -> 20.
    round_tens = ties_graph + delete_space + tens_marker + pynutil.insert("0")
    return pynini.union(tens_with_unit, round_tens)
def _get_year_graph():
    """
    Transducer for year, e.g. hai không hai mươi -> 2020
    """

    def _get_digits_graph():
        # Two-digit tail starting with a zero: "linh"/"lẻ" or a spoken zero
        # followed by a digit, e.g. "lẻ chín" -> 09.
        zero = pynini.cross((pynini.union("linh", "lẻ")), "0")
        four = pynini.cross("tư", "4")
        graph = pynini.union(zero + delete_space + (graph_digit | four), graph_zero + delete_space + graph_digit,)
        # NOTE(review): optimize() appears to be relied on to mutate in place;
        # its return value is discarded here — confirm against pynini docs.
        graph.optimize()
        return graph

    def _get_hundreds_graph(graph_ties, graph_digits):
        # Three-digit years spoken with "trăm", e.g. 938.
        graph = (
            graph_digit
            + delete_space
            + pynutil.delete("trăm")
            + delete_space
            + (graph_teen | graph_ties | graph_digits)
        )
        return graph

    def _get_thousands_graph(graph_ties, graph_digits):
        # Four-digit years spoken with "nghìn"/"ngàn"; the hundreds group is
        # optional and padded with "0" when absent.
        graph_hundred_component = (
            (graph_digit | graph_zero) + delete_space + pynutil.delete("trăm")
        ) | pynutil.insert("0")
        graph = (
            graph_digit
            + delete_space
            + pynutil.delete(pynini.union("nghìn", "ngàn"))
            + delete_space
            + graph_hundred_component
            + delete_space
            + (graph_teen | graph_ties | graph_digits)
        )
        return graph

    graph_ties = _get_ties_graph()
    graph_digits = _get_digits_graph()
    graph_hundreds = _get_hundreds_graph(graph_ties, graph_digits)
    graph_thousands = _get_thousands_graph(graph_ties, graph_digits)
    year_graph = (
        # 20 19, 40 12, 2012, 2 0 0 5, 2 0 17, 938 - assuming no limit on the year
        graph_digit
        + delete_space
        + (graph_digit | graph_zero)
        + delete_space
        + (graph_teen | graph_ties | graph_digits)
        | graph_thousands
        | graph_hundreds
        | (graph_digit + pynutil.insert("0") + delete_space + (graph_ties | graph_digits | graph_teen))
    )
    year_graph.optimize()
    return year_graph
class DateFst(GraphFst):
    """
    Finite state transducer for classifying date,
    e.g. mười lăm tháng một năm hai nghìn mười hai -> date { day: "15" month: "1" year: "2012" preserve_order: true }
    e.g. ngày ba mốt tháng mười hai năm một chín chín chín -> date { day: "31" month: "12" year: "1999" preserve_order: true }
    e.g. năm hai không hai mốt -> date { year: "2021" preserve_order: true }
    Args:
        cardinal: CardinalFst
    """

    def __init__(self, cardinal: GraphFst):
        super().__init__(name="date", kind="classify")
        cardinal_graph = cardinal.graph_no_exception
        year_graph = _get_year_graph()
        # Slight penalty so the year reading does not beat plain cardinals.
        YEAR_WEIGHT = 0.001
        year_graph = pynutil.add_weight(year_graph, YEAR_WEIGHT)
        month_graph = _get_month_graph()
        month_graph = pynutil.insert('month: "') + month_graph + pynutil.insert('"')
        # Exclude "năm" as a month name: it is also the word for "year"
        # (deleted before the year graph below).
        month_exception = pynini.project(pynini.cross("năm", "5"), "input")
        month_graph_exception = (pynini.project(month_graph, "input") - month_exception.arcsort()) @ month_graph
        day_graph = pynutil.insert('day: "') + cardinal_graph + pynutil.insert('"')
        # day_suffix = pynini.union("ngày", "mùng")
        # optional_day = pynini.closure(day_suffix + delete_space, 0, 1)
        graph_month = pynutil.delete("tháng") + delete_space + month_graph_exception
        # Year following a month; the added weight is cancelled (-YEAR_WEIGHT)
        # so date-internal years are not double-penalized.
        graph_year = (
            delete_extra_space
            + pynutil.delete("năm")
            + delete_extra_space
            + pynutil.insert('year: "')
            + pynutil.add_weight(year_graph, -YEAR_WEIGHT)
            + pynutil.insert('"')
        )
        optional_graph_year = pynini.closure(graph_year, 0, 1)
        graph_my = pynutil.delete("tháng") + delete_space + month_graph + graph_year
        graph_dmy = (
            day_graph + delete_space + pynutil.delete("tháng") + delete_extra_space + month_graph + optional_graph_year
        )
        # Standalone year, e.g. "năm hai không hai mốt" -> year: "2021"
        # (intentionally rebinds the name graph_year for the final union).
        graph_year = (
            pynutil.delete("năm") + delete_extra_space + pynutil.insert('year: "') + year_graph + pynutil.insert('"')
        )
        final_graph = (graph_dmy | graph_my | graph_month | graph_year) + pynutil.insert(" preserve_order: true")
        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/vi/taggers/date.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.vi.graph_utils import NEMO_NOT_SPACE, GraphFst
from pynini.lib import pynutil
class WordFst(GraphFst):
    """
    Finite state transducer for classifying plain tokens that belong to no
    special class; this is the default fallback class.
    e.g. sleep -> tokens { name: "sleep" }
    """

    def __init__(self):
        super().__init__(name="word", kind="classify")
        # Any non-empty run of non-space characters becomes a name token.
        token = pynini.closure(NEMO_NOT_SPACE, 1)
        graph = pynutil.insert('name: "') + token + pynutil.insert('"')
        self.fst = graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/vi/taggers/word.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.vi.graph_utils import (
NEMO_CHAR,
NEMO_DIGIT,
GraphFst,
delete_space,
insert_space,
)
from pynini.lib import pynutil
class TimeFst(GraphFst):
    """
    Finite state transducer for verbalizing time, e.g.
    time { hours: "3" } -> 3h
    time { hours: "12" minutes: "30" } -> 12:30
    time { hours: "1" minutes: "12" second: "22"} -> 1:12:22
    time { minutes: "36" second: "45"} -> 36p45s
    time { hours: "2" zone: "gmt" } -> 2h gmt
    """

    def __init__(self):
        super().__init__(name="time", kind="verbalize")
        # Pad a single digit to two digits, e.g. "5" -> "05".
        add_leading_zero_to_double_digit = (NEMO_DIGIT + NEMO_DIGIT) | (pynutil.insert("0") + NEMO_DIGIT)
        # Each field parser strips the 'key: "..."' wrapper, keeping the digits.
        hour = (
            pynutil.delete("hours:")
            + delete_space
            + pynutil.delete('"')
            + pynini.closure(NEMO_DIGIT, 1)
            + pynutil.delete('"')
        )
        minute = (
            pynutil.delete("minutes:")
            + delete_space
            + pynutil.delete('"')
            + pynini.closure(NEMO_DIGIT, 1)
            + pynutil.delete('"')
        )
        second = (
            pynutil.delete("seconds:")
            + delete_space
            + pynutil.delete('"')
            + pynini.closure(NEMO_DIGIT, 1)
            + pynutil.delete('"')
        )
        # Time zone is kept verbatim, preceded by one space.
        zone = (
            delete_space
            + insert_space
            + pynutil.delete("zone:")
            + delete_space
            + pynutil.delete('"')
            + pynini.closure(NEMO_CHAR - " ", 1)
            + pynutil.delete('"')
        )
        optional_zone = pynini.closure(zone, 0, 1)
        optional_second = pynini.closure(
            delete_space + pynutil.insert(":") + (second @ add_leading_zero_to_double_digit), 0, 1,
        )
        # hour only -> "3h"
        graph_h = hour + pynutil.insert("h")
        # hour:minute(:second) -> "12:30" / "1:12:22"
        graph_hms = (
            hour + delete_space + pynutil.insert(":") + (minute @ add_leading_zero_to_double_digit) + optional_second
        )
        # minute + second without an hour -> "36p45s"
        graph_ms = (
            minute
            + delete_space
            + pynutil.insert("p")
            + (second @ add_leading_zero_to_double_digit)
            + pynutil.insert("s")
        )
        graph = (graph_h | graph_ms | graph_hms) + optional_zone
        delete_tokens = self.delete_tokens(graph)
        self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/vi/verbalizers/time.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.vi.graph_utils import (
NEMO_CHAR,
NEMO_NOT_QUOTE,
GraphFst,
delete_space,
)
from pynini.lib import pynutil
class MeasureFst(GraphFst):
    """
    Finite state transducer for verbalizing measure, e.g.
    measure { negative: "true" cardinal { integer: "12" } units: "kg" } -> -12 kg
    Args:
        decimal: DecimalFst
        cardinal: CardinalFst
    """

    def __init__(self, decimal: GraphFst, cardinal: GraphFst):
        super().__init__(name="measure", kind="verbalize")
        # 'negative: "true"' (if present) becomes a leading minus sign.
        optional_sign = pynini.closure(pynini.cross('negative: "true"', "-"), 0, 1)
        # Unit string is kept verbatim (any run of non-space characters).
        unit = (
            pynutil.delete("units:")
            + delete_space
            + pynutil.delete('"')
            + pynini.closure(NEMO_CHAR - " ", 1)
            + pynutil.delete('"')
            + delete_space
        )
        # Nested decimal token: 'decimal { ... }' verbalized via decimal.numbers.
        graph_decimal = (
            pynutil.delete("decimal {")
            + delete_space
            + optional_sign
            + delete_space
            + decimal.numbers
            + delete_space
            + pynutil.delete("}")
        )
        # Nested cardinal token: 'cardinal { ... }' verbalized via cardinal.numbers.
        graph_cardinal = (
            pynutil.delete("cardinal {")
            + delete_space
            + optional_sign
            + delete_space
            + cardinal.numbers
            + delete_space
            + pynutil.delete("}")
        )
        # Optional trailing fractional_part rendered as ".<digits>".
        fractional = (
            pynutil.insert(".")
            + pynutil.delete("fractional_part:")
            + delete_space
            + pynutil.delete('"')
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + pynutil.delete('"')
        )
        optional_fractional = pynini.closure(fractional + delete_space, 0, 1)
        # number [.fraction] + space + unit
        graph = (
            (graph_cardinal | graph_decimal)
            + delete_space
            + optional_fractional
            + pynutil.insert(" ")
            + unit
            + delete_space
        )
        delete_tokens = self.delete_tokens(graph)
        self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/vi/verbalizers/measure.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.vi.graph_utils import NEMO_NOT_QUOTE, GraphFst, delete_space
from pynini.lib import pynutil
class FractionFst(GraphFst):
    """
    Finite state transducer for verbalizing fraction,
    e.g. fraction { numerator: "2" denominator: "3" } -> 2/3
    e.g. fraction { numerator: "20" denominator: "3" negative: "true" } -> -20/3
    """

    def __init__(self):
        super().__init__(name="fraction", kind="verbalize")
        # 'negative: "true"' (if present) becomes a leading minus sign.
        sign = pynini.closure(pynini.cross('negative: "true"', "-") + delete_space, 0, 1)
        num = pynutil.delete('numerator: "') + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete('"')
        den = (
            pynutil.insert("/")
            + pynutil.delete('denominator: "')
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + pynutil.delete('"')
        )
        slash_form = (num + delete_space + den).optimize()
        # Exposed without the sign for reuse by other verbalizers.
        self.numbers = slash_form
        self.fst = self.delete_tokens(sign + slash_form).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/vi/verbalizers/fraction.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.vi.graph_utils import NEMO_NOT_QUOTE, GraphFst
from pynini.lib import pynutil
class TelephoneFst(GraphFst):
    """
    Finite state transducer for verbalizing telephone, e.g.
    telephone { number_part: "1231235678" }
    -> 1231235678
    """

    def __init__(self):
        super().__init__(name="telephone", kind="verbalize")
        # Strip the field wrapper; the digits themselves pass through verbatim.
        digits = (
            pynutil.delete('number_part: "')
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + pynutil.delete('"')
        )
        self.fst = self.delete_tokens(digits).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/vi/verbalizers/telephone.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.vi.graph_utils import NEMO_NOT_QUOTE, GraphFst, delete_space
from pynini.lib import pynutil
class OrdinalFst(GraphFst):
    """
    Finite state transducer for verbalizing ordinal, e.g.
    ordinal { integer: "2" } -> thứ 2
    """

    def __init__(self):
        super().__init__(name="ordinal", kind="verbalize")
        integer = (
            pynutil.delete("integer:")
            + delete_space
            + pynutil.delete('"')
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + pynutil.delete('"')
        )
        # Vietnamese ordinals are written with the prefix "thứ ".
        graph = pynutil.insert("thứ ") + integer
        self.fst = self.delete_tokens(graph).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/vi/verbalizers/ordinal.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.inverse_text_normalization.vi.graph_utils import GraphFst
from nemo_text_processing.inverse_text_normalization.vi.verbalizers.cardinal import CardinalFst
from nemo_text_processing.inverse_text_normalization.vi.verbalizers.date import DateFst
from nemo_text_processing.inverse_text_normalization.vi.verbalizers.decimal import DecimalFst
from nemo_text_processing.inverse_text_normalization.vi.verbalizers.electronic import ElectronicFst
from nemo_text_processing.inverse_text_normalization.vi.verbalizers.fraction import FractionFst
from nemo_text_processing.inverse_text_normalization.vi.verbalizers.measure import MeasureFst
from nemo_text_processing.inverse_text_normalization.vi.verbalizers.money import MoneyFst
from nemo_text_processing.inverse_text_normalization.vi.verbalizers.ordinal import OrdinalFst
from nemo_text_processing.inverse_text_normalization.vi.verbalizers.telephone import TelephoneFst
from nemo_text_processing.inverse_text_normalization.vi.verbalizers.time import TimeFst
from nemo_text_processing.inverse_text_normalization.vi.verbalizers.whitelist import WhiteListFst
class VerbalizeFst(GraphFst):
    """
    Composes other verbalizer grammars.
    For deployment, this grammar will be compiled and exported to OpenFst Finite State Archive (FAR) File.
    More details to deployment at NeMo/tools/text_processing_deployment.
    """

    def __init__(self):
        super().__init__(name="verbalize", kind="verbalize")
        # These instances are shared: measure and money reuse the decimal and
        # cardinal sub-grammars rather than rebuilding them.
        cardinal = CardinalFst()
        decimal = DecimalFst()
        fraction = FractionFst()
        # Union of every semiotic-class verbalizer.
        graph = (
            TimeFst().fst
            | DateFst().fst
            | MoneyFst(decimal=decimal).fst
            | MeasureFst(decimal=decimal, cardinal=cardinal).fst
            | OrdinalFst().fst
            | fraction.fst
            | decimal.fst
            | cardinal.fst
            | WhiteListFst().fst
            | TelephoneFst().fst
            | ElectronicFst().fst
        )
        self.fst = graph
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/vi/verbalizers/verbalize.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.vi.graph_utils import (
NEMO_CHAR,
NEMO_SIGMA,
GraphFst,
delete_space,
)
from pynini.lib import pynutil
class WhiteListFst(GraphFst):
    """
    Finite state transducer for verbalizing whitelist
        e.g. tokens { name: "mrs." } -> mrs.
    """

    def __init__(self):
        super().__init__(name="whitelist", kind="verbalize")
        # A whitelist token is any non-empty run of characters without spaces.
        token = pynini.closure(NEMO_CHAR - " ", 1)
        # Strip the serialized wrapper: name: "<token>" -> <token>
        graph = pynutil.delete("name:") + delete_space + pynutil.delete('"') + token + pynutil.delete('"')
        # Restore regular spaces that were protected as non-breaking spaces.
        graph = graph @ pynini.cdrewrite(pynini.cross(u"\u00A0", " "), "", "", NEMO_SIGMA)
        self.fst = graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/vi/verbalizers/whitelist.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.vi.graph_utils import GraphFst, delete_extra_space, delete_space
from nemo_text_processing.inverse_text_normalization.vi.verbalizers.verbalize import VerbalizeFst
from nemo_text_processing.inverse_text_normalization.vi.verbalizers.word import WordFst
from pynini.lib import pynutil
class VerbalizeFinalFst(GraphFst):
    """
    Finite state transducer that verbalizes an entire sentence, e.g.
    tokens { name: "its" } tokens { time { hours: "12" minutes: "30" } } tokens { name: "now" } -> its 12:30 now
    """

    def __init__(self):
        super().__init__(name="verbalize_final", kind="verbalize")
        # A token body is either a semiotic-class verbalization or a plain word.
        token_types = VerbalizeFst().fst | WordFst().fst
        # Unwrap one serialized token: tokens { ... } -> verbalized content.
        one_token = (
            pynutil.delete("tokens")
            + delete_space
            + pynutil.delete("{")
            + delete_space
            + token_types
            + delete_space
            + pynutil.delete("}")
        )
        # One or more tokens, single space between them, outer whitespace removed.
        graph = delete_space + pynini.closure(one_token + delete_extra_space) + one_token + delete_space
        self.fst = graph
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/vi/verbalizers/verbalize_final.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/vi/verbalizers/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.vi.graph_utils import NEMO_NOT_QUOTE, GraphFst, delete_space
from pynini.lib import pynutil
class DecimalFst(GraphFst):
    """
    Finite state transducer for verbalizing decimal, e.g.
        decimal { negative: "true" integer_part: "12" fractional_part: "5006" quantity: "tỷ" } -> -12.5006 tỷ
    """

    def __init__(self):
        super().__init__(name="decimal", kind="verbalize")

        def quoted_field(field):
            # Unwrap <field>: "<value>" keeping only the value.
            return (
                pynutil.delete(field + ":")
                + delete_space
                + pynutil.delete('"')
                + pynini.closure(NEMO_NOT_QUOTE, 1)
                + pynutil.delete('"')
            )

        optional_sign = pynini.closure(pynini.cross('negative: "true"', "-") + delete_space, 0, 1)
        integer = quoted_field("integer_part")
        # Fractional part is prefixed with the decimal point on output.
        fractional = pynutil.insert(".") + quoted_field("fractional_part")
        # Quantity (e.g. "tỷ") is appended after a space.
        quantity = quoted_field("quantity")

        optional_integer = pynini.closure(integer + delete_space, 0, 1)
        optional_fractional = pynini.closure(fractional + delete_space, 0, 1)
        optional_quantity = pynini.closure(pynutil.insert(" ") + quantity + delete_space, 0, 1)

        graph = optional_integer + optional_fractional + optional_quantity
        # Exposed without the sign so money/measure can reuse the bare number.
        self.numbers = graph
        delete_tokens = self.delete_tokens(optional_sign + graph)
        self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/vi/verbalizers/decimal.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.vi.graph_utils import NEMO_CHAR, GraphFst, delete_space
from pynini.lib import pynutil
class MoneyFst(GraphFst):
    """
    Finite state transducer for verbalizing money, e.g.
        money { integer_part: "12" fractional_part: "05" currency: "$" } -> 12.05$

    Args:
        decimal: DecimalFst
    """

    def __init__(self, decimal: GraphFst):
        super().__init__(name="money", kind="verbalize")
        # currency: "<symbol>" -> <symbol>; the symbol contains no spaces.
        currency_symbol = pynini.closure(NEMO_CHAR - " ", 1)
        unit = (
            pynutil.delete("currency:")
            + delete_space
            + pynutil.delete('"')
            + currency_symbol
            + pynutil.delete('"')
        )
        # Number first, currency unit appended directly (e.g. 12.05$).
        graph = decimal.numbers + delete_space + unit
        self.fst = self.delete_tokens(graph).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/vi/verbalizers/money.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.vi.graph_utils import NEMO_NOT_QUOTE, GraphFst, delete_space
from pynini.lib import pynutil
class CardinalFst(GraphFst):
    """
    Finite state transducer for verbalizing cardinal
        e.g. cardinal { integer: "23" negative: "-" } -> -23
    """

    def __init__(self):
        super().__init__(name="cardinal", kind="verbalize")
        # negative: "<char>" -> <char>; a single non-quote character (e.g. "-").
        sign = (
            pynutil.delete("negative:")
            + delete_space
            + pynutil.delete('"')
            + NEMO_NOT_QUOTE
            + pynutil.delete('"')
            + delete_space
        )
        optional_sign = pynini.closure(sign, 0, 1)
        # integer: "<digits>" -> <digits>
        number = (
            pynutil.delete("integer:")
            + delete_space
            + pynutil.delete('"')
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + pynutil.delete('"')
        )
        # Exposed without the sign for reuse by other verbalizers.
        self.numbers = number
        delete_tokens = self.delete_tokens(optional_sign + number)
        self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/vi/verbalizers/cardinal.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.vi.graph_utils import NEMO_NOT_QUOTE, GraphFst, delete_space
from pynini.lib import pynutil
class ElectronicFst(GraphFst):
    """
    Finite state transducer for verbalizing electronic
        e.g. tokens { electronic { username: "cdf1" domain: "abc.edu" } } -> [email protected]
    """

    def __init__(self):
        super().__init__(name="electronic", kind="verbalize")

        def field_value(field):
            # Unwrap <field>: "<value>" keeping only the value.
            return (
                pynutil.delete(field + ":")
                + delete_space
                + pynutil.delete('"')
                + pynini.closure(NEMO_NOT_QUOTE, 1)
                + pynutil.delete('"')
            )

        # Email address: username @ domain.
        graph = field_value("username") + delete_space + pynutil.insert("@") + field_value("domain")
        # Bare URL: protocol field alone.
        graph |= field_value("protocol")
        delete_tokens = self.delete_tokens(graph)
        self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/vi/verbalizers/electronic.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.vi.graph_utils import NEMO_NOT_QUOTE, GraphFst, delete_space
from pynini.lib import pynutil
class DateFst(GraphFst):
    """
    Finite state transducer for verbalizing date, e.g.
        date { month: "1" year: "2012"} -> tháng 1 năm 2012
        date { day: "5" month: "10" year: "2021" preserve_order: true } -> 5 tháng 10 năm 2021
    """

    def __init__(self):
        super().__init__(name="date", kind="verbalize")
        # day: "<value>" -> <value>
        day = (
            pynutil.delete("day:")
            + delete_space
            + pynutil.delete('"')
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + pynutil.delete('"')
        )
        # month: "<value>" -> <value>
        month = (
            pynutil.delete("month:")
            + delete_space
            + pynutil.delete('"')
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + pynutil.delete('"')
        )
        # year: "<value>" -> <value>
        # NOTE(review): unlike day/month there is a delete_space before the
        # closing quote — presumably tolerates trailing whitespace inside the
        # serialized year field; confirm against the date tagger.
        year = (
            pynutil.delete("year:")
            + delete_space
            + pynutil.delete('"')
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + delete_space
            + pynutil.delete('"')
        )
        # (day) month year
        # day month
        # Insert the Vietnamese date words: "tháng" (month), "năm" (year).
        graph_dm = day + delete_space + pynutil.insert(" tháng ") + month
        graph_dmy = graph_dm + delete_space + pynutil.insert(" năm ") + year
        graph_m = pynutil.insert("tháng ") + month
        graph_my = pynutil.insert("tháng ") + month + delete_space + pynutil.insert(" năm ") + year
        graph_y = pynutil.insert("năm ") + year
        # Consume the optional ordering hints serialized by the tagger
        # (either preserve_order: true or a quoted field_order value).
        optional_preserve_order = pynini.closure(
            pynutil.delete("preserve_order:") + delete_space + pynutil.delete("true") + delete_space
            | pynutil.delete("field_order:")
            + delete_space
            + pynutil.delete('"')
            + NEMO_NOT_QUOTE
            + pynutil.delete('"')
            + delete_space
        )
        final_graph = (graph_y | graph_m | graph_dm | graph_dmy | graph_my) + delete_space + optional_preserve_order
        delete_tokens = self.delete_tokens(final_graph)
        self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/vi/verbalizers/date.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.vi.graph_utils import (
NEMO_CHAR,
NEMO_SIGMA,
GraphFst,
delete_space,
)
from pynini.lib import pynutil
class WordFst(GraphFst):
    """
    Finite state transducer for verbalizing plain tokens
        e.g. tokens { name: "sleep" } -> sleep
    """

    def __init__(self):
        super().__init__(name="word", kind="verbalize")
        # A plain token is any non-empty run of characters without spaces.
        token = pynini.closure(NEMO_CHAR - " ", 1)
        # Strip the serialized wrapper: name: "<token>" -> <token>
        graph = pynutil.delete("name:") + delete_space + pynutil.delete('"') + token + pynutil.delete('"')
        # Restore regular spaces that were protected as non-breaking spaces.
        graph = graph @ pynini.cdrewrite(pynini.cross(u"\u00A0", " "), "", "", NEMO_SIGMA)
        self.fst = graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/vi/verbalizers/word.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/vi/data/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/vi/data/numbers/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/vi/data/ordinals/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/vi/data/math/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/vi/data/electronic/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/vi/data/time/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/sv/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
def get_abs_path(rel_path):
    """
    Get absolute path

    Args:
        rel_path: relative path to this file

    Returns absolute path
    """
    # os.path.join handles path separators portably instead of hard-coding '/'.
    return os.path.join(os.path.dirname(os.path.abspath(__file__)), rel_path)
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/sv/utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright (c) 2023, Jim O'Regan.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.sv.utils import get_abs_path
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_SPACE, GraphFst
from nemo_text_processing.text_normalization.sv.utils import get_abs_path as get_tn_abs_path
from nemo_text_processing.text_normalization.sv.utils import load_labels
from pynini.lib import pynutil
# Quarter anchors on the clock face and their Swedish spoken forms.
QUARTERS = {15: "kvart över", 30: "halv", 45: "kvart i"}


def get_all_to_or_from_numbers():
    """Build minute remapping pairs relative to each quarter anchor.

    For each anchor minute in QUARTERS returns, keyed by the anchor's spoken
    word, the (minute, offset) string pairs for minutes after the anchor
    ("över") and before it ("i").
    """
    output = {}
    for anchor, word in QUARTERS.items():
        # Minutes past the anchor: offset is how far past.
        past_pairs = [(str(i), str(i - anchor)) for i in range(anchor + 1, 60)]
        # Minutes before the anchor: offset is how far until the anchor.
        to_pairs = [(str(i), str(anchor - i)) for i in range(1, anchor)]
        output[word] = {"över": past_pairs, "i": to_pairs}
    return output
def get_all_to_or_from_fst(cardinal: GraphFst):
    """Compose the quarter-relative minute maps with the cardinal grammar.

    Returns, per quarter word and direction ("över"/"i"), an FST that accepts
    a minute and outputs its cardinal offset relative to that quarter anchor.
    """
    all_pairs = get_all_to_or_from_numbers()
    output = {}
    for word, directions in all_pairs.items():
        per_direction = {}
        for when, pairs in directions.items():
            # Renamed from `map` to avoid shadowing the builtin.
            pair_map = pynini.string_map(pairs)
            per_direction[when] = pynini.project(pair_map, "input") @ pair_map @ cardinal.graph
        output[word] = per_direction
    return output
class TimeFst(GraphFst):
    """
    Finite state transducer for classifying time
        e.g. klockan åtta e s t -> time { hours: "kl. 8" zone: "e s t" }
        e.g. klockan tretton -> time { hours: "kl. 13" }
        e.g. klockan tretton tio -> time { hours: "kl. 13" minutes: "10" }
        e.g. kvart i tolv -> time { minutes: "45" hours: "11" }
        e.g. kvart över tolv -> time { minutes: "15" hours: "12" }

    Args:
        tn_cardinal_tagger: TN cardinal verbalizer
    """

    def __init__(self, tn_cardinal_tagger: GraphFst):
        super().__init__(name="time", kind="classify")
        # Spoken time suffixes from the suffix table, inverted for ITN.
        suffixes = pynini.invert(pynini.string_map(load_labels(get_abs_path("data/time/suffix.tsv"))))
        self.suffixes = suffixes
        # "klockan" / "klockan är" -> written abbreviation "kl."
        klockan = pynini.union(pynini.cross("klockan", "kl."), pynini.cross("klockan är", "kl."))
        klockan_graph_piece = pynutil.insert("hours: \"") + klockan
        # Minutes-to-the-hour remap: spoken "i"-minute m means 60-m past.
        minutes_to = pynini.string_map([(str(i), str(60 - i)) for i in range(1, 60)])
        minutes = pynini.string_map([str(i) for i in range(1, 60)])
        # Spoken minute words -> digits, via the (inverted) TN cardinal grammar.
        minutes_inverse = pynini.invert(pynini.project(minutes_to, "input") @ tn_cardinal_tagger.graph_en)
        minutes = pynini.invert(pynini.project(minutes, "input") @ tn_cardinal_tagger.graph_en)
        # Spoken minute words -> remapped (60-m) digits.
        self.minute_words_to_words = minutes_inverse @ minutes_to @ tn_cardinal_tagger.graph_en
        self.minute_words_to_words_graph = (
            pynutil.insert("minutes: \"") + self.minute_words_to_words + pynutil.insert("\"")
        )
        time_zone_graph = pynini.invert(pynini.string_file(get_tn_abs_path("data/time/time_zone.tsv")))
        # Optional trailing suffix and/or time-zone fields.
        final_suffix = pynutil.insert("suffix: \"") + suffixes + pynutil.insert("\"")
        final_suffix_optional = pynini.closure(NEMO_SPACE + final_suffix, 0, 1)
        final_time_zone = pynutil.insert("zone: \"") + time_zone_graph + pynutil.insert("\"")
        final_time_zone_optional = pynini.closure(NEMO_SPACE + final_time_zone, 0, 1)
        both_optional_suffixes = final_suffix_optional + final_time_zone_optional
        # At least one of suffix / zone must be present in this variant.
        one_optional_suffix = NEMO_SPACE + final_suffix + final_time_zone_optional
        one_optional_suffix |= final_suffix_optional + NEMO_SPACE + final_time_zone
        # Hours 0-23 as spoken cardinals -> digits.
        labels_hour = [str(x) for x in range(0, 24)]
        hours = pynini.invert(pynini.union(*labels_hour) @ tn_cardinal_tagger.graph)
        self.hours = hours
        hours_graph = pynutil.insert("hours: \"") + hours + pynutil.insert("\"")
        klockan_hour = klockan_graph_piece + NEMO_SPACE + hours + pynutil.insert("\"")
        hours_graph |= klockan_hour
        hour_sfx = hours_graph + one_optional_suffix
        def hours_to_pairs():
            # Pairs (spoken hour, written hour) for to-the-hour phrases:
            # "kvart i tolv" refers to hour 11, so spoken 12 -> written 1 wraps.
            for x in range(1, 13):
                if x == 12:
                    y = 1
                else:
                    y = x + 1
                yield x, y
        hours_to = pynini.string_map([(str(x[0]), str(x[1])) for x in hours_to_pairs()])
        hours_to = pynini.invert(hours_to @ tn_cardinal_tagger.graph)
        self.hours_to = hours_to
        hours_to_graph = pynutil.insert("hours: \"") + hours_to + pynutil.insert("\"")
        # Bare quarter words without "över": "halv" -> 30, "kvart i" -> 45.
        bare_quarters_to = pynini.string_map([(x[1], str(x[0])) for x in QUARTERS.items() if not "över" in x[1]])
        bare_quarters_from = pynini.cross("kvart över", "15")
        self.quarters_to = bare_quarters_to
        self.quarters_from = bare_quarters_from
        prefix_minutes_to = bare_quarters_to
        prefix_minutes_from = bare_quarters_from
        # Quarter-relative phrases like "fem i halv" (five before half past).
        from_to_output = get_all_to_or_from_fst(tn_cardinal_tagger)
        for _, word in QUARTERS.items():
            for when in ["över", "i"]:
                num_part = pynini.invert(from_to_output[word][when])
                num_part_end = num_part + pynutil.delete(f" {when} {word}")
                # "kvart över"-anchored phrases count from the hour ("from");
                # "halv"/"kvart i" phrases count toward the next hour ("to").
                if word == "kvart över":
                    prefix_minutes_from |= num_part_end
                else:
                    prefix_minutes_to |= num_part_end
        # Plain "<m> i" / "<m> över" forms.
        prefix_minutes_to |= minutes_inverse + pynutil.delete(" i")
        prefix_minutes_from |= minutes + pynutil.delete(" över")
        prefix_minutes_to_graph = pynutil.insert("minutes: \"") + prefix_minutes_to + pynutil.insert("\"")
        # "to" phrases use the wrapped hour map; "from" phrases use the hour as spoken.
        graph_to_prefixed = prefix_minutes_to_graph + NEMO_SPACE + hours_to_graph
        prefix_minutes_from_graph = pynutil.insert("minutes: \"") + prefix_minutes_from + pynutil.insert("\"")
        graph_from_prefixed = prefix_minutes_from_graph + NEMO_SPACE + hours_graph
        minutes_graph = pynutil.insert("minutes: \"") + minutes + pynutil.insert("\"")
        # Seconds reuse the same 1-59 grammar as minutes.
        seconds_graph = pynutil.insert("seconds: \"") + minutes + pynutil.insert("\"")
        hm_sfx = hours_graph + NEMO_SPACE + minutes_graph + one_optional_suffix
        hms_sfx = hours_graph + NEMO_SPACE + minutes_graph + NEMO_SPACE + seconds_graph + one_optional_suffix
        graph = graph_to_prefixed | graph_from_prefixed | klockan_hour + both_optional_suffixes | hour_sfx
        graph |= hm_sfx
        graph |= hms_sfx
        self.fst = self.add_tokens(graph).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/sv/taggers/time.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright (c) 2023, Jim O'Regan for Språkbanken Tal
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_SPACE, GraphFst, convert_space
from pynini.lib import pynutil
class FractionFst(GraphFst):
    """
    Finite state transducer for classifying fraction
        e.g. halv -> tokens { name: "1/2" }
        e.g. ett och en halv -> tokens { name: "1 1/2" }
        e.g. tre och fyra femtedelar -> tokens { name: "3 4/5" }

    Args:
        itn_cardinal_tagger: ITN cardinal tagger
        tn_fraction_tagger: TN fraction tagger
    """

    def __init__(self, itn_cardinal_tagger: GraphFst, tn_fraction_tagger: GraphFst):
        super().__init__(name="fraction", kind="classify")
        cardinal = itn_cardinal_tagger.graph_no_exception
        # Invert the TN fraction grammar: spoken denominator words -> digits.
        fractions = tn_fraction_tagger.fractions_any.invert().optimize()
        minus = pynini.cross("minus ", "-")
        optional_minus = pynini.closure(minus, 0, 1)
        # "och <denominator>" with no spoken numerator implies numerator 1.
        no_numerator = pynini.cross("och ", "1/")
        integer = optional_minus + cardinal
        # Accepted shapes: integer + implicit-1 fraction; integer + fraction
        # (with or without "och"); integer + "en halv"; or a bare fraction.
        self.graph = pynini.union(
            integer + NEMO_SPACE + no_numerator + fractions,
            integer + NEMO_SPACE + cardinal + pynini.cross(" ", "/") + fractions,
            integer + pynini.cross(" och ", " ") + cardinal + pynini.cross(" ", "/") + fractions,
            integer + pynini.cross(" och ", " ") + pynini.cross("en halv", "1/2"),
            cardinal + pynini.cross(" ", "/") + fractions,
        )
        graph = pynutil.insert("name: \"") + convert_space(self.graph) + pynutil.insert("\"")
        self.fst = graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/sv/taggers/fraction.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_SPACE, GraphFst, convert_space
from pynini.lib import pynutil
class TelephoneFst(GraphFst):
    """
    Finite state transducer for classifying telephone numbers, e.g.
        noll åtta sjuhundraåttionio femtiotvå tjugofem -> tokens { name: "08-789 52 25" }

    Args:
        tn_cardinal_tagger: TN Cardinal Tagger
    """

    def __init__(self, tn_cardinal_tagger: GraphFst, tn_telephone_tagger: GraphFst):
        super().__init__(name="telephone", kind="classify")
        # NOTE: country-code handling (tn_telephone_tagger.country_plus_area_code)
        # is currently disabled; only bare area codes are recognized.
        area_codes = pynini.invert(tn_telephone_tagger.area_codes).optimize()
        # Area code followed by a hyphen in the written form.
        lead = area_codes + pynini.cross(" ", "-")
        two_digits = pynini.invert(tn_cardinal_tagger.two_digits_read).optimize()
        three_digits = pynini.invert(tn_cardinal_tagger.three_digits_read).optimize()
        # Swedish subscriber-number groupings: 3-3-2, 3-2-2, 2-2-2 or 3-2 digits.
        base_number_part = (
            three_digits + NEMO_SPACE + three_digits + NEMO_SPACE + two_digits
            | three_digits + NEMO_SPACE + two_digits + NEMO_SPACE + two_digits
            | two_digits + NEMO_SPACE + two_digits + NEMO_SPACE + two_digits
            | three_digits + NEMO_SPACE + two_digits
        )
        graph = convert_space(lead + base_number_part)
        final_graph = pynutil.insert("name: \"") + graph + pynutil.insert("\"")
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/sv/taggers/telephone.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst
from pynini.lib import pynutil
class OrdinalFst(GraphFst):
    """
    Finite state transducer for classifying ordinal
        e.g. hundraandra -> tokens { name: "102." }

    Args:
        tn_ordinal: TN Ordinal tagger (provides `bare_ordinals`, a written->spoken grammar)
    """
    def __init__(self, tn_ordinal: GraphFst):
        super().__init__(name="ordinal", kind="classify")
        # Strip weights from the TN ordinal grammar and invert it so that it
        # maps spoken ordinals to digit strings (e.g. "hundraandra" -> "102").
        graph = pynini.arcmap(tn_ordinal.bare_ordinals, map_type="rmweight").invert().optimize()
        self.bare_ordinals = graph
        self.ordinals = graph + pynutil.insert(".")
        # Spoken forms of the ordinals "1" and "2" (e.g. "första"/"andra") —
        # presumably excluded because they double as ordinary Swedish words; TODO confirm.
        forsta_andra = pynini.project(pynini.union("1", "2") @ tn_ordinal.bare_ordinals, "output")
        # Remove those spoken forms from the accepted inputs, then append the
        # ordinal period ("102" -> "102.").
        graph = ((pynini.project(graph, "input") - forsta_andra.arcsort()) @ graph) + pynutil.insert(".")
        graph = pynutil.insert("name: \"") + graph + pynutil.insert("\"")
        self.fst = graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/sv/taggers/ordinal.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from nemo_text_processing.inverse_text_normalization.sv.utils import get_abs_path
from nemo_text_processing.text_normalization.en.graph_utils import (
INPUT_LOWER_CASED,
GraphFst,
convert_space,
string_map_cased,
)
from pynini.lib import pynutil
class WhiteListFst(GraphFst):
    """
    Finite state transducer for classifying whitelisted tokens,
        e.g. sankt -> tokens { name: "s:t" }
    This class has highest priority among all classifier grammars.
    Whitelisted tokens are defined and loaded from "data/whitelist.tsv" (unless input_file specified).

    Args:
        input_file: path to a file with whitelist replacements (tab-separated: written_form, spoken_form per line),
            e.g. nemo_text_processing/inverse_text_normalization/sv/data/whitelist.tsv
        input_case: accepting either "lower_cased" or "cased" input.
    """

    def __init__(self, input_case: str = INPUT_LOWER_CASED, input_file: str = None):
        super().__init__(name="whitelist", kind="classify")
        # Fall back to the bundled whitelist when no explicit file is given.
        whitelist_path = input_file if input_file is not None else get_abs_path("data/whitelist.tsv")
        if not os.path.exists(whitelist_path):
            raise ValueError(f"Whitelist file {whitelist_path} not found")
        replacements = string_map_cased(whitelist_path, input_case)
        tagged = pynutil.insert("name: \"") + convert_space(replacements) + pynutil.insert("\"")
        self.fst = tagged.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/sv/taggers/whitelist.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import pynini
from nemo_text_processing.inverse_text_normalization.en.taggers.punctuation import PunctuationFst
from nemo_text_processing.inverse_text_normalization.en.taggers.word import WordFst
from nemo_text_processing.inverse_text_normalization.sv.taggers.cardinal import CardinalFst
from nemo_text_processing.inverse_text_normalization.sv.taggers.date import DateFst
from nemo_text_processing.inverse_text_normalization.sv.taggers.decimal import DecimalFst
from nemo_text_processing.inverse_text_normalization.sv.taggers.electronic import ElectronicFst
from nemo_text_processing.inverse_text_normalization.sv.taggers.fraction import FractionFst
from nemo_text_processing.inverse_text_normalization.sv.taggers.ordinal import OrdinalFst
from nemo_text_processing.inverse_text_normalization.sv.taggers.telephone import TelephoneFst
from nemo_text_processing.inverse_text_normalization.sv.taggers.time import TimeFst
from nemo_text_processing.inverse_text_normalization.sv.taggers.whitelist import WhiteListFst
from nemo_text_processing.text_normalization.en.graph_utils import (
INPUT_LOWER_CASED,
GraphFst,
delete_extra_space,
delete_space,
generator_main,
)
from nemo_text_processing.text_normalization.sv.taggers.cardinal import CardinalFst as TNCardinalTagger
from nemo_text_processing.text_normalization.sv.taggers.date import DateFst as TNDateTagger
from nemo_text_processing.text_normalization.sv.taggers.decimal import DecimalFst as TNDecimalTagger
from nemo_text_processing.text_normalization.sv.taggers.electronic import ElectronicFst as TNElectronicTagger
from nemo_text_processing.text_normalization.sv.taggers.fraction import FractionFst as TNFractionTagger
from nemo_text_processing.text_normalization.sv.taggers.ordinal import OrdinalFst as TNOrdinalTagger
from nemo_text_processing.text_normalization.sv.taggers.telephone import TelephoneFst as TNTelephoneTagger
from nemo_text_processing.text_normalization.sv.verbalizers.electronic import ElectronicFst as TNElectronicVerbalizer
from pynini.lib import pynutil
class ClassifyFst(GraphFst):
    """
    Final class that composes all other classification grammars. This class can process an entire sentence, that is lower cased.
    For deployment, this grammar will be compiled and exported to OpenFst Finite State Archive (FAR) File.
    More details to deployment at NeMo/tools/text_processing_deployment.

    Args:
        cache_dir: path to a dir with .far grammar file. Set to None to avoid using cache.
        overwrite_cache: set to True to overwrite .far files
        whitelist: path to a file with whitelist replacements
        input_case: accepting either "lower_cased" or "cased" input.
    """

    def __init__(
        self,
        cache_dir: str = None,
        overwrite_cache: bool = False,
        whitelist: str = None,
        input_case: str = INPUT_LOWER_CASED,
    ):
        super().__init__(name="tokenize_and_classify", kind="classify")
        far_file = None
        if cache_dir is not None and cache_dir != 'None':
            os.makedirs(cache_dir, exist_ok=True)
            far_file = os.path.join(cache_dir, f"sv_itn_{input_case}.far")
        if not overwrite_cache and far_file and os.path.exists(far_file):
            # Reuse the previously compiled grammar instead of rebuilding it.
            self.fst = pynini.Far(far_file, mode="r")["tokenize_and_classify"]
            # Lazy %-style args: the message is only formatted if INFO is enabled.
            logging.info("ClassifyFst.fst was restored from %s.", far_file)
        else:
            logging.info("Creating ClassifyFst grammars.")
            # The ITN grammars below are built largely by inverting the TN
            # (text normalization) grammars, so instantiate those first.
            tn_cardinal_tagger = TNCardinalTagger(deterministic=False)
            tn_ordinal_tagger = TNOrdinalTagger(cardinal=tn_cardinal_tagger, deterministic=False)
            tn_date_tagger = TNDateTagger(cardinal=tn_cardinal_tagger, ordinal=tn_ordinal_tagger, deterministic=False)
            tn_decimal_tagger = TNDecimalTagger(cardinal=tn_cardinal_tagger, deterministic=False)
            tn_fraction_tagger = TNFractionTagger(
                cardinal=tn_cardinal_tagger, ordinal=tn_ordinal_tagger, deterministic=True
            )
            tn_electronic_tagger = TNElectronicTagger(deterministic=False)
            tn_electronic_verbalizer = TNElectronicVerbalizer(deterministic=False)
            tn_telephone_tagger = TNTelephoneTagger(deterministic=False)
            cardinal = CardinalFst(tn_cardinal_tagger=tn_cardinal_tagger)
            cardinal_graph = cardinal.fst
            ordinal = OrdinalFst(tn_ordinal=tn_ordinal_tagger)
            ordinal_graph = ordinal.fst
            decimal = DecimalFst(itn_cardinal_tagger=cardinal, tn_decimal_tagger=tn_decimal_tagger)
            decimal_graph = decimal.fst
            fraction = FractionFst(itn_cardinal_tagger=cardinal, tn_fraction_tagger=tn_fraction_tagger)
            fraction_graph = fraction.fst
            date_graph = DateFst(tn_date_tagger=tn_date_tagger).fst
            word_graph = WordFst().fst
            time_graph = TimeFst(tn_cardinal_tagger=tn_cardinal_tagger).fst
            whitelist_graph = WhiteListFst(input_file=whitelist, input_case=input_case).fst
            punct_graph = PunctuationFst().fst
            electronic_graph = ElectronicFst(
                tn_electronic_tagger=tn_electronic_tagger, tn_electronic_verbalizer=tn_electronic_verbalizer
            ).fst
            telephone_graph = TelephoneFst(
                tn_cardinal_tagger=tn_cardinal_tagger, tn_telephone_tagger=tn_telephone_tagger
            ).fst
            # Lower weight = higher priority; the catch-all word graph gets a
            # large weight so it only fires when no other grammar matches.
            classify = (
                pynutil.add_weight(cardinal_graph, 1.1)
                | pynutil.add_weight(whitelist_graph, 1.0)
                | pynutil.add_weight(time_graph, 1.1)
                | pynutil.add_weight(date_graph, 1.1)
                | pynutil.add_weight(decimal_graph, 1.1)
                | pynutil.add_weight(ordinal_graph, 1.1)
                | pynutil.add_weight(fraction_graph, 1.1)
                | pynutil.add_weight(telephone_graph, 1.1)
                | pynutil.add_weight(electronic_graph, 1.1)
                | pynutil.add_weight(word_graph, 100)
            )
            # Wrap each classified span (and punctuation) in "tokens { ... }".
            punct = pynutil.insert("tokens { ") + pynutil.add_weight(punct_graph, weight=1.1) + pynutil.insert(" }")
            token = pynutil.insert("tokens { ") + classify + pynutil.insert(" }")
            token_plus_punct = (
                pynini.closure(punct + pynutil.insert(" ")) + token + pynini.closure(pynutil.insert(" ") + punct)
            )
            graph = token_plus_punct + pynini.closure(delete_extra_space + token_plus_punct)
            graph = delete_space + graph + delete_space
            self.fst = graph.optimize()
            if far_file:
                generator_main(far_file, {"tokenize_and_classify": self.fst})
                logging.info("ClassifyFst grammars are saved to %s.", far_file)
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/sv/taggers/tokenize_and_classify.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/sv/taggers/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_SIGMA, GraphFst
from nemo_text_processing.text_normalization.sv.taggers.decimal import get_quantity
from pynini.lib import pynutil
class DecimalFst(GraphFst):
    """
    Finite state transducer for classifying decimal
        e.g. minus elva komma två nulla nulla sex biljoner -> decimal { negative: "true" integer_part: "11" fractional_part: "2006" quantity: "biljoner" }
        e.g. en biljon -> decimal { integer_part: "1" quantity: "biljon" }

    Args:
        itn_cardinal_tagger: ITN Cardinal tagger
        tn_decimal_tagger: TN decimal tagger
    """
    def __init__(self, itn_cardinal_tagger: GraphFst, tn_decimal_tagger: GraphFst):
        super().__init__(name="decimal", kind="classify")
        # Inverted TN grammar for the fractional digits; then squeeze out the
        # spaces between the spoken digit words ("två nulla nulla sex" -> "2006").
        self.graph = tn_decimal_tagger.graph_itn
        self.graph = self.graph @ pynini.cdrewrite(pynini.cross(" ", ""), "", "", NEMO_SIGMA)
        # "komma" is the Swedish spoken decimal separator.
        delete_point = pynutil.delete(" komma")
        graph_fractional = pynutil.insert("fractional_part: \"") + self.graph + pynutil.insert("\"")
        # Integer part: remove the bare words "en"/"ett" from the general grammar,
        # keep that variant for quantity expressions below, then re-add both
        # gendered forms of "one" explicitly as "1".
        hundreds = itn_cardinal_tagger.graph_hundred_component_at_least_one_non_zero_digit
        hundreds = (pynini.project(hundreds, "input") - "en" - "ett") @ hundreds
        hundreds_no_one = hundreds
        hundreds |= pynini.cross("en", "1")
        hundreds |= pynini.cross("ett", "1")
        graph_integer = pynutil.insert("integer_part: \"") + hundreds + pynutil.insert("\"")
        self.graph_integer = graph_integer
        final_graph_wo_sign = graph_integer + delete_point + pynini.accep(" ") + graph_fractional
        self.final_graph_wo_sign = final_graph_wo_sign
        # Also accept quantity expressions such as "en biljon"; the positional
        # arguments follow get_quantity's signature in the TN decimal tagger — TODO confirm.
        self.final_graph_wo_negative = (
            final_graph_wo_sign | get_quantity(final_graph_wo_sign, None, hundreds_no_one, None, False, True,)
        ).optimize()
        optional_minus_graph = pynini.closure(pynini.cross("minus ", "negative: \"true\" "), 0, 1)
        final_graph = optional_minus_graph + self.final_graph_wo_negative
        final_graph += pynutil.insert(" preserve_order: true")
        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/sv/taggers/decimal.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_SIGMA, GraphFst
from pynini.lib import pynutil
class CardinalFst(GraphFst):
    """
    Finite state transducer for classifying cardinals. Numbers below ten are not converted.
    Allows both compound numeral strings or separated by whitespace.
        e.g. minus tjugoen -> cardinal { negative: "-" integer: "21" }
        e.g. minus tjugoett -> cardinal { negative: "-" integer: "21" }

    Args:
        tn_cardinal_tagger: TN cardinal tagger
    """

    def __init__(self, tn_cardinal_tagger: GraphFst):
        super().__init__(name="cardinal", kind="classify")
        # Strip weights from the TN grammar, invert it (spoken -> digits), and
        # squeeze out any whitespace inside the numeral.
        spoken_to_digits = pynini.invert(pynini.arcmap(tn_cardinal_tagger.graph, map_type="rmweight")).optimize()
        spoken_to_digits = spoken_to_digits @ pynini.cdrewrite(pynini.cross(" ", ""), "", "", NEMO_SIGMA)
        self.graph = spoken_to_digits
        # Variant that rejects the bare words "en"/"ett" ("one"), which clash
        # with the Swedish indefinite article.
        without_ones = (pynini.project(spoken_to_digits, "input") - "en" - "ett") @ spoken_to_digits
        self.graph_no_ones = without_ones
        self.graph_hundred_component_at_least_one_non_zero_digit = pynini.invert(
            pynini.arcmap(tn_cardinal_tagger.graph_hundreds_component_at_least_one_non_zero_digit, map_type="rmweight")
        ).optimize()
        self.graph_hundred_component_at_least_one_non_zero_digit_no_one = (
            pynini.project(self.graph_hundred_component_at_least_one_non_zero_digit, "input") - "en" - "ett"
        ) @ self.graph_hundred_component_at_least_one_non_zero_digit
        self.graph_ties = (tn_cardinal_tagger.two_digit_non_zero).invert().optimize()
        # Small penalty so that when a string is ambiguous with a decimal
        # (e.g. 1000000 vs. 1 million), the decimal grammar wins.
        weighted = pynutil.add_weight(without_ones, weight=0.001)
        self.graph_no_exception = weighted
        self.digit = pynini.arcmap(tn_cardinal_tagger.digit, map_type="rmweight").invert().optimize()
        self.optional_minus_graph = pynini.closure(pynini.cross("minus ", "negative: \"-\" "), 0, 1)
        tagged = self.optional_minus_graph + pynutil.insert("integer: \"") + weighted + pynutil.insert("\"")
        self.fst = self.add_tokens(tagged).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/sv/taggers/cardinal.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst
from pynini.lib import pynutil
class ElectronicFst(GraphFst):
    """
    Finite state transducer for classifying electronic: email addresses, etc.
        e.g. c d f ett at a b c punkt e d u -> tokens { name: "cdf1@abc.edu" }

    Args:
        tn_electronic_tagger: TN electronic tagger
        tn_electronic_verbalizer: TN electronic verbalizer
    """

    def __init__(self, tn_electronic_tagger: GraphFst, tn_electronic_verbalizer: GraphFst):
        super().__init__(name="electronic", kind="classify")
        # Run the TN grammars in reverse: spoken words -> tagged form -> written form.
        itn_tagger = pynini.invert(tn_electronic_verbalizer.graph).optimize()
        itn_verbalizer = pynini.invert(tn_electronic_tagger.graph).optimize()
        spoken_to_written = itn_tagger @ itn_verbalizer
        tagged = pynutil.insert("name: \"") + spoken_to_written + pynutil.insert("\"")
        self.fst = tagged.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/sv/taggers/electronic.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright (c) 2023, Jim O'Regan for Språkbanken Tal
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_DIGIT, NEMO_SPACE, GraphFst
from pynini.lib import pynutil
class DateFst(GraphFst):
    """
    Finite state transducer for classifying date, in the form of (day) month (year) or year
        e.g. andra januari tjugohundraett -> tokens { name: "2001-01-02" }
        e.g. tjugotredje januari -> tokens { name: "23. jan." }
        e.g. tjugotjugo -> tokens { name: "2020" }

    Args:
        tn_date_tagger: TN date tagger
    """
    def __init__(
        self, tn_date_tagger: GraphFst,
    ):
        super().__init__(name="date", kind="classify")
        def force_double_digits(fst: GraphFst):
            # Accept a two-digit output as-is, or left-pad a single digit with "0".
            double = (NEMO_DIGIT + NEMO_DIGIT) @ fst
            single = (pynutil.insert("0") + NEMO_DIGIT) @ (NEMO_DIGIT @ fst)
            return single | double
        # Invert the TN (written -> spoken) grammars to map spoken -> written.
        year = tn_date_tagger.year.invert().optimize()
        decade = tn_date_tagger.decade.invert().optimize()
        era_words = tn_date_tagger.era_words.invert().optimize()
        day = tn_date_tagger.digit_day.invert().optimize()
        day_double = tn_date_tagger.digit_day_zero.invert().optimize()
        # Months as zero-padded numbers ("01".."12") vs. abbreviations ("jan.").
        month_double = force_double_digits(tn_date_tagger.number_to_month).invert().optimize()
        month_abbr = tn_date_tagger.month_abbr.invert().optimize()
        self.month_to_number = tn_date_tagger.number_to_month.invert().optimize()
        # Serialized fields of the date token.
        graph_year = pynutil.insert("year: \"") + year + pynutil.insert("\"")
        graph_month = pynutil.insert("month: \"") + month_double + pynutil.insert("\"")
        graph_month_abbr = pynutil.insert("month: \"") + month_abbr + pynutil.insert("\"")
        graph_day = pynutil.insert("day: \"") + day_double + pynutil.insert("\"")
        graph_day_ord = pynutil.insert("day: \"") + day + pynutil.insert("\"")
        graph_era = pynutil.insert("era: \"") + era_words + pynutil.insert("\"")
        optional_era = pynini.closure(NEMO_SPACE + graph_era, 0, 1)
        graph_decade = pynutil.insert("year: \"") + decade + pynutil.insert("\"")
        preserve = pynutil.insert(" preserve_order: true")
        optional_preserve = pynini.closure(preserve, 0, 1)
        year_era = graph_year + NEMO_SPACE + graph_era + preserve
        # "tjugotredje januari" -> day + abbreviated month, spoken order kept.
        graph_dm = graph_day_ord + NEMO_SPACE + graph_month_abbr + preserve
        dmy = graph_day + NEMO_SPACE + graph_month + NEMO_SPACE + graph_year
        graph_dmy = dmy + optional_era
        ydm = graph_year + NEMO_SPACE + graph_month + NEMO_SPACE + graph_day
        # NOTE(review): `preserve + optional_preserve` can insert the flag twice — TODO confirm intended.
        graph_ydm = ydm + optional_era + preserve + optional_preserve
        final_graph = year_era | graph_dmy | graph_dm | graph_ydm | graph_decade
        graph = self.add_tokens(final_graph)
        self.fst = graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/sv/taggers/date.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_DIGIT,
NEMO_NOT_QUOTE,
NEMO_SPACE,
GraphFst,
delete_space,
)
from pynini.lib import pynutil
class TimeFst(GraphFst):
    """
    Finite state transducer for verbalizing time, e.g.
        time { hours: "8" minutes: "30" zone: "e s t" } -> 08:30 est
        time { hours: "kl. 8" } -> kl. 8
        time { hours: "8" minutes: "30" seconds: "10" } -> 08:30:10

    Note: a bare hour with neither suffix nor zone is only accepted in the
    "kl. "-prefixed form (graph_klh below).
    """
    def __init__(self, deterministic: bool = True):
        super().__init__(name="time", kind="verbalize", deterministic=deterministic)
        # Pad a single digit to two digits (8 -> 08); two digits pass through.
        add_leading_zero_to_double_digit = (NEMO_DIGIT + NEMO_DIGIT) | (pynutil.insert("0") + NEMO_DIGIT)
        hour = pynutil.delete("hours: \"") + pynini.closure(NEMO_DIGIT, 1) + pynutil.delete("\"")
        # Hours field carrying a literal "kl. " prefix, which is kept in the output.
        kl_hour = (
            pynutil.delete("hours: \"") + pynini.accep("kl. ") + pynini.closure(NEMO_DIGIT, 1) + pynutil.delete("\"")
        )
        minute = pynutil.delete("minutes: \"") + pynini.closure(NEMO_DIGIT, 1) + pynutil.delete("\"")
        zeroed_hour = hour @ add_leading_zero_to_double_digit
        lead_hour = zeroed_hour | kl_hour
        lead_minute = minute @ add_leading_zero_to_double_digit
        second = pynutil.delete("seconds: \"") + pynini.closure(NEMO_DIGIT, 1) + pynutil.delete("\"")
        lead_second = second @ add_leading_zero_to_double_digit
        ANY_NOT_QUOTE = pynini.closure(NEMO_NOT_QUOTE, 1)
        final_suffix = pynutil.delete("suffix: \"") + ANY_NOT_QUOTE + pynutil.delete("\"")
        optional_suffix = pynini.closure(NEMO_SPACE + final_suffix, 0, 1)
        zone = pynutil.delete("zone: \"") + ANY_NOT_QUOTE + pynutil.delete("\"")
        optional_zone = pynini.closure(NEMO_SPACE + zone, 0, 1)
        # Require at least one of suffix/zone (suffix first, zone second).
        one_optional_suffix = NEMO_SPACE + final_suffix + optional_zone
        one_optional_suffix |= optional_suffix + NEMO_SPACE + zone
        # ":MM[:SS]" tail plus optional suffix/zone, appended after an hour below.
        graph = (
            delete_space
            + pynutil.insert(":")
            + lead_minute
            + pynini.closure(delete_space + pynutil.insert(":") + lead_second, 0, 1)
            + optional_suffix
            + optional_zone
        )
        # Bare hour requires a suffix and/or zone; the "kl." form does not.
        graph_h = hour + one_optional_suffix
        graph_klh = kl_hour + optional_suffix + optional_zone
        graph_hm = lead_hour + graph
        final_graph = graph_hm | graph_h | graph_klh
        self.fst = self.delete_tokens(final_graph).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/sv/verbalizers/time.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.inverse_text_normalization.sv.verbalizers.cardinal import CardinalFst
from nemo_text_processing.inverse_text_normalization.sv.verbalizers.date import DateFst
from nemo_text_processing.inverse_text_normalization.sv.verbalizers.decimal import DecimalFst
from nemo_text_processing.inverse_text_normalization.sv.verbalizers.time import TimeFst
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst
from nemo_text_processing.text_normalization.sv.verbalizers.cardinal import CardinalFst as TNCardinalVerbalizer
class VerbalizeFst(GraphFst):
    """
    Composes other verbalizer grammars.
    For deployment, this grammar will be compiled and exported to OpenFst Finite State Archive (FAR) File.
    More details to deployment at NeMo/tools/text_processing_deployment.
    """

    def __init__(self, deterministic: bool = True):
        super().__init__(name="verbalize", kind="verbalize", deterministic=deterministic)
        # The ITN cardinal verbalizer is built on top of the TN one.
        tn_cardinal_verbalizer = TNCardinalVerbalizer(deterministic=False)
        cardinal_fst = CardinalFst(tn_cardinal_verbalizer=tn_cardinal_verbalizer).fst
        date_fst = DateFst().fst
        decimal_fst = DecimalFst().fst
        time_fst = TimeFst().fst
        self.fst = time_fst | decimal_fst | cardinal_fst | date_fst
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/sv/verbalizers/verbalize.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.en.verbalizers.word import WordFst
from nemo_text_processing.inverse_text_normalization.sv.verbalizers.verbalize import VerbalizeFst
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst, delete_extra_space, delete_space
from pynini.lib import pynutil
class VerbalizeFinalFst(GraphFst):
    """
    Finite state transducer that verbalizes an entire sentence, e.g.
        tokens { name: "klockan" } tokens { name: "är" } tokens { time { hours: "12" minutes: "30" } } -> klockan är 12:30
    """

    def __init__(self, deterministic: bool = True):
        super().__init__(name="verbalize_final", kind="verbalize", deterministic=deterministic)
        # A token body is either a semiotic-class verbalizer or a plain word.
        token_types = VerbalizeFst().fst | WordFst().fst
        # Strip the "tokens { ... }" wrapper around each token.
        single_token = (
            pynutil.delete("tokens")
            + delete_space
            + pynutil.delete("{")
            + delete_space
            + token_types
            + delete_space
            + pynutil.delete("}")
        )
        sentence = delete_space + pynini.closure(single_token + delete_extra_space) + single_token + delete_space
        self.fst = sentence
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/sv/verbalizers/verbalize_final.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/sv/verbalizers/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_NOT_QUOTE,
GraphFst,
delete_preserve_order,
delete_space,
)
from nemo_text_processing.text_normalization.sv.graph_utils import ensure_space
from pynini.lib import pynutil
class DecimalFst(GraphFst):
    """
    Finite state transducer for verbalizing decimal, e.g.
        decimal { negative: "true" integer_part: "12" fractional_part: "5006" quantity: "biljoner" } -> -12,5006 biljoner
    """

    def __init__(self, deterministic: bool = True):
        super().__init__(name="decimal", kind="verbalize", deterministic=deterministic)
        not_quote = pynini.closure(NEMO_NOT_QUOTE, 1)

        def field(field_name):
            # Strip a serialized `field_name: "..."` field down to its quoted value.
            return pynutil.delete(f"{field_name}: \"") + not_quote + pynutil.delete("\"")

        maybe_negative = pynini.closure(pynini.cross("negative: \"true\"", "-") + delete_space, 0, 1)
        integer = field("integer_part")
        fraction = field("fractional_part")
        quantity = field("quantity")
        # Integer alone, integer + "," + fraction, or a bare fraction ("0,x" style).
        number = (
            maybe_negative + integer
            | maybe_negative + integer + pynini.cross(" ", ",") + fraction
            | pynutil.insert(",") + fraction
        )
        with_quantity = number + ensure_space + quantity
        self.graph = (number | with_quantity | quantity).optimize()
        graph = self.graph + pynini.closure(delete_preserve_order, 0, 1)
        self.fst = self.delete_tokens(graph).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/sv/verbalizers/decimal.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, GraphFst
from pynini.lib import pynutil
class CardinalFst(GraphFst):
    """
    Finite state transducer for verbalizing cardinal
        e.g. cardinal { integer: "23" negative: "-" } -> -23

    Args:
        tn_cardinal_verbalizer: TN cardinal verbalizer
    """

    def __init__(self, tn_cardinal_verbalizer: GraphFst, deterministic: bool = True):
        super().__init__(name="cardinal", kind="verbalize", deterministic=deterministic)
        self.numbers = tn_cardinal_verbalizer.numbers
        # Unwrap the serialized negative field, keeping its value (e.g. "-").
        sign = pynutil.delete("negative: \"") + NEMO_NOT_QUOTE + pynutil.delete("\" ")
        maybe_sign = pynini.closure(sign, 0, 1)
        self.fst = self.delete_tokens(maybe_sign + self.numbers).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/sv/verbalizers/cardinal.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_NOT_QUOTE,
NEMO_SPACE,
GraphFst,
delete_preserve_order,
delete_space,
)
from pynini.lib import pynutil
class DateFst(GraphFst):
    """
    Finite state transducer for verbalizing a date token, e.g.
    date { day: "1." month: "jan." preserve_order: true } -> 1. jan.
    """

    def __init__(self):
        super().__init__(name="date", kind="verbalize")

        def extract(field_name):
            # Drop the `field_name: "` wrapper and keep the quoted value.
            return (
                pynutil.delete(field_name + ": \"")
                + pynini.closure(NEMO_NOT_QUOTE, 1)
                + pynutil.delete("\"")
            )

        year, month, day, era = (extract(f) for f in ("year", "month", "day", "era"))
        maybe_era = pynini.closure(NEMO_SPACE + era, 0, 1)
        hyphenate = pynini.cross(" ", "-")
        # Consume either a `preserve_order: true` flag or a quoted `field_order` value.
        maybe_order = pynini.closure(
            pynutil.delete(" preserve_order:") + delete_space + pynutil.delete("true") + delete_space
            | pynutil.delete(" field_order:")
            + delete_space
            + pynutil.delete("\"")
            + NEMO_NOT_QUOTE
            + pynutil.delete("\"")
        )
        # Three accepted layouts: day-month, year-month-day (hyphenated), year alone.
        day_month = day + NEMO_SPACE + month + delete_preserve_order
        hyphenated = year + hyphenate + month + hyphenate + day + maybe_era + maybe_order
        year_only = year + maybe_era + maybe_order
        self.fst = self.delete_tokens(day_month | hyphenated | year_only).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/sv/verbalizers/date.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/sv/data/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/sv/data/time/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ru/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_SPACE, GraphFst
from nemo_text_processing.text_normalization.ru.verbalizers.time import TimeFst as TNTimeVerbalizer
from pynini.lib import pynutil
class TimeFst(GraphFst):
    """
    Finite state transducer for classifying time, e.g.
    "два часа пятнадцать минут" -> time { hours: "02:15" }

    Args:
        tn_time: Text Normalization Time graph
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, tn_time: GraphFst, deterministic: bool = True):
        super().__init__(name="time", kind="classify", deterministic=deterministic)
        # Compose the TN tagger with the TN verbalizer, then invert the result so
        # a spoken time phrase maps back to its written form (e.g. "02:15").
        written_to_spoken = pynini.compose(tn_time.graph_preserve_order, TNTimeVerbalizer().graph).optimize()
        in_order = pynutil.insert("hours: \"") + pynini.invert(written_to_spoken).optimize() + pynutil.insert("\"")
        # "пятнадцать минут шестого" -> 17:15
        # Minutes are spoken before the ordinal hour, so the hour is incremented;
        # field order must be permuted for correct verbalization.
        minutes_before_ordinal_hour = (
            pynutil.insert("minutes: \"")
            + pynini.invert(tn_time.minutes).optimize()
            + pynutil.insert("\"")
            + pynini.accep(NEMO_SPACE)
            + pynutil.insert("hours: \"")
            + pynini.invert(tn_time.increment_hour_ordinal).optimize()
            + pynutil.insert("\"")
        ).optimize()
        # "без пятнадцати минут шесть" -> 17:45
        # "без" ("to the hour") phrasing: minutes count down to the next cardinal hour.
        minutes_to_cardinal_hour = (
            pynini.cross("без ", "minutes: \"")
            + pynini.invert(tn_time.mins_to_h)
            + pynutil.insert("\"")
            + pynini.accep(NEMO_SPACE)
            + pynutil.insert("hours: \"")
            + pynini.invert(tn_time.increment_hour_cardinal).optimize()
            + pynutil.insert("\"")
        )
        reordered = minutes_before_ordinal_hour | minutes_to_cardinal_hour
        self.fst = self.add_tokens(in_order | reordered).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ru/taggers/time.py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.