# ecom_demo/inference_pb2.py
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# NO CHECKED-IN PROTOBUF GENCODE
# source: inference.proto
# Protobuf Python Version: 6.31.0
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import runtime_version as _runtime_version
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
_runtime_version.ValidateProtobufRuntimeVersion(
    _runtime_version.Domain.PUBLIC,
    6,
    31,
    0,
    '',
    'inference.proto'
)
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0finference.proto\x12\tinference\"A\n\x0bLoraRequest\x12\x0f\n\x07\x63oncept\x18\x01 \x01(\t\x12\x0e\n\x06prompt\x18\x02 \x01(\t\x12\x11\n\tuse_cache\x18\x03 \x01(\x08\"F\n\x0cLoraResponse\x12\x0c\n\x04res1\x18\x01 \x01(\x0c\x12\x0c\n\x04res2\x18\x02 \x01(\x0c\x12\x0c\n\x04res3\x18\x03 \x01(\x0c\x12\x0c\n\x04res4\x18\x04 \x01(\x0c\x32J\n\x0bLoraService\x12;\n\x08generate\x12\x16.inference.LoraRequest\x1a\x17.inference.LoraResponseb\x06proto3')
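# For reference, the schema encoded in the serialized descriptor above
# corresponds to the following inference.proto (reconstructed from the
# descriptor bytes; the original file may have differed in comments or
# formatting):
#
#   syntax = "proto3";
#   package inference;
#
#   message LoraRequest {
#     string concept = 1;
#     string prompt = 2;
#     bool use_cache = 3;
#   }
#
#   message LoraResponse {
#     bytes res1 = 1;
#     bytes res2 = 2;
#     bytes res3 = 3;
#     bytes res4 = 4;
#   }
#
#   service LoraService {
#     rpc generate (LoraRequest) returns (LoraResponse);
#   }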
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'inference_pb2', _globals)
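# After the builder runs, this module exposes the LoraRequest and LoraResponse
# message classes at module level; the gRPC client/server stubs for LoraService
# are generated separately (conventionally in inference_pb2_grpc.py by grpcio-tools).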
if not _descriptor._USE_C_DESCRIPTORS:
  DESCRIPTOR._loaded_options = None
  _globals['_LORAREQUEST']._serialized_start=30
  _globals['_LORAREQUEST']._serialized_end=95
  _globals['_LORARESPONSE']._serialized_start=97
  _globals['_LORARESPONSE']._serialized_end=167
  _globals['_LORASERVICE']._serialized_start=169
  _globals['_LORASERVICE']._serialized_end=243
# @@protoc_insertion_point(module_scope)
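
# ---------------------------------------------------------------------------
# Usage sketch: a minimal serialize/parse round trip with the message classes
# this module defines. Field values are illustrative placeholders only; making
# the RPC call itself would additionally require the LoraServiceStub from a
# companion inference_pb2_grpc.py module (assumed to be generated alongside
# this file).
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    request = LoraRequest(concept='sneaker', prompt='studio photo on white', use_cache=True)
    wire_bytes = request.SerializeToString()        # proto3 wire-format bytes
    decoded = LoraRequest.FromString(wire_bytes)    # parse them back
    assert decoded.concept == 'sneaker' and decoded.use_cache
    response = LoraResponse(res1=b'', res2=b'', res3=b'', res4=b'')
    print(decoded, response.ByteSize())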