# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from .images import (
ImageClassificationModelJob,
GenericImageModelJob,
ImageModelJob,
)
from .job import ModelJob
__all__ = [
'ImageClassificationModelJob',
'GenericImageModelJob',
'ImageModelJob',
'ModelJob',
]
| DIGITS-master | digits/model/__init__.py |
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
import os
import flask
from flask_wtf import Form
import wtforms
from wtforms import validators
from digits.config import config_value
from digits.device_query import get_device, get_nvml_info
from digits import utils
from digits.utils import sizeof_fmt
from digits.utils.forms import validate_required_iff
from digits import frameworks
class ModelForm(Form):
# Methods
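# NOTE: WTForms automatically runs methods named validate_<fieldname> as inline
# validators for the matching field; the other helpers below (e.g.
# selection_exists_in_choices, validate_file_exists, validate_py_ext,
# validate_NetParameter) are referenced explicitly in the validators=[...] lists
# of the fields further down.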
def selection_exists_in_choices(form, field):
found = False
for choice in field.choices:
if choice[0] == field.data:
found = True
if not found:
raise validators.ValidationError("Selected job doesn't exist. Maybe it was deleted by another user.")
def validate_NetParameter(form, field):
fw = frameworks.get_framework_by_id(form['framework'].data)
try:
# below function raises a BadNetworkException in case of validation error
fw.validate_network(field.data)
except frameworks.errors.BadNetworkError as e:
raise validators.ValidationError('Bad network: %s' % e.message)
def validate_file_exists(form, field):
from_client = bool(form.python_layer_from_client.data)
filename = ''
if not from_client and field.type == 'StringField':
filename = field.data
if filename == '':
return
if not os.path.isfile(filename):
raise validators.ValidationError('Server side file, %s, does not exist.' % filename)
def validate_py_ext(form, field):
from_client = bool(form.python_layer_from_client.data)
filename = ''
if from_client and field.type == 'FileField':
filename = flask.request.files[field.name].filename
elif not from_client and field.type == 'StringField':
filename = field.data
if filename == '':
return
(root, ext) = os.path.splitext(filename)
if ext != '.py' and ext != '.pyc':
raise validators.ValidationError('Python file, %s, needs .py or .pyc extension.' % filename)
# Fields
# The options for this get set in the view (since they are dynamic)
dataset = utils.forms.SelectField(
'Select Dataset',
choices=[],
tooltip="Choose the dataset to use for this model."
)
python_layer_from_client = utils.forms.BooleanField(
u'Use client-side file',
default=False,
)
python_layer_client_file = utils.forms.FileField(
u'Client-side file',
validators=[
validate_py_ext
],
tooltip="Choose a Python file on the client containing layer definitions."
)
python_layer_server_file = utils.forms.StringField(
u'Server-side file',
validators=[
validate_file_exists,
validate_py_ext
],
tooltip="Choose a Python file on the server containing layer definitions."
)
train_epochs = utils.forms.IntegerField(
'Training epochs',
validators=[
validators.NumberRange(min=1)
],
default=30,
tooltip="How many passes through the training data?"
)
snapshot_interval = utils.forms.FloatField(
'Snapshot interval (in epochs)',
default=1,
validators=[
validators.NumberRange(min=0),
],
tooltip="How many epochs of training between taking a snapshot?"
)
val_interval = utils.forms.FloatField(
'Validation interval (in epochs)',
default=1,
validators=[
validators.NumberRange(min=0)
],
tooltip="How many epochs of training between running through one pass of the validation data?"
)
traces_interval = utils.forms.IntegerField(
'Tracing Interval (in steps)',
validators=[
validators.NumberRange(min=0)
],
default=0,
tooltip="Generation of a timeline trace every few steps"
)
random_seed = utils.forms.IntegerField(
'Random seed',
validators=[
validators.NumberRange(min=0),
validators.Optional(),
],
tooltip=('If you provide a random seed, then back-to-back runs with '
'the same model and dataset should give identical results.')
)
batch_size = utils.forms.MultiIntegerField(
'Batch size',
validators=[
utils.forms.MultiNumberRange(min=1),
utils.forms.MultiOptional(),
],
tooltip="How many images to process at once. If blank, values are used from the network definition."
)
batch_accumulation = utils.forms.IntegerField(
'Batch Accumulation',
validators=[
validators.NumberRange(min=1),
validators.Optional(),
],
tooltip=("Accumulate gradients over multiple batches (useful when you "
"need a bigger batch size for training but it doesn't fit in memory).")
)
# Solver types
solver_type = utils.forms.SelectField(
'Solver type',
choices=[
('SGD', 'SGD (Stochastic Gradient Descent)'),
('MOMENTUM', 'Momentum'),
('NESTEROV', "NAG (Nesterov's accelerated gradient)"),
('ADAGRAD', 'AdaGrad (Adaptive Gradient)'),
('ADAGRADDA', 'AdaGradDA (AdaGrad Dual Averaging)'),
('ADADELTA', 'AdaDelta'),
('ADAM', 'Adam (Adaptive Moment Estimation)'),
('RMSPROP', 'RMSprop'),
('FTRL', 'FTRL (Follow-The-Regularized-Leader)'),
],
default='SGD',
tooltip="What type of solver will be used?",
)
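# Note: not every framework supports every solver type; validate_solver_type
# below rejects unsupported selections via Framework.supports_solver_type().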
def validate_solver_type(form, field):
fw = frameworks.get_framework_by_id(form.framework.data)
if fw is not None:
if not fw.supports_solver_type(field.data):
raise validators.ValidationError(
'Solver type not supported by this framework')
# Additional settings specific to selected solver
rms_decay = utils.forms.FloatField(
'RMS decay value',
default=0.99,
validators=[
validators.NumberRange(min=0),
],
tooltip=("If the gradient updates results in oscillations the gradient is reduced "
"by times 1-rms_decay. Otherwise it will be increased by rms_decay.")
)
# Learning rate
learning_rate = utils.forms.MultiFloatField(
'Base Learning Rate',
default=0.01,
validators=[
utils.forms.MultiNumberRange(min=0),
],
tooltip=("Affects how quickly the network learns. If you are getting "
"NaN for your loss, you probably need to lower this value.")
)
lr_policy = wtforms.SelectField(
'Policy',
choices=[
('fixed', 'Fixed'),
('step', 'Step Down'),
('multistep', 'Step Down (arbitrary steps)'),
('exp', 'Exponential Decay'),
('inv', 'Inverse Decay'),
('poly', 'Polynomial Decay'),
('sigmoid', 'Sigmoid Decay'),
],
default='step'
)
lr_step_size = wtforms.FloatField('Step Size', default=33)
lr_step_gamma = wtforms.FloatField('Gamma', default=0.1)
lr_multistep_values = wtforms.StringField('Step Values', default="50,85")
def validate_lr_multistep_values(form, field):
if form.lr_policy.data == 'multistep':
for value in field.data.split(','):
try:
float(value)
except ValueError:
raise validators.ValidationError('invalid value')
lr_multistep_gamma = wtforms.FloatField('Gamma', default=0.5)
lr_exp_gamma = wtforms.FloatField('Gamma', default=0.95)
lr_inv_gamma = wtforms.FloatField('Gamma', default=0.1)
lr_inv_power = wtforms.FloatField('Power', default=0.5)
lr_poly_power = wtforms.FloatField('Power', default=3)
lr_sigmoid_step = wtforms.FloatField('Step', default=50)
lr_sigmoid_gamma = wtforms.FloatField('Gamma', default=0.1)
# Network
# Use a SelectField instead of a HiddenField so that the default value
# is used when nothing is provided (through the REST API)
method = wtforms.SelectField(
u'Network type',
choices=[
('standard', 'Standard network'),
('previous', 'Previous network'),
('pretrained', 'Pretrained network'),
('custom', 'Custom network'),
],
default='standard',
)
# framework - hidden field, set by Javascript to the selected framework ID
framework = wtforms.HiddenField(
'framework',
validators=[
validators.AnyOf(
[fw.get_id() for fw in frameworks.get_frameworks()],
message='The framework you choose is not currently supported.'
)
],
default=frameworks.get_frameworks()[0].get_id()
)
# The options for this get set in the view (since they are dependent on the data type)
standard_networks = wtforms.RadioField(
'Standard Networks',
validators=[
validate_required_iff(method='standard'),
],
)
previous_networks = wtforms.RadioField(
'Previous Networks',
choices=[],
validators=[
validate_required_iff(method='previous'),
selection_exists_in_choices,
],
)
pretrained_networks = wtforms.RadioField(
'Pretrained Networks',
choices=[],
validators=[
validate_required_iff(method='pretrained'),
selection_exists_in_choices,
],
)
custom_network = utils.forms.TextAreaField(
'Custom Network',
validators=[
validate_required_iff(method='custom'),
validate_NetParameter,
],
)
custom_network_snapshot = utils.forms.TextField(
'Pretrained model(s)',
tooltip=("Paths to pretrained model files, separated by '%s'. "
"Only edit this field if you understand how fine-tuning "
"works in caffe or torch." % os.path.pathsep)
)
def validate_custom_network_snapshot(form, field):
pass
# if form.method.data == 'custom':
# for filename in field.data.strip().split(os.path.pathsep):
# if filename and not os.path.lexists(filename):
# raise validators.ValidationError('File "%s" does not exist' % filename)
# Select one of several GPUs
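# The GPU labels below show the device index, name and total memory, using NVML
# ('memory'/'total') when available and falling back to the CUDA device's
# totalGlobalMem otherwise.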
select_gpu = wtforms.RadioField(
'Select which GPU you would like to use',
choices=[('next', 'Next available')] + [(
index,
'#%s - %s (%s memory)' % (
index,
get_device(index).name,
sizeof_fmt(
get_nvml_info(index)['memory']['total']
if get_nvml_info(index) and 'memory' in get_nvml_info(index)
else get_device(index).totalGlobalMem)
),
) for index in config_value('gpu_list').split(',') if index],
default='next',
)
# Select N of several GPUs
select_gpus = utils.forms.SelectMultipleField(
'Select which GPU[s] you would like to use',
choices=[(
index,
'#%s - %s (%s memory)' % (
index,
get_device(index).name,
sizeof_fmt(
get_nvml_info(index)['memory']['total']
if get_nvml_info(index) and 'memory' in get_nvml_info(index)
else get_device(index).totalGlobalMem)
),
) for index in config_value('gpu_list').split(',') if index],
tooltip="The job won't start until all of the chosen GPUs are available."
)
# XXX For testing
# The Flask test framework can't handle SelectMultipleFields correctly
select_gpus_list = wtforms.StringField('Select which GPU[s] you would like to use (comma separated)')
def validate_select_gpus(form, field):
if form.select_gpus_list.data:
field.data = form.select_gpus_list.data.split(',')
# Use next available N GPUs
select_gpu_count = wtforms.IntegerField('Use this many GPUs (next available)',
validators=[
validators.NumberRange(min=1, max=len(
config_value('gpu_list').split(',')))
],
default=1,
)
def validate_select_gpu_count(form, field):
if field.data is None:
if form.select_gpus.data:
# Make this field optional
field.errors[:] = []
raise validators.StopValidation()
model_name = utils.forms.StringField('Model Name',
validators=[
validators.DataRequired()
],
tooltip="An identifier, later used to refer to this model in the Application."
)
group_name = utils.forms.StringField('Group Name',
tooltip="An optional group name for organization on the main page."
)
# allows shuffling data during training (for frameworks that support this, as indicated by
# their Framework.can_shuffle_data() method)
shuffle = utils.forms.BooleanField('Shuffle Train Data',
default=True,
tooltip='For every epoch, shuffle the data before training.'
)
| DIGITS-master | digits/model/forms.py |
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import io
import json
import math
import os
import tarfile
import zipfile
import flask
from flask import flash
import requests
import werkzeug.exceptions
from . import images as model_images
from . import ModelJob
from digits.pretrained_model.job import PretrainedModelJob
from digits import frameworks, extensions
from digits.utils import auth
from digits.utils.routing import request_wants_json, job_from_request, get_request_arg
from digits.webapp import scheduler
blueprint = flask.Blueprint(__name__, __name__)
@blueprint.route('/<job_id>/json', methods=['GET'])
@blueprint.route('/<job_id>', methods=['GET'])
def show(job_id):
"""
Show a ModelJob
Returns JSON when requested:
{id, name, directory, status, snapshots: [epoch,epoch,...]}
"""
job = scheduler.get_job(job_id)
if job is None:
raise werkzeug.exceptions.NotFound('Job not found')
related_jobs = scheduler.get_related_jobs(job)
if request_wants_json():
return flask.jsonify(job.json_dict(True))
else:
if isinstance(job, model_images.ImageClassificationModelJob):
return model_images.classification.views.show(job, related_jobs=related_jobs)
elif isinstance(job, model_images.GenericImageModelJob):
return model_images.generic.views.show(job, related_jobs=related_jobs)
else:
raise werkzeug.exceptions.BadRequest(
'Invalid job type')
@blueprint.route('/customize', methods=['POST'])
def customize():
"""
Returns a customized file for the ModelJob based on completed form fields
"""
network = flask.request.args['network']
framework = flask.request.args.get('framework')
if not network:
raise werkzeug.exceptions.BadRequest('network not provided')
fw = frameworks.get_framework_by_id(framework)
# can we find it in standard networks?
network_desc = fw.get_standard_network_desc(network)
if network_desc:
return json.dumps({'network': network_desc})
# not found in standard networks, looking for matching job
job = scheduler.get_job(network)
if job is None:
raise werkzeug.exceptions.NotFound('Job not found')
snapshot = None
epoch = float(flask.request.form.get('snapshot_epoch', 0))
if epoch == 0:
pass
elif epoch == -1:
snapshot = job.train_task().pretrained_model
else:
for filename, e in job.train_task().snapshots:
if e == epoch:
snapshot = job.path(filename)
break
if isinstance(job, PretrainedModelJob):
model_def = open(job.get_model_def_path(), 'r')
network = model_def.read()
snapshot = job.get_weights_path()
python_layer = job.get_python_layer_path()
else:
network = job.train_task().get_network_desc()
python_layer = None
return json.dumps({
'network': network,
'snapshot': snapshot,
'python_layer': python_layer
})
@blueprint.route('/timeline_trace_data', methods=['POST'])
def timeline_trace_data():
"""
Shows timeline trace of a model
"""
job = job_from_request()
step = get_request_arg('step')
if step is None:
raise werkzeug.exceptions.BadRequest('step is a required field')
return job.train_task().timeline_trace(int(step))
@blueprint.route('/view-config/<extension_id>', methods=['GET'])
def view_config(extension_id):
"""
Returns a rendering of a view extension configuration template
"""
extension = extensions.view.get_extension(extension_id)
if extension is None:
raise ValueError("Unknown extension '%s'" % extension_id)
config_form = extension.get_config_form()
template, context = extension.get_config_template(config_form)
return flask.render_template_string(template, **context)
@blueprint.route('/visualize-network', methods=['POST'])
def visualize_network():
"""
Returns a visualization of the custom network as a string of PNG data
"""
framework = flask.request.args.get('framework')
if not framework:
raise werkzeug.exceptions.BadRequest('framework not provided')
dataset = None
if 'dataset_id' in flask.request.form:
dataset = scheduler.get_job(flask.request.form['dataset_id'])
fw = frameworks.get_framework_by_id(framework)
ret = fw.get_network_visualization(
desc=flask.request.form['custom_network'],
dataset=dataset,
solver_type=flask.request.form['solver_type'] if 'solver_type' in flask.request.form else None,
use_mean=flask.request.form['use_mean'] if 'use_mean' in flask.request.form else None,
crop_size=flask.request.form['crop_size'] if 'crop_size' in flask.request.form else None,
num_gpus=flask.request.form['num_gpus'] if 'num_gpus' in flask.request.form else None,
)
return ret
@blueprint.route('/visualize-lr', methods=['POST'])
def visualize_lr():
"""
Returns a JSON object of data used to create the learning rate graph
"""
policy = flask.request.form['lr_policy']
# There may be multiple lrs if the learning_rate is swept
lrs = map(float, flask.request.form['learning_rate'].split(','))
if policy == 'fixed':
pass
elif policy == 'step':
step = float(flask.request.form['lr_step_size'])
gamma = float(flask.request.form['lr_step_gamma'])
elif policy == 'multistep':
steps = [float(s) for s in flask.request.form['lr_multistep_values'].split(',')]
current_step = 0
gamma = float(flask.request.form['lr_multistep_gamma'])
elif policy == 'exp':
gamma = float(flask.request.form['lr_exp_gamma'])
elif policy == 'inv':
gamma = float(flask.request.form['lr_inv_gamma'])
power = float(flask.request.form['lr_inv_power'])
elif policy == 'poly':
power = float(flask.request.form['lr_poly_power'])
elif policy == 'sigmoid':
step = float(flask.request.form['lr_sigmoid_step'])
gamma = float(flask.request.form['lr_sigmoid_gamma'])
else:
raise werkzeug.exceptions.BadRequest('Invalid policy')
datalist = []
for j, lr in enumerate(lrs):
data = ['Learning Rate %d' % j]
for i in xrange(101):
if policy == 'fixed':
data.append(lr)
elif policy == 'step':
data.append(lr * math.pow(gamma, math.floor(float(i) / step)))
elif policy == 'multistep':
if current_step < len(steps) and i >= steps[current_step]:
current_step += 1
data.append(lr * math.pow(gamma, current_step))
elif policy == 'exp':
data.append(lr * math.pow(gamma, i))
elif policy == 'inv':
data.append(lr * math.pow(1.0 + gamma * i, -power))
elif policy == 'poly':
data.append(lr * math.pow(1.0 - float(i) / 100, power))
elif policy == 'sigmoid':
data.append(lr / (1.0 + math.exp(gamma * (i - step))))
datalist.append(data)
return json.dumps({'data': {'columns': datalist}})
@auth.requires_login
@blueprint.route('/<job_id>/to_pretrained', methods=['GET', 'POST'])
def to_pretrained(job_id):
job = scheduler.get_job(job_id)
if job is None:
raise werkzeug.exceptions.NotFound('Job not found')
epoch = -1
# GET ?epoch=n
if 'epoch' in flask.request.args:
epoch = float(flask.request.args['epoch'])
# POST ?snapshot_epoch=n (from form)
elif 'snapshot_epoch' in flask.request.form:
epoch = float(flask.request.form['snapshot_epoch'])
# Write the stats of the job to json,
# and store in tempfile (for archive)
info = job.json_dict(verbose=False, epoch=epoch)
task = job.train_task()
snapshot_filename = None
snapshot_filename = task.get_snapshot(epoch)
# Set defaults:
labels_path = None
resize_mode = None
if "labels file" in info:
labels_path = os.path.join(task.dataset.dir(), info["labels file"])
if "image resize mode" in info:
resize_mode = info["image resize mode"]
job = PretrainedModelJob(
snapshot_filename,
os.path.join(job.dir(), task.model_file),
labels_path,
info["framework"],
info["image dimensions"][2],
resize_mode,
info["image dimensions"][0],
info["image dimensions"][1],
username=auth.get_username(),
name=info["name"]
)
scheduler.add_job(job)
return flask.redirect(flask.url_for('digits.views.home', tab=3)), 302
@blueprint.route('/<job_id>/publish_inference', methods=['POST'])
def publish_inference(job_id):
"""
Publish model to inference server
"""
rie_url = os.environ.get('RIE_URL', "http://localhost:5055")
publish_endpoint = rie_url+'/models'
# Get data from the modal form
description = flask.request.form.get('description')
modality = flask.request.form.getlist('modality')
output_layer = flask.request.form.get('output_layer')
input_layer = flask.request.form.get('input_layer')
input_shape = flask.request.form.get('input_shape')
output_shape = flask.request.form.get('output_shape')
job = scheduler.get_job(job_id)
if job is None:
raise werkzeug.exceptions.NotFound('Job not found')
epoch = -1
# GET ?epoch=n
if 'epoch' in flask.request.args:
epoch = float(flask.request.args['epoch'])
# POST ?snapshot_epoch=n (from form)
elif 'snapshot_epoch' in flask.request.form:
epoch = float(flask.request.form['snapshot_epoch'])
# Write the stats of the job to json,
# and store in tempfile (for archive)
job_dict = job.json_dict(verbose=False, epoch=epoch)
job_dict.update({"output_layer": output_layer,
"description": description,
"input_layer": input_layer,
"input_shape": input_shape,
"output_shape": output_shape,
"modality": modality})
info = json.dumps(job_dict, sort_keys=True, indent=4, separators=(',', ': '))
info_io = io.BytesIO()
info_io.write(info)
b = io.BytesIO()
mode = ''
with tarfile.open(fileobj=b, mode='w:%s' % mode) as tar:
for path, name in job.download_files(epoch, frozen_file=(job_dict['framework'] == 'tensorflow')):
tar.add(path, arcname=name)
tar_info = tarfile.TarInfo("info.json")
tar_info.size = len(info_io.getvalue())
info_io.seek(0)
tar.addfile(tar_info, info_io)
temp_buffer = b.getvalue()
files = {'model': ('tmp.tgz', temp_buffer)}
try:
r = requests.post(publish_endpoint, files=files)
except Exception as e:
return flask.make_response(e)
if r.status_code != requests.codes.ok:
raise werkzeug.exceptions.BadRequest("Bad Request")
end_point = json.loads(r.text)["location"]
flash('Model successfully published to RIE.<p>New endpoint at {}'.format(end_point))
return flask.redirect(flask.request.referrer), 302
@blueprint.route('/<job_id>/download',
methods=['GET', 'POST'],
defaults={'extension': 'tar.gz'})
@blueprint.route('/<job_id>/download.<extension>',
methods=['GET', 'POST'])
def download(job_id, extension):
"""
Return a tarball of all files required to run the model
"""
job = scheduler.get_job(job_id)
if job is None:
raise werkzeug.exceptions.NotFound('Job not found')
epoch = -1
# GET ?epoch=n
if 'epoch' in flask.request.args:
epoch = float(flask.request.args['epoch'])
# POST ?snapshot_epoch=n (from form)
elif 'snapshot_epoch' in flask.request.form:
epoch = float(flask.request.form['snapshot_epoch'])
# Write the stats of the job to json,
# and store in tempfile (for archive)
info = json.dumps(job.json_dict(verbose=False, epoch=epoch), sort_keys=True, indent=4, separators=(',', ': '))
info_io = io.BytesIO()
info_io.write(info)
b = io.BytesIO()
if extension in ['tar', 'tar.gz', 'tgz', 'tar.bz2']:
# tar file
mode = ''
if extension in ['tar.gz', 'tgz']:
mode = 'gz'
elif extension in ['tar.bz2']:
mode = 'bz2'
with tarfile.open(fileobj=b, mode='w:%s' % mode) as tar:
for path, name in job.download_files(epoch):
tar.add(path, arcname=name)
tar_info = tarfile.TarInfo("info.json")
tar_info.size = len(info_io.getvalue())
info_io.seek(0)
tar.addfile(tar_info, info_io)
elif extension in ['zip']:
with zipfile.ZipFile(b, 'w') as zf:
for path, name in job.download_files(epoch):
zf.write(path, arcname=name)
zf.writestr("info.json", info_io.getvalue())
else:
raise werkzeug.exceptions.BadRequest('Invalid extension')
response = flask.make_response(b.getvalue())
response.headers['Content-Disposition'] = 'attachment; filename=%s_epoch_%s.%s' % (job.id(), epoch, extension)
return response
class JobBasicInfo(object):
def __init__(self, name, ID, status, time, framework_id):
self.name = name
self.id = ID
self.status = status
self.time = time
self.framework_id = framework_id
class ColumnType(object):
def __init__(self, name, has_suffix, find_fn):
self.name = name
self.has_suffix = has_suffix
self.find_from_list = find_fn
def label(self, attr):
if self.has_suffix:
return '{} {}'.format(attr, self.name)
else:
return attr
def get_column_attrs():
job_outs = [set(j.train_task().train_outputs.keys() + j.train_task().val_outputs.keys())
for j in scheduler.jobs.values() if isinstance(j, ModelJob)]
return reduce(lambda acc, j: acc.union(j), job_outs, set())
| DIGITS-master | digits/model/views.py |
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from digits import test_utils
def test_caffe_imports():
test_utils.skipIfNotFramework('caffe')
import numpy # noqa
import google.protobuf # noqa
| DIGITS-master | digits/model/tasks/test_caffe_train.py |
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from collections import OrderedDict
import copy
import math
import operator
import os
import re
import sys
import time
from google.protobuf import text_format
import numpy as np
import platform
import scipy
from .train import TrainTask
import digits
from digits import utils
from digits.config import config_value
from digits.status import Status
from digits.utils import subclass, override, constants
from digits.utils.filesystem import tail
# Must import after importing digits.config
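# (Presumably the ordering matters because digits.config sets up the pycaffe
# environment before 'import caffe' below.)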
import caffe
import caffe_pb2
# NOTE: Increment this every time the pickled object changes
PICKLE_VERSION = 5
# Constants
CAFFE_SOLVER_FILE = 'solver.prototxt'
CAFFE_ORIGINAL_FILE = 'original.prototxt'
CAFFE_TRAIN_VAL_FILE = 'train_val.prototxt'
CAFFE_SNAPSHOT_PREFIX = 'snapshot'
CAFFE_DEPLOY_FILE = 'deploy.prototxt'
CAFFE_PYTHON_LAYER_FILE = 'digits_python_layers.py'
@subclass
class DigitsTransformer(caffe.io.Transformer):
"""
A subclass of caffe.io.Transformer (an old-style class)
Handles cases when we don't want to resize inputs
"""
def __init__(self, resize, **kwargs):
"""
Arguments:
resize -- whether to resize inputs to the network default
"""
self.resize = resize
caffe.io.Transformer.__init__(self, **kwargs)
def preprocess(self, in_, data):
"""
Preprocess an image
See parent class for details
"""
if not self.resize:
# update target input dimension such that no resize occurs
self.inputs[in_] = self.inputs[in_][:2] + data.shape[:2]
# do we have a mean?
if in_ in self.mean:
# resize mean if necessary
if self.mean[in_].size > 1:
# we are doing mean image subtraction
if self.mean[in_].size != data.size:
# mean image size is different from data size
# => we need to resize the mean image
transpose = self.transpose.get(in_)
if transpose is not None:
# detranspose
self.mean[in_] = self.mean[in_].transpose(
np.argsort(transpose))
self.mean[in_] = caffe.io.resize_image(
self.mean[in_],
data.shape[:2])
if transpose is not None:
# retranspose
self.mean[in_] = self.mean[in_].transpose(transpose)
return caffe.io.Transformer.preprocess(self, in_, data)
@subclass
class Error(Exception):
pass
@subclass
class CaffeTrainSanityCheckError(Error):
"""A sanity check failed"""
pass
@subclass
class CaffeTrainTask(TrainTask):
"""
Trains a caffe model
"""
CAFFE_LOG = 'caffe_output.log'
@staticmethod
def upgrade_network(network):
# TODO
pass
@staticmethod
def set_mode(gpu):
if gpu is not None:
caffe.set_device(gpu)
caffe.set_mode_gpu()
else:
caffe.set_mode_cpu()
def __init__(self, **kwargs):
"""
Arguments:
network -- a caffe NetParameter defining the network
"""
super(CaffeTrainTask, self).__init__(**kwargs)
self.pickver_task_caffe_train = PICKLE_VERSION
self.current_iteration = 0
self.loaded_snapshot_file = None
self.loaded_snapshot_epoch = None
self.image_mean = None
self.solver = None
self.solver_file = CAFFE_SOLVER_FILE
self.model_file = CAFFE_ORIGINAL_FILE
self.train_val_file = CAFFE_TRAIN_VAL_FILE
self.snapshot_prefix = CAFFE_SNAPSHOT_PREFIX
self.deploy_file = CAFFE_DEPLOY_FILE
self.log_file = self.CAFFE_LOG
self.digits_version = digits.__version__
self.caffe_version = config_value('caffe')['version']
self.caffe_flavor = config_value('caffe')['flavor']
def __getstate__(self):
state = super(CaffeTrainTask, self).__getstate__()
# Don't pickle these things
if 'caffe_log' in state:
del state['caffe_log']
if '_transformer' in state:
del state['_transformer']
if '_caffe_net' in state:
del state['_caffe_net']
return state
def __setstate__(self, state):
super(CaffeTrainTask, self).__setstate__(state)
# Upgrade pickle file
if state['pickver_task_caffe_train'] <= 1:
self.caffe_log_file = self.CAFFE_LOG
if state['pickver_task_caffe_train'] <= 2:
if hasattr(self, 'caffe_log_file'):
self.log_file = self.caffe_log_file
else:
self.log_file = None
self.framework_id = 'caffe'
if state['pickver_task_caffe_train'] <= 3:
try:
import caffe.proto.caffe_pb2
if isinstance(self.network, caffe.proto.caffe_pb2.NetParameter):
# Convert from NetParameter to string back to NetParameter
# to avoid this error:
# TypeError: Parameter to MergeFrom() must be instance of
# same class: expected caffe_pb2.NetParameter got
# caffe.proto.caffe_pb2.NetParameter.
fixed = caffe_pb2.NetParameter()
text_format.Merge(
text_format.MessageToString(self.network),
fixed,
)
self.network = fixed
except ImportError:
# If caffe.proto.caffe_pb2 can't be imported, then you're
# probably on a platform where that was never possible.
# So you don't need this upgrade and we can ignore the error.
pass
if state['pickver_task_caffe_train'] <= 4:
if hasattr(self, "original_file"):
self.model_file = self.original_file
del self.original_file
else:
self.model_file = None
self.pickver_task_caffe_train = PICKLE_VERSION
# Make changes to self
self.loaded_snapshot_file = None
self.loaded_snapshot_epoch = None
# These things don't get pickled
self.image_mean = None
# Task overrides
@override
def name(self):
return 'Train Caffe Model'
@override
def before_run(self):
super(CaffeTrainTask, self).before_run()
if isinstance(self.job, digits.model.images.classification.ImageClassificationModelJob):
self.save_files_classification()
elif isinstance(self.job, digits.model.images.generic.GenericImageModelJob):
self.save_files_generic()
else:
raise NotImplementedError
self.caffe_log = open(self.path(self.CAFFE_LOG), 'a')
self.saving_snapshot = False
self.receiving_train_output = False
self.receiving_val_output = False
self.last_train_update = None
return True
def get_mean_image(self, mean_file, resize=False):
mean_image = None
with open(self.dataset.path(mean_file), 'rb') as f:
blob = caffe_pb2.BlobProto()
blob.MergeFromString(f.read())
mean_image = np.reshape(blob.data,
(
self.dataset.get_feature_dims()[2],
self.dataset.get_feature_dims()[0],
self.dataset.get_feature_dims()[1],
)
)
# Resize the mean image if crop_size exists
if mean_image is not None and resize:
# Get the image size needed
network = caffe_pb2.NetParameter()
with open(self.path(self.deploy_file)) as infile:
text_format.Merge(infile.read(), network)
if network.input_shape:
data_shape = network.input_shape[0].dim
else:
data_shape = network.input_dim[:4]
assert len(data_shape) == 4, 'Bad data shape.'
# Get the image
mean_image = mean_image.astype('uint8')
mean_image = mean_image.transpose(1, 2, 0)
shape = list(mean_image.shape)
# imresize will not resize if the depth is anything
# other than 3 or 4. If it's 1, imresize expects an
# array.
if (len(shape) == 2 or (len(shape) == 3 and (shape[2] == 3 or shape[2] == 4))):
mean_image = scipy.misc.imresize(mean_image, (data_shape[2], data_shape[3]))
else:
mean_image = scipy.misc.imresize(mean_image[:, :, 0],
(data_shape[2], data_shape[3]))
mean_image = np.expand_dims(mean_image, axis=2)
mean_image = mean_image.transpose(2, 0, 1)
mean_image = mean_image.astype('float')
return mean_image
def get_mean_pixel(self, mean_file):
mean_image = self.get_mean_image(mean_file)
mean_pixel = None
if mean_image is not None:
mean_pixel = mean_image.mean(1).mean(1)
return mean_pixel
def set_mean_value(self, layer, mean_pixel):
# remove any values that may already be in the network
if layer.transform_param.HasField('mean_file'):
layer.transform_param.ClearField('mean_file')
self.logger.warning('Ignoring mean_file from network ...')
if len(layer.transform_param.mean_value) > 0:
layer.transform_param.ClearField('mean_value')
self.logger.warning('Ignoring mean_value from network ...')
layer.transform_param.mean_value.extend(list(mean_pixel))
def set_mean_file(self, layer, mean_file):
# remove any values that may already be in the network
if layer.transform_param.HasField('mean_file'):
layer.transform_param.ClearField('mean_file')
self.logger.warning('Ignoring mean_file from network ...')
if len(layer.transform_param.mean_value) > 0:
layer.transform_param.ClearField('mean_value')
self.logger.warning('Ignoring mean_value from network ...')
layer.transform_param.mean_file = mean_file
# TODO merge these monolithic save_files functions
def save_files_classification(self):
"""
Save solver, train_val and deploy files to disk
"""
# Save the origin network to file:
with open(self.path(self.model_file), 'w') as outfile:
text_format.PrintMessage(self.network, outfile)
network = cleanedUpClassificationNetwork(self.network, len(self.get_labels()))
data_layers, train_val_layers, deploy_layers = filterLayersByState(network)
# Write train_val file
train_val_network = caffe_pb2.NetParameter()
# Data layers
# TODO clean this up
train_data_layer = None
val_data_layer = None
for layer in data_layers.layer:
for rule in layer.include:
if rule.phase == caffe_pb2.TRAIN:
assert train_data_layer is None, 'cannot specify two train data layers'
train_data_layer = layer
elif rule.phase == caffe_pb2.TEST:
assert val_data_layer is None, 'cannot specify two test data layers'
val_data_layer = layer
if train_data_layer is None:
assert val_data_layer is None, 'cannot specify a test data layer without a train data layer'
dataset_backend = self.dataset.get_backend()
has_val_set = self.dataset.get_entry_count(constants.VAL_DB) > 0
if train_data_layer is not None:
if dataset_backend == 'lmdb':
assert train_data_layer.type == 'Data', 'expecting a Data layer'
elif dataset_backend == 'hdf5':
assert train_data_layer.type == 'HDF5Data', 'expecting an HDF5Data layer'
if dataset_backend == 'lmdb' and train_data_layer.HasField('data_param'):
assert not train_data_layer.data_param.HasField('source'), "don't set the data_param.source"
assert not train_data_layer.data_param.HasField('backend'), "don't set the data_param.backend"
if dataset_backend == 'hdf5' and train_data_layer.HasField('hdf5_data_param'):
assert not train_data_layer.hdf5_data_param.HasField('source'), "don't set the hdf5_data_param.source"
max_crop_size = min(self.dataset.get_feature_dims()[0], self.dataset.get_feature_dims()[1])
if self.crop_size:
assert dataset_backend != 'hdf5', 'HDF5Data layer does not support cropping'
assert self.crop_size <= max_crop_size, 'crop_size is larger than the image size'
train_data_layer.transform_param.crop_size = self.crop_size
elif train_data_layer.transform_param.HasField('crop_size'):
cs = train_data_layer.transform_param.crop_size
if cs > max_crop_size:
# don't throw an error here
cs = max_crop_size
train_data_layer.transform_param.crop_size = cs
self.crop_size = cs
train_val_network.layer.add().CopyFrom(train_data_layer)
train_data_layer = train_val_network.layer[-1]
if val_data_layer is not None and has_val_set:
if dataset_backend == 'lmdb':
assert val_data_layer.type == 'Data', 'expecting a Data layer'
elif dataset_backend == 'hdf5':
assert val_data_layer.type == 'HDF5Data', 'expecting an HDF5Data layer'
if dataset_backend == 'lmdb' and val_data_layer.HasField('data_param'):
assert not val_data_layer.data_param.HasField('source'), "don't set the data_param.source"
assert not val_data_layer.data_param.HasField('backend'), "don't set the data_param.backend"
if dataset_backend == 'hdf5' and val_data_layer.HasField('hdf5_data_param'):
assert not val_data_layer.hdf5_data_param.HasField('source'), "don't set the hdf5_data_param.source"
if self.crop_size:
# use our error checking from the train layer
val_data_layer.transform_param.crop_size = self.crop_size
train_val_network.layer.add().CopyFrom(val_data_layer)
val_data_layer = train_val_network.layer[-1]
else:
layer_type = 'Data'
if dataset_backend == 'hdf5':
layer_type = 'HDF5Data'
train_data_layer = train_val_network.layer.add(type=layer_type, name='data')
train_data_layer.top.append('data')
train_data_layer.top.append('label')
train_data_layer.include.add(phase=caffe_pb2.TRAIN)
if dataset_backend == 'lmdb':
train_data_layer.data_param.batch_size = constants.DEFAULT_BATCH_SIZE
elif dataset_backend == 'hdf5':
train_data_layer.hdf5_data_param.batch_size = constants.DEFAULT_BATCH_SIZE
if self.crop_size:
assert dataset_backend != 'hdf5', 'HDF5Data layer does not support cropping'
train_data_layer.transform_param.crop_size = self.crop_size
if has_val_set:
val_data_layer = train_val_network.layer.add(type=layer_type, name='data')
val_data_layer.top.append('data')
val_data_layer.top.append('label')
val_data_layer.include.add(phase=caffe_pb2.TEST)
if dataset_backend == 'lmdb':
val_data_layer.data_param.batch_size = constants.DEFAULT_BATCH_SIZE
elif dataset_backend == 'hdf5':
val_data_layer.hdf5_data_param.batch_size = constants.DEFAULT_BATCH_SIZE
if self.crop_size:
val_data_layer.transform_param.crop_size = self.crop_size
if dataset_backend == 'lmdb':
train_data_layer.data_param.source = self.dataset.get_feature_db_path(constants.TRAIN_DB)
train_data_layer.data_param.backend = caffe_pb2.DataParameter.LMDB
if val_data_layer is not None and has_val_set:
val_data_layer.data_param.source = self.dataset.get_feature_db_path(constants.VAL_DB)
val_data_layer.data_param.backend = caffe_pb2.DataParameter.LMDB
elif dataset_backend == 'hdf5':
train_data_layer.hdf5_data_param.source = os.path.join(
self.dataset.get_feature_db_path(constants.TRAIN_DB), 'list.txt')
if val_data_layer is not None and has_val_set:
val_data_layer.hdf5_data_param.source = os.path.join(
self.dataset.get_feature_db_path(constants.VAL_DB), 'list.txt')
if self.use_mean == 'pixel':
assert dataset_backend != 'hdf5', 'HDF5Data layer does not support mean subtraction'
mean_pixel = self.get_mean_pixel(self.dataset.path(self.dataset.get_mean_file()))
self.set_mean_value(train_data_layer, mean_pixel)
if val_data_layer is not None and has_val_set:
self.set_mean_value(val_data_layer, mean_pixel)
elif self.use_mean == 'image':
self.set_mean_file(train_data_layer, self.dataset.path(self.dataset.get_mean_file()))
if val_data_layer is not None and has_val_set:
self.set_mean_file(val_data_layer, self.dataset.path(self.dataset.get_mean_file()))
if self.batch_size:
if dataset_backend == 'lmdb':
train_data_layer.data_param.batch_size = self.batch_size
if val_data_layer is not None and has_val_set:
val_data_layer.data_param.batch_size = self.batch_size
elif dataset_backend == 'hdf5':
train_data_layer.hdf5_data_param.batch_size = self.batch_size
if val_data_layer is not None and has_val_set:
val_data_layer.hdf5_data_param.batch_size = self.batch_size
else:
if dataset_backend == 'lmdb':
if not train_data_layer.data_param.HasField('batch_size'):
train_data_layer.data_param.batch_size = constants.DEFAULT_BATCH_SIZE
if val_data_layer is not None and has_val_set and not val_data_layer.data_param.HasField('batch_size'):
val_data_layer.data_param.batch_size = constants.DEFAULT_BATCH_SIZE
elif dataset_backend == 'hdf5':
if not train_data_layer.hdf5_data_param.HasField('batch_size'):
train_data_layer.hdf5_data_param.batch_size = constants.DEFAULT_BATCH_SIZE
if (val_data_layer is not None and has_val_set and
not val_data_layer.hdf5_data_param.HasField('batch_size')):
val_data_layer.hdf5_data_param.batch_size = constants.DEFAULT_BATCH_SIZE
# Non-data layers
train_val_network.MergeFrom(train_val_layers)
# Write to file
with open(self.path(self.train_val_file), 'w') as outfile:
text_format.PrintMessage(train_val_network, outfile)
# network sanity checks
self.logger.debug("Network sanity check - train")
CaffeTrainTask.net_sanity_check(train_val_network, caffe_pb2.TRAIN)
if has_val_set:
self.logger.debug("Network sanity check - val")
CaffeTrainTask.net_sanity_check(train_val_network, caffe_pb2.TEST)
# Write deploy file
deploy_network = caffe_pb2.NetParameter()
# Input
deploy_network.input.append('data')
shape = deploy_network.input_shape.add()
shape.dim.append(1)
shape.dim.append(self.dataset.get_feature_dims()[2])
if self.crop_size:
shape.dim.append(self.crop_size)
shape.dim.append(self.crop_size)
else:
shape.dim.append(self.dataset.get_feature_dims()[0])
shape.dim.append(self.dataset.get_feature_dims()[1])
# Layers
deploy_network.MergeFrom(deploy_layers)
# Write to file
with open(self.path(self.deploy_file), 'w') as outfile:
text_format.PrintMessage(deploy_network, outfile)
# network sanity checks
self.logger.debug("Network sanity check - deploy")
CaffeTrainTask.net_sanity_check(deploy_network, caffe_pb2.TEST)
found_softmax = False
for layer in deploy_network.layer:
if layer.type == 'Softmax':
found_softmax = True
break
assert found_softmax, \
('Your deploy network is missing a Softmax layer! '
'Read the documentation for custom networks and/or look at the standard networks for examples.')
# Write solver file
solver = caffe_pb2.SolverParameter()
# get enum value for solver type
solver.solver_type = getattr(solver, self.solver_type)
solver.net = self.train_val_file
# Set CPU/GPU mode
if config_value('caffe')['cuda_enabled'] and \
bool(config_value('gpu_list')):
solver.solver_mode = caffe_pb2.SolverParameter.GPU
else:
solver.solver_mode = caffe_pb2.SolverParameter.CPU
solver.snapshot_prefix = self.snapshot_prefix
# Batch accumulation
from digits.frameworks import CaffeFramework
if self.batch_accumulation and CaffeFramework().can_accumulate_gradients():
solver.iter_size = self.batch_accumulation
# Epochs -> Iterations
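# e.g. (illustrative) 50,000 training images with batch_size=100 and iter_size=2
# gives ceil(50000 / 200) = 250 iterations per epoch, so 30 epochs -> max_iter = 7500.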
train_iter = int(math.ceil(
float(self.dataset.get_entry_count(constants.TRAIN_DB)) /
(train_data_layer.data_param.batch_size * solver.iter_size)
))
solver.max_iter = train_iter * self.train_epochs
snapshot_interval = self.snapshot_interval * train_iter
if 0 < snapshot_interval <= 1:
solver.snapshot = 1 # don't round down
elif 1 < snapshot_interval < solver.max_iter:
solver.snapshot = int(snapshot_interval)
else:
solver.snapshot = 0 # only take one snapshot at the end
if has_val_set and self.val_interval:
solver.test_iter.append(
int(math.ceil(float(self.dataset.get_entry_count(constants.VAL_DB)) /
val_data_layer.data_param.batch_size)))
val_interval = self.val_interval * train_iter
if 0 < val_interval <= 1:
solver.test_interval = 1 # don't round down
elif 1 < val_interval < solver.max_iter:
solver.test_interval = int(val_interval)
else:
solver.test_interval = solver.max_iter # only test once at the end
# Learning rate
solver.base_lr = self.learning_rate
solver.lr_policy = self.lr_policy['policy']
scale = float(solver.max_iter) / 100.0
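# The form expresses step sizes / step values as a percentage of total training
# (0-100), so they are rescaled to iterations here and gamma adjusted so that the
# resulting schedule roughly matches the percentage-based preview in visualize_lr
# (views.py).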
if solver.lr_policy == 'fixed':
pass
elif solver.lr_policy == 'step':
# stepsize = stepsize * scale
solver.stepsize = int(math.ceil(float(self.lr_policy['stepsize']) * scale))
solver.gamma = self.lr_policy['gamma']
elif solver.lr_policy == 'multistep':
for value in self.lr_policy['stepvalue'].split(','):
# stepvalue = stepvalue * scale
solver.stepvalue.append(int(math.ceil(float(value) * scale)))
solver.gamma = self.lr_policy['gamma']
elif solver.lr_policy == 'exp':
# gamma = gamma^(1/scale)
solver.gamma = math.pow(self.lr_policy['gamma'], 1.0 / scale)
elif solver.lr_policy == 'inv':
# gamma = gamma / scale
solver.gamma = self.lr_policy['gamma'] / scale
solver.power = self.lr_policy['power']
elif solver.lr_policy == 'poly':
solver.power = self.lr_policy['power']
elif solver.lr_policy == 'sigmoid':
# gamma = -gamma / scale
solver.gamma = -1.0 * self.lr_policy['gamma'] / scale
# stepsize = stepsize * scale
solver.stepsize = int(math.ceil(float(self.lr_policy['stepsize']) * scale))
else:
raise Exception('Unknown lr_policy: "%s"' % solver.lr_policy)
# These solver types don't support momentum
unsupported = [solver.ADAGRAD]
try:
unsupported.append(solver.RMSPROP)
except AttributeError:
pass
if solver.solver_type not in unsupported:
solver.momentum = 0.9
solver.weight_decay = solver.base_lr / 100.0
# solver specific values
if solver.solver_type == solver.RMSPROP:
solver.rms_decay = self.rms_decay
# Display 8x per epoch, or once per 5000 images, whichever is more frequent
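# e.g. (illustrative) with max_iter=7500, 30 epochs, batch_size=100, iter_size=2:
#   min(floor(7500 / 240), ceil(5000 / 200)) = min(31, 25) = 25 iterations per display.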
solver.display = max(1, min(
int(math.floor(float(solver.max_iter) / (self.train_epochs * 8))),
int(math.ceil(5000.0 / (train_data_layer.data_param.batch_size * solver.iter_size)))
))
if self.random_seed is not None:
solver.random_seed = self.random_seed
with open(self.path(self.solver_file), 'w') as outfile:
text_format.PrintMessage(solver, outfile)
self.solver = solver # save for later
return True
def save_files_generic(self):
"""
Save solver, train_val and deploy files to disk
"""
train_feature_db_path = self.dataset.get_feature_db_path(constants.TRAIN_DB)
train_label_db_path = self.dataset.get_label_db_path(constants.TRAIN_DB)
val_feature_db_path = self.dataset.get_feature_db_path(constants.VAL_DB)
val_label_db_path = self.dataset.get_label_db_path(constants.VAL_DB)
assert train_feature_db_path is not None, 'Training images are required'
# Save the origin network to file:
with open(self.path(self.model_file), 'w') as outfile:
text_format.PrintMessage(self.network, outfile)
# Split up train_val and deploy layers
network = cleanedUpGenericNetwork(self.network)
data_layers, train_val_layers, deploy_layers = filterLayersByState(network)
# Write train_val file
train_val_network = caffe_pb2.NetParameter()
# Data layers
# TODO clean this up
train_image_data_layer = None
train_label_data_layer = None
val_image_data_layer = None
val_label_data_layer = None
# Find the existing Data layers
for layer in data_layers.layer:
for rule in layer.include:
if rule.phase == caffe_pb2.TRAIN:
for top_name in layer.top:
if 'data' in top_name:
assert train_image_data_layer is None, \
'cannot specify two train image data layers'
train_image_data_layer = layer
elif 'label' in top_name:
assert train_label_data_layer is None, \
'cannot specify two train label data layers'
train_label_data_layer = layer
elif rule.phase == caffe_pb2.TEST:
for top_name in layer.top:
if 'data' in top_name:
assert val_image_data_layer is None, \
'cannot specify two val image data layers'
val_image_data_layer = layer
elif 'label' in top_name:
assert val_label_data_layer is None, \
'cannot specify two val label data layers'
val_label_data_layer = layer
# Create and add the Data layers
# (uses info from existing data layers, where possible)
train_image_data_layer = self.make_generic_data_layer(
train_feature_db_path, train_image_data_layer, 'data', 'data', caffe_pb2.TRAIN)
if train_image_data_layer is not None:
train_val_network.layer.add().CopyFrom(train_image_data_layer)
train_label_data_layer = self.make_generic_data_layer(
train_label_db_path, train_label_data_layer, 'label', 'label', caffe_pb2.TRAIN)
if train_label_data_layer is not None:
train_val_network.layer.add().CopyFrom(train_label_data_layer)
val_image_data_layer = self.make_generic_data_layer(
val_feature_db_path, val_image_data_layer, 'data', 'data', caffe_pb2.TEST)
if val_image_data_layer is not None:
train_val_network.layer.add().CopyFrom(val_image_data_layer)
val_label_data_layer = self.make_generic_data_layer(
val_label_db_path, val_label_data_layer, 'label', 'label', caffe_pb2.TEST)
if val_label_data_layer is not None:
train_val_network.layer.add().CopyFrom(val_label_data_layer)
# Add non-data layers
train_val_network.MergeFrom(train_val_layers)
# Write to file
with open(self.path(self.train_val_file), 'w') as outfile:
text_format.PrintMessage(train_val_network, outfile)
# network sanity checks
self.logger.debug("Network sanity check - train")
CaffeTrainTask.net_sanity_check(train_val_network, caffe_pb2.TRAIN)
if val_image_data_layer is not None:
self.logger.debug("Network sanity check - val")
CaffeTrainTask.net_sanity_check(train_val_network, caffe_pb2.TEST)
# Write deploy file
deploy_network = caffe_pb2.NetParameter()
# Input
deploy_network.input.append('data')
shape = deploy_network.input_shape.add()
shape.dim.append(1)
shape.dim.append(self.dataset.get_feature_dims()[2]) # channels
if train_image_data_layer.transform_param.HasField('crop_size'):
shape.dim.append(
train_image_data_layer.transform_param.crop_size)
shape.dim.append(
train_image_data_layer.transform_param.crop_size)
else:
shape.dim.append(self.dataset.get_feature_dims()[0]) # height
shape.dim.append(self.dataset.get_feature_dims()[1]) # width
# Layers
deploy_network.MergeFrom(deploy_layers)
# Write to file
with open(self.path(self.deploy_file), 'w') as outfile:
text_format.PrintMessage(deploy_network, outfile)
# network sanity checks
self.logger.debug("Network sanity check - deploy")
CaffeTrainTask.net_sanity_check(deploy_network, caffe_pb2.TEST)
# Write solver file
solver = caffe_pb2.SolverParameter()
# get enum value for solver type
solver.solver_type = getattr(solver, self.solver_type)
solver.net = self.train_val_file
# Set CPU/GPU mode
if config_value('caffe')['cuda_enabled'] and \
bool(config_value('gpu_list')):
solver.solver_mode = caffe_pb2.SolverParameter.GPU
else:
solver.solver_mode = caffe_pb2.SolverParameter.CPU
solver.snapshot_prefix = self.snapshot_prefix
# Batch accumulation
from digits.frameworks import CaffeFramework
if self.batch_accumulation and CaffeFramework().can_accumulate_gradients():
solver.iter_size = self.batch_accumulation
# Epochs -> Iterations
train_iter = int(math.ceil(
float(self.dataset.get_entry_count(constants.TRAIN_DB)) /
(train_image_data_layer.data_param.batch_size * solver.iter_size)
))
solver.max_iter = train_iter * self.train_epochs
snapshot_interval = self.snapshot_interval * train_iter
if 0 < snapshot_interval <= 1:
solver.snapshot = 1 # don't round down
elif 1 < snapshot_interval < solver.max_iter:
solver.snapshot = int(snapshot_interval)
else:
solver.snapshot = 0 # only take one snapshot at the end
if val_image_data_layer:
solver.test_iter.append(int(math.ceil(float(self.dataset.get_entry_count(
constants.VAL_DB)) / val_image_data_layer.data_param.batch_size)))
val_interval = self.val_interval * train_iter
if 0 < val_interval <= 1:
solver.test_interval = 1 # don't round down
elif 1 < val_interval < solver.max_iter:
solver.test_interval = int(val_interval)
else:
solver.test_interval = solver.max_iter # only test once at the end
# Learning rate
solver.base_lr = self.learning_rate
solver.lr_policy = self.lr_policy['policy']
scale = float(solver.max_iter) / 100.0
if solver.lr_policy == 'fixed':
pass
elif solver.lr_policy == 'step':
# stepsize = stepsize * scale
solver.stepsize = int(math.ceil(float(self.lr_policy['stepsize']) * scale))
solver.gamma = self.lr_policy['gamma']
elif solver.lr_policy == 'multistep':
for value in self.lr_policy['stepvalue'].split(','):
# stepvalue = stepvalue * scale
solver.stepvalue.append(int(math.ceil(float(value) * scale)))
solver.gamma = self.lr_policy['gamma']
elif solver.lr_policy == 'exp':
# gamma = gamma^(1/scale)
solver.gamma = math.pow(self.lr_policy['gamma'], 1.0 / scale)
elif solver.lr_policy == 'inv':
# gamma = gamma / scale
solver.gamma = self.lr_policy['gamma'] / scale
solver.power = self.lr_policy['power']
elif solver.lr_policy == 'poly':
solver.power = self.lr_policy['power']
elif solver.lr_policy == 'sigmoid':
# gamma = -gamma / scale
solver.gamma = -1.0 * self.lr_policy['gamma'] / scale
# stepsize = stepsize * scale
solver.stepsize = int(math.ceil(float(self.lr_policy['stepsize']) * scale))
else:
raise Exception('Unknown lr_policy: "%s"' % solver.lr_policy)
# These solver types don't support momentum
unsupported = [solver.ADAGRAD]
try:
unsupported.append(solver.RMSPROP)
except AttributeError:
pass
if solver.solver_type not in unsupported:
solver.momentum = 0.9
solver.weight_decay = solver.base_lr / 100.0
# Display 8x per epoch, or once per 5000 images, whichever is more frequent
solver.display = max(1, min(
int(math.floor(float(solver.max_iter) / (self.train_epochs * 8))),
int(math.ceil(5000.0 / (train_image_data_layer.data_param.batch_size * solver.iter_size)))
))
if self.random_seed is not None:
solver.random_seed = self.random_seed
with open(self.path(self.solver_file), 'w') as outfile:
text_format.PrintMessage(solver, outfile)
self.solver = solver # save for later
return True
def make_generic_data_layer(self, db_path, orig_layer, name, top, phase):
"""
Utility within save_files_generic for creating a Data layer
Returns a LayerParameter (or None)
Arguments:
db_path -- path to database (or None)
orig_layer -- a LayerParameter supplied by the user (or None)
"""
if db_path is None:
# TODO allow user to specify a standard data layer even if it doesn't exist in the dataset
return None
layer = caffe_pb2.LayerParameter()
if orig_layer is not None:
layer.CopyFrom(orig_layer)
layer.type = 'Data'
if not layer.HasField('name'):
layer.name = name
if not len(layer.top):
layer.top.append(top)
layer.ClearField('include')
layer.include.add(phase=phase)
# source
if layer.data_param.HasField('source'):
self.logger.warning('Ignoring data_param.source ...')
layer.data_param.source = db_path
if layer.data_param.HasField('backend'):
self.logger.warning('Ignoring data_param.backend ...')
layer.data_param.backend = caffe_pb2.DataParameter.LMDB
# batch size
if not layer.data_param.HasField('batch_size'):
layer.data_param.batch_size = constants.DEFAULT_BATCH_SIZE
if self.batch_size:
layer.data_param.batch_size = self.batch_size
# mean
if name == 'data' and self.dataset.get_mean_file():
if self.use_mean == 'pixel':
mean_pixel = self.get_mean_pixel(self.dataset.path(self.dataset.get_mean_file()))
# remove any values that may already be in the network
self.set_mean_value(layer, mean_pixel)
elif self.use_mean == 'image':
self.set_mean_file(layer, self.dataset.path(self.dataset.get_mean_file()))
# crop size
if name == 'data' and self.crop_size:
max_crop_size = min(self.dataset.get_feature_dims()[0], self.dataset.get_feature_dims()[1])
assert self.crop_size <= max_crop_size, 'crop_size is larger than the image size'
layer.transform_param.crop_size = self.crop_size
return layer
def iteration_to_epoch(self, it):
return float(it * self.train_epochs) / self.solver.max_iter
@override
def task_arguments(self, resources, env):
"""
Generate Caffe command line options or, in certain cases, pycaffe Python script
Returns a list of strings
Arguments:
resources -- dict of available task resources
env -- dict of environment variables
"""
if platform.system() == 'Windows':
if any([layer.type == 'Python' for layer in self.network.layer]):
# Arriving here because the network includes a Python Layer and we are running on Windows.
# We cannot invoke caffe.exe and need to fall back to pycaffe
# https://github.com/Microsoft/caffe/issues/87
# TODO: Remove this once caffe.exe works fine with Python Layer
win_python_layer_gpu_id = None
if 'gpus' in resources:
n_gpus = len(resources['gpus'])
if n_gpus > 1:
raise Exception('Please select a single GPU when running on Windows with a Python layer.')
elif n_gpus == 1:
win_python_layer_gpu_id = resources['gpus'][0][0]
# We know which GPU to use, call helper to create the script
return self._pycaffe_args(win_python_layer_gpu_id)
# Not in Windows, or in Windows but no Python Layer
# This is the normal path
args = [config_value('caffe')['executable'],
'train',
'--solver=%s' % self.path(self.solver_file),
]
if 'gpus' in resources:
identifiers = []
for identifier, value in resources['gpus']:
identifiers.append(identifier)
if len(identifiers) == 1:
args.append('--gpu=%s' % identifiers[0])
elif len(identifiers) > 1:
if config_value('caffe')['flavor'] == 'NVIDIA':
if (utils.parse_version(config_value('caffe')['version']) < utils.parse_version('0.14.0-alpha')):
# Prior to version 0.14, NVcaffe used the --gpus switch
args.append('--gpus=%s' % ','.join(identifiers))
else:
args.append('--gpu=%s' % ','.join(identifiers))
elif config_value('caffe')['flavor'] == 'BVLC':
args.append('--gpu=%s' % ','.join(identifiers))
else:
raise ValueError('Unknown flavor. Only NVIDIA and BVLC flavors are supported.')
if self.pretrained_model:
args.append('--weights=%s' % ','.join(map(lambda x: self.path(x),
self.pretrained_model.split(os.path.pathsep))))
return args
def _pycaffe_args(self, gpu_id):
"""
Helper to generate pycaffe Python script
Returns a list of strings
Throws ValueError if self.solver_type is not recognized
Arguments:
gpu_id -- the GPU device id to use
"""
# TODO: Remove this once caffe.exe works fine with Python Layer
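# The generated command looks roughly like (illustrative, SGD on GPU 0, no weights):
#   python -c "import caffe;caffe.set_device(0);caffe.set_mode_gpu();
#              solv=caffe.SGDSolver('solver.prototxt');solv.solve()"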
solver_type_mapping = {
'ADADELTA': 'AdaDeltaSolver',
'ADAGRAD': 'AdaGradSolver',
'ADAM': 'AdamSolver',
'NESTEROV': 'NesterovSolver',
'RMSPROP': 'RMSPropSolver',
'SGD': 'SGDSolver'}
try:
solver_type = solver_type_mapping[self.solver_type]
except KeyError:
raise ValueError("Unknown solver type {}.".format(self.solver_type))
if gpu_id is not None:
gpu_script = "caffe.set_device({id});caffe.set_mode_gpu();".format(id=gpu_id)
else:
gpu_script = "caffe.set_mode_cpu();"
loading_script = ""
if self.pretrained_model:
weight_files = map(lambda x: self.path(x), self.pretrained_model.split(os.path.pathsep))
for weight_file in weight_files:
loading_script = loading_script + "solv.net.copy_from('{weight}');".format(weight=weight_file)
command_script =\
"import caffe;" \
"{gpu_script}" \
"solv=caffe.{solver}('{solver_file}');" \
"{loading_script}" \
"solv.solve()" \
.format(gpu_script=gpu_script,
solver=solver_type,
solver_file=self.solver_file, loading_script=loading_script)
args = [sys.executable + ' -c ' + '\"' + command_script + '\"']
return args
@override
def process_output(self, line):
float_exp = '(NaN|[-+]?[0-9]*\.?[0-9]+(e[-+]?[0-9]+)?)'
self.caffe_log.write('%s\n' % line)
self.caffe_log.flush()
# parse caffe output
timestamp, level, message = self.preprocess_output_caffe(line)
if not message:
return True
# iteration updates
match = re.match(r'Iteration (\d+)', message)
if match:
i = int(match.group(1))
self.new_iteration(i)
# net output
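# e.g. (illustrative) 'Train net output #0: loss = 1.2345'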
leading_match = re.match(r'(\(\d\.\d\)?\s{0,7})(.*)', message)
if leading_match:
message = leading_match.group(2)
match = re.match(r'(Train|Test) net output #(\d+): (\S*) = %s' % float_exp, message, flags=re.IGNORECASE)
if match:
phase = match.group(1)
# index = int(match.group(2))
name = match.group(3)
value = match.group(4)
assert value.lower() != 'nan', \
'Network outputted NaN for "%s" (%s phase). Try decreasing your learning rate.' % (name, phase)
value = float(value)
# Find the layer type
kind = '?'
for layer in self.network.layer:
if name in layer.top:
kind = layer.type
break
if phase.lower() == 'train':
self.save_train_output(name, kind, value)
elif phase.lower() == 'test':
self.save_val_output(name, kind, value)
return True
# learning rate updates
match = re.match(r'Iteration (\d+).*lr = %s' % float_exp, message, flags=re.IGNORECASE)
if match:
i = int(match.group(1))
lr = float(match.group(2))
self.save_train_output('learning_rate', 'LearningRate', lr)
return True
# snapshot saved
if self.saving_snapshot:
if not message.startswith('Snapshotting solver state'):
self.logger.warning(
'caffe output format seems to have changed. '
'Expected "Snapshotting solver state..." after "Snapshotting to..."')
else:
self.logger.debug('Snapshot saved.')
self.detect_snapshots()
self.send_snapshot_update()
self.saving_snapshot = False
return True
# snapshot starting
match = re.match(r'Snapshotting to (.*)\s*$', message)
if match:
self.saving_snapshot = True
return True
if level in ['error', 'critical']:
self.logger.error('%s: %s' % (self.name(), message))
self.exception = message
return True
return True
def preprocess_output_caffe(self, line):
"""
Takes line of output and parses it according to caffe's output format
Returns (timestamp, level, message) or (None, None, None)
"""
# NOTE: This must change when the logging format changes
# LMMDD HH:MM:SS.MICROS pid file:lineno] message
match = re.match(r'(\w)(\d{4} \S{8}).*]\s+(\S.*)$', line)
if match:
level = match.group(1)
# add the year because caffe omits it
timestr = '%s%s' % (time.strftime('%Y'), match.group(2))
message = match.group(3)
if level == 'I':
level = 'info'
elif level == 'W':
level = 'warning'
elif level == 'E':
level = 'error'
elif level == 'F': # FAIL
level = 'critical'
timestamp = time.mktime(time.strptime(timestr, '%Y%m%d %H:%M:%S'))
return (timestamp, level, message)
else:
return (None, None, None)
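# Example (illustrative log line in the glog format parsed above):
#   'I0123 13:05:26.123456  1234 solver.cpp:228] Iteration 100, loss = 0.5'
# would yield level='info', message='Iteration 100, loss = 0.5', and a
# timestamp built from the current year plus '0123 13:05:26'.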
def new_iteration(self, it):
"""
Update current_iteration
"""
if self.current_iteration == it:
return
self.current_iteration = it
self.send_progress_update(self.iteration_to_epoch(it))
def send_snapshot_update(self):
"""
Sends socketio message about the snapshot list
"""
from digits.webapp import socketio
socketio.emit('task update',
{
'task': self.html_id(),
'update': 'snapshots',
'data': self.snapshot_list(),
},
namespace='/jobs',
room=self.job_id,
)
@override
def after_run(self):
super(CaffeTrainTask, self).after_run()
self.caffe_log.close()
@override
def after_runtime_error(self):
if os.path.exists(self.path(self.CAFFE_LOG)):
output = tail(self.path(self.CAFFE_LOG), 40)
lines = []
for line in output.split('\n'):
# parse caffe header
timestamp, level, message = self.preprocess_output_caffe(line)
if message:
lines.append(message)
# return the last 20 lines
self.traceback = '\n'.join(lines[len(lines) - 20:])
if 'DIGITS_MODE_TEST' in os.environ:
print output
# TrainTask overrides
@override
def get_task_stats(self, epoch=-1):
"""
return a dictionary of task statistics
"""
loc, mean_file = os.path.split(self.dataset.get_mean_file())
stats = {
"image dimensions": self.dataset.get_feature_dims(),
"mean file": mean_file,
"snapshot file": self.get_snapshot_filename(epoch),
"solver file": self.solver_file,
"train_val file": self.train_val_file,
"deploy file": self.deploy_file,
"framework": "caffe",
"mean subtraction": self.use_mean
}
# These attributes only available in more recent jobs:
if hasattr(self, "model_file"):
if self.model_file is not None:
stats.update({
"caffe flavor": self.caffe_flavor,
"caffe version": self.caffe_version,
"model file": self.model_file,
"digits version": self.digits_version
})
if hasattr(self.dataset, "resize_mode"):
stats.update({"image resize mode": self.dataset.resize_mode})
if hasattr(self.dataset, "labels_file"):
stats.update({"labels file": self.dataset.labels_file})
# Add this if python layer file exists
if os.path.exists(os.path.join(self.job_dir, CAFFE_PYTHON_LAYER_FILE)):
stats.update({"python layer file": CAFFE_PYTHON_LAYER_FILE})
elif os.path.exists(os.path.join(self.job_dir, CAFFE_PYTHON_LAYER_FILE + 'c')):
stats.update({"python layer file": CAFFE_PYTHON_LAYER_FILE + 'c'})
return stats
@override
def detect_snapshots(self):
self.snapshots = []
snapshot_dir = os.path.join(self.job_dir, os.path.dirname(self.snapshot_prefix))
snapshots = []
solverstates = []
for filename in os.listdir(snapshot_dir):
# find models
match = re.match(r'%s_iter_(\d+)\.caffemodel' % os.path.basename(self.snapshot_prefix), filename)
if match:
iteration = int(match.group(1))
epoch = float(iteration) / (float(self.solver.max_iter) / self.train_epochs)
# assert epoch.is_integer(), '%s is not an integer' % epoch
epoch = round(epoch, 3)
# if epoch is int
if epoch == math.ceil(epoch):
# print epoch,math.ceil(epoch),int(epoch)
epoch = int(epoch)
snapshots.append((
os.path.join(snapshot_dir, filename),
epoch
)
)
# find solverstates
match = re.match(r'%s_iter_(\d+)\.solverstate' % os.path.basename(self.snapshot_prefix), filename)
if match:
solverstates.append((
os.path.join(snapshot_dir, filename),
int(match.group(1))
)
)
# delete all but the most recent solverstate
for filename, iteration in sorted(solverstates, key=lambda tup: tup[1])[:-1]:
# print 'Removing "%s"' % filename
os.remove(filename)
self.snapshots = sorted(snapshots, key=lambda tup: tup[1])
return len(self.snapshots) > 0
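# Example (illustrative numbers): with solver.max_iter=300 and train_epochs=30
# there are 10 iterations per epoch, so a file named
# '<prefix>_iter_150.caffemodel' maps to epoch 150 / (300/30) = 15 and is
# stored in self.snapshots as ('<snapshot_dir>/<prefix>_iter_150.caffemodel', 15).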
@override
def est_next_snapshot(self):
if self.status != Status.RUN or self.current_iteration == 0:
return None
elapsed = time.time() - self.status_updates[-1][1]
next_snapshot_iteration = (1 + self.current_iteration // self.snapshot_interval) * self.snapshot_interval
return (next_snapshot_iteration - self.current_iteration) * elapsed // self.current_iteration
@override
def can_view_weights(self):
return False
@override
def infer_one(self,
data,
snapshot_epoch=None,
layers=None,
gpu=None,
resize=True):
return self.infer_one_image(data,
snapshot_epoch=snapshot_epoch,
layers=layers,
gpu=gpu,
resize=resize
)
def infer_one_image(self,
image,
snapshot_epoch=None,
layers=None,
gpu=None,
resize=True):
"""
Run inference on one image for a generic model
Returns (output, visualizations)
output -- an OrderedDict of string -> np.ndarray
visualizations -- a list of dicts for the specified layers
Returns (None, None) if something goes wrong
Arguments:
image -- an np.ndarray
Keyword arguments:
snapshot_epoch -- which snapshot to use
layers -- which layer activation[s] and weight[s] to visualize
"""
net = self.get_net(snapshot_epoch, gpu=gpu)
# process image
if image.ndim == 2:
image = image[:, :, np.newaxis]
preprocessed = self.get_transformer(resize).preprocess(
'data', image)
# reshape net input (if necessary)
test_shape = (1,) + preprocessed.shape
if net.blobs['data'].data.shape != test_shape:
net.blobs['data'].reshape(*test_shape)
# run inference
net.blobs['data'].data[...] = preprocessed
o = net.forward()
# order outputs in prototxt order
output = OrderedDict()
for blob in net.blobs.keys():
if blob in o:
output[blob] = o[blob]
visualizations = self.get_layer_visualizations(net, layers)
return (output, visualizations)
def get_layer_visualizations(self, net, layers='all'):
"""
Returns visualizations of various layers in the network
"""
# add visualizations
visualizations = []
if layers and layers != 'none':
if layers == 'all':
added_activations = []
for layer in self.network.layer:
for bottom in layer.bottom:
if bottom in net.blobs and bottom not in added_activations:
data = net.blobs[bottom].data[0]
vis = utils.image.get_layer_vis_square(data,
allow_heatmap=bool(bottom != 'data'),
channel_order='BGR')
mean, std, hist = self.get_layer_statistics(data)
visualizations.append(
{
'name': str(bottom),
'vis_type': 'Activation',
'vis': vis,
'data_stats': {
'shape': data.shape,
'mean': mean,
'stddev': std,
'histogram': hist,
},
}
)
added_activations.append(bottom)
if layer.name in net.params:
data = net.params[layer.name][0].data
if layer.type not in ['InnerProduct']:
vis = utils.image.get_layer_vis_square(data, channel_order='BGR')
else:
vis = None
mean, std, hist = self.get_layer_statistics(data)
params = net.params[layer.name]
weight_count = reduce(operator.mul, params[0].data.shape, 1)
if len(params) > 1:
bias_count = reduce(operator.mul, params[1].data.shape, 1)
else:
bias_count = 0
parameter_count = weight_count + bias_count
visualizations.append(
{
'name': str(layer.name),
'vis_type': 'Weights',
'layer_type': layer.type,
'param_count': parameter_count,
'vis': vis,
'data_stats': {
'shape': data.shape,
'mean': mean,
'stddev': std,
'histogram': hist,
},
}
)
for top in layer.top:
if top in net.blobs and top not in added_activations:
data = net.blobs[top].data[0]
normalize = True
# don't normalize softmax layers but scale by 255 to fill image range
if layer.type == 'Softmax':
vis = utils.image.get_layer_vis_square(data * 255,
normalize=False,
allow_heatmap=bool(top != 'data'),
channel_order='BGR')
else:
vis = utils.image.get_layer_vis_square(data,
normalize=normalize,
allow_heatmap=bool(top != 'data'),
channel_order='BGR')
mean, std, hist = self.get_layer_statistics(data)
visualizations.append(
{
'name': str(top),
'vis_type': 'Activation',
'vis': vis,
'data_stats': {
'shape': data.shape,
'mean': mean,
'stddev': std,
'histogram': hist,
},
}
)
added_activations.append(top)
else:
raise NotImplementedError
return visualizations
def get_layer_statistics(self, data):
"""
Returns statistics for the given layer data:
(mean, standard deviation, histogram)
histogram -- [y, x, ticks]
Arguments:
data -- a np.ndarray
"""
# XXX These calculations can be super slow
mean = np.mean(data).astype(np.float32)
std = np.std(data).astype(np.float32)
y, x = np.histogram(data, bins=20)
y = list(y.astype(np.float32))
ticks = x[[0, len(x) / 2, -1]]
x = [((x[i] + x[i + 1]) / 2.0).astype(np.float32) for i in xrange(len(x) - 1)]
ticks = list(ticks.astype(np.float32))
return (mean, std, [y, x, ticks])
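# Example (illustrative): np.histogram(data, bins=20) returns 20 counts and
# 21 bin edges, so y holds the 20 counts, x holds the 20 bin centers
# ((edge[i] + edge[i+1]) / 2), and ticks holds the first, middle and last
# edges for labelling the histogram axis.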
@override
def infer_many(self,
data,
snapshot_epoch=None,
gpu=None,
resize=True):
return self.infer_many_images(data,
snapshot_epoch=snapshot_epoch,
gpu=gpu,
resize=resize)
def infer_many_images(self,
images,
snapshot_epoch=None,
gpu=None,
resize=True):
"""
Returns a list of OrderedDict, one for each image
Arguments:
images -- a list of np.arrays
Keyword arguments:
snapshot_epoch -- which snapshot to use
"""
net = self.get_net(snapshot_epoch, gpu=gpu)
caffe_images = []
for image in images:
if image.ndim == 2:
caffe_images.append(image[:, :, np.newaxis])
else:
caffe_images.append(image)
data_shape = tuple(self.get_transformer(resize).inputs['data'])[1:]
if self.batch_size:
data_shape = (self.batch_size,) + data_shape
# TODO: grab batch_size from the TEST phase in train_val network
else:
data_shape = (constants.DEFAULT_BATCH_SIZE,) + data_shape
outputs = None
for chunk in [caffe_images[x:x + data_shape[0]] for x in xrange(0, len(caffe_images), data_shape[0])]:
new_shape = (len(chunk),) + data_shape[1:]
if net.blobs['data'].data.shape != new_shape:
net.blobs['data'].reshape(*new_shape)
for index, image in enumerate(chunk):
net.blobs['data'].data[index] = self.get_transformer(resize).preprocess(
'data', image)
o = net.forward()
# order output in prototxt order
output = OrderedDict()
for blob in net.blobs.keys():
if blob in o:
output[blob] = o[blob]
if outputs is None:
outputs = copy.deepcopy(output)
else:
for name, blob in output.iteritems():
outputs[name] = np.vstack((outputs[name], blob))
print 'Processed %s/%s images' % (len(outputs[outputs.keys()[0]]), len(caffe_images))
return outputs
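# Example (illustrative): with 10 input images and a batch size of 4, the
# chunking above runs the net on batches of 4, 4 and 2 images, reshaping the
# 'data' blob for each batch and stacking the per-batch outputs with
# np.vstack into one array per output blob.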
def has_model(self):
"""
Returns True if there is a model that can be used
"""
return len(self.snapshots) > 0
def get_net(self, epoch=None, gpu=None):
"""
Returns an instance of caffe.Net
Keyword Arguments:
epoch -- which snapshot to load (defaults to the most recently generated snapshot)
"""
if not self.has_model():
return False
file_to_load = self.get_snapshot(epoch)
# check if already loaded
if self.loaded_snapshot_file and self.loaded_snapshot_file == file_to_load \
and hasattr(self, '_caffe_net') and self._caffe_net is not None:
return self._caffe_net
CaffeTrainTask.set_mode(gpu)
# Add job_dir to PATH to pick up any python layers used by the model
sys.path.append(self.job_dir)
# Attempt to force a reload of the "digits_python_layers" module
loaded_module = sys.modules.get('digits_python_layers', None)
if loaded_module:
try:
reload(loaded_module)
except ImportError:
# Let Caffe throw the error if the file is missing
pass
# Load the model
self._caffe_net = caffe.Net(
self.path(self.deploy_file),
file_to_load,
caffe.TEST)
# Remove job_dir from PATH
sys.path.remove(self.job_dir)
self.loaded_snapshot_epoch = epoch
self.loaded_snapshot_file = file_to_load
return self._caffe_net
def get_transformer(self, resize=True):
"""
Returns an instance of DigitsTransformer
Keyword arguments:
resize -- whether the transformer should resize inputs to the network's input shape (True by default)
"""
# check if already loaded
if hasattr(self, '_transformer') and self._transformer is not None:
return self._transformer
data_shape = None
channel_swap = None
mean_pixel = None
mean_image = None
network = caffe_pb2.NetParameter()
with open(self.path(self.deploy_file)) as infile:
text_format.Merge(infile.read(), network)
if network.input_shape:
data_shape = network.input_shape[0].dim
else:
data_shape = network.input_dim[:4]
if self.dataset.get_feature_dims()[2] == 3:
# BGR when there are three channels
# XXX see issue #59
channel_swap = (2, 1, 0)
if self.dataset.get_mean_file():
if self.use_mean == 'pixel':
mean_pixel = self.get_mean_pixel(self.dataset.path(self.dataset.get_mean_file()))
elif self.use_mean == 'image':
mean_image = self.get_mean_image(self.dataset.path(self.dataset.get_mean_file()), True)
t = DigitsTransformer(
inputs={'data': tuple(data_shape)},
resize=resize
)
# transpose to (channels, height, width)
t.set_transpose('data', (2, 0, 1))
if channel_swap is not None:
# swap color channels
t.set_channel_swap('data', channel_swap)
# set mean
if self.use_mean == 'pixel' and mean_pixel is not None:
t.set_mean('data', mean_pixel)
elif self.use_mean == 'image' and mean_image is not None:
t.set_mean('data', mean_image)
# t.set_raw_scale('data', 255) # [0,255] range instead of [0,1]
self._transformer = t
return self._transformer
@override
def get_model_files(self):
"""
return paths to model files
"""
model_files = {
"Solver": self.solver_file,
"Network (train/val)": self.train_val_file,
"Network (deploy)": self.deploy_file
}
if os.path.exists(os.path.join(self.job_dir, CAFFE_PYTHON_LAYER_FILE)):
model_files.update({"Python layer": os.path.join(self.job_dir, CAFFE_PYTHON_LAYER_FILE)})
elif os.path.exists(os.path.join(self.job_dir, CAFFE_PYTHON_LAYER_FILE + 'c')):
model_files.update({"Python layer": os.path.join(self.job_dir, CAFFE_PYTHON_LAYER_FILE + 'c')})
if hasattr(self, "model_file"):
if self.model_file is not None:
model_files.update({"Network (original)": self.model_file})
return model_files
@override
def get_network_desc(self):
"""
return text description of model
"""
return text_format.MessageToString(self.network)
@staticmethod
def net_sanity_check(net, phase):
"""
Perform various sanity checks on the network, including:
- check that all layer bottoms are included at the specified stage
"""
assert phase == caffe_pb2.TRAIN or phase == caffe_pb2.TEST, "Unknown phase: %s" % repr(phase)
# work out which layers and tops are included at the specified phase
layers = []
tops = []
for layer in net.layer:
if len(layer.include) > 0:
mask = 0 # include none by default
for rule in layer.include:
mask = mask | (1 << rule.phase)
elif len(layer.exclude) > 0:
# include and exclude rules are mutually exclusive as per Caffe spec
mask = (1 << caffe_pb2.TRAIN) | (1 << caffe_pb2.TEST) # include all by default
for rule in layer.exclude:
mask = mask & ~(1 << rule.phase)
else:
mask = (1 << caffe_pb2.TRAIN) | (1 << caffe_pb2.TEST)
if mask & (1 << phase):
# layer will be included at this stage
layers.append(layer)
tops.extend(layer.top)
# add inputs
tops.extend(net.input)
# now make sure all bottoms are present at this stage
for layer in layers:
for bottom in layer.bottom:
if bottom not in tops:
raise CaffeTrainSanityCheckError(
"Layer '%s' references bottom '%s' at the %s stage however "
"this blob is not included at that stage. Please consider "
"using an include directive to limit the scope of this layer."
% (
layer.name, bottom,
"TRAIN" if phase == caffe_pb2.TRAIN else "TEST"
)
)
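# Example (illustrative, assuming the caffe.proto values TRAIN=0 and TEST=1):
# a layer with 'include { phase: TRAIN }' gets mask 0b01, a layer with
# 'exclude { phase: TEST }' gets mask 0b11 & ~0b10 = 0b01, and a layer with
# no rules gets 0b11, so only the first two are skipped when checking the
# TEST phase.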
def cleanedUpClassificationNetwork(original_network, num_categories):
"""
Perform a few cleanup routines on a classification network
Returns a new NetParameter
"""
network = caffe_pb2.NetParameter()
network.CopyFrom(original_network)
for i, layer in enumerate(network.layer):
if 'Data' in layer.type:
assert layer.type in ['Data', 'HDF5Data'], \
'Unsupported data layer type %s' % layer.type
elif layer.type == 'Input':
# DIGITS handles the deploy file for you
del network.layer[i]
elif layer.type == 'Accuracy':
# Check to see if top_k > num_categories
if (layer.accuracy_param.HasField('top_k') and
layer.accuracy_param.top_k > num_categories):
del network.layer[i]
elif layer.type == 'InnerProduct':
# Check to see if num_output is unset
if not layer.inner_product_param.HasField('num_output'):
layer.inner_product_param.num_output = num_categories
return network
def cleanedUpGenericNetwork(original_network):
"""
Perform a few cleanup routines on a generic network
Returns a new NetParameter
"""
network = caffe_pb2.NetParameter()
network.CopyFrom(original_network)
for i, layer in enumerate(network.layer):
if 'Data' in layer.type:
assert layer.type in ['Data'], \
'Unsupported data layer type %s' % layer.type
elif layer.type == 'Input':
# DIGITS handles the deploy file for you
del network.layer[i]
elif layer.type == 'InnerProduct':
# Check to see if num_output is unset
assert layer.inner_product_param.HasField('num_output'), \
"Don't leave inner_product_param.num_output unset for generic networks (layer %s)" % layer.name
return network
def filterLayersByState(network):
"""
Splits up a network into data, train_val and deploy layers
"""
# The net has a NetState when in use
train_state = caffe_pb2.NetState()
text_format.Merge('phase: TRAIN stage: "train"', train_state)
val_state = caffe_pb2.NetState()
text_format.Merge('phase: TEST stage: "val"', val_state)
deploy_state = caffe_pb2.NetState()
text_format.Merge('phase: TEST stage: "deploy"', deploy_state)
# Each layer can have several NetStateRules
train_rule = caffe_pb2.NetStateRule()
text_format.Merge('phase: TRAIN', train_rule)
val_rule = caffe_pb2.NetStateRule()
text_format.Merge('phase: TEST', val_rule)
# Return three NetParameters
data_layers = caffe_pb2.NetParameter()
train_val_layers = caffe_pb2.NetParameter()
deploy_layers = caffe_pb2.NetParameter()
for layer in network.layer:
included_train = _layerIncludedInState(layer, train_state)
included_val = _layerIncludedInState(layer, val_state)
included_deploy = _layerIncludedInState(layer, deploy_state)
# Treat data layers differently (more processing done later)
if 'Data' in layer.type:
data_layers.layer.add().CopyFrom(layer)
rule = None
if not included_train:
# Exclude from train
rule = val_rule
elif not included_val:
# Exclude from val
rule = train_rule
_setLayerRule(data_layers.layer[-1], rule)
# Non-data layers
else:
if included_train or included_val:
# Add to train_val
train_val_layers.layer.add().CopyFrom(layer)
rule = None
if not included_train:
# Exclude from train
rule = val_rule
elif not included_val:
# Exclude from val
rule = train_rule
_setLayerRule(train_val_layers.layer[-1], rule)
if included_deploy:
# Add to deploy
deploy_layers.layer.add().CopyFrom(layer)
_setLayerRule(deploy_layers.layer[-1], None)
return (data_layers, train_val_layers, deploy_layers)
def _layerIncludedInState(layer, state):
"""
Returns True if this layer will be included in the given state
Logic copied from Caffe's Net::FilterNet()
"""
# If no include rules are specified, the layer is included by default and
# only excluded if it meets one of the exclude rules.
layer_included = len(layer.include) == 0
for exclude_rule in layer.exclude:
if _stateMeetsRule(state, exclude_rule):
layer_included = False
break
for include_rule in layer.include:
if _stateMeetsRule(state, include_rule):
layer_included = True
break
return layer_included
def _stateMeetsRule(state, rule):
"""
Returns True if the given state meets the given rule
Logic copied from Caffe's Net::StateMeetsRule()
"""
if rule.HasField('phase'):
if rule.phase != state.phase:
return False
if rule.HasField('min_level'):
if state.level < rule.min_level:
return False
if rule.HasField('max_level'):
if state.level > rule.max_level:
return False
# The state must contain ALL of the rule's stages
for stage in rule.stage:
if stage not in state.stage:
return False
# The state must contain NONE of the rule's not_stages
for stage in rule.not_stage:
if stage in state.stage:
return False
return True
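# Example (illustrative): against the deploy NetState defined in
# filterLayersByState ('phase: TEST stage: "deploy"'), a rule of
# 'phase: TEST stage: "deploy"' is met, a rule of 'phase: TRAIN' is not
# (phase mismatch), and a rule with 'not_stage: "deploy"' is not (the state
# contains a forbidden stage).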
def _setLayerRule(layer, rule=None):
"""
Set a new include rule for this layer
If rule is None, the layer will always be included
"""
layer.ClearField('include')
layer.ClearField('exclude')
if rule is not None:
layer.include.add().CopyFrom(rule)
| DIGITS-master | digits/model/tasks/caffe_train.py |
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from .caffe_train import CaffeTrainTask
from .torch_train import TorchTrainTask
from .train import TrainTask
__all__ = [
'CaffeTrainTask',
'TorchTrainTask',
'TrainTask',
]
from digits.config import config_value # noqa
if config_value('tensorflow')['enabled']:
from .tensorflow_train import TensorflowTrainTask # noqa
__all__.append('TensorflowTrainTask')
| DIGITS-master | digits/model/tasks/__init__.py |
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from .caffe_train import CaffeTrainTask, CaffeTrainSanityCheckError
from google.protobuf import text_format
from digits import test_utils
# Must import after importing digits.config
import caffe_pb2
def check_positive(desc, stage):
network = caffe_pb2.NetParameter()
text_format.Merge(desc, network)
CaffeTrainTask.net_sanity_check(network, stage)
def check_negative(desc, stage):
network = caffe_pb2.NetParameter()
text_format.Merge(desc, network)
try:
CaffeTrainTask.net_sanity_check(network, stage)
except CaffeTrainSanityCheckError:
pass
class TestCaffeNetSanityCheck(test_utils.CaffeMixin):
# positive cases
def test_std_net_train(self):
desc = \
"""
layer {
name: "data"
type: "Data"
top: "data"
top: "label"
include {
phase: TRAIN
}
}
layer {
name: "hidden"
type: 'InnerProduct'
bottom: "data"
top: "output"
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "output"
bottom: "label"
top: "loss"
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "output"
bottom: "label"
top: "accuracy"
include {
phase: TEST
}
}
"""
check_positive(desc, caffe_pb2.TRAIN)
def test_std_net_deploy(self):
desc = \
"""
input: "data"
layer {
name: "hidden"
type: 'InnerProduct'
bottom: "data"
top: "output"
}
"""
check_positive(desc, caffe_pb2.TEST)
def test_ref_label_with_proper_include_directive(self):
desc = \
"""
input: "data"
layer {
name: "hidden"
type: 'InnerProduct'
bottom: "data"
top: "output"
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "output"
bottom: "label"
top: "loss"
include {
phase: TRAIN
}
}
"""
check_positive(desc, caffe_pb2.TEST)
def test_ref_label_with_proper_exclude_directive(self):
desc = \
"""
input: "data"
layer {
name: "hidden"
type: 'InnerProduct'
bottom: "data"
top: "output"
}
layer {
name: "lossExcludedInTest"
type: "SoftmaxWithLoss"
bottom: "output"
bottom: "label"
top: "loss"
exclude {
phase: TEST
}
}
"""
check_positive(desc, caffe_pb2.TEST)
# negative cases
def test_error_ref_label_in_deploy(self):
desc = \
"""
input: "data"
layer {
name: "hidden"
type: 'InnerProduct'
bottom: "data"
top: "output"
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "output"
bottom: "label"
top: "loss"
}
"""
check_negative(desc, caffe_pb2.TEST)
def test_error_ref_unknown_blob(self):
desc = \
"""
input: "data"
layer {
name: "hidden"
type: 'InnerProduct'
bottom: "data"
bottom: "bogusBlob"
top: "output"
}
"""
check_negative(desc, caffe_pb2.TRAIN)
def test_error_ref_unincluded_blob(self):
desc = \
"""
input: "data"
layer {
name: "hidden"
type: 'InnerProduct'
bottom: "data"
top: "output"
include {
phase: TRAIN
}
}
layer {
name: "hidden"
type: 'InnerProduct2'
bottom: "data"
top: "output"
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "output"
bottom: "label"
top: "loss"
include {
phase: TRAIN
}
}
"""
check_negative(desc, caffe_pb2.TEST)
def test_error_ref_excluded_blob(self):
desc = \
"""
input: "data"
layer {
name: "hidden"
type: 'InnerProduct'
bottom: "data"
top: "output"
include {
phase: TRAIN
}
}
layer {
name: "hidden"
type: 'InnerProduct2'
bottom: "data"
top: "output"
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "output"
bottom: "label"
top: "loss"
exclude {
phase: TEST
}
}
"""
check_negative(desc, caffe_pb2.TEST)
| DIGITS-master | digits/model/tasks/test_caffe_sanity_checks.py |
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from collections import OrderedDict, namedtuple
import os.path
import time
import flask
import gevent
import psutil
from digits import device_query
from digits.task import Task
from digits.utils import subclass, override
# NOTE: Increment this every time the pickled object changes
PICKLE_VERSION = 2
# Used to store network outputs
NetworkOutput = namedtuple('NetworkOutput', ['kind', 'data'])
@subclass
class TrainTask(Task):
"""
Defines required methods for child classes
"""
def __init__(self, job, dataset, train_epochs, snapshot_interval, learning_rate, lr_policy, **kwargs):
"""
Arguments:
job -- model job
dataset -- a DatasetJob containing the dataset for this model
train_epochs -- how many epochs of training data to train on
snapshot_interval -- how many epochs between taking a snapshot
learning_rate -- the base learning rate
lr_policy -- a hash of options to be used for the learning rate policy
Keyword arguments:
gpu_count -- how many GPUs to use for training (integer)
selected_gpus -- a list of GPU indexes to be used for training
batch_size -- if set, override any network specific batch_size with this value
batch_accumulation -- accumulate gradients over multiple batches
val_interval -- how many epochs between validating the model with an epoch of validation data
traces_interval -- number of steps between timeline traces
pretrained_model -- filename for a model to use for fine-tuning
crop_size -- crop each image down to a square of this size
use_mean -- subtract the dataset's mean file or mean pixel
random_seed -- optional random seed
data_aug -- data augmentation options
"""
self.gpu_count = kwargs.pop('gpu_count', None)
self.selected_gpus = kwargs.pop('selected_gpus', None)
self.batch_size = kwargs.pop('batch_size', None)
self.batch_accumulation = kwargs.pop('batch_accumulation', None)
self.val_interval = kwargs.pop('val_interval', None)
self.traces_interval = kwargs.pop('traces_interval', None)
self.pretrained_model = kwargs.pop('pretrained_model', None)
self.crop_size = kwargs.pop('crop_size', None)
self.use_mean = kwargs.pop('use_mean', None)
self.random_seed = kwargs.pop('random_seed', None)
self.solver_type = kwargs.pop('solver_type', None)
self.rms_decay = kwargs.pop('rms_decay', None)
self.shuffle = kwargs.pop('shuffle', None)
self.network = kwargs.pop('network', None)
self.framework_id = kwargs.pop('framework_id', None)
self.data_aug = kwargs.pop('data_aug', None)
super(TrainTask, self).__init__(job_dir=job.dir(), **kwargs)
self.pickver_task_train = PICKLE_VERSION
self.job = job
self.dataset = dataset
self.train_epochs = train_epochs
self.snapshot_interval = snapshot_interval
self.learning_rate = learning_rate
self.lr_policy = lr_policy
self.current_epoch = 0
self.snapshots = []
self.timeline_traces = []
# data gets stored as dicts of lists (for graphing)
self.train_outputs = OrderedDict()
self.val_outputs = OrderedDict()
def __getstate__(self):
state = super(TrainTask, self).__getstate__()
if 'dataset' in state:
del state['dataset']
if 'snapshots' in state:
del state['snapshots']
if '_labels' in state:
del state['_labels']
if '_hw_socketio_thread' in state:
del state['_hw_socketio_thread']
return state
def __setstate__(self, state):
if state['pickver_task_train'] < 2:
state['train_outputs'] = OrderedDict()
state['val_outputs'] = OrderedDict()
tl = state.pop('train_loss_updates', None)
vl = state.pop('val_loss_updates', None)
va = state.pop('val_accuracy_updates', None)
lr = state.pop('lr_updates', None)
if tl:
state['train_outputs']['epoch'] = NetworkOutput('Epoch', [x[0] for x in tl])
state['train_outputs']['loss'] = NetworkOutput('SoftmaxWithLoss', [x[1] for x in tl])
state['train_outputs']['learning_rate'] = NetworkOutput('LearningRate', [x[1] for x in lr])
if vl:
state['val_outputs']['epoch'] = NetworkOutput('Epoch', [x[0] for x in vl])
if va:
state['val_outputs']['accuracy'] = NetworkOutput('Accuracy', [x[1] / 100 for x in va])
state['val_outputs']['loss'] = NetworkOutput('SoftmaxWithLoss', [x[1] for x in vl])
if state['use_mean'] is True:
state['use_mean'] = 'pixel'
elif state['use_mean'] is False:
state['use_mean'] = 'none'
state['pickver_task_train'] = PICKLE_VERSION
super(TrainTask, self).__setstate__(state)
self.snapshots = []
self.timeline_traces = []
self.dataset = None
@override
def offer_resources(self, resources):
if 'gpus' not in resources:
return None
if not resources['gpus']:
return {} # don't use a GPU at all
if self.gpu_count is not None:
identifiers = []
for resource in resources['gpus']:
if resource.remaining() >= 1:
identifiers.append(resource.identifier)
if len(identifiers) == self.gpu_count:
break
if len(identifiers) == self.gpu_count:
return {'gpus': [(i, 1) for i in identifiers]}
else:
return None
elif self.selected_gpus is not None:
all_available = True
for i in self.selected_gpus:
available = False
for gpu in resources['gpus']:
if i == gpu.identifier:
if gpu.remaining() >= 1:
available = True
break
if not available:
all_available = False
break
if all_available:
return {'gpus': [(i, 1) for i in self.selected_gpus]}
else:
return None
return None
@override
def before_run(self):
# start a thread which sends SocketIO updates about hardware utilization
gpus = None
if 'gpus' in self.current_resources:
gpus = [identifier for (identifier, value) in self.current_resources['gpus']]
self._hw_socketio_thread = gevent.spawn(
self.hw_socketio_updater,
gpus)
def hw_socketio_updater(self, gpus):
"""
This thread sends SocketIO messages about hardware utilization
to connected clients
Arguments:
gpus -- a list of identifiers for the GPUs currently being used
"""
from digits.webapp import app, socketio
devices = []
if gpus is not None:
for index in gpus:
device = device_query.get_device(index)
if device:
devices.append((index, device))
else:
raise RuntimeError('Failed to load gpu information for GPU #"%s"' % index)
# this thread continues until killed in after_run()
while True:
# CPU (Non-GPU) Info
data_cpu = {}
if hasattr(self, 'p') and self.p is not None:
data_cpu['pid'] = self.p.pid
try:
ps = psutil.Process(self.p.pid) # 'self.p' is the system call object
if ps.is_running():
if psutil.version_info[0] >= 2:
data_cpu['cpu_pct'] = ps.cpu_percent(interval=1)
data_cpu['mem_pct'] = ps.memory_percent()
data_cpu['mem_used'] = ps.memory_info().rss
else:
data_cpu['cpu_pct'] = ps.get_cpu_percent(interval=1)
data_cpu['mem_pct'] = ps.get_memory_percent()
data_cpu['mem_used'] = ps.get_memory_info().rss
except (psutil.NoSuchProcess, psutil.AccessDenied):
# In the rare case that the process crashed instantly or went zombie, report nothing
pass
data_gpu = []
for index, device in devices:
update = {'name': device.name, 'index': index}
nvml_info = device_query.get_nvml_info(index)
if nvml_info is not None:
update.update(nvml_info)
data_gpu.append(update)
with app.app_context():
html = flask.render_template('models/gpu_utilization.html',
data_gpu=data_gpu,
data_cpu=data_cpu)
socketio.emit('task update',
{
'task': self.html_id(),
'update': 'gpu_utilization',
'html': html,
},
namespace='/jobs',
room=self.job_id,
)
gevent.sleep(1)
def send_progress_update(self, epoch):
"""
Sends socketio message about the current progress
"""
if self.current_epoch == epoch:
return
self.current_epoch = epoch
self.progress = epoch / self.train_epochs
self.emit_progress_update()
def save_train_output(self, *args):
"""
Save output to self.train_outputs
"""
from digits.webapp import socketio
if not self.save_output(self.train_outputs, *args):
return
if self.last_train_update and (time.time() - self.last_train_update) < 5:
return
self.last_train_update = time.time()
self.logger.debug('Training %s%% complete.' % round(100 * self.current_epoch / self.train_epochs, 2))
# loss graph data
data = self.combined_graph_data()
if data:
socketio.emit('task update',
{
'task': self.html_id(),
'update': 'combined_graph',
'data': data,
},
namespace='/jobs',
room=self.job_id,
)
if data['columns']:
# isolate the Loss column data for the sparkline
graph_data = data['columns'][0][1:]
socketio.emit('task update',
{
'task': self.html_id(),
'job_id': self.job_id,
'update': 'combined_graph',
'data': graph_data,
},
namespace='/jobs',
room='job_management',
)
# lr graph data
data = self.lr_graph_data()
if data:
socketio.emit('task update',
{
'task': self.html_id(),
'update': 'lr_graph',
'data': data,
},
namespace='/jobs',
room=self.job_id,
)
def save_val_output(self, *args):
"""
Save output to self.val_outputs
"""
from digits.webapp import socketio
if not self.save_output(self.val_outputs, *args):
return
# loss graph data
data = self.combined_graph_data()
if data:
socketio.emit('task update',
{
'task': self.html_id(),
'update': 'combined_graph',
'data': data,
},
namespace='/jobs',
room=self.job_id,
)
def save_output(self, d, name, kind, value):
"""
Save output to self.train_outputs or self.val_outputs
Returns true if all outputs for this epoch have been added
Arguments:
d -- the dictionary where the output should be stored
name -- name of the output (e.g. "accuracy")
kind -- the type of outputs (e.g. "Accuracy")
value -- value for this output (e.g. 0.95)
"""
# don't let them be unicode
name = str(name)
kind = str(kind)
# update d['epoch']
if 'epoch' not in d:
d['epoch'] = NetworkOutput('Epoch', [self.current_epoch])
elif d['epoch'].data[-1] != self.current_epoch:
d['epoch'].data.append(self.current_epoch)
if name not in d:
d[name] = NetworkOutput(kind, [])
epoch_len = len(d['epoch'].data)
name_len = len(d[name].data)
# save to back of d[name]
if name_len > epoch_len:
raise Exception('Received a new output without being told the new epoch')
elif name_len == epoch_len:
# already exists
if isinstance(d[name].data[-1], list):
d[name].data[-1].append(value)
else:
d[name].data[-1] = [d[name].data[-1], value]
elif name_len == epoch_len - 1:
# expected case
d[name].data.append(value)
else:
# we might have missed one
for _ in xrange(epoch_len - name_len - 1):
d[name].data.append(None)
d[name].data.append(value)
for key in d:
if key not in ['epoch', 'learning_rate']:
if len(d[key].data) != epoch_len:
return False
return True
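# Example (illustrative values): with self.current_epoch == 0.5,
# save_output(d, 'loss', 'SoftmaxWithLoss', 2.3) followed by
# save_output(d, 'accuracy', 'Accuracy', 0.1) leaves
#   d['epoch']    == NetworkOutput('Epoch', [0.5])
#   d['loss']     == NetworkOutput('SoftmaxWithLoss', [2.3])
#   d['accuracy'] == NetworkOutput('Accuracy', [0.1])
# and each call returns True as long as every output seen so far has one
# value for the current epoch.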
@override
def after_run(self):
if hasattr(self, '_hw_socketio_thread'):
self._hw_socketio_thread.kill()
def detect_snapshots(self):
"""
Populate self.snapshots with snapshots that exist on disk
Returns True if at least one usable snapshot is found
"""
return False
def snapshot_list(self):
"""
Returns an array of arrays for creating an HTML select field
"""
return [[s[1], 'Epoch #%s' % s[1]] for s in reversed(self.snapshots)]
def est_next_snapshot(self):
"""
Returns the estimated time in seconds until the next snapshot is taken
"""
return None
def can_view_weights(self):
"""
Returns True if this Task can visualize the weights of each layer for a given model
"""
raise NotImplementedError()
def view_weights(self, model_epoch=None, layers=None):
"""
View the weights for a specific model and layer[s]
"""
return None
def can_view_activations(self):
"""
Returns True if this Task can visualize the activations of a model after inference
"""
raise NotImplementedError()
def infer_one(self, data, model_epoch=None, layers=None):
"""
Run inference on one input
"""
return None
def can_infer_many(self):
"""
Returns True if this Task can run inference on many inputs
"""
raise NotImplementedError()
def infer_many(self, data, model_epoch=None):
"""
Run inference on many inputs
"""
return None
def get_snapshot(self, epoch=-1, download=False):
"""
return snapshot file for specified epoch
"""
snapshot_filename = None
if len(self.snapshots) == 0:
return "no snapshots"
if epoch == -1 or not epoch:
epoch = self.snapshots[-1][1]
snapshot_filename = self.snapshots[-1][0]
else:
for f, e in self.snapshots:
if e == epoch:
snapshot_filename = f
break
if not snapshot_filename:
raise ValueError('Invalid epoch')
return snapshot_filename
def get_snapshot_filename(self, epoch=-1):
"""
Return the filename for the specified epoch
"""
path, name = os.path.split(self.get_snapshot(epoch))
return name
def get_labels(self):
"""
Read labels from labels_file and return them in a list
"""
# The labels might be set already
if hasattr(self, '_labels') and self._labels and len(self._labels) > 0:
return self._labels
assert hasattr(self.dataset, 'labels_file'), 'labels_file not set'
assert self.dataset.labels_file, 'labels_file not set'
assert os.path.exists(self.dataset.path(self.dataset.labels_file)), 'labels_file does not exist: {}'.format(
self.dataset.path(self.dataset.labels_file)
)
labels = []
with open(self.dataset.path(self.dataset.labels_file)) as infile:
for line in infile:
label = line.strip()
if label:
labels.append(label)
assert len(labels) > 0, 'no labels in labels_file'
self._labels = labels
return self._labels
def lr_graph_data(self):
"""
Returns learning rate data formatted for a C3.js graph
Keyword arguments:
"""
if not self.train_outputs or 'epoch' not in self.train_outputs or 'learning_rate' not in self.train_outputs:
return None
# return 100-200 values or fewer
stride = max(len(self.train_outputs['epoch'].data) / 100, 1)
e = ['epoch'] + self.train_outputs['epoch'].data[::stride]
lr = ['lr'] + self.train_outputs['learning_rate'].data[::stride]
return {
'columns': [e, lr],
'xs': {
'lr': 'epoch'
},
'names': {
'lr': 'Learning Rate'
},
}
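# Example (illustrative values) of the returned C3.js structure:
#   {'columns': [['epoch', 0.25, 0.5, 0.75], ['lr', 0.01, 0.01, 0.001]],
#    'xs': {'lr': 'epoch'},
#    'names': {'lr': 'Learning Rate'}}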
def detect_timeline_traces(self):
"""
Populate self.timeline_traces with snapshots that exist on disk
Returns True if at least one usable snapshot is found
"""
return False
def has_timeline_traces(self):
"""
Evaluates if there are timeline traces to be viewed at all
"""
return len(self.timeline_traces) > 0
def timeline_trace(self, tid):
"""
Returns the data of a selected timeline trace
"""
for item in self.timeline_traces:
if item[1] == tid:
fn = item[0]
with open(fn, 'r') as file_data:
return file_data.read()
raise ValueError('Requested timeline not found in timeline list')
def timeline_trace_list(self):
"""
Returns an array of timeline trace id's for creating an HTML select field
"""
return [[s[1], 'Trace #%s' % s[1]] for s in reversed(self.timeline_traces)]
def combined_graph_data(self, cull=True):
"""
Returns all train/val outputs in data for one C3.js graph
Keyword arguments:
cull -- if True, cut down the number of data points returned to a reasonable size
"""
data = {
'columns': [],
'xs': {},
'axes': {},
'names': {},
}
added_train_data = False
added_val_data = False
if self.train_outputs and 'epoch' in self.train_outputs:
if cull:
# max 200 data points
stride = max(len(self.train_outputs['epoch'].data) / 100, 1)
else:
# return all data
stride = 1
for name, output in self.train_outputs.iteritems():
if name not in ['epoch', 'learning_rate']:
col_id = '%s-train' % name
data['xs'][col_id] = 'train_epochs'
data['names'][col_id] = '%s (train)' % name
if 'accuracy' in output.kind.lower() or 'accuracy' in name.lower():
data['columns'].append([col_id] + [
(100 * x if x is not None else 'none')
for x in output.data[::stride]])
data['axes'][col_id] = 'y2'
else:
data['columns'].append([col_id] + [
(x if x is not None else 'none')
for x in output.data[::stride]])
added_train_data = True
if added_train_data:
data['columns'].append(['train_epochs'] + self.train_outputs['epoch'].data[::stride])
if self.val_outputs and 'epoch' in self.val_outputs:
if cull:
# max 200 data points
stride = max(len(self.val_outputs['epoch'].data) / 100, 1)
else:
# return all data
stride = 1
for name, output in self.val_outputs.iteritems():
if name not in ['epoch']:
col_id = '%s-val' % name
data['xs'][col_id] = 'val_epochs'
data['names'][col_id] = '%s (val)' % name
if 'accuracy' in output.kind.lower() or 'accuracy' in name.lower():
data['columns'].append([col_id] + [
(100 * x if x is not None else 'none')
for x in output.data[::stride]])
data['axes'][col_id] = 'y2'
else:
data['columns'].append([col_id] + [
(x if x is not None else 'none')
for x in output.data[::stride]])
added_val_data = True
if added_val_data:
data['columns'].append(['val_epochs'] + self.val_outputs['epoch'].data[::stride])
if added_train_data:
return data
else:
# return None if only validation data exists
# helps with ordering of columns in graph
return None
# return id of framework used for training
def get_framework_id(self):
"""
Returns a string
"""
return self.framework_id
def get_model_files(self):
"""
return path to model file
"""
raise NotImplementedError()
def get_network_desc(self):
"""
return text description of model
"""
raise NotImplementedError()
def get_task_stats(self, epoch=-1):
"""
return a dictionary of task statistics
"""
raise NotImplementedError()
| DIGITS-master | digits/model/tasks/train.py |
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import operator
import os
import re
import shutil
import subprocess
import tempfile
import time
import h5py
import numpy as np
import PIL.Image
from .train import TrainTask
import digits
from digits import utils
from digits.config import config_value
from digits.utils import subclass, override, constants
# Must import after importing digits.config
import caffe_pb2
# NOTE: Increment this every time the pickled object changes
PICKLE_VERSION = 1
# Constants
TORCH_MODEL_FILE = 'model.lua'
TORCH_SNAPSHOT_PREFIX = 'snapshot'
def subprocess_visible_devices(gpus):
"""
Calculates CUDA_VISIBLE_DEVICES for a subprocess
"""
if not isinstance(gpus, list):
raise ValueError('gpus should be a list')
gpus = [int(g) for g in gpus]
old_cvd = os.environ.get('CUDA_VISIBLE_DEVICES', None)
if old_cvd is None:
real_gpus = gpus
else:
map_visible_to_real = {}
for visible, real in enumerate(old_cvd.split(',')):
map_visible_to_real[visible] = int(real)
real_gpus = []
for visible_gpu in gpus:
real_gpus.append(map_visible_to_real[visible_gpu])
return ','.join(str(g) for g in real_gpus)
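# Example (illustrative): if the parent process was started with
# CUDA_VISIBLE_DEVICES='2,3', visible GPU indices 0 and 1 map back to the
# real devices 2 and 3, so subprocess_visible_devices([1]) returns '3';
# with CUDA_VISIBLE_DEVICES unset it simply returns '1'.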
@subclass
class TorchTrainTask(TrainTask):
"""
Trains a torch model
"""
TORCH_LOG = 'torch_output.log'
def __init__(self, **kwargs):
"""
Arguments:
network -- a NetParameter defining the network
"""
super(TorchTrainTask, self).__init__(**kwargs)
# save network description to file
with open(os.path.join(self.job_dir, TORCH_MODEL_FILE), "w") as outfile:
outfile.write(self.network)
self.pickver_task_torch_train = PICKLE_VERSION
self.current_epoch = 0
self.loaded_snapshot_file = None
self.loaded_snapshot_epoch = None
self.image_mean = None
self.classifier = None
self.solver = None
self.model_file = TORCH_MODEL_FILE
self.train_file = constants.TRAIN_DB
self.val_file = constants.VAL_DB
self.snapshot_prefix = TORCH_SNAPSHOT_PREFIX
self.log_file = self.TORCH_LOG
self.digits_version = digits.__version__
def __getstate__(self):
state = super(TorchTrainTask, self).__getstate__()
# Don't pickle these things
if 'labels' in state:
del state['labels']
if 'image_mean' in state:
del state['image_mean']
if 'classifier' in state:
del state['classifier']
if 'torch_log' in state:
del state['torch_log']
return state
def __setstate__(self, state):
super(TorchTrainTask, self).__setstate__(state)
# Make changes to self
self.loaded_snapshot_file = None
self.loaded_snapshot_epoch = None
# These things don't get pickled
self.image_mean = None
self.classifier = None
# Task overrides
@override
def name(self):
return 'Train Torch Model'
@override
def before_run(self):
super(TorchTrainTask, self).before_run()
self.torch_log = open(self.path(self.TORCH_LOG), 'a')
self.saving_snapshot = False
self.receiving_train_output = False
self.receiving_val_output = False
self.last_train_update = None
self.displaying_network = False
self.temp_unrecognized_output = []
return True
def create_mean_file(self):
filename = os.path.join(self.job_dir, constants.MEAN_FILE_IMAGE)
# don't recreate file if it already exists
if not os.path.exists(filename):
mean_file = self.dataset.get_mean_file()
assert mean_file is not None and mean_file.endswith('.binaryproto'), \
'Mean subtraction required but dataset has no mean file in .binaryproto format'
blob = caffe_pb2.BlobProto()
with open(self.dataset.path(mean_file), 'rb') as infile:
blob.ParseFromString(infile.read())
data = np.array(blob.data, dtype=np.uint8).reshape(blob.channels, blob.height, blob.width)
if blob.channels == 3:
# converting from BGR to RGB
data = data[[2, 1, 0], ...] # channel swap
# convert to (height, width, channels)
data = data.transpose((1, 2, 0))
else:
assert blob.channels == 1
# convert to (height, width)
data = data[0]
# save to file
image = PIL.Image.fromarray(data)
image.save(filename)
return filename
@override
def task_arguments(self, resources, env):
dataset_backend = self.dataset.get_backend()
assert dataset_backend == 'lmdb' or dataset_backend == 'hdf5'
args = [config_value('torch')['executable'],
os.path.join(
os.path.dirname(os.path.abspath(digits.__file__)),
'tools', 'torch', 'wrapper.lua'),
'main.lua',
'--network=%s' % self.model_file.split(".")[0],
'--epoch=%d' % int(self.train_epochs),
'--networkDirectory=%s' % self.job_dir,
'--save=%s' % self.job_dir,
'--snapshotPrefix=%s' % self.snapshot_prefix,
'--snapshotInterval=%s' % self.snapshot_interval,
'--learningRate=%s' % self.learning_rate,
'--policy=%s' % str(self.lr_policy['policy']),
'--dbbackend=%s' % dataset_backend
]
if self.batch_size is not None:
args.append('--batchSize=%d' % self.batch_size)
if self.use_mean != 'none':
filename = self.create_mean_file()
args.append('--mean=%s' % filename)
if hasattr(self.dataset, 'labels_file'):
args.append('--labels=%s' % self.dataset.path(self.dataset.labels_file))
train_feature_db_path = self.dataset.get_feature_db_path(constants.TRAIN_DB)
train_label_db_path = self.dataset.get_label_db_path(constants.TRAIN_DB)
val_feature_db_path = self.dataset.get_feature_db_path(constants.VAL_DB)
val_label_db_path = self.dataset.get_label_db_path(constants.VAL_DB)
args.append('--train=%s' % train_feature_db_path)
if train_label_db_path:
args.append('--train_labels=%s' % train_label_db_path)
if val_feature_db_path:
args.append('--validation=%s' % val_feature_db_path)
if val_label_db_path:
args.append('--validation_labels=%s' % val_label_db_path)
# learning rate policy input parameters
if self.lr_policy['policy'] == 'fixed':
pass
elif self.lr_policy['policy'] == 'step':
args.append('--gamma=%s' % self.lr_policy['gamma'])
args.append('--stepvalues=%s' % self.lr_policy['stepsize'])
elif self.lr_policy['policy'] == 'multistep':
args.append('--stepvalues=%s' % self.lr_policy['stepvalue'])
args.append('--gamma=%s' % self.lr_policy['gamma'])
elif self.lr_policy['policy'] == 'exp':
args.append('--gamma=%s' % self.lr_policy['gamma'])
elif self.lr_policy['policy'] == 'inv':
args.append('--gamma=%s' % self.lr_policy['gamma'])
args.append('--power=%s' % self.lr_policy['power'])
elif self.lr_policy['policy'] == 'poly':
args.append('--power=%s' % self.lr_policy['power'])
elif self.lr_policy['policy'] == 'sigmoid':
args.append('--stepvalues=%s' % self.lr_policy['stepsize'])
args.append('--gamma=%s' % self.lr_policy['gamma'])
if self.shuffle:
args.append('--shuffle=yes')
if self.crop_size:
args.append('--crop=yes')
args.append('--croplen=%d' % self.crop_size)
if self.use_mean == 'pixel':
args.append('--subtractMean=pixel')
elif self.use_mean == 'image':
args.append('--subtractMean=image')
else:
args.append('--subtractMean=none')
if self.random_seed is not None:
args.append('--seed=%s' % self.random_seed)
if self.solver_type == 'SGD':
args.append('--optimization=sgd')
elif self.solver_type == 'NESTEROV':
args.append('--optimization=nag')
elif self.solver_type == 'ADAGRAD':
args.append('--optimization=adagrad')
elif self.solver_type == 'RMSPROP':
args.append('--optimization=rmsprop')
elif self.solver_type == 'ADADELTA':
args.append('--optimization=adadelta')
elif self.solver_type == 'ADAM':
args.append('--optimization=adam')
else:
raise ValueError('Unknown solver_type %s' % self.solver_type)
if self.val_interval > 0:
args.append('--interval=%s' % self.val_interval)
if 'gpus' in resources:
identifiers = []
for identifier, value in resources['gpus']:
identifiers.append(identifier)
# make all selected GPUs visible to the Torch 'th' process.
# don't make other GPUs visible though since Torch will load
# CUDA libraries and allocate memory on all visible GPUs by
# default.
env['CUDA_VISIBLE_DEVICES'] = subprocess_visible_devices(identifiers)
# switch to GPU mode
args.append('--type=cuda')
else:
# switch to CPU mode
args.append('--type=float')
if self.pretrained_model:
filenames = self.pretrained_model.split(os.path.pathsep)
if len(filenames) > 1:
raise ValueError('Torch does not support multiple pretrained model files')
args.append('--weights=%s' % self.path(filenames[0]))
# Augmentations
assert self.data_aug['flip'] in ['none', 'fliplr', 'flipud', 'fliplrud'], 'Bad or unknown flag "flip"'
args.append('--augFlip=%s' % self.data_aug['flip'])
assert self.data_aug['quad_rot'] in ['none', 'rot90', 'rot180', 'rotall'], 'Bad or unknown flag "quad_rot"'
args.append('--augQuadRot=%s' % self.data_aug['quad_rot'])
if self.data_aug['rot']:
args.append('--augRot=%s' % self.data_aug['rot'])
if self.data_aug['scale']:
args.append('--augScale=%s' % self.data_aug['scale'])
if self.data_aug['noise']:
args.append('--augNoise=%s' % self.data_aug['noise'])
if self.data_aug['hsv_use']:
args.append('--augHSVh=%s' % self.data_aug['hsv_h'])
args.append('--augHSVs=%s' % self.data_aug['hsv_s'])
args.append('--augHSVv=%s' % self.data_aug['hsv_v'])
else:
args.append('--augHSVh=0')
args.append('--augHSVs=0')
args.append('--augHSVv=0')
return args
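# Example (illustrative -- executable, paths and values below are made up):
# for a 30-epoch LMDB classification job on one GPU, the returned list would
# look roughly like
#   ['th', '.../digits/tools/torch/wrapper.lua', 'main.lua', '--network=model',
#    '--epoch=30', '--networkDirectory=<job_dir>', '--save=<job_dir>',
#    '--snapshotPrefix=snapshot', '--snapshotInterval=1', '--learningRate=0.01',
#    '--policy=step', '--dbbackend=lmdb', ..., '--type=cuda']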
@override
def process_output(self, line):
regex = re.compile('\x1b\[[0-9;]*m', re.UNICODE) # TODO: need to include regular expression for MAC color codes
line = regex.sub('', line).strip()
self.torch_log.write('%s\n' % line)
self.torch_log.flush()
# parse torch output
timestamp, level, message = self.preprocess_output_torch(line)
# return false when unrecognized output is encountered
if not level:
# network display in progress
if self.displaying_network:
self.temp_unrecognized_output.append(line)
return True
return False
if not message:
return True
# network display ends
if self.displaying_network:
if message.startswith('Network definition ends'):
self.temp_unrecognized_output = []
self.displaying_network = False
return True
# by default Lua prints infinite numbers as 'inf' however Torch tensor may use 'nan' to represent infinity
float_exp = '([-]?inf|nan|[-+]?[0-9]*\.?[0-9]+(e[-+]?[0-9]+)?)'
# loss and learning rate updates
match = re.match(r'Training \(epoch (\d+\.?\d*)\): \w*loss\w* = %s, lr = %s' % (float_exp, float_exp), message)
if match:
index = float(match.group(1))
l = match.group(2)
assert not('inf' in l or 'nan' in l), \
'Network reported %s for training loss. Try decreasing your learning rate.' % l
l = float(l)
lr = match.group(4)
lr = float(lr)
# epoch updates
self.send_progress_update(index)
self.save_train_output('loss', 'SoftmaxWithLoss', l)
self.save_train_output('learning_rate', 'LearningRate', lr)
self.logger.debug(message)
return True
# testing loss and accuracy updates
match = re.match(r'Validation \(epoch (\d+\.?\d*)\): \w*loss\w* = %s(, accuracy = %s)?' %
(float_exp, float_exp), message, flags=re.IGNORECASE)
if match:
index = float(match.group(1))
l = match.group(2)
a = match.group(5)
# note: the validation loss may have diverged; however,
# if the training loss is still finite, there is a slim possibility
# that the network keeps learning something useful, so we don't treat
# infinite validation loss as a fatal error
if not('inf' in l or 'nan' in l):
l = float(l)
self.logger.debug('Network validation loss #%s: %s' % (index, l))
# epoch updates
self.send_progress_update(index)
self.save_val_output('loss', 'SoftmaxWithLoss', l)
if a and a.lower() != 'inf' and a.lower() != '-inf':
a = float(a)
self.logger.debug('Network accuracy #%s: %s' % (index, a))
self.save_val_output('accuracy', 'Accuracy', a)
return True
# snapshot saved
if self.saving_snapshot:
if not message.startswith('Snapshot saved'):
self.logger.warning(
'Torch output format seems to have changed. '
'Expected "Snapshot saved..." after "Snapshotting to..."')
else:
self.logger.info('Snapshot saved.') # to print file name here, you can use "message"
self.detect_snapshots()
self.send_snapshot_update()
self.saving_snapshot = False
return True
# snapshot starting
match = re.match(r'Snapshotting to (.*)\s*$', message)
if match:
self.saving_snapshot = True
return True
# network display starting
if message.startswith('Network definition:'):
self.displaying_network = True
return True
if level in ['error', 'critical']:
self.logger.error('%s: %s' % (self.name(), message))
self.exception = message
return True
# skip remaining info and warn messages
return True
@staticmethod
def preprocess_output_torch(line):
"""
Takes line of output and parses it according to Torch's output format
Returns (timestamp, level, message) or (None, None, None)
"""
# NOTE: This must change when the logging format changes
# YYYY-MM-DD HH:MM:SS [LEVEL ] message
match = re.match(r'(\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2})\s\[(\w+)\s*]\s+(\S.*)$', line)
if match:
timestamp = time.mktime(time.strptime(match.group(1), '%Y-%m-%d %H:%M:%S'))
level = match.group(2)
message = match.group(3)
if level == 'INFO':
level = 'info'
elif level == 'WARNING':
level = 'warning'
elif level == 'ERROR':
level = 'error'
elif level == 'FAIL': # FAIL
level = 'critical'
return (timestamp, level, message)
else:
return (None, None, None)
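# Example (illustrative log line matching the format parsed above):
#   '2017-01-23 13:05:26 [INFO ] Training (epoch 1.0): loss = 0.5, lr = 0.01'
# would yield level='info' and
# message='Training (epoch 1.0): loss = 0.5, lr = 0.01'.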
def send_snapshot_update(self):
"""
Sends socketio message about the snapshot list
"""
# TODO: move to TrainTask
from digits.webapp import socketio
socketio.emit('task update',
{
'task': self.html_id(),
'update': 'snapshots',
'data': self.snapshot_list(),
},
namespace='/jobs',
room=self.job_id,
)
# TrainTask overrides
@override
def after_run(self):
if self.temp_unrecognized_output:
if self.traceback:
self.traceback = self.traceback + ('\n'.join(self.temp_unrecognized_output))
else:
self.traceback = '\n'.join(self.temp_unrecognized_output)
self.temp_unrecognized_output = []
self.torch_log.close()
@override
def after_runtime_error(self):
if os.path.exists(self.path(self.TORCH_LOG)):
output = subprocess.check_output(['tail', '-n40', self.path(self.TORCH_LOG)])
lines = []
for line in output.split('\n'):
# parse torch header
timestamp, level, message = self.preprocess_output_torch(line)
if message:
lines.append(message)
# return the last 20 lines
traceback = '\n\nLast output:\n' + '\n'.join(lines[len(lines) - 20:]) if len(lines) > 0 else ''
if self.traceback:
self.traceback = self.traceback + traceback
else:
self.traceback = traceback
if 'DIGITS_MODE_TEST' in os.environ:
print output
@override
def detect_snapshots(self):
self.snapshots = []
snapshot_dir = os.path.join(self.job_dir, os.path.dirname(self.snapshot_prefix))
snapshots = []
for filename in os.listdir(snapshot_dir):
# find models
match = re.match(r'%s_(\d+)\.?(\d*)(_Weights|_Model)\.t7' %
os.path.basename(self.snapshot_prefix), filename)
if match:
epoch = 0
if match.group(2) == '':
epoch = int(match.group(1))
else:
epoch = float(match.group(1) + '.' + match.group(2))
snapshots.append((
os.path.join(snapshot_dir, filename),
epoch
)
)
self.snapshots = sorted(snapshots, key=lambda tup: tup[1])
return len(self.snapshots) > 0
@override
def est_next_snapshot(self):
# TODO: Currently this function is not in use; it may need to be implemented in the future
return None
@override
def infer_one(self,
data,
snapshot_epoch=None,
layers=None,
gpu=None,
resize=True):
# resize parameter is unused
return self.infer_one_image(data,
snapshot_epoch=snapshot_epoch,
layers=layers,
gpu=gpu)
def infer_one_image(self, image, snapshot_epoch=None, layers=None, gpu=None):
"""
Classify an image
Returns (predictions, visualizations)
predictions -- an array of [ (label, confidence), ...] for each label, sorted by confidence
visualizations -- an array of (layer_name, activations, weights) for the specified layers
Returns (None, None) if something goes wrong
Arguments:
image -- a np.array
Keyword arguments:
snapshot_epoch -- which snapshot to use
layers -- which layer activation[s] and weight[s] to visualize
"""
temp_image_handle, temp_image_path = tempfile.mkstemp(suffix='.png')
os.close(temp_image_handle)
image = PIL.Image.fromarray(image)
try:
image.save(temp_image_path, format='png')
except KeyError:
error_message = 'Unable to save file to "%s"' % temp_image_path
self.logger.error(error_message)
raise digits.inference.errors.InferenceError(error_message)
file_to_load = self.get_snapshot(snapshot_epoch)
args = [config_value('torch')['executable'],
os.path.join(
os.path.dirname(os.path.abspath(digits.__file__)),
'tools', 'torch', 'wrapper.lua'),
'test.lua',
'--image=%s' % temp_image_path,
'--network=%s' % self.model_file.split(".")[0],
'--networkDirectory=%s' % self.job_dir,
'--snapshot=%s' % file_to_load,
'--allPredictions=yes',
]
if hasattr(self.dataset, 'labels_file'):
args.append('--labels=%s' % self.dataset.path(self.dataset.labels_file))
if self.use_mean != 'none':
filename = self.create_mean_file()
args.append('--mean=%s' % filename)
if self.use_mean == 'pixel':
args.append('--subtractMean=pixel')
elif self.use_mean == 'image':
args.append('--subtractMean=image')
else:
args.append('--subtractMean=none')
if self.crop_size:
args.append('--crop=yes')
args.append('--croplen=%d' % self.crop_size)
if layers == 'all':
args.append('--visualization=yes')
args.append('--save=%s' % self.job_dir)
# Convert them all to strings
args = [str(x) for x in args]
regex = re.compile('\x1b\[[0-9;]*m', re.UNICODE) # TODO: need to include regular expression for MAC color codes
self.logger.info('%s classify one task started.' % self.get_framework_id())
unrecognized_output = []
predictions = []
self.visualization_file = None
env = os.environ.copy()
if gpu is not None:
args.append('--type=cuda')
# make only the selected GPU visible
env['CUDA_VISIBLE_DEVICES'] = subprocess_visible_devices([gpu])
else:
args.append('--type=float')
p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=self.job_dir,
close_fds=True,
env=env,
)
try:
while p.poll() is None:
for line in utils.nonblocking_readlines(p.stdout):
if self.aborted.is_set():
p.terminate()
raise digits.inference.errors.InferenceError(
'%s classify one task got aborted. error code - %d'
% (self.get_framework_id(), p.returncode))
if line is not None:
# Remove color codes and whitespace
line = regex.sub('', line).strip()
if line:
if not self.process_test_output(line, predictions, 'one'):
self.logger.warning('%s classify one task unrecognized input: %s' %
(self.get_framework_id(), line.strip()))
unrecognized_output.append(line)
else:
time.sleep(0.05)
except Exception as e:
if p.poll() is None:
p.terminate()
error_message = ''
if type(e) == digits.inference.errors.InferenceError:
error_message = e.__str__()
else:
error_message = '%s classify one task failed with error code %d \n %s' % (
self.get_framework_id(), p.returncode, str(e))
self.logger.error(error_message)
if unrecognized_output:
unrecognized_output = '\n'.join(unrecognized_output)
error_message = error_message + unrecognized_output
raise digits.inference.errors.InferenceError(error_message)
finally:
self.after_test_run(temp_image_path)
if p.returncode != 0:
error_message = '%s classify one task failed with error code %d' % (self.get_framework_id(), p.returncode)
self.logger.error(error_message)
if unrecognized_output:
unrecognized_output = '\n'.join(unrecognized_output)
error_message = error_message + unrecognized_output
raise digits.inference.errors.InferenceError(error_message)
else:
self.logger.info('%s classify one task completed.' % self.get_framework_id())
predictions = {'output': np.array(predictions)}
visualizations = []
if layers == 'all' and self.visualization_file:
vis_db = h5py.File(self.visualization_file, 'r')
# the HDF5 database is organized as follows:
# <root>
# |- layers
# |- 1
# | |- name
# | |- activations
# | |- weights
# |- 2
for layer_id, layer in vis_db['layers'].items():
layer_desc = layer['name'][...].tostring()
if 'Sequential' in layer_desc or 'Parallel' in layer_desc:
# ignore containers
continue
idx = int(layer_id)
# activations
if 'activations' in layer:
data = np.array(layer['activations'][...])
# skip batch dimension
if len(data.shape) > 1 and data.shape[0] == 1:
data = data[0]
vis = utils.image.get_layer_vis_square(data)
mean, std, hist = self.get_layer_statistics(data)
visualizations.append(
{
'id': idx,
'name': layer_desc,
'vis_type': 'Activations',
'vis': vis,
'data_stats': {
'shape': data.shape,
'mean': mean,
'stddev': std,
'histogram': hist,
}
}
)
# weights
if 'weights' in layer:
data = np.array(layer['weights'][...])
if 'Linear' not in layer_desc:
vis = utils.image.get_layer_vis_square(data)
else:
# Linear (inner product) layers have too many weights
# to display
vis = None
mean, std, hist = self.get_layer_statistics(data)
parameter_count = reduce(operator.mul, data.shape, 1)
if 'bias' in layer:
bias = np.array(layer['bias'][...])
parameter_count += reduce(operator.mul, bias.shape, 1)
visualizations.append(
{
'id': idx,
'name': layer_desc,
'vis_type': 'Weights',
'vis': vis,
'param_count': parameter_count,
'data_stats': {
'shape': data.shape,
'mean': mean,
'stddev': std,
'histogram': hist,
}
}
)
# sort by layer ID
visualizations = sorted(visualizations, key=lambda x: x['id'])
return (predictions, visualizations)
def get_layer_statistics(self, data):
"""
Returns statistics for the given layer data:
(mean, standard deviation, histogram)
histogram -- [y, x, ticks]
Arguments:
data -- a np.ndarray
"""
# XXX These calculations can be super slow
mean = np.mean(data)
std = np.std(data)
y, x = np.histogram(data, bins=20)
y = list(y)
ticks = x[[0, len(x) / 2, -1]]
x = [(x[i] + x[i + 1]) / 2.0 for i in xrange(len(x) - 1)]
ticks = list(ticks)
return (mean, std, [y, x, ticks])
def after_test_run(self, temp_image_path):
try:
os.remove(temp_image_path)
except OSError:
pass
def process_test_output(self, line, predictions, test_category):
# parse torch output
timestamp, level, message = self.preprocess_output_torch(line)
# return false when unrecognized output is encountered
if not (level or message):
return False
if not message:
return True
float_exp = '([-]?inf|nan|[-+]?[0-9]*\.?[0-9]+(e[-+]?[0-9]+)?)'
# format of output while testing single image
match = re.match(r'For image \d+, predicted class \d+: \d+ \((.*?)\) %s' % (float_exp), message)
if match:
label = match.group(1)
confidence = match.group(2)
assert not('inf' in confidence or 'nan' in confidence), \
'Network reported %s for confidence value. Please check image and network' % label
confidence = float(confidence)
predictions.append((label, confidence))
return True
# format of output while testing multiple images
match = re.match(r'Predictions for image \d+: (.*)', message)
if match:
values = match.group(1).strip()
# 'values' should contain a JSON representation of
# the prediction
predictions.append(eval(values))
return True
# path to visualization file
match = re.match(r'Saving visualization to (.*)', message)
if match:
self.visualization_file = match.group(1).strip()
return True
# display info and warning messages since we aren't maintaining a separate log file for model testing
if level == 'info':
self.logger.debug('%s classify %s task : %s' % (self.get_framework_id(), test_category, message))
return True
if level == 'warning':
self.logger.warning('%s classify %s task : %s' % (self.get_framework_id(), test_category, message))
return True
if level in ['error', 'critical']:
raise digits.inference.errors.InferenceError(
'%s classify %s task failed with error message - %s'
% (self.get_framework_id(), test_category, message))
return True  # control never reaches this line; it could be removed
@override
def infer_many(self, data, snapshot_epoch=None, gpu=None, resize=True):
# resize parameter is unused
return self.infer_many_images(data, snapshot_epoch=snapshot_epoch, gpu=gpu)
def infer_many_images(self, images, snapshot_epoch=None, gpu=None):
"""
Returns (labels, results):
labels -- an array of strings
results -- a 2D np array:
[
[image0_label0_confidence, image0_label1_confidence, ...],
[image1_label0_confidence, image1_label1_confidence, ...],
...
]
Arguments:
images -- a list of np.arrays
Keyword arguments:
snapshot_epoch -- which snapshot to use
"""
# create a temporary folder to store images and a temporary file
# to store a list of paths to the images
temp_dir_path = tempfile.mkdtemp()
try: # this try...finally clause is used to clean up the temp directory in any case
temp_imglist_handle, temp_imglist_path = tempfile.mkstemp(dir=temp_dir_path, suffix='.txt')
for image in images:
temp_image_handle, temp_image_path = tempfile.mkstemp(
dir=temp_dir_path, suffix='.png')
image = PIL.Image.fromarray(image)
try:
image.save(temp_image_path, format='png')
except KeyError:
error_message = 'Unable to save file to "%s"' % temp_image_path
self.logger.error(error_message)
raise digits.inference.errors.InferenceError(error_message)
os.write(temp_imglist_handle, "%s\n" % temp_image_path)
os.close(temp_image_handle)
os.close(temp_imglist_handle)
file_to_load = self.get_snapshot(snapshot_epoch)
args = [config_value('torch')['executable'],
os.path.join(
os.path.dirname(os.path.abspath(digits.__file__)),
'tools', 'torch', 'wrapper.lua'),
'test.lua',
'--testMany=yes',
'--allPredictions=yes', # all predictions are grabbed and formatted as required by DIGITS
'--image=%s' % str(temp_imglist_path),
'--network=%s' % self.model_file.split(".")[0],
'--networkDirectory=%s' % self.job_dir,
'--snapshot=%s' % file_to_load,
]
if hasattr(self.dataset, 'labels_file'):
args.append('--labels=%s' % self.dataset.path(self.dataset.labels_file))
if self.use_mean != 'none':
filename = self.create_mean_file()
args.append('--mean=%s' % filename)
if self.use_mean == 'pixel':
args.append('--subtractMean=pixel')
elif self.use_mean == 'image':
args.append('--subtractMean=image')
else:
args.append('--subtractMean=none')
if self.crop_size:
args.append('--crop=yes')
args.append('--croplen=%d' % self.crop_size)
# Convert them all to strings
args = [str(x) for x in args]
# TODO: need to include regular expression for MAC color codes
regex = re.compile('\x1b\[[0-9;]*m', re.UNICODE)
self.logger.info('%s classify many task started.' % self.name())
env = os.environ.copy()
if gpu is not None:
args.append('--type=cuda')
# make only the selected GPU visible
env['CUDA_VISIBLE_DEVICES'] = subprocess_visible_devices([gpu])
else:
args.append('--type=float')
unrecognized_output = []
predictions = []
p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=self.job_dir,
close_fds=True,
env=env
)
try:
while p.poll() is None:
for line in utils.nonblocking_readlines(p.stdout):
if self.aborted.is_set():
p.terminate()
raise digits.inference.errors.InferenceError(
'%s classify many task got aborted. error code - %d'
% (self.get_framework_id(), p.returncode))
if line is not None:
# Remove whitespace and color codes.
# Color codes are appended to beginning and end of line by torch binary
# i.e., 'th'. Check the below link for more information
# https://groups.google.com/forum/#!searchin/torch7/color$20codes/torch7/8O_0lSgSzuA/Ih6wYg9fgcwJ # noqa
line = regex.sub('', line).strip()
if line:
if not self.process_test_output(line, predictions, 'many'):
self.logger.warning('%s classify many task unrecognized input: %s' %
(self.get_framework_id(), line.strip()))
unrecognized_output.append(line)
else:
time.sleep(0.05)
except Exception as e:
if p.poll() is None:
p.terminate()
error_message = ''
if type(e) == digits.inference.errors.InferenceError:
error_message = e.__str__()
else:
error_message = '%s classify many task failed with error code %d \n %s' % (
self.get_framework_id(), p.returncode, str(e))
self.logger.error(error_message)
if unrecognized_output:
unrecognized_output = '\n'.join(unrecognized_output)
error_message = error_message + unrecognized_output
raise digits.inference.errors.InferenceError(error_message)
if p.returncode != 0:
error_message = '%s classify many task failed with error code %d' % (
self.get_framework_id(), p.returncode)
self.logger.error(error_message)
if unrecognized_output:
unrecognized_output = '\n'.join(unrecognized_output)
error_message = error_message + unrecognized_output
raise digits.inference.errors.InferenceError(error_message)
else:
self.logger.info('%s classify many task completed.' % self.get_framework_id())
finally:
shutil.rmtree(temp_dir_path)
# task.infer_one() expects dictionary in return value
return {'output': np.array(predictions)}
def has_model(self):
"""
Returns True if there is a model that can be used
"""
return len(self.snapshots) != 0
@override
def get_model_files(self):
"""
return paths to model files
"""
return {
"Network": self.model_file
}
@override
def get_network_desc(self):
"""
return text description of network
"""
with open(os.path.join(self.job_dir, TORCH_MODEL_FILE), "r") as infile:
desc = infile.read()
return desc
@override
def get_task_stats(self, epoch=-1):
"""
return a dictionary of task statistics
"""
loc, mean_file = os.path.split(self.dataset.get_mean_file())
stats = {
"image dimensions": self.dataset.get_feature_dims(),
"mean file": mean_file,
"snapshot file": self.get_snapshot_filename(epoch),
"model file": self.model_file,
"framework": "torch"
}
if hasattr(self, "digits_version"):
stats.update({"digits version": self.digits_version})
if hasattr(self.dataset, "resize_mode"):
stats.update({"image resize mode": self.dataset.resize_mode})
if hasattr(self.dataset, "labels_file"):
stats.update({"labels file": self.dataset.labels_file})
return stats
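# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how the regular
# expressions used by preprocess_output_torch() and process_test_output()
# above pick apart a wrapper log line. The sample line below is made up for
# the example; only the patterns are taken from the code above.
# ---------------------------------------------------------------------------
def _demo_parse_torch_output():
    import re
    import time
    # wrapper lines look like "YYYY-MM-DD HH:MM:SS [LEVEL]  message"
    line = '2017-01-01 12:00:00 [INFO ]  For image 1, predicted class 1: 3 (cat) 0.98'
    header = re.match(r'(\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2})\s\[(\w+)\s*]\s+(\S.*)$', line)
    timestamp = time.mktime(time.strptime(header.group(1), '%Y-%m-%d %H:%M:%S'))
    level = header.group(2).lower()  # the real code also maps FAIL -> critical
    message = header.group(3)
    # single-image prediction lines carry the label in parentheses and a float confidence
    float_exp = '([-]?inf|nan|[-+]?[0-9]*\.?[0-9]+(e[-+]?[0-9]+)?)'
    pred = re.match(r'For image \d+, predicted class \d+: \d+ \((.*?)\) %s' % float_exp, message)
    return timestamp, level, (pred.group(1), float(pred.group(2)))

if __name__ == '__main__':
    print _demo_parse_torch_output()  # -> (<timestamp>, 'info', ('cat', 0.98))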
| DIGITS-master | digits/model/tasks/torch_train.py |
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import operator
import os
import re
import shutil
import subprocess
import tempfile
import time
import sys
import h5py
import numpy as np
from .train import TrainTask
import digits
from digits import utils
from digits.utils import subclass, override, constants
import tensorflow as tf
# NOTE: Increment this every time the pickled object changes
PICKLE_VERSION = 1
# Constants
TENSORFLOW_MODEL_FILE = 'network.py'
TENSORFLOW_SNAPSHOT_PREFIX = 'snapshot'
TIMELINE_PREFIX = 'timeline'
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _float_array_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
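# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how the feature
# helpers above are combined into the serialized tf.train.Example records
# that infer_one_image() and infer_many_images() below write to temporary
# .tfrecords files. The 8x8 single-channel image is made up for the example.
# ---------------------------------------------------------------------------
def _demo_write_single_image_record(path):
    image = np.zeros((8, 8, 1), dtype='float')  # HWC layout, one channel
    writer = tf.python_io.TFRecordWriter(path)
    record = tf.train.Example(features=tf.train.Features(feature={
        'height': _int64_feature(image.shape[0]),
        'width': _int64_feature(image.shape[1]),
        'depth': _int64_feature(image.shape[2]),
        'image_raw': _float_array_feature(image.flatten()),
        'label': _int64_feature(0),     # unused at inference time
        'encoding': _int64_feature(0),  # the inference code below always passes 0 here
    }))
    writer.write(record.SerializeToString())
    writer.close()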
def subprocess_visible_devices(gpus):
"""
Calculates CUDA_VISIBLE_DEVICES for a subprocess
"""
if not isinstance(gpus, list):
raise ValueError('gpus should be a list')
gpus = [int(g) for g in gpus]
old_cvd = os.environ.get('CUDA_VISIBLE_DEVICES', None)
if old_cvd is None:
real_gpus = gpus
else:
map_visible_to_real = {}
for visible, real in enumerate(old_cvd.split(',')):
map_visible_to_real[visible] = int(real)
real_gpus = []
for visible_gpu in gpus:
real_gpus.append(map_visible_to_real[visible_gpu])
return ','.join(str(g) for g in real_gpus)
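# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the remapping above
# translates GPU ids that are visible to *this* process back into the real
# ids a child process must be given. The environment values are made up.
# ---------------------------------------------------------------------------
def _demo_subprocess_visible_devices():
    saved = os.environ.get('CUDA_VISIBLE_DEVICES')
    try:
        os.environ['CUDA_VISIBLE_DEVICES'] = '2,3'          # parent sees real GPUs 2,3 as 0,1
        assert subprocess_visible_devices([1]) == '3'       # visible id 1 -> real id 3
        del os.environ['CUDA_VISIBLE_DEVICES']
        assert subprocess_visible_devices([0, 1]) == '0,1'  # unrestricted: ids pass through
    finally:
        if saved is None:
            os.environ.pop('CUDA_VISIBLE_DEVICES', None)
        else:
            os.environ['CUDA_VISIBLE_DEVICES'] = saved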
@subclass
class TensorflowTrainTask(TrainTask):
"""
Trains a tensorflow model
"""
TENSORFLOW_LOG = 'tensorflow_output.log'
def __init__(self, **kwargs):
"""
Arguments:
network -- a NetParameter defining the network
"""
super(TensorflowTrainTask, self).__init__(**kwargs)
# save network description to file
with open(os.path.join(self.job_dir, TENSORFLOW_MODEL_FILE), "w") as outfile:
outfile.write(self.network)
self.pickver_task_tensorflow_train = PICKLE_VERSION
self.current_epoch = 0
self.loaded_snapshot_file = None
self.loaded_snapshot_epoch = None
self.image_mean = None
self.classifier = None
self.solver = None
self.model_file = TENSORFLOW_MODEL_FILE
self.train_file = constants.TRAIN_DB
self.val_file = constants.VAL_DB
self.snapshot_prefix = TENSORFLOW_SNAPSHOT_PREFIX
self.log_file = self.TENSORFLOW_LOG
def __getstate__(self):
state = super(TensorflowTrainTask, self).__getstate__()
# Don't pickle these things
if 'labels' in state:
del state['labels']
if 'image_mean' in state:
del state['image_mean']
if 'classifier' in state:
del state['classifier']
if 'tensorflow_log' in state:
del state['tensorflow_log']
return state
def __setstate__(self, state):
super(TensorflowTrainTask, self).__setstate__(state)
# Make changes to self
self.loaded_snapshot_file = None
self.loaded_snapshot_epoch = None
# These things don't get pickled
self.image_mean = None
self.classifier = None
# Task overrides
@override
def name(self):
return 'Train Tensorflow Model'
@override
def before_run(self):
super(TensorflowTrainTask, self).before_run()
self.tensorflow_log = open(self.path(self.TENSORFLOW_LOG), 'a')
self.saving_snapshot = False
self.receiving_train_output = False
self.receiving_val_output = False
self.last_train_update = None
self.displaying_network = False
self.temp_unrecognized_output = []
return True
@override
def get_snapshot(self, epoch=-1, download=False, frozen_file=False):
"""
return snapshot file for specified epoch
"""
snapshot_pre = None
if len(self.snapshots) == 0:
return "no snapshots"
if epoch == -1 or not epoch:
epoch = self.snapshots[-1][1]
snapshot_pre = self.snapshots[-1][0]
else:
for f, e in self.snapshots:
if e == epoch:
snapshot_pre = f
break
if not snapshot_pre:
raise ValueError('Invalid epoch')
if download:
snapshot_file = snapshot_pre + ".data-00000-of-00001"
meta_file = snapshot_pre + ".meta"
index_file = snapshot_pre + ".index"
snapshot_files = [snapshot_file, meta_file, index_file]
elif frozen_file:
snapshot_files = os.path.join(os.path.dirname(snapshot_pre), "frozen_model.pb")
else:
snapshot_files = snapshot_pre
return snapshot_files
@override
def task_arguments(self, resources, env):
args = [sys.executable,
os.path.join(os.path.dirname(os.path.abspath(digits.__file__)), 'tools', 'tensorflow', 'main.py'),
'--network=%s' % self.model_file,
'--epoch=%d' % int(self.train_epochs),
'--networkDirectory=%s' % self.job_dir,
'--save=%s' % self.job_dir,
'--snapshotPrefix=%s' % self.snapshot_prefix,
'--snapshotInterval=%s' % self.snapshot_interval,
'--lr_base_rate=%s' % self.learning_rate,
'--lr_policy=%s' % str(self.lr_policy['policy'])
]
if self.batch_size is not None:
args.append('--batch_size=%d' % self.batch_size)
if self.use_mean != 'none':
mean_file = self.dataset.get_mean_file()
assert mean_file is not None, 'Failed to retrieve mean file.'
args.append('--mean=%s' % self.dataset.path(mean_file))
if hasattr(self.dataset, 'labels_file'):
args.append('--labels_list=%s' % self.dataset.path(self.dataset.labels_file))
train_feature_db_path = self.dataset.get_feature_db_path(constants.TRAIN_DB)
train_label_db_path = self.dataset.get_label_db_path(constants.TRAIN_DB)
val_feature_db_path = self.dataset.get_feature_db_path(constants.VAL_DB)
val_label_db_path = self.dataset.get_label_db_path(constants.VAL_DB)
args.append('--train_db=%s' % train_feature_db_path)
if train_label_db_path:
args.append('--train_labels=%s' % train_label_db_path)
if val_feature_db_path:
args.append('--validation_db=%s' % val_feature_db_path)
if val_label_db_path:
args.append('--validation_labels=%s' % val_label_db_path)
# learning rate policy input parameters
if self.lr_policy['policy'] == 'fixed':
pass
elif self.lr_policy['policy'] == 'step':
args.append('--lr_gamma=%s' % self.lr_policy['gamma'])
args.append('--lr_stepvalues=%s' % self.lr_policy['stepsize'])
elif self.lr_policy['policy'] == 'multistep':
args.append('--lr_stepvalues=%s' % self.lr_policy['stepvalue'])
args.append('--lr_gamma=%s' % self.lr_policy['gamma'])
elif self.lr_policy['policy'] == 'exp':
args.append('--lr_gamma=%s' % self.lr_policy['gamma'])
elif self.lr_policy['policy'] == 'inv':
args.append('--lr_gamma=%s' % self.lr_policy['gamma'])
args.append('--lr_power=%s' % self.lr_policy['power'])
elif self.lr_policy['policy'] == 'poly':
args.append('--lr_power=%s' % self.lr_policy['power'])
elif self.lr_policy['policy'] == 'sigmoid':
args.append('--lr_stepvalues=%s' % self.lr_policy['stepsize'])
args.append('--lr_gamma=%s' % self.lr_policy['gamma'])
if self.shuffle:
args.append('--shuffle=1')
if self.crop_size:
args.append('--croplen=%d' % self.crop_size)
if self.use_mean == 'pixel':
args.append('--subtractMean=pixel')
elif self.use_mean == 'image':
args.append('--subtractMean=image')
else:
args.append('--subtractMean=none')
if self.random_seed is not None:
args.append('--seed=%s' % self.random_seed)
if self.solver_type == 'SGD':
args.append('--optimization=sgd')
elif self.solver_type == 'ADADELTA':
args.append('--optimization=adadelta')
elif self.solver_type == 'ADAGRAD':
args.append('--optimization=adagrad')
elif self.solver_type == 'ADAGRADDA':
args.append('--optimization=adagradda')
elif self.solver_type == 'MOMENTUM':
args.append('--optimization=momentum')
elif self.solver_type == 'ADAM':
args.append('--optimization=adam')
elif self.solver_type == 'FTRL':
args.append('--optimization=ftrl')
elif self.solver_type == 'RMSPROP':
args.append('--optimization=rmsprop')
else:
raise ValueError('Unknown solver_type %s' % self.solver_type)
if self.val_interval is not None:
args.append('--validation_interval=%d' % self.val_interval)
# if self.traces_interval is not None:
args.append('--log_runtime_stats_per_step=%d' % self.traces_interval)
if 'gpus' in resources:
identifiers = []
for identifier, value in resources['gpus']:
identifiers.append(identifier)
# make all selected GPUs visible to the process.
# don't make other GPUs visible though since the process will load
# CUDA libraries and allocate memory on all visible GPUs by
# default.
env['CUDA_VISIBLE_DEVICES'] = subprocess_visible_devices(identifiers)
if self.pretrained_model:
args.append('--weights=%s' % self.path(self.pretrained_model))
# Augmentations
assert self.data_aug['flip'] in ['none', 'fliplr', 'flipud', 'fliplrud'], 'Bad or unknown flag "flip"'
args.append('--augFlip=%s' % self.data_aug['flip'])
if self.data_aug['noise']:
args.append('--augNoise=%s' % self.data_aug['noise'])
if self.data_aug['contrast']:
args.append('--augContrast=%s' % self.data_aug['contrast'])
if self.data_aug['whitening']:
args.append('--augWhitening=1')
if self.data_aug['hsv_use']:
args.append('--augHSVh=%s' % self.data_aug['hsv_h'])
args.append('--augHSVs=%s' % self.data_aug['hsv_s'])
args.append('--augHSVv=%s' % self.data_aug['hsv_v'])
else:
args.append('--augHSVh=0')
args.append('--augHSVs=0')
args.append('--augHSVv=0')
return args
@override
def process_output(self, line):
self.tensorflow_log.write('%s\n' % line)
self.tensorflow_log.flush()
# parse tensorflow output
timestamp, level, message = self.preprocess_output_tensorflow(line)
# return false when unrecognized output is encountered
if not level:
# network display in progress
if self.displaying_network:
self.temp_unrecognized_output.append(line)
return True
return False
if not message:
return True
# network display ends
if self.displaying_network:
if message.startswith('Network definition ends'):
self.temp_unrecognized_output = []
self.displaying_network = False
return True
# Distinguish between a Validation and Training stage epoch
pattern_stage_epoch = re.compile(r'(Validation|Training)\ \(\w+\ ([^\ ]+)\)\:\ (.*)')
for (stage, epoch, kvlist) in re.findall(pattern_stage_epoch, message):
epoch = float(epoch)
self.send_progress_update(epoch)
pattern_key_val = re.compile(r'([\w\-_]+)\ =\ ([^,^\ ]+)')
# Now iterate through the keys and values on this line dynamically
for (key, value) in re.findall(pattern_key_val, kvlist):
assert not('Inf' in value or 'NaN' in value), 'Network reported %s for %s.' % (value, key)
value = float(value)
if key == 'lr':
key = 'learning_rate' # Convert to special DIGITS key for learning rate
if stage == 'Training':
self.save_train_output(key, key, value)
elif stage == 'Validation':
self.save_val_output(key, key, value)
self.logger.debug('Network validation %s #%s: %s' % (key, epoch, value))
else:
self.logger.error('Unknown stage found other than training or validation: %s' % (stage))
self.logger.debug(message)
return True
# timeline trace saved
if message.startswith('Timeline trace written to'):
self.logger.info(message)
self.detect_timeline_traces()
return True
# snapshot saved
if self.saving_snapshot:
if message.startswith('Snapshot saved'):
self.logger.info(message)
self.detect_snapshots()
self.send_snapshot_update()
self.saving_snapshot = False
return True
# snapshot starting
match = re.match(r'Snapshotting to (.*)\s*$', message)
if match:
self.saving_snapshot = True
return True
# network display starting
if message.startswith('Network definition:'):
self.displaying_network = True
return True
if level in ['error', 'critical']:
self.logger.error('%s: %s' % (self.name(), message))
self.exception = message
return True
# skip remaining info and warn messages
return True
@staticmethod
def preprocess_output_tensorflow(line):
"""
Takes line of output and parses it according to tensorflow's output format
Returns (timestamp, level, message) or (None, None, None)
"""
# NOTE: This must change when the logging format changes
# YYYY-MM-DD HH:MM:SS [LEVEL] message
match = re.match(r'(\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2})\s\[(\w+)\s*]\s+(\S.*)$', line)
if match:
timestamp = time.mktime(time.strptime(match.group(1), '%Y-%m-%d %H:%M:%S'))
level = match.group(2)
message = match.group(3)
if level == 'INFO':
level = 'info'
elif level == 'WARNING':
level = 'warning'
elif level == 'ERROR':
level = 'error'
elif level == 'FAIL': # FAIL
level = 'critical'
return (timestamp, level, message)
else:
# self.logger.warning('Unrecognized task output "%s"' % line)
return (None, None, None)
def send_snapshot_update(self):
"""
Sends socketio message about the snapshot list
"""
# TODO: move to TrainTask
from digits.webapp import socketio
socketio.emit('task update', {'task': self.html_id(),
'update': 'snapshots',
'data': self.snapshot_list()},
namespace='/jobs',
room=self.job_id)
# TrainTask overrides
@override
def after_run(self):
if self.temp_unrecognized_output:
if self.traceback:
self.traceback = self.traceback + ('\n'.join(self.temp_unrecognized_output))
else:
self.traceback = '\n'.join(self.temp_unrecognized_output)
self.temp_unrecognized_output = []
self.tensorflow_log.close()
@override
def after_runtime_error(self):
if os.path.exists(self.path(self.TENSORFLOW_LOG)):
output = subprocess.check_output(['tail', '-n40', self.path(self.TENSORFLOW_LOG)])
lines = []
for line in output.split('\n'):
# parse tensorflow header
timestamp, level, message = self.preprocess_output_tensorflow(line)
if message:
lines.append(message)
# keep only the last 20 parsed lines in the traceback
traceback = '\n\nLast output:\n' + '\n'.join(lines[len(lines)-20:]) if len(lines) > 0 else ''
if self.traceback:
self.traceback = self.traceback + traceback
else:
self.traceback = traceback
if 'DIGITS_MODE_TEST' in os.environ:
print output
@override
def detect_timeline_traces(self):
timeline_traces = []
for filename in os.listdir(self.job_dir):
# find timeline jsons
match = re.match(r'%s_(.*)\.json$' % TIMELINE_PREFIX, filename)
if match:
step = int(match.group(1))
timeline_traces.append((os.path.join(self.job_dir, filename), step))
self.timeline_traces = sorted(timeline_traces, key=lambda tup: tup[1])
return len(self.timeline_traces) > 0
@override
def detect_snapshots(self):
self.snapshots = []
snapshots = []
for filename in os.listdir(self.job_dir):
# find models
match = re.match(r'%s_(\d+)\.?(\d*)\.ckpt\.index$' % self.snapshot_prefix, filename)
if match:
epoch = 0
# remove '.index' suffix from filename
filename = filename[:-6]
if match.group(2) == '':
epoch = int(match.group(1))
else:
epoch = float(match.group(1) + '.' + match.group(2))
snapshots.append((os.path.join(self.job_dir, filename), epoch))
self.snapshots = sorted(snapshots, key=lambda tup: tup[1])
return len(self.snapshots) > 0
@override
def est_next_snapshot(self):
# TODO: Currently this function is not in use. It may need to be implemented in the future.
return None
@override
def infer_one(self,
data,
snapshot_epoch=None,
layers=None,
gpu=None,
resize=True):
# resize parameter is unused
return self.infer_one_image(data,
snapshot_epoch=snapshot_epoch,
layers=layers,
gpu=gpu)
def infer_one_image(self, image, snapshot_epoch=None, layers=None, gpu=None):
"""
Classify an image
Returns (predictions, visualizations)
predictions -- an array of [ (label, confidence), ...] for each label, sorted by confidence
visualizations -- an array of (layer_name, activations, weights) for the specified layers
Returns (None, None) if something goes wrong
Arguments:
image -- a np.array
Keyword arguments:
snapshot_epoch -- which snapshot to use
layers -- which layer activation[s] and weight[s] to visualize
"""
temp_image_handle, temp_image_path = tempfile.mkstemp(suffix='.tfrecords')
os.close(temp_image_handle)
if image.ndim < 3:
image = image[..., np.newaxis]
writer = tf.python_io.TFRecordWriter(temp_image_path)
image = image.astype('float')
record = tf.train.Example(features=tf.train.Features(feature={
'height': _int64_feature(image.shape[0]),
'width': _int64_feature(image.shape[1]),
'depth': _int64_feature(image.shape[2]),
'image_raw': _float_array_feature(image.flatten()),
'label': _int64_feature(0),
'encoding': _int64_feature(0)}))
writer.write(record.SerializeToString())
writer.close()
file_to_load = self.get_snapshot(snapshot_epoch)
args = [sys.executable,
os.path.join(os.path.dirname(os.path.abspath(digits.__file__)), 'tools', 'tensorflow', 'main.py'),
'--inference_db=%s' % temp_image_path,
'--network=%s' % self.model_file,
'--networkDirectory=%s' % self.job_dir,
'--weights=%s' % file_to_load,
'--allPredictions=1',
'--batch_size=1',
]
if hasattr(self.dataset, 'labels_file'):
args.append('--labels_list=%s' % self.dataset.path(self.dataset.labels_file))
if self.use_mean != 'none':
mean_file = self.dataset.get_mean_file()
assert mean_file is not None, 'Failed to retrieve mean file.'
args.append('--mean=%s' % self.dataset.path(mean_file))
if self.use_mean == 'pixel':
args.append('--subtractMean=pixel')
elif self.use_mean == 'image':
args.append('--subtractMean=image')
else:
args.append('--subtractMean=none')
if self.crop_size:
args.append('--croplen=%d' % self.crop_size)
if layers == 'all':
args.append('--visualize_inf=1')
args.append('--save=%s' % self.job_dir)
# Convert them all to strings
args = [str(x) for x in args]
self.logger.info('%s classify one task started.' % self.get_framework_id())
unrecognized_output = []
predictions = []
self.visualization_file = None
env = os.environ.copy()
if gpu is not None:
# make only the selected GPU visible
env['CUDA_VISIBLE_DEVICES'] = subprocess_visible_devices([gpu])
p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=self.job_dir,
close_fds=True,
env=env)
try:
while p.poll() is None:
for line in utils.nonblocking_readlines(p.stdout):
if self.aborted.is_set():
p.terminate()
raise digits.inference.errors.InferenceError('%s classify one task got aborted. error code - %d' % (self.get_framework_id(), p.returncode)) # noqa
if line is not None and len(line) > 1:
if not self.process_test_output(line, predictions, 'one'):
self.logger.warning('%s classify one task unrecognized input: %s' % (
self.get_framework_id(), line.strip()))
unrecognized_output.append(line)
else:
time.sleep(0.05)
except Exception as e:
if p.poll() is None:
p.terminate()
error_message = ''
if type(e) == digits.inference.errors.InferenceError:
error_message = e.__str__()
else:
error_message = '%s classify one task failed with error code %d \n %s' % (
self.get_framework_id(), p.returncode, str(e))
self.logger.error(error_message)
if unrecognized_output:
unrecognized_output = '\n'.join(unrecognized_output)
error_message = error_message + unrecognized_output
raise digits.inference.errors.InferenceError(error_message)
finally:
self.after_test_run(temp_image_path)
if p.returncode != 0:
error_message = '%s classify one task failed with error code %d' % (self.get_framework_id(), p.returncode)
self.logger.error(error_message)
if unrecognized_output:
unrecognized_output = '\n'.join(unrecognized_output)
error_message = error_message + unrecognized_output
raise digits.inference.errors.InferenceError(error_message)
else:
self.logger.info('%s classify one task completed.' % self.get_framework_id())
predictions = {'output': np.array(predictions)}
visualizations = []
if layers == 'all' and self.visualization_file:
vis_db = h5py.File(self.visualization_file, 'r')
# the HDF5 database is organized as follows:
# <root>
# |- layers
# |- 1
# | [attrs] - op
# | [attrs] - var
# | |- activations
# | |- weights
# |- 2
for layer_id, layer in vis_db['layers'].items():
op_name = layer.attrs['op']
var_name = layer.attrs['var']
layer_desc = "%s\n%s" % (op_name, var_name)
idx = int(layer_id)
# activations (tf: operation outputs)
if 'activations' in layer:
data = np.array(layer['activations'][...])
if len(data.shape) > 1 and data.shape[0] == 1:
# skip batch dimension
data = data[0]
if len(data.shape) == 3:
data = data.transpose(2, 0, 1)
elif len(data.shape) == 4:
data = data.transpose(3, 2, 0, 1)
vis = utils.image.get_layer_vis_square(data)
mean, std, hist = self.get_layer_statistics(data)
visualizations.append(
{
'id': idx,
'name': layer_desc,
'vis_type': 'Activations',
'vis': vis,
'data_stats': {
'shape': data.shape,
'mean': mean,
'stddev': std,
'histogram': hist,
}
}
)
# weights (tf: variables)
if 'weights' in layer:
data = np.array(layer['weights'][...])
if len(data.shape) == 3:
data = data.transpose(2, 0, 1)
elif len(data.shape) == 4:
data = data.transpose(3, 2, 0, 1)
if 'MatMul' in layer_desc:
vis = None  # fully-connected (MatMul) layers have too many weights to display
else:
vis = utils.image.get_layer_vis_square(data)
mean, std, hist = self.get_layer_statistics(data)
parameter_count = reduce(operator.mul, data.shape, 1)
visualizations.append(
{
'id': idx,
'name': layer_desc,
'vis_type': 'Weights',
'vis': vis,
'param_count': parameter_count,
'data_stats': {
'shape': data.shape,
'mean': mean,
'stddev': std,
'histogram': hist,
}
}
)
# sort by layer ID
visualizations = sorted(visualizations, key=lambda x: x['id'])
return (predictions, visualizations)
def get_layer_statistics(self, data):
"""
Returns statistics for the given layer data:
(mean, standard deviation, histogram)
histogram -- [y, x, ticks]
Arguments:
data -- a np.ndarray
"""
# These calculations can be super slow
mean = np.mean(data)
std = np.std(data)
y, x = np.histogram(data, bins=20)
y = list(y)
ticks = x[[0, len(x)/2, -1]]
x = [(x[i]+x[i+1])/2.0 for i in xrange(len(x)-1)]
ticks = list(ticks)
return (mean, std, [y, x, ticks])
def after_test_run(self, temp_image_path):
try:
os.remove(temp_image_path)
except OSError:
pass
def process_test_output(self, line, predictions, test_category):
# parse tensorflow output
timestamp, level, message = self.preprocess_output_tensorflow(line)
# return false when unrecognized output is encountered
if not (level or message):
return False
if not message:
return True
float_exp = '([-]?inf|nan|[-+]?[0-9]*\.?[0-9]+(e[-+]?[0-9]+)?)'
# format of output while testing single image
match = re.match(r'For image \d+, predicted class \d+: \d+ \((.*?)\) %s' % (float_exp), message)
if match:
label = match.group(1)
confidence = match.group(2)
assert not('inf' in confidence or 'nan' in confidence), 'Network reported %s for confidence value. Please check image and network' % label # noqa
confidence = float(confidence)
predictions.append((label, confidence))
return True
# format of output while testing multiple images
match = re.match(r'Predictions for image \d+: (.*)', message)
if match:
values = match.group(1).strip()
# 'values' should contain a JSON representation of
# the prediction
predictions.append(eval(values))
return True
# path to visualization file
match = re.match(r'Saving visualization to (.*)', message)
if match:
self.visualization_file = match.group(1).strip()
return True
# display info and warning messages since we aren't maintaining a separate log file for model testing
if level == 'info':
self.logger.debug('%s classify %s task : %s' % (self.get_framework_id(), test_category, message))
return True
if level == 'warning':
self.logger.warning('%s classify %s task : %s' % (self.get_framework_id(), test_category, message))
return True
if level in ['error', 'critical']:
raise digits.inference.errors.InferenceError('%s classify %s task failed with error message - %s' % (
self.get_framework_id(), test_category, message))
return False # control should never reach this line.
@override
def infer_many(self, data, snapshot_epoch=None, gpu=None, resize=True):
# resize parameter is unused
return self.infer_many_images(data, snapshot_epoch=snapshot_epoch, gpu=gpu)
def infer_many_images(self, images, snapshot_epoch=None, gpu=None):
"""
Returns (labels, results):
labels -- an array of strings
results -- a 2D np array:
[
[image0_label0_confidence, image0_label1_confidence, ...],
[image1_label0_confidence, image1_label1_confidence, ...],
...
]
Arguments:
images -- a list of np.arrays
Keyword arguments:
snapshot_epoch -- which snapshot to use
"""
# create a temporary folder to store images and a temporary file
# to store a list of paths to the images
temp_dir_path = tempfile.mkdtemp(suffix='.tfrecords')
try: # this try...finally clause is used to clean up the temp directory in any case
with open(os.path.join(temp_dir_path, 'list.txt'), 'w') as imglist_file:
for image in images:
if image.ndim < 3:
image = image[..., np.newaxis]
image = image.astype('float')
temp_image_handle, temp_image_path = tempfile.mkstemp(dir=temp_dir_path, suffix='.tfrecords')
writer = tf.python_io.TFRecordWriter(temp_image_path)
record = tf.train.Example(features=tf.train.Features(feature={
'height': _int64_feature(image.shape[0]),
'width': _int64_feature(image.shape[1]),
'depth': _int64_feature(image.shape[2]),
'image_raw': _float_array_feature(image.flatten()),
'label': _int64_feature(0),
'encoding': _int64_feature(0)}))
writer.write(record.SerializeToString())
writer.close()
imglist_file.write("%s\n" % temp_image_path)
os.close(temp_image_handle)
file_to_load = self.get_snapshot(snapshot_epoch)
args = [sys.executable,
os.path.join(os.path.dirname(os.path.abspath(digits.__file__)), 'tools', 'tensorflow', 'main.py'),
'--testMany=1',
'--allPredictions=1', # all predictions are grabbed and formatted as required by DIGITS
'--inference_db=%s' % str(temp_dir_path),
'--network=%s' % self.model_file,
'--networkDirectory=%s' % self.job_dir,
'--weights=%s' % file_to_load,
]
if hasattr(self.dataset, 'labels_file'):
args.append('--labels_list=%s' % self.dataset.path(self.dataset.labels_file))
if self.use_mean != 'none':
mean_file = self.dataset.get_mean_file()
assert mean_file is not None, 'Failed to retrieve mean file.'
args.append('--mean=%s' % self.dataset.path(mean_file))
if self.use_mean == 'pixel':
args.append('--subtractMean=pixel')
elif self.use_mean == 'image':
args.append('--subtractMean=image')
else:
args.append('--subtractMean=none')
if self.crop_size:
args.append('--croplen=%d' % self.crop_size)
# Convert them all to strings
args = [str(x) for x in args]
self.logger.info('%s classify many task started.' % self.name())
env = os.environ.copy()
if gpu is not None:
# make only the selected GPU visible
env['CUDA_VISIBLE_DEVICES'] = subprocess_visible_devices([gpu])
unrecognized_output = []
predictions = []
p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=self.job_dir,
close_fds=True,
env=env)
try:
while p.poll() is None:
for line in utils.nonblocking_readlines(p.stdout):
if self.aborted.is_set():
p.terminate()
raise digits.inference.errors.InferenceError('%s classify many task got aborted.'
'error code - %d' % (self.get_framework_id(),
p.returncode))
if line is not None and len(line) > 1:
if not self.process_test_output(line, predictions, 'many'):
self.logger.warning('%s classify many task unrecognized input: %s' % (
self.get_framework_id(), line.strip()))
unrecognized_output.append(line)
else:
time.sleep(0.05)
except Exception as e:
if p.poll() is None:
p.terminate()
error_message = ''
if type(e) == digits.inference.errors.InferenceError:
error_message = e.__str__()
else:
error_message = '%s classify many task failed with error code %d \n %s' % (
self.get_framework_id(), p.returncode, str(e))
self.logger.error(error_message)
if unrecognized_output:
unrecognized_output = '\n'.join(unrecognized_output)
error_message = error_message + unrecognized_output
raise digits.inference.errors.InferenceError(error_message)
if p.returncode != 0:
error_message = '%s classify many task failed with error code %d' % (self.get_framework_id(),
p.returncode)
self.logger.error(error_message)
if unrecognized_output:
unrecognized_output = '\n'.join(unrecognized_output)
error_message = error_message + unrecognized_output
raise digits.inference.errors.InferenceError(error_message)
else:
self.logger.info('%s classify many task completed.' % self.get_framework_id())
finally:
shutil.rmtree(temp_dir_path)
# task.infer_one() expects dictionary in return value
return {'output': np.array(predictions)}
def has_model(self):
"""
Returns True if there is a model that can be used
"""
return len(self.snapshots) != 0
@override
def get_model_files(self):
"""
return paths to model files
"""
return {"Network": self.model_file}
@override
def get_network_desc(self):
"""
return text description of network
"""
with open(os.path.join(self.job_dir, TENSORFLOW_MODEL_FILE), "r") as infile:
desc = infile.read()
return desc
@override
def get_task_stats(self, epoch=-1):
"""
return a dictionary of task statistics
"""
loc, mean_file = os.path.split(self.dataset.get_mean_file())
stats = {
"image dimensions": self.dataset.get_feature_dims(),
"mean file": mean_file,
"snapshot file": self.get_snapshot_filename(epoch),
"model file": self.model_file,
"framework": "tensorflow",
"mean subtraction": self.use_mean
}
if hasattr(self, "digits_version"):
stats.update({"digits version": self.digits_version})
if hasattr(self.dataset, "resize_mode"):
stats.update({"image resize mode": self.dataset.resize_mode})
if hasattr(self.dataset, "labels_file"):
stats.update({"labels file": self.dataset.labels_file})
return stats
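# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the filename pattern
# used by detect_snapshots() above. TensorFlow writes each checkpoint as a
# .data/.index/.meta triplet; only the .index file is matched and the
# extension is stripped. The filenames below are made up for the example.
# ---------------------------------------------------------------------------
def _demo_detect_snapshot_epochs(filenames, prefix=TENSORFLOW_SNAPSHOT_PREFIX):
    snapshots = []
    for filename in filenames:
        match = re.match(r'%s_(\d+)\.?(\d*)\.ckpt\.index$' % prefix, filename)
        if match:
            if match.group(2) == '':
                epoch = int(match.group(1))
            else:
                epoch = float(match.group(1) + '.' + match.group(2))
            snapshots.append((filename[:-len('.index')], epoch))
    return sorted(snapshots, key=lambda tup: tup[1])

if __name__ == '__main__':
    print _demo_detect_snapshot_epochs(
        ['snapshot_1.ckpt.index', 'snapshot_2.5.ckpt.index', 'snapshot_2.5.ckpt.meta'])
    # -> [('snapshot_1.ckpt', 1), ('snapshot_2.5.ckpt', 2.5)]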
| DIGITS-master | digits/model/tasks/tensorflow_train.py |
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import datetime
from ..job import ModelJob
from digits.utils import subclass, override
# NOTE: Increment this every time the pickled object changes
PICKLE_VERSION = 1
@subclass
class ImageModelJob(ModelJob):
"""
A Job that creates an image model
"""
def __init__(self, **kwargs):
"""
"""
super(ImageModelJob, self).__init__(**kwargs)
self.pickver_job_model_image = PICKLE_VERSION
@override
def json_dict(self, verbose=False, epoch=-1):
d = super(ImageModelJob, self).json_dict(verbose)
task = self.train_task()
creation_time = str(datetime.datetime.fromtimestamp(self.status_history[0][1]))
d.update({
"job id": self.id(),
"creation time": creation_time,
"username": self.username,
})
d.update(task.get_task_stats(epoch))
return d
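# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the kind of
# dictionary json_dict() above produces once the base ModelJob fields are
# merged with the training task's get_task_stats() output. The keys mirror
# the code above and the frameworks' get_task_stats(); the values are made
# up for the example.
# ---------------------------------------------------------------------------
_EXAMPLE_JSON_DICT = {
    'job id': '20170101-120000-abcd',     # hypothetical job id
    'creation time': '2017-01-01 12:00:00',
    'username': 'digits-user',
    'snapshot file': 'snapshot_30.ckpt',  # contributed by get_task_stats(epoch)
    'model file': 'network.py',
    'framework': 'tensorflow',
}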
| DIGITS-master | digits/model/images/job.py |
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from .classification import ImageClassificationModelJob
from .generic import GenericImageModelJob
from .job import ImageModelJob
__all__ = [
'ImageClassificationModelJob',
'GenericImageModelJob',
'ImageModelJob',
]
| DIGITS-master | digits/model/images/__init__.py |
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from wtforms import validators
from ..forms import ModelForm
from digits import utils
class ImageModelForm(ModelForm):
"""
Defines the form used to create a new ImageModelJob
"""
crop_size = utils.forms.IntegerField(
'Crop Size',
validators=[
validators.NumberRange(min=1),
validators.Optional()
],
tooltip=("If specified, during training a random square crop will be "
"taken from the input image before using as input for the network.")
)
use_mean = utils.forms.SelectField(
'Subtract Mean',
choices=[
('none', 'None'),
('image', 'Image'),
('pixel', 'Pixel'),
],
default='image',
tooltip="Subtract the mean file or mean pixel for this dataset from each image."
)
aug_flip = utils.forms.SelectField(
'Flipping',
choices=[
('none', 'None'),
('fliplr', 'Horizontal'),
('flipud', 'Vertical'),
('fliplrud', 'Horizontal and/or Vertical'),
],
default='none',
tooltip="Randomly flips each image during batch preprocessing."
)
aug_quad_rot = utils.forms.SelectField(
'Quadrilateral Rotation',
choices=[
('none', 'None'),
('rot90', '0, 90 or 270 degrees'),
('rot180', '0 or 180 degrees'),
('rotall', '0, 90, 180 or 270 degrees'),
],
default='none',
tooltip="Randomly rotates (90 degree steps) each image during batch preprocessing."
)
aug_rot = utils.forms.IntegerField(
'Rotation (+- deg)',
default=0,
validators=[
validators.NumberRange(min=0, max=180)
],
tooltip="The uniform-random rotation angle that will be performed during batch preprocessing."
)
aug_scale = utils.forms.FloatField(
'Rescale (stddev)',
default=0,
validators=[
validators.NumberRange(min=0, max=1)
],
tooltip=("Retaining image size, the image is rescaled with a "
"+-stddev of this parameter. Suggested value is 0.07.")
)
aug_noise = utils.forms.FloatField(
'Noise (stddev)',
default=0,
validators=[
validators.NumberRange(min=0, max=1)
],
tooltip=("Adds AWGN (Additive White Gaussian Noise) during batch "
"preprocessing, assuming [0 1] pixel-value range. Suggested value is 0.03.")
)
aug_contrast = utils.forms.FloatField(
'Contrast (factor)',
default=0,
validators=[
validators.NumberRange(min=0, max=5)
],
tooltip="Per channel, the mean of the channel is computed and then adjusts each component x "
"of each pixel to (x - mean) * contrast_factor + mean. The contrast_factor is picked "
"form a random uniform distribution to yield a value between [1-contrast_factor, "
"1+contrast_factor]. Suggested value is 0.8."
)
aug_whitening = utils.forms.BooleanField(
'Whitening',
default=False,
validators=[],
tooltip="Per-image whitening by subtracting its own mean, and dividing by its own standard deviation."
)
aug_hsv_use = utils.forms.BooleanField(
'HSV Shifting',
default=False,
tooltip=("Augmentation by normal-distributed random shifts in HSV "
"color space, assuming [0 1] pixel-value range."),
)
aug_hsv_h = utils.forms.FloatField(
'Hue',
default=0.02,
validators=[
validators.NumberRange(min=0, max=0.5)
],
tooltip=("Standard deviation of a shift that will be performed during "
"preprocessing, assuming [0 1] pixel-value range.")
)
aug_hsv_s = utils.forms.FloatField(
'Saturation',
default=0.04,
validators=[
validators.NumberRange(min=0, max=0.5)
],
tooltip=("Standard deviation of a shift that will be performed during "
"preprocessing, assuming [0 1] pixel-value range.")
)
aug_hsv_v = utils.forms.FloatField(
'Value',
default=0.06,
validators=[
validators.NumberRange(min=0, max=0.5)
],
tooltip=("Standard deviation of a shift that will be performed during "
"preprocessing, assuming [0 1] pixel-value range.")
)
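# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the augmentation
# fields above end up in a data_aug dictionary that the training tasks read
# (see TensorflowTrainTask.task_arguments). The helper below is a simplified
# mirror of that mapping for a hypothetical form submission; it is not
# DIGITS code.
# ---------------------------------------------------------------------------
def _example_augmentation_args(data_aug):
    args = ['--augFlip=%s' % data_aug['flip']]
    if data_aug['noise']:
        args.append('--augNoise=%s' % data_aug['noise'])
    if data_aug['contrast']:
        args.append('--augContrast=%s' % data_aug['contrast'])
    if data_aug['whitening']:
        args.append('--augWhitening=1')
    if data_aug['hsv_use']:
        args.extend(['--augHSVh=%s' % data_aug['hsv_h'],
                     '--augHSVs=%s' % data_aug['hsv_s'],
                     '--augHSVv=%s' % data_aug['hsv_v']])
    return args

if __name__ == '__main__':
    print _example_augmentation_args({
        'flip': 'fliplr', 'noise': 0.03, 'contrast': 0.8, 'whitening': False,
        'hsv_use': True, 'hsv_h': 0.02, 'hsv_s': 0.04, 'hsv_v': 0.06})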
| DIGITS-master | digits/model/images/forms.py |
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import flask
blueprint = flask.Blueprint(__name__, __name__)
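# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a Flask blueprint
# like the one above only becomes active once it is registered on an
# application. The URL prefix below is an assumption made for the example,
# not necessarily the one DIGITS uses.
# ---------------------------------------------------------------------------
def _example_register(app):
    app.register_blueprint(blueprint, url_prefix='/models/images')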
| DIGITS-master | digits/model/images/views.py |
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import os.path
from ..job import ImageModelJob
from digits.utils import subclass, override
# NOTE: Increment this every time the pickled object changes
PICKLE_VERSION = 1
@subclass
class ImageClassificationModelJob(ImageModelJob):
"""
A Job that creates an image model for a classification network
"""
def __init__(self, **kwargs):
super(ImageClassificationModelJob, self).__init__(**kwargs)
self.pickver_job_model_image_classification = PICKLE_VERSION
@override
def job_type(self):
return 'Image Classification Model'
@override
def download_files(self, epoch=-1, frozen_file=False):
task = self.train_task()
if frozen_file:
snapshot_filenames = task.get_snapshot(epoch, frozen_file=True)
else:
snapshot_filenames = task.get_snapshot(epoch, download=True)
# get model files
model_files = task.get_model_files()
download_files = [(self.path(filename), os.path.basename(filename))
for filename in model_files.values()]
# add other files
download_files.extend([
(task.dataset.path(task.dataset.labels_file),
os.path.basename(task.dataset.labels_file)),
(task.dataset.path(task.dataset.get_mean_file()),
os.path.basename(task.dataset.get_mean_file()))
])
if not isinstance(snapshot_filenames, list):
download_files.append((snapshot_filenames,
os.path.basename(snapshot_filenames)))
else:
for snapshot_filename in snapshot_filenames:
download_files.append((snapshot_filename,
os.path.basename(snapshot_filename)))
return download_files
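# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): download_files()
# above returns (filesystem path, archive name) pairs. The helper below shows
# one way a caller could package them into a zip archive using only the
# standard library; it is not the actual DIGITS download code.
# ---------------------------------------------------------------------------
def _example_package_download(download_files, archive_path):
    import zipfile
    with zipfile.ZipFile(archive_path, 'w', zipfile.ZIP_DEFLATED) as zf:
        for path, archive_name in download_files:
            zf.write(path, archive_name)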
| DIGITS-master | digits/model/images/classification/job.py |
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from .job import ImageClassificationModelJob
__all__ = ['ImageClassificationModelJob']
| DIGITS-master | digits/model/images/classification/__init__.py |
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import itertools
import json
import os
import shutil
import tempfile
import time
import unittest
import caffe_pb2
import math
# Find the best implementation available
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from bs4 import BeautifulSoup
from digits.config import config_value
import digits.dataset.images.classification.test_views
import digits.test_views
from digits import test_utils
import digits.webapp
from digits.frameworks import CaffeFramework
from google.protobuf import text_format
# May be too short on a slow system
TIMEOUT_DATASET = 45
TIMEOUT_MODEL = 60
################################################################################
# Base classes (they don't start with "Test" so nose won't run them)
################################################################################
class BaseViewsTest(digits.test_views.BaseViewsTest):
"""
Provides some functions
"""
CAFFE_NETWORK = \
"""
layer {
name: "hidden"
type: 'InnerProduct'
bottom: "data"
top: "output"
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "output"
bottom: "label"
top: "loss"
exclude { stage: "deploy" }
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "output"
bottom: "label"
top: "accuracy"
include { stage: "val" }
}
layer {
name: "softmax"
type: "Softmax"
bottom: "output"
top: "softmax"
include { stage: "deploy" }
}
"""
TORCH_NETWORK = \
"""
return function(p)
-- adjust to number of classes
local nclasses = p.nclasses or 1
-- model should adjust to any 3D input
local nDim = 1
if p.inputShape then p.inputShape:apply(function(x) nDim=nDim*x end) end
local model = nn.Sequential()
model:add(nn.View(-1):setNumInputDims(3)) -- c*h*w -> chw (flattened)
-- set all weights and biases to zero as this speeds learning up
-- for the type of problem we're trying to solve in this test
local linearLayer = nn.Linear(nDim, nclasses)
linearLayer.weight:fill(0)
linearLayer.bias:fill(0)
model:add(linearLayer) -- chw -> nclasses
model:add(nn.LogSoftMax())
return {
model = model
}
end
"""
TENSORFLOW_NETWORK = \
"""
class UserModel(Tower):
@model_property
def inference(self):
ninputs = self.input_shape[0] * self.input_shape[1] * self.input_shape[2]
W = tf.get_variable('W', [ninputs, self.nclasses], initializer=tf.constant_initializer(0.0))
b = tf.get_variable('b', [self.nclasses], initializer=tf.constant_initializer(0.0)),
model = tf.reshape(self.x, shape=[-1, ninputs])
model = tf.add(tf.matmul(model, W), b)
return model
@model_property
def loss(self):
loss = digits.classification_loss(self.inference, self.y)
return loss
"""
@classmethod
def model_exists(cls, job_id):
return cls.job_exists(job_id, 'models')
@classmethod
def model_status(cls, job_id):
return cls.job_status(job_id, 'models')
@classmethod
def model_info(cls, job_id):
return cls.job_info(job_id, 'models')
@classmethod
def abort_model(cls, job_id):
return cls.abort_job(job_id, job_type='models')
@classmethod
def model_wait_completion(cls, job_id, **kwargs):
kwargs['job_type'] = 'models'
if 'timeout' not in kwargs:
kwargs['timeout'] = TIMEOUT_MODEL
return cls.job_wait_completion(job_id, **kwargs)
@classmethod
def delete_model(cls, job_id):
return cls.delete_job(job_id, job_type='models')
@classmethod
def network(cls):
if cls.FRAMEWORK == 'torch':
return cls.TORCH_NETWORK
elif cls.FRAMEWORK == 'caffe':
return cls.CAFFE_NETWORK
elif cls.FRAMEWORK == 'tensorflow':
return cls.TENSORFLOW_NETWORK
else:
raise Exception('Unknown cls.FRAMEWORK "%s"' % cls.FRAMEWORK)
class BaseViewsTestWithDataset(BaseViewsTest,
digits.dataset.images.classification.test_views.BaseViewsTestWithDataset):
"""
Provides a dataset
"""
# Inherited classes may want to override these attributes
CROP_SIZE = None
TRAIN_EPOCHS = 1
SHUFFLE = False
LR_POLICY = None
LR_MULTISTEP_VALUES = None
LEARNING_RATE = None
AUG_FLIP = None
AUG_QUAD_ROT = None
AUG_ROT = None
AUG_SCALE = None
AUG_NOISE = None
AUG_CONTRAST = None
AUG_WHITENING = None
AUG_HSV_USE = None
AUG_HSV_H = None
AUG_HSV_S = None
AUG_HSV_V = None
OPTIMIZER = None
@classmethod
def setUpClass(cls):
super(BaseViewsTestWithDataset, cls).setUpClass()
cls.created_models = []
@classmethod
def tearDownClass(cls):
# delete any created datasets
for job_id in cls.created_models:
cls.delete_model(job_id)
super(BaseViewsTestWithDataset, cls).tearDownClass()
@classmethod
def create_model(cls, network=None, **kwargs):
"""
Create a model
Returns the job_id
Raise RuntimeError if job fails to create
Keyword arguments:
**kwargs -- data to be sent with POST request
"""
if network is None:
network = cls.network()
data = {
'model_name': 'test_model',
'group_name': 'test_group',
'dataset': cls.dataset_id,
'method': 'custom',
'custom_network': network,
'batch_size': 10,
'train_epochs': cls.TRAIN_EPOCHS,
'framework': cls.FRAMEWORK,
'random_seed': 0xCAFEBABE,
'shuffle': 'true' if cls.SHUFFLE else 'false'
}
if cls.CROP_SIZE is not None:
data['crop_size'] = cls.CROP_SIZE
if cls.LR_POLICY is not None:
data['lr_policy'] = cls.LR_POLICY
if cls.LEARNING_RATE is not None:
data['learning_rate'] = cls.LEARNING_RATE
if cls.LR_MULTISTEP_VALUES is not None:
data['lr_multistep_values'] = cls.LR_MULTISTEP_VALUES
if cls.AUG_FLIP is not None:
data['aug_flip'] = cls.AUG_FLIP
if cls.AUG_QUAD_ROT is not None:
data['aug_quad_rot'] = cls.AUG_QUAD_ROT
if cls.AUG_ROT is not None:
data['aug_rot'] = cls.AUG_ROT
if cls.AUG_SCALE is not None:
data['aug_scale'] = cls.AUG_SCALE
if cls.AUG_NOISE is not None:
data['aug_noise'] = cls.AUG_NOISE
if cls.AUG_CONTRAST is not None:
data['aug_contrast'] = cls.AUG_CONTRAST
if cls.AUG_WHITENING is not None:
data['aug_whitening'] = cls.AUG_WHITENING
if cls.AUG_HSV_USE is not None:
data['aug_hsv_use'] = cls.AUG_HSV_USE
if cls.AUG_HSV_H is not None:
data['aug_hsv_h'] = cls.AUG_HSV_H
if cls.AUG_HSV_S is not None:
data['aug_hsv_s'] = cls.AUG_HSV_S
if cls.AUG_HSV_V is not None:
data['aug_hsv_v'] = cls.AUG_HSV_V
if cls.OPTIMIZER is not None:
data['solver_type'] = cls.OPTIMIZER
data.update(kwargs)
request_json = data.pop('json', False)
url = '/models/images/classification'
if request_json:
url += '/json'
rv = cls.app.post(url, data=data)
if request_json:
if rv.status_code != 200:
print json.loads(rv.data)
raise RuntimeError('Model creation failed with %s' % rv.status_code)
data = json.loads(rv.data)
if 'jobs' in data.keys():
return [j['id'] for j in data['jobs']]
else:
return data['id']
# expect a redirect
if not 300 <= rv.status_code <= 310:
print 'Status code:', rv.status_code
s = BeautifulSoup(rv.data, 'html.parser')
div = s.select('div.alert-danger')
if div:
print div[0]
else:
print rv.data
raise RuntimeError('Failed to create model - status %s' % rv.status_code)
job_id = cls.job_id_from_response(rv)
assert cls.model_exists(job_id), 'model not found after successful creation'
cls.created_models.append(job_id)
return job_id
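# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test suite): the class-level
# attributes on BaseViewsTestWithDataset are meant to be overridden by
# concrete test classes, and create_model() turns them into POST fields.
# The class below is hypothetical and only demonstrates the override pattern;
# it is deliberately named so that nose will not collect it.
# ---------------------------------------------------------------------------
class _ExampleAugmentedModelViews(BaseViewsTestWithDataset):
    FRAMEWORK = 'tensorflow'
    TRAIN_EPOCHS = 2
    AUG_FLIP = 'fliplr'
    AUG_NOISE = 0.03
    OPTIMIZER = 'ADAM'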
class BaseViewsTestWithModel(BaseViewsTestWithDataset):
"""
Provides a model
"""
@classmethod
def setUpClass(cls):
super(BaseViewsTestWithModel, cls).setUpClass()
cls.model_id = cls.create_model(json=True)
assert cls.model_wait_completion(cls.model_id) == 'Done', 'create failed'
class BaseTestViews(BaseViewsTest):
"""
Tests which don't require a dataset or a model
"""
def test_page_model_new(self):
rv = self.app.get('/models/images/classification/new')
assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
assert 'New Image Classification Model' in rv.data, 'unexpected page format'
def test_nonexistent_model(self):
assert not self.model_exists('foo'), "model shouldn't exist"
def test_visualize_network(self):
rv = self.app.post('/models/visualize-network?framework=' + self.FRAMEWORK,
data={'custom_network': self.network()}
)
s = BeautifulSoup(rv.data, 'html.parser')
if rv.status_code != 200:
body = s.select('body')[0]
if 'InvocationException' in str(body):
raise unittest.SkipTest('GraphViz not installed')
raise AssertionError('POST failed with %s\n\n%s' % (rv.status_code, body))
image = s.select('img')
assert image is not None, "didn't return an image"
def test_customize(self):
rv = self.app.post('/models/customize?network=lenet&framework=' + self.FRAMEWORK)
s = BeautifulSoup(rv.data, 'html.parser')
body = s.select('body')
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
class BaseTestCreation(BaseViewsTestWithDataset):
"""
Model creation tests
"""
def test_create_json(self):
job_id = self.create_model(json=True)
self.abort_model(job_id)
def test_create_delete(self):
job_id = self.create_model()
assert self.delete_model(job_id) == 200, 'delete failed'
assert not self.model_exists(job_id), 'model exists after delete'
def test_create_wait_delete(self):
job_id = self.create_model()
assert self.model_wait_completion(job_id) == 'Done', 'create failed'
assert self.delete_model(job_id) == 200, 'delete failed'
assert not self.model_exists(job_id), 'model exists after delete'
def test_create_abort_delete(self):
job_id = self.create_model()
assert self.abort_model(job_id) == 200, 'abort failed'
assert self.delete_model(job_id) == 200, 'delete failed'
assert not self.model_exists(job_id), 'model exists after delete'
    def test_snapshot_interval_0_5(self):
job_id = self.create_model(snapshot_interval=0.5)
assert self.model_wait_completion(job_id) == 'Done', 'create failed'
rv = self.app.get('/models/%s/json' % job_id)
assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
content = json.loads(rv.data)
assert len(content['snapshots']) > 1, 'should take >1 snapshot'
    def test_snapshot_interval_2(self):
job_id = self.create_model(train_epochs=4, snapshot_interval=2)
assert self.model_wait_completion(job_id) == 'Done', 'create failed'
rv = self.app.get('/models/%s/json' % job_id)
assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
content = json.loads(rv.data)
assert len(content['snapshots']) == 2, 'should take 2 snapshots'
@unittest.skipIf(
not config_value('gpu_list'),
'no GPUs selected')
@unittest.skipIf(
not config_value('caffe')['cuda_enabled'],
'CUDA disabled')
@unittest.skipIf(
config_value('caffe')['multi_gpu'],
'multi-GPU enabled')
def test_select_gpu(self):
for index in config_value('gpu_list').split(','):
yield self.check_select_gpu, index
def check_select_gpu(self, gpu_index):
job_id = self.create_model(select_gpu=gpu_index)
assert self.model_wait_completion(job_id) == 'Done', 'create failed'
@unittest.skipIf(
not config_value('gpu_list'),
'no GPUs selected')
@unittest.skipIf(
not config_value('caffe')['cuda_enabled'],
'CUDA disabled')
@unittest.skipIf(
not config_value('caffe')['multi_gpu'],
'multi-GPU disabled')
def test_select_gpus(self):
# test all possible combinations
gpu_list = config_value('gpu_list').split(',')
for i in xrange(len(gpu_list)):
for combination in itertools.combinations(gpu_list, i + 1):
yield self.check_select_gpus, combination
def check_select_gpus(self, gpu_list):
job_id = self.create_model(select_gpus_list=','.join(gpu_list), batch_size=len(gpu_list))
assert self.model_wait_completion(job_id) == 'Done', 'create failed'
def classify_one_for_job(self, job_id, test_misclassification=True):
# carry out one inference test per category in dataset
for category in self.imageset_paths.keys():
image_path = self.imageset_paths[category][0]
image_path = os.path.join(self.imageset_folder, image_path)
with open(image_path, 'rb') as infile:
# StringIO wrapping is needed to simulate POST file upload.
image_upload = (StringIO(infile.read()), 'image.png')
rv = self.app.post(
'/models/images/classification/classify_one?job_id=%s' % job_id,
data={
'image_file': image_upload,
}
)
s = BeautifulSoup(rv.data, 'html.parser')
body = s.select('body')
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
# gets an array of arrays [[confidence, label],...]
predictions = [p.get_text().split() for p in s.select('ul.list-group li')]
if test_misclassification:
assert predictions[0][1] == category, 'image misclassified'
def test_classify_one_mean_image(self):
# test the creation
job_id = self.create_model(use_mean='image')
assert self.model_wait_completion(job_id) == 'Done', 'job failed'
self.classify_one_for_job(job_id)
def test_classify_one_mean_pixel(self):
# test the creation
job_id = self.create_model(use_mean='pixel')
assert self.model_wait_completion(job_id) == 'Done', 'job failed'
self.classify_one_for_job(job_id)
def test_classify_one_mean_none(self):
# test the creation
job_id = self.create_model(use_mean='none')
assert self.model_wait_completion(job_id) == 'Done', 'job failed'
self.classify_one_for_job(job_id, False)
def test_retrain(self):
job1_id = self.create_model()
assert self.model_wait_completion(job1_id) == 'Done', 'first job failed'
rv = self.app.get('/models/%s/json' % job1_id)
assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
content = json.loads(rv.data)
        assert len(content['snapshots']), 'should have at least one snapshot'
options = {
'method': 'previous',
'previous_networks': job1_id,
}
options['%s-snapshot' % job1_id] = content['snapshots'][-1]
job2_id = self.create_model(**options)
assert self.model_wait_completion(job2_id) == 'Done', 'second job failed'
def test_retrain_twice(self):
# retrain from a job which already had a pretrained model
job1_id = self.create_model()
assert self.model_wait_completion(job1_id) == 'Done', 'first job failed'
rv = self.app.get('/models/%s/json' % job1_id)
assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
content = json.loads(rv.data)
        assert len(content['snapshots']), 'should have at least one snapshot'
options_2 = {
'method': 'previous',
'previous_networks': job1_id,
}
options_2['%s-snapshot' % job1_id] = content['snapshots'][-1]
job2_id = self.create_model(**options_2)
assert self.model_wait_completion(job2_id) == 'Done', 'second job failed'
options_3 = {
'method': 'previous',
'previous_networks': job2_id,
}
options_3['%s-snapshot' % job2_id] = -1
job3_id = self.create_model(**options_3)
assert self.model_wait_completion(job3_id) == 'Done', 'third job failed'
def test_bad_network_definition(self):
if self.FRAMEWORK == 'caffe':
bogus_net = """
layer {
name: "hidden"
type: 'BogusCode'
bottom: "data"
top: "output"
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "output"
bottom: "label"
top: "loss"
exclude { stage: "deploy" }
}
layer {
name: "softmax"
type: "Softmax"
bottom: "output"
top: "softmax"
include { stage: "deploy" }
}
"""
elif self.FRAMEWORK == 'torch':
bogus_net = """
local model = BogusCode(0)
return function(params)
return {
model = model
}
end
"""
elif self.FRAMEWORK == 'tensorflow':
bogus_net = """
class UserModel(Tower):
@model_property
def inference(self):
model = BogusCode(0)
return model
@model_property
def loss(y):
return BogusCode(0)
"""
job_id = self.create_model(json=True, network=bogus_net)
assert self.model_wait_completion(job_id) == 'Error', 'job should have failed'
job_info = self.job_info_html(job_id=job_id, job_type='models')
assert 'BogusCode' in job_info, "job_info: \n%s" % str(job_info)
def test_clone(self):
options_1 = {
'shuffle': True,
'snapshot_interval': 2.0,
'lr_step_size': 33.0,
'lr_inv_power': 0.5,
'lr_inv_gamma': 0.1,
'lr_poly_power': 3.0,
'lr_exp_gamma': 0.9,
'use_mean': 'image',
'lr_multistep_gamma': 0.5,
'lr_policy': 'exp',
'val_interval': 3.0,
'random_seed': 123,
'learning_rate': 0.0125,
'lr_step_gamma': 0.1,
'lr_sigmoid_step': 50.0,
'lr_sigmoid_gamma': 0.1,
'lr_multistep_values': '50,85',
}
job1_id = self.create_model(**options_1)
assert self.model_wait_completion(job1_id) == 'Done', 'first job failed'
rv = self.app.get('/models/%s/json' % job1_id)
assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
content1 = json.loads(rv.data)
# Clone job1 as job2
options_2 = {
'clone': job1_id,
}
job2_id = self.create_model(**options_2)
assert self.model_wait_completion(job2_id) == 'Done', 'second job failed'
rv = self.app.get('/models/%s/json' % job2_id)
assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
content2 = json.loads(rv.data)
# These will be different
content1.pop('id')
content2.pop('id')
content1.pop('directory')
content2.pop('directory')
content1.pop('creation time')
content2.pop('creation time')
content1.pop('job id')
content2.pop('job id')
assert (content1 == content2), 'job content does not match'
job1 = digits.webapp.scheduler.get_job(job1_id)
job2 = digits.webapp.scheduler.get_job(job2_id)
assert (job1.form_data == job2.form_data), 'form content does not match'
class BaseTestCreated(BaseViewsTestWithModel):
"""
Tests on a model that has already been created
"""
def test_save(self):
job = digits.webapp.scheduler.get_job(self.model_id)
assert job.save(), 'Job failed to save'
def test_get_snapshot(self):
job = digits.webapp.scheduler.get_job(self.model_id)
task = job.train_task()
f = task.get_snapshot(-1)
assert f, "Failed to load snapshot"
filename = task.get_snapshot_filename(-1)
assert filename, "Failed to get filename"
def test_download(self):
for extension in ['tar', 'zip', 'tar.gz', 'tar.bz2']:
yield self.check_download, extension
def check_download(self, extension):
url = '/models/%s/download.%s' % (self.model_id, extension)
rv = self.app.get(url)
assert rv.status_code == 200, 'download "%s" failed with %s' % (url, rv.status_code)
def test_index_json(self):
rv = self.app.get('/index/json')
assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
content = json.loads(rv.data)
found = False
for m in content['models']:
if m['id'] == self.model_id:
found = True
break
assert found, 'model not found in list'
def test_model_json(self):
rv = self.app.get('/models/%s/json' % self.model_id)
assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
content = json.loads(rv.data)
assert content['id'] == self.model_id, 'id %s != %s' % (content['id'], self.model_id)
assert content['dataset_id'] == self.dataset_id, 'dataset_id %s != %s' % (
content['dataset_id'], self.dataset_id)
assert len(content['snapshots']) > 0, 'no snapshots in list'
def test_edit_name(self):
status = self.edit_job(
self.dataset_id,
name='new name'
)
assert status == 200, 'failed with %s' % status
def test_edit_notes(self):
status = self.edit_job(
self.dataset_id,
notes='new notes'
)
assert status == 200, 'failed with %s' % status
def test_classify_one(self):
# test first image in first category
category = self.imageset_paths.keys()[0]
image_path = self.imageset_paths[category][0]
image_path = os.path.join(self.imageset_folder, image_path)
with open(image_path, 'rb') as infile:
# StringIO wrapping is needed to simulate POST file upload.
image_upload = (StringIO(infile.read()), 'image.png')
rv = self.app.post(
'/models/images/classification/classify_one?job_id=%s' % self.model_id,
data={
'image_file': image_upload,
'show_visualizations': 'y',
}
)
s = BeautifulSoup(rv.data, 'html.parser')
body = s.select('body')
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
# gets an array of arrays [[confidence, label],...]
predictions = [p.get_text().split() for p in s.select('ul.list-group li')]
assert predictions[0][1] == category, 'image misclassified'
def test_classify_one_json(self):
# test last image in last category
category = self.imageset_paths.keys()[-1]
image_path = self.imageset_paths[category][-1]
image_path = os.path.join(self.imageset_folder, image_path)
with open(image_path, 'rb') as infile:
# StringIO wrapping is needed to simulate POST file upload.
image_upload = (StringIO(infile.read()), 'image.png')
rv = self.app.post(
'/models/images/classification/classify_one/json?job_id=%s' % self.model_id,
data={
'image_file': image_upload,
'show_visualizations': 'y',
}
)
assert rv.status_code == 200, 'POST failed with %s' % rv.status_code
data = json.loads(rv.data)
assert data['predictions'][0][0] == category, 'image misclassified'
def test_classify_many(self):
textfile_images = ''
label_id = 0
for label, images in self.imageset_paths.iteritems():
for image in images:
image_path = image
image_path = os.path.join(self.imageset_folder, image_path)
textfile_images += '%s %d\n' % (image_path, label_id)
label_id += 1
# StringIO wrapping is needed to simulate POST file upload.
file_upload = (StringIO(textfile_images), 'images.txt')
rv = self.app.post(
'/models/images/classification/classify_many?job_id=%s' % self.model_id,
data={'image_list': file_upload}
)
s = BeautifulSoup(rv.data, 'html.parser')
body = s.select('body')
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
def test_classify_many_from_folder(self):
textfile_images = ''
label_id = 0
for label, images in self.imageset_paths.iteritems():
for image in images:
image_path = image
textfile_images += '%s %d\n' % (image_path, label_id)
label_id += 1
# StringIO wrapping is needed to simulate POST file upload.
file_upload = (StringIO(textfile_images), 'images.txt')
rv = self.app.post(
'/models/images/classification/classify_many?job_id=%s' % self.model_id,
data={'image_list': file_upload, 'image_folder': self.imageset_folder}
)
s = BeautifulSoup(rv.data, 'html.parser')
body = s.select('body')
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
def test_classify_many_invalid_ground_truth(self):
textfile_images = ''
label_id = 0
for label, images in self.imageset_paths.iteritems():
for image in images:
image_path = image
image_path = os.path.join(self.imageset_folder, image_path)
# test label_id with -1 and >len(labels)
textfile_images += '%s %s\n' % (image_path, 3 * label_id - 1)
label_id += 1
# StringIO wrapping is needed to simulate POST file upload.
file_upload = (StringIO(textfile_images), 'images.txt')
rv = self.app.post(
'/models/images/classification/classify_many?job_id=%s' % self.model_id,
data={'image_list': file_upload}
)
s = BeautifulSoup(rv.data, 'html.parser')
body = s.select('body')
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
def test_classify_many_json(self):
textfile_images = ''
label_id = 0
for label, images in self.imageset_paths.iteritems():
for image in images:
image_path = image
image_path = os.path.join(self.imageset_folder, image_path)
textfile_images += '%s %d\n' % (image_path, label_id)
label_id += 1
# StringIO wrapping is needed to simulate POST file upload.
file_upload = (StringIO(textfile_images), 'images.txt')
rv = self.app.post(
'/models/images/classification/classify_many/json?job_id=%s' % self.model_id,
data={'image_list': file_upload}
)
assert rv.status_code == 200, 'POST failed with %s' % rv.status_code
data = json.loads(rv.data)
assert 'classifications' in data, 'invalid response'
# verify classification of first image in each category
for category in self.imageset_paths.keys():
image_path = self.imageset_paths[category][0]
image_path = os.path.join(self.imageset_folder, image_path)
prediction = data['classifications'][image_path][0][0]
            assert prediction == category, 'image misclassified - predicted %s - expected %s' % (prediction, category)
def test_top_n(self):
textfile_images = ''
label_id = 0
for label, images in self.imageset_paths.iteritems():
for image in images:
image_path = image
image_path = os.path.join(self.imageset_folder, image_path)
textfile_images += '%s %d\n' % (image_path, label_id)
label_id += 1
# StringIO wrapping is needed to simulate POST file upload.
file_upload = (StringIO(textfile_images), 'images.txt')
rv = self.app.post(
'/models/images/classification/top_n?job_id=%s' % self.model_id,
data={'image_list': file_upload}
)
s = BeautifulSoup(rv.data, 'html.parser')
body = s.select('body')
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
keys = self.imageset_paths.keys()
for key in keys:
assert key in rv.data, '"%s" not found in the response'
def test_top_n_from_folder(self):
textfile_images = ''
label_id = 0
for label, images in self.imageset_paths.iteritems():
for image in images:
image_path = image
textfile_images += '%s %d\n' % (image_path, label_id)
label_id += 1
# StringIO wrapping is needed to simulate POST file upload.
file_upload = (StringIO(textfile_images), 'images.txt')
rv = self.app.post(
'/models/images/classification/top_n?job_id=%s' % self.model_id,
data={'image_list': file_upload, 'image_folder': self.imageset_folder}
)
s = BeautifulSoup(rv.data, 'html.parser')
body = s.select('body')
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
keys = self.imageset_paths.keys()
for key in keys:
assert key in rv.data, '"%s" not found in the response'
def test_inference_while_training(self):
# make sure we can do inference while all GPUs are in use for training
# if no GPUs, just test inference during a normal training job
# get number of GPUs
if self.FRAMEWORK == 'tensorflow':
raise unittest.SkipTest('Tensorflow CPU inference during training not supported')
gpu_count = 1
if (config_value('gpu_list') and
config_value('caffe')['cuda_enabled'] and
config_value('caffe')['multi_gpu']):
gpu_count = len(config_value('gpu_list').split(','))
# grab an image for testing
category = self.imageset_paths.keys()[-1]
image_path = self.imageset_paths[category][-1]
image_path = os.path.join(self.imageset_folder, image_path)
with open(image_path, 'rb') as infile:
# StringIO wrapping is needed to simulate POST file upload.
image_upload = (StringIO(infile.read()), 'image.png')
# create a long-running training job
job2_id = self.create_model(
select_gpu_count=gpu_count,
batch_size=10 * gpu_count,
train_epochs=1000,
)
try:
while True:
status = self.model_status(job2_id)
if status in ['Initialized', 'Waiting']:
time.sleep(0.01)
elif status == 'Running':
break
else:
raise RuntimeError('job status is %s' % status)
rv = self.app.post(
'/models/images/classification/classify_one/json?job_id=%s' % self.model_id,
data={'image_file': image_upload}
)
json.loads(rv.data)
assert rv.status_code == 200, 'POST failed with %s' % rv.status_code
finally:
self.delete_model(job2_id)
class BaseTestDatasetModelInteractions(BaseViewsTestWithDataset):
"""
Test the interactions between datasets and models
"""
# If you try to create a model using a deleted dataset, it should fail
def test_create_model_deleted_dataset(self):
dataset_id = self.create_dataset()
assert self.delete_dataset(dataset_id) == 200, 'delete failed'
assert not self.dataset_exists(dataset_id), 'dataset exists after delete'
try:
self.create_model(dataset=dataset_id)
except RuntimeError:
return
assert False, 'Should have failed'
# If you try to create a model using a running dataset,
# it should wait to start until the dataset is completed
def test_create_model_running_dataset(self):
dataset_id = self.create_dataset()
model_id = self.create_model(dataset=dataset_id)
# Model should be in WAIT status while dataset is running
# Copying functionality from job_wait_completion ...
start_time = time.time()
timeout = TIMEOUT_DATASET
dataset_status = self.dataset_status(dataset_id)
while dataset_status != 'Done':
model_status = self.model_status(model_id)
if model_status == 'Initialized':
# give it some time ...
pass
elif model_status == 'Waiting':
# That's what we were waiting for
break
else:
raise Exception('Model not waiting - "%s"' % model_status)
assert (time.time() - start_time) < timeout, 'Job took more than %s seconds' % timeout
time.sleep(0.5)
dataset_status = self.dataset_status(dataset_id)
# Model should switch to RUN status after dataset is DONE
assert self.dataset_wait_completion(dataset_id) == 'Done', 'dataset creation failed'
time.sleep(1)
assert self.model_status(model_id) in ['Running', 'Done'], "model didn't start"
self.abort_model(model_id)
# If you try to delete a completed dataset with a dependent model, it should fail
def test_delete_dataset_dependent_model(self):
dataset_id = self.create_dataset()
model_id = self.create_model(dataset=dataset_id)
assert self.dataset_wait_completion(dataset_id) == 'Done', 'dataset creation failed'
assert self.delete_dataset(dataset_id) == 403, 'dataset deletion should not have succeeded'
self.abort_model(model_id)
# If you try to delete a running dataset with a dependent model, it should fail
def test_delete_running_dataset_dependent_model(self):
dataset_id = self.create_dataset()
model_id = self.create_model(dataset=dataset_id)
assert self.delete_dataset(dataset_id) == 403, 'dataset deletion should not have succeeded'
self.abort_dataset(dataset_id)
self.abort_model(model_id)
class BaseTestCreatedWide(BaseTestCreated):
IMAGE_WIDTH = 20
class BaseTestCreatedTall(BaseTestCreated):
IMAGE_HEIGHT = 20
class BaseTestCreatedCropInForm(BaseTestCreated):
CROP_SIZE = 8
class BaseTestCreatedDataAug(BaseTestCreatedTall):
AUG_FLIP = 'fliplrud'
AUG_QUAD_ROT = 'rotall'
AUG_ROT = 45
AUG_SCALE = 0.07
AUG_NOISE = 0.03
AUG_HSV_USE = True
AUG_HSV_H = 0.02
AUG_HSV_S = 0.04
AUG_HSV_V = 0.06
class BaseTestCreatedCropInNetwork(BaseTestCreated):
CAFFE_NETWORK = \
"""
layer {
name: "data"
type: "Data"
top: "data"
top: "label"
include {
phase: TRAIN
}
transform_param {
crop_size: 8
}
}
layer {
name: "data"
type: "Data"
top: "data"
top: "label"
include {
phase: TEST
}
transform_param {
crop_size: 8
}
}
layer {
name: "hidden"
type: 'InnerProduct'
bottom: "data"
top: "output"
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "output"
bottom: "label"
top: "loss"
exclude { stage: "deploy" }
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "output"
bottom: "label"
top: "accuracy"
include { stage: "val" }
}
layer {
name: "softmax"
type: "Softmax"
bottom: "output"
top: "softmax"
include { stage: "deploy" }
}
"""
TORCH_NETWORK = \
"""
return function(p)
local nclasses = p.nclasses or 1
local croplen = 8, channels
if p.inputShape then channels=p.inputShape[1] else channels=1 end
local model = nn.Sequential()
model:add(nn.View(-1):setNumInputDims(3)) -- flatten
local linLayer = nn.Linear(channels*croplen*croplen, nclasses)
linLayer.weight:fill(0)
linLayer.bias:fill(0)
model:add(linLayer) -- chw -> nclasses
model:add(nn.LogSoftMax())
return {
model = model,
croplen = croplen
}
end
"""
TENSORFLOW_NETWORK = \
"""
@TODO(tzaman)
"""
################################################################################
# Test classes
################################################################################
class TestCaffeViews(BaseTestViews, test_utils.CaffeMixin):
pass
class TestCaffeCreation(BaseTestCreation, test_utils.CaffeMixin):
pass
class TestCaffeCreatedWideMoreNumOutput(BaseTestCreatedWide, test_utils.CaffeMixin):
CAFFE_NETWORK = \
"""
layer {
name: "hidden"
type: 'InnerProduct'
bottom: "data"
top: "output"
inner_product_param {
num_output: 1000
}
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "output"
bottom: "label"
top: "loss"
exclude { stage: "deploy" }
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "output"
bottom: "label"
top: "accuracy"
include { stage: "val" }
}
layer {
name: "softmax"
type: "Softmax"
bottom: "output"
top: "softmax"
include { stage: "deploy" }
}
"""
class TestCaffeDatasetModelInteractions(BaseTestDatasetModelInteractions, test_utils.CaffeMixin):
pass
class TestCaffeCreatedCropInForm(BaseTestCreatedCropInForm, test_utils.CaffeMixin):
pass
class TestCaffeCreatedCropInNetwork(BaseTestCreatedCropInNetwork, test_utils.CaffeMixin):
pass
@unittest.skipIf(
not CaffeFramework().can_accumulate_gradients(),
'This version of Caffe cannot accumulate gradients')
class TestBatchAccumulationCaffe(BaseViewsTestWithDataset, test_utils.CaffeMixin):
TRAIN_EPOCHS = 1
IMAGE_COUNT = 10 # per class
def test_batch_accumulation_calculations(self):
batch_size = 10
batch_accumulation = 2
job_id = self.create_model(
batch_size=batch_size,
batch_accumulation=batch_accumulation,
)
assert self.model_wait_completion(job_id) == 'Done', 'create failed'
info = self.model_info(job_id)
solver = caffe_pb2.SolverParameter()
with open(os.path.join(info['directory'], info['solver file']), 'r') as infile:
text_format.Merge(infile.read(), solver)
assert solver.iter_size == batch_accumulation, \
'iter_size is %d instead of %d' % (solver.iter_size, batch_accumulation)
max_iter = int(math.ceil(
float(self.TRAIN_EPOCHS * self.IMAGE_COUNT * 3) /
(batch_size * batch_accumulation)
))
assert solver.max_iter == max_iter,\
'max_iter is %d instead of %d' % (solver.max_iter, max_iter)
class TestCaffeCreatedTallMultiStepLR(BaseTestCreatedTall, test_utils.CaffeMixin):
LR_POLICY = 'multistep'
LR_MULTISTEP_VALUES = '50,75,90'
class TestTorchViews(BaseTestViews, test_utils.TorchMixin):
pass
class TestTorchCreation(BaseTestCreation, test_utils.TorchMixin):
pass
class TestTorchCreatedUnencodedShuffle(BaseTestCreated, test_utils.TorchMixin):
ENCODING = 'none'
SHUFFLE = True
class TestTorchCreatedHdf5(BaseTestCreated, test_utils.TorchMixin):
BACKEND = 'hdf5'
class TestTorchCreatedTallHdf5Shuffle(BaseTestCreatedTall, test_utils.TorchMixin):
BACKEND = 'hdf5'
SHUFFLE = True
class TestTorchDatasetModelInteractions(BaseTestDatasetModelInteractions, test_utils.TorchMixin):
pass
class TestCaffeLeNet(BaseTestCreated, test_utils.CaffeMixin):
IMAGE_WIDTH = 28
IMAGE_HEIGHT = 28
CAFFE_NETWORK = open(
os.path.join(
os.path.dirname(digits.__file__),
'standard-networks', 'caffe', 'lenet.prototxt')
).read()
class TestCaffeLeNetADAMOptimizer(TestCaffeLeNet):
OPTIMIZER = 'ADAM'
class TestTorchCreatedCropInForm(BaseTestCreatedCropInForm, test_utils.TorchMixin):
pass
class TestTorchCreatedDataAug(BaseTestCreatedDataAug, test_utils.TorchMixin):
TRAIN_EPOCHS = 2
class TestTorchCreatedCropInNetwork(BaseTestCreatedCropInNetwork, test_utils.TorchMixin):
pass
class TestTorchCreatedWideMultiStepLR(BaseTestCreatedWide, test_utils.TorchMixin):
LR_POLICY = 'multistep'
LR_MULTISTEP_VALUES = '50,75,90'
class TestTorchLeNet(BaseTestCreated, test_utils.TorchMixin):
IMAGE_WIDTH = 28
IMAGE_HEIGHT = 28
TRAIN_EPOCHS = 20
# standard lenet model will adjust to color
# or grayscale images
TORCH_NETWORK = open(
os.path.join(
os.path.dirname(digits.__file__),
'standard-networks', 'torch', 'lenet.lua')
).read()
def test_inference_while_training(self):
# override parent method to skip this test as the reference
# model for LeNet uses CuDNN by default and it difficult to
# perform inference on a CuDNN-trained model without non-trivial
# model tweaking
raise unittest.SkipTest('Torch CPU inference on CuDNN-trained model not supported')
class TestTorchLeNetADAMOptimizer(TestTorchLeNet):
OPTIMIZER = 'ADAM'
class TestTorchLeNetHdf5Shuffle(TestTorchLeNet):
BACKEND = 'hdf5'
SHUFFLE = True
class TestCaffePythonLayer(BaseViewsTestWithDataset, test_utils.CaffeMixin):
CAFFE_NETWORK = """\
layer {
name: "hidden"
type: 'InnerProduct'
inner_product_param {
num_output: 500
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
bottom: "data"
top: "output"
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "output"
bottom: "label"
top: "loss"
exclude { stage: "deploy" }
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "output"
bottom: "label"
top: "accuracy"
include { stage: "val" }
}
layer {
name: "py_test"
type: "Python"
bottom: "output"
top: "py_test"
python_param {
module: "digits_python_layers"
layer: "PythonLayer"
}
}
layer {
name: "softmax"
type: "Softmax"
bottom: "output"
top: "softmax"
include { stage: "deploy" }
}
"""
def write_python_layer_script(self, filename):
with open(filename, 'w') as f:
f.write("""\
import caffe
import numpy as np
class PythonLayer(caffe.Layer):
def setup(self, bottom, top):
print 'PythonLayer::setup'
if len(bottom) != 1:
raise Exception("Need one input.")
def reshape(self, bottom, top):
print 'PythonLayer::reshape'
top[0].reshape(1)
def forward(self, bottom, top):
print 'PythonLayer::forward'
top[0].data[...] = np.sum(bottom[0].data) / 2. / bottom[0].num
""")
# This test makes a temporary python layer file whose path is set
# as py_layer_server_file. The job creation process copies that
    # file to the job_dir. The CAFFE_NETWORK above requires the
    # Python script to be in the correct spot. If there is an error
# in the script or if the script is named incorrectly, or does
# not exist in the job_dir, then the test will fail.
def test_python_layer(self):
tmpdir = tempfile.mkdtemp()
py_file = tmpdir + '/py_test.py'
self.write_python_layer_script(py_file)
job_id = self.create_model(python_layer_server_file=py_file)
# remove the temporary python script.
shutil.rmtree(tmpdir)
assert self.model_wait_completion(job_id) == 'Done', 'first job failed'
rv = self.app.get('/models/%s/json' % job_id)
assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
content = json.loads(rv.data)
        assert len(content['snapshots']), 'should have at least one snapshot'
class TestSweepCreation(BaseViewsTestWithDataset, test_utils.CaffeMixin):
"""
Model creation tests
"""
def test_sweep(self):
job_ids = self.create_model(json=True, learning_rate='[0.01, 0.02]', batch_size='[8, 10]')
for job_id in job_ids:
assert self.model_wait_completion(job_id) == 'Done', 'create failed'
assert self.delete_model(job_id) == 200, 'delete failed'
assert not self.model_exists(job_id), 'model exists after delete'
# Tensorflow
class TestTensorflowCreation(BaseTestCreation, test_utils.TensorflowMixin):
pass
class TestTensorflowCreatedWideUnencodedShuffle(BaseTestCreatedWide, test_utils.TensorflowMixin):
ENCODING = 'none'
SHUFFLE = True
class TestTensorflowCreatedHdf5(BaseTestCreated, test_utils.TensorflowMixin):
BACKEND = 'hdf5'
class TestTensorflowCreatedTallHdf5Shuffle(BaseTestCreatedTall, test_utils.TensorflowMixin):
BACKEND = 'hdf5'
SHUFFLE = True
class TestTensorflowDatasetModelInteractions(BaseTestDatasetModelInteractions, test_utils.TensorflowMixin):
pass
class TestTensorflowCreatedDataAug(BaseTestCreatedDataAug, test_utils.TensorflowMixin):
AUG_FLIP = 'fliplrud'
AUG_NOISE = 0.03
AUG_CONTRAST = 0.1
AUG_WHITENING = True
AUG_HSV_USE = True
AUG_HSV_H = 0.02
AUG_HSV_S = 0.04
AUG_HSV_V = 0.06
TRAIN_EPOCHS = 2
class TestTensorflowCreatedWideMultiStepLR(BaseTestCreatedWide, test_utils.TensorflowMixin):
LR_POLICY = 'multistep'
LR_MULTISTEP_VALUES = '50,75,90'
class TestTensorflowLeNet(BaseTestCreated, test_utils.TensorflowMixin):
IMAGE_WIDTH = 28
IMAGE_HEIGHT = 28
TRAIN_EPOCHS = 20
# standard lenet model will adjust to color
# or grayscale images
TENSORFLOW_NETWORK = open(os.path.join(os.path.dirname(digits.__file__),
'standard-networks',
'tensorflow',
'lenet.py')).read()
class TestTensorflowLeNetADAMOptimizer(TestTensorflowLeNet):
OPTIMIZER = 'ADAM'
| DIGITS-master | digits/model/images/classification/test_views.py |
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from ..forms import ImageModelForm
class ImageClassificationModelForm(ImageModelForm):
"""
Defines the form used to create a new ImageClassificationModelJob
"""
pass
| DIGITS-master | digits/model/images/classification/forms.py |
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import os
import re
import tempfile
import flask
import numpy as np
import werkzeug.exceptions
from .forms import ImageClassificationModelForm
from .job import ImageClassificationModelJob
from digits import frameworks
from digits import utils
from digits.config import config_value
from digits.dataset import ImageClassificationDatasetJob
from digits.inference import ImageInferenceJob
from digits.pretrained_model.job import PretrainedModelJob
from digits.status import Status
from digits.utils import filesystem as fs
from digits.utils.forms import fill_form_if_cloned, save_form_to_job
from digits.utils.routing import request_wants_json, job_from_request
from digits.webapp import scheduler
blueprint = flask.Blueprint(__name__, __name__)
"""
Read image list
"""
def read_image_list(image_list, image_folder, num_test_images):
paths = []
ground_truths = []
for line in image_list.readlines():
line = line.strip()
if not line:
continue
# might contain a numerical label at the end
match = re.match(r'(.*\S)\s+(\d+)$', line)
if match:
path = match.group(1)
ground_truth = int(match.group(2))
else:
path = line
ground_truth = None
if not utils.is_url(path) and image_folder and not os.path.isabs(path):
path = os.path.join(image_folder, path)
paths.append(path)
ground_truths.append(ground_truth)
if num_test_images is not None and len(paths) >= num_test_images:
break
return paths, ground_truths
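# Editor's note: a minimal sketch of the list format read_image_list() accepts;
# the paths and labels below are invented examples, not part of the original
# source. Each line is an image path, optionally followed by an integer label:
#
#   /data/val/cat_001.png 0
#   /data/val/dog_001.png 1
#   some/relative/path.png
#
# Non-URL relative paths are joined onto image_folder when one is supplied, and
# a line without a trailing integer yields a ground truth of None.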
@blueprint.route('/new', methods=['GET'])
@utils.auth.requires_login
def new():
"""
Return a form for a new ImageClassificationModelJob
"""
form = ImageClassificationModelForm()
form.dataset.choices = get_datasets()
form.standard_networks.choices = get_standard_networks()
form.standard_networks.default = get_default_standard_network()
form.previous_networks.choices = get_previous_networks()
form.pretrained_networks.choices = get_pretrained_networks()
prev_network_snapshots = get_previous_network_snapshots()
# Is there a request to clone a job with ?clone=<job_id>
fill_form_if_cloned(form)
return flask.render_template('models/images/classification/new.html',
form=form,
frameworks=frameworks.get_frameworks(),
previous_network_snapshots=prev_network_snapshots,
previous_networks_fullinfo=get_previous_networks_fulldetails(),
pretrained_networks_fullinfo=get_pretrained_networks_fulldetails(),
multi_gpu=config_value('caffe')['multi_gpu'],
)
@blueprint.route('/json', methods=['POST'])
@blueprint.route('', methods=['POST'], strict_slashes=False)
@utils.auth.requires_login(redirect=False)
def create():
"""
Create a new ImageClassificationModelJob
Returns JSON when requested: {job_id,name,status} or {errors:[]}
"""
form = ImageClassificationModelForm()
form.dataset.choices = get_datasets()
form.standard_networks.choices = get_standard_networks()
form.standard_networks.default = get_default_standard_network()
form.previous_networks.choices = get_previous_networks()
form.pretrained_networks.choices = get_pretrained_networks()
prev_network_snapshots = get_previous_network_snapshots()
# Is there a request to clone a job with ?clone=<job_id>
fill_form_if_cloned(form)
if not form.validate_on_submit():
if request_wants_json():
return flask.jsonify({'errors': form.errors}), 400
else:
return flask.render_template('models/images/classification/new.html',
form=form,
frameworks=frameworks.get_frameworks(),
previous_network_snapshots=prev_network_snapshots,
previous_networks_fullinfo=get_previous_networks_fulldetails(),
pretrained_networks_fullinfo=get_pretrained_networks_fulldetails(),
multi_gpu=config_value('caffe')['multi_gpu'],
), 400
datasetJob = scheduler.get_job(form.dataset.data)
if not datasetJob:
raise werkzeug.exceptions.BadRequest(
'Unknown dataset job_id "%s"' % form.dataset.data)
    # sweeps will be a list of the permutations of swept fields
# Get swept learning_rate
sweeps = [{'learning_rate': v} for v in form.learning_rate.data]
add_learning_rate = len(form.learning_rate.data) > 1
# Add swept batch_size
sweeps = [dict(s.items() + [('batch_size', bs)]) for bs in form.batch_size.data for s in sweeps[:]]
add_batch_size = len(form.batch_size.data) > 1
n_jobs = len(sweeps)
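    # Editor's note: illustrative expansion of the sweep list (example values
    # only, not from the original source). With learning_rate=[0.01, 0.02] and
    # batch_size=[8, 10] the comprehensions above produce, in order:
    #   [{'learning_rate': 0.01, 'batch_size': 8},
    #    {'learning_rate': 0.02, 'batch_size': 8},
    #    {'learning_rate': 0.01, 'batch_size': 10},
    #    {'learning_rate': 0.02, 'batch_size': 10}]
    # so n_jobs == 4 and one training job is launched per permutation.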
jobs = []
for sweep in sweeps:
# Populate the form with swept data to be used in saving and
# launching jobs.
form.learning_rate.data = sweep['learning_rate']
form.batch_size.data = sweep['batch_size']
# Augment Job Name
extra = ''
if add_learning_rate:
extra += ' learning_rate:%s' % str(form.learning_rate.data[0])
if add_batch_size:
extra += ' batch_size:%d' % form.batch_size.data[0]
job = None
try:
job = ImageClassificationModelJob(
username=utils.auth.get_username(),
name=form.model_name.data + extra,
group=form.group_name.data,
dataset_id=datasetJob.id(),
)
# get handle to framework object
fw = frameworks.get_framework_by_id(form.framework.data)
pretrained_model = None
if form.method.data == 'standard':
found = False
# can we find it in standard networks?
network_desc = fw.get_standard_network_desc(form.standard_networks.data)
if network_desc:
found = True
network = fw.get_network_from_desc(network_desc)
if not found:
raise werkzeug.exceptions.BadRequest(
'Unknown standard model "%s"' % form.standard_networks.data)
elif form.method.data == 'previous':
old_job = scheduler.get_job(form.previous_networks.data)
if not old_job:
raise werkzeug.exceptions.BadRequest(
'Job not found: %s' % form.previous_networks.data)
use_same_dataset = (old_job.dataset_id == job.dataset_id)
network = fw.get_network_from_previous(old_job.train_task().network, use_same_dataset)
for choice in form.previous_networks.choices:
if choice[0] == form.previous_networks.data:
epoch = float(flask.request.form['%s-snapshot' % form.previous_networks.data])
if epoch == 0:
pass
elif epoch == -1:
pretrained_model = old_job.train_task().pretrained_model
else:
# verify snapshot exists
pretrained_model = old_job.train_task().get_snapshot(epoch, download=True)
if pretrained_model is None:
raise werkzeug.exceptions.BadRequest(
"For the job %s, selected pretrained_model for epoch %d is invalid!"
% (form.previous_networks.data, epoch))
# the first is the actual file if a list is returned, other should be meta data
if isinstance(pretrained_model, list):
pretrained_model = pretrained_model[0]
if not (os.path.exists(pretrained_model)):
raise werkzeug.exceptions.BadRequest(
"Pretrained_model for the selected epoch doesn't exist. "
"May be deleted by another user/process. "
"Please restart the server to load the correct pretrained_model details.")
# get logical path
pretrained_model = old_job.train_task().get_snapshot(epoch)
break
elif form.method.data == 'pretrained':
pretrained_job = scheduler.get_job(form.pretrained_networks.data)
model_def_path = pretrained_job.get_model_def_path()
weights_path = pretrained_job.get_weights_path()
network = fw.get_network_from_path(model_def_path)
pretrained_model = weights_path
elif form.method.data == 'custom':
network = fw.get_network_from_desc(form.custom_network.data)
pretrained_model = form.custom_network_snapshot.data.strip()
else:
raise werkzeug.exceptions.BadRequest(
'Unrecognized method: "%s"' % form.method.data)
policy = {'policy': form.lr_policy.data}
if form.lr_policy.data == 'fixed':
pass
elif form.lr_policy.data == 'step':
policy['stepsize'] = form.lr_step_size.data
policy['gamma'] = form.lr_step_gamma.data
elif form.lr_policy.data == 'multistep':
policy['stepvalue'] = form.lr_multistep_values.data
policy['gamma'] = form.lr_multistep_gamma.data
elif form.lr_policy.data == 'exp':
policy['gamma'] = form.lr_exp_gamma.data
elif form.lr_policy.data == 'inv':
policy['gamma'] = form.lr_inv_gamma.data
policy['power'] = form.lr_inv_power.data
elif form.lr_policy.data == 'poly':
policy['power'] = form.lr_poly_power.data
elif form.lr_policy.data == 'sigmoid':
policy['stepsize'] = form.lr_sigmoid_step.data
policy['gamma'] = form.lr_sigmoid_gamma.data
else:
raise werkzeug.exceptions.BadRequest(
'Invalid learning rate policy')
if config_value('caffe')['multi_gpu']:
if form.select_gpus.data:
selected_gpus = [str(gpu) for gpu in form.select_gpus.data]
gpu_count = None
elif form.select_gpu_count.data:
gpu_count = form.select_gpu_count.data
selected_gpus = None
else:
gpu_count = 1
selected_gpus = None
else:
if form.select_gpu.data == 'next':
gpu_count = 1
selected_gpus = None
else:
selected_gpus = [str(form.select_gpu.data)]
gpu_count = None
# Set up data augmentation structure
data_aug = {}
data_aug['flip'] = form.aug_flip.data
data_aug['quad_rot'] = form.aug_quad_rot.data
data_aug['rot'] = form.aug_rot.data
data_aug['scale'] = form.aug_scale.data
data_aug['noise'] = form.aug_noise.data
data_aug['contrast'] = form.aug_contrast.data
data_aug['whitening'] = form.aug_whitening.data
data_aug['hsv_use'] = form.aug_hsv_use.data
data_aug['hsv_h'] = form.aug_hsv_h.data
data_aug['hsv_s'] = form.aug_hsv_s.data
data_aug['hsv_v'] = form.aug_hsv_v.data
# Python Layer File may be on the server or copied from the client.
fs.copy_python_layer_file(
bool(form.python_layer_from_client.data),
job.dir(),
(flask.request.files[form.python_layer_client_file.name]
if form.python_layer_client_file.name in flask.request.files
else ''), form.python_layer_server_file.data)
job.tasks.append(fw.create_train_task(
job=job,
dataset=datasetJob,
train_epochs=form.train_epochs.data,
snapshot_interval=form.snapshot_interval.data,
learning_rate=form.learning_rate.data[0],
lr_policy=policy,
gpu_count=gpu_count,
selected_gpus=selected_gpus,
batch_size=form.batch_size.data[0],
batch_accumulation=form.batch_accumulation.data,
val_interval=form.val_interval.data,
traces_interval=form.traces_interval.data,
pretrained_model=pretrained_model,
crop_size=form.crop_size.data,
use_mean=form.use_mean.data,
network=network,
random_seed=form.random_seed.data,
solver_type=form.solver_type.data,
rms_decay=form.rms_decay.data,
shuffle=form.shuffle.data,
data_aug=data_aug,
)
)
# Save form data with the job so we can easily clone it later.
save_form_to_job(job, form)
jobs.append(job)
scheduler.add_job(job)
if n_jobs == 1:
if request_wants_json():
return flask.jsonify(job.json_dict())
else:
return flask.redirect(flask.url_for('digits.model.views.show', job_id=job.id()))
except:
if job:
scheduler.delete_job(job)
raise
if request_wants_json():
return flask.jsonify(jobs=[j.json_dict() for j in jobs])
# If there are multiple jobs launched, go to the home page.
return flask.redirect('/')
def show(job, related_jobs=None):
"""
Called from digits.model.views.models_show()
"""
return flask.render_template(
'models/images/classification/show.html',
job=job,
framework_ids=[
fw.get_id()
for fw in frameworks.get_frameworks()
],
related_jobs=related_jobs
)
@blueprint.route('/timeline_tracing', methods=['GET'])
def timeline_tracing():
"""
Shows timeline trace of a model
"""
job = job_from_request()
return flask.render_template('models/timeline_tracing.html', job=job)
@blueprint.route('/large_graph', methods=['GET'])
def large_graph():
"""
Show the loss/accuracy graph, but bigger
"""
job = job_from_request()
return flask.render_template('models/large_graph.html', job=job)
@blueprint.route('/classify_one/json', methods=['POST'])
@blueprint.route('/classify_one', methods=['POST', 'GET'])
def classify_one():
"""
Classify one image and return the top 5 classifications
    Returns JSON when requested: {predictions: [[category, confidence], ...]}
"""
model_job = job_from_request()
remove_image_path = False
if 'image_path' in flask.request.form and flask.request.form['image_path']:
image_path = flask.request.form['image_path']
elif 'image_file' in flask.request.files and flask.request.files['image_file']:
outfile = tempfile.mkstemp(suffix='.png')
flask.request.files['image_file'].save(outfile[1])
image_path = outfile[1]
os.close(outfile[0])
remove_image_path = True
else:
raise werkzeug.exceptions.BadRequest('must provide image_path or image_file')
epoch = None
if 'snapshot_epoch' in flask.request.form:
epoch = float(flask.request.form['snapshot_epoch'])
layers = 'none'
if 'show_visualizations' in flask.request.form and flask.request.form['show_visualizations']:
layers = 'all'
# create inference job
inference_job = ImageInferenceJob(
username=utils.auth.get_username(),
name="Classify One Image",
model=model_job,
images=[image_path],
epoch=epoch,
layers=layers
)
# schedule tasks
scheduler.add_job(inference_job)
# wait for job to complete
inference_job.wait_completion()
# retrieve inference data
inputs, outputs, visualizations = inference_job.get_data()
# set return status code
status_code = 500 if inference_job.status == 'E' else 200
# delete job
scheduler.delete_job(inference_job)
if remove_image_path:
os.remove(image_path)
image = None
predictions = []
if inputs is not None and len(inputs['data']) == 1:
image = utils.image.embed_image_html(inputs['data'][0])
# convert to class probabilities for viewing
last_output_name, last_output_data = outputs.items()[-1]
if len(last_output_data) == 1:
scores = last_output_data[0].flatten()
indices = (-scores).argsort()
labels = model_job.train_task().get_labels()
predictions = []
for i in indices:
# ignore prediction if we don't have a label for the corresponding class
# the user might have set the final fully-connected layer's num_output to
# too high a value
if i < len(labels):
predictions.append((labels[i], scores[i]))
predictions = [(p[0], round(100.0 * p[1], 2)) for p in predictions[:5]]
if request_wants_json():
return flask.jsonify({'predictions': predictions}), status_code
else:
return flask.render_template('models/images/classification/classify_one.html',
model_job=model_job,
job=inference_job,
image_src=image,
predictions=predictions,
visualizations=visualizations,
total_parameters=sum(v['param_count']
for v in visualizations if v['vis_type'] == 'Weights'),
), status_code
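# Editor's note: a hedged usage sketch -- host, port and job id below are
# placeholders, not taken from the original source. It mirrors the
# query-string form used by the test suite to call the route above:
#
#   curl -X POST \
#     'http://localhost:5000/models/images/classification/classify_one/json?job_id=<job_id>' \
#     -F image_file=@/path/to/image.png
#
# The JSON response carries a 'predictions' list of [label, confidence] pairs
# for the top five classes.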
@blueprint.route('/classify_many/json', methods=['POST'])
@blueprint.route('/classify_many', methods=['POST', 'GET'])
def classify_many():
"""
Classify many images and return the top 5 classifications for each
Returns JSON when requested: {classifications: {filename: [[category,confidence],...],...}}
"""
model_job = job_from_request()
image_list = flask.request.files.get('image_list')
if not image_list:
raise werkzeug.exceptions.BadRequest('image_list is a required field')
if 'image_folder' in flask.request.form and flask.request.form['image_folder'].strip():
image_folder = flask.request.form['image_folder']
if not os.path.exists(image_folder):
raise werkzeug.exceptions.BadRequest('image_folder "%s" does not exit' % image_folder)
else:
image_folder = None
if 'num_test_images' in flask.request.form and flask.request.form['num_test_images'].strip():
num_test_images = int(flask.request.form['num_test_images'])
else:
num_test_images = None
epoch = None
if 'snapshot_epoch' in flask.request.form:
epoch = float(flask.request.form['snapshot_epoch'])
paths, ground_truths = read_image_list(image_list, image_folder, num_test_images)
# create inference job
inference_job = ImageInferenceJob(
username=utils.auth.get_username(),
name="Classify Many Images",
model=model_job,
images=paths,
epoch=epoch,
layers='none'
)
# schedule tasks
scheduler.add_job(inference_job)
# wait for job to complete
inference_job.wait_completion()
# retrieve inference data
inputs, outputs, _ = inference_job.get_data()
# set return status code
status_code = 500 if inference_job.status == 'E' else 200
# delete job
scheduler.delete_job(inference_job)
if outputs is not None and len(outputs) < 1:
# an error occurred
outputs = None
if inputs is not None:
# retrieve path and ground truth of images that were successfully processed
paths = [paths[idx] for idx in inputs['ids']]
ground_truths = [ground_truths[idx] for idx in inputs['ids']]
# defaults
classifications = None
show_ground_truth = None
top1_accuracy = None
top5_accuracy = None
confusion_matrix = None
per_class_accuracy = None
labels = None
if outputs is not None:
# convert to class probabilities for viewing
last_output_name, last_output_data = outputs.items()[-1]
if len(last_output_data) < 1:
raise werkzeug.exceptions.BadRequest(
'Unable to classify any image from the file')
scores = last_output_data
# take top 5
indices = (-scores).argsort()[:, :5]
labels = model_job.train_task().get_labels()
n_labels = len(labels)
# remove invalid ground truth
ground_truths = [x if x is not None and (0 <= x < n_labels) else None for x in ground_truths]
        # how many pieces of ground truth do we have?
n_ground_truth = len([1 for x in ground_truths if x is not None])
show_ground_truth = n_ground_truth > 0
# compute classifications and statistics
classifications = []
n_top1_accurate = 0
n_top5_accurate = 0
confusion_matrix = np.zeros((n_labels, n_labels), dtype=np.dtype(int))
for image_index, index_list in enumerate(indices):
result = []
if ground_truths[image_index] is not None:
if ground_truths[image_index] == index_list[0]:
n_top1_accurate += 1
if ground_truths[image_index] in index_list:
n_top5_accurate += 1
if (0 <= ground_truths[image_index] < n_labels) and (0 <= index_list[0] < n_labels):
confusion_matrix[ground_truths[image_index], index_list[0]] += 1
for i in index_list:
# `i` is a category in labels and also an index into scores
# ignore prediction if we don't have a label for the corresponding class
# the user might have set the final fully-connected layer's num_output to
# too high a value
if i < len(labels):
result.append((labels[i], round(100.0 * scores[image_index, i], 2)))
classifications.append(result)
# accuracy
if show_ground_truth:
top1_accuracy = round(100.0 * n_top1_accurate / n_ground_truth, 2)
top5_accuracy = round(100.0 * n_top5_accurate / n_ground_truth, 2)
per_class_accuracy = []
for x in xrange(n_labels):
n_examples = sum(confusion_matrix[x])
per_class_accuracy.append(
round(100.0 * confusion_matrix[x, x] / n_examples, 2) if n_examples > 0 else None)
else:
top1_accuracy = None
top5_accuracy = None
per_class_accuracy = None
# replace ground truth indices with labels
ground_truths = [labels[x] if x is not None and (0 <= x < n_labels) else None for x in ground_truths]
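    # Editor's note: a small worked example of the statistics above; the numbers
    # are invented for illustration, not from the original source. With
    # labels = ['cat', 'dog'] and confusion_matrix = [[3, 1], [0, 4]]:
    #   top-1 accuracy     = round(100.0 * (3 + 4) / 8, 2)  = 87.5
    #   per-class accuracy = [100.0 * 3 / 4, 100.0 * 4 / 4] = [75.0, 100.0]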
if request_wants_json():
joined = dict(zip(paths, classifications))
return flask.jsonify({'classifications': joined}), status_code
else:
return flask.render_template('models/images/classification/classify_many.html',
model_job=model_job,
job=inference_job,
paths=paths,
classifications=classifications,
show_ground_truth=show_ground_truth,
ground_truths=ground_truths,
top1_accuracy=top1_accuracy,
top5_accuracy=top5_accuracy,
confusion_matrix=confusion_matrix,
per_class_accuracy=per_class_accuracy,
labels=labels,
), status_code
@blueprint.route('/top_n', methods=['POST'])
def top_n():
"""
Classify many images and show the top N images per category by confidence
"""
model_job = job_from_request()
image_list = flask.request.files['image_list']
if not image_list:
raise werkzeug.exceptions.BadRequest('File upload not found')
epoch = None
if 'snapshot_epoch' in flask.request.form:
epoch = float(flask.request.form['snapshot_epoch'])
if 'top_n' in flask.request.form and flask.request.form['top_n'].strip():
top_n = int(flask.request.form['top_n'])
else:
top_n = 9
if 'image_folder' in flask.request.form and flask.request.form['image_folder'].strip():
image_folder = flask.request.form['image_folder']
if not os.path.exists(image_folder):
raise werkzeug.exceptions.BadRequest('image_folder "%s" does not exit' % image_folder)
else:
image_folder = None
if 'num_test_images' in flask.request.form and flask.request.form['num_test_images'].strip():
num_test_images = int(flask.request.form['num_test_images'])
else:
num_test_images = None
paths, _ = read_image_list(image_list, image_folder, num_test_images)
# create inference job
inference_job = ImageInferenceJob(
username=utils.auth.get_username(),
name="TopN Image Classification",
model=model_job,
images=paths,
epoch=epoch,
layers='none'
)
# schedule tasks
scheduler.add_job(inference_job)
# wait for job to complete
inference_job.wait_completion()
# retrieve inference data
inputs, outputs, _ = inference_job.get_data()
# delete job
scheduler.delete_job(inference_job)
results = None
if outputs is not None and len(outputs) > 0:
# convert to class probabilities for viewing
last_output_name, last_output_data = outputs.items()[-1]
scores = last_output_data
if scores is None:
raise RuntimeError('An error occurred while processing the images')
labels = model_job.train_task().get_labels()
images = inputs['data']
indices = (-scores).argsort(axis=0)[:top_n]
results = []
# Can't have more images per category than the number of images
images_per_category = min(top_n, len(images))
# Can't have more categories than the number of labels or the number of outputs
n_categories = min(indices.shape[1], len(labels))
for i in xrange(n_categories):
result_images = []
for j in xrange(images_per_category):
result_images.append(images[indices[j][i]])
results.append((
labels[i],
utils.image.embed_image_html(
utils.image.vis_square(np.array(result_images),
colormap='white')
)
))
return flask.render_template('models/images/classification/top_n.html',
model_job=model_job,
job=inference_job,
results=results,
)
def get_datasets():
return [(j.id(), j.name()) for j in sorted(
[j for j in scheduler.jobs.values() if isinstance(j, ImageClassificationDatasetJob) and
(j.status.is_running() or j.status == Status.DONE)],
cmp=lambda x, y: cmp(y.id(), x.id())
)
]
def get_standard_networks():
return [
('lenet', 'LeNet'),
('alexnet', 'AlexNet'),
('googlenet', 'GoogLeNet'),
]
def get_default_standard_network():
return 'alexnet'
def get_previous_networks():
return [(j.id(), j.name()) for j in sorted(
[j for j in scheduler.jobs.values() if isinstance(j, ImageClassificationModelJob)],
cmp=lambda x, y: cmp(y.id(), x.id())
)
]
def get_previous_networks_fulldetails():
return [(j) for j in sorted(
[j for j in scheduler.jobs.values() if isinstance(j, ImageClassificationModelJob)],
cmp=lambda x, y: cmp(y.id(), x.id())
)
]
def get_previous_network_snapshots():
prev_network_snapshots = []
for job_id, _ in get_previous_networks():
job = scheduler.get_job(job_id)
e = [(0, 'None')] + [(epoch, 'Epoch #%s' % epoch)
for _, epoch in reversed(job.train_task().snapshots)]
if job.train_task().pretrained_model:
e.insert(0, (-1, 'Previous pretrained model'))
prev_network_snapshots.append(e)
return prev_network_snapshots
def get_pretrained_networks():
return [(j.id(), j.name()) for j in sorted(
[j for j in scheduler.jobs.values() if isinstance(j, PretrainedModelJob)],
cmp=lambda x, y: cmp(y.id(), x.id())
)
]
def get_pretrained_networks_fulldetails():
return [(j) for j in sorted(
[j for j in scheduler.jobs.values() if isinstance(j, PretrainedModelJob)],
cmp=lambda x, y: cmp(y.id(), x.id())
)
]
| DIGITS-master | digits/model/images/classification/views.py |
# Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import os.path
from ..job import ImageModelJob
from digits.utils import subclass, override
# NOTE: Increment this every time the pickled object changes
PICKLE_VERSION = 1
@subclass
class GenericImageModelJob(ImageModelJob):
"""
A Job that creates an image model for a generic network
"""
def __init__(self, **kwargs):
super(GenericImageModelJob, self).__init__(**kwargs)
self.pickver_job_model_image_generic = PICKLE_VERSION
@override
def job_type(self):
return 'Generic Image Model'
@override
def download_files(self, epoch=-1):
task = self.train_task()
snapshot_filenames = task.get_snapshot(epoch, download=True)
# get model files
model_files = task.get_model_files()
download_files = [(self.path(filename), os.path.basename(filename))
for filename in model_files.values()]
if task.dataset.get_mean_file():
download_files.append((
task.dataset.path(task.dataset.get_mean_file()),
os.path.basename(task.dataset.get_mean_file())))
# add snapshot
if not isinstance(snapshot_filenames, list):
download_files.append((snapshot_filenames,
os.path.basename(snapshot_filenames)))
else:
for snapshot_filename in snapshot_filenames:
download_files.append((snapshot_filename,
os.path.basename(snapshot_filename)))
return download_files
| DIGITS-master | digits/model/images/generic/job.py |
# Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from .job import GenericImageModelJob
__all__ = ['GenericImageModelJob']
| DIGITS-master | digits/model/images/generic/__init__.py |
# Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import itertools
import json
import numpy as np
import os
import PIL.Image
import tempfile
import time
import unittest
# Find the best implementation available
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from bs4 import BeautifulSoup
from digits import extensions
from digits.config import config_value
import digits.dataset.images.generic.test_views
import digits.dataset.generic.test_views
import digits.test_views
from digits import test_utils
import digits.webapp
# May be too short on a slow system
TIMEOUT_DATASET = 45
TIMEOUT_MODEL = 60
################################################################################
# Base classes (they don't start with "Test" so nose won't run them)
################################################################################
class BaseViewsTest(digits.test_views.BaseViewsTest):
"""
Provides some functions
"""
CAFFE_NETWORK = \
"""
layer {
name: "scale"
type: "Power"
bottom: "data"
top: "scale"
power_param {
scale: 0.004
}
}
layer {
name: "hidden"
type: "InnerProduct"
bottom: "scale"
top: "output"
inner_product_param {
num_output: 2
}
}
layer {
name: "loss"
type: "EuclideanLoss"
bottom: "output"
bottom: "label"
top: "loss"
exclude { stage: "deploy" }
}
"""
TORCH_NETWORK = \
"""
return function(p)
local nDim = 1
if p.inputShape then p.inputShape:apply(function(x) nDim=nDim*x end) end
local net = nn.Sequential()
net:add(nn.MulConstant(0.004))
net:add(nn.View(-1):setNumInputDims(3)) -- flatten
-- set all weights and biases to zero as this speeds learning up
-- for the type of problem we're trying to solve in this test
local linearLayer = nn.Linear(nDim, 2)
linearLayer.weight:fill(0)
linearLayer.bias:fill(0)
net:add(linearLayer) -- c*h*w -> 2
return {
model = net,
loss = nn.MSECriterion(),
}
end
"""
TENSORFLOW_NETWORK = \
"""
class UserModel(Tower):
@model_property
def inference(self):
ninputs = self.input_shape[0] * self.input_shape[1] * self.input_shape[2]
W = tf.get_variable('W', [ninputs, 2], initializer=tf.constant_initializer(0.0))
        b = tf.get_variable('b', [2], initializer=tf.constant_initializer(0.0))
model = tf.reshape(self.x, shape=[-1, ninputs]) * 0.004
model = tf.add(tf.matmul(model, W), b)
return model
@model_property
def loss(self):
y = tf.reshape(self.y, shape=[-1, 2])
return digits.mse_loss(self.inference, y)
"""
@classmethod
def model_exists(cls, job_id):
return cls.job_exists(job_id, 'models')
@classmethod
def model_status(cls, job_id):
return cls.job_status(job_id, 'models')
@classmethod
def abort_model(cls, job_id):
return cls.abort_job(job_id, job_type='models')
@classmethod
def model_wait_completion(cls, job_id, **kwargs):
kwargs['job_type'] = 'models'
if 'timeout' not in kwargs:
kwargs['timeout'] = TIMEOUT_MODEL
return cls.job_wait_completion(job_id, **kwargs)
@classmethod
def delete_model(cls, job_id):
return cls.delete_job(job_id, job_type='models')
@classmethod
def network(cls):
if cls.FRAMEWORK == 'torch':
return cls.TORCH_NETWORK
elif cls.FRAMEWORK == 'caffe':
return cls.CAFFE_NETWORK
elif cls.FRAMEWORK == 'tensorflow':
return cls.TENSORFLOW_NETWORK
else:
raise ValueError('Unknown framework %s' % cls.FRAMEWORK)
class BaseViewsTestWithAnyDataset(BaseViewsTest):
"""
Provides a dataset
This is a common interface to work with either "images/generic"
datasets or "generic" datasets. The dataset type to use is chosen
further down in the class hierarchy, see e.g. BaseViewsTestWithDataset
"""
# Inherited classes may want to override these attributes
CROP_SIZE = None
TRAIN_EPOCHS = 3
LR_POLICY = None
LEARNING_RATE = None
BATCH_SIZE = 10
@classmethod
def setUpClass(cls, **kwargs):
super(BaseViewsTestWithAnyDataset, cls).setUpClass(**kwargs)
cls.created_models = []
@classmethod
def tearDownClass(cls):
# delete any created datasets
for job_id in cls.created_models:
cls.delete_model(job_id)
super(BaseViewsTestWithAnyDataset, cls).tearDownClass()
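    # Illustrative usage from the derived tests below (see test_create_json
    # and TestSweepCreation):
    #   job_id = self.create_model(json=True)
    #   job_ids = self.create_model(json=True, learning_rate='[0.01, 0.02]')
    # When a hyper-parameter sweep is requested the web app launches several
    # jobs, so a list of job IDs is returned instead of a single ID.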
@classmethod
def create_model(cls, learning_rate=None, **kwargs):
"""
Create a model
Returns the job_id
Raise RuntimeError if job fails to create
Keyword arguments:
**kwargs -- data to be sent with POST request
"""
if learning_rate is None:
learning_rate = cls.LEARNING_RATE
data = {
'model_name': 'test_model',
'group_name': 'test_group',
'dataset': cls.dataset_id,
'method': 'custom',
'custom_network': cls.network(),
'batch_size': cls.BATCH_SIZE,
'train_epochs': cls.TRAIN_EPOCHS,
'random_seed': 0xCAFEBABE,
'framework': cls.FRAMEWORK,
}
if cls.CROP_SIZE is not None:
data['crop_size'] = cls.CROP_SIZE
if cls.LR_POLICY is not None:
data['lr_policy'] = cls.LR_POLICY
if learning_rate is not None:
data['learning_rate'] = learning_rate
data.update(kwargs)
request_json = data.pop('json', False)
url = '/models/images/generic'
if request_json:
url += '/json'
rv = cls.app.post(url, data=data)
if request_json:
if rv.status_code != 200:
print json.loads(rv.data)
raise RuntimeError('Model creation failed with %s' % rv.status_code)
data = json.loads(rv.data)
if 'jobs' in data.keys():
return [j['id'] for j in data['jobs']]
else:
return data['id']
# expect a redirect
if not 300 <= rv.status_code <= 310:
print 'Status code:', rv.status_code
s = BeautifulSoup(rv.data, 'html.parser')
div = s.select('div.alert-danger')
if div:
print div[0]
else:
print rv.data
            raise RuntimeError('Failed to create model - status %s' % rv.status_code)
job_id = cls.job_id_from_response(rv)
assert cls.model_exists(job_id), 'model not found after successful creation'
cls.created_models.append(job_id)
return job_id
class BaseViewsTestWithDataset(BaseViewsTestWithAnyDataset,
digits.dataset.images.generic.test_views.BaseViewsTestWithDataset):
"""
This inherits from BaseViewsTestWithAnyDataset and
digits.dataset.images.generic.test_views.BaseViewsTestWithDataset
in order to provide an interface to test models on "images/generic" datasets
"""
pass
class BaseViewsTestWithModelWithAnyDataset(BaseViewsTestWithAnyDataset):
"""
Provides a model
"""
@classmethod
def setUpClass(cls, **kwargs):
use_mean = kwargs.pop('use_mean', None)
super(BaseViewsTestWithModelWithAnyDataset, cls).setUpClass(**kwargs)
cls.model_id = cls.create_model(json=True, use_mean=use_mean)
assert cls.model_wait_completion(cls.model_id) == 'Done', 'create failed'
class BaseTestViews(BaseViewsTest):
"""
Tests which don't require a dataset or a model
"""
def test_page_model_new(self):
rv = self.app.get('/models/images/generic/new')
assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
assert 'New Image Model' in rv.data, 'unexpected page format'
def test_nonexistent_model(self):
assert not self.model_exists('foo'), "model shouldn't exist"
def test_view_config(self):
extension = extensions.view.get_default_extension()
rv = self.app.get('/models/view-config/%s' % extension.get_id())
assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
def test_visualize_network(self):
rv = self.app.post('/models/visualize-network?framework=' + self.FRAMEWORK,
data={'custom_network': self.network()}
)
s = BeautifulSoup(rv.data, 'html.parser')
body = s.select('body')
if rv.status_code != 200:
body = s.select('body')[0]
if 'InvocationException' in str(body):
raise unittest.SkipTest('GraphViz not installed')
raise AssertionError('POST failed with %s\n\n%s' % (rv.status_code, body))
image = s.select('img')
assert image is not None, "didn't return an image"
class BaseTestCreation(BaseViewsTestWithDataset):
"""
Model creation tests
"""
def test_create_json(self):
job_id = self.create_model(json=True)
self.abort_model(job_id)
def test_create_delete(self):
job_id = self.create_model()
assert self.delete_model(job_id) == 200, 'delete failed'
assert not self.model_exists(job_id), 'model exists after delete'
def test_create_wait_delete(self):
job_id = self.create_model()
assert self.model_wait_completion(job_id) == 'Done', 'create failed'
assert self.delete_model(job_id) == 200, 'delete failed'
assert not self.model_exists(job_id), 'model exists after delete'
def test_create_abort_delete(self):
job_id = self.create_model()
assert self.abort_model(job_id) == 200, 'abort failed'
assert self.delete_model(job_id) == 200, 'delete failed'
assert not self.model_exists(job_id), 'model exists after delete'
def test_snapshot_interval_2(self):
job_id = self.create_model(snapshot_interval=0.5)
assert self.model_wait_completion(job_id) == 'Done', 'create failed'
rv = self.app.get('/models/%s/json' % job_id)
assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
content = json.loads(rv.data)
assert len(content['snapshots']) > 1, 'should take >1 snapshot'
def test_snapshot_interval_0_5(self):
job_id = self.create_model(train_epochs=4, snapshot_interval=2)
assert self.model_wait_completion(job_id) == 'Done', 'create failed'
rv = self.app.get('/models/%s/json' % job_id)
assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
content = json.loads(rv.data)
assert len(content['snapshots']) == 2, 'should take 2 snapshots'
@unittest.skipIf(
not config_value('gpu_list'),
'no GPUs selected')
@unittest.skipIf(
not config_value('caffe')['cuda_enabled'],
'CUDA disabled')
@unittest.skipIf(
config_value('caffe')['multi_gpu'],
'multi-GPU enabled')
def test_select_gpu(self):
for index in config_value('gpu_list').split(','):
yield self.check_select_gpu, index
def check_select_gpu(self, gpu_index):
job_id = self.create_model(select_gpu=gpu_index)
assert self.model_wait_completion(job_id) == 'Done', 'create failed'
@unittest.skipIf(
not config_value('gpu_list'),
'no GPUs selected')
@unittest.skipIf(
not config_value('caffe')['cuda_enabled'],
'CUDA disabled')
@unittest.skipIf(
not config_value('caffe')['multi_gpu'],
'multi-GPU disabled')
def test_select_gpus(self):
# test all possible combinations
gpu_list = config_value('gpu_list').split(',')
for i in xrange(len(gpu_list)):
for combination in itertools.combinations(gpu_list, i + 1):
yield self.check_select_gpus, combination
def check_select_gpus(self, gpu_list):
job_id = self.create_model(select_gpus_list=','.join(gpu_list), batch_size=len(gpu_list))
assert self.model_wait_completion(job_id) == 'Done', 'create failed'
def infer_one_for_job(self, job_id):
# carry out one inference test per category in dataset
image_path = os.path.join(self.imageset_folder, self.test_image)
with open(image_path, 'rb') as infile:
# StringIO wrapping is needed to simulate POST file upload.
image_upload = (StringIO(infile.read()), 'image.png')
rv = self.app.post(
'/models/images/generic/infer_one?job_id=%s' % job_id,
data={
'image_file': image_upload,
'show_visualizations': 'y',
}
)
s = BeautifulSoup(rv.data, 'html.parser')
body = s.select('body')
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
def test_infer_one_mean_image(self):
# test the creation
job_id = self.create_model(use_mean='image')
assert self.model_wait_completion(job_id) == 'Done', 'job failed'
self.infer_one_for_job(job_id)
def test_infer_one_mean_pixel(self):
# test the creation
job_id = self.create_model(use_mean='pixel')
assert self.model_wait_completion(job_id) == 'Done', 'job failed'
self.infer_one_for_job(job_id)
def test_infer_one_mean_none(self):
# test the creation
job_id = self.create_model(use_mean='none')
assert self.model_wait_completion(job_id) == 'Done', 'job failed'
self.infer_one_for_job(job_id)
def test_retrain(self):
job1_id = self.create_model()
assert self.model_wait_completion(job1_id) == 'Done', 'first job failed'
rv = self.app.get('/models/%s/json' % job1_id)
assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
content = json.loads(rv.data)
        assert len(content['snapshots']), 'should have at least one snapshot'
options = {
'method': 'previous',
'previous_networks': job1_id,
}
options['%s-snapshot' % job1_id] = content['snapshots'][-1]
job2_id = self.create_model(**options)
assert self.model_wait_completion(job2_id) == 'Done', 'second job failed'
def test_retrain_twice(self):
# retrain from a job which already had a pretrained model
job1_id = self.create_model()
assert self.model_wait_completion(job1_id) == 'Done', 'first job failed'
rv = self.app.get('/models/%s/json' % job1_id)
assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
content = json.loads(rv.data)
        assert len(content['snapshots']), 'should have at least one snapshot'
options_2 = {
'method': 'previous',
'previous_networks': job1_id,
}
options_2['%s-snapshot' % job1_id] = content['snapshots'][-1]
job2_id = self.create_model(**options_2)
assert self.model_wait_completion(job2_id) == 'Done', 'second job failed'
options_3 = {
'method': 'previous',
'previous_networks': job2_id,
}
options_3['%s-snapshot' % job2_id] = -1
job3_id = self.create_model(**options_3)
assert self.model_wait_completion(job3_id) == 'Done', 'third job failed'
def test_diverging_network(self):
if self.FRAMEWORK == 'caffe':
raise unittest.SkipTest('Test not implemented for Caffe')
job_id = self.create_model(json=True, learning_rate=1e15)
assert self.model_wait_completion(job_id) == 'Error', 'job should have failed'
job_info = self.job_info_html(job_id=job_id, job_type='models')
assert 'Try decreasing your learning rate' in job_info
def test_clone(self):
options_1 = {
'shuffle': True,
'lr_step_size': 33.0,
'previous_networks': 'None',
'lr_inv_power': 0.5,
'lr_inv_gamma': 0.1,
'lr_poly_power': 3.0,
'lr_exp_gamma': 0.95,
'use_mean': 'image',
'custom_network_snapshot': '',
'lr_multistep_gamma': 0.5,
'lr_policy': 'step',
'crop_size': None,
'val_interval': 3.0,
'random_seed': 123,
'learning_rate': 0.01,
'standard_networks': 'None',
'lr_step_gamma': 0.1,
'lr_sigmoid_step': 50.0,
'lr_sigmoid_gamma': 0.1,
'lr_multistep_values': '50,85',
'solver_type': 'SGD',
}
job1_id = self.create_model(**options_1)
assert self.model_wait_completion(job1_id) == 'Done', 'first job failed'
rv = self.app.get('/models/%s/json' % job1_id)
assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
content1 = json.loads(rv.data)
# Clone job1 as job2
options_2 = {
'clone': job1_id,
}
job2_id = self.create_model(**options_2)
assert self.model_wait_completion(job2_id) == 'Done', 'second job failed'
rv = self.app.get('/models/%s/json' % job2_id)
assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
content2 = json.loads(rv.data)
# These will be different
content1.pop('id')
content2.pop('id')
content1.pop('directory')
content2.pop('directory')
content1.pop('creation time')
content2.pop('creation time')
content1.pop('job id')
content2.pop('job id')
assert (content1 == content2), 'job content does not match'
job1 = digits.webapp.scheduler.get_job(job1_id)
job2 = digits.webapp.scheduler.get_job(job2_id)
assert (job1.form_data == job2.form_data), 'form content does not match'
class BaseTestCreatedWithAnyDataset(BaseViewsTestWithModelWithAnyDataset):
"""
Tests on a model that has already been created
"""
def test_save(self):
job = digits.webapp.scheduler.get_job(self.model_id)
assert job.save(), 'Job failed to save'
def test_get_snapshot(self):
job = digits.webapp.scheduler.get_job(self.model_id)
task = job.train_task()
f = task.get_snapshot(-1)
assert f, "Failed to load snapshot"
filename = task.get_snapshot_filename(-1)
assert filename, "Failed to get filename"
def test_download(self):
for extension in ['tar', 'zip', 'tar.gz', 'tar.bz2']:
yield self.check_download, extension
def check_download(self, extension):
url = '/models/%s/download.%s' % (self.model_id, extension)
rv = self.app.get(url)
assert rv.status_code == 200, 'download "%s" failed with %s' % (url, rv.status_code)
def test_index_json(self):
rv = self.app.get('/index/json')
assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
content = json.loads(rv.data)
found = False
for m in content['models']:
if m['id'] == self.model_id:
found = True
break
assert found, 'model not found in list'
def test_model_json(self):
rv = self.app.get('/models/%s/json' % self.model_id)
assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
content = json.loads(rv.data)
assert content['id'] == self.model_id, 'expected different job_id'
assert len(content['snapshots']) > 0, 'no snapshots in list'
def test_edit_name(self):
status = self.edit_job(
self.dataset_id,
name='new name'
)
assert status == 200, 'failed with %s' % status
def test_edit_notes(self):
status = self.edit_job(
self.dataset_id,
notes='new notes'
)
assert status == 200, 'failed with %s' % status
def test_infer_one(self):
image_path = os.path.join(self.imageset_folder, self.test_image)
with open(image_path, 'rb') as infile:
# StringIO wrapping is needed to simulate POST file upload.
image_upload = (StringIO(infile.read()), 'image.png')
rv = self.app.post(
'/models/images/generic/infer_one?job_id=%s' % self.model_id,
data={
'image_file': image_upload,
'show_visualizations': 'y',
}
)
s = BeautifulSoup(rv.data, 'html.parser')
body = s.select('body')
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
def test_infer_one_json(self):
image_path = os.path.join(self.imageset_folder, self.test_image)
with open(image_path, 'rb') as infile:
# StringIO wrapping is needed to simulate POST file upload.
image_upload = (StringIO(infile.read()), 'image.png')
rv = self.app.post(
'/models/images/generic/infer_one/json?job_id=%s' % self.model_id,
data={
'image_file': image_upload,
}
)
assert rv.status_code == 200, 'POST failed with %s' % rv.status_code
data = json.loads(rv.data)
assert data['outputs']['output'][0][0] > 0 and \
data['outputs']['output'][0][1] > 0, \
'image regression result is wrong: %s' % data['outputs']['output']
def test_infer_many(self):
# use the same image twice to make a list of two images
textfile_images = '%s\n%s\n' % (self.test_image, self.test_image)
# StringIO wrapping is needed to simulate POST file upload.
file_upload = (StringIO(textfile_images), 'images.txt')
rv = self.app.post(
'/models/images/generic/infer_many?job_id=%s' % self.model_id,
data={'image_list': file_upload}
)
s = BeautifulSoup(rv.data, 'html.parser')
body = s.select('body')
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
headers = s.select('table.table th')
assert headers is not None, 'unrecognized page format'
def test_infer_db(self):
if self.val_db_path is None:
raise unittest.SkipTest('Class has no validation db')
rv = self.app.post(
'/models/images/generic/infer_db?job_id=%s' % self.model_id,
data={'db_path': self.val_db_path}
)
s = BeautifulSoup(rv.data, 'html.parser')
body = s.select('body')
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
headers = s.select('table.table th')
assert headers is not None, 'unrecognized page format'
def test_infer_many_from_folder(self):
textfile_images = '%s\n' % os.path.basename(self.test_image)
# StringIO wrapping is needed to simulate POST file upload.
file_upload = (StringIO(textfile_images), 'images.txt')
# try selecting the extension explicitly
extension = extensions.view.get_default_extension()
extension_id = extension.get_id()
rv = self.app.post(
'/models/images/generic/infer_many?job_id=%s' % self.model_id,
data={'image_list': file_upload,
'image_folder': os.path.dirname(self.test_image),
'view_extension_id': extension_id}
)
s = BeautifulSoup(rv.data, 'html.parser')
body = s.select('body')
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
headers = s.select('table.table th')
assert headers is not None, 'unrecognized page format'
def test_infer_many_json(self):
textfile_images = '%s\n' % self.test_image
# StringIO wrapping is needed to simulate POST file upload.
file_upload = (StringIO(textfile_images), 'images.txt')
rv = self.app.post(
'/models/images/generic/infer_many/json?job_id=%s' % self.model_id,
data={'image_list': file_upload}
)
assert rv.status_code == 200, 'POST failed with %s' % rv.status_code
data = json.loads(rv.data)
assert 'outputs' in data, 'invalid response'
def test_infer_db_json(self):
if self.val_db_path is None:
raise unittest.SkipTest('Class has no validation db')
rv = self.app.post(
'/models/images/generic/infer_db/json?job_id=%s' % self.model_id,
data={'db_path': self.val_db_path}
)
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, rv.data)
data = json.loads(rv.data)
assert 'outputs' in data, 'invalid response'
class BaseTestCreated(BaseTestCreatedWithAnyDataset,
digits.dataset.images.generic.test_views.BaseViewsTestWithDataset):
"""
Tests on a model that has already been created with an "images/generic" dataset
"""
pass
class BaseTestCreatedWithGradientDataExtension(BaseTestCreatedWithAnyDataset,
digits.dataset.generic.test_views.BaseViewsTestWithDataset):
"""
Tests on a model that has already been created with a "generic" dataset,
using the gradients extension in that instance
"""
EXTENSION_ID = "image-gradients"
@classmethod
def setUpClass(cls, **kwargs):
if not hasattr(cls, 'imageset_folder'):
# Create test image
cls.imageset_folder = tempfile.mkdtemp()
image_width = cls.IMAGE_WIDTH
image_height = cls.IMAGE_HEIGHT
yy, xx = np.mgrid[:image_height,
:image_width].astype('float')
xslope, yslope = 0.5, 0.5
a = xslope * 255 / image_width
b = yslope * 255 / image_height
test_image = a * (xx - image_width / 2) + b * (yy - image_height / 2) + 127.5
test_image = test_image.astype('uint8')
pil_img = PIL.Image.fromarray(test_image)
cls.test_image = os.path.join(cls.imageset_folder, 'test.png')
pil_img.save(cls.test_image)
# note: model created in BaseTestCreatedWithAnyDataset.setUpClass method
super(BaseTestCreatedWithGradientDataExtension, cls).setUpClass()
def test_infer_extension_json(self):
rv = self.app.post(
'/models/images/generic/infer_extension/json?job_id=%s' % self.model_id,
data={
'gradient_x': 0.5,
'gradient_y': -0.5,
}
)
assert rv.status_code == 200, 'POST failed with %s' % rv.status_code
data = json.loads(rv.data)
output = data['outputs'][data['outputs'].keys()[0]]['output']
assert output[0] > 0 and \
output[1] < 0, \
            'image regression result is wrong: %s' % repr(output)
class BaseTestCreatedWithImageProcessingExtension(
BaseTestCreatedWithAnyDataset,
digits.dataset.generic.test_views.BaseViewsTestWithDataset):
"""
Test Image processing extension with a dummy identity network
"""
CAFFE_NETWORK = \
"""
layer {
name: "identity"
type: "Power"
bottom: "data"
top: "output"
}
layer {
name: "loss"
type: "EuclideanLoss"
bottom: "output"
bottom: "label"
top: "loss"
exclude { stage: "deploy" }
}
"""
TORCH_NETWORK = \
"""
return function(p)
return {
-- simple identity network
model = nn.Sequential():add(nn.Identity()),
loss = nn.MSECriterion(),
}
end
"""
TENSORFLOW_NETWORK = \
"""
class UserModel(Tower):
@model_property
def inference(self):
scale = tf.get_variable('scale', [1], initializer=tf.constant_initializer(1.0))
offset = tf.get_variable('offset', [1], initializer=tf.constant_initializer(0.))
offset = tf.Print(offset,[scale, offset], message='scale offset')
model = self.x + offset
self.model = model
return tf.transpose(model, (0, 3, 1, 2)) # net output expected in NCHW format
@model_property
def loss(self):
return digits.mse_loss(self.model, self.y)
"""
EXTENSION_ID = "image-processing"
VARIABLE_SIZE_DATASET = False
NUM_IMAGES = 100
MEAN = 'none'
@classmethod
def setUpClass(cls, **kwargs):
if cls.VARIABLE_SIZE_DATASET:
cls.BATCH_SIZE = 1
cls.create_variable_size_random_imageset(
num_images=cls.NUM_IMAGES)
else:
cls.create_random_imageset(
num_images=cls.NUM_IMAGES,
image_width=cls.IMAGE_WIDTH,
image_height=cls.IMAGE_HEIGHT)
super(BaseTestCreatedWithImageProcessingExtension, cls).setUpClass(
feature_folder=cls.imageset_folder,
label_folder=cls.imageset_folder,
channel_conversion='L',
dsopts_force_same_shape='0' if cls.VARIABLE_SIZE_DATASET else '1',
use_mean=cls.MEAN)
def test_infer_one_json(self):
image_path = os.path.join(self.imageset_folder, self.test_image)
with open(image_path, 'rb') as infile:
# StringIO wrapping is needed to simulate POST file upload.
image_upload = (StringIO(infile.read()), 'image.png')
rv = self.app.post(
'/models/images/generic/infer_one/json?job_id=%s' % self.model_id,
data={'image_file': image_upload}
)
assert rv.status_code == 200, 'POST failed with %s' % rv.status_code
data = json.loads(rv.data)
data_shape = np.array(data['outputs']['output']).shape
if not self.VARIABLE_SIZE_DATASET:
if data_shape != (1, self.CHANNELS, self.IMAGE_WIDTH, self.IMAGE_HEIGHT):
raise ValueError("Shapes differ: got %s expected %s" % (repr(data_shape),
repr((1,
self.CHANNELS,
self.IMAGE_WIDTH,
self.IMAGE_HEIGHT))))
def test_infer_one_noresize_json(self):
if self.FRAMEWORK == 'tensorflow' and self.MEAN == 'image':
raise unittest.SkipTest('Mean image subtraction not supported on '
'variable-size input with Tensorflow')
# create large random image
shape = (self.CHANNELS, 10 * self.IMAGE_HEIGHT, 5 * self.IMAGE_WIDTH)
x = np.random.randint(
low=0,
high=256,
size=shape)
if self.CHANNELS == 1:
# drop channel dimension
x = x[0]
x = x.astype('uint8')
pil_img = PIL.Image.fromarray(x)
# create output stream
s = StringIO()
pil_img.save(s, format="png")
# create input stream
s = StringIO(s.getvalue())
image_upload = (s, 'image.png')
# post request
rv = self.app.post(
'/models/images/generic/infer_one/json?job_id=%s' % self.model_id,
data={'image_file': image_upload, 'dont_resize': 'y'}
)
assert rv.status_code == 200, 'POST failed with %s' % rv.status_code
data = json.loads(rv.data)
data_shape = np.array(data['outputs']['output']).shape
if data_shape != (1,) + shape:
raise ValueError("Shapes differ: got %s expected %s" % (repr(data_shape), repr((1,) + shape)))
def test_infer_db(self):
if self.VARIABLE_SIZE_DATASET:
raise unittest.SkipTest('Skip variable-size inference test')
super(BaseTestCreatedWithImageProcessingExtension, self).test_infer_db()
def test_infer_db_json(self):
if self.VARIABLE_SIZE_DATASET:
raise unittest.SkipTest('Skip variable-size inference test')
super(BaseTestCreatedWithImageProcessingExtension, self).test_infer_db_json()
class BaseTestDatasetModelInteractions(BaseViewsTestWithDataset):
"""
Test the interactions between datasets and models
"""
# If you try to create a model using a deleted dataset, it should fail
def test_create_model_deleted_dataset(self):
dataset_id = self.create_dataset()
assert self.delete_dataset(dataset_id) == 200, 'delete failed'
assert not self.dataset_exists(dataset_id), 'dataset exists after delete'
try:
self.create_model(dataset=dataset_id)
except RuntimeError:
return
assert False, 'Should have failed'
# If you try to create a model using a running dataset,
# it should wait to start until the dataset is completed
def test_create_model_running_dataset(self):
dataset_id = self.create_dataset()
model_id = self.create_model(dataset=dataset_id)
# Model should be in WAIT status while dataset is running
# Copying functionality from job_wait_completion ...
start_time = time.time()
timeout = TIMEOUT_DATASET
dataset_status = self.dataset_status(dataset_id)
while dataset_status != 'Done':
model_status = self.model_status(model_id)
if model_status == 'Initialized':
# give it some time ...
pass
elif model_status == 'Waiting':
# That's what we were waiting for
break
else:
raise Exception('Model not waiting - "%s"' % model_status)
assert (time.time() - start_time) < timeout, 'Job took more than %s seconds' % timeout
time.sleep(0.5)
dataset_status = self.dataset_status(dataset_id)
# Model should switch to RUN status after dataset is DONE
assert self.dataset_wait_completion(dataset_id) == 'Done', 'dataset creation failed'
time.sleep(1)
assert self.model_status(model_id) in ['Running', 'Done'], "model didn't start"
self.abort_model(model_id)
# If you try to delete a completed dataset with a dependent model, it should fail
def test_delete_dataset_dependent_model(self):
dataset_id = self.create_dataset()
model_id = self.create_model(dataset=dataset_id)
assert self.dataset_wait_completion(dataset_id) == 'Done', 'dataset creation failed'
assert self.delete_dataset(dataset_id) == 403, 'dataset deletion should not have succeeded'
self.abort_model(model_id)
# If you try to delete a running dataset with a dependent model, it should fail
def test_delete_running_dataset_dependent_model(self):
dataset_id = self.create_dataset()
model_id = self.create_model(dataset=dataset_id)
assert self.delete_dataset(dataset_id) == 403, 'dataset deletion should not have succeeded'
self.abort_dataset(dataset_id)
self.abort_model(model_id)
class BaseTestCreatedCropInNetwork(BaseTestCreated):
CAFFE_NETWORK = \
"""
layer {
name: "data"
type: "Data"
top: "data"
include {
phase: TRAIN
}
transform_param {
crop_size: 8
}
}
layer {
name: "data"
type: "Data"
top: "data"
include {
phase: TEST
}
transform_param {
crop_size: 8
}
}
layer {
name: "scale"
type: "Power"
bottom: "data"
top: "scale"
power_param {
scale: 0.004
}
}
layer {
name: "hidden"
type: "InnerProduct"
bottom: "scale"
top: "output"
inner_product_param {
num_output: 2
}
}
layer {
name: "loss"
type: "EuclideanLoss"
bottom: "output"
bottom: "label"
top: "loss"
exclude { stage: "deploy" }
}
"""
TORCH_NETWORK = \
"""
return function(p)
local croplen = 8, channels
if p.inputShape then channels=p.inputShape[1] else channels=1 end
local net = nn.Sequential()
net:add(nn.MulConstant(0.004))
net:add(nn.View(-1):setNumInputDims(3)) -- flatten
-- set all weights and biases to zero as this speeds learning up
-- for the type of problem we're trying to solve in this test
local linearLayer = nn.Linear(channels*croplen*croplen, 2)
linearLayer.weight:fill(0)
linearLayer.bias:fill(0)
net:add(linearLayer) -- c*croplen*croplen -> 2
return {
model = net,
loss = nn.MSECriterion(),
croplen = croplen
}
end
"""
class BaseTestCreatedCropInForm(BaseTestCreated):
CROP_SIZE = 8
################################################################################
# Test classes
################################################################################
class TestCaffeViews(BaseTestViews, test_utils.CaffeMixin):
pass
class TestCaffeCreation(BaseTestCreation, test_utils.CaffeMixin):
pass
class TestCaffeCreated(BaseTestCreated, test_utils.CaffeMixin):
pass
class TestCaffeCreatedWithGradientDataExtension(
BaseTestCreatedWithGradientDataExtension, test_utils.CaffeMixin):
pass
class TestCaffeCreatedWithGradientDataExtensionNoValSet(
BaseTestCreatedWithGradientDataExtension, test_utils.CaffeMixin):
@classmethod
def setUpClass(cls):
super(TestCaffeCreatedWithGradientDataExtensionNoValSet, cls).setUpClass(val_image_count=0)
class TestCaffeCreatedWithImageProcessingExtensionMeanImage(
BaseTestCreatedWithImageProcessingExtension, test_utils.CaffeMixin):
MEAN = 'image'
class TestCaffeCreatedWithImageProcessingExtensionMeanPixel(
BaseTestCreatedWithImageProcessingExtension, test_utils.CaffeMixin):
MEAN = 'pixel'
class TestCaffeCreatedWithImageProcessingExtensionMeanNone(
BaseTestCreatedWithImageProcessingExtension, test_utils.CaffeMixin):
MEAN = 'none'
class TestCaffeCreatedVariableSizeDataset(
BaseTestCreatedWithImageProcessingExtension, test_utils.CaffeMixin):
MEAN = 'none'
VARIABLE_SIZE_DATASET = True
class TestCaffeDatasetModelInteractions(BaseTestDatasetModelInteractions, test_utils.CaffeMixin):
pass
class TestCaffeCreatedCropInNetwork(BaseTestCreatedCropInNetwork, test_utils.CaffeMixin):
pass
class TestCaffeCreatedCropInForm(BaseTestCreatedCropInForm, test_utils.CaffeMixin):
pass
class TestTorchViews(BaseTestViews, test_utils.TorchMixin):
pass
class TestTorchCreation(BaseTestCreation, test_utils.TorchMixin):
pass
class TestTorchCreated(BaseTestCreated, test_utils.TorchMixin):
pass
class TestTorchCreatedWithGradientDataExtension(
BaseTestCreatedWithGradientDataExtension, test_utils.TorchMixin):
pass
class TestTorchCreatedWithGradientDataExtensionNoValSet(
BaseTestCreatedWithGradientDataExtension, test_utils.TorchMixin):
@classmethod
def setUpClass(cls):
super(TestTorchCreatedWithGradientDataExtensionNoValSet, cls).setUpClass(val_image_count=0)
class TestTorchCreatedWithImageProcessingExtensionMeanImage(
BaseTestCreatedWithImageProcessingExtension, test_utils.TorchMixin):
MEAN = 'image'
class TestTorchCreatedWithImageProcessingExtensionMeanPixel(
BaseTestCreatedWithImageProcessingExtension, test_utils.TorchMixin):
MEAN = 'pixel'
class TestTorchCreatedWithImageProcessingExtensionMeanNone(
BaseTestCreatedWithImageProcessingExtension, test_utils.TorchMixin):
MEAN = 'none'
class TestTorchCreatedVariableSizeDataset(
BaseTestCreatedWithImageProcessingExtension, test_utils.TorchMixin):
MEAN = 'none'
VARIABLE_SIZE_DATASET = True
class TestTorchCreatedCropInNetwork(BaseTestCreatedCropInNetwork, test_utils.TorchMixin):
pass
class TestTorchCreatedCropInForm(BaseTestCreatedCropInForm, test_utils.TorchMixin):
pass
class TestTorchDatasetModelInteractions(BaseTestDatasetModelInteractions, test_utils.TorchMixin):
pass
class TestTorchTableOutput(BaseTestCreated, test_utils.TorchMixin):
TORCH_NETWORK = \
"""
return function(p)
-- same network as in class BaseTestCreated except that each gradient
-- is learnt separately: the input is fed into nn.ConcatTable and
-- each branch outputs one of the gradients
local nDim = 1
if p.inputShape then p.inputShape:apply(function(x) nDim=nDim*x end) end
local net = nn.Sequential()
net:add(nn.MulConstant(0.004))
net:add(nn.View(-1):setNumInputDims(3)) -- flatten
-- set all weights and biases to zero as this speeds learning up
-- for the type of problem we're trying to solve in this test
local linearLayer1 = nn.Linear(nDim, 1)
linearLayer1.weight:fill(0)
linearLayer1.bias:fill(0)
local linearLayer2 = nn.Linear(nDim, 1)
linearLayer2.weight:fill(0)
linearLayer2.bias:fill(0)
-- create concat table
local parallel = nn.ConcatTable()
parallel:add(linearLayer1):add(linearLayer2)
net:add(parallel)
-- create two MSE criteria to optimize each gradient separately
local mse1 = nn.MSECriterion()
local mse2 = nn.MSECriterion()
-- now create a criterion that takes as input each of the two criteria
local finalCriterion = nn.ParallelCriterion(false):add(mse1):add(mse2)
-- create label hook
function labelHook(input, dblabel)
-- split label alongside 2nd dimension
local labelTable = dblabel:split(1,2)
return labelTable
end
return {
model = net,
loss = finalCriterion,
labelHook = labelHook,
}
end
"""
class TestTorchNDOutput(BaseTestCreated, test_utils.TorchMixin):
CROP_SIZE = 8
TORCH_NETWORK = \
"""
return function(p)
-- this model just forwards the input as is
local net = nn.Sequential():add(nn.Identity())
-- create label hook
function labelHook(input, dblabel)
return input
end
return {
model = net,
loss = nn.AbsCriterion(),
labelHook = labelHook,
}
end
"""
def test_infer_one_json(self):
image_path = os.path.join(self.imageset_folder, self.test_image)
with open(image_path, 'rb') as infile:
# StringIO wrapping is needed to simulate POST file upload.
image_upload = (StringIO(infile.read()), 'image.png')
rv = self.app.post(
'/models/images/generic/infer_one/json?job_id=%s' % self.model_id,
data={
'image_file': image_upload,
}
)
assert rv.status_code == 200, 'POST failed with %s' % rv.status_code
# make sure the shape of the output matches the shape of the input
data = json.loads(rv.data)
output = np.array(data['outputs']['output'][0])
assert output.shape == (1, self.CROP_SIZE, self.CROP_SIZE), \
'shape mismatch: %s' % str(output.shape)
class TestSweepCreation(BaseViewsTestWithDataset, test_utils.CaffeMixin):
"""
    Hyper-parameter sweep creation tests
"""
def test_sweep(self):
job_ids = self.create_model(json=True, learning_rate='[0.01, 0.02]', batch_size='[8, 10]')
for job_id in job_ids:
assert self.model_wait_completion(job_id) == 'Done', 'create failed'
assert self.delete_model(job_id) == 200, 'delete failed'
assert not self.model_exists(job_id), 'model exists after delete'
class TestAllInOneNetwork(BaseTestCreation, BaseTestCreated, test_utils.CaffeMixin):
"""
Test an all-in-one network
"""
CAFFE_NETWORK = \
"""
layer {
name: "train_data"
type: "Data"
top: "scaled_data"
transform_param {
scale: 0.004
}
include { phase: TRAIN }
}
layer {
name: "train_label"
type: "Data"
top: "label"
include { phase: TRAIN }
}
layer {
name: "val_data"
type: "Data"
top: "scaled_data"
transform_param {
scale: 0.004
}
include { phase: TEST }
}
layer {
name: "val_label"
type: "Data"
top: "label"
include { phase: TEST }
}
layer {
name: "scale"
type: "Power"
bottom: "data"
top: "scaled_data"
power_param {
scale: 0.004
}
include { stage: "deploy" }
}
layer {
name: "hidden"
type: "InnerProduct"
bottom: "scaled_data"
top: "output"
inner_product_param {
num_output: 2
}
}
layer {
name: "loss"
type: "EuclideanLoss"
bottom: "output"
bottom: "label"
top: "loss"
exclude { stage: "deploy" }
}
"""
class TestTensorflowCreation(BaseTestCreation, test_utils.TensorflowMixin):
pass
class TestTensorflowCreated(BaseTestCreated, test_utils.TensorflowMixin):
pass
class TestTensorflowCreatedWithGradientDataExtension(BaseTestCreatedWithGradientDataExtension,
test_utils.TensorflowMixin):
pass
class TestTensorflowCreatedWithGradientDataExtensionNoValSet(BaseTestCreatedWithGradientDataExtension,
test_utils.TensorflowMixin):
@classmethod
def setUpClass(cls):
super(TestTensorflowCreatedWithGradientDataExtensionNoValSet, cls).setUpClass(val_image_count=0)
class TestTensorflowCreatedWithImageProcessingExtensionMeanImage(BaseTestCreatedWithImageProcessingExtension,
test_utils.TensorflowMixin):
MEAN = 'image'
class TestTensorflowCreatedWithImageProcessingExtensionMeanPixel(BaseTestCreatedWithImageProcessingExtension,
test_utils.TensorflowMixin):
MEAN = 'pixel'
class TestTensorflowCreatedWithImageProcessingExtensionMeanNone(BaseTestCreatedWithImageProcessingExtension,
test_utils.TensorflowMixin):
MEAN = 'none'
class TestTensorflowCreatedVariableSizeDataset(BaseTestCreatedWithImageProcessingExtension, test_utils.TensorflowMixin):
MEAN = 'none'
VARIABLE_SIZE_DATASET = True
@classmethod
def setUpClass(cls):
raise unittest.SkipTest('Variable-size dataset not supported in Tensorflow/DIGITS')
class TestTensorflowCreatedCropInForm(BaseTestCreatedCropInForm, test_utils.TensorflowMixin):
pass
class TestTensorflowDatasetModelInteractions(BaseTestDatasetModelInteractions, test_utils.TensorflowMixin):
pass
| DIGITS-master | digits/model/images/generic/test_views.py |
# Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from ..forms import ImageModelForm
class GenericImageModelForm(ImageModelForm):
"""
Defines the form used to create a new GenericImageModelJob
"""
pass
| DIGITS-master | digits/model/images/generic/forms.py |
# Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import os
import re
import tempfile
import flask
import werkzeug.exceptions
from .forms import GenericImageModelForm
from .job import GenericImageModelJob
from digits.pretrained_model.job import PretrainedModelJob
from digits import extensions, frameworks, utils
from digits.config import config_value
from digits.dataset import GenericDatasetJob, GenericImageDatasetJob
from digits.inference import ImageInferenceJob
from digits.status import Status
from digits.utils import filesystem as fs
from digits.utils import constants
from digits.utils.forms import fill_form_if_cloned, save_form_to_job
from digits.utils.routing import request_wants_json, job_from_request
from digits.webapp import scheduler
blueprint = flask.Blueprint(__name__, __name__)
@blueprint.route('/new', methods=['GET'])
@blueprint.route('/new/<extension_id>', methods=['GET'])
@utils.auth.requires_login
def new(extension_id=None):
"""
Return a form for a new GenericImageModelJob
"""
form = GenericImageModelForm()
form.dataset.choices = get_datasets(extension_id)
form.standard_networks.choices = []
form.previous_networks.choices = get_previous_networks()
form.pretrained_networks.choices = get_pretrained_networks()
prev_network_snapshots = get_previous_network_snapshots()
# Is there a request to clone a job with ?clone=<job_id>
fill_form_if_cloned(form)
return flask.render_template(
'models/images/generic/new.html',
extension_id=extension_id,
extension_title=extensions.data.get_extension(extension_id).get_title() if extension_id else None,
form=form,
frameworks=frameworks.get_frameworks(),
previous_network_snapshots=prev_network_snapshots,
previous_networks_fullinfo=get_previous_networks_fulldetails(),
pretrained_networks_fullinfo=get_pretrained_networks_fulldetails(),
multi_gpu=config_value('caffe')['multi_gpu'],
)
@blueprint.route('<extension_id>/json', methods=['POST'])
@blueprint.route('<extension_id>', methods=['POST'], strict_slashes=False)
@blueprint.route('/json', methods=['POST'])
@blueprint.route('', methods=['POST'], strict_slashes=False)
@utils.auth.requires_login(redirect=False)
def create(extension_id=None):
"""
Create a new GenericImageModelJob
Returns JSON when requested: {job_id,name,status} or {errors:[]}
"""
form = GenericImageModelForm()
form.dataset.choices = get_datasets(extension_id)
form.standard_networks.choices = []
form.previous_networks.choices = get_previous_networks()
form.pretrained_networks.choices = get_pretrained_networks()
prev_network_snapshots = get_previous_network_snapshots()
# Is there a request to clone a job with ?clone=<job_id>
fill_form_if_cloned(form)
if not form.validate_on_submit():
if request_wants_json():
return flask.jsonify({'errors': form.errors}), 400
else:
return flask.render_template(
'models/images/generic/new.html',
extension_id=extension_id,
extension_title=extensions.data.get_extension(extension_id).get_title() if extension_id else None,
form=form,
frameworks=frameworks.get_frameworks(),
previous_network_snapshots=prev_network_snapshots,
previous_networks_fullinfo=get_previous_networks_fulldetails(),
pretrained_networks_fullinfo=get_pretrained_networks_fulldetails(),
multi_gpu=config_value('caffe')['multi_gpu'],
), 400
datasetJob = scheduler.get_job(form.dataset.data)
if not datasetJob:
raise werkzeug.exceptions.BadRequest(
'Unknown dataset job_id "%s"' % form.dataset.data)
    # sweeps will be a list of the permutations of swept fields
# Get swept learning_rate
sweeps = [{'learning_rate': v} for v in form.learning_rate.data]
add_learning_rate = len(form.learning_rate.data) > 1
# Add swept batch_size
sweeps = [dict(s.items() + [('batch_size', bs)]) for bs in form.batch_size.data for s in sweeps[:]]
add_batch_size = len(form.batch_size.data) > 1
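    # For example (illustrative): learning_rate=[0.01, 0.02] with
    # batch_size=[8, 10] yields four sweep combinations and therefore four
    # training jobs, each with the swept values appended to its name.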
n_jobs = len(sweeps)
jobs = []
for sweep in sweeps:
# Populate the form with swept data to be used in saving and
# launching jobs.
form.learning_rate.data = sweep['learning_rate']
form.batch_size.data = sweep['batch_size']
# Augment Job Name
extra = ''
if add_learning_rate:
extra += ' learning_rate:%s' % str(form.learning_rate.data[0])
if add_batch_size:
extra += ' batch_size:%d' % form.batch_size.data[0]
job = None
try:
job = GenericImageModelJob(
username=utils.auth.get_username(),
name=form.model_name.data + extra,
group=form.group_name.data,
dataset_id=datasetJob.id(),
)
            # get framework from the form selection
fw = frameworks.get_framework_by_id(form.framework.data)
pretrained_model = None
# if form.method.data == 'standard':
if form.method.data == 'previous':
old_job = scheduler.get_job(form.previous_networks.data)
if not old_job:
raise werkzeug.exceptions.BadRequest(
'Job not found: %s' % form.previous_networks.data)
use_same_dataset = (old_job.dataset_id == job.dataset_id)
network = fw.get_network_from_previous(old_job.train_task().network, use_same_dataset)
for choice in form.previous_networks.choices:
if choice[0] == form.previous_networks.data:
epoch = float(flask.request.form['%s-snapshot' % form.previous_networks.data])
if epoch == 0:
pass
elif epoch == -1:
pretrained_model = old_job.train_task().pretrained_model
else:
# verify snapshot exists
pretrained_model = old_job.train_task().get_snapshot(epoch, download=True)
if pretrained_model is None:
raise werkzeug.exceptions.BadRequest(
"For the job %s, selected pretrained_model for epoch %d is invalid!"
% (form.previous_networks.data, epoch))
# the first is the actual file if a list is returned, other should be meta data
if isinstance(pretrained_model, list):
pretrained_model = pretrained_model[0]
if not (os.path.exists(pretrained_model)):
raise werkzeug.exceptions.BadRequest(
"Pretrained_model for the selected epoch doesn't exist. "
"May be deleted by another user/process. "
"Please restart the server to load the correct pretrained_model details.")
# get logical path
pretrained_model = old_job.train_task().get_snapshot(epoch)
break
elif form.method.data == 'pretrained':
pretrained_job = scheduler.get_job(form.pretrained_networks.data)
model_def_path = pretrained_job.get_model_def_path()
weights_path = pretrained_job.get_weights_path()
network = fw.get_network_from_path(model_def_path)
pretrained_model = weights_path
elif form.method.data == 'custom':
network = fw.get_network_from_desc(form.custom_network.data)
pretrained_model = form.custom_network_snapshot.data.strip()
else:
raise werkzeug.exceptions.BadRequest(
'Unrecognized method: "%s"' % form.method.data)
policy = {'policy': form.lr_policy.data}
if form.lr_policy.data == 'fixed':
pass
elif form.lr_policy.data == 'step':
policy['stepsize'] = form.lr_step_size.data
policy['gamma'] = form.lr_step_gamma.data
elif form.lr_policy.data == 'multistep':
policy['stepvalue'] = form.lr_multistep_values.data
policy['gamma'] = form.lr_multistep_gamma.data
elif form.lr_policy.data == 'exp':
policy['gamma'] = form.lr_exp_gamma.data
elif form.lr_policy.data == 'inv':
policy['gamma'] = form.lr_inv_gamma.data
policy['power'] = form.lr_inv_power.data
elif form.lr_policy.data == 'poly':
policy['power'] = form.lr_poly_power.data
elif form.lr_policy.data == 'sigmoid':
policy['stepsize'] = form.lr_sigmoid_step.data
policy['gamma'] = form.lr_sigmoid_gamma.data
else:
raise werkzeug.exceptions.BadRequest(
'Invalid learning rate policy')
if config_value('caffe')['multi_gpu']:
if form.select_gpu_count.data:
gpu_count = form.select_gpu_count.data
selected_gpus = None
else:
selected_gpus = [str(gpu) for gpu in form.select_gpus.data]
gpu_count = None
else:
if form.select_gpu.data == 'next':
gpu_count = 1
selected_gpus = None
else:
selected_gpus = [str(form.select_gpu.data)]
gpu_count = None
# Set up data augmentation structure
data_aug = {}
data_aug['flip'] = form.aug_flip.data
data_aug['quad_rot'] = form.aug_quad_rot.data
data_aug['rot'] = form.aug_rot.data
data_aug['scale'] = form.aug_scale.data
data_aug['noise'] = form.aug_noise.data
data_aug['contrast'] = form.aug_contrast.data
data_aug['whitening'] = form.aug_whitening.data
data_aug['hsv_use'] = form.aug_hsv_use.data
data_aug['hsv_h'] = form.aug_hsv_h.data
data_aug['hsv_s'] = form.aug_hsv_s.data
data_aug['hsv_v'] = form.aug_hsv_v.data
# Python Layer File may be on the server or copied from the client.
fs.copy_python_layer_file(
bool(form.python_layer_from_client.data),
job.dir(),
(flask.request.files[form.python_layer_client_file.name]
if form.python_layer_client_file.name in flask.request.files
else ''), form.python_layer_server_file.data)
job.tasks.append(fw.create_train_task(
job=job,
dataset=datasetJob,
train_epochs=form.train_epochs.data,
snapshot_interval=form.snapshot_interval.data,
learning_rate=form.learning_rate.data[0],
lr_policy=policy,
gpu_count=gpu_count,
selected_gpus=selected_gpus,
batch_size=form.batch_size.data[0],
batch_accumulation=form.batch_accumulation.data,
val_interval=form.val_interval.data,
traces_interval=form.traces_interval.data,
pretrained_model=pretrained_model,
crop_size=form.crop_size.data,
use_mean=form.use_mean.data,
network=network,
random_seed=form.random_seed.data,
solver_type=form.solver_type.data,
rms_decay=form.rms_decay.data,
shuffle=form.shuffle.data,
data_aug=data_aug,
)
)
# Save form data with the job so we can easily clone it later.
save_form_to_job(job, form)
jobs.append(job)
scheduler.add_job(job)
if n_jobs == 1:
if request_wants_json():
return flask.jsonify(job.json_dict())
else:
return flask.redirect(flask.url_for('digits.model.views.show', job_id=job.id()))
except:
if job:
scheduler.delete_job(job)
raise
if request_wants_json():
return flask.jsonify(jobs=[j.json_dict() for j in jobs])
# If there are multiple jobs launched, go to the home page.
return flask.redirect('/')
def show(job, related_jobs=None):
"""
Called from digits.model.views.models_show()
"""
data_extensions = get_data_extensions()
view_extensions = get_view_extensions()
return flask.render_template(
'models/images/generic/show.html',
job=job,
data_extensions=data_extensions,
view_extensions=view_extensions,
related_jobs=related_jobs,
)
@blueprint.route('/timeline_tracing', methods=['GET'])
def timeline_tracing():
"""
Shows timeline trace of a model
"""
job = job_from_request()
return flask.render_template('models/timeline_tracing.html', job=job)
@blueprint.route('/large_graph', methods=['GET'])
def large_graph():
"""
Show the loss/accuracy graph, but bigger
"""
job = job_from_request()
return flask.render_template('models/large_graph.html', job=job)
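# A minimal client-side sketch of the JSON endpoint below, mirroring
# test_views.py (the requests library, server address, and model_job_id are
# assumptions, not part of this module):
#
#   import requests
#   with open('image.png', 'rb') as f:
#       rv = requests.post(
#           'http://localhost:5000/models/images/generic/infer_one/json',
#           params={'job_id': model_job_id},
#           files={'image_file': f})
#   outputs = rv.json()['outputs']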
@blueprint.route('/infer_one/json', methods=['POST'])
@blueprint.route('/infer_one', methods=['POST', 'GET'])
def infer_one():
"""
Infer one image
"""
model_job = job_from_request()
remove_image_path = False
if 'image_path' in flask.request.form and flask.request.form['image_path']:
image_path = flask.request.form['image_path']
elif 'image_file' in flask.request.files and flask.request.files['image_file']:
outfile = tempfile.mkstemp(suffix='.bin')
flask.request.files['image_file'].save(outfile[1])
image_path = outfile[1]
os.close(outfile[0])
remove_image_path = True
else:
raise werkzeug.exceptions.BadRequest('must provide image_path or image_file')
epoch = None
if 'snapshot_epoch' in flask.request.form:
epoch = float(flask.request.form['snapshot_epoch'])
layers = 'none'
if 'show_visualizations' in flask.request.form and flask.request.form['show_visualizations']:
layers = 'all'
if 'dont_resize' in flask.request.form and flask.request.form['dont_resize']:
resize = False
else:
resize = True
# create inference job
inference_job = ImageInferenceJob(
username=utils.auth.get_username(),
name="Infer One Image",
model=model_job,
images=[image_path],
epoch=epoch,
layers=layers,
resize=resize,
)
# schedule tasks
scheduler.add_job(inference_job)
# wait for job to complete
inference_job.wait_completion()
# retrieve inference data
inputs, outputs, model_visualization = inference_job.get_data()
# set return status code
status_code = 500 if inference_job.status == 'E' else 200
# delete job folder and remove from scheduler list
scheduler.delete_job(inference_job)
if remove_image_path:
os.remove(image_path)
if inputs is not None and len(inputs['data']) == 1:
image = utils.image.embed_image_html(inputs['data'][0])
visualizations, header_html, app_begin_html, app_end_html = get_inference_visualizations(
model_job.dataset,
inputs,
outputs)
inference_view_html = visualizations[0]
else:
image = None
inference_view_html = None
header_html = None
app_begin_html = None
app_end_html = None
if request_wants_json():
return flask.jsonify({'outputs': dict((name, blob.tolist())
for name, blob in outputs.iteritems())}), status_code
else:
return flask.render_template(
'models/images/generic/infer_one.html',
model_job=model_job,
job=inference_job,
image_src=image,
inference_view_html=inference_view_html,
header_html=header_html,
app_begin_html=app_begin_html,
app_end_html=app_end_html,
visualizations=model_visualization,
total_parameters=sum(v['param_count'] for v in model_visualization
if v['vis_type'] == 'Weights'),
), status_code
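# The endpoint below accepts the fields of the data extension's inference
# form rather than an image upload; for the image-gradients extension the
# tests post, e.g., gradient_x=0.5 and gradient_y=-0.5 to
# /models/images/generic/infer_extension/json?job_id=<id>.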
@blueprint.route('/infer_extension/json', methods=['POST'])
@blueprint.route('/infer_extension', methods=['POST', 'GET'])
def infer_extension():
"""
Perform inference using the data from an extension inference form
"""
model_job = job_from_request()
inference_db_job = None
try:
if 'data_extension_id' in flask.request.form:
data_extension_id = flask.request.form['data_extension_id']
else:
data_extension_id = model_job.dataset.extension_id
# create an inference database
inference_db_job = create_inference_db(model_job, data_extension_id)
db_path = inference_db_job.get_feature_db_path(constants.TEST_DB)
# create database creation job
epoch = None
if 'snapshot_epoch' in flask.request.form:
epoch = float(flask.request.form['snapshot_epoch'])
layers = 'none'
if 'show_visualizations' in flask.request.form and flask.request.form['show_visualizations']:
layers = 'all'
# create inference job
inference_job = ImageInferenceJob(
username=utils.auth.get_username(),
name="Inference",
model=model_job,
images=db_path,
epoch=epoch,
layers=layers,
resize=False,
)
# schedule tasks
scheduler.add_job(inference_job)
# wait for job to complete
inference_job.wait_completion()
finally:
if inference_db_job:
scheduler.delete_job(inference_db_job)
# retrieve inference data
inputs, outputs, model_visualization = inference_job.get_data()
# set return status code
status_code = 500 if inference_job.status == 'E' else 200
# delete job folder and remove from scheduler list
scheduler.delete_job(inference_job)
if outputs is not None and len(outputs) < 1:
# an error occurred
outputs = None
if inputs is not None:
keys = [str(idx) for idx in inputs['ids']]
inference_views_html, header_html, app_begin_html, app_end_html = get_inference_visualizations(
model_job.dataset,
inputs,
outputs)
else:
inference_views_html = None
header_html = None
keys = None
app_begin_html = None
app_end_html = None
if request_wants_json():
result = {}
for i, key in enumerate(keys):
result[key] = dict((name, blob[i].tolist()) for name, blob in outputs.iteritems())
return flask.jsonify({'outputs': result}), status_code
else:
return flask.render_template(
'models/images/generic/infer_extension.html',
model_job=model_job,
job=inference_job,
keys=keys,
inference_views_html=inference_views_html,
header_html=header_html,
app_begin_html=app_begin_html,
app_end_html=app_end_html,
visualizations=model_visualization,
total_parameters=sum(v['param_count'] for v in model_visualization
if v['vis_type'] == 'Weights'),
), status_code
@blueprint.route('/infer_db/json', methods=['POST'])
@blueprint.route('/infer_db', methods=['POST', 'GET'])
def infer_db():
"""
Infer a database
"""
model_job = job_from_request()
if 'db_path' not in flask.request.form or flask.request.form['db_path'] is None:
raise werkzeug.exceptions.BadRequest('db_path is a required field')
db_path = flask.request.form['db_path']
if not os.path.exists(db_path):
raise werkzeug.exceptions.BadRequest('DB "%s" does not exit' % db_path)
epoch = None
if 'snapshot_epoch' in flask.request.form:
epoch = float(flask.request.form['snapshot_epoch'])
if 'dont_resize' in flask.request.form and flask.request.form['dont_resize']:
resize = False
else:
resize = True
# create inference job
inference_job = ImageInferenceJob(
username=utils.auth.get_username(),
name="Infer Many Images",
model=model_job,
images=db_path,
epoch=epoch,
layers='none',
resize=resize,
)
# schedule tasks
scheduler.add_job(inference_job)
# wait for job to complete
inference_job.wait_completion()
# retrieve inference data
inputs, outputs, _ = inference_job.get_data()
# set return status code
status_code = 500 if inference_job.status == 'E' else 200
# delete job folder and remove from scheduler list
scheduler.delete_job(inference_job)
if outputs is not None and len(outputs) < 1:
# an error occurred
outputs = None
if inputs is not None:
keys = [str(idx) for idx in inputs['ids']]
inference_views_html, header_html, app_begin_html, app_end_html = get_inference_visualizations(
model_job.dataset,
inputs,
outputs)
else:
inference_views_html = None
header_html = None
keys = None
app_begin_html = None
app_end_html = None
if request_wants_json():
result = {}
for i, key in enumerate(keys):
result[key] = dict((name, blob[i].tolist()) for name, blob in outputs.iteritems())
return flask.jsonify({'outputs': result}), status_code
else:
return flask.render_template(
'models/images/generic/infer_db.html',
model_job=model_job,
job=inference_job,
keys=keys,
inference_views_html=inference_views_html,
header_html=header_html,
app_begin_html=app_begin_html,
app_end_html=app_end_html,
), status_code
@blueprint.route('/infer_many/json', methods=['POST'])
@blueprint.route('/infer_many', methods=['POST', 'GET'])
def infer_many():
"""
Infer many images
"""
model_job = job_from_request()
image_list = flask.request.files.get('image_list')
if not image_list:
raise werkzeug.exceptions.BadRequest('image_list is a required field')
if 'image_folder' in flask.request.form and flask.request.form['image_folder'].strip():
image_folder = flask.request.form['image_folder']
if not os.path.exists(image_folder):
            raise werkzeug.exceptions.BadRequest('image_folder "%s" does not exist' % image_folder)
else:
image_folder = None
if 'num_test_images' in flask.request.form and flask.request.form['num_test_images'].strip():
num_test_images = int(flask.request.form['num_test_images'])
else:
num_test_images = None
epoch = None
if 'snapshot_epoch' in flask.request.form:
epoch = float(flask.request.form['snapshot_epoch'])
if 'dont_resize' in flask.request.form and flask.request.form['dont_resize']:
resize = False
else:
resize = True
paths = []
for line in image_list.readlines():
line = line.strip()
if not line:
continue
path = None
# might contain a numerical label at the end
match = re.match(r'(.*\S)\s+\d+$', line)
if match:
path = match.group(1)
else:
path = line
if not utils.is_url(path) and image_folder and not os.path.isabs(path):
path = os.path.join(image_folder, path)
paths.append(path)
if num_test_images is not None and len(paths) >= num_test_images:
break
# create inference job
inference_job = ImageInferenceJob(
username=utils.auth.get_username(),
name="Infer Many Images",
model=model_job,
images=paths,
epoch=epoch,
layers='none',
resize=resize,
)
# schedule tasks
scheduler.add_job(inference_job)
# wait for job to complete
inference_job.wait_completion()
# retrieve inference data
inputs, outputs, _ = inference_job.get_data()
# set return status code
status_code = 500 if inference_job.status == 'E' else 200
# delete job folder and remove from scheduler list
scheduler.delete_job(inference_job)
if outputs is not None and len(outputs) < 1:
# an error occurred
outputs = None
if inputs is not None:
paths = [paths[idx] for idx in inputs['ids']]
inference_views_html, header_html, app_begin_html, app_end_html = get_inference_visualizations(
model_job.dataset,
inputs,
outputs)
else:
inference_views_html = None
header_html = None
app_begin_html = None
app_end_html = None
if request_wants_json():
result = {}
for i, path in enumerate(paths):
result[path] = dict((name, blob[i].tolist()) for name, blob in outputs.iteritems())
return flask.jsonify({'outputs': result}), status_code
else:
return flask.render_template(
'models/images/generic/infer_many.html',
model_job=model_job,
job=inference_job,
paths=paths,
inference_views_html=inference_views_html,
header_html=header_html,
app_begin_html=app_begin_html,
app_end_html=app_end_html,
), status_code
def create_inference_db(model_job, data_extension_id):
# create instance of extension class
extension_class = extensions.data.get_extension(data_extension_id)
if hasattr(model_job.dataset, 'extension_userdata'):
extension_userdata = model_job.dataset.extension_userdata
else:
extension_userdata = {}
extension_userdata.update({'is_inference_db': True})
extension = extension_class(**extension_userdata)
extension_form = extension.get_inference_form()
extension_form_valid = extension_form.validate_on_submit()
if not extension_form_valid:
errors = extension_form.errors.copy()
raise werkzeug.exceptions.BadRequest(repr(errors))
extension.userdata.update(extension_form.data)
# create job
job = GenericDatasetJob(
username=utils.auth.get_username(),
name='Inference dataset',
group=None,
backend='lmdb',
feature_encoding='none',
label_encoding='none',
batch_size=1,
num_threads=1,
force_same_shape=0,
extension_id=data_extension_id,
extension_userdata=extension.get_user_data(),
)
# schedule tasks and wait for job to complete
scheduler.add_job(job)
job.wait_completion()
# check for errors
if job.status != Status.DONE:
msg = ""
for task in job.tasks:
if task.exception:
msg = msg + task.exception
if task.traceback:
                msg = msg + task.traceback
raise RuntimeError(msg)
return job
def get_datasets(extension_id):
if extension_id:
jobs = [j for j in scheduler.jobs.values()
if isinstance(j, GenericDatasetJob) and
j.extension_id == extension_id and (j.status.is_running() or j.status == Status.DONE)]
else:
jobs = [j for j in scheduler.jobs.values()
if (isinstance(j, GenericImageDatasetJob) or isinstance(j, GenericDatasetJob)) and
(j.status.is_running() or j.status == Status.DONE)]
return [(j.id(), j.name())
for j in sorted(jobs, cmp=lambda x, y: cmp(y.id(), x.id()))]
def get_inference_visualizations(dataset, inputs, outputs):
# get extension ID from form and retrieve extension class
if 'view_extension_id' in flask.request.form:
view_extension_id = flask.request.form['view_extension_id']
extension_class = extensions.view.get_extension(view_extension_id)
if extension_class is None:
raise ValueError("Unknown extension '%s'" % view_extension_id)
else:
# no view extension specified, use default
extension_class = extensions.view.get_default_extension()
extension_form = extension_class.get_config_form()
# validate form
extension_form_valid = extension_form.validate_on_submit()
if not extension_form_valid:
raise ValueError("Extension form validation failed with %s" % repr(extension_form.errors))
# create instance of extension class
extension = extension_class(dataset, **extension_form.data)
visualizations = []
# process data
n = len(inputs['ids'])
for idx in xrange(n):
input_id = inputs['ids'][idx]
input_data = inputs['data'][idx]
output_data = {key: outputs[key][idx] for key in outputs}
data = extension.process_data(
input_id,
input_data,
output_data)
template, context = extension.get_view_template(data)
visualizations.append(
flask.render_template_string(template, **context))
# get header
template, context = extension.get_header_template()
header = flask.render_template_string(template, **context) if template else None
app_begin, app_end = extension.get_ng_templates()
return visualizations, header, app_begin, app_end
def get_previous_networks():
return [(j.id(), j.name()) for j in sorted(
[j for j in scheduler.jobs.values() if isinstance(j, GenericImageModelJob)],
cmp=lambda x, y: cmp(y.id(), x.id())
)
]
def get_previous_networks_fulldetails():
return [(j) for j in sorted(
[j for j in scheduler.jobs.values() if isinstance(j, GenericImageModelJob)],
cmp=lambda x, y: cmp(y.id(), x.id())
)
]
def get_previous_network_snapshots():
prev_network_snapshots = []
for job_id, _ in get_previous_networks():
job = scheduler.get_job(job_id)
e = [(0, 'None')] + [(epoch, 'Epoch #%s' % epoch)
for _, epoch in reversed(job.train_task().snapshots)]
if job.train_task().pretrained_model:
e.insert(0, (-1, 'Previous pretrained model'))
prev_network_snapshots.append(e)
return prev_network_snapshots
def get_pretrained_networks():
return [(j.id(), j.name()) for j in sorted(
[j for j in scheduler.jobs.values() if isinstance(j, PretrainedModelJob)],
cmp=lambda x, y: cmp(y.id(), x.id())
)
]
def get_pretrained_networks_fulldetails():
return [(j) for j in sorted(
[j for j in scheduler.jobs.values() if isinstance(j, PretrainedModelJob)],
cmp=lambda x, y: cmp(y.id(), x.id())
)
]
def get_data_extensions():
"""
return all enabled data extensions
"""
data_extensions = {"all-default": "Default"}
all_extensions = extensions.data.get_extensions()
for extension in all_extensions:
data_extensions[extension.get_id()] = extension.get_title()
return data_extensions
def get_view_extensions():
"""
return all enabled view extensions
"""
view_extensions = {}
all_extensions = extensions.view.get_extensions()
for extension in all_extensions:
view_extensions[extension.get_id()] = extension.get_title()
return view_extensions
| DIGITS-master | digits/model/images/generic/views.py |
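# --- Editor's illustrative sketch (not part of the DIGITS sources) ---
# Minimal client for the infer_many JSON endpoint defined above. The host,
# the '/models/images/generic' URL prefix and the 'job_id' form field are
# assumptions about how the blueprint is mounted and how job_from_request()
# reads the request; the 'image_list' and 'image_folder' fields mirror what
# infer_many() reads from flask.request.
import requests
def infer_many_over_http(base_url, model_job_id, list_path, image_folder=None):
    data = {'job_id': model_job_id}
    if image_folder:
        data['image_folder'] = image_folder
    with open(list_path, 'rb') as f:
        response = requests.post(
            base_url.rstrip('/') + '/models/images/generic/infer_many/json',
            data=data,
            files={'image_list': f})
    response.raise_for_status()
    # the JSON payload is {'outputs': {image path: {output name: values}}}
    return response.json()['outputs']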
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from . import tasks
import digits.frameworks
from digits.job import Job
from digits.utils import subclass, override
@subclass
class InferenceJob(Job):
"""
A Job that exercises the forward pass of a neural network
"""
def __init__(self, model, images, epoch, layers, resize=True, **kwargs):
"""
Arguments:
model -- job object associated with model to perform inference on
images -- list of image paths to perform inference on
epoch -- epoch of model snapshot to use
        layers -- which layers to visualize ('all' or 'none')
"""
super(InferenceJob, self).__init__(persistent=False, **kwargs)
# get handle to framework object
fw_id = model.train_task().framework_id
fw = digits.frameworks.get_framework_by_id(fw_id)
if fw is None:
raise RuntimeError(
'The "%s" framework cannot be found. Check your server configuration.'
% fw_id)
# create inference task
self.tasks.append(fw.create_inference_task(
job_dir=self.dir(),
model=model,
images=images,
epoch=epoch,
layers=layers,
resize=resize,
))
@override
def __getstate__(self):
fields_to_save = ['_id', '_name']
full_state = super(InferenceJob, self).__getstate__()
state_to_save = {}
for field in fields_to_save:
state_to_save[field] = full_state[field]
return state_to_save
def inference_task(self):
"""Return the first and only InferenceTask for this job"""
return [t for t in self.tasks if isinstance(t, tasks.InferenceTask)][0]
@override
def __setstate__(self, state):
super(InferenceJob, self).__setstate__(state)
def get_data(self):
"""Return inference data"""
task = self.inference_task()
return task.inference_inputs, task.inference_outputs, task.inference_layers
| DIGITS-master | digits/inference/job.py |
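# --- Editor's illustrative sketch (not part of the DIGITS sources) ---
# Programmatic use of InferenceJob, mirroring the pattern in the generic-image
# views earlier in this dump: schedule the job, wait for it, read the data,
# then delete it (inference jobs are created with persistent=False). Assumes a
# configured DIGITS install with a running scheduler and an existing model job
# object.
from digits.inference import ImageInferenceJob
from digits.utils import auth
from digits.webapp import scheduler
def run_inference(model_job, image_paths, epoch=None):
    job = ImageInferenceJob(
        username=auth.get_username(),
        name='Ad-hoc inference',
        model=model_job,
        images=image_paths,
        epoch=epoch,
        layers='none',
        resize=True,
    )
    scheduler.add_job(job)
    job.wait_completion()
    inputs, outputs, _ = job.get_data()
    scheduler.delete_job(job)
    return inputs, outputs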
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from .images import ImageInferenceJob
from .job import InferenceJob
__all__ = [
'InferenceJob',
'ImageInferenceJob',
]
| DIGITS-master | digits/inference/__init__.py |
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from digits.utils import subclass, override
@subclass
class Error(Exception):
pass
@subclass
class InferenceError(Error):
"""
Errors that occur during inference
"""
def __init__(self, message):
self.message = message
@override
def __str__(self):
return repr(self.message)
| DIGITS-master | digits/inference/errors.py |
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from .inference import InferenceTask
__all__ = ['InferenceTask']
| DIGITS-master | digits/inference/tasks/__init__.py |
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import base64
from collections import OrderedDict
import h5py
import os.path
import tempfile
import re
import sys
import digits
from digits.task import Task
from digits.utils import subclass, override
from digits.utils.image import embed_image_html
@subclass
class InferenceTask(Task):
"""
A task for inference jobs
"""
def __init__(self, model, images, epoch, layers, resize, **kwargs):
"""
Arguments:
model -- trained model to perform inference on
images -- list of images to perform inference on, or path to a database
epoch -- model snapshot to use
layers -- which layers to visualize (by default only the activations of the last layer)
"""
# memorize parameters
self.model = model
self.images = images
self.epoch = epoch
self.layers = layers
self.resize = resize
self.image_list_path = None
self.inference_log_file = "inference.log"
# resources
self.gpu = None
# generated data
self.inference_data_filename = None
self.inference_inputs = None
self.inference_outputs = None
self.inference_layers = []
super(InferenceTask, self).__init__(**kwargs)
@override
def name(self):
return 'Infer Model'
@override
def __getstate__(self):
state = super(InferenceTask, self).__getstate__()
if 'inference_log' in state:
# don't save file handle
del state['inference_log']
return state
@override
def __setstate__(self, state):
super(InferenceTask, self).__setstate__(state)
@override
def before_run(self):
super(InferenceTask, self).before_run()
# create log file
self.inference_log = open(self.path(self.inference_log_file), 'a')
if type(self.images) is list:
# create a file to pass the list of images to perform inference on
imglist_handle, self.image_list_path = tempfile.mkstemp(dir=self.job_dir, suffix='.txt')
for image_path in self.images:
os.write(imglist_handle, "%s\n" % image_path)
os.close(imglist_handle)
@override
def process_output(self, line):
self.inference_log.write('%s\n' % line)
self.inference_log.flush()
timestamp, level, message = self.preprocess_output_digits(line)
if not message:
return False
# progress
match = re.match(r'Processed (\d+)\/(\d+)', message)
if match:
self.progress = float(match.group(1)) / int(match.group(2))
return True
# path to inference data
match = re.match(r'Saved data to (.*)', message)
if match:
self.inference_data_filename = match.group(1).strip()
return True
return False
@override
def after_run(self):
super(InferenceTask, self).after_run()
# retrieve inference data
visualizations = []
outputs = OrderedDict()
if self.inference_data_filename is not None:
# the HDF5 database contains:
# - input images, in a dataset "/inputs"
# - all network outputs, in a group "/outputs/"
# - layer activations and weights, if requested, in a group "/layers/"
db = h5py.File(self.inference_data_filename, 'r')
# collect paths and data
input_ids = db['input_ids'][...]
input_data = db['input_data'][...]
# collect outputs
o = []
for output_key, output_data in db['outputs'].items():
output_name = base64.urlsafe_b64decode(str(output_key))
o.append({'id': output_data.attrs['id'], 'name': output_name, 'data': output_data[...]})
# sort outputs by ID
o = sorted(o, key=lambda x: x['id'])
# retain only data (using name as key)
for output in o:
outputs[output['name']] = output['data']
# collect layer data, if applicable
if 'layers' in db.keys():
for layer_id, layer in db['layers'].items():
visualization = {
'id': int(layer_id),
'name': layer.attrs['name'],
'vis_type': layer.attrs['vis_type'],
'data_stats': {
'shape': layer.attrs['shape'],
'mean': layer.attrs['mean'],
'stddev': layer.attrs['stddev'],
'histogram': [
layer.attrs['histogram_y'].tolist(),
layer.attrs['histogram_x'].tolist(),
layer.attrs['histogram_ticks'].tolist(),
]
}
}
if 'param_count' in layer.attrs:
visualization['param_count'] = layer.attrs['param_count']
if 'layer_type' in layer.attrs:
visualization['layer_type'] = layer.attrs['layer_type']
vis = layer[...]
if vis.shape[0] > 0:
visualization['image_html'] = embed_image_html(vis)
visualizations.append(visualization)
# sort by layer ID (as HDF5 ASCII sorts)
visualizations = sorted(visualizations, key=lambda x: x['id'])
db.close()
# save inference data for further use
self.inference_inputs = {'ids': input_ids, 'data': input_data}
self.inference_outputs = outputs
self.inference_layers = visualizations
self.inference_log.close()
@override
def offer_resources(self, resources):
reserved_resources = {}
# we need one CPU resource from inference_task_pool
cpu_key = 'inference_task_pool'
if cpu_key not in resources:
return None
for resource in resources[cpu_key]:
if resource.remaining() >= 1:
reserved_resources[cpu_key] = [(resource.identifier, 1)]
# we reserve the first available GPU, if there are any
gpu_key = 'gpus'
if resources[gpu_key]:
for resource in resources[gpu_key]:
if resource.remaining() >= 1:
self.gpu = int(resource.identifier)
reserved_resources[gpu_key] = [(resource.identifier, 1)]
break
return reserved_resources
return None
@override
def task_arguments(self, resources, env):
args = [sys.executable,
os.path.join(os.path.dirname(os.path.abspath(digits.__file__)), 'tools', 'inference.py'),
self.image_list_path if self.image_list_path is not None else self.images,
self.job_dir,
self.model.id(),
'--jobs_dir=%s' % digits.config.config_value('jobs_dir'),
]
if self.epoch is not None:
args.append('--epoch=%s' % repr(self.epoch))
if self.layers == 'all':
args.append('--layers=all')
else:
args.append('--layers=none')
if self.gpu is not None:
args.append('--gpu=%d' % self.gpu)
if self.image_list_path is None:
args.append('--db')
if not self.resize:
args.append('--no-resize')
return args
| DIGITS-master | digits/inference/tasks/inference.py |
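# --- Editor's illustrative sketch (not part of the DIGITS sources) ---
# after_run() above reads an HDF5 file whose 'outputs' group holds one dataset
# per network output, keyed by the URL-safe base64 encoding of the output name
# and carrying an 'id' attribute used for sorting, alongside 'input_ids' and
# 'input_data' datasets. The sketch below writes a file with that minimal
# layout; the shapes and values are invented for illustration only.
import base64
import h5py
import numpy as np
def write_minimal_inference_file(path):
    with h5py.File(path, 'w') as db:
        db.create_dataset('input_ids', data=np.arange(2))
        db.create_dataset('input_data', data=np.zeros((2, 32, 32, 3), dtype=np.uint8))
        outputs = db.create_group('outputs')
        key = base64.urlsafe_b64encode('softmax')
        dataset = outputs.create_dataset(key, data=np.random.rand(2, 10))
        dataset.attrs['id'] = 0
write_minimal_inference_file('inference_example.h5')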
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from ..job import InferenceJob
from digits.utils import subclass, override
@subclass
class ImageInferenceJob(InferenceJob):
"""
A Job that exercises the forward pass of an image neural network
"""
@override
def job_type(self):
return 'Image Inference'
| DIGITS-master | digits/inference/images/job.py |
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from .job import ImageInferenceJob
__all__ = ['ImageInferenceJob']
| DIGITS-master | digits/inference/images/__init__.py |
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import os
import re
import subprocess
import tempfile
import sys
from .errors import NetworkVisualizationError
from .framework import Framework
import digits
from digits import utils
from digits.model.tasks import TensorflowTrainTask
from digits.utils import subclass, override, constants
@subclass
class TensorflowFramework(Framework):
"""
Defines required methods to interact with the Tensorflow framework
"""
# short descriptive name
NAME = 'Tensorflow'
# identifier of framework class
CLASS = 'tensorflow'
# whether this framework can shuffle data during training
CAN_SHUFFLE_DATA = True
SUPPORTS_PYTHON_LAYERS_FILE = False
SUPPORTS_TIMELINE_TRACING = True
SUPPORTED_SOLVER_TYPES = ['SGD', 'ADADELTA', 'ADAGRAD', 'ADAGRADDA', 'MOMENTUM', 'ADAM', 'FTRL', 'RMSPROP']
SUPPORTED_DATA_TRANSFORMATION_TYPES = ['MEAN_SUBTRACTION', 'CROPPING']
SUPPORTED_DATA_AUGMENTATION_TYPES = ['FLIPPING', 'NOISE', 'CONTRAST', 'WHITENING', 'HSV_SHIFTING']
def __init__(self):
super(TensorflowFramework, self).__init__()
# id must be unique
self.framework_id = self.CLASS
@override
def create_train_task(self, **kwargs):
"""
create train task
"""
return TensorflowTrainTask(framework_id=self.framework_id, **kwargs)
@override
def get_standard_network_desc(self, network):
"""
return description of standard network
"""
networks_dir = os.path.join(os.path.dirname(digits.__file__), 'standard-networks', self.CLASS)
for filename in os.listdir(networks_dir):
path = os.path.join(networks_dir, filename)
if os.path.isfile(path):
match = None
match = re.match(r'%s.py$' % network, filename)
if match:
with open(path) as infile:
return infile.read()
# return None if not found
return None
@override
def get_network_from_desc(self, network_desc):
"""
return network object from a string representation
"""
# return the same string
return network_desc
@override
def get_network_from_previous(self, previous_network, use_same_dataset):
"""
return new instance of network from previous network
"""
# note: use_same_dataset is ignored here because for Tensorflow, DIGITS
# does not change the number of outputs of the last linear layer
# to match the number of classes in the case of a classification
# network. In order to write a flexible network description that
# accounts for the number of classes, the `nClasses` external
# parameter must be used, see documentation.
# return the same network description
return previous_network
@override
def validate_network(self, data):
"""
validate a network
"""
return True
@override
def get_network_visualization(self, **kwargs):
"""
return visualization of network
"""
desc = kwargs['desc']
dataset = kwargs['dataset']
solver_type = kwargs['solver_type'].lower() if kwargs['solver_type'] else None
use_mean = kwargs['use_mean']
crop_size = kwargs['crop_size']
num_gpus = kwargs['num_gpus']
if dataset is None:
raise NetworkVisualizationError('Make sure a dataset is selected to visualize this network.')
# save network description to temporary file
temp_network_handle, temp_network_path = tempfile.mkstemp(suffix='.py')
os.write(temp_network_handle, desc)
os.close(temp_network_handle)
        # Generate a temporary file to put the graph definition in
_, temp_graphdef_path = tempfile.mkstemp(suffix='.pbtxt')
# Another for the HTML
_, temp_html_path = tempfile.mkstemp(suffix='.html')
try: # do this in a try..finally clause to make sure we delete the temp file
# build command line
args = [sys.executable,
os.path.join(os.path.dirname(digits.__file__), 'tools', 'tensorflow', 'main.py'),
'--network=%s' % os.path.basename(temp_network_path),
'--networkDirectory=%s' % os.path.dirname(temp_network_path),
'--visualizeModelPath=%s' % temp_graphdef_path,
'--optimization=%s' % solver_type,
]
if crop_size:
args.append('--croplen=%s' % crop_size)
if use_mean and use_mean != 'none':
mean_file = dataset.get_mean_file()
assert mean_file is not None, 'Failed to retrieve mean file.'
args.append('--subtractMean=%s' % use_mean)
args.append('--mean=%s' % dataset.path(mean_file))
if hasattr(dataset, 'labels_file'):
args.append('--labels_list=%s' % dataset.path(dataset.labels_file))
train_feature_db_path = dataset.get_feature_db_path(constants.TRAIN_DB)
train_label_db_path = dataset.get_label_db_path(constants.TRAIN_DB)
val_feature_db_path = dataset.get_feature_db_path(constants.VAL_DB)
val_label_db_path = dataset.get_label_db_path(constants.VAL_DB)
args.append('--train_db=%s' % train_feature_db_path)
if train_label_db_path:
args.append('--train_labels=%s' % train_label_db_path)
if val_feature_db_path:
args.append('--validation_db=%s' % val_feature_db_path)
if val_label_db_path:
args.append('--validation_labels=%s' % val_label_db_path)
env = os.environ.copy()
# make only a selected number of GPUs visible. The ID is not important for just the vis
env['CUDA_VISIBLE_DEVICES'] = ",".join([str(i) for i in range(0, int(num_gpus))])
# execute command
p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
close_fds=True,
env=env)
stdout_log = ''
            while p.poll() is None:
                for line in utils.nonblocking_readlines(p.stdout):
                    # nonblocking_readlines() may yield None when no output is
                    # available yet, so check before touching the line
                    if line is not None:
                        timestamp, level, message = TensorflowTrainTask.preprocess_output_tensorflow(line.strip())
                        stdout_log += line
if p.returncode:
raise NetworkVisualizationError(stdout_log)
else: # Success!
return repr(str(open(temp_graphdef_path).read()))
        finally:
            os.remove(temp_network_path)
            os.remove(temp_graphdef_path)
            # also clean up the temporary HTML file created above
            os.remove(temp_html_path)
| DIGITS-master | digits/frameworks/tensorflow_framework.py |
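# --- Editor's illustrative sketch (not part of the DIGITS sources) ---
# get_network_visualization() above is driven entirely by keyword arguments.
# This shows the keys it reads; 'dataset_job' stands in for a loaded DIGITS
# dataset job (it must provide get_feature_db_path() and friends), and the
# remaining values are arbitrary examples.
def visualize_tensorflow_network(framework, network_desc, dataset_job):
    return framework.get_network_visualization(
        desc=network_desc,
        dataset=dataset_job,
        solver_type='SGD',
        use_mean='none',
        crop_size=None,
        num_gpus=1,
    )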
# Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import os
import re
import caffe.draw
import caffe_pb2
from google.protobuf import text_format
from .errors import BadNetworkError
from .framework import Framework
import digits
from digits.config import config_value
from digits.model.tasks import CaffeTrainTask
from digits.utils import subclass, override, parse_version
@subclass
class CaffeFramework(Framework):
"""
Defines required methods to interact with the Caffe framework
This class can be instantiated as many times as there are compatible
instances of Caffe
"""
# short descriptive name
NAME = 'Caffe'
# identifier of framework class (intended to be the same across
# all instances of this class)
CLASS = 'caffe'
# whether this framework can shuffle data during training
CAN_SHUFFLE_DATA = False
SUPPORTS_PYTHON_LAYERS_FILE = True
SUPPORTS_TIMELINE_TRACING = False
if config_value('caffe')['flavor'] == 'NVIDIA':
if parse_version(config_value('caffe')['version']) > parse_version('0.14.0-alpha'):
SUPPORTED_SOLVER_TYPES = ['SGD', 'NESTEROV', 'ADAGRAD',
'RMSPROP', 'ADADELTA', 'ADAM']
else:
SUPPORTED_SOLVER_TYPES = ['SGD', 'NESTEROV', 'ADAGRAD']
elif config_value('caffe')['flavor'] == 'BVLC':
SUPPORTED_SOLVER_TYPES = ['SGD', 'NESTEROV', 'ADAGRAD',
'RMSPROP', 'ADADELTA', 'ADAM']
else:
raise ValueError('Unknown flavor. Support NVIDIA and BVLC flavors only.')
SUPPORTED_DATA_TRANSFORMATION_TYPES = ['MEAN_SUBTRACTION', 'CROPPING']
SUPPORTED_DATA_AUGMENTATION_TYPES = []
@override
def __init__(self):
super(CaffeFramework, self).__init__()
self.framework_id = self.CLASS
@override
def create_train_task(self, **kwargs):
"""
create train task
"""
return CaffeTrainTask(framework_id=self.framework_id, **kwargs)
@override
def validate_network(self, data):
"""
validate a network (input data are expected to be a text
description of the network)
"""
pb = caffe_pb2.NetParameter()
try:
text_format.Merge(data, pb)
except text_format.ParseError as e:
raise BadNetworkError('Not a valid NetParameter: %s' % e)
@override
def get_standard_network_desc(self, network):
"""
return description of standard network
network is expected to be a instance of caffe_pb2.NetParameter
"""
networks_dir = os.path.join(os.path.dirname(digits.__file__), 'standard-networks', self.CLASS)
for filename in os.listdir(networks_dir):
path = os.path.join(networks_dir, filename)
if os.path.isfile(path):
match = None
match = re.match(r'%s.prototxt' % network, filename)
if match:
with open(path) as infile:
return infile.read()
# return None if not found
return None
@override
def get_network_from_desc(self, network_desc):
"""
return network object from a string representation
"""
network = caffe_pb2.NetParameter()
text_format.Merge(network_desc, network)
return network
@override
def get_network_from_previous(self, previous_network, use_same_dataset):
"""
return new instance of network from previous network
"""
network = caffe_pb2.NetParameter()
network.CopyFrom(previous_network)
if not use_same_dataset:
# Rename the final layer
# XXX making some assumptions about network architecture here
ip_layers = [l for l in network.layer if l.type == 'InnerProduct']
if len(ip_layers) > 0:
ip_layers[-1].name = '%s_retrain' % ip_layers[-1].name
return network
@override
def get_network_from_path(self, path):
"""
return network object from a file path
"""
network = caffe_pb2.NetParameter()
with open(path) as infile:
text_format.Merge(infile.read(), network)
return network
@override
def get_network_visualization(self, **kwargs):
"""
return visualization of network
"""
desc = kwargs['desc']
net = caffe_pb2.NetParameter()
text_format.Merge(desc, net)
# Throws an error if name is None
if not net.name:
net.name = 'Network'
        return ('<img src="data:image/png;base64,' +
caffe.draw.draw_net(net, 'UD').encode('base64') +
'" style="max-width:100%" />')
@override
def can_accumulate_gradients(self):
if config_value('caffe')['flavor'] == 'BVLC':
return True
elif config_value('caffe')['flavor'] == 'NVIDIA':
return (parse_version(config_value('caffe')['version']) > parse_version('0.14.0-alpha'))
else:
raise ValueError('Unknown flavor. Support NVIDIA and BVLC flavors only.')
| DIGITS-master | digits/frameworks/caffe_framework.py |
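# --- Editor's illustrative sketch (not part of the DIGITS sources) ---
# validate_network() above raises BadNetworkError when the text cannot be
# parsed into a caffe_pb2.NetParameter. A caller can surface that to the user
# as follows (assumes pycaffe and the DIGITS Caffe configuration are
# available, since CaffeFramework reads config_value('caffe')):
from digits.frameworks import CaffeFramework
from digits.frameworks.errors import BadNetworkError
framework = CaffeFramework()
try:
    framework.validate_network('layer { name: "data" type: ')  # truncated prototxt
except BadNetworkError as e:
    print('Rejected network: %s' % e)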
# Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
from digits.inference.tasks import InferenceTask
class Framework(object):
"""
Defines required methods to interact with a framework
"""
def get_name(self):
"""
return self-descriptive name
"""
return self.NAME
def get_id(self):
"""
return unique id of framework instance
"""
return self.framework_id
def can_shuffle_data(self):
"""
return whether framework can shuffle input data during training
"""
return self.CAN_SHUFFLE_DATA
def supports_python_layers_file(self):
"""
        return whether framework supports a user-defined Python layers file
"""
return self.SUPPORTS_PYTHON_LAYERS_FILE
def supports_timeline_traces(self):
"""
return whether framework supports creating timeline traces
"""
return self.SUPPORTS_TIMELINE_TRACING
def supports_solver_type(self, solver_type):
"""
return whether framework supports this solver_type
"""
if not hasattr(self, 'SUPPORTED_SOLVER_TYPES'):
raise NotImplementedError
assert isinstance(self.SUPPORTED_SOLVER_TYPES, list)
return solver_type in self.SUPPORTED_SOLVER_TYPES
def validate_network(self, data):
"""
validate a network (must be implemented in child class)
"""
raise NotImplementedError('Please implement me')
def create_inference_task(self, **kwargs):
"""
create inference task
"""
return InferenceTask(**kwargs)
def create_train_task(self, **kwargs):
"""
create train task
"""
raise NotImplementedError('Please implement me')
def get_standard_network_desc(self, network):
"""
return text description of network
"""
raise NotImplementedError('Please implement me')
def get_network_from_desc(self, network_desc):
"""
return network object from a string representation
"""
raise NotImplementedError('Please implement me')
def get_network_from_previous(self, previous_network, use_same_dataset):
"""
return new instance of network from previous network
"""
raise NotImplementedError('Please implement me')
def get_network_from_path(self, path):
"""
return network object from a file path
"""
raise NotImplementedError('Please implement me')
def get_network_visualization(self, **kwargs):
"""
return visualization of network
"""
raise NotImplementedError('Please implement me')
def can_accumulate_gradients(self):
return False
| DIGITS-master | digits/frameworks/framework.py |
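# --- Editor's illustrative sketch (not part of the DIGITS sources) ---
# A new backend integrates with DIGITS by subclassing Framework and filling in
# the methods above. This hypothetical skeleton shows the minimal surface; the
# class attributes mirror the ones used by the Caffe/Torch/Tensorflow
# implementations in this package, and any method not overridden here
# (create_train_task, get_standard_network_desc, ...) would still raise
# NotImplementedError. Assumes a configured DIGITS install.
from digits.frameworks.framework import Framework
class SkeletonFramework(Framework):
    NAME = 'Skeleton'
    CLASS = 'skeleton'
    CAN_SHUFFLE_DATA = True
    SUPPORTS_PYTHON_LAYERS_FILE = False
    SUPPORTS_TIMELINE_TRACING = False
    SUPPORTED_SOLVER_TYPES = ['SGD']
    def __init__(self):
        self.framework_id = self.CLASS
    def validate_network(self, data):
        return True
    def get_network_from_desc(self, network_desc):
        return network_desc
    def get_network_from_previous(self, previous_network, use_same_dataset):
        return previous_network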
# Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from .caffe_framework import CaffeFramework
from .framework import Framework
from .torch_framework import TorchFramework
from digits.config import config_value
__all__ = [
'Framework',
'CaffeFramework',
'TorchFramework',
]
if config_value('tensorflow')['enabled']:
from .tensorflow_framework import TensorflowFramework
__all__.append('TensorflowFramework')
#
# create framework instances
#
# torch is optional
torch = TorchFramework() if config_value('torch')['enabled'] else None
# tensorflow is optional
tensorflow = TensorflowFramework() if config_value('tensorflow')['enabled'] else None
# caffe is mandatory
caffe = CaffeFramework()
#
# utility functions
#
def get_frameworks():
"""
return list of all available framework instances
there may be more than one instance per framework class
"""
frameworks = [caffe]
if torch:
frameworks.append(torch)
if tensorflow:
frameworks.append(tensorflow)
return frameworks
def get_framework_by_id(framework_id):
"""
return framework instance associated with given id
"""
for fw in get_frameworks():
if fw.get_id() == framework_id:
return fw
return None
| DIGITS-master | digits/frameworks/__init__.py |
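# --- Editor's illustrative usage sketch (not part of the DIGITS sources) ---
# Typical lookup, as done by InferenceJob earlier in this dump: resolve the
# framework id stored on a training task back to a framework instance.
# Assumes a configured DIGITS install (importing digits.frameworks
# instantiates the Caffe framework).
from digits import frameworks
fw = frameworks.get_framework_by_id('caffe')
if fw is None:
    raise RuntimeError('The "caffe" framework is not available on this server.')
print('%s can shuffle data: %s' % (fw.get_name(), fw.can_shuffle_data()))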
# Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from digits.utils import subclass
@subclass
class Error(Exception):
pass
@subclass
class BadNetworkError(Error):
"""
Errors that occur when validating a network
"""
def __init__(self, message):
self.message = message
def __str__(self):
return repr(self.message)
@subclass
class NetworkVisualizationError(Error):
"""
    Errors that occur when visualizing a network
"""
def __init__(self, message):
self.message = message
def __str__(self):
return repr(self.message)
| DIGITS-master | digits/frameworks/errors.py |
# Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import os
import re
import subprocess
import time
import tempfile
import flask
from .errors import NetworkVisualizationError
from .framework import Framework
import digits
from digits import utils
from digits.config import config_value
from digits.model.tasks import TorchTrainTask
from digits.utils import subclass, override
@subclass
class TorchFramework(Framework):
"""
Defines required methods to interact with the Torch framework
"""
# short descriptive name
NAME = 'Torch'
# identifier of framework class
CLASS = 'torch'
# whether this framework can shuffle data during training
CAN_SHUFFLE_DATA = True
SUPPORTS_PYTHON_LAYERS_FILE = False
SUPPORTS_TIMELINE_TRACING = False
SUPPORTED_SOLVER_TYPES = ['SGD', 'NESTEROV', 'ADAGRAD',
'RMSPROP', 'ADADELTA', 'ADAM']
SUPPORTED_DATA_TRANSFORMATION_TYPES = ['MEAN_SUBTRACTION', 'CROPPING']
SUPPORTED_DATA_AUGMENTATION_TYPES = ['FLIPPING', 'QUAD_ROTATION', 'ARBITRARY_ROTATION',
'SCALING', 'NOISE', 'HSV_SHIFTING']
def __init__(self):
super(TorchFramework, self).__init__()
# id must be unique
self.framework_id = self.CLASS
@override
def create_train_task(self, **kwargs):
"""
create train task
"""
return TorchTrainTask(framework_id=self.framework_id, **kwargs)
@override
def get_standard_network_desc(self, network):
"""
return description of standard network
"""
networks_dir = os.path.join(os.path.dirname(digits.__file__), 'standard-networks', self.CLASS)
# Torch's GoogLeNet and AlexNet models are placed in sub folder
if (network == "alexnet" or network == "googlenet"):
networks_dir = os.path.join(networks_dir, 'ImageNet-Training')
for filename in os.listdir(networks_dir):
path = os.path.join(networks_dir, filename)
if os.path.isfile(path):
match = None
match = re.match(r'%s.lua' % network, filename)
if match:
with open(path) as infile:
return infile.read()
# return None if not found
return None
@override
def get_network_from_desc(self, network_desc):
"""
return network object from a string representation
"""
# return the same string
return network_desc
@override
def get_network_from_previous(self, previous_network, use_same_dataset):
"""
return new instance of network from previous network
"""
# note: use_same_dataset is ignored here because for Torch, DIGITS
# does not change the number of outputs of the last linear layer
# to match the number of classes in the case of a classification
# network. In order to write a flexible network description that
# accounts for the number of classes, the `nClasses` external
# parameter must be used, see documentation.
# return the same network description
return previous_network
@override
def get_network_from_path(self, path):
"""
return network object from a file path
"""
with open(path, 'r') as f:
network = f.read()
return network
@override
def validate_network(self, data):
"""
validate a network
"""
return True
@override
def get_network_visualization(self, **kwargs):
"""
return visualization of network
"""
desc = kwargs['desc']
# save network description to temporary file
temp_network_handle, temp_network_path = tempfile.mkstemp(suffix='.lua')
os.write(temp_network_handle, desc)
os.close(temp_network_handle)
try: # do this in a try..finally clause to make sure we delete the temp file
# build command line
torch_bin = config_value('torch')['executable']
args = [torch_bin,
os.path.join(os.path.dirname(digits.__file__), 'tools', 'torch', 'main.lua'),
'--network=%s' % os.path.splitext(os.path.basename(temp_network_path))[0],
'--networkDirectory=%s' % os.path.dirname(temp_network_path),
'--subtractMean=none', # we are not providing a mean image
'--visualizeModel=yes',
'--type=float'
]
# execute command
p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
close_fds=True,
)
# TODO: need to include regular expression for MAC color codes
regex = re.compile('\x1b\[[0-9;]*m', re.UNICODE)
# the network description will be accumulated from the command output
# when collecting_net_definition==True
collecting_net_definition = False
desc = []
unrecognized_output = []
while p.poll() is None:
for line in utils.nonblocking_readlines(p.stdout):
if line is not None:
# Remove whitespace and color codes.
# Color codes are appended to beginning and end of line by torch binary
# i.e., 'th'. Check the below link for more information
# https://groups.google.com/forum/#!searchin/torch7/color$20codes/torch7/8O_0lSgSzuA/Ih6wYg9fgcwJ # noqa
line = regex.sub('', line)
timestamp, level, message = TorchTrainTask.preprocess_output_torch(line.strip())
if message:
if message.startswith('Network definition'):
collecting_net_definition = not collecting_net_definition
else:
if collecting_net_definition:
desc.append(line)
elif len(line):
unrecognized_output.append(line)
else:
time.sleep(0.05)
if not len(desc):
# we did not find a network description
raise NetworkVisualizationError(''.join(unrecognized_output))
else:
output = flask.Markup('<pre align="left">')
for line in desc:
output += flask.Markup.escape(line)
output += flask.Markup('</pre>')
return output
finally:
os.remove(temp_network_path)
| DIGITS-master | digits/frameworks/torch_framework.py |
| DIGITS-master | digits/store/__init__.py |
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import json
import os
import tempfile
import time
import uuid
import flask
import requests
from digits.log import logger
from digits.pretrained_model import PretrainedModelJob
from digits.utils import auth
from digits.utils.store import StoreParser
from digits.webapp import app, scheduler, socketio
blueprint = flask.Blueprint(__name__, __name__)
class Progress(object):
"""class to emit download progress"""
def __init__(self, model_id):
self._model_id = model_id
self._file = 0
self._n_files = 0
self._n_chunks = 0
self._last_progress = -1
self.emit(0)
def set_n_files(self, n_files):
""" set the number of files file this Progress object will report """
self._n_files = n_files
    def set_n_chunks(self, n_chunks):
        """ set the number of chunks expected """
        self._n_chunks = n_chunks
self._file += 1
self._last_progress = -1
def emit(self, progress):
""" emit the progress to the client """
socketio.emit('update',
{
'model_id': self._model_id,
'update': 'progress',
'progress': progress,
},
namespace='/jobs',
room='job_management'
)
# micro sleep so that emit is broadcast to the client
time.sleep(0.001)
def incr(self, itr):
""" progress iterator that the request iterator is wrapped in """
for i, item in enumerate(itr):
yield item
progress = min(int(round(((self._file - 1.0) + (i + 1.0) / self._n_chunks) /
self._n_files * 100)), 100)
if progress != self._last_progress:
self.emit(progress)
self._last_progress = progress
def save_binary(url, file_name, tmp_dir, progress):
r = requests.get(os.path.join(url, file_name), stream=True)
chunk_size = 1024
total_length = int(r.headers.get('content-length'))
n_chunks = (total_length / chunk_size) + bool(total_length % chunk_size)
progress.set_n_chunks(n_chunks)
full_path = os.path.join(tmp_dir, file_name)
with open(full_path, 'wb') as f:
for chunk in progress.incr(r.iter_content(chunk_size=chunk_size)):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
return full_path
def save_tensorflow_weights(url, file_name, tmp_dir, progress):
full_path = os.path.join(tmp_dir, file_name)
save_binary(url, file_name + ".index", tmp_dir, progress)
save_binary(url, file_name + ".meta", tmp_dir, progress)
save_binary(url, file_name + ".data-00000-of-00001", tmp_dir, progress)
return full_path
def retrieve_files(url, directory, progress):
model_url = os.path.join(url, directory)
tmp_dir = tempfile.mkdtemp()
tmp = requests.get(os.path.join(model_url, 'info.json')).content
info = json.loads(tmp)
# How many files will we download?
n_files = 1 + ("model file" in info or "network file" in info) + ("labels file" in info)
progress.set_n_files(n_files)
if (info["snapshot file"].endswith(".ckpt")):
weights = save_tensorflow_weights(model_url, info["snapshot file"], tmp_dir, progress)
else:
weights = save_binary(model_url, info["snapshot file"], tmp_dir, progress)
if "model file" in info:
remote_model_file = info["model file"]
elif "network file" in info:
remote_model_file = info["network file"]
else:
return flask.jsonify({"status": "Missing model definition in info.json"}), 500
model = save_binary(model_url, remote_model_file, tmp_dir, progress)
if "labels file" in info:
label = save_binary(model_url, info["labels file"], tmp_dir, progress)
else:
label = None
if "python layer file" in info:
python_layer = save_binary(model_url, info["python layer file"], tmp_dir, progress)
else:
python_layer = None
meta_data = info
return weights, model, label, meta_data, python_layer
@blueprint.route('/push', methods=['GET'])
def push():
"""
Create a pre-trained model from model store
"""
model_id = flask.request.args.get('id')
model_grand_list = app.config['store_cache'].read()
found = False
if model_grand_list is not None:
for store in model_grand_list:
for model in model_grand_list[store]['model_list']:
if model['id'] == model_id:
url = model_grand_list[store]['base_url']
directory = model['dir_name']
found = True
break
if found:
break
if not found:
return 'Unable to find requested model', 404
else:
progress = Progress(model_id)
weights, model, label, meta_data, python_layer = retrieve_files(url, directory, progress)
job = PretrainedModelJob(
weights,
model,
label,
meta_data['framework'],
username=auth.get_username(),
name=meta_data['name']
)
scheduler.add_job(job)
response = flask.make_response(job.id())
return response
@blueprint.route('/models', methods=['GET'])
def models():
"""
perform server-to-server communication to retrieve
info.json file in all subfolders of model store
"""
if flask.request.args.get('refresh') == '1':
app.config['store_cache'].reset()
cached_data = app.config['store_cache'].read()
if cached_data is not None:
return json.dumps(cached_data)
store_urls = app.config['store_url_list']
aggregated_dict = dict()
for i, store_url in enumerate(store_urls):
if len(store_url) == 0:
continue
model_list = list()
if store_url[-1] != '/':
store_base_url = store_url + '/'
else:
store_base_url = store_url
try:
response = requests.get(os.path.join(store_base_url, 'master.json'))
if response.status_code == 200:
json_response = json.loads(response.content)
dirs = json_response['children']
msg = json_response['msg']
else: # try to retrieve from directory listing
page = requests.get(store_base_url)
parser = StoreParser()
parser.feed(page.content)
if len(parser.get_child_dirs()) > 0: # we have list of subdirectories
dirs = [d[:-1] for d in parser.get_child_dirs()]
msg = 'Thanks for visiting {}'.format(store_base_url)
else: # nothing found, try next URL
continue
except requests.exceptions.RequestException as e:
logger.warning('Skip %s due to error %s' % (store_base_url, e))
continue
for subdir in dirs:
tmp_dict = {'dir_name': subdir}
response = requests.get(os.path.join(store_base_url, subdir, 'info.json'))
if response.status_code == 200:
tmp_dict['info'] = json.loads(response.content)
tmp_dict['id'] = str(uuid.uuid4())
response = requests.get(os.path.join(store_base_url, subdir, 'aux.json'))
if response.status_code == 200:
tmp_dict['aux'] = json.loads(response.content)
model_list.append(tmp_dict)
store_info = {'base_url': store_base_url, 'welcome_msg': msg,
'model_list': model_list}
aggregated_dict[store_base_url] = store_info
app.config['store_cache'].write(aggregated_dict)
return json.dumps(aggregated_dict)
@blueprint.route('/list', methods=['GET'])
def store():
return flask.render_template(
'store.html'
)
| DIGITS-master | digits/store/views.py |
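# --- Editor's illustrative sketch (not part of the DIGITS sources) ---
# retrieve_files() above drives the download of a model-store entry from its
# info.json. These are the keys it looks for; the file names and values below
# are hypothetical. 'model file' may be replaced by 'network file', and
# 'labels file' / 'python layer file' are optional.
import json
info = {
    'name': 'Example classification model',
    'framework': 'caffe',
    'snapshot file': 'snapshot_iter_100.caffemodel',
    'model file': 'deploy.prototxt',
    'labels file': 'labels.txt',
}
with open('info.json', 'w') as f:
    json.dump(info, f, indent=2)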
#!/usr/bin/env python2
# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
import argparse
import logging
import numpy as np
import pickle
import PIL.Image
import os
import sys
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# Add path for DIGITS package
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import digits.config # noqa
from digits import utils, log # noqa
from digits.inference.errors import InferenceError # noqa
from digits.job import Job # noqa
from digits.utils.lmdbreader import DbReader # noqa
# Import digits.config before caffe to set the path
import caffe_pb2 # noqa
logger = logging.getLogger('digits.tools.inference')
# number of image embeddings to store
N_EMBEDDINGS = 10000
def parse_datum(value):
"""
Parse a Caffe datum
"""
datum = caffe_pb2.Datum()
datum.ParseFromString(value)
if datum.encoded:
s = StringIO()
s.write(datum.data)
s.seek(0)
img = PIL.Image.open(s)
img = np.array(img)
else:
import caffe.io
arr = caffe.io.datum_to_array(datum)
# CHW -> HWC
arr = arr.transpose((1, 2, 0))
if arr.shape[2] == 1:
# HWC -> HW
arr = arr[:, :, 0]
elif arr.shape[2] == 3:
# BGR -> RGB
# XXX see issue #59
arr = arr[:, :, [2, 1, 0]]
img = arr
return img
def save_attributes(attributes):
"""
Save attribute vectors
"""
zs = np.zeros(attributes['positive_attribute_z'].shape)
for i in xrange(attributes['n_attributes']):
zs[i] = attributes['positive_attribute_z'][i] / attributes['positive_count'][i] \
- attributes['negative_attribute_z'][i] / attributes['negative_count'][i]
output = open('attributes_z.pkl', 'wb')
pickle.dump(zs, output)
def save_embeddings(embeddings):
filename = 'embeddings.pkl'
logger.info('Saving embeddings to %s...' % filename)
output = open(filename, 'wb')
pickle.dump(embeddings, output)
def infer(jobs_dir,
model_id,
epoch,
batch_size,
gpu):
"""
Perform inference on a list of images using the specified model
"""
# job directory defaults to that defined in DIGITS config
if jobs_dir == 'none':
jobs_dir = digits.config.config_value('jobs_dir')
# load model job
model_dir = os.path.join(jobs_dir, model_id)
assert os.path.isdir(model_dir), "Model dir %s does not exist" % model_dir
model = Job.load(model_dir)
# load dataset job
dataset_dir = os.path.join(jobs_dir, model.dataset_id)
assert os.path.isdir(dataset_dir), "Dataset dir %s does not exist" % dataset_dir
dataset = Job.load(dataset_dir)
for task in model.tasks:
task.dataset = dataset
# retrieve snapshot file
task = model.train_task()
snapshot_filename = None
epoch = float(epoch)
if epoch == -1 and len(task.snapshots):
# use last epoch
epoch = task.snapshots[-1][1]
snapshot_filename = task.snapshots[-1][0]
else:
for f, e in task.snapshots:
if e == epoch:
snapshot_filename = f
break
if not snapshot_filename:
raise InferenceError("Unable to find snapshot for epoch=%s" % repr(epoch))
input_data = [] # sample data
input_labels = [] # sample labels
# load images from database
feature_db_path = dataset.get_feature_db_path(utils.constants.TRAIN_DB)
feature_reader = DbReader(feature_db_path)
label_db_path = dataset.get_label_db_path(utils.constants.TRAIN_DB)
label_reader = DbReader(label_db_path)
embeddings = {'count': 0, 'images': None, 'zs': None}
def aggregate(images, labels, attributes, embeddings):
# perform inference
outputs = model.train_task().infer_many(
images,
snapshot_epoch=epoch,
gpu=gpu,
resize=False)
z_vectors = outputs['output'][:, :100]
for image, label, z in zip(images, labels, z_vectors):
if embeddings['images'] is None:
embeddings['images'] = np.empty((N_EMBEDDINGS,) + image.shape)
if embeddings['zs'] is None:
embeddings['zs'] = np.empty((N_EMBEDDINGS,) + z.shape)
if embeddings['count'] < N_EMBEDDINGS:
embeddings['images'][embeddings['count']] = image
embeddings['zs'][embeddings['count']] = z
embeddings['count'] += 1
if embeddings['count'] == N_EMBEDDINGS:
save_embeddings(embeddings)
for attribute in range(attributes['n_attributes']):
if label[attribute] > 0:
attributes['positive_attribute_z'][attribute] += z
attributes['positive_count'][attribute] += 1
else:
attributes['negative_attribute_z'][attribute] += z
attributes['negative_count'][attribute] += 1
# save
save_attributes(attributes)
n_input_samples = 0
label_len = None
z_dim = 100
for key, value in feature_reader.entries():
img = parse_datum(value)
label = parse_datum(label_reader.entry(key))[0]
if label_len is None:
label_len = len(label)
attributes = {
'n_attributes': label_len,
'negative_count': np.zeros(label_len),
'positive_count': np.zeros(label_len),
'negative_attribute_z': np.zeros((label_len, z_dim)),
'positive_attribute_z': np.zeros((label_len, z_dim)),
}
elif label_len != len(label):
raise ValueError("label len differs: %d vs %d" % (label_len, len(label)))
input_data.append(img)
input_labels.append(label)
n_input_samples = n_input_samples + 1
if n_input_samples % batch_size == 0:
aggregate(input_data, input_labels, attributes, embeddings)
print("######## %d processed ########" % n_input_samples)
input_data = [] # sample data
input_labels = [] # sample labels
if n_input_samples % batch_size != 0:
aggregate(input_data, input_labels, attributes, embeddings)
print("######## %d processed ########" % n_input_samples)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Inference tool - DIGITS')
# Positional arguments
parser.add_argument(
'model',
help='Model ID')
# Optional arguments
parser.add_argument(
'-e',
'--epoch',
default='-1',
help="Epoch (-1 for last)"
)
parser.add_argument(
'-j',
'--jobs_dir',
default='none',
help='Jobs directory (default: from DIGITS config)',
)
parser.add_argument(
'-b',
'--batch_size',
type=int,
default=1024,
help='Batch size',
)
parser.add_argument(
'-g',
'--gpu',
type=int,
default=None,
help='GPU to use (as in nvidia-smi output, default: None)',
)
parser.set_defaults(resize=True)
args = vars(parser.parse_args())
try:
infer(
args['jobs_dir'],
args['model'],
args['epoch'],
args['batch_size'],
args['gpu'],
)
except Exception as e:
logger.error('%s: %s' % (type(e).__name__, e.message))
raise
| DIGITS-master | examples/gan/gan_features.py |
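# --- Editor's illustrative sketch (not part of the DIGITS sources) ---
# The tool above pickles two files: 'attributes_z.pkl' (one averaged z-space
# direction per attribute, shape [n_attributes, 100]) and 'embeddings.pkl'
# (a dict with 'count', 'images' and 'zs' for the first N_EMBEDDINGS samples).
# They can be loaded back for later experiments such as attribute arithmetic
# in z space; the index choices below are arbitrary examples.
import pickle
import numpy as np
with open('attributes_z.pkl', 'rb') as f:
    attribute_directions = pickle.load(f)
with open('embeddings.pkl', 'rb') as f:
    embeddings = pickle.load(f)
z = embeddings['zs'][0] + attribute_directions[0]
print('modified z vector norm: %f' % np.linalg.norm(z))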
# The MIT License (MIT)
#
# Original work Copyright (c) 2016 Taehoon Kim
# Modified work Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import tensorflow as tf
from model import Tower
from utils import model_property
image_summary = tf.summary.image
scalar_summary = tf.summary.scalar
histogram_summary = tf.summary.histogram
merge_summary = tf.summary.merge
SummaryWriter = tf.summary.FileWriter
class batch_norm(object):
"""
This class creates an op that composes the specified tensor with a batch
normalization layer.
"""
def __init__(self, epsilon=1e-5, momentum=0.9, name="batch_norm"):
"""Instance initialization"""
with tf.variable_scope(name):
self.epsilon = epsilon
self.momentum = momentum
self.name = name
def __call__(self, x, train=True):
"""
Functional interface
Args:
x: tensor to compose
train: set to True during training and False otherwise
"""
return tf.contrib.layers.batch_norm(x,
decay=self.momentum,
updates_collections=None,
epsilon=self.epsilon,
scale=True,
is_training=train,
scope=self.name)
def conv2d(input_, output_dim,
k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
name="conv2d"):
"""
Compose specified symbol with 2D convolution layer
Args:
input_: tensor to compose. Shape: [N, H, W, C]
output_dim: number of output features maps
k_h: kernel height
k_w: kernel width
        d_h: vertical stride (along the image height)
        d_w: horizontal stride (along the image width)
stddev: standard deviation of gaussian distribution to use for random weight initialization
name: name scope
Returns:
Composed tensor.
"""
with tf.variable_scope(name):
w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim],
initializer=tf.truncated_normal_initializer(stddev=stddev))
conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME')
biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0))
conv = tf.nn.bias_add(conv, biases)
return conv
def deconv2d(input_, output_shape,
k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
name="deconv2d", with_w=False):
"""
Compose specified symbol with 2D *transpose* convolution layer
Args:
input_: tensor to compose. Shape: [N, H, W, C]
output_shape: output shape
k_h: kernel height
k_w: kernel width
        d_h: vertical stride (along the image height)
        d_w: horizontal stride (along the image width)
stddev: standard deviation of gaussian distribution to use for random weight initialization
name: name scope
Returns:
Composed tensor.
"""
with tf.variable_scope(name):
# filter : [height, width, output_channels, in_channels]
w = tf.get_variable('w',
[k_h, k_w, output_shape[-1],
input_.get_shape()[-1]],
initializer=tf.random_normal_initializer(stddev=stddev))
deconv = tf.nn.conv2d_transpose(input_, w,
output_shape=output_shape,
strides=[1, d_h, d_w, 1])
biases = tf.get_variable('biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0))
deconv = tf.reshape(tf.nn.bias_add(deconv, biases), output_shape)
if with_w:
return deconv, w, biases
else:
return deconv
def lrelu(x, leak=0.2, name="lrelu"):
"""Compose specified tensor with leaky Rectifier Linear Unit"""
return tf.maximum(x, leak*x)
def linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False):
"""
Compose specified tensor with linear (fully-connected) layer
Args:
input_: tensor to compose. Shape: [N, M]
output_size: number of output neurons
scope: name scope
stddev: standard deviation of gaussian distribution to use for random weight initialization
name: name scope
with_w: whether to also return parameter variables
Returns:
Composed tensor. Shape: [N, output_size]
"""
shape = input_.get_shape().as_list()
with tf.variable_scope(scope or "Linear"):
matrix = tf.get_variable("Matrix", [shape[1], output_size], tf.float32,
tf.random_normal_initializer(stddev=stddev))
bias = tf.get_variable("bias", [output_size],
initializer=tf.constant_initializer(bias_start))
if with_w:
return tf.matmul(input_, matrix) + bias, matrix, bias
else:
return tf.matmul(input_, matrix) + bias
class UserModel(Tower):
"""
User Model definition
DIGITS creates an instance of this class for every tower it needs
to create. This includes:
- one for training,
- one for validation,
- one for testing.
In the case of multi-GPU training, one training instance is created
for every GPU. DIGITS takes care of doing the gradient averaging
across GPUs so this class only needs to define the inference op
and desired loss/cost function.
"""
def __init__(self, *args, **kwargs):
"""
Identify the correct input nodes.
In the parent class, DIGITS conveniently sets the following fields:
- self.is_training: whether this is a training graph
- self.is_inference: whether this graph is created for inference/testing
- self.x: input node. Shape: [N, H, W, C]
- self.y: label. Shape: [N] for scalar labels, [N, H, W, C] otherwise.
Only defined if self._is_training is True
"""
super(UserModel, self).__init__(*args, **kwargs)
image_size = 64
output_size = 64
c_dim = 3
z_dim = 100
self.dcgan_init(image_size=image_size,
output_size=output_size,
c_dim=c_dim,
z_dim=z_dim)
@model_property
def inference(self):
"""op to use for inference"""
# scale back to [0, 255] range
        return tf.to_int32(self.G * 127 + 128)
@model_property
def loss(self):
"""
Loss function
Returns either an op or a list of dicts.
If the returned value is an op then DIGITS will optimize against this op
with respect to all trainable variables.
If the returned value is a list then DIGITS will optimize against each
loss in the list with respect to the specified variables.
"""
# here we are returning a list because we want to alternately optimize the
# discriminator and the generator.
losses = [
{'loss': self.d_loss, 'vars': self.d_vars},
{'loss': self.g_loss, 'vars': self.g_vars}
]
return losses
def dcgan_init(self,
image_size,
output_size,
z_dim,
c_dim,
gf_dim=64,
df_dim=64,
gfc_dim=1024,
dfc_dim=1024):
"""
Args:
output_size: (optional) The resolution in pixels of the images. [64]
z_dim: (optional) Dimension of dim for Z. [100]
gf_dim: (optional) Dimension of gen filters in first conv layer. [64]
df_dim: (optional) Dimension of discrim filters in first conv layer. [64]
            gfc_dim: (optional) Dimension of gen units for fully connected layer. [1024]
dfc_dim: (optional) Dimension of discrim units for fully connected layer. [1024]
c_dim: (optional) Dimension of image color. For grayscale input, set to 1. [3]
"""
self.image_size = image_size
self.output_size = output_size
self.z_dim = z_dim
self.gf_dim = gf_dim
self.df_dim = df_dim
self.gfc_dim = gfc_dim
self.dfc_dim = dfc_dim
self.c_dim = c_dim
self.batch_size = tf.shape(self.x)[0]
self.soft_label_margin = 0.1
# batch normalization : deals with poor initialization helps gradient flow
self.d_bn1 = batch_norm(name='d_bn1')
self.d_bn2 = batch_norm(name='d_bn2')
self.d_bn3 = batch_norm(name='d_bn3')
self.g_bn0 = batch_norm(name='g_bn0')
self.g_bn1 = batch_norm(name='g_bn1')
self.g_bn2 = batch_norm(name='g_bn2')
self.g_bn3 = batch_norm(name='g_bn3')
self.build_model()
def build_model(self):
if not self.is_inference:
# create both the generator and the discriminator
# self.x is a batch of images - shape: [N, H, W, C]
# self.y is a vector of labels - shape: [N]
# sample z from a normal distribution
self.z = tf.random_normal(shape=[self.batch_size, self.z_dim], dtype=tf.float32, seed=None, name='z')
# scale input to [-1, +1] range
self.images = (tf.reshape(self.x,
shape=[self.batch_size,
self.image_size,
self.image_size,
self.c_dim],
name='x_reshaped') - 128) / 127.
# create generator
self.G = self.generator(self.z)
# create an instance of the discriminator (real samples)
self.D, self.D_logits = self.discriminator(self.images, reuse=False)
# create another identical instance of the discriminator (fake samples)
# NOTE: we are re-using variables here to share weights between the two
# instances of the discriminator
self.D_, self.D_logits_ = self.discriminator(self.G, reuse=True)
# we are using the cross entropy loss for all these losses
# note the use of the soft label smoothing here to prevent D from getting overly confident
# on real samples
d_real = tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits,
labels=(tf.ones_like(self.D) - self.soft_label_margin),
name="loss_D_real")
self.d_loss_real = tf.reduce_mean(d_real)
d_fake = tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_,
labels=(tf.zeros_like(self.D_)),
name="loss_D_fake")
self.d_loss_fake = tf.reduce_mean(d_fake)
self.d_loss = (self.d_loss_real + self.d_loss_fake) / 2.
# the typical GAN set-up is that of a minimax game where D is trying to minimize
# its own error and G is trying to maximize D's error however note how we are flipping G labels here:
# instead of maximizing D's error, we are minimizing D's error on the 'wrong' label
# this trick helps produce a stronger gradient
g_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_,
labels=(tf.ones_like(self.D_) + self.soft_label_margin),
name="loss_G")
self.g_loss = tf.reduce_mean(g_loss)
# debug
self.summaries.append(image_summary("G", self.G, max_outputs=3))
self.summaries.append(image_summary("X", self.images, max_outputs=3))
self.summaries.append(histogram_summary("G_hist", self.G))
self.summaries.append(histogram_summary("X_hist", self.images))
self.summaries.append(scalar_summary("d_loss_real", self.d_loss_real))
self.summaries.append(scalar_summary("d_loss_fake", self.d_loss_fake))
self.summaries.append(scalar_summary("g_loss", self.g_loss))
self.summaries.append(scalar_summary("d_loss", self.d_loss))
# all trainable variables
t_vars = tf.trainable_variables()
# G variables
self.g_vars = [var for var in t_vars if 'g_' in var.name]
# D variables
self.d_vars = [var for var in t_vars if 'd_' in var.name]
# Extra hook for debug: log chi-square distance between G's output histogram and the dataset's histogram
value_range = [0.0, 1.0]
nbins = 100
hist_g = tf.to_float(tf.histogram_fixed_width(self.G, value_range, nbins=nbins)) / nbins
hist_images = tf.to_float(tf.histogram_fixed_width(self.images, value_range, nbins=nbins)) / nbins
chi_square = tf.reduce_mean(tf.div(tf.square(hist_g - hist_images), hist_g + hist_images + 1e-5))
self.summaries.append(scalar_summary("chi_square", chi_square))
else:
# Create only the generator
self.x = tf.reshape(self.x, shape=[self.batch_size, self.z_dim])
self.z = self.x[:, :self.z_dim]
self.G = self.generator(self.z)
def discriminator(self, image, y=None, reuse=False):
"""
Create the discriminator
This creates a string of layers:
- input - [N, 64, 64, 3]
- conv layer with 64 5x5 kernels and 2x2 stride - [N, 32, 32, 64]
- leaky relu - [N, 32, 32, 64]
- conv layer with 128 5x5 kernels and 2x2 stride - [N, 16, 16, 128]
- batch norm - [N, 16, 16, 128]
- leaky relu - [N, 16, 16, 128]
- conv layer with 256 5x5 kernels and 2x2 stride - [N, 8, 8, 256]
- batch norm - [N, 8, 8, 256]
- leaky relu - [N, 8, 8, 256]
- conv layer with 512 5x5 kernels and 2x2 stride - [N, 4, 4, 512]
- batch norm - [N, 4, 4, 512]
- leaky relu - [N, 4, 4, 512]
- flatten - [N, 8192]
- linear layer with 1 output neuron - [N, 1]
- sigmoid - [N, 1]
Args:
image: batch of input images - shape: [N, H, W, C]
y: batch of one-hot encoded labels - shape: [N, K]
reuse: whether to re-use previously created variables
"""
with tf.variable_scope("discriminator") as scope:
if reuse:
scope.reuse_variables()
h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv'))
h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim * 2, name='d_h1_conv'), train=self.is_training))
h2 = lrelu(self.d_bn2(conv2d(h1, self.df_dim * 4, name='d_h2_conv'), train=self.is_training))
h3 = lrelu(self.d_bn3(conv2d(h2, self.df_dim * 8, name='d_h3_conv'), train=self.is_training))
h3_size = ((self.output_size // 16) ** 2) * self.df_dim * 8
h4 = linear(tf.reshape(h3, [self.batch_size, h3_size]), 1, 'd_h3_lin')
return tf.nn.sigmoid(h4), h4
def generator(self, z, y=None):
"""
Create the generator
This creates a string of layers:
- input - [N, 100]
- linear layer with 8192 output neurons - [N, 8192]
- reshape - [N, 4, 4, 512]
- batch norm - [N, 4, 4, 512]
- relu - [N, 4, 4, 512]
- transpose convolution with 256 filters and stride 2 - [N, 8, 8, 256]
- batch norm - [N, 8, 8, 256]
- relu - [N, 8, 8, 256]
- transpose convolution with 128 filters and stride 2 - [N, 16, 16, 128]
- batch norm - [N, 16, 16, 128]
- relu - [N, 16, 16, 128]
- transpose convolution with 64 filters and stride 2 - [N, 32, 32, 64]
- batch norm - [N, 32, 32, 64]
- relu - [N, 32, 32, 64]
- transpose convolution with 3 filters and stride 2 - [N, 64, 64, 3]
- tanh - [N, 64, 64, 3]
"""
with tf.variable_scope("generator"):
s = self.output_size
s2, s4, s8, s16 = int(s // 2), int(s // 4), int(s // 8), int(s // 16)
# project `z` and reshape
self.z_, self.h0_w, self.h0_b = linear(z, self.gf_dim * 8 * s16 * s16, 'g_h0_lin', with_w=True)
self.h0 = tf.reshape(self.z_, [-1, s16, s16, self.gf_dim * 8])
h0 = tf.nn.relu(self.g_bn0(self.h0, train=self.is_training))
self.h1, self.h1_w, self.h1_b = deconv2d(h0, [self.batch_size, s8, s8, self.gf_dim * 4],
name='g_h1', with_w=True)
h1 = tf.nn.relu(self.g_bn1(self.h1, train=self.is_training))
h2, self.h2_w, self.h2_b = deconv2d(h1, [self.batch_size, s4, s4, self.gf_dim * 2],
name='g_h2', with_w=True)
h2 = tf.nn.relu(self.g_bn2(h2, train=self.is_training))
h3, self.h3_w, self.h3_b = deconv2d(h2, [self.batch_size, s2, s2, self.gf_dim * 1],
name='g_h3', with_w=True)
h3 = tf.nn.relu(self.g_bn3(h3, train=self.is_training))
h4, self.h4_w, self.h4_b = deconv2d(h3, [self.batch_size, s, s, self.c_dim],
name='g_h4', with_w=True)
return tf.nn.tanh(h4)
| DIGITS-master | examples/gan/network-celebA.py |
# The MIT License (MIT)
#
# Original work Copyright (c) 2016 Taehoon Kim
# Modified work Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import tensorflow as tf
from model import Tower
from utils import model_property
image_summary = tf.summary.image
scalar_summary = tf.summary.scalar
histogram_summary = tf.summary.histogram
merge_summary = tf.summary.merge
SummaryWriter = tf.summary.FileWriter
class batch_norm(object):
"""
This class creates an op that composes the specified tensor with a batch
normalization layer.
"""
def __init__(self, epsilon=1e-5, momentum=0.9, name="batch_norm"):
"""Instance initialization"""
with tf.variable_scope(name):
self.epsilon = epsilon
self.momentum = momentum
self.name = name
def __call__(self, x, train=True):
"""
Functional interface
Args:
x: tensor to compose
train: set to True during training and False otherwise
"""
return tf.contrib.layers.batch_norm(x,
decay=self.momentum,
updates_collections=None,
epsilon=self.epsilon,
scale=True,
is_training=train,
scope=self.name)
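# Usage note (for reference, mirroring the discriminator/generator code below):
# one batch_norm instance is created per layer at init time and then called on
# that layer's output at graph construction time, e.g.:
#
#   d_bn1 = batch_norm(name='d_bn1')
#   h1 = lrelu(d_bn1(conv2d(h0, 74, name='d_h1_conv'), train=is_training))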
def conv_cond_concat(x, y):
"""
Concatenate conditioning matrix across channel axis.
The specified input tensor is concatenated with K feature maps (K = number of classes)
across the channel dimension. Each of the K feature maps is set to all-zeros except for
the one whose index matches the target class (which is set to all-ones).
Args:
x: non-conditioned tensor. Shape: [N, H, W, C]
y: one-hot encoded conditioning tensor, reshaped to shape [N, 1, 1, K]
Returns:
conditioned feature map. Shape: [N, H, W, C + K]
"""
x_shapes = x.get_shape()
y_shapes = y.get_shape()
batch_size = tf.shape(x)[0]
return tf.concat([x, y * tf.ones([batch_size, int(x_shapes[1]), int(x_shapes[2]), int(y_shapes[3])])], 3)
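# Minimal shape example (illustration only): with a [N, 14, 14, 64] feature map
# and 10 classes, the one-hot labels are first reshaped to [N, 1, 1, 10] and the
# concatenation yields a [N, 14, 14, 74] tensor:
#
#   yb = tf.reshape(y_onehot, [batch_size, 1, 1, 10])   # [N, 1, 1, 10]
#   h_cond = conv_cond_concat(h, yb)                    # [N, 14, 14, 74]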
def conv2d(input_, output_dim,
k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
name="conv2d"):
"""
Compose specified symbol with 2D convolution layer
Args:
input_: tensor to compose. Shape: [N, H, W, C]
output_dim: number of output feature maps
k_h: kernel height
k_w: kernel width
d_h: horizontal stride
d_w: vertical stride
stddev: standard deviation of gaussian distribution to use for random weight initialization
name: name scope
Returns:
Composed tensor.
"""
with tf.variable_scope(name):
w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim],
initializer=tf.truncated_normal_initializer(stddev=stddev))
conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME')
biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0))
conv = tf.nn.bias_add(conv, biases)
return conv
def deconv2d(input_, output_shape,
k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
name="deconv2d", with_w=False):
"""
Compose specified symbol with 2D *transpose* convolution layer
Args:
input_: tensor to compose. Shape: [N, H, W, C]
output_shape: output shape
k_h: kernel height
k_w: kernel width
d_h: horizontal stride
d_w: vertical stride
stddev: standard deviation of gaussian distribution to use for random weight initialization
name: name scope
Returns:
Composed tensor.
"""
with tf.variable_scope(name):
# filter : [height, width, output_channels, in_channels]
w = tf.get_variable('w',
[k_h, k_w, output_shape[-1],
input_.get_shape()[-1]],
initializer=tf.random_normal_initializer(stddev=stddev))
deconv = tf.nn.conv2d_transpose(input_, w,
output_shape=output_shape,
strides=[1, d_h, d_w, 1])
biases = tf.get_variable('biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0))
deconv = tf.reshape(tf.nn.bias_add(deconv, biases), output_shape)
if with_w:
return deconv, w, biases
else:
return deconv
def lrelu(x, leak=0.2, name="lrelu"):
"""Compose specified tensor with leaky Rectifier Linear Unit"""
return tf.maximum(x, leak*x)
def linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False):
"""
Compose specified tensor with linear (fully-connected) layer
Args:
input_: tensor to compose. Shape: [N, M]
output_size: number of output neurons
scope: name scope
stddev: standard deviation of gaussian distribution to use for random weight initialization
bias_start: constant value used to initialize the bias
with_w: whether to also return parameter variables
Returns:
Composed tensor. Shape: [N, output_size]
"""
shape = input_.get_shape().as_list()
with tf.variable_scope(scope or "Linear"):
matrix = tf.get_variable("Matrix", [shape[1], output_size], tf.float32,
tf.random_normal_initializer(stddev=stddev))
bias = tf.get_variable("bias", [output_size],
initializer=tf.constant_initializer(bias_start))
if with_w:
return tf.matmul(input_, matrix) + bias, matrix, bias
else:
return tf.matmul(input_, matrix) + bias
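# Shape example (illustration only): applying linear(t, 1024) to a [N, 3636]
# tensor produces a [N, 1024] tensor; with with_w=True the weight matrix
# (shape [3636, 1024]) and bias (shape [1024]) variables are returned as well.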
class UserModel(Tower):
"""
User Model definition
DIGITS creates an instance of this class for every tower it needs
to create. This includes:
- one for training,
- one for validation,
- one for testing.
In the case of multi-GPU training, one training instance is created
for every GPU. DIGITS takes care of doing the gradient averaging
across GPUs so this class only needs to define the inference op
and desired loss/cost function.
"""
def __init__(self, *args, **kwargs):
"""
Identify the correct input nodes.
In the parent class, DIGITS conveniently sets the following fields:
- self.is_training: whether this is a training graph
- self.is_inference: whether this graph is created for inference/testing
- self.x: input node. Shape: [N, H, W, C]
- self.y: label. Shape: [N] for scalar labels, [N, H, W, C] otherwise.
Only defined if self._is_training is True
"""
super(UserModel, self).__init__(*args, **kwargs)
# initialize graph with parameters for MNIST
self.dcgan_init(image_size=28,
y_dim=10,
output_size=28,
c_dim=1)
@model_property
def inference(self):
"""op to use for inference"""
# inference op is the output of the generator after rescaling
# to the 8-bit range
return tf.to_int32(self.G * 255)
@model_property
def loss(self):
"""
Loss function
Returns either an op or a list of dicts.
If the returned value is an op then DIGITS will optimize against this op
with respect to all trainable variables.
If the returned value is a list then DIGITS will optimize against each
loss in the list with respect to the specified variables.
"""
# here we are returning a list because we want to alternately optimize the
# discriminator on real samples, the discriminator on fake samples and the
# generator.
losses = [
{'loss': self.d_loss_real, 'vars': self.d_vars},
{'loss': self.d_loss_fake, 'vars': self.d_vars},
{'loss': self.g_loss, 'vars': self.g_vars}
]
return losses
def dcgan_init(self, image_size=108,
output_size=64, y_dim=None, z_dim=100, gf_dim=64, df_dim=64,
gfc_dim=1024, dfc_dim=1024, c_dim=3):
"""
Create the model
Args:
output_size: (optional) The resolution in pixels of the images. [64]
y_dim: (optional) Dimension of the label vector y (number of classes). [None]
z_dim: (optional) Dimension of the random noise vector z. [100]
gf_dim: (optional) Dimension of gen filters in first conv layer. [64]
df_dim: (optional) Dimension of discrim filters in first conv layer. [64]
gfc_dim: (optional) Dimension of gen units for fully connected layer. [1024]
dfc_dim: (optional) Dimension of discrim units for fully connected layer. [1024]
c_dim: (optional) Dimension of image color. For grayscale input, set to 1. [3]
"""
self.image_size = image_size
self.output_size = output_size
self.y_dim = y_dim
self.z_dim = z_dim
self.gf_dim = gf_dim
self.df_dim = df_dim
self.gfc_dim = gfc_dim
self.dfc_dim = dfc_dim
self.c_dim = c_dim
self.batch_size = tf.shape(self.x)[0]
# batch normalization: deals with poor initialization and helps gradient flow
self.d_bn1 = batch_norm(name='d_bn1')
self.d_bn2 = batch_norm(name='d_bn2')
self.g_bn0 = batch_norm(name='g_bn0')
self.g_bn1 = batch_norm(name='g_bn1')
self.g_bn2 = batch_norm(name='g_bn2')
self.build_model()
def build_model(self):
"""Create the main ops"""
if not self.is_inference:
# create both the generator and the discriminator
# self.x is a batch of images - shape: [N, H, W, C]
# self.y is a vector of labels - shape: [N]
# sample z from a normal distribution
self.z = tf.random_normal(shape=[self.batch_size, self.z_dim], dtype=tf.float32, seed=None, name='z')
# rescale x to [0, 1]
x_reshaped = tf.reshape(self.x, shape=[self.batch_size, self.image_size, self.image_size, self.c_dim],
name='x_reshaped')
self.images = x_reshaped / 255.
# one hot encode the label - shape: [N] -> [N, self.y_dim]
self.y = tf.one_hot(self.y, self.y_dim, name='y_onehot')
# create the generator
self.G = self.generator(self.z, self.y)
# create one instance of the discriminator for real images (the input is
# images from the dataset)
self.D, self.D_logits = self.discriminator(self.images, self.y, reuse=False)
# create another instance of the discriminator for fake images (the input is
# the output of the generator). Note how we are reusing variables to share weights between
# both instances of the discriminator
self.D_, self.D_logits_ = self.discriminator(self.G, self.y, reuse=True)
# aggregate losses across batch
# we are using the cross entropy loss for all these losses
d_real = tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits,
labels=tf.ones_like(self.D),
name="loss_D_real")
self.d_loss_real = tf.reduce_mean(d_real)
d_fake = tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_,
labels=tf.zeros_like(self.D_),
name="loss_D_fake")
self.d_loss_fake = tf.reduce_mean(d_fake)
self.d_loss = (self.d_loss_real + self.d_loss_fake) / 2.
# the typical GAN set-up is that of a minimax game where D is trying to minimize
# its own error and G is trying to maximize D's error. However, note how we are flipping G's labels here:
# instead of maximizing D's error, we are minimizing D's error on the 'wrong' label.
# This trick helps produce a stronger gradient.
g_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_,
labels=tf.ones_like(self.D_),
name="loss_G")
self.g_loss = tf.reduce_mean(g_loss)
# create some summaries for debug and monitoring
self.summaries.append(histogram_summary("z", self.z))
self.summaries.append(histogram_summary("d", self.D))
self.summaries.append(histogram_summary("d_", self.D_))
self.summaries.append(image_summary("G", self.G, max_outputs=5))
self.summaries.append(image_summary("X", self.images, max_outputs=5))
self.summaries.append(histogram_summary("G_hist", self.G))
self.summaries.append(histogram_summary("X_hist", self.images))
self.summaries.append(scalar_summary("d_loss_real", self.d_loss_real))
self.summaries.append(scalar_summary("d_loss_fake", self.d_loss_fake))
self.summaries.append(scalar_summary("g_loss", self.g_loss))
self.summaries.append(scalar_summary("d_loss", self.d_loss))
# all trainable variables
t_vars = tf.trainable_variables()
# G's variables
self.g_vars = [var for var in t_vars if 'g_' in var.name]
# D's variables
self.d_vars = [var for var in t_vars if 'd_' in var.name]
# Extra hook for debug: log chi-square distance between G's output histogram and the dataset's histogram
value_range = [0.0, 1.0]
nbins = 100
hist_g = tf.to_float(tf.histogram_fixed_width(self.G, value_range, nbins=nbins)) / nbins
hist_images = tf.to_float(tf.histogram_fixed_width(self.images, value_range, nbins=nbins)) / nbins
chi_square = tf.reduce_mean(tf.div(tf.square(hist_g - hist_images), hist_g + hist_images + 1e-5))
self.summaries.append(scalar_summary("chi_square", chi_square))
else:
# Create only the generator
# self.x is the conditioned latent representation - shape: [self.batch_size, 1, self.z_dim + self.y_dim]
self.x = tf.reshape(self.x, shape=[self.batch_size, self.z_dim + self.y_dim])
# extract z and y
self.y = self.x[:, self.z_dim:self.z_dim + self.y_dim]
self.z = self.x[:, :self.z_dim]
# create an instance of the generator
self.G = self.generator(self.z, self.y)
def discriminator(self, image, y=None, reuse=False):
"""
Create the discriminator
This creates a string of layers:
- input - [N, 28, 28, 1]
- concat conditioning - [N, 28, 28, 11]
- conv layer with 11 5x5 kernels and 2x2 stride - [N, 14, 14, 11]
- leaky relu - [N, 14, 14, 11]
- concat conditioning - [N, 14, 14, 21]
- conv layer with 74 5x5 kernels and 2x2 stride - [N, 7, 7, 74]
- batch norm - [N, 7, 7, 74]
- leaky relu - [N, 7, 7, 74]
- flatten - [N, 3626]
- concat conditioning - [N, 3636]
- linear layer with 1024 output neurons - [N, 1024]
- batch norm - [N, 1024]
- leaky relu - [N, 1024]
- concat conditioning - [N, 1034]
- linear layer with 1 output neuron - [N, 1]
Args:
image: batch of input images - shape: [N, H, W, C]
y: batch of one-hot encoded labels - shape: [N, K]
reuse: whether to re-use previously created variables
"""
with tf.variable_scope("discriminator") as scope:
if reuse:
# re-use (share) variables
scope.reuse_variables()
yb = tf.reshape(y, [self.batch_size, 1, 1, self.y_dim])
x = conv_cond_concat(image, yb)
h0 = lrelu(conv2d(x, self.c_dim + self.y_dim, name='d_h0_conv'))
h0 = conv_cond_concat(h0, yb)
h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim + self.y_dim, name='d_h1_conv'), train=self.is_training))
sz = h1.get_shape()
h1 = tf.reshape(h1, [self.batch_size, int(sz[1] * sz[2] * sz[3])])
h1 = tf.concat([h1, y], 1)
h2 = lrelu(self.d_bn2(linear(h1, self.dfc_dim, 'd_h2_lin'), train=self.is_training))
h2 = tf.concat([h2, y], 1)
h3 = linear(h2, 1, 'd_h3_lin')
return tf.nn.sigmoid(h3), h3
def generator(self, z, y=None):
"""
Create the generator
This creates a string of layers:
- input - [N, 100]
- concatenate conditioning - [N, 110]
- linear layer with 1024 output neurons - [N, 1024]
- batch norm - [N, 1024]
- relu - [N, 1024]
- concatenate conditioning - [N, 1034]
- linear layer with 7*7*128=6272 output neurons - [N, 6272]
- reshape 7x7 feature maps - [N, 7, 7, 128]
- concatenate conditioning - [N, 7, 7, 138]
- transpose convolution with 128 filters and stride 2 - [N, 14, 14, 128]
- batch norm - [N, 14, 14, 128]
- relu - [N, 14, 14, 128]
- concatenate conditioning - [N, 14, 14, 138]
- transpose convolution with 1 filter and stride 2 - [N, 28, 28, 1]
- sigmoid - [N, 28, 28, 1]
"""
with tf.variable_scope("generator"):
s = self.output_size
s2, s4 = int(s/2), int(s/4)
yb = tf.reshape(y, [self.batch_size, 1, 1, self.y_dim])
z = tf.concat([z, y], 1)
h0 = tf.nn.relu(self.g_bn0(linear(z, self.gfc_dim, 'g_h0_lin'), train=self.is_training))
h0 = tf.concat([h0, y], 1)
h1 = tf.nn.relu(self.g_bn1(linear(h0, self.gf_dim*2*s4*s4, 'g_h1_lin'), train=self.is_training))
h1 = tf.reshape(h1, [self.batch_size, s4, s4, self.gf_dim * 2])
h1 = conv_cond_concat(h1, yb)
h2 = tf.nn.relu(self.g_bn2(deconv2d(h1, [self.batch_size, s2, s2, self.gf_dim * 2], name='g_h2'),
train=self.is_training))
h2 = conv_cond_concat(h2, yb)
return tf.nn.sigmoid(deconv2d(h2, [self.batch_size, s, s, self.c_dim], name='g_h3'))
| DIGITS-master | examples/gan/network-mnist.py |
# The MIT License (MIT)
#
# Original work Copyright (c) 2016 Taehoon Kim
# Modified work Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import tensorflow as tf
from model import Tower
from utils import model_property
image_summary = tf.summary.image
scalar_summary = tf.summary.scalar
histogram_summary = tf.summary.histogram
merge_summary = tf.summary.merge
SummaryWriter = tf.summary.FileWriter
class batch_norm(object):
"""
This class creates an op that composes the specified tensor with a batch
normalization layer.
"""
def __init__(self, epsilon=1e-5, momentum=0.9, name="batch_norm"):
"""Instance initialization"""
with tf.variable_scope(name):
self.epsilon = epsilon
self.momentum = momentum
self.name = name
def __call__(self, x, train=True):
"""
Functional interface
Args:
x: tensor to compose
train: set to True during training and False otherwise
"""
return tf.contrib.layers.batch_norm(x,
decay=self.momentum,
updates_collections=None,
epsilon=self.epsilon,
scale=True,
is_training=train,
scope=self.name)
def conv_cond_concat(x, y):
"""
Concatenate conditioning matrix across channel axis.
The specified input tensor is concatenated with K feature maps (K = number of classes)
across the channel dimension. Each of the K feature maps is set to all-zeros except for
the one whose index matches the target class (which is set to all-ones).
Args:
x: non-conditioned tensor. Shape: [N, H, W, C]
y: one-hot encoded conditioning tensor, reshaped to shape [N, 1, 1, K]
Returns:
conditioned feature map. Shape: [N, H, W, C + K]
"""
x_shapes = x.get_shape()
y_shapes = y.get_shape()
batch_size = tf.shape(x)[0]
return tf.concat([x, y * tf.ones([batch_size, int(x_shapes[1]), int(x_shapes[2]), int(y_shapes[3])])], 3)
def conv2d(input_, output_dim,
k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
name="conv2d"):
"""
Compose specified symbol with 2D convolution layer
Args:
input_: tensor to compose. Shape: [N, H, W, C]
output_dim: number of output feature maps
k_h: kernel height
k_w: kernel width
d_h: horizontal stride
d_w: vertical stride
stddev: standard deviation of gaussian distribution to use for random weight initialization
name: name scope
Returns:
Composed tensor.
"""
with tf.variable_scope(name):
w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim],
initializer=tf.truncated_normal_initializer(stddev=stddev))
conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME')
biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0))
conv = tf.nn.bias_add(conv, biases)
return conv
def deconv2d(input_, output_shape,
k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
name="deconv2d", with_w=False):
"""
Compose specified symbol with 2D *transpose* convolution layer
Args:
input_: tensor to compose. Shape: [N, H, W, C]
output_shape: output shape
k_h: kernel height
k_w: kernel width
d_h: horizontal stride
d_w: vertical stride
stddev: standard deviation of gaussian distribution to use for random weight initialization
name: name scope
Returns:
Composed tensor.
"""
with tf.variable_scope(name):
# filter : [height, width, output_channels, in_channels]
w = tf.get_variable('w',
[k_h, k_w, output_shape[-1],
input_.get_shape()[-1]],
initializer=tf.random_normal_initializer(stddev=stddev))
deconv = tf.nn.conv2d_transpose(input_, w,
output_shape=output_shape,
strides=[1, d_h, d_w, 1])
biases = tf.get_variable('biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0))
deconv = tf.reshape(tf.nn.bias_add(deconv, biases), output_shape)
if with_w:
return deconv, w, biases
else:
return deconv
def lrelu(x, leak=0.2, name="lrelu"):
"""Compose specified tensor with leaky Rectifier Linear Unit"""
return tf.maximum(x, leak*x)
def linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False):
"""
Compose specified tensor with linear (fully-connected) layer
Args:
input_: tensor to compose. Shape: [N, M]
output_size: number of output neurons
scope: name scope
stddev: standard deviation of gaussian distribution to use for random weight initialization
bias_start: constant value used to initialize the bias
with_w: whether to also return parameter variables
Returns:
Composed tensor. Shape: [N, output_size]
"""
shape = input_.get_shape().as_list()
with tf.variable_scope(scope or "Linear"):
matrix = tf.get_variable("Matrix", [shape[1], output_size], tf.float32,
tf.random_normal_initializer(stddev=stddev))
bias = tf.get_variable("bias", [output_size],
initializer=tf.constant_initializer(bias_start))
if with_w:
return tf.matmul(input_, matrix) + bias, matrix, bias
else:
return tf.matmul(input_, matrix) + bias
class UserModel(Tower):
"""
User Model definition
DIGITS creates an instance of this class for every tower it needs
to create. This includes:
- one for training,
- one for validation,
- one for testing.
In the case of multi-GPU training, one training instance is created
for every GPU. DIGITS takes care of doing the gradient averaging
across GPUs so this class only needs to define the inference op
and desired loss/cost function.
"""
def __init__(self, *args, **kwargs):
"""
Identify the correct input nodes.
In the parent class, DIGITS conveniently sets the following fields:
- self.is_training: whether this is a training graph
- self.is_inference: whether this graph is created for inference/testing
- self.x: input node. Shape: [N, H, W, C]
- self.y: label. Shape: [N] for scalar labels, [N, H, W, C] otherwise.
Only defined if self._is_training is True
"""
super(UserModel, self).__init__(*args, **kwargs)
# initialize graph with parameters for MNIST
self.dcgan_init(image_size=28,
y_dim=10,
output_size=28,
c_dim=1)
@model_property
def inference(self):
""" op to use for inference """
# rescale
images = self.G * 255
# flatten G output
images_flat = tf.reshape(images, [self.batch_size, self.image_size * self.image_size * self.c_dim])
# now return encoded z concatenated with G output
# during inference the visualization script will need to extract
# both z and the generated image to display them separately
zgen_flat = tf.reshape(self.DzGEN, [self.batch_size, self.z_dim])
return tf.concat([zgen_flat, images_flat], 1)
@model_property
def loss(self):
"""
Loss function
Returns either an op or a list of dicts.
If the returned value is an op then DIGITS will optimize against this op
with respect to all trainable variables.
If the returned value is a list then DIGITS will optimize against each
loss in the list with respect to the specified variables.
"""
# here we return a single loss: during encoder training we only optimize the
# discriminator/encoder variables against the reconstruction (L2) loss defined
# in build_model().
losses = [
{'loss': self.dzgen_loss, 'vars': self.d_vars},
]
return losses
def dcgan_init(self, image_size=108,
output_size=64, y_dim=None, z_dim=100, gf_dim=64, df_dim=64,
gfc_dim=1024, dfc_dim=1024, c_dim=3):
"""
Create the model
Args:
output_size: (optional) The resolution in pixels of the images. [64]
y_dim: (optional) Dimension of the label vector y (number of classes). [None]
z_dim: (optional) Dimension of the random noise vector z. [100]
gf_dim: (optional) Dimension of gen filters in first conv layer. [64]
df_dim: (optional) Dimension of discrim filters in first conv layer. [64]
gfc_dim: (optional) Dimension of gen units for fully connected layer. [1024]
dfc_dim: (optional) Dimension of discrim units for fully connected layer. [1024]
c_dim: (optional) Dimension of image color. For grayscale input, set to 1. [3]
"""
self.image_size = image_size
self.output_size = output_size
self.y_dim = y_dim
self.z_dim = z_dim
self.gf_dim = gf_dim
self.df_dim = df_dim
self.gfc_dim = gfc_dim
self.dfc_dim = dfc_dim
self.c_dim = c_dim
self.batch_size = tf.shape(self.x)[0]
# batch normalization: deals with poor initialization and helps gradient flow
self.d_bn1 = batch_norm(name='d_bn1')
self.d_bn2 = batch_norm(name='d_bn2')
self.g_bn0 = batch_norm(name='g_bn0')
self.g_bn1 = batch_norm(name='g_bn1')
self.g_bn2 = batch_norm(name='g_bn2')
self.build_model()
def build_model(self):
"""Create the main ops"""
if self.is_inference:
# HACK: we are hard-coding class 3 during inference
# TODO: find way to pass this from UI
self.y = tf.to_int32(3*tf.ones(shape=[self.batch_size]))
# create both the generator and the discriminator/encoder
# self.x is a batch of images - shape: [N, H, W, C]
# self.y is a vector of labels - shape: [N]
# rescale to [0,1] range
x_reshaped = tf.reshape(self.x, shape=[self.batch_size, self.image_size, self.image_size, self.c_dim],
name='x_reshaped')
self.images = x_reshaped / 255.
# one-hot encode y - shape: [N] -> [N, self.y_dim]
self.y = tf.one_hot(self.y, self.y_dim, name='y_onehot')
# create discriminator/encoder
self.DzGEN, self.D_logits = self.discriminator(self.images, self.y, reuse=False)
# create generator
self.G = self.generator(self.DzGEN, self.y)
# we only have one loss function here (L2 distance between input image and generator output)
self.dzgen_loss = tf.reduce_mean(tf.square(self.G - self.images), name="loss_DzGEN")
# debug
self.summaries.append(image_summary("G", self.G, max_outputs=5))
self.summaries.append(image_summary("X", self.images, max_outputs=5))
self.summaries.append(histogram_summary("G_hist", self.G))
self.summaries.append(histogram_summary("X_hist", self.images))
self.summaries.append(scalar_summary("DzGen_loss", self.dzgen_loss))
# all trainable variables
t_vars = tf.trainable_variables()
# D variables
self.d_vars = [var for var in t_vars if 'd_' in var.name]
def discriminator(self, image, y=None, reuse=False):
"""
Create the discriminator/encoder
This creates a string of layers:
- input - [N, 28, 28, 1]
- concat conditioning - [N, 28, 28, 11]
- conv layer with 11 5x5 kernels and 2x2 stride - [N, 14, 14, 11]
- leaky relu - [N, 14, 14, 11]
- concat conditioning - [N, 14, 14, 21]
- conv layer with 74 5x5 kernels and 2x2 stride - [N, 7, 7, 74]
- batch norm - [N, 7, 7, 74]
- leaky relu - [N, 7, 7, 74]
- flatten - [N, 3626]
- concat conditioning - [N, 3636]
- linear layer with 1024 output neurons - [N, 1024]
- batch norm - [N, 1024]
- leaky relu - [N, 1024]
- concat conditioning - [N, 1034]
- linear layer with z_dim output neurons - [N, z_dim]
Args:
image: batch of input images - shape: [N, H, W, C]
y: batch of one-hot encoded labels - shape: [N, K]
reuse: whether to re-use previously created variables
"""
# NOTE: although we are really creating an encoder here we need to re-use the same
# variable scope (i.e. "discriminator") as in the original GAN so we can re-use
# learned parameters
with tf.variable_scope("discriminator") as scope:
if reuse:
# re-use (share) variables
scope.reuse_variables()
yb = tf.reshape(y, [self.batch_size, 1, 1, self.y_dim])
x = conv_cond_concat(image, yb)
h0 = lrelu(conv2d(x, self.c_dim + self.y_dim, name='d_h0_conv'))
h0 = conv_cond_concat(h0, yb)
h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim + self.y_dim, name='d_h1_conv'), train=self.is_training))
sz = h1.get_shape()
h1 = tf.reshape(h1, [self.batch_size, int(sz[1] * sz[2] * sz[3])])
h1 = tf.concat([h1, y], 1)
h2 = lrelu(self.d_bn2(linear(h1, self.dfc_dim, 'd_h2_lin'), train=self.is_training))
h2 = tf.concat([h2, y], 1)
h3 = linear(h2, self.z_dim, 'd_h3_lin_retrain')
return h3, h3
def generator(self, z, y=None):
"""
Create the generator
This creates a string of layers:
- input - [N, 100]
- concatenate conditioning - [N, 110]
- linear layer with 1024 output neurons - [N, 1024]
- batch norm - [N, 1024]
- relu - [N, 1024]
- concatenate conditioning - [N, 1034]
- linear layer with 7*7*128=6272 output neurons - [N, 6272]
- reshape 7x7 feature maps - [N, 7, 7, 128]
- concatenate conditioning - [N, 7, 7, 138]
- transpose convolution with 128 filters and stride 2 - [N, 14, 14, 128]
- batch norm - [N, 14, 14, 128]
- relu - [N, 14, 14, 128]
- concatenate conditioning - [N, 14, 14, 138]
- transpose convolution with 1 filter and stride 2 - [N, 28, 28, 1]
- sigmoid - [N, 28, 28, 1]
"""
with tf.variable_scope("generator"):
s = self.output_size
s2, s4 = int(s/2), int(s/4)
# yb = tf.expand_dims(tf.expand_dims(y, 1),2)
yb = tf.reshape(y, [self.batch_size, 1, 1, self.y_dim])
z = tf.concat([z, y], 1)
h0 = tf.nn.relu(self.g_bn0(linear(z, self.gfc_dim, 'g_h0_lin'), train=False))
h0 = tf.concat([h0, y], 1)
h1 = tf.nn.relu(self.g_bn1(linear(h0, self.gf_dim*2*s4*s4, 'g_h1_lin'), train=False))
h1 = tf.reshape(h1, [self.batch_size, s4, s4, self.gf_dim * 2])
h1 = conv_cond_concat(h1, yb)
h2 = tf.nn.relu(self.g_bn2(deconv2d(h1, [self.batch_size, s2, s2, self.gf_dim * 2],
name='g_h2'), train=False))
h2 = conv_cond_concat(h2, yb)
return tf.nn.sigmoid(deconv2d(h2, [self.batch_size, s, s, self.c_dim], name='g_h3'))
| DIGITS-master | examples/gan/network-mnist-encoder.py |
#!/usr/bin/env python2
# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
import argparse
import os
import pickle
import shutil
import numpy as np
import PIL.Image
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
TB_DIR = os.path.join(os.getcwd(), "gan-tb")
SPRITE_IMAGE_FILENAME = os.path.join(TB_DIR, "sprite.png")
def save_tb_embeddings(embeddings_filename):
f = open(embeddings_filename, 'rb')
embeddings = pickle.load(f)
images = embeddings['images']
zs = embeddings['zs']
# overwrite Tensorboard log dir if necessary
if os.path.exists(TB_DIR):
shutil.rmtree(TB_DIR)
os.makedirs(TB_DIR)
# create grid image
img_width, img_height = save_sprite_image(images)
with tf.device('cpu:0'):
# create embedding var
embedding_var = tf.Variable(initial_value=zs)
# save projector config
summary_writer = tf.summary.FileWriter(TB_DIR)
config = projector.ProjectorConfig()
embedding = config.embeddings.add()
embedding.tensor_name = embedding_var.name
embedding.sprite.image_path = SPRITE_IMAGE_FILENAME
embedding.sprite.single_image_dim.extend([img_width, img_height])
projector.visualize_embeddings(summary_writer, config)
# save embeddings
sess = tf.Session()
sess.run(embedding_var.initializer)
saver = tf.train.Saver([embedding_var])
saver.save(sess, os.path.join(TB_DIR, 'model.ckpt'))
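# Once the checkpoint and projector config have been written, the embeddings can
# typically be inspected by pointing TensorBoard at the log directory, e.g.:
#
#   tensorboard --logdir ./gan-tb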
def save_sprite_image(images):
n_embeddings = images.shape[0]
grid_cols = int(np.sqrt(n_embeddings))
grid_rows = int(np.ceil(float(n_embeddings) / grid_cols))
img_height, img_width, img_channels = images[0].shape
grid_image = np.empty((img_height * grid_rows, img_width * grid_cols, img_channels))
for i, image in enumerate(images):
row = i / grid_cols
col = i % grid_cols
x = img_width * col
y = img_height * row
grid_image[y:y + img_height, x:x + img_width] = image
grid_image = PIL.Image.fromarray(grid_image.astype('uint8'))
grid_image.save(SPRITE_IMAGE_FILENAME)
return img_width, img_height
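# Worked example of the grid math above: 100 embeddings of 28x28x1 images give
# grid_cols = 10 and grid_rows = 10, so the sprite is 280x280 pixels, while
# single_image_dim in save_tb_embeddings() receives the 28x28 size of one tile.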
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Inference tool - DIGITS')
# Positional arguments
parser.add_argument(
'embeddings_file',
help='Embeddings pickle file')
args = vars(parser.parse_args())
try:
save_tb_embeddings(
args['embeddings_file'],
)
except Exception as e:
print('%s: %s' % (type(e).__name__, e.message))
raise
| DIGITS-master | examples/gan/gan_embeddings.py |
# The MIT License (MIT)
#
# Original work Copyright (c) 2016 Taehoon Kim
# Modified work Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import tensorflow as tf
from model import Tower
from utils import model_property
image_summary = tf.summary.image
scalar_summary = tf.summary.scalar
histogram_summary = tf.summary.histogram
merge_summary = tf.summary.merge
SummaryWriter = tf.summary.FileWriter
class batch_norm(object):
"""
This class creates an op that composes the specified tensor with a batch
normalization layer.
"""
def __init__(self, epsilon=1e-5, momentum=0.9, name="batch_norm"):
"""Instance initialization"""
with tf.variable_scope(name):
self.epsilon = epsilon
self.momentum = momentum
self.name = name
def __call__(self, x, train=True):
"""
Functional interface
Args:
x: tensor to compose
train: set to True during training and False otherwise
"""
return tf.contrib.layers.batch_norm(x,
decay=self.momentum,
updates_collections=None,
epsilon=self.epsilon,
scale=True,
is_training=train,
scope=self.name)
def conv2d(input_, output_dim,
k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
name="conv2d"):
"""
Compose specified symbol with 2D convolution layer
Args:
input_: tensor to compose. Shape: [N, H, W, C]
output_dim: number of output feature maps
k_h: kernel height
k_w: kernel width
d_h: horizontal stride
d_w: vertical stride
stddev: standard deviation of gaussian distribution to use for random weight initialization
name: name scope
Returns:
Composed tensor.
"""
with tf.variable_scope(name):
w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim],
initializer=tf.truncated_normal_initializer(stddev=stddev))
conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME')
biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0))
conv = tf.nn.bias_add(conv, biases)
return conv
def deconv2d(input_, output_shape,
k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
name="deconv2d", with_w=False):
"""
Compose specified symbol with 2D *transpose* convolution layer
Args:
input_: tensor to compose. Shape: [N, H, W, C]
output_shape: output shape
k_h: kernel height
k_w: kernel width
d_h: horizontal stride
d_w: vertical stride
stddev: standard deviation of gaussian distribution to use for random weight initialization
name: name scope
Returns:
Composed tensor.
"""
with tf.variable_scope(name):
# filter : [height, width, output_channels, in_channels]
w = tf.get_variable('w',
[k_h, k_w, output_shape[-1],
input_.get_shape()[-1]],
initializer=tf.random_normal_initializer(stddev=stddev))
deconv = tf.nn.conv2d_transpose(input_, w,
output_shape=output_shape,
strides=[1, d_h, d_w, 1])
biases = tf.get_variable('biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0))
deconv = tf.reshape(tf.nn.bias_add(deconv, biases), output_shape)
if with_w:
return deconv, w, biases
else:
return deconv
def lrelu(x, leak=0.2, name="lrelu"):
"""Compose specified tensor with leaky Rectifier Linear Unit"""
return tf.maximum(x, leak*x)
def linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False):
"""
Compose specified tensor with linear (fully-connected) layer
Args:
input_: tensor to compose. Shape: [N, M]
output_size: number of output neurons
scope: name scope
stddev: standard deviation of gaussian distribution to use for random weight initialization
bias_start: constant value used to initialize the bias
with_w: whether to also return parameter variables
Returns:
Composed tensor. Shape: [N, output_size]
"""
shape = input_.get_shape().as_list()
with tf.variable_scope(scope or "Linear"):
matrix = tf.get_variable("Matrix", [shape[1], output_size], tf.float32,
tf.random_normal_initializer(stddev=stddev))
bias = tf.get_variable("bias", [output_size],
initializer=tf.constant_initializer(bias_start))
if with_w:
return tf.matmul(input_, matrix) + bias, matrix, bias
else:
return tf.matmul(input_, matrix) + bias
class UserModel(Tower):
"""
User Model definition
DIGITS creates an instance of this class for every tower it needs
to create. This includes:
- one for training,
- one for validation,
- one for testing.
In the case of multi-GPU training, one training instance is created
for every GPU. DIGITS takes care of doing the gradient averaging
across GPUs so this class only needs to define the inference op
and desired loss/cost function.
"""
def __init__(self, *args, **kwargs):
"""
Identify the correct input nodes.
In the parent class, DIGITS conveniently sets the following fields:
- self.is_training: whether this is a training graph
- self.is_inference: whether this graph is created for inference/testing
- self.x: input node. Shape: [N, H, W, C]
- self.y: label. Shape: [N] for scalar labels, [N, H, W, C] otherwise.
Only defined if self._is_training is True
"""
super(UserModel, self).__init__(*args, **kwargs)
image_size = 64
output_size = 64
c_dim = 3
z_dim = 100
self.dcgan_init(image_size=image_size,
output_size=output_size,
c_dim=c_dim,
z_dim=z_dim)
@model_property
def inference(self):
""" op to use for inference """
# scale back to [0, 255] range
images = (self.G * 127) + 128
images_flat = tf.reshape(images, [self.batch_size, self.image_size * self.image_size * self.c_dim])
# concatenate encoded z and generated image into a single flat structure
zgen_flat = tf.reshape(self.DzGEN, [self.batch_size, self.z_dim])
return tf.concat([zgen_flat, images_flat], 1)
@model_property
def loss(self):
"""
Loss function
Returns either an op or a list of dicts.
If the returned value is an op then DIGITS will optimize against this op
with respect to all trainable variables.
If the returned value is a list then DIGITS will optimize against each
loss in the list with respect to the specified variables.
"""
# here we return a single loss: during encoder training we only optimize the
# discriminator/encoder variables against the reconstruction (L2) loss defined
# in build_model().
losses = [
{'loss': self.dzgen_loss, 'vars': self.d_vars},
]
return losses
def dcgan_init(self,
image_size,
output_size,
z_dim,
c_dim,
gf_dim=64,
df_dim=64,
gfc_dim=1024,
dfc_dim=1024):
"""
Args:
output_size: (optional) The resolution in pixels of the images. [64]
z_dim: (optional) Dimension of the random noise vector z. [100]
gf_dim: (optional) Dimension of gen filters in first conv layer. [64]
df_dim: (optional) Dimension of discrim filters in first conv layer. [64]
gfc_dim: (optional) Dimension of gen units for fully connected layer. [1024]
dfc_dim: (optional) Dimension of discrim units for fully connected layer. [1024]
c_dim: (optional) Dimension of image color. For grayscale input, set to 1. [3]
"""
self.image_size = image_size
self.output_size = output_size
self.z_dim = z_dim
self.gf_dim = gf_dim
self.df_dim = df_dim
self.gfc_dim = gfc_dim
self.dfc_dim = dfc_dim
self.c_dim = c_dim
self.batch_size = tf.shape(self.x)[0]
self.soft_label_margin = 0.1
# batch normalization: deals with poor initialization and helps gradient flow
self.d_bn1 = batch_norm(name='d_bn1')
self.d_bn2 = batch_norm(name='d_bn2')
self.d_bn3 = batch_norm(name='d_bn3')
self.g_bn0 = batch_norm(name='g_bn0')
self.g_bn1 = batch_norm(name='g_bn1')
self.g_bn2 = batch_norm(name='g_bn2')
self.g_bn3 = batch_norm(name='g_bn3')
self.build_model()
def build_model(self):
# reshape/rescale x
self.images = (tf.reshape(self.x, shape=[self.batch_size,
self.image_size,
self.image_size,
self.c_dim],
name='x_reshaped') - 128) / 127.
# create discriminator/encoder
self.DzGEN, self.D_logits = self.discriminator(self.images, reuse=False)
# create generator
self.G = self.generator(self.DzGEN)
# loss is now L2 distance between input image and generator output
self.dzgen_loss = tf.reduce_mean(tf.square(self.G - self.images), name="loss_DzGEN")
# debug
self.summaries.append(image_summary("G", self.G, max_outputs=3))
self.summaries.append(image_summary("X", self.images, max_outputs=3))
self.summaries.append(histogram_summary("G_hist", self.G))
self.summaries.append(histogram_summary("X_hist", self.images))
self.summaries.append(scalar_summary("DzGen_loss", self.dzgen_loss))
# all trainable variables
t_vars = tf.trainable_variables()
# d variables
self.d_vars = [var for var in t_vars if 'd_' in var.name]
def discriminator(self, image, y=None, reuse=False):
"""
Create the discriminator/encoder
This creates a string of layers:
- input - [N, 64, 64, 3]
- conv layer with 64 5x5 kernels and 2x2 stride - [N, 32, 32, 64]
- leaky relu - [N, 32, 32, 64]
- conv layer with 128 5x5 kernels and 2x2 stride - [N, 16, 16, 128]
- batch norm - [N, 16, 16, 128]
- leaky relu - [N, 16, 16, 128]
- conv layer with 256 5x5 kernels and 2x2 stride - [N, 8, 8, 256]
- batch norm - [N, 8, 8, 256]
- leaky relu - [N, 8, 8, 256]
- conv layer with 512 5x5 kernels and 2x2 stride - [N, 4, 4, 512]
- batch norm - [N, 4, 4, 512]
- leaky relu - [N, 4, 4, 512]
- flatten - [N, 8192]
- linear layer with z_dim output neurons - [N, z_dim]
Args:
image: batch of input images - shape: [N, H, W, C]
y: batch of one-hot encoded labels - shape: [N, K]
reuse: whether to re-use previously created variables
"""
# NOTE: although we are really creating an encoder here we need to re-use the same
# variable scope (i.e. "discriminator") as in the original GAN so we can re-use
# learned parameters
with tf.variable_scope("discriminator") as scope:
if reuse:
scope.reuse_variables()
h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv'))
h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim*2, name='d_h1_conv'), train=self.is_training))
h2 = lrelu(self.d_bn2(conv2d(h1, self.df_dim*4, name='d_h2_conv'), train=self.is_training))
h3 = lrelu(self.d_bn3(conv2d(h2, self.df_dim*8, name='d_h3_conv'), train=self.is_training))
h3_size = ((self.output_size // 16) ** 2) * self.df_dim * 8
h4 = linear(tf.reshape(h3, [self.batch_size, h3_size]), self.z_dim, 'd_h3_lin_retrain')
return h4, h4
def generator(self, z, y=None):
"""
Create the generator
This creates a string of layers:
- input - [N, 100]
- linear layer with 8192 output neurons - [N, 8192]
- reshape - [N, 4, 4, 512]
- batch norm - [N, 4, 4, 512]
- relu - [N, 4, 4, 512]
- transpose convolution with 256 filters and stride 2 - [N, 8, 8, 256]
- batch norm - [N, 8, 8, 256]
- relu - [N, 8, 8, 256]
- transpose convolution with 128 filters and stride 2 - [N, 16, 16, 128]
- batch norm - [N, 16, 16, 128]
- relu - [N, 16, 16, 128]
- transpose convolution with 64 filters and stride 2 - [N, 32, 32, 64]
- batch norm - [N, 32, 32, 64]
- relu - [N, 32, 32, 64]
- transpose convolution with 3 filters and stride 2 - [N, 64, 64, 3]
- tanh - [N, 64, 64, 3]
"""
with tf.variable_scope("generator"):
s = self.output_size
s2, s4, s8, s16 = int(s/2), int(s/4), int(s/8), int(s/16)
# project `z` and reshape
self.z_, self.h0_w, self.h0_b = linear(z, self.gf_dim*8*s16*s16, 'g_h0_lin', with_w=True)
self.h0 = tf.reshape(self.z_, [-1, s16, s16, self.gf_dim * 8])
h0 = tf.nn.relu(self.g_bn0(self.h0, train=False))
self.h1, self.h1_w, self.h1_b = deconv2d(h0, [self.batch_size, s8, s8, self.gf_dim*4],
name='g_h1', with_w=True)
h1 = tf.nn.relu(self.g_bn1(self.h1, train=False))
h2, self.h2_w, self.h2_b = deconv2d(h1, [self.batch_size, s4, s4, self.gf_dim*2],
name='g_h2', with_w=True)
h2 = tf.nn.relu(self.g_bn2(h2, train=False))
h3, self.h3_w, self.h3_b = deconv2d(h2, [self.batch_size, s2, s2, self.gf_dim*1],
name='g_h3', with_w=True)
h3 = tf.nn.relu(self.g_bn3(h3, train=False))
h4, self.h4_w, self.h4_b = deconv2d(h3, [self.batch_size, s, s, self.c_dim],
name='g_h4', with_w=True)
return tf.nn.tanh(h4)
| DIGITS-master | examples/gan/network-celebA-encoder.py |
#!/usr/bin/env python2
# Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
"""
Classify an image using a model archive file
"""
import argparse
import os
import tarfile
import tempfile
import time
import zipfile
from example import classify
def unzip_archive(archive):
"""
Unzips an archive into a temporary directory
Returns a link to that directory
Arguments:
archive -- the path to an archive file
"""
assert os.path.exists(archive), 'File not found - %s' % archive
tmpdir = os.path.join(tempfile.gettempdir(), os.path.basename(archive))
assert tmpdir != archive # That wouldn't work out
if os.path.exists(tmpdir):
# files are already extracted
pass
else:
if tarfile.is_tarfile(archive):
print 'Extracting tarfile ...'
with tarfile.open(archive) as tf:
tf.extractall(path=tmpdir)
elif zipfile.is_zipfile(archive):
print 'Extracting zipfile ...'
with zipfile.ZipFile(archive) as zf:
zf.extractall(path=tmpdir)
else:
raise ValueError('Unknown file type for %s' % os.path.basename(archive))
return tmpdir
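# Example (hypothetical archive name): unzip_archive('/data/20170101-120000-abcd.tar.gz')
# extracts into <system temp dir>/20170101-120000-abcd.tar.gz/ and returns that
# directory; a second call with the same archive reuses the extracted copy.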
def classify_with_archive(archive, image_files, batch_size=None, use_gpu=True):
"""
"""
tmpdir = unzip_archive(archive)
caffemodel = None
deploy_file = None
mean_file = None
labels_file = None
for filename in os.listdir(tmpdir):
full_path = os.path.join(tmpdir, filename)
if filename.endswith('.caffemodel'):
caffemodel = full_path
elif filename == 'deploy.prototxt':
deploy_file = full_path
elif filename.endswith('.binaryproto'):
mean_file = full_path
elif filename == 'labels.txt':
labels_file = full_path
else:
print 'Unknown file:', filename
assert caffemodel is not None, 'Caffe model file not found'
assert deploy_file is not None, 'Deploy file not found'
classify(caffemodel, deploy_file, image_files,
mean_file=mean_file, labels_file=labels_file,
batch_size=batch_size, use_gpu=use_gpu)
if __name__ == '__main__':
script_start_time = time.time()
parser = argparse.ArgumentParser(description='Classification example using an archive - DIGITS')
# Positional arguments
parser.add_argument('archive', help='Path to a DIGITS model archive')
parser.add_argument('image_file', nargs='+', help='Path[s] to an image')
# Optional arguments
parser.add_argument('--batch-size', type=int)
parser.add_argument('--nogpu', action='store_true', help="Don't use the GPU")
args = vars(parser.parse_args())
classify_with_archive(args['archive'], args['image_file'],
batch_size=args['batch_size'],
use_gpu=(not args['nogpu']),
)
print 'Script took %f seconds.' % (time.time() - script_start_time,)
| DIGITS-master | examples/classification/use_archive.py |
#!/usr/bin/env python2
# Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
"""
Classify an image using individual model files
Use this script as an example to build your own tool
"""
import argparse
import os
import time
from google.protobuf import text_format
import numpy as np
import PIL.Image
import scipy.misc
os.environ['GLOG_minloglevel'] = '2' # Suppress most caffe output
import caffe # noqa
from caffe.proto import caffe_pb2 # noqa
def get_net(caffemodel, deploy_file, use_gpu=True):
"""
Returns an instance of caffe.Net
Arguments:
caffemodel -- path to a .caffemodel file
deploy_file -- path to a .prototxt file
Keyword arguments:
use_gpu -- if True, use the GPU for inference
"""
if use_gpu:
caffe.set_mode_gpu()
# load a new model
return caffe.Net(deploy_file, caffemodel, caffe.TEST)
def get_transformer(deploy_file, mean_file=None):
"""
Returns an instance of caffe.io.Transformer
Arguments:
deploy_file -- path to a .prototxt file
Keyword arguments:
mean_file -- path to a .binaryproto file (optional)
"""
network = caffe_pb2.NetParameter()
with open(deploy_file) as infile:
text_format.Merge(infile.read(), network)
if network.input_shape:
dims = network.input_shape[0].dim
else:
dims = network.input_dim[:4]
t = caffe.io.Transformer(inputs={'data': dims})
t.set_transpose('data', (2, 0, 1)) # transpose to (channels, height, width)
# color images
if dims[1] == 3:
# channel swap
t.set_channel_swap('data', (2, 1, 0))
if mean_file:
# set mean pixel
with open(mean_file, 'rb') as infile:
blob = caffe_pb2.BlobProto()
blob.MergeFromString(infile.read())
if blob.HasField('shape'):
blob_dims = blob.shape
assert len(blob_dims) == 4, 'Shape should have 4 dimensions - shape is "%s"' % blob.shape
elif blob.HasField('num') and blob.HasField('channels') and \
blob.HasField('height') and blob.HasField('width'):
blob_dims = (blob.num, blob.channels, blob.height, blob.width)
else:
raise ValueError('blob does not provide shape or 4d dimensions')
pixel = np.reshape(blob.data, blob_dims[1:]).mean(1).mean(1)
t.set_mean('data', pixel)
return t
def load_image(path, height, width, mode='RGB'):
"""
Load an image from disk
Returns an np.ndarray (channels x width x height)
Arguments:
path -- path to an image on disk
width -- resize dimension
height -- resize dimension
Keyword arguments:
mode -- the PIL mode that the image should be converted to
(RGB for color or L for grayscale)
"""
image = PIL.Image.open(path)
image = image.convert(mode)
image = np.array(image)
# squash
image = scipy.misc.imresize(image, (height, width), 'bilinear')
return image
def forward_pass(images, net, transformer, batch_size=None):
"""
Returns scores for each image as an np.ndarray (nImages x nClasses)
Arguments:
images -- a list of np.ndarrays
net -- a caffe.Net
transformer -- a caffe.io.Transformer
Keyword arguments:
batch_size -- how many images can be processed at once
(a high value may result in out-of-memory errors)
"""
if batch_size is None:
batch_size = 1
caffe_images = []
for image in images:
if image.ndim == 2:
caffe_images.append(image[:, :, np.newaxis])
else:
caffe_images.append(image)
dims = transformer.inputs['data'][1:]
scores = None
for chunk in [caffe_images[x:x + batch_size] for x in xrange(0, len(caffe_images), batch_size)]:
new_shape = (len(chunk),) + tuple(dims)
if net.blobs['data'].data.shape != new_shape:
net.blobs['data'].reshape(*new_shape)
for index, image in enumerate(chunk):
image_data = transformer.preprocess('data', image)
net.blobs['data'].data[index] = image_data
start = time.time()
output = net.forward()[net.outputs[-1]]
end = time.time()
if scores is None:
scores = np.copy(output)
else:
scores = np.vstack((scores, output))
print 'Processed %s/%s images in %f seconds ...' % (len(scores), len(caffe_images), (end - start))
return scores
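# A minimal end-to-end sketch using the helpers above (file names are hypothetical):
#
#   net = get_net('snapshot.caffemodel', 'deploy.prototxt', use_gpu=False)
#   transformer = get_transformer('deploy.prototxt', mean_file='mean.binaryproto')
#   image = load_image('example.jpg', 256, 256, mode='RGB')
#   scores = forward_pass([image], net, transformer, batch_size=1)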
def read_labels(labels_file):
"""
Returns a list of strings
Arguments:
labels_file -- path to a .txt file
"""
if not labels_file:
print 'WARNING: No labels file provided. Results will be difficult to interpret.'
return None
labels = []
with open(labels_file) as infile:
for line in infile:
label = line.strip()
if label:
labels.append(label)
assert len(labels), 'No labels found'
return labels
def classify(caffemodel, deploy_file, image_files,
mean_file=None, labels_file=None, batch_size=None, use_gpu=True):
"""
Classify some images against a Caffe model and print the results
Arguments:
caffemodel -- path to a .caffemodel
deploy_file -- path to a .prototxt
image_files -- list of paths to images
Keyword arguments:
mean_file -- path to a .binaryproto
labels_file -- path to a .txt file
batch_size -- how many images to process at once
use_gpu -- if True, run inference on the GPU
"""
# Load the model and images
net = get_net(caffemodel, deploy_file, use_gpu)
transformer = get_transformer(deploy_file, mean_file)
_, channels, height, width = transformer.inputs['data']
if channels == 3:
mode = 'RGB'
elif channels == 1:
mode = 'L'
else:
raise ValueError('Invalid number for channels: %s' % channels)
images = [load_image(image_file, height, width, mode) for image_file in image_files]
labels = read_labels(labels_file)
# Classify the image
scores = forward_pass(images, net, transformer, batch_size=batch_size)
#
# Process the results
#
indices = (-scores).argsort()[:, :5] # take top 5 results
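# worked example: a scores row of [0.1, 0.7, 0.2] gives an argsort row of
# [1, 2, 0], i.e. class indices ordered from highest to lowest score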
classifications = []
for image_index, index_list in enumerate(indices):
result = []
for i in index_list:
# 'i' is a category in labels and also an index into scores
if labels is None:
label = 'Class #%s' % i
else:
label = labels[i]
result.append((label, round(100.0 * scores[image_index, i], 4)))
classifications.append(result)
for index, classification in enumerate(classifications):
print '{:-^80}'.format(' Prediction for %s ' % image_files[index])
for label, confidence in classification:
print '{:9.4%} - "{}"'.format(confidence / 100.0, label)
print
if __name__ == '__main__':
script_start_time = time.time()
parser = argparse.ArgumentParser(description='Classification example - DIGITS')
# Positional arguments
parser.add_argument('caffemodel', help='Path to a .caffemodel')
parser.add_argument('deploy_file', help='Path to the deploy file')
parser.add_argument('image_file', nargs='+', help='Path[s] to an image')
# Optional arguments
    parser.add_argument('-m', '--mean', help='Path to a mean file (*.binaryproto)')
parser.add_argument('-l', '--labels', help='Path to a labels file')
parser.add_argument('--batch-size', type=int)
parser.add_argument('--nogpu', action='store_true', help="Don't use the GPU")
args = vars(parser.parse_args())
classify(
args['caffemodel'],
args['deploy_file'],
args['image_file'],
args['mean'],
args['labels'],
args['batch_size'],
not args['nogpu'],
)
print 'Script took %f seconds.' % (time.time() - script_start_time,)
| DIGITS-master | examples/classification/example.py |
# Tensorflow Triangle binary segmentation model using TensorFlow-Slim
from model import Tower
from utils import model_property
import tensorflow as tf
import tensorflow.contrib.slim as slim
import utils as digits
class UserModel(Tower):
@model_property
def inference(self):
_x = tf.reshape(self.x, shape=[-1, self.input_shape[0], self.input_shape[1], self.input_shape[2]])
with slim.arg_scope([slim.conv2d, slim.conv2d_transpose],
weights_initializer=tf.contrib.layers.xavier_initializer(),
weights_regularizer=slim.l2_regularizer(0.05)):
# 1*H*W -> 32*H*W
model = slim.conv2d(_x, 32, [3, 3], padding='SAME', scope='conv1')
# 32*H*W -> 1024*H/16*W/16
model = slim.conv2d(model, 1024, [16, 16], padding='VALID', scope='conv2', stride=16)
model = slim.conv2d_transpose(model, self.input_shape[2], [16, 16],
stride=16, padding='VALID', activation_fn=None, scope='deconv_1')
return model
@model_property
def loss(self):
y = tf.reshape(self.y, shape=[-1, self.input_shape[0], self.input_shape[1], self.input_shape[2]])
        # For a richer TensorBoard summary, the input, label and model output could be put side by side (sbs) into a single image summary:
# tf.summary.image(sbs.op.name, sbs, max_outputs=3, collections=["training summary"])
return digits.mse_loss(self.inference, y)
| DIGITS-master | examples/binary-segmentation/binary_segmentation-TF.py |
#!/usr/bin/env python2
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
"""
Functions for creating a dummy image segmentation dataset
"""
import argparse
import numpy as np
import os
import PIL.Image
import PIL.ImageDraw
import shutil
import time
INPUT_FOLDER = "input"
TARGET_FOLDER = "target"
def create_images(folder, image_count, image_size, grid_size):
"""
Create image pairs for segmentation dataset
"""
# create folders
if os.path.exists(folder):
shutil.rmtree(folder)
input_folder = os.path.join(folder, INPUT_FOLDER)
os.makedirs(input_folder)
target_folder = os.path.join(folder, TARGET_FOLDER)
os.makedirs(target_folder)
# create random x,y coordinates for image_count triangles
coords = np.random.uniform(size=(image_count, 6)) * image_size
for idx in xrange(image_count):
triangle = coords[idx].tolist()
# create blank images
image_input = PIL.Image.new("L", (image_size, image_size), 255)
image_target = PIL.Image.new("L", (image_size, image_size), 255)
# draw an empty triangle
draw = PIL.ImageDraw.Draw(image_input)
draw.polygon(
triangle,
outline=0,
)
# draw a full triangle
draw = PIL.ImageDraw.Draw(image_target)
draw.polygon(
triangle,
outline=0,
fill=0
)
# save images
input_fname = os.path.join(input_folder, "%08d.png" % idx)
target_fname = os.path.join(target_folder, "%08d.png" % idx)
image_input.save(input_fname)
image_target.save(target_fname)
# create sample image grid
image_grid = PIL.Image.new("L", (grid_size * image_size, grid_size * image_size), 255)
coords = np.random.uniform(size=(grid_size, grid_size, 3, 2)) * image_size
draw = PIL.ImageDraw.Draw(image_grid)
for x in xrange(grid_size):
for y in xrange(grid_size):
triangle = coords[x][y]
# shift
triangle += np.array([x * image_size, y * image_size])
triangle = triangle.reshape(6).tolist()
# draw an empty triangle
draw.polygon(
triangle,
outline=0,
)
image_grid.save(os.path.join(folder, "grid.png"))
return
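# Illustrative call (hypothetical output folder):
#   create_images('./triangles', 100, 32, 10)
# writes 100 input/target PNG pairs plus a 10x10 sample grid image.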
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Create segmentation image pairs')
# Positional arguments
parser.add_argument('output', help='Output folder')
# Optional arguments
parser.add_argument(
'-c', '--image_count', type=int,
default=10000, help='How many images to create')
parser.add_argument(
'-s',
'--image_size',
type=int,
default=32,
        help='Size of the (square) images in pixels')
parser.add_argument(
'-g',
'--grid_size',
type=int,
default=10,
help='Size of image grid in sample image')
args = vars(parser.parse_args())
start_time = time.time()
create_images(args['output'], args['image_count'], args['image_size'], args['grid_size'])
print 'Done after %s seconds' % (time.time() - start_time,)
| DIGITS-master | examples/binary-segmentation/create_images.py |
# Tensorflow MNIST autoencoder model using TensorFlow-Slim
from model import Tower
from utils import model_property
import tensorflow as tf
import tensorflow.contrib.slim as slim
import utils as digits
class UserModel(Tower):
@model_property
def inference(self):
with slim.arg_scope([slim.fully_connected],
weights_initializer=tf.contrib.layers.xavier_initializer(),
weights_regularizer=slim.l2_regularizer(0.0005)):
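            # 0.00390625 = 1/256: scales raw pixel values (0-255) down to roughly [0, 1)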
const = tf.constant(0.00390625)
model = tf.multiply(self.x, const)
            model = tf.reshape(model, shape=[-1, 784])  # equivalent to `model = slim.flatten(model)`
model = slim.fully_connected(model, 300, scope='fc1')
model = slim.fully_connected(model, 50, scope='fc2')
model = slim.fully_connected(model, 300, scope='fc3')
model = slim.fully_connected(model, 784, activation_fn=None, scope='fc4')
model = tf.reshape(model, shape=[-1, self.input_shape[0], self.input_shape[1], self.input_shape[2]])
# The below image summary makes it very easy to review your result
tf.summary.image(self.x.op.name, self.x, max_outputs=5, collections=['summaries'])
tf.summary.image(model.op.name, model, max_outputs=5, collections=['summaries'])
return model
@model_property
def loss(self):
return digits.mse_loss(self.inference, self.x)
| DIGITS-master | examples/autoencoder/autoencoder-TF.py |
#!/usr/bin/env python2
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
"""
Downloads BVLC AlexNet and performs the required net surgery to convert it into FCN-AlexNet
"""
import urllib
import caffe
ALEXNET_PROTOTXT_URL = "https://raw.githubusercontent.com/BVLC/caffe/rc3/models/bvlc_alexnet/deploy.prototxt"
ALEXNET_PROTOTXT_FILENAME = "bvlc_alexnet.deploy.prototxt"
ALEXNET_MODEL_URL = "http://dl.caffe.berkeleyvision.org/bvlc_alexnet.caffemodel"
ALEXNET_MODEL_FILENAME = "bvlc_alexnet.caffemodel"
FCN_ALEXNET_PROTOTXT_FILENAME = "fcn_alexnet.deploy.prototxt"
FCN_ALEXNET_MODEL_FILENAME = "fcn_alexnet.caffemodel"
def download(url, filename):
print "Downloading %s..." % url
urllib.urlretrieve(url, filename)
def generate_fcn():
# download files
print "Downloading files (this might take a few minutes)..."
download(ALEXNET_PROTOTXT_URL, ALEXNET_PROTOTXT_FILENAME)
download(ALEXNET_MODEL_URL, ALEXNET_MODEL_FILENAME)
caffe.set_mode_cpu()
print "Loading Alexnet model..."
alexnet = caffe.Net(ALEXNET_PROTOTXT_FILENAME, ALEXNET_MODEL_FILENAME, caffe.TEST)
print "Loading FCN-Alexnet prototxt..."
fcn_alexnet = caffe.Net(FCN_ALEXNET_PROTOTXT_FILENAME, caffe.TEST)
print "Transplanting parameters..."
transplant(fcn_alexnet, alexnet)
print "Saving FCN-Alexnet model to %s " % FCN_ALEXNET_MODEL_FILENAME
fcn_alexnet.save(FCN_ALEXNET_MODEL_FILENAME)
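# Note (added for clarity): the parameter copy in transplant() below works because the
# convolutionalized layers of FCN-AlexNet (e.g. fc6/fc7 turned into convolutions) hold
# the same number of elements as AlexNet's fully-connected layers, so assigning through
# the flattened .flat view transplants the weights directly.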
def transplant(new_net, net, suffix=''):
# from fcn.berkeleyvision.org
for p in net.params:
p_new = p + suffix
if p_new not in new_net.params:
print 'dropping', p
continue
for i in range(len(net.params[p])):
if i > (len(new_net.params[p_new]) - 1):
print 'dropping', p, i
break
if net.params[p][i].data.shape != new_net.params[p_new][i].data.shape:
print 'coercing', p, i, 'from', net.params[p][i].data.shape, 'to', new_net.params[p_new][i].data.shape
else:
print 'copying', p, ' -> ', p_new, i
new_net.params[p_new][i].data.flat = net.params[p][i].data.flat
if __name__ == '__main__':
generate_fcn()
| DIGITS-master | examples/semantic-segmentation/net_surgery.py |
#!/usr/bin/env python2
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
"""
Functions for creating a text classification dataset out of .csv files
The expected CSV structure is:
<Class>,<Text Field 1>, ..., <Text Field N>
"""
import argparse
import caffe
import csv
import lmdb
import numpy as np
import os
import PIL.Image
import shutil
import time
DB_BATCH_SIZE = 1024
ALPHABET = "abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:'\"/\\|_@#$%^&*~`+ =<>()[]{}"
FEATURE_LEN = 1024 # must have integer square root
def _save_image(image, filename):
# convert from (channels, heights, width) to (height, width)
image = image[0]
image = PIL.Image.fromarray(image)
image.save(filename)
def create_dataset(folder, input_file_name, db_batch_size=None, create_images=False, labels_file=None):
"""
Creates LMDB database and images (if create_images==True)
"""
if db_batch_size is None:
db_batch_size = DB_BATCH_SIZE
# open output LMDB
output_db = lmdb.open(folder, map_async=True, max_dbs=0)
print "Reading input file %s..." % input_file_name
# create character dict
cdict = {}
for i, c in enumerate(ALPHABET):
        cdict[c] = i + 2  # indices start at 1; index 1 is reserved for 'other' characters
samples = {}
with open(input_file_name) as f:
reader = csv.DictReader(f, fieldnames=['class'], restkey='fields')
for row in reader:
label = row['class']
if label not in samples:
samples[label] = []
sample = np.ones(FEATURE_LEN) # one by default (i.e. 'other' character)
count = 0
for field in row['fields']:
for char in field.lower():
if char in cdict:
sample[count] = cdict[char]
count += 1
if count >= FEATURE_LEN - 1:
break
samples[label].append(sample)
samples_per_class = None
classes = samples.keys()
class_samples = []
for c in classes:
if samples_per_class is None:
samples_per_class = len(samples[c])
else:
assert samples_per_class == len(samples[c])
class_samples.append(samples[c])
indices = np.arange(samples_per_class)
np.random.shuffle(indices)
labels = None
if labels_file is not None:
labels = map(str.strip, open(labels_file, "r").readlines())
        assert len(labels) == len(classes), 'Number of labels must match number of classes'
else:
labels = classes
print "Class labels: %s" % repr(labels)
if create_images:
for label in labels:
            os.makedirs(os.path.join(folder, label))
print "Storing data into %s..." % folder
batch = []
for idx in indices:
for c, cname in enumerate(classes):
class_id = c + 1 # indices start at 1
sample = class_samples[c][idx].astype('uint8')
sample = sample[np.newaxis, np.newaxis, ...]
            side = int(np.sqrt(FEATURE_LEN))
            sample = sample.reshape((1, side, side))
if create_images:
                filename = os.path.join(folder, labels[c], '%d.png' % idx)
_save_image(sample, filename)
datum = caffe.io.array_to_datum(sample, class_id)
batch.append(('%d_%d' % (idx, class_id), datum))
if len(batch) >= db_batch_size:
_write_batch_to_lmdb(output_db, batch)
batch = []
    # write any remaining samples
    if len(batch) > 0:
        _write_batch_to_lmdb(output_db, batch)
    # close database
output_db.close()
return
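# Illustrative input row (hypothetical data): a CSV line such as
#   "2","Stocks rallied on Friday","Wall Street ended the week higher"
# is lower-cased, encoded character-by-character into a 1024-element vector via
# ALPHABET, and stored in the LMDB as a 32x32 single-channel image with a
# 1-based class id.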
def _write_batch_to_lmdb(db, batch):
"""
Write a batch of (key,value) to db
"""
try:
with db.begin(write=True) as lmdb_txn:
for key, datum in batch:
lmdb_txn.put(key, datum.SerializeToString())
except lmdb.MapFullError:
# double the map_size
curr_limit = db.info()['map_size']
new_limit = curr_limit * 2
try:
db.set_mapsize(new_limit) # double it
except AttributeError as e:
version = tuple(int(x) for x in lmdb.__version__.split('.'))
if version < (0, 87):
raise ImportError('py-lmdb is out of date (%s vs 0.87)' % lmdb.__version__)
else:
raise e
# try again
_write_batch_to_lmdb(db, batch)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Create Dataset tool')
# Positional arguments
parser.add_argument('input', help='Input .csv file')
parser.add_argument('output', help='Output Folder')
parser.add_argument('--create-images', action='store_true')
parser.add_argument('--labels', default=None)
args = vars(parser.parse_args())
if os.path.exists(args['output']):
shutil.rmtree(args['output'])
os.makedirs(args['output'])
start_time = time.time()
create_dataset(
args['output'],
args['input'],
create_images=args['create_images'],
labels_file=args['labels'],
)
print 'Done after %s seconds' % (time.time() - start_time,)
| DIGITS-master | examples/text-classification/create_dataset.py |
from model import Tower
from utils import model_property
import tensorflow as tf
import tensorflow.contrib.slim as slim
import utils as digits
class UserModel(Tower):
@model_property
def inference(self):
_x = tf.reshape(self.x, shape=[-1, self.input_shape[0], self.input_shape[1], self.input_shape[2]])
# tf.image_summary(_x.op.name, _x, max_images=10, collections=[digits.GraphKeys.SUMMARIES_TRAIN])
# Split out the color channels
_, model_g, model_b = tf.split(_x, 3, 3, name='split_channels')
# tf.image_summary(model_g.op.name, model_g, max_images=10, collections=[digits.GraphKeys.SUMMARIES_TRAIN])
# tf.image_summary(model_b.op.name, model_b, max_images=10, collections=[digits.GraphKeys.SUMMARIES_TRAIN])
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_initializer=tf.contrib.layers.xavier_initializer(),
weights_regularizer=slim.l2_regularizer(0.0005)):
with tf.variable_scope("siamese") as scope:
def make_tower(net):
net = slim.conv2d(net, 20, [5, 5], padding='VALID', scope='conv1')
net = slim.max_pool2d(net, [2, 2], padding='VALID', scope='pool1')
net = slim.conv2d(net, 50, [5, 5], padding='VALID', scope='conv2')
net = slim.max_pool2d(net, [2, 2], padding='VALID', scope='pool2')
net = slim.flatten(net)
net = slim.fully_connected(net, 500, scope='fc1')
net = slim.fully_connected(net, 2, activation_fn=None, scope='fc2')
return net
model_g = make_tower(model_g)
model_g = tf.reshape(model_g, shape=[-1, 2])
scope.reuse_variables()
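                # reuse_variables() makes the second tower share the exact same
                # weights as the first one, which is what makes the network siamese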
model_b = make_tower(model_b)
model_b = tf.reshape(model_b, shape=[-1, 2])
return [model_g, model_b]
@model_property
def loss(self):
_y = tf.reshape(self.y, shape=[-1])
_y = tf.to_float(_y)
model = self.inference
return digits.constrastive_loss(model[0], model[1], _y)
| DIGITS-master | examples/siamese/siamese-TF.py |
#!/usr/bin/env python2
# Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
"""
Functions for creating temporary LMDBs
Used in test_views
"""
import argparse
import os
import random
import re
import sys
import time
import lmdb
import numpy as np
import PIL.Image
if __name__ == '__main__':
dirname = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(dirname, '..', '..'))
import digits.config # noqa
from digits import utils # noqa
# Import digits.config first to set the path to Caffe
import caffe.io # noqa
import caffe_pb2 # noqa
IMAGE_SIZE = 10
TRAIN_IMAGE_COUNT = 1000
VAL_IMAGE_COUNT = 1000
TEST_IMAGE_COUNT = 10
DB_BATCH_SIZE = 100
def create_lmdbs(folder, file_list, image_count=None, db_batch_size=None):
"""
Creates LMDBs for generic inference
Returns the filename for a test image
Creates these files in "folder":
train_images/
train_labels/
val_images/
val_labels/
mean.binaryproto
test.png
"""
if image_count is None:
train_image_count = TRAIN_IMAGE_COUNT
else:
train_image_count = image_count
val_image_count = VAL_IMAGE_COUNT
if db_batch_size is None:
db_batch_size = DB_BATCH_SIZE
# read file list
images = []
f = open(file_list)
for line in f.readlines():
line = line.strip()
if not line:
continue
        # each line should contain an image path followed by a numerical label
        match = re.match(r'(.*\S)\s+(\d+)$', line)
        if match:
            path = match.group(1)
            ground_truth = int(match.group(2))
            images.append([path, ground_truth])
print "Found %d image paths in image list" % len(images)
for phase, image_count in [
('train', train_image_count),
('val', val_image_count)]:
print "Will create %d pairs of %s images" % (image_count, phase)
# create DBs
image_db = lmdb.open(os.path.join(folder, '%s_images' % phase),
map_async=True, max_dbs=0)
label_db = lmdb.open(os.path.join(folder, '%s_labels' % phase),
map_async=True, max_dbs=0)
# add up all images to later create mean image
image_sum = None
shape = None
# save test images (one for each label)
testImagesSameClass = []
testImagesDifferentClass = []
# arrays for image and label batch writing
image_batch = []
label_batch = []
for i in xrange(image_count):
# pick up random indices from image list
index1 = random.randint(0, len(images) - 1)
index2 = random.randint(0, len(images) - 1)
# label=1 if images are from the same class otherwise label=0
label = 1 if int(images[index1][1]) == int(images[index2][1]) else 0
# load images from files
image1 = np.array(utils.image.load_image(images[index1][0]))
image2 = np.array(utils.image.load_image(images[index2][0]))
if not shape:
# initialize image sum for mean image
shape = image1.shape
image_sum = np.zeros((3, shape[0], shape[1]), 'float64')
assert(image1.shape == shape and image2.shape == shape)
# create BGR image: blue channel will contain first image,
# green channel will contain second image
image_pair = np.zeros(image_sum.shape)
image_pair[0] = image1
image_pair[1] = image2
image_sum += image_pair
# save test images on first pass
if label > 0 and len(testImagesSameClass) < TEST_IMAGE_COUNT:
testImagesSameClass.append(image_pair)
if label == 0 and len(testImagesDifferentClass) < TEST_IMAGE_COUNT:
testImagesDifferentClass.append(image_pair)
# encode into Datum object
image = image_pair.astype('uint8')
datum = caffe.io.array_to_datum(image, -1)
image_batch.append([str(i), datum])
# create label Datum
label_datum = caffe_pb2.Datum()
label_datum.channels, label_datum.height, label_datum.width = 1, 1, 1
label_datum.float_data.extend(np.array([label]).flat)
label_batch.append([str(i), label_datum])
if (i % db_batch_size == (db_batch_size - 1)) or (i == image_count - 1):
_write_batch_to_lmdb(image_db, image_batch)
_write_batch_to_lmdb(label_db, label_batch)
image_batch = []
label_batch = []
if i % (image_count / 20) == 0:
print "%d/%d" % (i, image_count)
# close databases
image_db.close()
label_db.close()
# save mean
mean_image = (image_sum / image_count).astype('uint8')
_save_mean(mean_image, os.path.join(folder, '%s_mean.binaryproto' % phase))
_save_mean(mean_image, os.path.join(folder, '%s_mean.png' % phase))
# create test images
for idx, image in enumerate(testImagesSameClass):
_save_image(image, os.path.join(folder, '%s_test_same_class_%d.png' % (phase, idx)))
for idx, image in enumerate(testImagesDifferentClass):
_save_image(image, os.path.join(folder, '%s_test_different_class_%d.png' % (phase, idx)))
return
def _write_batch_to_lmdb(db, batch):
"""
Write a batch of (key,value) to db
"""
try:
with db.begin(write=True) as lmdb_txn:
for key, datum in batch:
lmdb_txn.put(key, datum.SerializeToString())
except lmdb.MapFullError:
# double the map_size
curr_limit = db.info()['map_size']
new_limit = curr_limit * 2
try:
db.set_mapsize(new_limit) # double it
except AttributeError as e:
version = tuple(int(x) for x in lmdb.__version__.split('.'))
if version < (0, 87):
raise ImportError('py-lmdb is out of date (%s vs 0.87)' % lmdb.__version__)
else:
raise e
# try again
_write_batch_to_lmdb(db, batch)
def _save_image(image, filename):
# converting from BGR to RGB
image = image[[2, 1, 0], ...] # channel swap
# convert to (height, width, channels)
image = image.astype('uint8').transpose((1, 2, 0))
image = PIL.Image.fromarray(image)
image.save(filename)
def _save_mean(mean, filename):
"""
Saves mean to file
Arguments:
mean -- the mean as an np.ndarray
filename -- the location to save the image
"""
if filename.endswith('.binaryproto'):
blob = caffe_pb2.BlobProto()
blob.num = 1
blob.channels = mean.shape[0]
blob.height = mean.shape[1]
blob.width = mean.shape[2]
blob.data.extend(mean.astype(float).flat)
with open(filename, 'wb') as outfile:
outfile.write(blob.SerializeToString())
elif filename.endswith(('.jpg', '.jpeg', '.png')):
_save_image(mean, filename)
else:
raise ValueError('unrecognized file extension')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Create-LMDB tool - DIGITS')
# Positional arguments
parser.add_argument('folder', help='Where to save the images')
parser.add_argument('file_list', help='File list')
# Optional arguments
parser.add_argument('-c', '--image_count', type=int, help='How many images')
args = vars(parser.parse_args())
if os.path.exists(args['folder']):
print 'ERROR: Folder already exists'
sys.exit(1)
else:
os.makedirs(args['folder'])
print 'Creating images at "%s" ...' % args['folder']
start_time = time.time()
create_lmdbs(
args['folder'],
args['file_list'],
image_count=args['image_count'],
)
print 'Done after %s seconds' % (time.time() - start_time,)
| DIGITS-master | examples/siamese/create_db.py |
from model import Tower
from utils import model_property
import tensorflow as tf
import tensorflow.contrib.slim as slim
import utils as digits
class UserModel(Tower):
@model_property
def inference(self):
x = tf.reshape(self.x, shape=[-1, self.input_shape[0], self.input_shape[1], self.input_shape[2]])
# scale (divide by MNIST std)
x = x * 0.0125
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_initializer=tf.contrib.layers.xavier_initializer(),
weights_regularizer=slim.l2_regularizer(0.0005)):
model = slim.conv2d(x, 20, [5, 5], padding='VALID', scope='conv1')
model = slim.max_pool2d(model, [2, 2], padding='VALID', scope='pool1')
model = slim.conv2d(model, 50, [5, 5], padding='VALID', scope='conv2')
model = slim.max_pool2d(model, [2, 2], padding='VALID', scope='pool2')
model = slim.flatten(model)
model = slim.fully_connected(model, 500, scope='fc1')
model = slim.dropout(model, 0.5, is_training=self.is_training, scope='do1')
model = slim.fully_connected(model, 10, activation_fn=None, scope='fc2_not_in_use')
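        # Note: the output of 'fc2_not_in_use' is immediately overwritten below and
        # never reaches the loss; only 'fc2_true', sized to the dataset's class
        # count (self.nclasses), is actually used.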
model = slim.fully_connected(model, self.nclasses, activation_fn=None, scope='fc2_true')
return model
@model_property
def loss(self):
model = self.inference
loss = digits.classification_loss(model, self.y)
accuracy = digits.classification_accuracy(model, self.y)
self.summaries.append(tf.summary.scalar(accuracy.op.name, accuracy))
return loss
| DIGITS-master | examples/fine-tuning/lenet-fine-tune-tf.py |
#!/usr/bin/env python2
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
"""
Prepares KITTI data for ingestion by DIGITS
"""
import argparse
import os
import re
import shutil
import zipfile
def extract_data(input_dir, output_dir):
"""
Extract zipfiles at input_dir into output_dir
"""
if os.path.isdir(output_dir):
print ' Using extracted data at %s.' % output_dir
return
for filename in (
'data_object_label_2.zip',
'data_object_image_2.zip',
'devkit_object.zip'):
filename = os.path.join(input_dir, filename)
zf = zipfile.ZipFile(filename, 'r')
print 'Unzipping %s ...' % filename
zf.extractall(output_dir)
def get_image_to_video_mapping(devkit_dir):
"""
    Return a mapping from image index (e.g. 7282, i.e. training/image_2/007282.png)
to video and frame (e.g. {'video': '2011_09_26_0005', 'frame': 109})
"""
image_to_video = {}
mapping_lines = None
with open(os.path.join(devkit_dir, 'mapping', 'train_mapping.txt'), 'r') as infile:
mapping_lines = infile.readlines()
with open(os.path.join(devkit_dir, 'mapping', 'train_rand.txt'), 'r') as infile:
for image_index, mapping_index in enumerate(infile.read().split(',')):
mapping_index = mapping_index.strip()
if not mapping_index:
continue
mapping_index = int(mapping_index) - 1
map_line = mapping_lines[mapping_index]
match = re.match('^\s*[\d_]+\s+(\d{4}_\d{2}_\d{2})_drive_(\d{4})_sync\s+(\d+)$\s*$', map_line)
if not match:
raise ValueError('Unrecognized mapping line "%s"' % map_line)
date = match.group(1)
video_id = match.group(2)
video_name = '%s_%s' % (date, video_id)
frame_index = int(match.group(3))
if image_index in image_to_video:
raise ValueError('Conflicting mappings for image %s' % image_index)
image_to_video[image_index] = {
'video': video_name,
'frame': frame_index,
}
return image_to_video
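# Illustrative mapping line (format inferred from the regex above, values hypothetical):
#   "2011_09_26 2011_09_26_drive_0005_sync 0000000109"
# which is parsed into {'video': '2011_09_26_0005', 'frame': 109}.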
def split_by_video(training_dir, mapping, split_dir,
use_symlinks=True):
"""
Create one directory per video in split_dir
"""
new_images_dir = os.path.join(split_dir, 'images')
new_labels_dir = os.path.join(split_dir, 'labels')
if os.path.isdir(new_images_dir):
shutil.rmtree(new_images_dir)
if os.path.isdir(new_labels_dir):
shutil.rmtree(new_labels_dir)
for old_image_fname in os.listdir(os.path.join(training_dir, 'image_2')):
old_image_path = os.path.abspath(os.path.join(training_dir, 'image_2', old_image_fname))
image_index_str, image_ext = os.path.splitext(
os.path.basename(old_image_fname))
image_index_int = int(image_index_str)
video_name = mapping[image_index_int]['video']
frame_id = '%09d' % mapping[image_index_int]['frame']
# Copy image
new_image_dir = os.path.join(new_images_dir, video_name)
if not os.path.isdir(new_image_dir):
os.makedirs(new_image_dir)
new_image_fname = '%s_%s%s' % (frame_id, image_index_str, image_ext)
new_image_path = os.path.join(new_image_dir, new_image_fname)
if use_symlinks:
os.symlink(old_image_path, new_image_path)
else:
shutil.copyfile(old_image_path, new_image_path)
# Copy label
old_label_fname = '%s.txt' % image_index_str
old_label_path = os.path.abspath(os.path.join(training_dir, 'label_2', old_label_fname))
new_label_fname = '%s_%s.txt' % (frame_id, image_index_str)
new_label_dir = os.path.join(new_labels_dir, video_name)
if not os.path.isdir(new_label_dir):
os.makedirs(new_label_dir)
new_label_path = os.path.join(new_label_dir, new_label_fname)
if use_symlinks:
os.symlink(old_label_path, new_label_path)
else:
shutil.copyfile(old_label_path, new_label_path)
def split_for_training(split_dir, train_dir, val_dir,
use_symlinks=True):
"""
Create directories of images for training and validation
"""
if os.path.isdir(train_dir):
shutil.rmtree(train_dir)
if os.path.isdir(val_dir):
shutil.rmtree(val_dir)
for images_dirname in os.listdir(os.path.join(split_dir, 'images')):
match = re.match('^(\d{4})_(\d{2})_(\d{2})_(\d+)$', images_dirname)
if not match:
raise ValueError('Unrecognized format of directory named "%s"' % images_dirname)
# year = int(match.group(1))
month = int(match.group(2))
date = int(match.group(3))
video_id = int(match.group(4))
# Filter out some videos for the validation set
# XXX this is pretty arbitrary
if month == 9 and date == 26 and video_id <= 18:
output_dir = val_dir
else:
output_dir = train_dir
# Copy images
old_images_dir = os.path.join(split_dir, 'images', images_dirname)
new_images_dir = os.path.join(output_dir, 'images')
if not os.path.isdir(new_images_dir):
os.makedirs(new_images_dir)
for fname in os.listdir(old_images_dir):
old_image_path = os.path.realpath(os.path.join(old_images_dir, fname))
new_image_path = os.path.join(new_images_dir, os.path.basename(old_image_path))
if use_symlinks:
os.symlink(old_image_path, new_image_path)
else:
shutil.move(old_image_path, new_image_path)
# Copy labels
old_labels_dir = os.path.join(split_dir, 'labels', images_dirname)
new_labels_dir = os.path.join(output_dir, 'labels')
if not os.path.isdir(new_labels_dir):
os.makedirs(new_labels_dir)
for fname in os.listdir(old_labels_dir):
old_label_path = os.path.realpath(os.path.join(old_labels_dir, fname))
new_label_path = os.path.join(new_labels_dir, os.path.basename(old_label_path))
if use_symlinks:
os.symlink(old_label_path, new_label_path)
else:
shutil.move(old_label_path, new_label_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Prepare KITTI data')
parser.add_argument('-i', '--input-dir', default='',
help='The directory containing the original KITTI zipfiles [default "./"]')
parser.add_argument('-o', '--output-dir', default='kitti-data',
help='The output directory [default "./kitti-data"]')
parser.add_argument('-s', '--no-symlinks', action='store_true',
help='Copy files instead of making symlinks')
args = parser.parse_args()
print 'Extracting zipfiles ...'
extract_data(
args.input_dir,
os.path.join(args.output_dir, 'raw'),
)
print 'Calculating image to video mapping ...'
mapping = get_image_to_video_mapping(
os.path.join(args.output_dir, 'raw'),
)
print 'Splitting images by video ...'
split_by_video(
os.path.join(args.output_dir, 'raw', 'training'),
mapping,
os.path.join(args.output_dir, 'video-split'),
use_symlinks=(not args.no_symlinks),
)
print 'Creating train/val split ...'
split_for_training(
os.path.join(args.output_dir, 'video-split'),
os.path.join(args.output_dir, 'train'),
os.path.join(args.output_dir, 'val'),
use_symlinks=(not args.no_symlinks),
)
print 'Done.'
| DIGITS-master | examples/object-detection/prepare_kitti_data.py |
from model import Tower
from utils import model_property
import tensorflow as tf
import utils as digits
class UserModel(Tower):
@model_property
def inference(self):
const = tf.constant(0.004)
normed = tf.multiply(self.x, const)
        # The reshaping has to be done for TensorFlow to get the shape right
right_shape = tf.reshape(normed, shape=[-1, 50, 50])
transposed = tf.transpose(right_shape, [0, 2, 1])
squeezed = tf.reshape(transposed, shape=[-1, 2500])
# Define weights
weights = {
'w1': tf.get_variable('w1', [2500, 2])
}
biases = {
'b1': tf.get_variable('b1', [2])
}
# Linear activation
model = tf.matmul(squeezed, weights['w1']) + biases['b1']
tf.summary.image(model.op.name, model, max_outputs=1, collections=["Training Summary"])
return model
@model_property
def loss(self):
label = tf.reshape(self.y, shape=[-1, 2])
model = self.inference
loss = digits.mse_loss(model, label)
return loss
| DIGITS-master | examples/regression/regression-TF.py |
#!/usr/bin/env python3
#
# @ build_bios.py
# Builds BIOS using configuration files and dynamically
# imported functions from board directory
#
# Copyright (c) 2019 - 2020, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2021, American Megatrends International LLC.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
"""
This module builds BIOS using configuration files and dynamically
imported functions from board directory
"""
import os
import re
import sys
import glob
import signal
import shutil
import argparse
import traceback
import subprocess
from importlib import import_module
try:
# python 2.7
import ConfigParser as configparser
except ImportError:
# python 3
import configparser
def pre_build(build_config, build_type="DEBUG", silent=False, toolchain=None):
"""Sets the environment variables that shall be used for the build
:param build_config: The build configuration as defined in the JSON
configuration files
:type build_config: Dictionary
:param build_type: The build target, DEBUG, RELEASE, RELEASE_PDB,
TEST_RELEASE
:type build_type: String
:param silent: Enables build in silent mode
:type silent: Boolean
:param toolchain: Specifies the tool chain tag to use for the build
:type toolchain: String
:returns: The updated environment variables
:rtype: Dictionary
"""
# get current environment variables
config = os.environ.copy()
# patch the build config
build_config = patch_config(build_config)
# update the current config with the build config
config.update(build_config)
# make the config and build python 2.7 compatible
config = py_27_fix(config)
# Set WORKSPACE environment.
config["WORKSPACE"] = os.path.abspath(os.path.join("..", "..", "..", ""))
print("Set WORKSPACE as: {}".format(config["WORKSPACE"]))
# Check whether Git has been installed and been added to system path.
try:
subprocess.Popen(["git", "--help"], stdout=subprocess.PIPE)
except OSError:
print("The 'git' command is not recognized.")
print("Please make sure that Git is installed\
and has been added to system path.")
sys.exit(1)
# Create the Conf directory under WORKSPACE
if not os.path.isdir(os.path.join(config["WORKSPACE"], "Conf")):
try:
# create directory
os.makedirs(os.path.join(config["WORKSPACE"], "Conf"))
# copy files to it
config_template_path = os.path.join(config["WORKSPACE"],
config["BASE_TOOLS_PATH"],
"Conf")
config_path = os.path.join(config["WORKSPACE"], "Conf")
shutil.copyfile(config_template_path +
os.sep + "target.template",
config_path + os.sep + "target.txt")
shutil.copyfile(config_template_path +
os.sep + "tools_def.template",
config_path + os.sep + "tools_def.txt")
shutil.copyfile(config_template_path +
os.sep + "build_rule.template",
config_path + os.sep + "build_rule.txt")
except OSError:
print("Error while creating Conf")
sys.exit(1)
# Set other environments.
# Basic Rule:
# Platform override Silicon override Core
# Source override Binary
config["WORKSPACE_PLATFORM"] = os.path.join(config["WORKSPACE"],
config["WORKSPACE_PLATFORM"])
config["WORKSPACE_SILICON"] = os.path.join(config["WORKSPACE"],
config["WORKSPACE_SILICON"])
config["WORKSPACE_FEATURES"] = os.path.join(config["WORKSPACE"],
config["WORKSPACE_FEATURES"])
config["WORKSPACE_DRIVERS"] = os.path.join(config["WORKSPACE"],
config["WORKSPACE_DRIVERS"])
config["WORKSPACE_PLATFORM_BIN"] = \
os.path.join(config["WORKSPACE"], config["WORKSPACE_PLATFORM_BIN"])
config["WORKSPACE_SILICON_BIN"] = \
os.path.join(config["WORKSPACE"], config["WORKSPACE_SILICON_BIN"])
config["WORKSPACE_FSP_BIN"] = os.path.join(config["WORKSPACE"],
config["WORKSPACE_FSP_BIN"])
# add to package path
config["PACKAGES_PATH"] = config["WORKSPACE_PLATFORM"]
config["PACKAGES_PATH"] += os.pathsep + config["WORKSPACE_SILICON"]
config["PACKAGES_PATH"] += os.pathsep + config["WORKSPACE_SILICON_BIN"]
config["PACKAGES_PATH"] += os.pathsep + config["WORKSPACE_FEATURES"]
# add all feature domains in WORKSPACE_FEATURES to package path
for filename in os.listdir(config["WORKSPACE_FEATURES"]):
filepath = os.path.join(config["WORKSPACE_FEATURES"], filename)
# feature domains folder does not contain dec file
if os.path.isdir(filepath) and \
not glob.glob(os.path.join(filepath, "*.dec")):
config["PACKAGES_PATH"] += os.pathsep + filepath
config["PACKAGES_PATH"] += os.pathsep + config["WORKSPACE_DRIVERS"]
config["PACKAGES_PATH"] += os.pathsep + \
os.path.join(config["WORKSPACE"], config["WORKSPACE_FSP_BIN"])
config["PACKAGES_PATH"] += os.pathsep + \
os.path.join(config["WORKSPACE"], config["WORKSPACE_CORE"])
config["PACKAGES_PATH"] += os.pathsep + os.path.join(config["WORKSPACE"])
config["PACKAGES_PATH"] += os.pathsep + config["WORKSPACE_PLATFORM_BIN"]
config["EDK_TOOLS_PATH"] = os.path.join(config["WORKSPACE"],
config["EDK_TOOLS_PATH"])
config["BASE_TOOLS_PATH"] = config["EDK_TOOLS_PATH"]
config["EDK_TOOLS_BIN"] = os.path.join(config["WORKSPACE"],
config["EDK_TOOLS_BIN"])
#
# Board may have different FSP binary between API and Dispatch modes.
# In API mode if FSP_BIN_PKG_FOR_API_MODE is assigned, it should
# override FSP_BIN_PKG.
#
if config.get("API_MODE_FSP_WRAPPER_BUILD", "FALSE") == "TRUE":
if config.get("FSP_BIN_PKG_FOR_API_MODE") is not None:
config['FSP_BIN_PKG'] = config['FSP_BIN_PKG_FOR_API_MODE']
config["PLATFORM_FSP_BIN_PACKAGE"] = \
os.path.join(config['WORKSPACE_FSP_BIN'], config['FSP_BIN_PKG'])
config['PROJECT_DSC'] = os.path.join(config["WORKSPACE_PLATFORM"],
config['PROJECT_DSC'])
config['BOARD_PKG_PCD_DSC'] = os.path.join(config["WORKSPACE_PLATFORM"],
config['BOARD_PKG_PCD_DSC'])
config["CONF_PATH"] = os.path.join(config["WORKSPACE"], "Conf")
# get the python path
if os.environ.get("PYTHON_HOME") is None:
if os.environ.get("PYTHONPATH") is not None:
config["PYTHON_HOME"] = os.environ.get("PYTHONPATH")
else:
config["PYTHON_HOME"] = os.path.dirname(sys.executable)
config["PYTHONPATH"] = config["PYTHON_HOME"]
if config.get("PYTHON_HOME") is None or \
not os.path.exists(config.get("PYTHON_HOME")):
print("PYTHON_HOME environment variable is not found")
sys.exit(1)
# if python is installed, disable the binary base tools.
# python is installed if this code is running :)
if config.get("PYTHON_HOME") is not None:
if config.get("EDK_TOOLS_BIN") is not None:
del config["EDK_TOOLS_BIN"]
# Run edk setup and update config
if os.name == 'nt':
edk2_setup_cmd = [os.path.join(config["EFI_SOURCE"], "edksetup"),
"Rebuild"]
if config.get("EDK_SETUP_OPTION") and \
config["EDK_SETUP_OPTION"] != " ":
edk2_setup_cmd.append(config["EDK_SETUP_OPTION"])
_, _, result, return_code = execute_script(edk2_setup_cmd,
config,
collect_env=True,
shell=True)
if return_code == 0 and result is not None and isinstance(result,
dict):
config.update(result)
# nmake BaseTools source
# and enable BaseTools source build
shell = True
command = ["nmake", "-f", os.path.join(config["BASE_TOOLS_PATH"],
"Makefile")]
if os.name == "posix": # linux
shell = False
command = ["make", "-C", os.path.join(config["BASE_TOOLS_PATH"])]
_, _, result, return_code = execute_script(command, config, shell=shell)
if return_code != 0:
build_failed(config)
#
# build platform silicon tools
#
# save the current workspace
saved_work_directory = config["WORKSPACE"]
# change the workspace to silicon tools directory
config["WORKSPACE"] = os.path.join(config["WORKSPACE_SILICON"], "Tools")
command = ["nmake"]
if os.name == "posix": # linux
command = ["make"]
# add path to generated FitGen binary to
# environment path variable
config["PATH"] += os.pathsep + \
os.path.join(config["BASE_TOOLS_PATH"],
"Source", "C", "bin")
# build the silicon tools
_, _, result, return_code = execute_script(command, config, shell=shell)
if return_code != 0:
build_failed(config)
# restore WORKSPACE environment variable
config["WORKSPACE"] = saved_work_directory
config["SILENT_MODE"] = 'TRUE' if silent else 'FALSE'
print("==============================================")
if os.path.isfile(os.path.join(config['WORKSPACE'], "Prep.log")):
os.remove(os.path.join(config['WORKSPACE'], "Prep.log"))
config["PROJECT"] = os.path.join(config["PLATFORM_BOARD_PACKAGE"],
config["BOARD"])
# Setup Build
# @todo: Need better TOOL_CHAIN_TAG detection
if toolchain is not None:
config["TOOL_CHAIN_TAG"] = toolchain
elif config.get("TOOL_CHAIN_TAG") is None:
if os.name == 'nt':
config["TOOL_CHAIN_TAG"] = "VS2015"
else:
config["TOOL_CHAIN_TAG"] = "GCC5"
# echo Show CL revision
config["PrepRELEASE"] = build_type
if build_type == "DEBUG":
config["TARGET"] = 'DEBUG'
config["TARGET_SHORT"] = 'D'
else:
config["TARGET"] = 'RELEASE'
config["TARGET_SHORT"] = 'R'
# set BUILD_DIR_PATH path
config["BUILD_DIR_PATH"] = os.path.join(config["WORKSPACE"],
'Build',
config["PROJECT"],
"{}_{}".format(
config["TARGET"],
config["TOOL_CHAIN_TAG"]))
# set BUILD_DIR path
config["BUILD_DIR"] = os.path.join('Build',
config["PROJECT"],
"{}_{}".format(
config["TARGET"],
config["TOOL_CHAIN_TAG"]))
config["BUILD_X64"] = os.path.join(config["BUILD_DIR_PATH"], 'X64')
config["BUILD_IA32"] = os.path.join(config["BUILD_DIR_PATH"], 'IA32')
if not os.path.isdir(config["BUILD_DIR_PATH"]):
try:
os.makedirs(config["BUILD_DIR_PATH"])
except OSError:
print("Error while creating Build folder")
sys.exit(1)
# Set FSP_WRAPPER_BUILD
if config['FSP_WRAPPER_BUILD'] == "TRUE":
# Create dummy Fsp_Rebased_S_padded.fd to build the BiosInfo.inf
# if it is wrapper build, due to the SECTION inclusion
open(os.path.join(config["WORKSPACE_FSP_BIN"],
config["FSP_BIN_PKG"],
"Fsp_Rebased_S_padded.fd"), 'w').close()
if not os.path.isdir(config["BUILD_X64"]):
try:
os.mkdir(config["BUILD_X64"])
except OSError:
print("Error while creating {}".format(config["BUILD_X64"]))
sys.exit(1)
# update config file with changes
update_target_file(config)
# Additional pre build scripts for this platform
result = pre_build_ex(config)
if result is not None and isinstance(result, dict):
config.update(result)
# print user settings
print("BIOS_SIZE_OPTION = {}".format(config["BIOS_SIZE_OPTION"]))
print("EFI_SOURCE = {}".format(config["EFI_SOURCE"]))
print("TARGET = {}".format(config["TARGET"]))
print("TARGET_ARCH = {}".format("IA32 X64"))
print("TOOL_CHAIN_TAG = {}".format(config["TOOL_CHAIN_TAG"]))
print("WORKSPACE = {}".format(config["WORKSPACE"]))
print("WORKSPACE_CORE = {}".format(config["WORKSPACE_CORE"]))
print("EXT_BUILD_FLAGS = {}".format(config["EXT_BUILD_FLAGS"]))
return config
def build(config):
"""Builds the BIOS image
:param config: The environment variables to be used
in the build process
:type config: Dictionary
:returns: nothing
"""
if config["FSP_WRAPPER_BUILD"] == "TRUE":
pattern = "Fsp_Rebased.*\\.fd$"
file_dir = os.path.join(config['WORKSPACE_FSP_BIN'],
config['FSP_BIN_PKG'])
for item in os.listdir(file_dir):
if re.search(pattern, item):
os.remove(os.path.join(file_dir, item))
command = [os.path.join(config['PYTHON_HOME'], "python"),
os.path.join(config['WORKSPACE_PLATFORM'],
config['PLATFORM_PACKAGE'],
'Tools', 'Fsp',
'RebaseFspBinBaseAddress.py'),
os.path.join(config['WORKSPACE_PLATFORM'],
config['FLASH_MAP_FDF']),
os.path.join(config['WORKSPACE_FSP_BIN'],
config['FSP_BIN_PKG']),
"Fsp.fd",
"0x0"]
_, _, _, return_code = execute_script(command, config, shell=False)
if return_code != 0:
print("ERROR:RebaseFspBinBaseAddress failed")
sys.exit(return_code)
# create Fsp_Rebased.fd which is Fsp_Rebased_S.fd +
# Fsp_Rebased_M + Fsp_Rebased_T
with open(os.path.join(file_dir, "Fsp_Rebased_S.fd"), 'rb') as fsp_s, \
open(os.path.join(file_dir,
"Fsp_Rebased_M.fd"), 'rb') as fsp_m, \
open(os.path.join(file_dir,
"Fsp_Rebased_T.fd"), 'rb') as fsp_t:
fsp_rebased = fsp_s.read() + fsp_m.read() + fsp_t.read()
with open(os.path.join(file_dir,
"Fsp_Rebased.fd"), 'wb') as new_fsp:
new_fsp.write(fsp_rebased)
if not os.path.isfile(os.path.join(file_dir, "Fsp_Rebased.fd")):
print("!!! ERROR:failed to create fsp!!!")
sys.exit(1)
# Output the build variables the user has selected.
print("==========================================")
print(" User Selected build options:")
print(" SILENT_MODE = ", config.get("SILENT_MODE"))
print(" REBUILD_MODE = ", config.get("REBUILD_MODE"))
print(" BUILD_ROM_ONLY = ", config.get("BUILD_ROM_ONLY"))
print(" BINARY_CACHE_CMD_LINE = ", config.get("HASH"), config.get("BINARY_CACHE_CMD_LINE"))
print("==========================================")
command = ["build", "-n", config["NUMBER_OF_PROCESSORS"]]
if config["REBUILD_MODE"] and config["REBUILD_MODE"] != "":
command.append(config["REBUILD_MODE"])
if config["EXT_BUILD_FLAGS"] and config["EXT_BUILD_FLAGS"] != "":
ext_build_flags = config["EXT_BUILD_FLAGS"].split(" ")
ext_build_flags = [x.strip() for x in ext_build_flags]
ext_build_flags = [x for x in ext_build_flags if x != ""]
command.extend(ext_build_flags)
if config.get('BINARY_CACHE_CMD_LINE'):
command.append(config['HASH'])
command.append(config['BINARY_CACHE_CMD_LINE'])
if config.get("SILENT_MODE", "FALSE") == "TRUE":
command.append("--silent")
command.append("--quiet")
else:
command.append("--log=" + config.get("BUILD_LOG", "Build.log"))
command.append("--report-file=" +
config.get("BUILD_REPORT", "BuildReport.log"))
if config.get("VERBOSE", "FALSE") == "TRUE":
command.append("--verbose")
if config.get("MAX_SOCKET") is not None:
command.append("-D")
command.append("MAX_SOCKET=" + config["MAX_SOCKET"])
if config.get("API_MODE_FSP_WRAPPER_BUILD", "FALSE") == "TRUE":
#Override PCD to enable API mode FSP wrapper.
command.append("--pcd")
command.append("gIntelFsp2WrapperTokenSpaceGuid.PcdFspModeSelection=1")
if config.get("PERFORMANCE_BUILD", "FALSE") == "TRUE":
command.append("--pcd")
command.append("gMinPlatformPkgTokenSpaceGuid.PcdPerformanceEnable=True")
shell = True
if os.name == "posix":
shell = False
_, _, _, exit_code = execute_script(command, config, shell=shell)
if exit_code != 0:
build_failed(config)
# Additional build scripts for this platform
result = build_ex(config)
if result is not None and isinstance(result, dict):
config.update(result)
return config
def post_build(config):
"""Post build process of BIOS image
:param config: The environment variables to be used in the build process
:type config: Dictionary
:returns: nothing
"""
print("Running post_build to complete the build process.")
board_fd = config["BOARD"].upper()
final_fd = os.path.join(config["BUILD_DIR_PATH"], "FV",
"{}.fd".format(board_fd))
if config["BIOS_INFO_GUID"]:
# Generate the fit table
print("Generating FIT ...")
if os.path.isfile(final_fd):
temp_fd = os.path.join(config["BUILD_DIR_PATH"], "FV",
"{}_.fd".format(board_fd))
shell = True
command = ["FitGen", "-D",
final_fd, temp_fd, "-NA",
"-I", config["BIOS_INFO_GUID"]] #@todo: Need mechanism to add additional options to the FitGen command line
if os.name == "posix": # linux
shell = False
_, _, result, return_code = execute_script(command, config, shell=shell)
if return_code != 0:
print("Error while generating fit")
else:
# copy output to final binary
shutil.copyfile(temp_fd, final_fd)
# remove temp file
os.remove(temp_fd)
else:
print("{} does not exist".format(final_fd))
# Additional build scripts for this platform
result = post_build_ex(config)
if result is not None and isinstance(result, dict):
config.update(result)
# cleanup
pattern = "Fsp_Rebased.*\\.fd$"
file_dir = os.path.join(config['WORKSPACE_FSP_BIN'],
config['FSP_BIN_PKG'])
for item in os.listdir(file_dir):
if re.search(pattern, item):
os.remove(os.path.join(file_dir, item))
if config.get("DYNAMIC_BUILD_INIT_FILES") is not None:
for item in config["DYNAMIC_BUILD_INIT_FILES"].split(","):
try:
os.remove(item) # remove __init__.py
os.remove(item + "c") # remove __init__.pyc as well
except OSError:
pass
print("Done")
if os.path.isfile(final_fd):
print("Fd file can be found at {}".format(final_fd))
def build_failed(config):
"""Displays results when build fails
:param config: The environment variables used in the build process
:type config: Dictionary
:returns: nothing
"""
print(" The EDKII BIOS Build has failed!")
# clean up
if config.get("DYNAMIC_BUILD_INIT_FILES") is not None:
for item in config["DYNAMIC_BUILD_INIT_FILES"].split(","):
if os.path.isfile(item):
try:
os.remove(item) # remove __init__.py
os.remove(item + "c") # remove __init__.pyc as well
except OSError:
pass
sys.exit(1)
def import_platform_lib(path, function):
"""Imports custom functions for the platforms being built
:param path: the location of the custom build script to be executed
:type path: String
    :param function: the name of the function to be executed
    :type function: String
:returns: nothing
"""
if path.endswith(".py"):
path = path[:-3]
path = path.replace(os.sep, ".")
module = import_module(path)
lib = getattr(module, function)
return lib
def pre_build_ex(config):
""" An extension of the pre_build process as defined platform
specific pre_build setup script
:param config: The environment variables used in the pre build process
:type config: Dictionary
:returns: config dictionary
:rtype: Dictionary
"""
if config.get("ADDITIONAL_SCRIPTS"):
try:
platform_function =\
import_platform_lib(config["ADDITIONAL_SCRIPTS"],
"pre_build_ex")
functions = {"execute_script": execute_script}
return platform_function(config, functions)
except ImportError as error:
print(config["ADDITIONAL_SCRIPTS"], str(error))
build_failed(config)
return None
def build_ex(config):
""" An extension of the build process as defined platform
specific build setup script
:param config: The environment variables used in the build process
:type config: Dictionary
:returns: config dictionary
:rtype: Dictionary
"""
if config.get("ADDITIONAL_SCRIPTS"):
try:
platform_function =\
import_platform_lib(config["ADDITIONAL_SCRIPTS"],
"build_ex")
functions = {"execute_script": execute_script}
return platform_function(config, functions)
except ImportError as error:
print("error", config["ADDITIONAL_SCRIPTS"], str(error))
build_failed(config)
return None
def post_build_ex(config):
""" An extension of the post build process as defined platform
specific build setup script
:param config: The environment variables used in the post build
process
:type config: Dictionary
:returns: config dictionary
:rtype: Dictionary
"""
if config.get("ADDITIONAL_SCRIPTS"):
try:
platform_function =\
import_platform_lib(config["ADDITIONAL_SCRIPTS"],
"post_build_ex")
functions = {"execute_script": execute_script}
return platform_function(config, functions)
except ImportError as error:
print(config["ADDITIONAL_SCRIPTS"], str(error))
build_failed(config)
return None
def clean_ex(config):
""" An extension of the platform cleaning
:param config: The environment variables used in the clean process
:type config: Dictionary
:returns: config dictionary
:rtype: Dictionary
"""
if config.get("ADDITIONAL_SCRIPTS"):
try:
platform_function =\
import_platform_lib(config["ADDITIONAL_SCRIPTS"],
"clean_ex")
functions = {"execute_script": execute_script}
return platform_function(config, functions)
except ImportError as error:
print(config["ADDITIONAL_SCRIPTS"], str(error))
build_failed(config)
return None
def get_environment_variables(std_out_str, marker):
"""Gets the environment variables from a process
    :param std_out_str: The captured stdout of the process
    :type std_out_str: String
    :param marker: A marker string that delimits the block of environment
        variables printed to std_out
    :type marker: String
:returns: The environment variables read from the process' std_out pipe
:rtype: Tuple
"""
start_env_update = False
environment_vars = {}
out_put = ""
for line in std_out_str.split("\n"):
if start_env_update and len(line.split("=")) == 2:
key, value = line.split("=")
environment_vars[key] = value
else:
out_put += "\n" + line.replace(marker, "")
if marker in line:
if start_env_update:
start_env_update = False
else:
start_env_update = True
return (out_put, environment_vars)
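# Illustrative behaviour (values hypothetical): for a std_out_str of
#   "build ok\n-----env-----\nFOO=bar\n-----env-----"
# and marker '-----env-----', the function returns the non-environment output
# together with the dictionary {'FOO': 'bar'}.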
def execute_script(command, env_variables, collect_env=False,
enable_std_pipe=False, shell=True):
"""launches a process that executes a script/shell command passed to it
:param command: The command/script with its commandline
arguments to be executed
:type command: List:String
:param env_variables: Environment variables passed to the process
:type env_variables: String
:param collect_env: Enables the collection of environment variables
when process execution is done
:type collect_env: Boolean
    :param enable_std_pipe: Enables capturing of the process output via pipes
    :type enable_std_pipe: Boolean
:returns: a tuple of std_out, stderr , environment variables,
return code
:rtype: Tuple: (std_out, stderr , enVar, return_code)
"""
print("Calling " + " ".join(command))
env_marker = '-----env-----'
env = {}
kwarg = {"env": env_variables,
"universal_newlines": True,
"shell": shell,
"cwd": env_variables["WORKSPACE"]}
if enable_std_pipe or collect_env:
kwarg["stdout"] = subprocess.PIPE
kwarg["stderr"] = subprocess.PIPE
# collect environment variables
if collect_env:
# get the binary that prints environment variables based on os
if os.name == 'nt':
get_var_command = "set"
else:
get_var_command = "env"
# modify the command to print the environment variables
if isinstance(command, list):
command += ["&&", "echo", env_marker, "&&",
get_var_command, "&&", "echo", env_marker]
else:
command += " " + " ".join(["&&", "echo", env_marker,
"&&", get_var_command,
"&&", "echo", env_marker])
# execute the command
execute = subprocess.Popen(command, **kwarg)
std_out, stderr = execute.communicate()
code = execute.returncode
# wait for process to be done
execute.wait()
# if collect environment variables
if collect_env:
# get the new environment variables
std_out, env = get_environment_variables(std_out, env_marker)
return (std_out, stderr, env, code)
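# Minimal usage sketch (command is hypothetical; the environment passed in must
# contain a WORKSPACE key, which execute_script() uses as the working directory):
#
#   env = os.environ.copy()
#   env["WORKSPACE"] = os.getcwd()
#   out, err, new_env, rc = execute_script(["git", "--version"], env,
#                                          enable_std_pipe=True, shell=False)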
def patch_config(config):
""" An extension of the platform cleaning
:param config: The environment variables used in the build process
:type config: Dictionary
:returns: config dictionary
:rtype: Dictionary
"""
new_config = {}
for key in config:
new_config[str(key)] = str(config[key].replace("/", os.sep))
return new_config
def py_27_fix(config):
""" Prepares build for python 2.7 => build
:param config: The environment variables used in the build process
:type config: Dictionary
:returns: config dictionary
:rtype: Dictionary
"""
if not sys.version_info > (3, 0):
path_list = []
# create __init__.py in directories in this path
if config.get("ADDITIONAL_SCRIPTS"):
# get the directory
path_to_directory =\
os.path.dirname(config.get("ADDITIONAL_SCRIPTS"))
path = ""
for directories in path_to_directory.split(os.sep):
path += directories + os.sep
init_file = path + os.sep + "__init__.py"
if not os.path.isfile(init_file):
open(init_file, 'w').close()
path_list.append(init_file)
config["DYNAMIC_BUILD_INIT_FILES"] = ",".join(path_list)
return config
def clean(build_config, board=False):
"""Cleans the build workspace
:param config: The environment variables used in the build process
:type config: Dictionary
:param board: This flag specifies specific board clean
:type board: Bool
:returns: nothing
"""
# patch the config
build_config = patch_config(build_config)
# get current environment variables
config = os.environ.copy()
# update it with the build variables
config.update(build_config)
if config.get('WORKSPACE') is None or not config.get('WORKSPACE'):
config["WORKSPACE"] =\
os.path.abspath(os.path.join("..", "..", "..", ""))
# build cleanall
print("Cleaning directories...")
if board:
platform_pkg = config.get("PLATFORM_BOARD_PACKAGE", None)
if platform_pkg is None or\
not os.path.isdir(os.path.join(config['WORKSPACE'],
"Build", platform_pkg)):
print("Platform package not found")
sys.exit(1)
else:
print("Removing " + os.path.join(config['WORKSPACE'],
"Build", platform_pkg))
shutil.rmtree(os.path.join(config['WORKSPACE'],
"Build", platform_pkg))
else:
if os.path.isdir(os.path.join(config['WORKSPACE'], "Build")):
print("Removing " + os.path.join(config['WORKSPACE'], "Build"))
shutil.rmtree(os.path.join(config['WORKSPACE'], "Build"))
if os.path.isdir(os.path.join(config['WORKSPACE'], "Conf")):
print("Removing " + os.path.join(config['WORKSPACE'], "Conf"))
shutil.rmtree(os.path.join(config['WORKSPACE'], "Conf"))
print("Cleaning files...")
if os.path.isfile(os.path.join(config['WORKSPACE'],
config.get("BUILD_REPORT",
"BuildReport.log"))):
print("Removing ", os.path.join(config['WORKSPACE'],
config.get("BUILD_REPORT",
"BuildReport.log")))
os.remove(os.path.join(config['WORKSPACE'],
config.get("BUILD_REPORT", "BuildReport.log")))
print(" All done...")
sys.exit(0)
def update_target_file(config):
"""Updates Conf's target file that will be used in the build
:param config: The environment variables used in the build process
:type config: Dictionary
:returns: True if update was successful and False if update fails
:rtype: Boolean
"""
contents = None
result = False
with open(os.path.join(config["CONF_PATH"], "target.txt"), 'r') as target:
contents = target.readlines()
options_list = ['ACTIVE_PLATFORM', 'TARGET',
'TARGET_ARCH', 'TOOL_CHAIN_TAG', 'BUILD_RULE_CONF']
modified = []
# remove these options from the config file
for line in contents:
if line.replace(" ", "")[0] != '#' and\
any(opt in line for opt in options_list):
continue
modified.append(line)
# replace with config options provided
string = "{} = {}\n".format("ACTIVE_PLATFORM",
os.path.join(
config['WORKSPACE_PLATFORM'],
config['PLATFORM_BOARD_PACKAGE'],
config['BOARD'],
config['PROJECT_DSC']))
modified.append(string)
string = "{} = {}\n".format("TARGET", config['TARGET'])
modified.append(string)
string = "TARGET_ARCH = IA32 X64\n"
modified.append(string)
string = "{} = {}\n".format("TOOL_CHAIN_TAG", config['TOOL_CHAIN_TAG'])
modified.append(string)
string = "{} = {}\n".format("BUILD_RULE_CONF",
os.path.join("Conf", "build_rule.txt"))
modified.append(string)
if modified is not None:
with open(os.path.join(config["WORKSPACE"],
"Conf", "target.txt"), 'w') as target:
for line in modified:
target.write(line)
result = True
return result
def get_config():
"""Reads the default projects config file
    :returns: The config defined in the Build.cfg file
:rtype: Dictionary
"""
config_file = configparser.RawConfigParser()
config_file.optionxform = str
config_file.read('build.cfg')
config_dictionary = {}
for section in config_file.sections():
dictionary = dict(config_file.items(section))
config_dictionary[section] = dictionary
return config_dictionary
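# Illustrative build.cfg layout (section and option names hypothetical, except
# PLATFORMS, which get_platform_config() below relies on):
#
#   [PLATFORMS]
#   BoardX = Platform/Intel/SomeOpenBoardPkg/BoardX/build_config.cfg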
def get_platform_config(platform_name, config_data):
""" Reads the platform specific config file
    :param platform_name: The name of the platform to be built
    :type platform_name: String
    :param config_data: The project configuration read from Build.cfg
        (must contain the PLATFORMS section)
    :type config_data: Dictionary
    :returns: The config defined in the platform-specific config file
    :rtype: Dictionary
"""
config = {}
platform_data = config_data.get("PLATFORMS")
path = platform_data.get(platform_name)
config_file = configparser.RawConfigParser()
config_file.optionxform = str
config_file.read(path)
for section in config_file.sections():
config[section] = dict(config_file.items(section))
return config
def get_cmd_config_arguments(arguments):
"""Get commandline config arguments
    :param arguments: The parsed commandline arguments
    :type arguments: argparse.Namespace
:returns: The config dictionary built from the commandline arguments
:rtype: Dictionary
"""
result = {}
if arguments.capsule is True:
result["CAPSULE_BUILD"] = "1"
if arguments.performance is True:
result["PERFORMANCE_BUILD"] = "TRUE"
if arguments.fsp is True:
result["FSP_WRAPPER_BUILD"] = "TRUE"
if arguments.fspapi is True:
result["API_MODE_FSP_WRAPPER_BUILD"] = "TRUE"
if not arguments.UseHashCache:
result['BINARY_CACHE_CMD_LINE'] = ''
elif arguments.BinCacheDest:
result['HASH'] = '--hash'
result['BINARY_CACHE_CMD_LINE'] = '--binary-destination=%s' % arguments.BinCacheDest
elif arguments.BinCacheSource:
result['HASH'] = '--hash'
result['BINARY_CACHE_CMD_LINE'] = '--binary-source=%s' % arguments.BinCacheSource
else:
result['BINARY_CACHE_CMD_LINE'] = ''
return result
def get_cmd_arguments(build_config):
""" Get commandline inputs from user
param config_data: The environment variables to be
used in the build process
:type config_data: Dictionary
:returns: The commandline arguments input by the user
:rtype: argparse object
"""
class PrintPlatforms(argparse.Action):
""" this is an argparse action that lists the available platforms
"""
def __call__(self, parser, namespace, values, option_string=None):
print("Platforms:")
for key in build_config.get("PLATFORMS"):
print(" " + key)
setattr(namespace, self.dest, values)
sys.exit(0)
# get the build commands
parser = argparse.ArgumentParser(description="Build Help")
parser.add_argument('--platform', '-p', dest="platform",
help='the platform to build',
choices=build_config.get("PLATFORMS"),
required=('-l' not in sys.argv and
'--cleanall' not in sys.argv))
parser.add_argument('--toolchain', '-t', dest="toolchain",
help="using the Tool Chain Tagname to build \
                        the platform, overriding \
target.txt's TOOL_CHAIN_TAG definition")
parser.add_argument("--DEBUG", '-d', help="debug flag",
action='store_const', dest="target",
const="DEBUG", default="DEBUG")
parser.add_argument("--RELEASE", '-r', help="release flag",
action='store_const',
dest="target", const="RELEASE")
parser.add_argument("--TEST_RELEASE", '-tr', help="test Release flag",
action='store_const',
dest="target", const="TEST_RELEASE")
parser.add_argument("--RELEASE_PDB", '-rp', help="release flag",
action='store_const', dest="target",
const="RELEASE_PDB")
parser.add_argument('--list', '-l', action=PrintPlatforms,
help='lists available platforms', nargs=0)
parser.add_argument('--cleanall', dest='clean_all',
help='cleans all', action='store_true')
parser.add_argument('--clean', dest='clean',
help='cleans specific platform', action='store_true')
parser.add_argument("--capsule", help="capsule build enabled",
action='store_true', dest="capsule")
parser.add_argument("--silent", help="silent build enabled",
action='store_true', dest="silent")
parser.add_argument("--performance", help="performance build enabled",
action='store_true', dest="performance")
parser.add_argument("--fsp", help="fsp wrapper build enabled",
action='store_true', dest="fsp")
parser.add_argument("--fspapi", help="API mode fsp wrapper build enabled",
action='store_true', dest="fspapi")
parser.add_argument("--hash", action="store_true", dest="UseHashCache", default=False,
help="Enable hash-based caching during build process.")
parser.add_argument("--binary-destination", help="Generate a cache of binary \
files in the specified directory.",
action='store', dest="BinCacheDest")
parser.add_argument("--binary-source", help="Consume a cache of binary files \
from the specified directory.",
action='store', dest="BinCacheSource")
return parser.parse_args()
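# Illustrative invocations of this script (the platform name is hypothetical and
# must be one of the keys printed by the --list option):
#
#   python build_bios.py -l                             # list available platforms
#   python build_bios.py -p SomeBoardPkg -d             # DEBUG build of one platform
#   python build_bios.py -p SomeBoardPkg -r -t VS2019   # RELEASE build, explicit tool chain
#   python build_bios.py -p SomeBoardPkg --clean        # clean one platform's build output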
def keyboard_interruption(int_signal, int_frame):
""" Catches a keyboard interruption handler
param int_signal: The signal this handler is called with
:type int_signal: Signal
param int_frame: The signal this handler is called with
:type int_frame: frame
:rtype: nothing
"""
print("Signal #: {} Frame: {}".format(int_signal, int_frame))
print("Quiting...")
sys.exit(0)
def main():
""" The main function of this module
:rtype: nothing
"""
# to quit the build
signal.signal(signal.SIGINT, keyboard_interruption)
# get general build configurations
build_config = get_config()
# get commandline parameters
arguments = get_cmd_arguments(build_config)
if arguments.clean_all:
clean(build_config.get("DEFAULT_CONFIG"))
# get platform specific config
platform_config = get_platform_config(arguments.platform, build_config)
# update general build config with platform specific config
config = build_config.get("DEFAULT_CONFIG")
config.update(platform_config.get("CONFIG"))
# if user selected clean
if arguments.clean:
clean(config, board=True)
# Override config with cmd arguments
cmd_config_args = get_cmd_config_arguments(arguments)
config.update(cmd_config_args)
# get pre_build configurations
config = pre_build(config,
build_type=arguments.target,
toolchain=arguments.toolchain,
silent=arguments.silent)
# build selected platform
config = build(config)
# post build
post_build(config)
if __name__ == "__main__":
try:
EXIT_CODE = 0
main()
except Exception as error:
EXIT_CODE = 1
traceback.print_exc()
sys.exit(EXIT_CODE)
| edk2-platforms-master | Platform/Intel/build_bios.py |
## @file
# PreBuild operations for Vlv2TbltDevicePkg
#
# Copyright (c) 2019, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
PreBuild
'''
import os
import sys
import argparse
import subprocess
import glob
import shutil
import struct
import datetime
#
# Globals for help information
#
__prog__ = 'PreBuild'
__copyright__ = 'Copyright (c) 2019, Intel Corporation. All rights reserved.'
__description__ = 'Vlv2TbltDevicePkg pre-build operations.\n'
#
# Globals
#
gWorkspace = ''
gBaseToolsPath = ''
gArgs = None
def LogAlways(Message):
sys.stdout.write (__prog__ + ': ' + Message + '\n')
sys.stdout.flush()
def Log(Message):
global gArgs
if not gArgs.Verbose:
return
sys.stdout.write (__prog__ + ': ' + Message + '\n')
sys.stdout.flush()
def Error(Message, ExitValue=1):
sys.stderr.write (__prog__ + ': ERROR: ' + Message + '\n')
sys.exit (ExitValue)
def RelativePath(target):
global gWorkspace
    Log('RelativePath ' + target)
return os.path.relpath (target, gWorkspace)
def NormalizePath(target):
if isinstance(target, tuple):
return os.path.normpath (os.path.join (*target))
else:
return os.path.normpath (target)
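# Illustrative behaviour of NormalizePath() (hypothetical paths, results shown
# for a POSIX host; a Windows host would use '\\' separators):
#   NormalizePath(('Build', 'SomeOutputDir', 'FV'))  -> 'Build/SomeOutputDir/FV'
#   NormalizePath('Build//FV/../FV')                 -> 'Build/FV'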
def RemoveFile(target):
target = NormalizePath(target)
if not target or target == os.pathsep:
Error ('RemoveFile() invalid target')
if os.path.exists(target):
os.remove (target)
Log ('remove %s' % (RelativePath (target)))
def RemoveDirectory(target):
target = NormalizePath(target)
if not target or target == os.pathsep:
Error ('RemoveDirectory() invalid target')
if os.path.exists(target):
Log ('rmdir %s' % (RelativePath (target)))
shutil.rmtree(target)
def CreateDirectory(target):
target = NormalizePath(target)
if not os.path.exists(target):
Log ('mkdir %s' % (RelativePath (target)))
os.makedirs (target)
def Copy(src, dst):
src = NormalizePath(src)
dst = NormalizePath(dst)
for File in glob.glob(src):
Log ('copy %s -> %s' % (RelativePath (File), RelativePath (dst)))
shutil.copy (File, dst)
def GenCapsuleDevice (BaseName, PayloadFileName, Guid, Version, Lsv, CapsulesPath, CapsulesSubDir):
global gBaseToolsPath
LogAlways ('Generate Capsule: {0} {1:08x} {2:08x} {3}'.format (Guid, Version, Lsv, PayloadFileName))
VersionString = '.'.join([str(ord(x)) for x in struct.pack('>I', Version).decode()])
FmpCapsuleFile = NormalizePath ((CapsulesPath, CapsulesSubDir, BaseName + '.' + VersionString + '.cap'))
Command = GenerateCapsuleCommand.format (
FMP_CAPSULE_GUID = Guid,
FMP_CAPSULE_VERSION = Version,
FMP_CAPSULE_LSV = Lsv,
BASE_TOOLS_PATH = gBaseToolsPath,
FMP_CAPSULE_FILE = FmpCapsuleFile,
FMP_CAPSULE_PAYLOAD = PayloadFileName
)
Command = ' '.join(Command.splitlines()).strip()
if gArgs.Verbose:
Command = Command + ' -v'
Log (Command)
Process = subprocess.Popen(Command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
ProcessOutput = Process.communicate()
if Process.returncode == 0:
Log (ProcessOutput[0].decode())
else:
LogAlways (Command)
LogAlways (ProcessOutput[0].decode())
Error ('GenerateCapsule returned an error')
Copy (PayloadFileName, (CapsulesPath, 'firmware.bin'))
MetaInfoXml = MetaInfoXmlTemplate
MetaInfoXml = MetaInfoXml.replace ('FMP_CAPSULE_GUID', Guid)
MetaInfoXml = MetaInfoXml.replace ('FMP_CAPSULE_BASE_NAME', BaseName)
MetaInfoXml = MetaInfoXml.replace ('FMP_CAPSULE_VERSION_DECIMAL', str(Version))
MetaInfoXml = MetaInfoXml.replace ('FMP_CAPSULE_STRING', VersionString)
MetaInfoXml = MetaInfoXml.replace ('FMP_CAPSULE_DATE', str(datetime.date.today()))
f = open (NormalizePath ((CapsulesPath, 'firmware.metainfo.xml')), 'w')
f.write(MetaInfoXml)
f.close()
f = open (NormalizePath ((CapsulesPath, 'Lvfs.ddf')), 'w')
f.write(LvfsDdfTemplate)
f.close()
if sys.platform == "win32":
Command = 'makecab /f ' + NormalizePath ((CapsulesPath, 'Lvfs.ddf'))
else:
Command = 'gcab --create firmware.cab firmware.bin firmware.metainfo.xml'
Log (Command)
Process = subprocess.Popen(Command, cwd=CapsulesPath, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
ProcessOutput = Process.communicate()
if Process.returncode == 0:
Log (ProcessOutput[0].decode())
else:
LogAlways (Command)
LogAlways (ProcessOutput[0].decode())
Error ('GenerateCapsule returned an error')
FmpCabinetFile = NormalizePath ((CapsulesPath, CapsulesSubDir, BaseName + '.' + VersionString + '.cab'))
Copy ((CapsulesPath, 'firmware.cab'), FmpCabinetFile)
RemoveFile ((CapsulesPath, 'firmware.cab'))
RemoveFile ((CapsulesPath, 'setup.inf'))
RemoveFile ((CapsulesPath, 'setup.rpt'))
RemoveFile ((CapsulesPath, 'Lvfs.ddf'))
RemoveFile ((CapsulesPath, 'firmware.metainfo.xml'))
RemoveFile ((CapsulesPath, 'firmware.bin'))
BiosIdTemplate = '''
BOARD_ID = MNW2MAX
BOARD_REV = $BOARD_REV
BOARD_EXT = $ARCH
VERSION_MAJOR = 0090
BUILD_TYPE = $BUILD_TYPE
VERSION_MINOR = 01
'''
if __name__ == '__main__':
#
# Create command line argument parser object
#
parser = argparse.ArgumentParser (
prog = __prog__,
description = __description__ + __copyright__,
conflict_handler = 'resolve'
)
parser.add_argument (
'-a', '--arch', dest = 'Arch', nargs = '+', action = 'append',
required = True,
help = '''ARCHS is one of list: IA32, X64, IPF, ARM, AARCH64 or EBC,
which overrides target.txt's TARGET_ARCH definition. To
specify more archs, please repeat this option.'''
)
parser.add_argument (
'-t', '--tagname', dest = 'ToolChain', required = True,
help = '''Using the Tool Chain Tagname to build the platform,
overriding target.txt's TOOL_CHAIN_TAG definition.'''
)
parser.add_argument (
'-p', '--platform', dest = 'PlatformFile', required = True,
help = '''Build the platform specified by the DSC file name argument,
overriding target.txt's ACTIVE_PLATFORM definition.'''
)
parser.add_argument (
'-b', '--buildtarget', dest = 'BuildTarget', required = True,
help = '''Using the TARGET to build the platform, overriding
target.txt's TARGET definition.'''
)
parser.add_argument (
'--conf=', dest = 'ConfDirectory', required = True,
help = '''Specify the customized Conf directory.'''
)
parser.add_argument (
'-D', '--define', dest = 'Define', nargs='*', action = 'append',
help = '''Macro: "Name [= Value]".'''
)
parser.add_argument (
'-v', '--verbose', dest = 'Verbose', action = 'store_true',
help = '''Turn on verbose output with informational messages printed'''
)
parser.add_argument (
'--package', dest = 'Package', nargs = '*', action = 'append',
help = '''The directory name of a package of tests to copy'''
)
#
# Parse command line arguments
#
gArgs, remaining = parser.parse_known_args()
gArgs.BuildType = 'all'
for BuildType in ['all', 'fds', 'genc', 'genmake', 'clean', 'cleanall', 'modules', 'libraries', 'run']:
if BuildType in remaining:
gArgs.BuildType = BuildType
remaining.remove(BuildType)
break
gArgs.Remaining = ' '.join(remaining)
#
# Get WORKSPACE environment variable
#
try:
gWorkspace = os.environ['WORKSPACE']
except:
Error ('WORKSPACE environment variable not set')
#
# Get PACKAGES_PATH and generate prioritized list of paths
#
PathList = [gWorkspace]
try:
PathList += os.environ['PACKAGES_PATH'].split(os.pathsep)
except:
pass
#
# Determine full path to BaseTools
#
Vlv2Tbl2DevicePkgPath = ''
for Path in PathList:
if gBaseToolsPath == '':
if os.path.exists (os.path.join (Path, 'BaseTools')):
gBaseToolsPath = os.path.join (Path, 'BaseTools')
if Vlv2Tbl2DevicePkgPath == '':
if os.path.exists (os.path.join (Path, 'Vlv2TbltDevicePkg')):
Vlv2Tbl2DevicePkgPath = os.path.join (Path, 'Vlv2TbltDevicePkg')
if gBaseToolsPath == '':
Error ('Can not find BaseTools in WORKSPACE or PACKAGES_PATH')
if Vlv2Tbl2DevicePkgPath == '':
    Error ('Cannot find Vlv2TbltDevicePkg in WORKSPACE or PACKAGES_PATH')
#
# Parse OUTPUT_DIRECTORY from DSC file
#
for Path in PathList:
if os.path.exists (os.path.join (Path, gArgs.PlatformFile)):
Dsc = open (os.path.join (Path, gArgs.PlatformFile), 'r').readlines()
break
for Line in Dsc:
if Line.strip().startswith('OUTPUT_DIRECTORY'):
OutputDirectory = Line.strip().split('=')[1].strip()
break
#
# Determine full paths to EDK II build directory, EDK II build output
# directory and the CPU arch of the UEFI phase.
#
CommandDir = os.path.dirname(sys.argv[0])
EdkiiBuildDir = os.path.join (gWorkspace, OutputDirectory)
EdkiiBuildOutput = os.path.join (EdkiiBuildDir, gArgs.BuildTarget + '_' + gArgs.ToolChain)
UefiArch = gArgs.Arch[0][0]
if len (gArgs.Arch) > 1:
if ['X64'] in gArgs.Arch:
UefiArch = 'X64'
if gArgs.BuildType == 'run':
Error ("'run' target not supported")
if gArgs.BuildType == 'clean':
sys.exit (0)
#
# Create output directories to put BiosId files
#
try:
CreateDirectory ((gWorkspace, 'Build'))
except:
pass
try:
CreateDirectory ((EdkiiBuildDir))
except:
pass
try:
CreateDirectory ((EdkiiBuildOutput))
except:
pass
#
# Generate BiosId files
#
BiosId = BiosIdTemplate
if sys.platform == "win32":
# Built from a Windows Host OS
BiosId = BiosId.replace ('$BOARD_REV', 'W')
else:
# Built from a Linux/Unix/Mac Host OS
BiosId = BiosId.replace ('$BOARD_REV', 'L')
if UefiArch == 'X64':
BiosId = BiosId.replace ('$ARCH', 'X64')
else:
BiosId = BiosId.replace ('$ARCH', 'I32')
BiosId = BiosId.replace ('$BUILD_TYPE', gArgs.BuildTarget[0])
BiosIdFileName = NormalizePath ((EdkiiBuildOutput, 'BiosId.env'))
f = open (BiosIdFileName, 'w')
f.write(BiosId)
f.close()
Command = 'python ' + NormalizePath ((Vlv2Tbl2DevicePkgPath, '../Tools/GenBiosId/GenBiosId.py'))
Command = Command + ' -i ' + BiosIdFileName
Command = Command + ' -o ' + NormalizePath ((EdkiiBuildOutput, 'BiosId.bin'))
Command = Command + ' -ot ' + NormalizePath ((EdkiiBuildOutput, 'BiosId.txt'))
LogAlways (Command)
Process = subprocess.Popen(Command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
ProcessOutput = Process.communicate()
if Process.returncode == 0:
Log (ProcessOutput[0].decode())
else:
LogAlways (Command)
LogAlways (ProcessOutput[0].decode())
Error ('GenBiosId returned an error')
| edk2-platforms-master | Platform/Intel/Vlv2TbltDevicePkg/PreBuild.py |
## @file
# Generate capsules for Vlv2TbltDevicePkg
# openssl must be installed and in the path
#
# Copyright (c) 2019, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
GenCapsuleAll
'''
import os
import sys
import argparse
import subprocess
import glob
import shutil
import struct
import datetime
#
# Globals for help information
#
__prog__ = 'GenCapsuleAll'
__copyright__ = 'Copyright (c) 2019, Intel Corporation. All rights reserved.'
__description__ = 'Generate Vlv2TbltDevicePkg capsules.\n'
#
# Globals
#
gWorkspace = ''
gBaseToolsPath = ''
gArgs = None
def LogAlways(Message):
sys.stdout.write (__prog__ + ': ' + Message + '\n')
sys.stdout.flush()
def Log(Message):
global gArgs
if not gArgs.Verbose:
return
sys.stdout.write (__prog__ + ': ' + Message + '\n')
sys.stdout.flush()
def Error(Message, ExitValue=1):
sys.stderr.write (__prog__ + ': ERROR: ' + Message + '\n')
sys.exit (ExitValue)
def RelativePath(target):
global gWorkspace
    Log('RelativePath ' + target)
return os.path.relpath (target, gWorkspace)
def NormalizePath(target):
if isinstance(target, tuple):
return os.path.normpath (os.path.join (*target))
else:
return os.path.normpath (target)
def RemoveFile(target):
target = NormalizePath(target)
if not target or target == os.pathsep:
Error ('RemoveFile() invalid target')
if os.path.exists(target):
os.remove (target)
Log ('remove %s' % (RelativePath (target)))
def RemoveDirectory(target):
target = NormalizePath(target)
if not target or target == os.pathsep:
Error ('RemoveDirectory() invalid target')
if os.path.exists(target):
Log ('rmdir %s' % (RelativePath (target)))
shutil.rmtree(target)
def CreateDirectory(target):
target = NormalizePath(target)
if not os.path.exists(target):
Log ('mkdir %s' % (RelativePath (target)))
os.makedirs (target)
def Copy(src, dst):
src = NormalizePath(src)
dst = NormalizePath(dst)
for File in glob.glob(src):
Log ('copy %s -> %s' % (RelativePath (File), RelativePath (dst)))
shutil.copy (File, dst)
GenerateCapsuleCommand = '''
GenerateCapsule
--encode
--guid {FMP_CAPSULE_GUID}
--fw-version {FMP_CAPSULE_VERSION}
--lsv {FMP_CAPSULE_LSV}
--capflag PersistAcrossReset
--capflag InitiateReset
--signer-private-cert={BASE_TOOLS_PATH}/Source/Python/Pkcs7Sign/TestCert.pem
--other-public-cert={BASE_TOOLS_PATH}/Source/Python/Pkcs7Sign/TestSub.pub.pem
--trusted-public-cert={BASE_TOOLS_PATH}/Source/Python/Pkcs7Sign/TestRoot.pub.pem
-o {FMP_CAPSULE_FILE}
{FMP_CAPSULE_PAYLOAD}
'''
MetaInfoXmlTemplate = '''
<?xml version="1.0" encoding="UTF-8"?>
<component type="firmware">
<id>com.intel.FMP_CAPSULE_BASE_NAME.firmware</id>
<name>FMP_CAPSULE_BASE_NAME</name>
<summary>System firmware for the FMP_CAPSULE_BASE_NAME</summary>
<description>
Description of System firmware for the FMP_CAPSULE_BASE_NAME
</description>
<provides>
<firmware type="flashed">FMP_CAPSULE_GUID</firmware>
</provides>
<url type="homepage">http://www.tianocore.org</url>
<metadata_license>CC0-1.0</metadata_license>
<project_license>BSD</project_license>
<developer_name>Tianocore</developer_name>
<releases>
<release version="FMP_CAPSULE_VERSION_DECIMAL" date="FMP_CAPSULE_DATE">
<description>
Build FMP_CAPSULE_STRING
</description>
</release>
</releases>
<!-- most OEMs do not need to do this... -->
<custom>
<value key="LVFS::InhibitDownload"/>
</custom>
</component>
'''
LvfsDdfTemplate = '''
.OPTION EXPLICIT ; Generate errors on variable typos
.Set CabinetNameTemplate=firmware.cab ; The name of the file
.set DiskDirectoryTemplate=CDROM ; All cabinets go in a single directory
.Set Cabinet=on ;
.Set Compress=on ;
.Set DiskDirectory1=.
.Set MaxDiskSize=99999744 ; multiple of 512
;*** Files to zip ;
;
firmware.bin
firmware.metainfo.xml
;***
'''
def GenCapsuleDevice (BaseName, PayloadFileName, Guid, Version, Lsv, CapsulesPath, CapsulesSubDir):
global gBaseToolsPath
LogAlways ('Generate Capsule: {0} {1:08x} {2:08x} {3}'.format (Guid, Version, Lsv, PayloadFileName))
VersionString = '.'.join([str(ord(x)) for x in struct.pack('>I', Version).decode()])
FmpCapsuleFile = NormalizePath ((CapsulesPath, CapsulesSubDir, BaseName + '.' + VersionString + '.cap'))
Command = GenerateCapsuleCommand.format (
FMP_CAPSULE_GUID = Guid,
FMP_CAPSULE_VERSION = Version,
FMP_CAPSULE_LSV = Lsv,
BASE_TOOLS_PATH = gBaseToolsPath,
FMP_CAPSULE_FILE = FmpCapsuleFile,
FMP_CAPSULE_PAYLOAD = PayloadFileName
)
Command = ' '.join(Command.splitlines()).strip()
if gArgs.Verbose:
Command = Command + ' -v'
Log (Command)
Process = subprocess.Popen(Command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
ProcessOutput = Process.communicate()
if Process.returncode == 0:
Log (ProcessOutput[0].decode())
else:
LogAlways (Command)
LogAlways (ProcessOutput[0].decode())
Error ('GenerateCapsule returned an error')
Copy (PayloadFileName, (CapsulesPath, 'firmware.bin'))
MetaInfoXml = MetaInfoXmlTemplate
MetaInfoXml = MetaInfoXml.replace ('FMP_CAPSULE_GUID', Guid)
MetaInfoXml = MetaInfoXml.replace ('FMP_CAPSULE_BASE_NAME', BaseName)
MetaInfoXml = MetaInfoXml.replace ('FMP_CAPSULE_VERSION_DECIMAL', str(Version))
MetaInfoXml = MetaInfoXml.replace ('FMP_CAPSULE_STRING', VersionString)
MetaInfoXml = MetaInfoXml.replace ('FMP_CAPSULE_DATE', str(datetime.date.today()))
f = open (NormalizePath ((CapsulesPath, 'firmware.metainfo.xml')), 'w')
f.write(MetaInfoXml)
f.close()
f = open (NormalizePath ((CapsulesPath, 'Lvfs.ddf')), 'w')
f.write(LvfsDdfTemplate)
f.close()
if sys.platform == "win32":
Command = 'makecab /f ' + NormalizePath ((CapsulesPath, 'Lvfs.ddf'))
else:
Command = 'gcab --create firmware.cab firmware.bin firmware.metainfo.xml'
Log (Command)
Process = subprocess.Popen(Command, cwd=CapsulesPath, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
ProcessOutput = Process.communicate()
if Process.returncode == 0:
Log (ProcessOutput[0].decode())
else:
LogAlways (Command)
LogAlways (ProcessOutput[0].decode())
Error ('GenerateCapsule returned an error')
FmpCabinetFile = NormalizePath ((CapsulesPath, CapsulesSubDir, BaseName + '.' + VersionString + '.cab'))
Copy ((CapsulesPath, 'firmware.cab'), FmpCabinetFile)
RemoveFile ((CapsulesPath, 'firmware.cab'))
RemoveFile ((CapsulesPath, 'setup.inf'))
RemoveFile ((CapsulesPath, 'setup.rpt'))
RemoveFile ((CapsulesPath, 'Lvfs.ddf'))
RemoveFile ((CapsulesPath, 'firmware.metainfo.xml'))
RemoveFile ((CapsulesPath, 'firmware.bin'))
def GenCapsuleSampleDevice (SampleDeviceName, Guid, Version, Lsv, CapsulesPath, CapsulesSubDir):
BinaryPayload = SampleDeviceName.encode() + bytearray(0x18 - len (SampleDeviceName.encode()))
BinaryPayload = BinaryPayload + struct.pack('<I', Version)
BinaryPayload = BinaryPayload + struct.pack('<I', Lsv)
PayloadFileName = NormalizePath ((CapsulesPath, SampleDeviceName + '.bin'))
f = open (PayloadFileName, 'wb')
f.write(BinaryPayload)
f.close()
GenCapsuleDevice(SampleDeviceName, PayloadFileName, Guid, Version, Lsv, CapsulesPath, CapsulesSubDir)
RemoveFile (PayloadFileName)
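# The sample-device payload assembled above is a fixed 0x20-byte blob. Layout
# sketch (byte offsets), derived from the struct.pack calls:
#
#   0x00..0x17  device name, ASCII, zero-padded to 0x18 bytes
#   0x18..0x1B  firmware version, UINT32, little endian
#   0x1C..0x1F  lowest supported version (LSV), UINT32, little endian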
if __name__ == '__main__':
#
# Create command line argument parser object
#
parser = argparse.ArgumentParser (
prog = __prog__,
description = __description__ + __copyright__,
conflict_handler = 'resolve'
)
parser.add_argument (
'-a', '--arch', dest = 'Arch', nargs = '+', action = 'append',
required = True,
help = '''ARCHS is one of list: IA32, X64, IPF, ARM, AARCH64 or EBC,
which overrides target.txt's TARGET_ARCH definition. To
specify more archs, please repeat this option.'''
)
parser.add_argument (
'-t', '--tagname', dest = 'ToolChain', required = True,
help = '''Using the Tool Chain Tagname to build the platform,
overriding target.txt's TOOL_CHAIN_TAG definition.'''
)
parser.add_argument (
'-p', '--platform', dest = 'PlatformFile', required = True,
help = '''Build the platform specified by the DSC file name argument,
overriding target.txt's ACTIVE_PLATFORM definition.'''
)
parser.add_argument (
'-b', '--buildtarget', dest = 'BuildTarget', required = True,
help = '''Using the TARGET to build the platform, overriding
target.txt's TARGET definition.'''
)
parser.add_argument (
'--conf=', dest = 'ConfDirectory', required = True,
help = '''Specify the customized Conf directory.'''
)
parser.add_argument (
'-D', '--define', dest = 'Define', nargs='*', action = 'append',
help = '''Macro: "Name [= Value]".'''
)
parser.add_argument (
'-v', '--verbose', dest = 'Verbose', action = 'store_true',
help = '''Turn on verbose output with informational messages printed'''
)
parser.add_argument (
'--package', dest = 'Package', nargs = '*', action = 'append',
help = '''The directory name of a package of tests to copy'''
)
#
# Parse command line arguments
#
gArgs, remaining = parser.parse_known_args()
gArgs.BuildType = 'all'
for BuildType in ['all', 'fds', 'genc', 'genmake', 'clean', 'cleanall', 'modules', 'libraries', 'run']:
if BuildType in remaining:
gArgs.BuildType = BuildType
remaining.remove(BuildType)
break
gArgs.Remaining = ' '.join(remaining)
#
# Get WORKSPACE environment variable
#
try:
gWorkspace = os.environ['WORKSPACE']
except:
Error ('WORKSPACE environment variable not set')
#
# Get PACKAGES_PATH and generate prioritized list of paths
#
PathList = [gWorkspace]
try:
PathList += os.environ['PACKAGES_PATH'].split(os.pathsep)
except:
pass
#
# Determine full path to BaseTools
#
for Path in PathList:
if os.path.exists (os.path.join (Path, 'BaseTools')):
gBaseToolsPath = os.path.join (Path, 'BaseTools')
break
#
# Parse OUTPUT_DIRECTORY from DSC file
#
for Path in PathList:
if os.path.exists (os.path.join (Path, gArgs.PlatformFile)):
Dsc = open (os.path.join (Path, gArgs.PlatformFile), 'r').readlines()
break
for Line in Dsc:
if Line.strip().startswith('OUTPUT_DIRECTORY'):
OutputDirectory = Line.strip().split('=')[1].strip()
break
#
# Determine full paths to EDK II build directory, EDK II build output
# directory and the CPU arch of the UEFI phase.
#
CommandDir = os.path.dirname(sys.argv[0])
EdkiiBuildDir = os.path.join (gWorkspace, OutputDirectory)
EdkiiBuildOutput = os.path.join (EdkiiBuildDir, gArgs.BuildTarget + '_' + gArgs.ToolChain)
UefiArch = gArgs.Arch[0][0]
if len (gArgs.Arch) > 1:
if ['X64'] in gArgs.Arch:
UefiArch = 'X64'
CapsulesPath = NormalizePath((EdkiiBuildDir, 'Capsules'))
CapsulesSubDir = 'TestCert' + '_' + UefiArch + '_' + gArgs.BuildTarget + '_' + gArgs.ToolChain
#
# Create output directories
#
try:
CreateDirectory ((CapsulesPath))
except:
pass
try:
CreateDirectory ((CapsulesPath, CapsulesSubDir))
except:
pass
#
# Copy CapsuleApp
#
Copy ((EdkiiBuildOutput, UefiArch, 'CapsuleApp.efi'), (CapsulesPath, CapsulesSubDir))
#
# Generate capsules for the Red Sample Device
#
GenCapsuleSampleDevice('Red','72e2945a-00da-448e-9aa7-075ad840f9d4',0x00000010,0x00000000, CapsulesPath, CapsulesSubDir)
GenCapsuleSampleDevice('Red','72e2945a-00da-448e-9aa7-075ad840f9d4',0x00000011,0x00000000, CapsulesPath, CapsulesSubDir)
GenCapsuleSampleDevice('Red','72e2945a-00da-448e-9aa7-075ad840f9d4',0x00000012,0x00000000, CapsulesPath, CapsulesSubDir)
#
# Generate capsules for the Green Sample Device
#
GenCapsuleSampleDevice('Green','79179bfd-704d-4c90-9e02-0ab8d968c18a',0x00000020,0x00000020, CapsulesPath, CapsulesSubDir)
GenCapsuleSampleDevice('Green','79179bfd-704d-4c90-9e02-0ab8d968c18a',0x00000021,0x00000020, CapsulesPath, CapsulesSubDir)
GenCapsuleSampleDevice('Green','79179bfd-704d-4c90-9e02-0ab8d968c18a',0x00000022,0x00000020, CapsulesPath, CapsulesSubDir)
#
# Generate capsules for the Blue Sample Device
#
GenCapsuleSampleDevice('Blue','149da854-7d19-4faa-a91e-862ea1324be6',0x00000010,0x00000000, CapsulesPath, CapsulesSubDir)
GenCapsuleSampleDevice('Blue','149da854-7d19-4faa-a91e-862ea1324be6',0x00000011,0x00000000, CapsulesPath, CapsulesSubDir)
GenCapsuleSampleDevice('Blue','149da854-7d19-4faa-a91e-862ea1324be6',0x00000012,0x00000012, CapsulesPath, CapsulesSubDir)
GenCapsuleSampleDevice('Blue','149da854-7d19-4faa-a91e-862ea1324be6',0x00000013,0x00000012, CapsulesPath, CapsulesSubDir)
GenCapsuleSampleDevice('Blue','149da854-7d19-4faa-a91e-862ea1324be6',0x00000014,0x00000012, CapsulesPath, CapsulesSubDir)
#
# Generate capsules for Minnow Max Firmware Updates
#
RomFileName = os.path.join (EdkiiBuildOutput, 'FV', 'VLV.fd')
GenCapsuleDevice('MinnowMax', RomFileName,'4096267b-da0a-42eb-b5eb-fef31d207cb4',0x0000000C,0x00000000, CapsulesPath, CapsulesSubDir)
| edk2-platforms-master | Platform/Intel/Vlv2TbltDevicePkg/Feature/Capsule/GenerateCapsule/GenCapsuleAll.py |
## @file
# Generate the BIOS ID binary and text files from a BiosId configuration file
#
# Copyright (c) 2019, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
import os
import sys
import time
import logging
import struct
import datetime
import argparse
import platform
from collections import OrderedDict
try:
from configparser import ConfigParser
except:
from ConfigParser import ConfigParser
# Config message
_BIOS_Signature = "$IBIOSI$"
_ConfigItem = {
"BOARD_ID": {'Value': '', 'Length': 7},
"BOARD_REV": {'Value': '', 'Length': 1},
"BOARD_EXT": {'Value': '', 'Length': 3},
"BUILD_TYPE": {'Value': '', 'Length': 1},
"VERSION_MAJOR": {'Value': '0000', 'Length': 4},
"VERSION_MINOR": {'Value': '00', 'Length': 2},
}
# Version message
__prog__ = 'GenBiosId'
__description__ = 'Generate the BIOS ID binary and text files from a BiosId config file'
__copyright__ = 'Copyright (c) 2019, Intel Corporation. All rights reserved.<BR> '
__version__ = '%s Version %s' % (__prog__, '0.1 ')
# ExtraData message
_Usage = "Usage: GenBiosId -i Configfile -o OutputFile [-ot OutputTextFile]"
_ConfigSectionNotDefine = "Not support the config file format, need config section"
_ErrorMessageTemplate = '\n\n%(tool)s...\n : error: %(msg)s\n\t%(extra)s'
_ErrorLogger = logging.getLogger("tool_error")
_ErrorFormatter = logging.Formatter("%(message)s")
_ConfigLenInvalid = "Config item %s length is invalid"
_ConfigItemInvalid = "Item %s is invalid"
# Error message
INFO = 20
ERRORCODE = 50
OPTION_MISSING = 'Missing option'
FORMAT_INVALID = 'Invalid syntax/format'
FILE_NOT_FOUND = 'File/directory not found in workspace'
FORMAT_UNKNOWN_ERROR = 'Unknown error in syntax/format'
FORMAT_NOT_SUPPORTED = 'Not supported syntax/format'
def SetEdkLogger():
_ErrorLogger.setLevel(INFO)
_ErrorCh = logging.StreamHandler(sys.stderr)
_ErrorCh.setFormatter(_ErrorFormatter)
_ErrorLogger.addHandler(_ErrorCh)
return _ErrorLogger
# Output the error message and exit the tool
def EdkLogger(ToolName, Message, ExtraData):
_ErrorLogger = SetEdkLogger()
TemplateDict = {"tool": ToolName, "msg": Message, "extra": ExtraData}
LogText = _ErrorMessageTemplate % TemplateDict
_ErrorLogger.log(ERRORCODE, LogText)
sys.exit(1)
# Open the file in the correct way
def FileOpen(FileName, Mode, Buffer=-1):
def LongFilePath(FileName):
FileName = os.path.normpath(FileName)
if platform.system() == 'Windows':
if FileName.startswith('\\\\?\\'):
return FileName
if FileName.startswith('\\\\'):
return '\\\\?\\UNC\\' + FileName[2:]
if os.path.isabs(FileName):
return '\\\\?\\' + FileName
return FileName
return open(LongFilePath(FileName), Mode, Buffer)
# Parse command line options
def MyOptionParser():
parser = argparse.ArgumentParser(prog=__prog__,
description=__description__ + __copyright__ + _Usage,
conflict_handler='resolve')
parser.add_argument('-v', '--version', action='version', version=__version__,
help="show program's version number and exit")
parser.add_argument('-i', '--int', metavar='FILENAME', dest='InputFile', help="Input Config file")
parser.add_argument('-o', '--out', metavar='FILENAME', dest='OutputFile', help="Output file")
parser.add_argument('-ot', '--text', metavar='FILENAME', dest='OutputTextFile', help="Output Text file")
parser.add_argument('-nt', '--notimestamp', dest='NoTimestamp', action='store_true', default=False, help="Set timestamp to zero")
Options = parser.parse_args()
return Options
# Check the Tool for missing variables
def CheckOptions(Options):
if len(sys.argv) not in [5,6] and not (len(sys.argv) in [7,8] and Options.OutputTextFile):
EdkLogger("GenBiosId", OPTION_MISSING, ExtraData=_Usage)
elif not Options.InputFile or not Options.OutputFile:
EdkLogger("GenBiosId", OPTION_MISSING, ExtraData=_Usage)
InputFile = Options.InputFile
OutputFile = Options.OutputFile
OutputTextFile = Options.OutputTextFile
NoTimestamp = Options.NoTimestamp
if not os.path.exists(InputFile):
EdkLogger("GenBiosId", FILE_NOT_FOUND, ExtraData="Input file not found")
return InputFile, OutputFile, OutputTextFile, NoTimestamp
# Read input file and get config
def ReadInputFile(InputFile):
InputDict = OrderedDict()
with open(InputFile) as File:
FileLines = File.readlines()
for Line in FileLines:
if Line.strip().startswith('#'):
continue
if '=' in Line:
Key, Value = Line.split('=')
InputDict[Key.strip()] = Value.strip()
return InputDict
# Parse the input file and extract the information
def ParserInputFile(InputDict, NoTimestamp):
for Item in InputDict:
if Item not in _ConfigItem:
EdkLogger("GenBiosId", FORMAT_INVALID, ExtraData=_ConfigItemInvalid % Item)
_ConfigItem[Item]['Value'] = InputDict[Item]
if len(_ConfigItem[Item]['Value']) != _ConfigItem[Item]['Length']:
            # The length of the Board ID is updated based on the BOARD_ID string.
            # If the PCH_TYPE is empty (single quotes ''), remove the empty quotes and
            # concatenate the TARGET_PLATFORM_SHORT and BUILD flag strings.
if(_ConfigItem["BOARD_ID"]['Value'][3:5] == "\'\'"):
_ConfigItem["BOARD_ID"]['Value']=_ConfigItem["BOARD_ID"]['Value'][0:3]+_ConfigItem["BOARD_ID"]['Value'][5:len(_ConfigItem["BOARD_ID"]['Value'])]
_ConfigItem["BOARD_ID"]['Length']=len(_ConfigItem["BOARD_ID"]['Value'])
else:
EdkLogger("GenBiosId", FORMAT_INVALID, ExtraData=_ConfigLenInvalid % Item)
for Item in _ConfigItem:
if not _ConfigItem[Item]['Value']:
EdkLogger("GenBiosId", FORMAT_UNKNOWN_ERROR, ExtraData="Item %s is missing" % Item)
utcnow = datetime.datetime.utcnow()
if NoTimestamp:
TimeStamp = "\0\0\0\0\0\0\0\0\0\0"
else:
TimeStamp = time.strftime("%y%m%d%H%M", utcnow.timetuple())
Id_Str = _ConfigItem['BOARD_ID']['Value'] + _ConfigItem['BOARD_REV']['Value'] + '.' + _ConfigItem['BOARD_EXT'][
'Value'] + '.' + _ConfigItem['VERSION_MAJOR']['Value'] + \
'.' + _ConfigItem["BUILD_TYPE"]['Value'] + _ConfigItem['VERSION_MINOR']['Value'] + '.' + TimeStamp
return Id_Str
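# Shape of the resulting BIOS ID string (the field values below are hypothetical
# examples; each field comes from the config items parsed above):
#
#   BOARD_ID + BOARD_REV . BOARD_EXT . VERSION_MAJOR . BUILD_TYPE + VERSION_MINOR . timestamp
#   e.g. "MNW2MAXW.X64.0090.D01.1906011230"
#
# With -nt/--notimestamp the trailing timestamp field is ten NUL characters instead.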
# Output information to a file
def PrintOutputFile(OutputFile, OutputTextFile, Id_Str):
with FileOpen(OutputFile, 'wb') as FdOut:
for i in _BIOS_Signature:
FdOut.write(struct.pack('B', ord(i)))
for i in Id_Str:
FdOut.write(struct.pack('H', ord(i)))
FdOut.write(struct.pack('H', 0x00))
if OutputTextFile:
with FileOpen(OutputTextFile, 'w') as FdOut:
FdOut.write(Id_Str)
# Tool entrance method
def Main():
Options = MyOptionParser()
InputFile, OutputFile, OutputTextFile, NoTimestamp = CheckOptions(Options)
InputDict = ReadInputFile(InputFile)
Id_Str = ParserInputFile(InputDict, NoTimestamp)
PrintOutputFile(OutputFile, OutputTextFile, Id_Str)
return 0
if __name__ == '__main__':
r = Main()
## 0-127 is a safe return range, and 1 is a standard default error
if r < 0 or r > 127: r = 1
sys.exit(r)
| edk2-platforms-master | Platform/Intel/Tools/GenBiosId/GenBiosId.py |
## @file
# Get all recursive package paths from special directories.
#
# Copyright (c) 2020, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
import os
import glob
import argparse
#
# Globals for help information
#
__prog__ = 'GetPackagesPath.py'
__copyright__ = 'Copyright (c) 2020, Intel Corporation. All rights reserved.'
__description__ = 'Gets all recursive package paths in specified directory.\n'
def __get_packages_path(root):
""" Gets all recursive package paths in specified directory.
A directory is a package path if it satisfies conditions below:
1. it is a directory
2. it is not an EDK II Package. An EDK II Package (directory) is
a directory that contains an EDK II package declaration (DEC) file.
3. it contains at least one first level EDK II Package.
Note: A directory is not package path but its subdirectory could be.
Example: edk2-platforms/Features is not package path
but edk2-platforms/Features/Intel is.
    :param root: The specified directory to find package paths in;
        the caller should ensure it is a valid directory
:type root: String
:returns: Return all recursive package paths
:rtype: String list
"""
paths = []
contain_package = False
for filename in os.listdir(root):
# skip files whose name starts with ".", such as ".git"
if filename.startswith('.'):
continue
filepath = os.path.join(root, filename)
if os.path.isdir(filepath):
if glob.glob(os.path.join(filepath, '*.dec')):
# it is an EDK II Package
contain_package = True
else:
# get package paths for subdirectory if it is not package
paths = paths + __get_packages_path(filepath)
if contain_package:
        # root is a package path because it contains an EDK II Package
        # in a first level folder; insert it at the head of the list
paths.insert(0, root)
# return package paths
return paths
def get_packages_path(directories):
""" For each direcory in directories, gets all recursive package paths
in this directory and joins them into one string.
:param directories: the list of directory
:type directories: String list
:returns: Return string of package paths
:rtype: String
"""
packages_path = ''
for directory in directories:
directory = os.path.abspath(directory)
if (not os.path.exists(directory)) or (not os.path.isdir(directory)):
continue
if glob.glob(os.path.join(directory, '*.dec')):
# it is an EDK II Package
continue
paths = __get_packages_path(directory)
for path in paths:
if packages_path == '':
packages_path = path
else:
packages_path += os.pathsep + path
return packages_path
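# A minimal usage sketch with a hypothetical directory layout. Given:
#
#   edk2-platforms/Platform/Intel/SomeBoardPkg/SomeBoardPkg.dec
#   edk2-platforms/Features/Intel/SomeFeaturePkg/SomeFeaturePkg.dec
#
# get_packages_path(['edk2-platforms']) returns the absolute forms of
# 'edk2-platforms/Platform/Intel' and 'edk2-platforms/Features/Intel' joined by
# os.pathsep, i.e. the deepest directories that directly contain DEC packages.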
if __name__ == '__main__':
# Create command line argument parser object
parser = argparse.ArgumentParser(
prog=__prog__,
description=__description__ + __copyright__,
conflict_handler='resolve'
)
parser.add_argument('directory', nargs='+',
                        help='Specified directory from which package paths are gathered')
args = parser.parse_args()
print(get_packages_path(args.directory))
| edk2-platforms-master | Platform/Intel/Tools/AppendPackagesPath/GetPackagesPath.py |
## @file
# generate UQI (Universal Question Identifier) unicode string for HII question PROMPT string. UQI string can be used to
# identify each HII question.
#
# Copyright (c) 2019, Intel Corporation. All rights reserved.
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
import re
import sys
import os
import getopt
import codecs
import fnmatch
import logging
import argparse
# global variable declarations
QuestionError = False
FileHeader = '//\r\n// FILE auto-generated by UniTool\r\n//\r\n\r\n#langdef uqi "uqi"\r\n\r\n'
UqiList = re.compile('^#string[ \t]+([A-Z_0-9]+)[ \t]+#language[ \t]+uqi[ \t\r\n]+"(?:[x\S]{1,2})([0-9a-fA-F]{4,5})"',
re.M).findall
AllUqis = {}
StringDict = {}
GlobalVarId = {}
Options = {}
# Version message
__prog__ = 'UniTool'
__description__ = 'The script generates UQI unicode strings for HII question PROMPT strings.\n'
__copyright__ = 'Copyright (c) 2019, Intel Corporation. All rights reserved.<BR>'
__version__ = '%s Version %s' % (__prog__, '0.1 ')
_Usage = "Syntax: %s [-b] [-u] [-l] [-x] [-h] [-d 'rootDirectory1'] [-d 'rootDirectory2'] [-d 'rootDirectory3']... \n[-q e|w]" \
"'rootDirectory0' 'uqiFile'|'uqiFileDirectory' ['excludedDirectory1'] ['excludedDirectory2'] ['excludedDirectory3']...\n" \
% (os.path.basename(sys.argv[0]))
# **********************************************************************
# description: Get uni file encoding
#
# arguments: Filename - name of uni file
#
# returns: utf-8 or utf-16
#
def GetUniFileEncoding(Filename):
#
# Detect Byte Order Mark at beginning of file. Default to UTF-8
#
Encoding = 'utf-8'
#
# Read file
#
try:
with open(Filename, mode='rb') as UniFile:
FileIn = UniFile.read()
except:
return Encoding
if (FileIn.startswith(codecs.BOM_UTF16_BE) or FileIn.startswith(codecs.BOM_UTF16_LE)):
Encoding = 'utf-16'
return Encoding
# rewrite function os.path.walk
def Walk(Top, Func, Arg):
try:
Names = os.listdir(Top)
except os.error:
return
Func(Arg, Top, Names)
for Name in Names:
Name = os.path.join(Top, Name)
if os.path.isdir(Name):
Walk(Name, Func, Arg)
# **********************************************************************
# description: Parses commandline arguments and options
# Calls function processUni to build dictionary of strings
# Calls other functions according to user specified options
#
# arguments: argv - contains all input from command line
# - must contain path to root directory
# - may contain options -h, -u, -l, -b or -x before path
#
# returns: none
#
def main():
##### Read input arguments and options
global AllUqis, UqiList, QuestionError
parser = argparse.ArgumentParser(prog=__prog__,
description=__description__ + __copyright__,
usage=_Usage,
conflict_handler='resolve')
parser.add_argument('Path', nargs='+',
help='the path for files to be converted.It could be directory or file path.')
parser.add_argument('-v', '--version', action='version', version=__version__,
help="show program's version number and exit")
parser.add_argument('-b', '--build', action='store_true', dest='BuildOption',
help="Build option returns error if any new UQI needs assigning " \
"based on vfi/vfr/hfr/sd/sdi when no -u option is specified")
parser.add_argument('-u', '--updata', action='store_true', dest='UpdateUQIs',
help="Create new UQIs that does not already exist in uqiFile for" \
"any string requiring a UQI based on vfi/vfr/hfr/sd/sdi" \
"NOTE: 'uqiFile' cannot be readonly!")
parser.add_argument('-l', '--lang', action='store_true', dest='LangOption',
help="Language deletion option (keeps only English and uqi)" \
"moves all UQIs to 'uqiFile', NOTE: Uni files cannot be readonly!")
parser.add_argument('-x', '--exclude', action='store_true', dest='ExcludeOption',
help="Exclude 'rootDirectory'/'excludedDirectory1' &" \
"'rootDirectory'/'excludedDirectory2'... from UQI list build")
parser.add_argument('-d', '--dir', action='append', metavar='FILEDIR', dest='DirName',
help="Add multiple root directories to process")
parser.add_argument('-q', '--question', dest='Question', choices=['w', 'e'],
help="Print warning(w) or return error(e) if different HII questions" \
"are referring same string token")
Opts = parser.parse_args()
Destname = ''
DirNameList = []
ExDirList = []
if Opts.Path:
DirNameList.append(Opts.Path[0])
Destname = Opts.Path[1]
ExDirList = Opts.Path[2:]
if Opts.DirName:
DirNameList.extend(Opts.DirName)
QuestionOption = Opts.Question
ExcludeOption = Opts.ExcludeOption
BuildOption = Opts.BuildOption
UpdateUQIs = Opts.UpdateUQIs
LangOption = Opts.LangOption
ExPathList = []
if ExDirList:
try:
for EachExDir in ExDirList:
for EachRootDir in DirNameList:
if EachExDir == EachRootDir:
print("\nERROR: excludedDirectory is same as rootDirectory\n")
return
ExPathList.append(EachRootDir + os.sep + EachExDir)
except:
print(_Usage)
return
global Options
Options = {'Destname': Destname, 'DirNameList': DirNameList, 'ExPathList': ExPathList, 'BuildOption': BuildOption,
'UpdateUQIs': UpdateUQIs, 'LangOption': LangOption, 'ExcludeOption': ExcludeOption,
'QuestionOption': QuestionOption}
print("UQI file: %s" % Destname)
for EachDirName in DirNameList:
Walk(EachDirName, processUni, None)
if QuestionError:
return
if os.path.isdir(Options['Destname']):
DestFileName = Options['Destname'] + os.sep + 'UqiList.uni'
else:
DestFileName = Options['Destname']
if os.path.exists(DestFileName) and (DestFileName not in list(AllUqis.keys())):
try:
Encoding = GetUniFileEncoding(DestFileName)
with codecs.open(DestFileName, 'r+', Encoding) as destFile:
DestFileBuffer = destFile.read()
except IOError as e:
print("ERROR: " + e.args[1])
return
AllUqis[DestFileName] = UqiList(DestFileBuffer)
if BuildOption:
ReturnVal = newUqi()
if (ReturnVal == 1):
print('Please fix UQI ERROR(s) above before proceeding.')
else:
print("No UQI issues detected\n")
return
# **********************************************************************
# description: newUqi collects a list of all currently used uqi values in the tree
# Halt build if any duplicated string or value in UQI list.
# If -u option was specified, creates new UQIs that does not
# already exist in uqiFile for any string requiring a UQI.
#
# arguments: none
#
# returns: 0 on success
# 1 on error - this should cause the build to halt
#
Syntax = "S"
SyntaxRE = re.compile('#string[ \t]+[A-Z_0-9]+[ \t]+#language[ \t]+uqi[ \t\r\n]+"([x\S]{1,2}).*', re.DOTALL).findall
def newUqi():
global Options, GlobalVarId, AllUqis, Syntax, SyntaxRE
UqiRange = []
UqiStringList = []
CreateUQI = []
ReturnVal = 0
BaseNumSpaces = 47 # Used to line up the UQI values in the resulting uqiFile
# Look for duplication in the current UQIs and collect current range of UQIs
for path in AllUqis.keys():
for UqiString in AllUqis[path]: # path contains the path and Filename of each uni file
# Checks for duplicated strings in UQI list
for TempString in UqiStringList:
if TempString == UqiString[0]:
print("ERROR: UQI string %s was assigned more than once and will cause corruption!" % UqiString[0])
print("Delete one occurrence of the string and rerun tool.")
ReturnVal = 1 # halt build
UqiStringList.append(UqiString[0])
# Checks for duplicated UQI values in UQI list
if int(UqiString[1], 16) in UqiRange:
print("ERROR: UQI value %04x was assigned more than once and will cause corruption!" % int(UqiString[1],
16))
print("Delete one occurrance of the UQI and rerun tool to create alternate value.")
ReturnVal = 1 # halt build
UqiRange.append(int(UqiString[1], 16))
for StringValue in GlobalVarId.keys():
StringFound = False
for path in StringDict.keys():
for UniString in StringDict[path]: # path contains the path and Filename of each uni file
if (StringValue == UniString):
StringFound = True
break
if not StringFound:
print("ERROR: No definition for %s referred by HII question" % (StringValue))
ReturnVal = 1 # halt build
# Require a UQI for any string in vfr/vfi files
for StringValue in GlobalVarId.keys():
# Ignore strings defined as STRING_TOKEN(0)
if (StringValue != "0"):
# Check if this string already exists in the UQI list
if (StringValue not in UqiStringList) and (StringValue not in CreateUQI):
CreateUQI.append(StringValue)
if not Options['UpdateUQIs']:
print("ERROR: No UQI for %s referred by HII question" % (StringValue))
ReturnVal = 1 # halt build after printing all error messages
if (ReturnVal == 1):
return ReturnVal
# Update uqiFile with necessary UQIs
if Options['UpdateUQIs'] and CreateUQI:
if os.path.isdir(Options['Destname']):
DestFileName = Options['Destname'] + os.sep + 'UqiList.uni'
else:
DestFileName = Options['Destname']
try:
Encoding = GetUniFileEncoding(DestFileName)
with codecs.open(DestFileName, 'r+', Encoding) as OutputFile:
PlatformUQI = OutputFile.read()
except IOError as e:
print("ERROR: " + e.args[1])
if (e.args[0] == 2):
try:
with codecs.open(DestFileName, 'w', Encoding) as OutputFile:
print(DestFileName + " did not exist. Creating new file.")
PlatformUQI = FileHeader
except:
print("Error creating " + DestFileName + ".")
return 1
if (e.args[1] == "Permission denied"):
print(
"\n%s is Readonly. You must uncheck the ReadOnly attibute to run the -u option.\n" % DestFileName)
return 1
# Determines and sets the UQI number format
# TODO: there is probably a more elegant way to do this...
SyntaxL = SyntaxRE(PlatformUQI)
if len(SyntaxL) != 0:
Syntax = SyntaxL[0]
# script is reading the file in and writing it back instead of appending because the codecs module
# automatically adds a BOM wherever you start writing. This caused build failure.
UqiRange.sort()
if (UqiRange == []):
NextUqi = 0
else:
NextUqi = UqiRange[len(UqiRange) - 1] + 1
for StringValue in CreateUQI:
print("%s will be assigned a new UQI value" % StringValue)
UqiRange.append(NextUqi)
#
# Lines up the UQI values in the resulting uqiFile
#
Spaces = " " * (BaseNumSpaces - len(StringValue))
PlatformUQI += '#string %s%s #language uqi \"%s%04x\"\r\n' % (StringValue, Spaces, Syntax, NextUqi)
print("#string %s%s #language uqi \"%s%04X\"" % (StringValue, Spaces, Syntax, NextUqi))
NextUqi += 1
with codecs.open(DestFileName, 'r+', Encoding) as OutputFile:
OutputFile.seek(0)
OutputFile.write(PlatformUQI)
return 0
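# Sketch of a UQI line emitted above for a newly assigned value (the string
# token name is hypothetical; the value is "%04x" hex with the default "S"
# numbering syntax, and spaces pad the name out to 47 characters):
#
#   #string STR_EXAMPLE_PROMPT                            #language uqi "S00a3"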
# **********************************************************************
# description: Parses each uni file to collect dictionary of strings
# Removes additional languages and overwrites current uni files
# if -l option was specified
#
# arguments: path - directory location of file including file name
# Filename - name of file to be modified
#
# returns: error string if failure occurred;
# none if completed successfully
#
# the following are global so that parsefile is quicker
FindUniString = re.compile(
'^#string[ \t]+([A-Z_0-9]+)(?:[ \t\r\n]+#language[ \t]+[a-zA-Z-]{2,5}[ \t\r\n]+".*"[ \t]*[\r]?[\n]?)*',
re.M).findall
OtherLang = re.compile(
'^#string[ \t]+[A-Z_0-9]+(?:[ \t\r\n]+#language[ \t]+[a-zA-Z-]{2,5}[ \t\r\n]+".*"[ \t]*[\r]?[\n]?)*', re.M).findall
EachLang = re.compile('[ \t\r\n]+#language[ \t]+([a-zA-Z-]{2,5})[ \t\r\n]+".*"[ \t]*[\r]?[\n]?').findall
UqiStrings = re.compile('^#string[ \t]+[A-Z_0-9]+[ \t]+#language[ \t]+uqi[ \t\r\n]+".*"[ \t]*[\r]?[\n]?', re.M)
def parsefile(path, Filename):
global Options, StringDict, AllUqis, UqiList, FindUniString, OtherLang, EachLang, UqiStrings
FullPath = path + os.sep + Filename
try:
UniEncoding = GetUniFileEncoding(FullPath)
with codecs.open(FullPath, 'r', UniEncoding) as UniFile:
Databuffer = UniFile.read()
except:
print("Error opening " + FullPath + " for reading.")
return
WriteFile = False
if os.path.isdir(Options['Destname']):
DestFileName = Options['Destname'] + os.sep + 'UqiList.uni'
else:
DestFileName = Options['Destname']
if Options['LangOption']:
try:
UqiEncoding = GetUniFileEncoding(DestFileName)
with codecs.open(DestFileName, 'r+', UqiEncoding) as OutputFile:
PlatformUQI = OutputFile.read()
except IOError as e:
print("ERROR: " + e.args[1])
if (e.args[0] == 2):
try:
with codecs.open(DestFileName, 'w', UqiEncoding) as OutputFile:
print(DestFileName + " did not exist. Creating new file.")
PlatformUQI = FileHeader
except:
print("Error creating " + DestFileName + ".")
return
else:
print("Error opening " + DestFileName + " for appending.")
return
if (Filename != DestFileName.split(os.sep)[-1]):
Uqis = re.findall(UqiStrings, Databuffer)
if Uqis:
for Uqi in Uqis:
PlatformUQI += Uqi
with codecs.open(DestFileName, 'r+', UqiEncoding) as OutputFile:
OutputFile.seek(0)
OutputFile.write(PlatformUQI)
Databuffer = re.sub(UqiStrings, '', Databuffer)
if Uqis:
WriteFile = True
print("Deleted uqis from %s" % FullPath)
stringlist = OtherLang(Databuffer)
for stringfound in stringlist:
ThisString = EachLang(stringfound)
for LanguageFound in ThisString:
if ((LanguageFound != 'en') and (LanguageFound != 'en-US') and (LanguageFound != 'eng') and (
LanguageFound != 'uqi')):
Databuffer = re.sub(re.escape(stringfound), '', Databuffer)
WriteFile = True
print("Deleted %s from %s" % (LanguageFound, FullPath))
if (Filename != DestFileName.split(os.sep)[-1]):
# adding strings to dictionary
StringDict[r'%s' % FullPath] = FindUniString(Databuffer)
# adding UQIs to dictionary
AllUqis[r'%s' % FullPath] = UqiList(Databuffer)
if WriteFile:
try:
with codecs.open(FullPath, 'w', UniEncoding) as UniFile:
UniFile.write(Databuffer)
except:
print("Error opening " + FullPath + " for writing.")
return
# **********************************************************************
# description: Searches tree for uni files
# Calls parsefile to collect dictionary of strings in each uni file
# Calls searchVfiFile for each vfi or vfr file found
#
# arguments: argument list is built by os.path.walk function call
# arg - None
# dirname - directory location of files
# names - specific files to search in directory
#
# returns: none
#
def processUni(args, dirname, names):
global Options
# Remove excludedDirectory
if Options['ExcludeOption']:
for EachExDir in Options['ExPathList']:
for dir in names:
if os.path.join(dirname, dir) == EachExDir:
names.remove(dir)
for entry in names:
FullPath = dirname + os.sep + entry
if fnmatch.fnmatch(FullPath, '*.uni'):
parsefile(dirname, entry)
if fnmatch.fnmatch(FullPath, '*.vf*'):
searchVfiFile(FullPath)
if fnmatch.fnmatch(FullPath, '*.sd'):
searchVfiFile(FullPath)
if fnmatch.fnmatch(FullPath, '*.sdi'):
searchVfiFile(FullPath)
if fnmatch.fnmatch(FullPath, '*.hfr'):
searchVfiFile(FullPath)
return
# **********************************************************************
# description: Compose a dictionary of all strings that may need UQIs assigned
# to them, keyed by the string
#
# arguments: Filename - name of file to search for strings
#
# returns: none
#
# separate regexes for readability
StringGroups = re.compile(
'^[ \t]*(?:oneof|numeric|checkbox|orderedlist)[ \t]+varid.+?(?:endoneof|endnumeric|endcheckbox|endorderedlist);',
re.DOTALL | re.M).findall
StringVarIds = re.compile(
'[ \t]*(?:oneof|numeric|checkbox|orderedlist)[ \t]+varid[ \t]*=[ \t]*([a-zA-Z_0-9]+\.[a-zA-Z_0-9]+)').findall
StringTokens = re.compile('prompt[ \t]*=[ \t]*STRING_TOKEN[ \t]*\(([a-zA-Z_0-9]+)\)').findall
def searchVfiFile(Filename):
global Options, GlobalVarId, StringGroups, StringVarIds, StringTokens, QuestionError
try:
with open(Filename, 'r') as VfiFile:
Databuffer = VfiFile.read()
# Finds specified lines in file
VfiStringGroup = StringGroups(Databuffer)
# Searches for prompts within specified lines
for EachGroup in VfiStringGroup:
for EachString in StringTokens(EachGroup):
# Ignore strings defined as STRING_TOKEN(0), STRING_TOKEN(STR_EMPTY) or STRING_TOKEN(STR_NULL)
if (EachString != "0") and (EachString != "STR_EMPTY") and (EachString != "STR_NULL"):
if EachString not in GlobalVarId:
GlobalVarId[EachString] = StringVarIds(EachGroup)
else:
if (GlobalVarId[EachString][0] != StringVarIds(EachGroup)[0]):
if Options['QuestionOption']:
if Options['QuestionOption'] == "e":
QuestionError = True
print("ERROR:"),
if Options['QuestionOption'] == "w":
print("WARNING:"),
print("%s referred by different HII questions(%s and %s)" % (
EachString, GlobalVarId[EachString][0], StringVarIds(EachGroup)[0]))
except:
print("Error opening file at %s for reading." % Filename)
if __name__ == '__main__':
sys.exit(main())
| edk2-platforms-master | Platform/Intel/Tools/UniTool/UniTool.py |
## @file
#
# Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
AmlGenOffset
'''
import os
import sys
import argparse
import subprocess
import uuid
import struct
import collections
import binascii
import re
from ctypes import *
#
# Globals for help information
#
__prog__ = 'AmlGenOffset'
__version__ = '%s Version %s' % (__prog__, '0.1 ')
__copyright__ = 'Copyright (c) 2017, Intel Corporation. All rights reserved.'
__usage__ = '%s -e|-d [options] <input_file>' % (__prog__)
if __name__ == '__main__':
#
# Create command line argument parser object
#
parser = argparse.ArgumentParser(prog=__prog__, usage=__usage__, description=__copyright__, conflict_handler='resolve')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-e", action="store_true", dest='Encode', help='encode file')
group.add_argument("-d", action="store_true", dest='Decode', help='decode file')
parser.add_argument("-o", "--output", dest='OutputFileName', type=str, metavar='filename', help="specify the output filename", required=True)
parser.add_argument("-v", "--verbose", dest='Verbose', action="store_true", help="increase output messages")
parser.add_argument("-q", "--quiet", dest='Quiet', action="store_true", help="reduce output messages")
parser.add_argument("--debug", dest='Debug', type=int, metavar='[0-9]', choices=range(0,10), default=0, help="set debug level")
parser.add_argument("--aml_filter", dest='AmlFilterStr', type=str, help="specify the AML filter.")
parser.add_argument(metavar="input_file", dest='InputFile', type=argparse.FileType('r'), help="specify the input filename")
#
# Parse command line arguments
#
args = parser.parse_args()
if args.Encode:
print('Unsupported')
if args.Decode:
args.OutputFileName = os.path.normpath(args.OutputFileName)
args.OutputFile = open(args.OutputFileName, 'w')
AmlFilter = args.AmlFilterStr
filter_pattern = '|'.join(AmlFilter.split(' '))
lines = args.InputFile.readlines()
args.InputFile.close()
for line in lines:
if line.strip().startswith('{\"') == False:
if line.strip().startswith('* Compilation') == False and line.strip().startswith('* ASL+') == False and line.strip().startswith('* Copyright') == False:
args.OutputFile.write(line)
else:
match_obj = re.search(filter_pattern, line, re.M | re.I)
if match_obj is not None:
args.OutputFile.write(line)
args.OutputFile.close()
| edk2-platforms-master | Platform/Intel/MinPlatformPkg/Tools/AmlGenOffset/AmlGenOffset.py |
## @ RebaseFspBinBaseAddress.py
#
# Copyright (c) 2019 - 2021, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
import os
import sys
import re
import subprocess
if len(sys.argv) not in [5,6]:
print ("RebaseFspBinBaseAddress.py - Error in number of arguments received")
print ("Usage - RebaseFspBinBaseAddress.py <FlashMap file path> <FspBinPkg Folder> <Fsp.fd file name>\
<pad_offset for Fsp-S Base Address> <OPTIONAL SplitFspBin.py tool path>")
exit(1)
flashMapName = sys.argv[1]
fspBinPath = sys.argv[2]
fspBinFile = sys.argv[3]
fvOffset = int(sys.argv[4], 16)
fspBinFileRebased = "Fsp_Rebased.fd"
splitFspBinPath = os.path.join("edk2","IntelFsp2Pkg","Tools","SplitFspBin.py")
if len(sys.argv) == 6:
splitFspBinPath = sys.argv[5]
#
# Make sure argument passed or valid
#
if not os.path.exists(flashMapName):
print ("WARNING! " + str(flashMapName) + " is not found.")
exit(1)
fspBinFilePath = fspBinPath + os.sep + fspBinFile
if not os.path.exists(fspBinFilePath):
print ("WARNING! " + str(fspBinFilePath) + " is not found.")
exit(1)
if not os.path.exists(splitFspBinPath):
print ("WARNING! " + str(splitFspBinPath) + " is not found.")
exit(1)
#
# Get the FSP-S / FSP-M / FSP-T FV base addresses from the Flash Map
#
file = open (flashMapName, "r")
data = file.read ()
# Get the Flash Base Address
flashBase = int(data.split("FLASH_BASE")[1].split("=")[1].split()[0], 16)
# Use the whole FlashMap content; no build-target-specific section is selected here
flashmap = data
# Get FSP-S & FSP-M & FSP-T offset & calculate the base
for line in flashmap.split("\n"):
if "PcdFlashFvFspSOffset" in line:
fspSBaseOffset = int(line.split("=")[1].split()[0], 16)
if "PcdFlashFvFspMOffset" in line:
fspMBaseOffset = int(line.split("=")[1].split()[0], 16)
if "PcdFlashFvFspTOffset" in line:
fspTBaseOffset = int(line.split("=")[1].split()[0], 16)
file.close()
#
# Get the FSP-M size by running the SplitFspBin.py script to dump the FSP
# header and reading the ImageSize field of the FSP-M section
#
# Prefer a python interpreter under PYTHON_HOME when it is set; otherwise use the running interpreter
if 'PYTHON_HOME' in os.environ:
pythontool = os.environ['PYTHON_HOME'] + os.sep + 'python'
else:
pythontool = sys.executable
Process = subprocess.Popen([pythontool, splitFspBinPath, "info","-f",fspBinFilePath], stdout=subprocess.PIPE)
Output = Process.communicate()[0]
FsptInfo = Output.rsplit(b"FSP_M", 1)
for line in FsptInfo[1].split(b"\n"):
if b"ImageSize" in line:
fspMSize = int(line.split(b"=")[1], 16)
break
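# The loop above parses SplitFspBin.py "info" output lines of roughly this
# shape (illustrative fragment; the exact layout is assumed from the parsing):
#   ImageSize            = 0x0003A000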
# Calculate the FSP-S/M/T base addresses to which the rebase is done
fspSBaseAddress = flashBase + fspSBaseOffset + fvOffset
fspMBaseAddress = flashBase + fspMBaseOffset
fspTBaseAddress = flashBase + fspTBaseOffset
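# Worked example with hypothetical values: flashBase = 0xFF000000,
# fspSBaseOffset = 0x00400000 and fvOffset (pad) = 0x1000 give an FSP-S base
# of 0xFF401000; FSP-M and FSP-T use only flashBase plus their own offsets.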
#
# Re-base FSP bin file to new address and save it as fspBinFileRebased using SplitFspBin.py
#
rebaseArguments = fspBinFilePath + " -c s m t -b " + str(hex(fspSBaseAddress).rstrip("L")) + " " + str(hex(fspMBaseAddress).rstrip("L")) + " " + str(hex(fspTBaseAddress).rstrip("L")) + " -o" + fspBinPath + " -n " + fspBinFileRebased
os.system('"' + pythontool + '"' + " " + splitFspBinPath + " rebase -f" + rebaseArguments)
#
# Split FSP bin to FSP-S/M/T segments
#
splitArguments = fspBinPath + os.sep + fspBinFileRebased + " -o " + fspBinPath + " -n Fsp_Rebased.fd"
os.system('"' + pythontool + '"' + " " + splitFspBinPath + " split -f" + splitArguments)
exit(0)
| edk2-platforms-master | Platform/Intel/MinPlatformPkg/Tools/Fsp/RebaseFspBinBaseAddress.py |
## @ PatchFspBinFvsBaseAddress.py
#
# Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
import os
import re
import sys
import struct
from datetime import date
fspSBaseAddress = 0
fspMBaseAddress = 0
fspTBaseAddress = 0
def GetFspBaseAddress (binfile):
offset = 0;
for index in range(1,4):
attribute = readDataFromFile(binfile, offset+0xB7, 1) >> 4
if attribute == 0x3:
global fspSBaseAddress
fspSBaseAddress = readDataFromFile(binfile, offset+0xB0, 4)
if attribute == 0x2:
global fspMBaseAddress
fspMBaseAddress = readDataFromFile(binfile, offset+0xB0, 4)
if attribute == 0x1:
global fspTBaseAddress
fspTBaseAddress = readDataFromFile(binfile, offset+0xB0, 4)
offset += readDataFromFile(binfile, offset+0xAC, 4)
return 0
#
# Read data from file
#
# param [in] binfile Binary file
# param [in] offset Offset
# param [in] len Length
#
# retval value Value
#
def readDataFromFile (binfile, offset, len=1):
fd = open(binfile, "rb")
fsize = os.path.getsize(binfile)
offval = offset & 0xFFFFFFFF
if (offval & 0x80000000):
offval = fsize - (0xFFFFFFFF - offval + 1)
fd.seek(offval)
bytearray = [ord(b) for b in fd.read(len)]
value = 0
idx = len - 1
while idx >= 0:
value = value << 8 | bytearray[idx]
idx = idx - 1
fd.close()
return value
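# Illustrative use of the helper above (hypothetical file and offsets): reading
# 4 bytes 00 00 E6 FF at offset 0xB0 yields the little-endian value 0xFFE60000;
# an offset with bit 31 set, e.g. 0xFFFFFFFC, reads relative to the file end.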
def updateFspFvsBase (binfile, TargetFile):
ext_file = str(os.path.splitext(TargetFile)[-1]).lower()
if not os.path.exists(binfile):
print "WARNING! " + str(binfile) + " is not found."
return 1
if not os.path.exists(TargetFile):
print "WARNING! " + str(TargetFile) + " is not found."
return 1
GetFspBaseAddress(binfile)
if ext_file == ".dsc":
DscFile = open(TargetFile, "r")
DscLines = DscFile.readlines()
DscFile.close()
DscContent = []
for line in DscLines:
DscContent.append(line)
DscFile = open(TargetFile,"w")
for index in range(len(DscContent)):
DscLine = DscContent[index]
Match = re.match("([_a-zA-Z0-9]+).Pcd(Fspt|Fspm|Fsps)BaseAddress",DscLine)
if Match:
DscLine = Match.group(1) + ".Pcd" + Match.group(2) + "BaseAddress|0x"
if Match.group(2) == 'Fspt':
BaseAddrStr = str(hex(fspTBaseAddress)[2:]).zfill(8).upper().rstrip('L')
elif Match.group(2) == 'Fspm':
BaseAddrStr = str(hex(fspMBaseAddress)[2:]).zfill(8).upper().rstrip('L')
elif Match.group(2) == 'Fsps':
BaseAddrStr = str(hex(fspSBaseAddress)[2:]).zfill(8).upper().rstrip('L')
DscLine = DscLine + BaseAddrStr + "\n"
DscFile.writelines(DscLine)
DscFile.close()
return 0
def Main():
#
# Parse the options and args
#
if len(sys.argv) != 3:
print "error"
return 1
ret = updateFspFvsBase (sys.argv[1], sys.argv[2])
if ret != 0:
return 1
return 0
if __name__ == '__main__':
sys.exit(Main())
| edk2-platforms-master | Platform/Intel/MinPlatformPkg/Tools/Fsp/PatchFspBinFvsBaseAddress.py |
## @ RebaseAndPatchFspBinBaseAddress.py
#
# Copyright (c) 2017 - 2021, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
import os
import sys
import re
import subprocess
if len(sys.argv) not in [6,7]:
print ("RebaseAndPatchFspBinBaseAddress.py - Error in number of arguments received")
print ("Usage - RebaseAndPatchFspBinBaseAddress.py <FlashMap file path> <FspBinPkg Folder> <Fsp.fd file name>\
<Dsc file path to be patched> <pad_offset for Fsp-S Base Address> <OPTIONAL SplitFspBin.py tool path>")
exit(1)
flashMapName = sys.argv[1]
fspBinPath = sys.argv[2]
fspBinFile = sys.argv[3]
targetDscFile = sys.argv[4]
fvOffset = int(sys.argv[5], 16)
fspBinFileRebased = "Fsp_Rebased.fd"
splitFspBinPath = os.path.join("edk2","IntelFsp2Pkg","Tools","SplitFspBin.py")
if len(sys.argv) == 7:
splitFspBinPath = sys.argv[6]
#
# Make sure the arguments passed in point to existing, valid files
#
if not os.path.exists(flashMapName):
print ("WARNING! " + str(flashMapName) + " is not found.")
exit(1)
fspBinFilePath = fspBinPath + os.sep + fspBinFile
if not os.path.exists(fspBinFilePath):
print ("WARNING! " + str(fspBinFilePath) + " is not found.")
exit(1)
if not os.path.exists(targetDscFile):
print ("WARNING! " + str(targetDscFile) + " is not found.")
exit(1)
ext_file = str(os.path.splitext(targetDscFile)[-1]).lower()
if ext_file != ".dsc":
print ("WARNING! " + str(targetDscFile) + " is not a dsc file")
exit(1)
if not os.path.exists(splitFspBinPath):
print ("WARNING! " + str(splitFspBinPath) + " is not found.")
exit(1)
#
# Get the FSP-S / FSP-M / FSP-T FV base addresses from the Flash Map
#
file = open (flashMapName, "r")
data = file.read ()
# Get the Flash Base Address
flashBase = int(data.split("FLASH_BASE")[1].split("=")[1].split()[0], 16)
# Use the whole FlashMap content; no build-target-specific section is selected here
flashmap = data
# Get FSP-S & FSP-M & FSP-T offset & calculate the base
for line in flashmap.split("\n"):
if "PcdFlashFvFspSOffset" in line:
fspSBaseOffset = int(line.split("=")[1].split()[0], 16)
if "PcdFlashFvFspMOffset" in line:
fspMBaseOffset = int(line.split("=")[1].split()[0], 16)
if "PcdFlashFvFspTOffset" in line:
fspTBaseOffset = int(line.split("=")[1].split()[0], 16)
file.close()
#
# Get the FSP-M size by running the SplitFspBin.py script to dump the FSP
# header and reading the ImageSize field of the FSP-M section
#
pythontool = sys.executable
Process = subprocess.Popen([pythontool, splitFspBinPath, "info","-f",fspBinFilePath], stdout=subprocess.PIPE)
Output = Process.communicate()[0]
FsptInfo = Output.rsplit(b"FSP_M", 1)
for line in FsptInfo[1].split(b"\n"):
if b"ImageSize" in line:
fspMSize = int(line.split(b"=")[1], 16)
break
# Calculate the FSP-S/M/T base addresses to which the rebase is done
fspSBaseAddress = flashBase + fspSBaseOffset + fvOffset
fspMBaseAddress = flashBase + fspMBaseOffset
fspTBaseAddress = flashBase + fspTBaseOffset
#
# Re-base FSP bin file to new address and save it as fspBinFileRebased using SplitFspBin.py
#
rebaseArguments = fspBinFilePath + " -c s m t -b " + str(hex(fspSBaseAddress).rstrip("L")) + " " + str(hex(fspMBaseAddress).rstrip("L")) + " " + str(hex(fspTBaseAddress).rstrip("L")) + " -o" + fspBinPath + " -n " + fspBinFileRebased
os.system('"' + pythontool + '"' + " " + splitFspBinPath + " rebase -f" + rebaseArguments)
#
# Split FSP bin to FSP-S/M/T segments
#
splitArguments = fspBinPath + os.sep + fspBinFileRebased + " -o " + fspBinPath + " -n Fsp_Rebased.fd"
os.system('"' + pythontool + '"' + " " + splitFspBinPath + " split -f" + splitArguments)
#
# Patch the DSC file with the rebased FSP-S/M/T addresses so that the build consumes the same values
#
DscFile = open(targetDscFile, "r")
DscLines = DscFile.readlines()
DscFile.close()
DscContent = []
for line in DscLines:
DscContent.append(line)
DscFile = open(targetDscFile,"w")
for index in range(len(DscContent)):
DscLine = DscContent[index]
Match = re.match("([\s_a-zA-Z0-9]+).Pcd(Fspt|Fspm|Fsps)BaseAddress",DscLine)
if Match:
DscLine = Match.group(1) + ".Pcd" + Match.group(2) + "BaseAddress|0x"
if Match.group(2) == 'Fspt':
BaseAddrStr = str(hex(fspTBaseAddress)[2:]).zfill(8).upper().rstrip('L')
elif Match.group(2) == 'Fspm':
BaseAddrStr = str(hex(fspMBaseAddress)[2:]).zfill(8).upper().rstrip('L')
elif Match.group(2) == 'Fsps':
BaseAddrStr = str(hex(fspSBaseAddress)[2:]).zfill(8).upper().rstrip('L')
DscLine = DscLine + BaseAddrStr + "\n"
DscFile.writelines(DscLine)
DscFile.close()
exit(0)
| edk2-platforms-master | Platform/Intel/MinPlatformPkg/Tools/Fsp/RebaseAndPatchFspBinBaseAddress.py |
## @ SyncBinFvInf.py
#
# Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
import os
import re
import sys
import time
import shutil
from ctypes import *
class GUID(Structure):
_fields_ = [
('Guid1', c_uint32),
('Guid2', c_uint16),
('Guid3', c_uint16),
('Guid4', ARRAY(c_uint8, 8)),
]
class EFI_FIRMWARE_VOLUME_HEADER(Structure):
_fields_ = [
('ZeroVector', ARRAY(c_uint8, 16)),
('FileSystemGuid', GUID),
('FvLength', c_uint64),
('Signature', c_uint32),
('Attributes', c_uint32),
('HeaderLength', c_uint16),
('Checksum', c_uint16),
('ExtHeaderOffset', c_uint16),
('Reserved', c_uint8),
('Revision', c_uint8),
]
#
# File Types Definitions
#
EFI_FV_FILETYPE_ALL = 0x00
EFI_FV_FILETYPE_RAW = 0x01
EFI_FV_FILETYPE_FREEFORM = 0x02
EFI_FV_FILETYPE_SECURITY_CORE = 0x03
EFI_FV_FILETYPE_PEI_CORE = 0x04
EFI_FV_FILETYPE_DXE_CORE = 0x05
EFI_FV_FILETYPE_PEIM = 0x06
EFI_FV_FILETYPE_DRIVER = 0x07
EFI_FV_FILETYPE_COMBINED_PEIM_DRIVER = 0x08
EFI_FV_FILETYPE_APPLICATION = 0x09
EFI_FV_FILETYPE_SMM = 0x0A
EFI_FV_FILETYPE_FIRMWARE_VOLUME_IMAGE = 0x0B
EFI_FV_FILETYPE_COMBINED_SMM_DXE = 0x0C
EFI_FV_FILETYPE_SMM_CORE = 0x0D
EFI_FV_FILETYPE_OEM_MIN = 0xc0
EFI_FV_FILETYPE_OEM_MAX = 0xdf
EFI_FV_FILETYPE_DEBUG_MIN = 0xe0
EFI_FV_FILETYPE_DEBUG_MAX = 0xef
EFI_FV_FILETYPE_FFS_MIN = 0xf0
EFI_FV_FILETYPE_FFS_MAX = 0xff
EFI_FV_FILETYPE_FFS_PAD = 0xf0
#
# FFS File Attributes.
#
FFS_ATTRIB_LARGE_FILE = 0x01
FFS_ATTRIB_DATA_ALIGNMENT_2 = 0x02
FFS_ATTRIB_FIXED = 0x04
FFS_ATTRIB_DATA_ALIGNMENT = 0x38
FFS_ATTRIB_CHECKSUM = 0x40
#
# FFS File State Bits.
#
EFI_FILE_HEADER_CONSTRUCTION = 0x01
EFI_FILE_HEADER_VALID = 0x02
EFI_FILE_DATA_VALID = 0x04
EFI_FILE_MARKED_FOR_UPDATE = 0x08
EFI_FILE_DELETED = 0x10
EFI_FILE_HEADER_INVALID = 0x20
class EFI_FFS_FILE_HEADER(Structure):
_fields_ = [
('Name', GUID),
('IntegrityCheck', c_uint16),
('Type', c_uint8),
('Attributes', c_uint8),
('Size', ARRAY(c_uint8, 3)),
('State', c_uint8),
]
class EFI_FFS_FILE_HEADER2(Structure):
_fields_ = [
('Name', GUID),
('IntegrityCheck', c_uint16),
('Type', c_uint8),
('Attributes', c_uint8),
('Size', ARRAY(c_uint8, 3)),
('State', c_uint8),
('ExtendedSize', c_uint64),
]
#
# Pseudo type. It is used as a wild card when retrieving sections.
# The section type EFI_SECTION_ALL matches all section types.
#
EFI_SECTION_ALL = 0x00
#
# Encapsulation section Type values.
#
EFI_SECTION_COMPRESSION = 0x01
EFI_SECTION_GUID_DEFINED = 0x02
EFI_SECTION_DISPOSABLE = 0x03
#
# Leaf section Type values.
#
EFI_SECTION_PE32 = 0x10
EFI_SECTION_PIC = 0x11
EFI_SECTION_TE = 0x12
EFI_SECTION_DXE_DEPEX = 0x13
EFI_SECTION_VERSION = 0x14
EFI_SECTION_USER_INTERFACE = 0x15
EFI_SECTION_COMPATIBILITY16 = 0x16
EFI_SECTION_FIRMWARE_VOLUME_IMAGE = 0x17
EFI_SECTION_FREEFORM_SUBTYPE_GUID = 0x18
EFI_SECTION_RAW = 0x19
EFI_SECTION_PEI_DEPEX = 0x1B
EFI_SECTION_SMM_DEPEX = 0x1C
class EFI_COMMON_SECTION_HEADER(Structure):
_fields_ = [
('Size', ARRAY(c_uint8, 3)),
('Type', c_uint8),
]
class EFI_COMMON_SECTION_HEADER2(Structure):
_fields_ = [
('Size', ARRAY(c_uint8, 3)),
('Type', c_uint8),
('ExtendedSize', c_uint32),
]
class FileChecker:
def __init__(self):
self.SyncSectionList = ["Packages", "PatchPcd", "PcdEx"]
self.RebasePcd = ["", "", ""]
self.FvName = ""
self.FfsGuidList = []
self.FfsInfList = []
self.FfsOffsetList = []
self.PeOffsetList = []
self.target = ""
self.sourceRoot = ""
self.sourceInfList = []
self.destRoot = ""
self.reportFile = ""
def GetSectionName(self, line):
splitLine = line[1:-1].split(".")
return splitLine[0]
def IsSyncSection(self, line):
name = self.GetSectionName(line)
for sectionName in self.SyncSectionList:
if (cmp (sectionName, name) == 0) :
return True
return False
def PrintRebasePcd(self, pcd):
if cmp (pcd[0], "") != 0:
print "PCD: " + pcd[0] + "|" + pcd[1] + "(" + pcd[2] + ")"
def PrintList(self, fileList):
for file in fileList:
print file
def GetFileList(self, dir, fileList, checkFunc):
if os.path.isdir(dir):
for sub in os.listdir(dir):
if sub[0] == "." :
continue
newDir = os.path.join(dir,sub)
if (os.path.isdir(newDir) == True):
self.GetFileList(newDir, fileList, checkFunc)
else :
AppendName = checkFunc (newDir)
if cmp (AppendName, "") != 0:
#print "AppendName = " + AppendName
if AppendName not in fileList:
fileList.append(AppendName)
def GetInfFileGuid(self, fileName):
guid = ""
try :
file = open(fileName)
except Exception:
print "fail to open " + fileName
return
try:
while 1:
line = file.readline()
if not line:
break
newline = line[:-1]
if cmp (line[:11], " FILE_GUID") == 0:
splitLine = line.split("=")
templine = splitLine[1]
guid = templine[1:1+36]
finally:
file.close()
return guid
def GetInfNameFromGuid(self, fileList, guid):
for file in fileList:
fileGuid = self.GetInfFileGuid (file)
if (cmp (fileGuid.upper(), guid.upper()) == 0) :
return file
return ""
def CheckSourceInf(self, file):
if (cmp (file[-4:], ".inf") == 0) and (file.find("BinPkg") != -1) and (file.find(self.target) != -1) and (file.find("FVFSP") == -1):
return file
return ""
def ParseInfFile(self, fileName, destFile, Offset):
SyncToDest = False
PatchOffset = False
try :
file = open(fileName)
except Exception:
print "fail to open " + fileName
return
try:
while 1:
line = file.readline()
if not line:
break
newline = line[:-1]
if cmp (line[0], "[") == 0:
SyncToDest = self.IsSyncSection(line)
PatchOffset = False
if SyncToDest == True :
if (cmp (self.GetSectionName(line), "PatchPcd") == 0) :
PatchOffset = True
if (PatchOffset == True) and ('|' in line) :
splitLine = line.split("|")
destFile.writelines(splitLine[0] + "|" + splitLine[1] + "|")
DataOffset = int(splitLine[2], 16)
DataOffset = DataOffset + int(Offset, 16)
destFile.writelines(hex(DataOffset))
destFile.writelines(" # " + splitLine[2][:-1] + "+" + Offset + "\n")
else :
destFile.writelines(line)
else :
destFile.write("# ")
destFile.writelines(line)
finally:
file.close()
return
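    # Illustrative transformation performed above (hypothetical values): with
    # Offset "0x4A0", a [PatchPcd] entry such as
    #   gTokenSpaceGuid.PcdExample|0x0|0x100
    # is written out as
    #   gTokenSpaceGuid.PcdExample|0x0|0x5a0 # 0x100+0x4A0
    # i.e. the PCD patch offset is shifted by the PE/TE image offset in the FV.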
def ParseInfFiles(self, fileList, FfsOffsetList, destFileName, RebasePcd):
try :
destFile = open(destFileName, "a")
except Exception:
print "fail to open " + destFileName
return
try:
if cmp (RebasePcd[0], "") != 0 :
destFile.write("\n#![Pcd]\n")
destFile.write("#! ")
destFile.write(RebasePcd[0])
destFile.write("|")
destFile.write(RebasePcd[1])
destFile.write("\n\n")
index = 0
for file in fileList:
if (cmp (file, "") == 0) :
index = index + 1
continue
print "handling - (" + str(index) + ") :" + file
self.ParseInfFile (file, destFile, FfsOffsetList[index])
index = index + 1
finally:
destFile.close()
return
def GetGuildFfsGuidList(self, fileName):
self.FfsGuidList = []
self.FfsOffsetList = []
try :
file = open(fileName)
except Exception:
print "fail to open " + fileName
return
try:
while 1:
line = file.readline()
if not line:
break
#0x000D4868 A8499E65-A6F6-48B0-96DB-45C266030D83
MatchString = "(0x[0-9a-fA-F]{8}) ([0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})"
match = re.match(MatchString, line)
if match is not None:
offset = match.group(1)
self.FfsOffsetList.append(offset)
ffsGuid = match.group(2)
self.FfsGuidList.append(ffsGuid)
finally:
file.close()
return
def GetPeOffsetList(self, fileName):
self.PeOffsetList = []
try :
file = open(fileName, "rb")
except Exception:
print "fail to open " + fileName
return
try:
FvBuffer = file.read()
FvData = bytearray(FvBuffer)
for FfsOffset in self.FfsOffsetList:
PeOffsetFound = False
PeOffset = int(FfsOffset, 16)
FfsHeader = EFI_FFS_FILE_HEADER.from_buffer (FvData, PeOffset)
FfsSize = FfsHeader.Size[0] + (FfsHeader.Size[1] << 8) + (FfsHeader.Size[2] << 16)
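                # The FFS Size field is a 24-bit little-endian value stored in
                # three bytes, e.g. Size bytes [0x34, 0x12, 0x00] -> 0x001234.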
PeOffset = PeOffset + sizeof(EFI_FFS_FILE_HEADER)
while (PeOffset < int(FfsOffset, 16) + FfsSize) :
SectionHeader = EFI_COMMON_SECTION_HEADER.from_buffer (FvData, PeOffset)
if (SectionHeader.Type == EFI_SECTION_PE32) or (SectionHeader.Type == EFI_SECTION_TE) :
PeOffset = PeOffset + sizeof(EFI_COMMON_SECTION_HEADER)
self.PeOffsetList.append(hex(PeOffset))
PeOffsetFound = True
break
else:
SectionSize = SectionHeader.Size[0] + (SectionHeader.Size[1] << 8) + (SectionHeader.Size[2] << 16)
PeOffset = (PeOffset + SectionSize + 3) & ~0x3
if (PeOffsetFound == False):
self.PeOffsetList.append(0)
finally:
file.close()
return
def ProcessFvInf(self, fvName, RebasePcd):
destFile = os.path.join(self.destRoot,fvName+"\\"+self.target+"\\"+fvName+".Fv.txt")
print "\nprocessing - " + destFile
self.GetGuildFfsGuidList (destFile)
#print "FfsGuidList"
#self.PrintList(self.FfsGuidList)
#print "FfsOffsetList"
#self.PrintList(self.FfsOffsetList)
destFile = os.path.join(self.destRoot,fvName+"\\"+self.target+"\\"+fvName+".Fv")
self.GetPeOffsetList (destFile)
#print "PeOffsetList"
#self.PrintList(self.PeOffsetList)
self.FfsInfList = []
for guid in self.FfsGuidList:
fileName = self.GetInfNameFromGuid(self.sourceInfList, guid)
print " adding - " + guid + " : " + fileName
self.FfsInfList.append(fileName)
#print "FfsInfList"
#self.PrintList(self.FfsInfList)
shutil.copy(os.path.join(self.destRoot,fvName+"\\"+self.target+"\\"+fvName+".Base.inf"),os.path.join(self.destRoot,fvName+"\\"+self.target+"\\"+fvName+".inf"))
self.ParseInfFiles (self.FfsInfList, self.PeOffsetList, os.path.join(self.destRoot,fvName+"\\"+self.target+"\\"+fvName+".inf"), RebasePcd)
def GetPcdFromReport(self, file, pcd):
FoundPkg = False
pcdSplit = pcd.split(".")
TargetPkg = pcdSplit[0]
TargetPcd = pcdSplit[1]
while 1:
line = file.readline()
if not line:
break
newline = line[:-1]
if (cmp (newline, TargetPkg) == 0):
FoundPkg = True
continue
if (cmp (newline, "") == 0) or ((cmp (newline[0], " ") != 0) and (cmp (newline[0], "0") != 0)):
FoundPkg = False
if (FoundPkg == True) :
newline = newline.strip()
splitLine = newline.split(" ", 2)
if (cmp (splitLine[0], "*F") == 0) or (cmp (splitLine[0], "*P") == 0):
if (cmp (splitLine[1], TargetPcd) == 0):
print "found - " + TargetPkg + "." + TargetPcd
splitLine = splitLine[2].strip()[1:].strip().split(" ", 1)
if (cmp (splitLine[0], "FIXED") == 0) or (cmp (splitLine[0], "PATCH") == 0):
SplitLine = splitLine[1].strip()[1:].split(")", 1)
Type = SplitLine[0]
Value = SplitLine[1].strip()[1:].strip().split()[0]
print " Type - (" + Type + "), Value - (" + Value + ")"
return [Value, Type]
return ["", ""]
def GetRebaseAddressFromReport(self):
try :
file = open(self.reportFile)
except Exception:
print "fail to open " + self.reportFile
return
try:
file.seek(0)
if (cmp(self.RebasePcd[0], "") != 0):
print "checking - " + self.RebasePcd[0]
ValuePair = self.GetPcdFromReport (file, self.RebasePcd[0])
self.RebasePcd[1] = ValuePair[0]
self.RebasePcd[2] = ValuePair[1]
finally:
file.close()
def DumpFileList(self, dir):
#print "DumpFileList - " + dir
if os.path.exists(dir) == False:
return
if os.path.isdir(dir):
for sub in os.listdir(dir):
#print "Get sub - " + sub
if sub[0] == "." :
continue
newDir = os.path.join(dir,sub)
if (os.path.isdir(newDir) == True):
self.DumpFileList(newDir)
else :
print "file - " + newDir
def main():
global FileChecker
fileChecker = FileChecker()
if (len(sys.argv) != 6) and (len(sys.argv) != 7):
print "usage: SyncBinFvInf <Target> <SourceRoot> <DestRoot> <ReportFile> <FspFvName> [<RebasePcdName>]"
return 0
fileChecker.target = sys.argv[1]
fileChecker.sourceRoot = sys.argv[2]
fileChecker.destRoot = sys.argv[3]
fileChecker.reportFile = sys.argv[4]
fileChecker.FvName = sys.argv[5]
if (len(sys.argv) == 7):
fileChecker.RebasePcd[0] = sys.argv[6]
fileChecker.GetRebaseAddressFromReport()
fileChecker.PrintRebasePcd (fileChecker.RebasePcd)
fileChecker.GetFileList (fileChecker.sourceRoot, fileChecker.sourceInfList, fileChecker.CheckSourceInf)
fileChecker.ProcessFvInf (fileChecker.FvName, fileChecker.RebasePcd)
if __name__ == '__main__':
sys.exit(main())
| edk2-platforms-master | Platform/Intel/MinPlatformPkg/Tools/PatchFv/SyncBinFvInf.py |
## @ RebaseBinFv.py
#
# Copyright (c) 2017 - 2019, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
import os
import re
import sys
import time
import shutil
import struct
import binascii
from ctypes import *
class GUID(Structure):
_fields_ = [
('Guid1', c_uint32),
('Guid2', c_uint16),
('Guid3', c_uint16),
('Guid4', ARRAY(c_uint8, 8)),
]
class EFI_FIRMWARE_VOLUME_HEADER(Structure):
_fields_ = [
('ZeroVector', ARRAY(c_uint8, 16)),
('FileSystemGuid', GUID),
('FvLength', c_uint64),
('Signature', c_uint32),
('Attributes', c_uint32),
('HeaderLength', c_uint16),
('Checksum', c_uint16),
('ExtHeaderOffset', c_uint16),
('Reserved', c_uint8),
('Revision', c_uint8),
]
class EFI_FIRMWARE_VOLUME_EXT_HEADER(Structure):
_fields_ = [
('FvName', GUID),
('ExtHeaderSize', c_uint32),
]
#
# File Types Definitions
#
EFI_FV_FILETYPE_ALL = 0x00
EFI_FV_FILETYPE_RAW = 0x01
EFI_FV_FILETYPE_FREEFORM = 0x02
EFI_FV_FILETYPE_SECURITY_CORE = 0x03
EFI_FV_FILETYPE_PEI_CORE = 0x04
EFI_FV_FILETYPE_DXE_CORE = 0x05
EFI_FV_FILETYPE_PEIM = 0x06
EFI_FV_FILETYPE_DRIVER = 0x07
EFI_FV_FILETYPE_COMBINED_PEIM_DRIVER = 0x08
EFI_FV_FILETYPE_APPLICATION = 0x09
EFI_FV_FILETYPE_SMM = 0x0A
EFI_FV_FILETYPE_FIRMWARE_VOLUME_IMAGE = 0x0B
EFI_FV_FILETYPE_COMBINED_SMM_DXE = 0x0C
EFI_FV_FILETYPE_SMM_CORE = 0x0D
EFI_FV_FILETYPE_OEM_MIN = 0xc0
EFI_FV_FILETYPE_OEM_MAX = 0xdf
EFI_FV_FILETYPE_DEBUG_MIN = 0xe0
EFI_FV_FILETYPE_DEBUG_MAX = 0xef
EFI_FV_FILETYPE_FFS_MIN = 0xf0
EFI_FV_FILETYPE_FFS_MAX = 0xff
EFI_FV_FILETYPE_FFS_PAD = 0xf0
#
# FFS File Attributes.
#
FFS_ATTRIB_LARGE_FILE = 0x01
FFS_ATTRIB_DATA_ALIGNMENT_2 = 0x02
FFS_ATTRIB_FIXED = 0x04
FFS_ATTRIB_DATA_ALIGNMENT = 0x38
FFS_ATTRIB_CHECKSUM = 0x40
#
# FFS File State Bits.
#
EFI_FILE_HEADER_CONSTRUCTION = 0x01
EFI_FILE_HEADER_VALID = 0x02
EFI_FILE_DATA_VALID = 0x04
EFI_FILE_MARKED_FOR_UPDATE = 0x08
EFI_FILE_DELETED = 0x10
EFI_FILE_HEADER_INVALID = 0x20
class EFI_FFS_FILE_HEADER(Structure):
_fields_ = [
('Name', GUID),
('IntegrityCheck', c_uint16),
('Type', c_uint8),
('Attributes', c_uint8),
('Size', ARRAY(c_uint8, 3)),
('State', c_uint8),
]
class EFI_FFS_FILE_HEADER2(Structure):
_fields_ = [
('Name', GUID),
('IntegrityCheck', c_uint16),
('Type', c_uint8),
('Attributes', c_uint8),
('Size', ARRAY(c_uint8, 3)),
('State', c_uint8),
('ExtendedSize', c_uint64),
]
#
# Pseudo type. It is used as a wild card when retrieving sections.
# The section type EFI_SECTION_ALL matches all section types.
#
EFI_SECTION_ALL = 0x00
#
# Encapsulation section Type values.
#
EFI_SECTION_COMPRESSION = 0x01
EFI_SECTION_GUID_DEFINED = 0x02
EFI_SECTION_DISPOSABLE = 0x03
#
# Leaf section Type values.
#
EFI_SECTION_PE32 = 0x10
EFI_SECTION_PIC = 0x11
EFI_SECTION_TE = 0x12
EFI_SECTION_DXE_DEPEX = 0x13
EFI_SECTION_VERSION = 0x14
EFI_SECTION_USER_INTERFACE = 0x15
EFI_SECTION_COMPATIBILITY16 = 0x16
EFI_SECTION_FIRMWARE_VOLUME_IMAGE = 0x17
EFI_SECTION_FREEFORM_SUBTYPE_GUID = 0x18
EFI_SECTION_RAW = 0x19
EFI_SECTION_PEI_DEPEX = 0x1B
EFI_SECTION_SMM_DEPEX = 0x1C
class EFI_COMMON_SECTION_HEADER(Structure):
_fields_ = [
('Size', ARRAY(c_uint8, 3)),
('Type', c_uint8),
]
class EFI_COMMON_SECTION_HEADER2(Structure):
_fields_ = [
('Size', ARRAY(c_uint8, 3)),
('Type', c_uint8),
('ExtendedSize', c_uint32),
]
class EFI_FV_FILETYPE:
ALL = 0x00
RAW = 0x01
FREEFORM = 0x02
SECURITY_CORE = 0x03
PEI_CORE = 0x04
DXE_CORE = 0x05
PEIM = 0x06
DRIVER = 0x07
COMBINED_PEIM_DRIVER = 0x08
APPLICATION = 0x09
SMM = 0x0a
FIRMWARE_VOLUME_IMAGE = 0x0b
COMBINED_SMM_DXE = 0x0c
SMM_CORE = 0x0d
OEM_MIN = 0xc0
OEM_MAX = 0xdf
DEBUG_MIN = 0xe0
DEBUG_MAX = 0xef
FFS_MIN = 0xf0
FFS_MAX = 0xff
FFS_PAD = 0xf0
class EFI_SECTION_TYPE:
ALL = 0x00
COMPRESSION = 0x01
GUID_DEFINED = 0x02
DISPOSABLE = 0x03
PE32 = 0x10
PIC = 0x11
TE = 0x12
DXE_DEPEX = 0x13
VERSION = 0x14
USER_INTERFACE = 0x15
COMPATIBILITY16 = 0x16
FIRMWARE_VOLUME_IMAGE = 0x17
FREEFORM_SUBTYPE_GUID = 0x18
RAW = 0x19
PEI_DEPEX = 0x1b
SMM_DEPEX = 0x1c
IMAGE_FILE_MACHINE_I386 = 0x014c
IMAGE_FILE_MACHINE_X64 = 0x8664
EFI_IMAGE_DIRECTORY_ENTRY_BASERELOC = 5
class EFI_IMAGE_DOS_HEADER(Structure):
_fields_ = [
('e_magic', c_uint16),
('e_cblp', c_uint16),
('e_cp', c_uint16),
('e_crlc', c_uint16),
('e_cparhdr', c_uint16),
('e_minalloc', c_uint16),
('e_maxalloc', c_uint16),
('e_ss', c_uint16),
('e_sp', c_uint16),
('e_csum', c_uint16),
('e_ip', c_uint16),
('e_cs', c_uint16),
('e_lfarlc', c_uint16),
('e_ovno', c_uint16),
('e_res', ARRAY(c_uint16, 4)),
('e_oemid', c_uint16),
('e_oeminfo', c_uint16),
('e_res2', ARRAY(c_uint16, 10)),
('e_lfanew', c_uint16)
]
class EFI_IMAGE_DATA_DIRECTORY(Structure):
_fields_ = [
('VirtualAddress', c_uint32),
('Size', c_uint32)
]
class EFI_IMAGE_FILE_HEADER(Structure):
_fields_ = [
('Machine', c_uint16),
('NumberOfSections', c_uint16),
('TimeDateStamp', c_uint32),
('PointerToSymbolTable', c_uint32),
('NumberOfSymbols', c_uint32),
('SizeOfOptionalHeader', c_uint16),
('Characteristics', c_uint16)
]
class EFI_IMAGE_OPTIONAL_HEADER32(Structure):
_fields_ = [
('Magic', c_uint16),
('MajorLinkerVersion', c_uint8),
('MinorLinkerVersion', c_uint8),
('SizeOfCode', c_uint32),
('SizeOfInitializedData', c_uint32),
('SizeOfUninitializedData', c_uint32),
('AddressOfEntryPoint', c_uint32),
('BaseOfCode', c_uint32),
('BaseOfData', c_uint32),
('ImageBase', c_uint32),
('SectionAlignment', c_uint32),
('FileAlignment', c_uint32),
('MajorOperatingSystemVersion', c_uint16),
('MinorOperatingSystemVersion', c_uint16),
('MajorImageVersion', c_uint16),
('MinorImageVersion', c_uint16),
('MajorSubsystemVersion', c_uint16),
('MinorSubsystemVersion', c_uint16),
('Win32VersionValue', c_uint32),
('SizeOfImage', c_uint32),
('SizeOfHeaders', c_uint32),
('CheckSum' , c_uint32),
('Subsystem', c_uint16),
('DllCharacteristics', c_uint16),
('SizeOfStackReserve', c_uint32),
('SizeOfStackCommit' , c_uint32),
('SizeOfHeapReserve', c_uint32),
('SizeOfHeapCommit' , c_uint32),
('LoaderFlags' , c_uint32),
('NumberOfRvaAndSizes', c_uint32),
('DataDirectory', ARRAY(EFI_IMAGE_DATA_DIRECTORY, 16))
]
class EFI_IMAGE_OPTIONAL_HEADER64(Structure):
_fields_ = [
('Magic', c_uint16),
('MajorLinkerVersion', c_uint8),
('MinorLinkerVersion', c_uint8),
('SizeOfCode', c_uint32),
('SizeOfInitializedData', c_uint32),
('SizeOfUninitializedData', c_uint32),
('AddressOfEntryPoint', c_uint32),
('BaseOfCode', c_uint32),
('ImageBase', c_uint64),
('SectionAlignment', c_uint32),
('FileAlignment', c_uint32),
('MajorOperatingSystemVersion', c_uint16),
('MinorOperatingSystemVersion', c_uint16),
('MajorImageVersion', c_uint16),
('MinorImageVersion', c_uint16),
('MajorSubsystemVersion', c_uint16),
('MinorSubsystemVersion', c_uint16),
('Win32VersionValue', c_uint32),
('SizeOfImage', c_uint32),
('SizeOfHeaders', c_uint32),
('CheckSum' , c_uint32),
('Subsystem', c_uint16),
('DllCharacteristics', c_uint16),
('SizeOfStackReserve', c_uint64),
('SizeOfStackCommit' , c_uint64),
('SizeOfHeapReserve', c_uint64),
('SizeOfHeapCommit' , c_uint64),
('LoaderFlags' , c_uint32),
('NumberOfRvaAndSizes', c_uint32),
('DataDirectory', ARRAY(EFI_IMAGE_DATA_DIRECTORY, 16))
]
class EFI_IMAGE_NT_HEADERS32(Structure):
_fields_ = [
('Signature', c_uint32),
('FileHeader', EFI_IMAGE_FILE_HEADER),
('OptionalHeader', EFI_IMAGE_OPTIONAL_HEADER32)
]
class EFI_IMAGE_NT_HEADERS64(Structure):
_fields_ = [
('Signature', c_uint32),
('FileHeader', EFI_IMAGE_FILE_HEADER),
('OptionalHeader', EFI_IMAGE_OPTIONAL_HEADER64)
]
class EFI_IMAGE_SECTION_HEADER(Structure):
_fields_ = [
('Name', ARRAY(c_uint8, 8)),
('VirtualSize', c_uint32),
('VirtualAddress', c_uint32),
('SizeOfRawData', c_uint32),
('PointerToRawData', c_uint32),
('PointerToRelocations', c_uint32),
('PointerToLinenumbers', c_uint32),
('NumberOfRelocations', c_uint16),
('NumberOfLinenumbers', c_uint16),
('Characteristics', c_uint32),
]
class EFI_TE_IMAGE_HEADER(Structure):
_fields_ = [
('Signature', ARRAY(c_char, 2)),
('Machine', c_uint16),
('NumberOfSections', c_uint8),
('Subsystem', c_uint8),
('StrippedSize', c_uint16),
('AddressOfEntryPoint', c_uint32),
('BaseOfCode', c_uint32),
('ImageBase', c_uint64),
('DataDirectoryBaseReloc', EFI_IMAGE_DATA_DIRECTORY),
('DataDirectoryDebug', EFI_IMAGE_DATA_DIRECTORY)
]
class EFI_IMAGE_DIRECTORY_ENTRY:
EXPORT = 0
IMPORT = 1
RESOURCE = 2
EXCEPTION = 3
SECURITY = 4
BASERELOC = 5
DEBUG = 6
COPYRIGHT = 7
GLOBALPTR = 8
TLS = 9
LOAD_CONFIG = 10
class PE_RELOC_BLOCK_HEADER(Structure):
_fields_ = [
('PageRVA', c_uint32),
('BlockSize', c_uint32)
]
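# A PE base-relocation block starts with PE_RELOC_BLOCK_HEADER and is followed
# by (BlockSize - 8) / 2 sixteen-bit entries; the high 4 bits of each entry
# hold the relocation type and the low 12 bits the offset within PageRVA's page.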
def AlignPtr (offset, alignment = 8):
return (offset + alignment - 1) & ~(alignment - 1)
def Bytes2Val (bytes):
return reduce(lambda x,y: (x<<8)|y, bytes[::-1] )
def Val2Bytes (value, blen):
return [(value>>(i*8) & 0xff) for i in range(blen)]
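# Quick illustrations of the helpers above (example values only):
#   AlignPtr(0x13, 8)                   -> 0x18
#   Bytes2Val([0x78, 0x56, 0x34, 0x12]) -> 0x12345678   (little-endian)
#   Val2Bytes(0x12345678, 4)            -> [0x78, 0x56, 0x34, 0x12]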
class PeTeImage:
def __init__(self, offset, data):
self.Offset = offset
tehdr = EFI_TE_IMAGE_HEADER.from_buffer (data, 0)
if tehdr.Signature == 'VZ': # TE image
self.TeHdr = tehdr
elif tehdr.Signature == 'MZ': # PE32 image
self.TeHdr = None
self.DosHdr = EFI_IMAGE_DOS_HEADER.from_buffer (data, 0)
self.PeHdr = EFI_IMAGE_NT_HEADERS32.from_buffer (data, self.DosHdr.e_lfanew)
if self.PeHdr.Signature != 0x4550:
raise Exception("ERROR: Invalid PE32 header !")
if self.PeHdr.FileHeader.SizeOfOptionalHeader < EFI_IMAGE_OPTIONAL_HEADER32.DataDirectory.offset:
raise Exception("ERROR: Unsupported PE32 image !")
if self.PeHdr.OptionalHeader.NumberOfRvaAndSizes <= EFI_IMAGE_DIRECTORY_ENTRY.BASERELOC:
raise Exception("ERROR: No relocation information available !")
self.Offset = offset
self.Data = data
self.RelocList = []
def IsTeImage(self):
return self.TeHdr is not None
def ParseReloc(self):
if self.IsTeImage():
rsize = self.TeHdr.DataDirectoryBaseReloc.Size
roffset = sizeof(self.TeHdr) - self.TeHdr.StrippedSize + self.TeHdr.DataDirectoryBaseReloc.VirtualAddress
else:
rsize = self.PeHdr.OptionalHeader.DataDirectory[EFI_IMAGE_DIRECTORY_ENTRY.BASERELOC].Size
roffset = self.PeHdr.OptionalHeader.DataDirectory[EFI_IMAGE_DIRECTORY_ENTRY.BASERELOC].VirtualAddress
alignment = 4
offset = roffset
while offset < roffset + rsize:
offset = AlignPtr(offset, 4)
blkhdr = PE_RELOC_BLOCK_HEADER.from_buffer(self.Data, offset)
offset += sizeof(blkhdr)
# Read relocation type,offset pairs
rlen = blkhdr.BlockSize - sizeof(PE_RELOC_BLOCK_HEADER)
rnum = rlen/sizeof(c_uint16)
rdata = (c_uint16 * rnum).from_buffer(self.Data, offset)
for each in rdata:
roff = each & 0xfff
rtype = each >> 12
if rtype == 0: # IMAGE_REL_BASED.ABSOLUTE:
continue
if rtype != 3: # IMAGE_REL_BASED_HIGHLOW
raise Exception("ERROR: Unsupported relocation type %d!" % rtype)
# Calculate the offset of the relocation
aoff = blkhdr.PageRVA + roff
if self.IsTeImage():
aoff += sizeof(self.TeHdr) - self.TeHdr.StrippedSize
self.RelocList.append((rtype, aoff))
offset += sizeof(rdata)
def Rebase(self, delta, fdbin):
count = 0
if delta == 0:
return count
for (rtype, roff) in self.RelocList:
if rtype == 0x03: # HIGHLOW
offset = roff + self.Offset
value = Bytes2Val(fdbin[offset:offset+sizeof(c_uint32)])
value += delta
fdbin[offset:offset+sizeof(c_uint32)] = Val2Bytes(value, sizeof(c_uint32))
count += 1
else:
raise Exception('ERROR: Unknown relocation type %d !' % rtype)
if self.IsTeImage():
offset = self.Offset + EFI_TE_IMAGE_HEADER.ImageBase.offset
size = EFI_TE_IMAGE_HEADER.ImageBase.size
else:
offset = self.Offset + self.DosHdr.e_lfanew
offset += EFI_IMAGE_NT_HEADERS32.OptionalHeader.offset
offset += EFI_IMAGE_OPTIONAL_HEADER32.ImageBase.offset
size = EFI_IMAGE_OPTIONAL_HEADER32.ImageBase.size
value = Bytes2Val(fdbin[offset:offset+size]) + delta
fdbin[offset:offset+size] = Val2Bytes(value, size)
return count
class Section:
def __init__(self, offset, secdata):
self.SecHdr = EFI_COMMON_SECTION_HEADER.from_buffer (secdata, 0)
self.SecData = secdata[0:int(self.SecHdr.Size)]
self.Offset = offset
class FirmwareFile:
def __init__(self, offset, filedata):
self.FfsHdr = EFI_FFS_FILE_HEADER.from_buffer (filedata, 0)
self.FfsData = filedata[0:int(self.FfsHdr.Size)]
self.Offset = offset
self.SecList = []
def ParseFfs(self):
ffssize = len(self.FfsData)
offset = sizeof(self.FfsHdr)
if self.FfsHdr.Name != '\xff' * 16:
while offset < ffssize:
sechdr = EFI_COMMON_SECTION_HEADER.from_buffer (self.FfsData, offset)
sec = Section (offset, self.FfsData[offset:offset + int(sechdr.Size)])
self.SecList.append(sec)
offset += int(sechdr.Size)
offset = AlignPtr(offset, 4)
class FirmwareVolume:
def __init__(self, offset, fvdata):
self.FvHdr = EFI_FIRMWARE_VOLUME_HEADER.from_buffer (fvdata, 0)
self.FvData = fvdata[0 : self.FvHdr.FvLength]
self.Offset = offset
if self.FvHdr.ExtHeaderOffset > 0:
self.FvExtHdr = EFI_FIRMWARE_VOLUME_EXT_HEADER.from_buffer (self.FvData, self.FvHdr.ExtHeaderOffset)
else:
self.FvExtHdr = None
self.FfsList = []
def ParseFv(self):
fvsize = len(self.FvData)
if self.FvExtHdr:
offset = self.FvHdr.ExtHeaderOffset + self.FvExtHdr.ExtHeaderSize
else:
offset = self.FvHdr.HeaderLength
offset = AlignPtr(offset)
while offset < fvsize:
ffshdr = EFI_FFS_FILE_HEADER.from_buffer (self.FvData, offset)
if (ffshdr.Name == '\xff' * 16) and (int(ffshdr.Size) == 0xFFFFFF):
offset = fvsize
else:
ffs = FirmwareFile (offset, self.FvData[offset:offset + int(ffshdr.Size)])
ffs.ParseFfs()
self.FfsList.append(ffs)
offset += int(ffshdr.Size)
offset = AlignPtr(offset)
class FileChecker:
def __init__(self):
# sourceRoot == WORKSPACE
# sourceRoot != PACKAGES_PATH
self.RebasePcd = ["", "", "", ""]
self.FvName = ""
self.target = ""
self.sourceRoot = ""
self.reportFile = ""
def GetSectionName(self, line):
splitLine = line[1:-1].split(".")
return splitLine[0]
def IsSyncSection(self, line):
name = self.GetSectionName(line)
for sectionName in self.SyncSectionList:
if (cmp (sectionName, name) == 0) :
return True
return False
def PrintRebasePcd(self, pcd):
print "PCD: " + pcd[0] + "|" + pcd[3] + " <== " + pcd[1] + "(" + pcd[2] + ")"
def RebaseFv(self, fvName, rebasePcd):
sourceFileName = os.path.join(self.sourceRoot,fvName,self.target,fvName+".Fv")
print "rebasing(FV) - " + sourceFileName
try :
file = open(sourceFileName, "rb")
except Exception:
print "fail to open " + sourceFileName
return
try:
buffer = file.read()
data = bytearray(buffer)
file.close()
FvHeader = EFI_FIRMWARE_VOLUME_HEADER.from_buffer (data, 0)
print "HeaderLength - " + hex(FvHeader.HeaderLength)
print "ExtHeaderOffset - " + hex(FvHeader.ExtHeaderOffset)
if (FvHeader.ExtHeaderOffset == 0):
Offset = FvHeader.HeaderLength
else:
FvExHeader = EFI_FIRMWARE_VOLUME_EXT_HEADER.from_buffer(data, FvHeader.ExtHeaderOffset)
print " FvName - %08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x" % (FvExHeader.FvName.Guid1, FvExHeader.FvName.Guid2, FvExHeader.FvName.Guid3, FvExHeader.FvName.Guid4[0], FvExHeader.FvName.Guid4[1], FvExHeader.FvName.Guid4[2], FvExHeader.FvName.Guid4[3], FvExHeader.FvName.Guid4[4], FvExHeader.FvName.Guid4[5], FvExHeader.FvName.Guid4[6], FvExHeader.FvName.Guid4[7])
print " ExtHeaderSize - " + hex(FvExHeader.ExtHeaderSize)
Offset = FvHeader.ExtHeaderOffset + FvExHeader.ExtHeaderSize
Offset = (Offset + 0x7) & ~0x7
while (Offset < FvHeader.FvLength) :
FfsHeader = EFI_FFS_FILE_HEADER.from_buffer (data, Offset)
FfsOffset = Offset
FfsSize = FfsHeader.Size[0] + (FfsHeader.Size[1] << 8) + (FfsHeader.Size[2] << 16)
if (FfsSize == 0xFFFFFF) :
break
#print "Ffs - " + hex(FfsOffset)
if (FfsHeader.Type == 0xFF) or (FfsHeader.Type == EFI_FV_FILETYPE_FFS_PAD) :
Offset = (FfsOffset + FfsSize + 7) & ~0x7
continue
print "Ffs - %08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x" % (FfsHeader.Name.Guid1, FfsHeader.Name.Guid2, FfsHeader.Name.Guid3, FfsHeader.Name.Guid4[0], FfsHeader.Name.Guid4[1], FfsHeader.Name.Guid4[2], FfsHeader.Name.Guid4[3], FfsHeader.Name.Guid4[4], FfsHeader.Name.Guid4[5], FfsHeader.Name.Guid4[6], FfsHeader.Name.Guid4[7])
Offset = Offset + sizeof(EFI_FFS_FILE_HEADER)
while (Offset < FfsOffset + FfsSize) :
SectionHeader = EFI_COMMON_SECTION_HEADER.from_buffer (data, Offset)
#print " Section - " + hex(Offset)
if (SectionHeader.Type == EFI_SECTION_PE32) or (SectionHeader.Type == EFI_SECTION_TE) :
PeOffset = Offset + sizeof(EFI_COMMON_SECTION_HEADER)
print " PE - " + hex(PeOffset) + "(" + binascii.hexlify(data[PeOffset:PeOffset+2]) + ")"
newbase = int(rebasePcd[1],16)
oldbase = int(rebasePcd[3],16)
delta = newbase - oldbase
print " delta - " + hex(delta) + "(" + hex(oldbase) + " <== " + hex(newbase) + ")"
PeLength = FfsSize-sizeof(EFI_FFS_FILE_HEADER);
img = PeTeImage(PeOffset, data[PeOffset:PeOffset + PeLength])
img.ParseReloc()
img.Rebase(delta, data)
SectionSize = SectionHeader.Size[0] + (SectionHeader.Size[1] << 8) + (SectionHeader.Size[2] << 16)
Offset = (Offset + SectionSize + 3) & ~0x3
Offset = (FfsOffset + FfsSize + 7) & ~0x7
file = open(sourceFileName, "wb")
file.write(data)
finally:
file.close()
def GetPcdFromReport(self, file, pcd):
FoundPkg = False
pcdSplit = pcd.split(".")
TargetPkg = pcdSplit[0]
TargetPcd = pcdSplit[1]
while 1:
line = file.readline()
if not line:
break
newline = line[:-1].replace('\r','')
if (cmp (newline, TargetPkg) == 0):
FoundPkg = True
continue
if (cmp (newline, "") == 0) or ((cmp (newline[0], " ") != 0) and (cmp (newline[0], "0") != 0)):
FoundPkg = False
if (FoundPkg == True) :
newline = newline.strip()
splitLine = newline.split(" ", 2)
if (cmp (splitLine[0], "*F") == 0) or (cmp (splitLine[0], "*P") == 0):
if (cmp (splitLine[1], TargetPcd) == 0):
print "found - " + TargetPkg + "." + TargetPcd
splitLine = splitLine[2].strip()[1:].strip().split(" ", 1)
if (cmp (splitLine[0], "FIXED") == 0) or (cmp (splitLine[0], "PATCH") == 0):
SplitLine = splitLine[1].strip()[1:].split(")", 1)
Type = SplitLine[0]
Value = SplitLine[1].strip()[1:].strip().split()[0]
print " Type - (" + Type + "), Value - (" + Value + ")"
return [Value, Type]
return ["", ""]
def GetOldFvBase (self, fvName, PcdName):
ParseBase = False
Value = ""
fileName = os.path.join(self.sourceRoot,fvName,self.target,fvName+".inf")
try :
file = open(fileName)
except Exception:
print "fail to open " + fileName
return
try:
while 1:
line = file.readline()
if not line:
break
newline = line[:-1].replace('\r','')
if cmp (newline, "") == 0:
continue
if cmp (newline, "#![Pcd]") == 0:
ParseBase = True
continue
if ParseBase == True :
if (cmp (line[0:2], "#!") != 0) :
ParseBase = False
continue
newline = newline[2:].strip()
splitLine = newline.split("|")
if cmp (PcdName, splitLine[0]) == 0:
Value = splitLine[1]
finally:
file.close()
return Value
def SetNewFvBase (self, fvName, PcdName, OldFvBase, NewFvBase):
fileName = os.path.join(self.sourceRoot,fvName,self.target,fvName+".inf")
print "update - " + fileName
try :
file = open(fileName, "r")
except Exception:
print "fail to open " + fileName
return
try:
lines = file.readlines()
file.close()
ParseBase = False
for index in range(len(lines)):
line = lines[index]
if not line:
break
newline = line[:-1].strip()
if cmp (newline, "") == 0:
continue
if cmp (newline, "#![Pcd]") == 0:
ParseBase = True
continue
if ParseBase == True :
if (cmp (line[0:2], "#!") != 0) :
ParseBase = False
continue
newline = newline[2:].strip()
splitLine = newline.split("|")
if cmp (PcdName, splitLine[0]) == 0:
if cmp (OldFvBase, splitLine[1]) != 0:
print "ERROR: OldFvBase mismatch!"
else:
lines[index] = "#! " + PcdName + "|" + NewFvBase + "\n"
break
file = open(fileName, "w")
file.writelines(lines)
finally:
file.close()
def GetRebaseAddressFromReport(self):
try :
file = open(self.reportFile)
except Exception:
print "fail to open " + self.reportFile
return
try:
file.seek(0)
print "checking - " + self.RebasePcd[0]
ValuePair = self.GetPcdFromReport (file, self.RebasePcd[0])
self.RebasePcd[1] = ValuePair[0]
self.RebasePcd[2] = ValuePair[1]
finally:
file.close()
def main():
global FileChecker
fileChecker = FileChecker()
if (len(sys.argv) != 6) :
print "usage: RebaseBinFv <Target> <SourceRoot> <ReportFile> <FvName> <RebasePcdName>"
return 0
fileChecker.target = sys.argv[1]
fileChecker.sourceRoot = sys.argv[2]
fileChecker.reportFile = sys.argv[3]
fileChecker.FvName = sys.argv[4]
fileChecker.RebasePcd[0] = sys.argv[5]
fileChecker.GetRebaseAddressFromReport()
fileChecker.RebasePcd[3] = fileChecker.GetOldFvBase (fileChecker.FvName, fileChecker.RebasePcd[0])
fileChecker.PrintRebasePcd(fileChecker.RebasePcd)
fileChecker.RebaseFv (fileChecker.FvName, fileChecker.RebasePcd)
fileChecker.SetNewFvBase (fileChecker.FvName, fileChecker.RebasePcd[0], fileChecker.RebasePcd[3], fileChecker.RebasePcd[1])
if __name__ == '__main__':
sys.exit(main())
| edk2-platforms-master | Platform/Intel/MinPlatformPkg/Tools/PatchFv/RebaseBinFv.py |
## @ PatchBfv.py
#
# Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
import os
import re
import sys
import time
import shutil
import struct
import binascii
from ctypes import *
class FileChecker:
def __init__(self):
self.fdName = ""
self.reportFile = ""
self.pcd = ["", "", ""]
def PrintPcd(self):
print "PCD: " + self.pcd[0] + "|" + self.pcd[1] + "(" + self.pcd[2] + ")"
def ProcessReport(self):
try :
file = open(self.reportFile)
except Exception:
print "fail to open " + self.reportFile
return
try:
file.seek(0)
print "checking - " + self.pcd[0]
ValuePair = self.GetPcdFromReport (file, self.pcd[0])
self.pcd[1] = ValuePair[0]
self.pcd[2] = ValuePair[1]
finally:
file.close()
self.PrintPcd()
def PatchFd(self):
fileName = self.fdName
print "patching BFV - " + fileName
try :
file = open(fileName, "rb")
except Exception:
print "fail to open " + fileName
return
try:
buffer = file.read()
data = bytearray(buffer)
file.close()
offset = -4
l = struct.pack("L", int(self.pcd[1],16))
print " [" + hex(offset) + "] " + binascii.hexlify(data[-4:]) + " <= " + binascii.hexlify(l)
data[-4:] = l
file = open(fileName, "wb")
file.write(data[0:])
finally:
file.close()
def GetPcdFromReport(self, file, pcd):
FoundPkg = False
pcdSplit = pcd.split(".")
TargetPkg = pcdSplit[0]
TargetPcd = pcdSplit[1]
while 1:
line = file.readline()
if not line:
break
newline = line[:-1]
if (cmp (newline, TargetPkg) == 0):
FoundPkg = True
continue
if (cmp (newline, "") == 0) or ((cmp (newline[0], " ") != 0) and (cmp (newline[0], "0") != 0)):
FoundPkg = False
if (FoundPkg == True) :
newline = newline.strip()
splitLine = newline.split(" ", 2)
if (cmp (splitLine[0], "*F") == 0) or (cmp (splitLine[0], "*P") == 0) :
if (cmp (splitLine[1], TargetPcd) == 0):
print "found - " + TargetPkg + "." + TargetPcd
splitLine = splitLine[2].strip()[1:].strip().split(" ", 1)
if (cmp (splitLine[0], "FIXED") == 0) or (cmp (splitLine[0], "PATCH") == 0):
SplitLine = splitLine[1].strip()[1:].split(")", 1)
Type = SplitLine[0]
Value = SplitLine[1].strip()[1:].strip().split()[0]
print " Type - (" + Type + "), Value - (" + Value + ")"
return [Value, Type]
return ["", ""]
def main():
global FileChecker
fileChecker = FileChecker()
if (len(sys.argv) != 4) :
print "usage: PatchBfv <FdFile> <ReportFile> <BfvPcdName>"
return 0
fileChecker.fdName = sys.argv[1]
fileChecker.reportFile = sys.argv[2]
fileChecker.pcd[0] = sys.argv[3]
fileChecker.ProcessReport ()
fileChecker.PatchFd ()
if __name__ == '__main__':
sys.exit(main())
| edk2-platforms-master | Platform/Intel/MinPlatformPkg/Tools/PatchFv/PatchBfv.py |
## @ PatchBinFv.py
#
# Copyright (c) 2017 - 2019, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
import os
import re
import sys
import time
import shutil
import struct
import binascii
from ctypes import *
class FileChecker:
def __init__(self):
self.SyncSectionList = ["PatchPcd"]
self.FvName = ""
self.target = ""
self.sourceRoot = ""
self.reportFile = ""
self.InfPcdList = []
def GetSectionName(self, line):
splitLine = line[1:-1].split(".")
return splitLine[0]
def IsSyncSection(self, line):
name = self.GetSectionName(line)
for sectionName in self.SyncSectionList:
if (cmp (sectionName, name) == 0) :
return True
return False
def PrintPcdList(self, pcdList):
for pcd in pcdList:
print "PCD: " + pcd[0] + "|" + pcd[1] + "|" + pcd[2] + " <== " + pcd[3] + "(" + pcd[4] + ")"
def GetInfFileGuid(self, fileName):
guid = ""
try :
file = open(fileName)
except Exception:
print "fail to open " + fileName
return
try:
while 1:
line = file.readline()
if not line:
break
newline = line[:-1]
if cmp (line[:11], " FILE_GUID") == 0:
splitLine = line.split("=")
templine = splitLine[1]
guid = templine[1:1+36]
finally:
file.close()
return guid
def ParseInfFile(self, fileName):
SyncToDest = False
try :
file = open(fileName)
except Exception:
print "fail to open " + fileName
return
try:
while 1:
line = file.readline()
if not line:
break
newline = line[:-1]
if cmp (line[0], "#") == 0:
continue
if cmp (line[0], "[") == 0:
SyncToDest = self.IsSyncSection(line)
PatchOffset = False
if (cmp (self.GetSectionName(line), "PatchPcd") == 0) :
PatchOffset = True
continue
if SyncToDest == True :
line = line.strip()
if (cmp (line, "") == 0) :
continue
if (cmp (line[0], "#") == 0) :
continue
splitLine = line.split(" ")
line = splitLine[0]
splitLine = line.split("|")
self.InfPcdList.append([splitLine[0], splitLine[1], splitLine[2], "", ""])
finally:
file.close()
return
def ProcessFvInf(self, fvName):
sourceFileName = os.path.join(self.sourceRoot,fvName,self.target,fvName+".inf")
print "\nprocessing - " + sourceFileName
fileGuid = self.GetInfFileGuid (sourceFileName)
print "FV NAME GUID - " + fileGuid
self.InfPcdList = []
self.ParseInfFile(sourceFileName)
self.InfPcdList.sort()
#self.PrintPcdList(self.InfPcdList)
try :
file = open(self.reportFile)
except Exception:
print "fail to open " + self.reportFile
return
try:
for pcd in self.InfPcdList:
file.seek(0)
print "checking - " + pcd[0]
ValuePair = self.GetPcdFromReport (file, pcd[0])
pcd[3] = ValuePair[0]
pcd[4] = ValuePair[1]
finally:
file.close()
self.PrintPcdList(self.InfPcdList)
def PatchFv(self, fvName):
sourceFileName = os.path.join(self.sourceRoot,fvName,self.target,fvName+".Fv")
print "patching - " + sourceFileName
try :
file = open(sourceFileName, "rb")
except Exception:
print "fail to open " + sourceFileName
return
try:
buffer = file.read()
data = bytearray(buffer)
file.close()
for pcd in self.InfPcdList:
offset = int(pcd[2], 16)
if (cmp (pcd[4], "BOOLEAN") == 0) or (cmp (pcd[4], "UINT8") == 0):
b = struct.pack("B", int(pcd[3],16))
print " [" + hex(offset) + "] " + binascii.hexlify(data[offset:offset+1]) + " <= " + binascii.hexlify(b)
data[offset:offset+1] = b
elif (cmp (pcd[4], "UINT16") == 0):
h = struct.pack("H", int(pcd[3],16))
print " [" + hex(offset) + "] " + binascii.hexlify(data[offset:offset+2]) + " <= " + binascii.hexlify(h)
data[offset:offset+2] = h
elif (cmp (pcd[4], "UINT32") == 0):
l = struct.pack("I", int(pcd[3],16))
print " [" + hex(offset) + "] " + binascii.hexlify(data[offset:offset+4]) + " <= " + binascii.hexlify(l)
data[offset:offset+4] = l
elif (cmp (pcd[4], "UINT64") == 0):
q = struct.pack("Q", int(pcd[3],16))
print " [" + hex(offset) + "] " + binascii.hexlify(data[offset:offset+8]) + " <= " + binascii.hexlify(q)
data[offset:offset+8] = q
file = open(sourceFileName, "wb")
file.write(data)
finally:
file.close()
def GetPcdFromReport(self, file, pcd):
FoundPkg = False
pcdSplit = pcd.split(".")
TargetPkg = pcdSplit[0]
TargetPcd = pcdSplit[1]
while 1:
line = file.readline()
if not line:
break
newline = line[:-1]
if (cmp (newline, TargetPkg) == 0):
FoundPkg = True
continue
if (cmp (newline, "") == 0) or ((cmp (newline[0], " ") != 0) and (cmp (newline[0], "0") != 0)):
FoundPkg = False
if (FoundPkg == True) :
newline = newline.strip()
splitLine = newline.split(" ", 2)
if (cmp (splitLine[0], "*F") == 0) or (cmp (splitLine[0], "*P") == 0):
if (cmp (splitLine[1], TargetPcd) == 0):
print "found - " + TargetPkg + "." + TargetPcd
splitLine = splitLine[2].strip()[1:].strip().split(" ", 1)
if (cmp (splitLine[0], "FIXED") == 0) or (cmp (splitLine[0], "PATCH") == 0):
SplitLine = splitLine[1].strip()[1:].split(")", 1)
Type = SplitLine[0]
Value = SplitLine[1].strip()[1:].strip().split()[0]
print " Type - (" + Type + "), Value - (" + Value + ")"
return [Value, Type]
return ["", ""]
def main():
global FileChecker
fileChecker = FileChecker()
if (len(sys.argv) != 5) :
print "usage: PatchBinFv <Target> <SourceRoot> <ReportFile> <FvName>"
return 0
fileChecker.target = sys.argv[1]
fileChecker.sourceRoot = sys.argv[2]
fileChecker.reportFile = sys.argv[3]
fileChecker.FvName = sys.argv[4]
fileChecker.ProcessFvInf (fileChecker.FvName)
fileChecker.PatchFv (fileChecker.FvName)
if __name__ == '__main__':
sys.exit(main())
| edk2-platforms-master | Platform/Intel/MinPlatformPkg/Tools/PatchFv/PatchBinFv.py |
## @file
#
# Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
ParseVar
'''
import os
import sys
import argparse
import subprocess
import uuid
import struct
import collections
import binascii
from ctypes import *
#
# Globals for help information
#
__prog__ = 'ParseVar'
__version__ = '%s Version %s' % (__prog__, '0.1 ')
__copyright__ = 'Copyright (c) 2017, Intel Corporation. All rights reserved.'
__usage__ = '%s -e|-d [options] <input_file>' % (__prog__)
class GUID(Structure):
_fields_ = [
('Guid1', c_uint32),
('Guid2', c_uint16),
('Guid3', c_uint16),
('Guid4', ARRAY(c_uint8, 8)),
]
class TIME(Structure):
_fields_ = [
('Year', c_uint16),
('Month', c_uint8),
('Day', c_uint8),
('Hour', c_uint8),
('Minute', c_uint8),
('Second', c_uint8),
('Pad1', c_uint8),
('Nanosecond', c_uint32),
('TimeZone', c_uint16),
('Daylight', c_uint8),
('Pad2', c_uint8),
]
EFI_VARIABLE_GUID = [0xddcf3616, 0x3275, 0x4164, 0x98, 0xb6, 0xfe, 0x85, 0x70, 0x7f, 0xfe, 0x7d]
EFI_AUTHENTICATED_VARIABLE_GUID = [0xaaf32c78, 0x947b, 0x439a, 0xa1, 0x80, 0x2e, 0x14, 0x4e, 0xc3, 0x77, 0x92]
# Variable Store Header Format.
VARIABLE_STORE_FORMATTED = 0x5a
# Variable Store Header State.
VARIABLE_STORE_HEALTHY = 0xfe
class VARIABLE_STORE_HEADER(Structure):
_fields_ = [
('Signature', GUID),
('Size', c_uint32),
('Format', c_uint8),
('State', c_uint8),
('Reserved', c_uint16),
('Reserved1', c_uint32),
]
# Variable data start flag.
VARIABLE_DATA = 0x55AA
# Variable State flags.
VAR_IN_DELETED_TRANSITION = 0xfe
VAR_DELETED = 0xfd
VAR_HEADER_VALID_ONLY = 0x7f
VAR_ADDED = 0x3f
class VARIABLE_HEADER(Structure):
_fields_ = [
('StartId', c_uint16),
('State', c_uint8),
('Reserved', c_uint8),
('Attributes', c_uint32),
('NameSize', c_uint32),
('DataSize', c_uint32),
('VendorGuid', GUID),
]
class AUTHENTICATED_VARIABLE_HEADER(Structure):
_fields_ = [
('StartId', c_uint16),
('State', c_uint8),
('Reserved', c_uint8),
('Attributes', c_uint32),
('MonotonicCount', c_uint64),
('TimeStamp', TIME),
('PubKeyIndex', c_uint32),
('NameSize', c_uint32),
('DataSize', c_uint32),
('VendorGuid', GUID),
]
# Alignment of Variable Data Header in Variable Store region.
HEADER_ALIGNMENT = 4
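# Variable headers are advanced with (Offset + 3) & ~3 below, i.e. rounded up
# to the next 4-byte boundary; for example an offset of 0x46 aligns to 0x48.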
class DEFAULT_INFO(Structure):
_fields_ = [
('DefaultId', c_uint16),
('BoardId', c_uint8),
('Reserved', c_uint8),
]
class DEFAULT_DATA(Structure):
_fields_ = [
('HeaderSize', c_uint16),
('DefaultInfo', DEFAULT_INFO),
]
def DumpHexData(Data):
CharPerLine = 8
DataLen = len(Data)
Count = DataLen / CharPerLine
Rem = DataLen % CharPerLine
CountIndex = 0
for CountIndex in range (0, Count) :
TempData = Data[CountIndex * CharPerLine:CountIndex * CharPerLine + CharPerLine]
print "#// %04x: "%(CountIndex*CharPerLine) + binascii.hexlify(TempData)
if Rem != 0 :
TempData = Data[Count * CharPerLine:Count * CharPerLine + Rem]
print "#// %04x: "%(Count*CharPerLine) + binascii.hexlify(TempData)
def DumpBin(Data):
DataLen = len(Data)
for Index in range (0, DataLen - 1) :
print "0x%02x,"%(Data[Index]),
print "0x%02x"%(Data[DataLen - 1]),
def DumpName(Data):
DataLen = len(Data)
for Index in range (0, DataLen/2 - 1) :
print "%c"%(Data[Index * 2]),
if __name__ == '__main__':
#
# Create command line argument parser object
#
parser = argparse.ArgumentParser(prog=__prog__, version=__version__, usage=__usage__, description=__copyright__, conflict_handler='resolve')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-e", action="store_true", dest='Encode', help='encode file')
group.add_argument("-d", action="store_true", dest='Decode', help='decode file')
parser.add_argument("-v", "--verbose", dest='Verbose', action="store_true", help="increase output messages")
parser.add_argument("-q", "--quiet", dest='Quiet', action="store_true", help="reduce output messages")
parser.add_argument("--debug", dest='Debug', type=int, metavar='[0-9]', choices=range(0,10), default=0, help="set debug level")
parser.add_argument(metavar="input_file", dest='InputFile', type=argparse.FileType('rb'), help="specify the input filename")
#
# Parse command line arguments
#
args = parser.parse_args()
#
# Read input file into a buffer and save input filename
#
args.InputFileName = args.InputFile.name
args.InputFileBuffer = args.InputFile.read()
args.InputFile.close()
if args.Encode:
print 'Unsupported'
if args.Decode:
print '#//FCE binary'
FullInputFileBuffer = bytearray(args.InputFileBuffer)
FullSize = len(FullInputFileBuffer)
DefaultData = DEFAULT_DATA.from_buffer (bytearray(args.InputFileBuffer), 0)
print "#// DEFAULT_DATA:"
print "#// HeaderSize - 0x%04x" % DefaultData.HeaderSize
print "#// DefaultId - 0x%04x" % DefaultData.DefaultInfo.DefaultId
print "#// BoardId - 0x%02x" % DefaultData.DefaultInfo.BoardId
print ""
Offset = DefaultData.HeaderSize
VariableStoreHeader = VARIABLE_STORE_HEADER.from_buffer (bytearray(args.InputFileBuffer), Offset)
print "#// VARIABLE_STORE_HEADER:"
print "#// Signature - %08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x" % (VariableStoreHeader.Signature.Guid1, VariableStoreHeader.Signature.Guid2, VariableStoreHeader.Signature.Guid3, VariableStoreHeader.Signature.Guid4[0], VariableStoreHeader.Signature.Guid4[1], VariableStoreHeader.Signature.Guid4[2], VariableStoreHeader.Signature.Guid4[3], VariableStoreHeader.Signature.Guid4[4], VariableStoreHeader.Signature.Guid4[5], VariableStoreHeader.Signature.Guid4[6], VariableStoreHeader.Signature.Guid4[7])
print "#// Size - 0x%08x" % VariableStoreHeader.Size
print "#// Format - 0x%02x" % VariableStoreHeader.Format
print "#// State - 0x%02x" % VariableStoreHeader.State
print ""
Offset += sizeof(VARIABLE_STORE_HEADER)
Offset = (Offset + HEADER_ALIGNMENT - 1) & (~(HEADER_ALIGNMENT - 1))
if VariableStoreHeader.Format != VARIABLE_STORE_FORMATTED :
sys.exit(0)
if VariableStoreHeader.State != VARIABLE_STORE_HEALTHY :
sys.exit(0)
VarIndex = 1
while Offset < FullSize :
VariableHeader = VARIABLE_HEADER.from_buffer (bytearray(args.InputFileBuffer), Offset)
print "#// VARIABLE_HEADER:"
print "#// StartId - 0x%04x" % VariableHeader.StartId
print "#// State - 0x%02x" % VariableHeader.State
print "#// Attributes - 0x%08x" % VariableHeader.Attributes
print "#// NameSize - 0x%08x" % VariableHeader.NameSize
print "#// DataSize - 0x%08x" % VariableHeader.DataSize
print "#// VendorGuid - %08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x" % (VariableHeader.VendorGuid.Guid1, VariableHeader.VendorGuid.Guid2, VariableHeader.VendorGuid.Guid3, VariableHeader.VendorGuid.Guid4[0], VariableHeader.VendorGuid.Guid4[1], VariableHeader.VendorGuid.Guid4[2], VariableHeader.VendorGuid.Guid4[3], VariableHeader.VendorGuid.Guid4[4], VariableHeader.VendorGuid.Guid4[5], VariableHeader.VendorGuid.Guid4[6], VariableHeader.VendorGuid.Guid4[7])
Offset += sizeof(VARIABLE_HEADER)
if VariableHeader.StartId != VARIABLE_DATA :
sys.exit(0)
Name = FullInputFileBuffer[Offset:Offset + VariableHeader.NameSize]
AsciiName = []
for Int in Name:
if Int == 0:
continue
AsciiName.append(chr(Int))
AsciiName = ''.join(AsciiName)
print "#// Name - L\"" + AsciiName + "\""
#print "#// Name - L\"",
#DumpName(Name)
#print "\""
Offset += VariableHeader.NameSize
print "#// Data - "
Data=FullInputFileBuffer[Offset:Offset + VariableHeader.DataSize]
DumpHexData (Data)
Offset += VariableHeader.DataSize
print " gOemSkuTokenSpaceGuid.Pcd" + AsciiName + "Data|{",
DumpBin(Data)
#print "}|VOID*|0x000F%04x" % VarIndex
print "}"
print ""
#OutputFile = open(AsciiName, 'wb')
#OutputFile.write(Data)
#OutputFile.close()
VarIndex = VarIndex + 1
Offset = (Offset + HEADER_ALIGNMENT - 1) & (~(HEADER_ALIGNMENT - 1))
| edk2-platforms-master | Platform/Intel/MinPlatformPkg/Tools/ParseVar/ParseVar.py |
## @ CheckCodeBase.py
#
# Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
import os
import re
import sys
import time
import shutil
class FileChecker:
def __init__(self):
# sourceRoot == WORKSPACE
# sourceRoot != PACKAGES_PATH
self.sourceRoot = ""
self.includeCoreList = ["\\CryptoPkg\\", "\\FatBinPkg\\", "\\FatPkg\\", "\\IntelFrameworkModulePkg\\", "\\IntelFrameworkPkg\\", "\\IntelFsp2Pkg\\", "\\IntelFsp2WrapperPkg\\", "\\IntelFspPkg\\", "\\IntelFspWrapperPkg\\", "\\IntelSiliconPkg\\", "\\MdeModulePkg\\", "\\MdePkg\\", "\\NetworkPkg\\", "\\PcAtChipsetPkg\\", "\\PerformancePkg\\", "\\SecurityPkg\\", "\\ShellBinPkg\\", "\\ShellPkg\\", "\\SignedCapsulePkg\\", "\\SourceLevelDebugPkg\\", "\\UefiCpuPkg\\"]
self.includeList = self.includeCoreList
        self.excludeCoreList = ["\\BaseTools\\", "\\Conf\\", "\\Tools\\", "\\Build\\", "\\tool\\", "\\.svn", "\\.git", "\\Override\\", "\\SampleCode\\", "\\openssl"]
        self.excludeList = self.excludeCoreList
self.usedModuleList = []
self.usedLibraryList = []
self.makefileList = []
self.usedIncludeFileList = []
self.usedModuleFileList = []
self.usedLibraryFileList = []
self.usedFileList = []
self.usedPackageList = []
self.allModuleList = []
self.allLibraryList = []
self.allDirList = []
self.allIncludeFileList = []
self.allModuleFileList = []
self.allLibraryFileList = []
self.allFileList = []
self.allPackageList = []
self.unusedModuleList = []
self.unusedLibraryList = []
self.unusedIncludeFileList = []
self.unusedModuleFileList = []
self.unusedLibraryFileList = []
self.unusedFileList = []
self.unusedPackageList = []
self.unusedPackageFileList = []
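        # The lists above fall into three groups: used* (whatever the build's Makefiles actually
        # reference), all* (everything found under the source root, subject to the include/exclude
        # filters), and unused* (the difference); they are populated on demand by the Get*List methods.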
def CheckFile(self, file):
if os.path.isfile(file):
return file.decode('gbk')
else:
return ""
def CheckDir(self, file):
if os.path.isdir(file):
return file.decode('gbk')
else:
return ""
def IsIncludeFile(self, file):
if (cmp (file[-2:], ".h") == 0) and (file.find ("Pkg\\Include") != -1):
return True
else:
return False
def IsLibraryFile(self, file):
if (file.find ("Pkg\\Library") != -1):
return True
else:
return False
def CheckIncludeFile(self, file):
if self.IsIncludeFile(file) and (self.IsInExclusiveList(file) == False):
return file.decode('gbk')
else:
return ""
def GetModulePathFromMakefile (self, file):
makefile = open (file, "r")
data = makefile.read ()
for line in data.split("\n"):
# MODULE_DIR = c:\home\edkiigit\edk2\MdeModulePkg\Library\BaseSerialPortLib16550
if "MODULE_DIR =" in line:
moduleDir = line.split(" = ")[1]
# BUILD_DIR = c:\home\edkiigit\Build\KabylakeOpenBoardPkg\KabylakeRvp3\DEBUG_VS2015x86
if "BUILD_DIR =" in line:
buildDir = line.split(" = ")[1]
makefile.close()
# record sourceRoot = c:\home\Edk-II
self.sourceRoot = buildDir.split("\\Build\\")[0]
if (self.IsInExclusiveList(moduleDir) == False):
self.AddPackageList(moduleDir, self.usedPackageList)
return moduleDir
def AddPackageList(self, file, packageList):
packageName = file.split("Pkg")
packagePath = packageName[0] + "Pkg"
if packagePath not in packageList:
packageList.append(packagePath)
def IsInPackageList(self, file):
for package in self.usedPackageList:
if file.find(package) != -1:
return True
return False
def CheckUsedModule(self, file):
if (cmp (file[-8:], "Makefile") == 0) and (self.IsLibraryFile(file) == False):
finalDir = self.GetModulePathFromMakefile(file)
if self.IsInExclusiveList(finalDir):
return ""
return finalDir.decode('gbk')
else:
return ""
def CheckUsedLibrary(self, file):
if (cmp (file[-8:], "Makefile") == 0) and self.IsLibraryFile(file):
finalDir = self.GetModulePathFromMakefile(file)
if self.IsInExclusiveList(finalDir):
return ""
return finalDir.decode('gbk')
else:
return ""
def CheckMakefile(self, file):
if cmp (file[-8:], "Makefile") == 0:
finalDir = self.GetModulePathFromMakefile(file)
if self.IsInExclusiveList(finalDir):
return ""
return file.decode('gbk')
else:
return ""
def ParseMakefile(self, fileName):
try :
file = open(fileName)
except Exception:
print "fail to open " + fileName
return
try:
while 1:
line = file.readline()
if not line:
break
if cmp (line[0], "#") == 0:
continue
newline = line[:-1]
#print "check - (" + newline + ")"
headFile = ""
#COMMON_DEPS = $(WORKSPACE)\MdePkg\Include\Protocol\DebugSupport.h \
# $(WORKSPACE)\MdePkg\Include\Ppi\PciCfg2.h \
#$(OUTPUT_DIR)\X64\Semaphore.obj : $(WORKSPACE)\UefiCpuPkg\Include\Library\MtrrLib.h
#MatchString = "[\w\\\s\=\:\.\(\)\$]*\$\(WORKSPACE\)([\\\w]*\.h)[\\w\s]*"
MatchString1 = "(COMMON_DEPS = | |\$\(OUTPUT_DIR\)[\\\\\w]*.obj : )\$\(WORKSPACE\)\\\\([\\\\\w]*\.h)"
match1 = re.match(MatchString1, newline)
if match1 is not None:
#print "match1 - " + newline
#print "0 - " + match1.group(0)
#print "1 - " + match1.group(1)
#print "2 - " + match1.group(2)
headFile = match1.group(2)
if (cmp (headFile, "") != 0) and self.IsIncludeFile(headFile):
finalPath = os.path.join (self.sourceRoot, headFile)
if (self.IsInExclusiveList(finalPath) == False) and (finalPath not in self.usedIncludeFileList):
self.usedIncludeFileList.append(finalPath)
self.AddPackageList (finalPath, self.usedPackageList)
finally:
file.close()
return
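    # ParseMakefile scans a generated Makefile for $(WORKSPACE)-relative header dependencies
    # (e.g. MdePkg\Include\Protocol\DebugSupport.h), adds each non-excluded match to
    # usedIncludeFileList, and records the owning package in usedPackageList.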
def IsInIncludeList(self, file):
for list in self.includeList:
result = file.find(list);
if (result != -1):
return True
return False
def IsInExclusiveList(self, file):
if self.IsInIncludeList(file) == False:
return True
        for list in self.excludeList:
result = file.find(list);
if (result != -1):
return True
        for list in self.excludeList:
full_file = file + "\\"
result = full_file.find(list);
if (result != -1):
return True
return False
def IsFinalDir(self, file):
for s in os.listdir(file):
newFile=os.path.join(file,s)
if cmp (newFile[-4:], ".inf") == 0:
return True
return False
def IsInUsedModuleList(self, file):
if file in self.usedModuleList:
return True
else:
return False
def IsInUsedLibraryList(self, file):
if file in self.usedLibraryList:
return True
else:
return False
def IsUsedModule(self, file):
for used in self.usedModuleList:
final = used + "\\"
result = file.find(final);
if (result != -1):
return True
return False
def IsUsedLibrary(self, file):
for used in self.usedLibraryList:
final = used + "\\"
result = file.find(final);
if (result != -1):
return True
return False
def IsUnusedModule(self, file):
for unused in self.unusedModuleList:
final = unused + "\\"
result = file.find(final);
if (result != -1):
return True
return False
def IsUnusedLibrary(self, file):
for unused in self.unusedLibraryList:
final = unused + "\\"
result = file.find(final);
if (result != -1):
return True
return False
def IsUnusedPackage(self, file):
for unused in self.unusedPackageList:
final = unused + "\\"
result = file.find(final);
if (result != -1):
return True
return False
def CheckUnusedModule(self, file):
if os.path.isdir(file):
if (self.IsInExclusiveList(file) == False) and self.IsFinalDir(file) and (self.IsInUsedModuleList(file) == False) and (self.IsLibraryFile(file) == False):
return file.decode('gbk')
else:
return ""
else:
return ""
def CheckUnusedLibrary(self, file):
if os.path.isdir(file):
if (self.IsInExclusiveList(file) == False) and self.IsFinalDir(file) and (self.IsInUsedLibraryList(file) == False) and self.IsLibraryFile(file):
return file.decode('gbk')
else:
return ""
else:
return ""
def CheckAllModule(self, file):
if os.path.isdir(file):
if (self.IsInExclusiveList(file) == False) and self.IsFinalDir(file) and (self.IsLibraryFile(file) == False):
return file.decode('gbk')
else:
return ""
else:
return ""
def CheckAllLibrary(self, file):
if os.path.isdir(file):
if (self.IsInExclusiveList(file) == False) and self.IsFinalDir(file) and self.IsLibraryFile(file):
return file.decode('gbk')
else:
return ""
else:
return ""
def CheckAllDir(self, file):
if os.path.isdir(file):
if (self.IsInExclusiveList(file) == False):
return file.decode('gbk')
else:
return ""
else:
return ""
def CheckAllModuleFile(self, file):
if os.path.isfile(file):
if (self.IsInExclusiveList(file) == False) and (self.IsUsedModule(file) or self.IsUnusedModule(file)) and (self.IsLibraryFile(file) == False):
return file.decode('gbk')
else:
return ""
else:
return ""
def CheckAllLibraryFile(self, file):
if os.path.isfile(file):
if (self.IsInExclusiveList(file) == False) and (self.IsUsedLibrary(file) or self.IsUnusedLibrary(file)) and self.IsLibraryFile(file):
return file.decode('gbk')
else:
return ""
else:
return ""
def CheckAllFile(self, file):
if os.path.isfile(file):
if (self.IsInExclusiveList(file) == False):
return file.decode('gbk')
else:
return ""
else:
return ""
def CheckUsedModuleFile(self, file):
if os.path.isfile(file):
if (self.IsInExclusiveList(file) == False) and self.IsUsedModule(file):
return file.decode('gbk')
else:
return ""
else:
return ""
def CheckUsedLibraryFile(self, file):
if os.path.isfile(file):
if (self.IsInExclusiveList(file) == False) and self.IsUsedLibrary(file):
return file.decode('gbk')
else:
return ""
else:
return ""
def CheckUnusedModuleFile(self, file):
if os.path.isfile(file):
if (self.IsInExclusiveList(file) == False) and self.IsUnusedModule(file):
return file.decode('gbk')
else:
return ""
else:
return ""
def CheckUnusedLibraryFile(self, file):
if os.path.isfile(file):
if (self.IsInExclusiveList(file) == False) and self.IsUnusedLibrary(file):
return file.decode('gbk')
else:
return ""
else:
return ""
def CheckUnusedPackageFile(self, file):
if os.path.isfile(file):
if (self.IsInExclusiveList(file) == False) and self.IsUnusedPackage(file):
return file.decode('gbk')
else:
return ""
else:
return ""
def GetFileList(self, dir, fileList, checkFunc):
newDir = dir
AppendName = checkFunc (dir)
if cmp (AppendName, "") != 0:
#print "AppendName = " + AppendName
if AppendName not in fileList:
fileList.append(AppendName)
if os.path.isdir(dir):
for sub in os.listdir(dir):
newDir = os.path.join(dir,sub)
self.GetFileList(newDir, fileList, checkFunc)
return fileList
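    # Example: self.GetFileList(self.sourceRoot, [], self.CheckAllFile) walks the whole source
    # tree and returns every file that CheckAllFile accepts (i.e. every file not covered by the
    # exclude list); the same recursive walker drives all of the Check* callbacks in this class.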
def DeleteEmptyDir(self, dir):
if os.path.exists(dir) == False:
return
if os.path.isdir(dir):
for sub in os.listdir(dir):
newDir = os.path.join(dir,sub)
if (os.path.isdir(newDir) == True):
self.DeleteEmptyDir(newDir)
if not os.listdir(dir):
print "deleting empty " + dir
os.rmdir(dir)
def DeleteEmptyDirList(self, fileList):
for file in fileList:
self.DeleteEmptyDir(file)
def DeleteUnusedFile(self, fileList):
for file in fileList:
if os.path.exists(file) == False:
continue
print "deleting " + file
try :
if os.path.isdir(file):
shutil.rmtree(file)
if os.path.isfile(file):
os.remove(file)
except :
print "deleting error (" + file + ")"
def PrintFileTime(self, file):
statinfo = os.stat(file)
strucatime = time.localtime(statinfo.st_atime)
print "atime: " + str(statinfo.st_atime) + " (" + str(strucatime.tm_year) + "." + str(strucatime.tm_mon) + "." + str(strucatime.tm_mday) + " " + str(strucatime.tm_hour) + ":" + str(strucatime.tm_min) + ":" + str(strucatime.tm_sec) + ")"
strucctime = time.localtime(statinfo.st_ctime)
print "ctime: " + str(statinfo.st_ctime) + " (" + str(strucctime.tm_year) + "." + str(strucctime.tm_mon) + "." + str(strucctime.tm_mday) + " " + str(strucctime.tm_hour) + ":" + str(strucctime.tm_min) + ":" + str(strucctime.tm_sec) + ")"
strucmtime = time.localtime(statinfo.st_mtime)
print "mtime: " + str(statinfo.st_mtime) + " (" + str(strucmtime.tm_year) + "." + str(strucmtime.tm_mon) + "." + str(strucmtime.tm_mday) + " " + str(strucmtime.tm_hour) + ":" + str(strucmtime.tm_min) + ":" + str(strucmtime.tm_sec) + ")"
def TouchFileTime(self, file):
#currentTime = time.time()
#os.utime(file, (currentTime, currentTime))
os.utime(file, None)
def TouchFileListTime(self, fileList):
for file in fileList:
self.TouchFileTime(file)
def PrintFileList(self, fileList):
for file in fileList:
print file
print " Count - " + str(len(fileList))
def SortFileList(self, fileList):
fileList.sort()
def PrintFileListTime(self, fileList):
for file in fileList:
print file
self.PrintFileTime(file)
def GetUsedModuleList(self):
if (len(self.usedModuleList) == 0):
self.usedModuleList = self.GetFileList(sys.argv[1], [], self.CheckUsedModule)
self.SortFileList(self.usedModuleList)
def GetUsedLibraryList(self):
if (len(self.usedLibraryList) == 0):
self.usedLibraryList = self.GetFileList(sys.argv[1], [], self.CheckUsedLibrary)
self.SortFileList(self.usedLibraryList)
def GetMakefileList(self):
if (len(self.makefileList) == 0):
self.GetUsedModuleList()
self.makefileList = self.GetFileList(sys.argv[1], [], self.CheckMakefile)
def GetUsedIncludeFileList(self):
if (len(self.usedIncludeFileList) == 0):
self.GetMakefileList()
#print "\nMakefile:"
#fileChecker.PrintFileList (fileChecker.makefileList)
for file in self.makefileList:
self.ParseMakefile(file)
self.SortFileList(self.usedIncludeFileList)
def GetUsedModuleFileList(self):
if (len(self.usedModuleFileList) == 0):
self.GetUsedModuleList()
self.usedModuleFileList = self.GetFileList(self.sourceRoot, [], self.CheckUsedModuleFile)
self.SortFileList(self.usedModuleFileList)
def GetUsedLibraryFileList(self):
if (len(self.usedLibraryFileList) == 0):
self.GetUsedLibraryList()
self.usedLibraryFileList = self.GetFileList(self.sourceRoot, [], self.CheckUsedLibraryFile)
self.SortFileList(self.usedLibraryFileList)
def GetUsedFileList(self):
if (len(self.usedFileList) == 0):
self.GetAllFileList()
self.GetUnusedFileList()
self.usedFileList = []
for file in self.allFileList:
if (file not in self.unusedFileList) and self.IsInPackageList(file):
self.usedFileList.append(file)
self.SortFileList(self.usedFileList)
def GetUsedPackageList(self):
self.GetUsedModuleList()
self.GetUsedLibraryList()
self.GetUsedIncludeFileList()
self.SortFileList(self.usedPackageList)
def GetAllModuleList(self):
if (len(self.allModuleList) == 0):
self.GetUsedModuleList()
self.allModuleList = self.GetFileList(self.sourceRoot, [], self.CheckAllModule)
self.SortFileList(self.allModuleList)
def GetAllLibraryList(self):
if (len(self.allLibraryList) == 0):
self.GetUsedLibraryList()
self.allLibraryList = self.GetFileList(self.sourceRoot, [], self.CheckAllLibrary)
self.SortFileList(self.allLibraryList)
def GetAllIncludeFileList(self):
if (len(self.allIncludeFileList) == 0):
self.GetUsedModuleList()
self.allIncludeFileList = self.GetFileList(self.sourceRoot, [], self.CheckIncludeFile)
self.SortFileList(self.allIncludeFileList)
def GetAllModuleFileList(self):
if (len(self.allFileList) == 0):
self.GetUsedModuleList()
self.GetUnusedModuleList()
self.allModuleFileList = self.GetFileList(self.sourceRoot, [], self.CheckAllModuleFile)
self.SortFileList(self.allModuleFileList)
def GetAllLibraryFileList(self):
if (len(self.allFileList) == 0):
self.GetUsedLibraryList()
self.GetUnusedLibraryList()
self.allLibraryFileList = self.GetFileList(self.sourceRoot, [], self.CheckAllLibraryFile)
self.SortFileList(self.allLibraryFileList)
def GetAllFileList(self):
if (len(self.allFileList) == 0):
self.GetUsedModuleList()
self.allFileList = self.GetFileList(self.sourceRoot, [], self.CheckAllFile)
self.SortFileList(self.allFileList)
def GetAllPackageList(self):
if (len(self.allPackageList) == 0):
self.GetAllFileList()
prefixLength = len(self.sourceRoot)
for file in self.allFileList:
finalPath = os.path.join (self.sourceRoot, file[prefixLength + 1:])
self.AddPackageList(finalPath, self.allPackageList)
self.SortFileList(self.allPackageList)
def GetUnusedModuleList(self):
if (len(self.unusedModuleList) == 0):
self.GetUsedModuleList()
self.unusedModuleList = self.GetFileList(self.sourceRoot, [], self.CheckUnusedModule)
def GetUnusedLibraryList(self):
if (len(self.unusedLibraryList) == 0):
self.GetUsedLibraryList()
self.unusedLibraryList = self.GetFileList(self.sourceRoot, [], self.CheckUnusedLibrary)
def GetUnusedIncludeFileList(self):
if (len(self.unusedIncludeFileList) == 0):
self.GetUsedIncludeFileList()
self.GetAllIncludeFileList()
self.unusedIncludeFileList = []
for file in self.allIncludeFileList:
if file not in self.usedIncludeFileList:
self.unusedIncludeFileList.append(file)
self.SortFileList(self.unusedIncludeFileList)
def GetUnusedModuleFileList(self):
if (len(self.unusedModuleFileList) == 0):
self.GetUnusedModuleList ()
self.unusedModuleFileList = self.GetFileList(self.sourceRoot, [], self.CheckUnusedModuleFile)
self.SortFileList(self.unusedModuleFileList)
def GetUnusedLibraryFileList(self):
if (len(self.unusedLibraryFileList) == 0):
self.GetUnusedLibraryList ()
self.unusedLibraryFileList = self.GetFileList(self.sourceRoot, [], self.CheckUnusedLibraryFile)
self.SortFileList(self.unusedLibraryFileList)
def GetUnusedFileList(self):
if (len(self.unusedFileList) == 0):
self.GetUnusedIncludeFileList()
self.GetUnusedModuleFileList()
self.GetUnusedLibraryFileList()
self.GetUnusedPackageList()
self.unusedPackageFileList = self.GetFileList(self.sourceRoot, [], self.CheckUnusedPackageFile)
self.unusedFileList = []
self.unusedFileList.extend(self.unusedIncludeFileList)
self.unusedFileList.extend(self.unusedModuleFileList)
self.unusedFileList.extend(self.unusedLibraryFileList)
for file in self.unusedPackageFileList:
if file not in self.unusedFileList:
self.unusedFileList.append(file)
self.SortFileList(self.unusedFileList)
def GetUnusedPackageList(self):
if (len(self.unusedPackageList) == 0):
self.GetUsedPackageList()
self.GetAllPackageList()
for package in self.allPackageList:
if package not in self.usedPackageList:
self.unusedPackageList.append(package)
self.SortFileList(self.unusedPackageList)
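    # Most of the Get*List methods above cache their work: they recompute a list only while it
    # is still empty, so main() can call them in any order and reuse earlier results.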
#
# Print out the usage
#
def usage():
print "Usage: \n\tCheckCodeBase <Build Dir> used|unused|all"
print " used - used library, modules, include file, library files, module files, all used files"
print " unused - unused library, modules, include file, library files, module files, all unused files"
print " all - all library, modules, include file, library files, module files, all files"
print " library : the directory of a library"
print " module : the directory of a driver"
print " include file : the header files in include directory"
print " library file : all files in a library directory"
print " module file : all files in a driver directory"
print " all file : all files in project, including any other metadata files or batch files"
#print "Usage: \n\tCheckCodeBase <Build Dir> time|touch"
print "For Example: \n\tCheckCodeBase Build\KabylakeOpenBoardPkg\KabylakeRvp3\DEBUG_VS2015x86 used"
def main():
fileChecker = FileChecker()
if len(sys.argv) < 3:
usage()
return 1
if cmp (sys.argv[2], "used") == 0:
fileChecker.GetUsedModuleList ()
print "\n Used Module List:"
fileChecker.PrintFileList (fileChecker.usedModuleList)
fileChecker.GetUsedLibraryList ()
print "\n Used Library List:"
fileChecker.PrintFileList (fileChecker.usedLibraryList)
fileChecker.GetUsedIncludeFileList()
print "\n Used Include File List:"
fileChecker.PrintFileList (fileChecker.usedIncludeFileList)
fileChecker.GetUsedModuleFileList()
print "\n Used Module File List:"
fileChecker.PrintFileList (fileChecker.usedModuleFileList)
fileChecker.GetUsedLibraryFileList()
print "\n Used Library File List:"
fileChecker.PrintFileList (fileChecker.usedLibraryFileList)
fileChecker.GetUsedFileList()
print "\n All Used File List:"
fileChecker.PrintFileList (fileChecker.usedFileList)
fileChecker.GetUsedPackageList()
print "\n Used Package List:"
fileChecker.PrintFileList (fileChecker.usedPackageList)
print "\n ==== Used Module Summary ===="
print " Module Count - " + str(len(fileChecker.usedModuleList))
print " Library Count - " + str(len(fileChecker.usedLibraryList))
print " Include File Count - " + str(len(fileChecker.usedIncludeFileList))
print " Module File Count - " + str(len(fileChecker.usedModuleFileList))
print " Library File Count - " + str(len(fileChecker.usedLibraryFileList))
print " All File Count - " + str(len(fileChecker.usedFileList))
elif cmp (sys.argv[2], "all") == 0:
fileChecker.GetAllModuleList()
print "\n All Module List:"
fileChecker.PrintFileList (fileChecker.allModuleList)
fileChecker.GetAllLibraryList()
print "\n All Library List:"
fileChecker.PrintFileList (fileChecker.allLibraryList)
fileChecker.GetAllIncludeFileList()
print "\n All Include File List:"
fileChecker.PrintFileList (fileChecker.allIncludeFileList)
fileChecker.GetAllModuleFileList()
print "\n All Module File List:"
fileChecker.PrintFileList (fileChecker.allModuleFileList)
fileChecker.GetAllLibraryFileList()
print "\n All Library File List:"
fileChecker.PrintFileList (fileChecker.allLibraryFileList)
fileChecker.GetAllFileList()
print "\n All File List:"
fileChecker.PrintFileList (fileChecker.allFileList)
fileChecker.GetAllPackageList()
print "\n All Package List:"
fileChecker.PrintFileList (fileChecker.allPackageList)
print "\n ==== All Module Summary ===="
print " Module Count - " + str(len(fileChecker.allModuleList))
print " Library Count - " + str(len(fileChecker.allLibraryList))
print " Include File Count - " + str(len(fileChecker.allIncludeFileList))
print " Module File Count - " + str(len(fileChecker.allModuleFileList))
print " Library File Count - " + str(len(fileChecker.allLibraryFileList))
print " All File Count - " + str(len(fileChecker.allFileList))
elif cmp (sys.argv[2], "unused") == 0:
fileChecker.GetUnusedModuleList()
print "\n Unused Module List:"
fileChecker.PrintFileList (fileChecker.unusedModuleList)
fileChecker.GetUnusedLibraryList()
print "\n Unused Library List:"
fileChecker.PrintFileList (fileChecker.unusedLibraryList)
fileChecker.GetUnusedIncludeFileList()
print "\n Unused Include List:"
fileChecker.PrintFileList (fileChecker.unusedIncludeFileList)
fileChecker.GetUnusedModuleFileList()
print "\n Unused Module File List:"
fileChecker.PrintFileList (fileChecker.unusedModuleFileList)
fileChecker.GetUnusedLibraryFileList()
print "\n Unused Library File List:"
fileChecker.PrintFileList (fileChecker.unusedLibraryFileList)
fileChecker.GetUnusedFileList()
print "\n Unused File List:"
fileChecker.PrintFileList (fileChecker.unusedFileList)
fileChecker.GetUnusedPackageList()
print "\n Unused Package List:"
fileChecker.PrintFileList (fileChecker.unusedPackageList)
print "\n ==== Unused Module Summary ===="
print " Module Count - " + str(len(fileChecker.unusedModuleList))
print " Library Count - " + str(len(fileChecker.unusedLibraryList))
print " Include File Count - " + str(len(fileChecker.unusedIncludeFileList))
print " Module File Count - " + str(len(fileChecker.unusedModuleFileList))
print " Library File Count - " + str(len(fileChecker.unusedLibraryFileList))
print " All File Count - " + str(len(fileChecker.unusedFileList))
elif cmp (sys.argv[2], "delete_unused") == 0:
fileChecker.GetUnusedModuleList()
fileChecker.GetUnusedLibraryList()
fileChecker.GetUnusedIncludeFileList()
fileChecker.GetUnusedPackageList()
fileChecker.DeleteUnusedFile(fileChecker.unusedPackageList)
fileChecker.DeleteUnusedFile(fileChecker.unusedModuleList)
fileChecker.DeleteUnusedFile(fileChecker.unusedLibraryList)
fileChecker.DeleteUnusedFile(fileChecker.unusedIncludeFileList)
fileChecker.allDirList = fileChecker.GetFileList(fileChecker.sourceRoot, [], fileChecker.CheckAllDir)
fileChecker.DeleteEmptyDirList(fileChecker.allDirList)
elif cmp (sys.argv[2], "time") == 0:
fileChecker.allDirList = fileChecker.GetFileList(sys.argv[1], [], fileChecker.CheckFile)
fileChecker.PrintFileListTime(fileChecker.allDirList)
elif cmp (sys.argv[2], "touch") == 0:
fileChecker.allDirList = fileChecker.GetFileList(sys.argv[1], [], fileChecker.CheckFile)
fileChecker.TouchFileListTime(fileChecker.allDirList)
else:
print "Unknown - " + sys.argv[2]
if __name__ == '__main__':
sys.exit(main())
| edk2-platforms-master | Platform/Intel/MinPlatformPkg/Tools/Help/CheckCodeBase.py |
# @ build_board.py
# Extensions for building WilsonCityRvp using build_bios.py
#
# Copyright (c) 2021, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
"""
This module serves as a sample implementation of the build extension
scripts
"""
import os
import sys
def pre_build_ex(config, functions):
"""Additional Pre BIOS build function
:param config: The environment variables to be used in the build process
:type config: Dictionary
:param functions: A dictionary of function pointers
:type functions: Dictionary
:returns: nothing
"""
print("pre_build_ex")
config["BUILD_DIR_PATH"] = os.path.join(config["WORKSPACE"],
'Build',
config["PLATFORM_BOARD_PACKAGE"],
"{}_{}".format(
config["TARGET"],
config["TOOL_CHAIN_TAG"]))
# set BUILD_DIR path
config["BUILD_DIR"] = os.path.join('Build',
config["PLATFORM_BOARD_PACKAGE"],
"{}_{}".format(
config["TARGET"],
config["TOOL_CHAIN_TAG"]))
config["BUILD_X64"] = os.path.join(config["BUILD_DIR_PATH"], 'X64')
config["BUILD_IA32"] = os.path.join(config["BUILD_DIR_PATH"], 'IA32')
if not os.path.isdir(config["BUILD_DIR_PATH"]):
try:
os.makedirs(config["BUILD_DIR_PATH"])
except OSError:
print("Error while creating Build folder")
sys.exit(1)
#@todo: Replace this with PcdFspModeSelection
if config.get("API_MODE_FSP_WRAPPER_BUILD", "FALSE") == "TRUE":
config["EXT_BUILD_FLAGS"] += " -D FSP_MODE=0"
else:
config["EXT_BUILD_FLAGS"] += " -D FSP_MODE=1"
if config.get("API_MODE_FSP_WRAPPER_BUILD", "FALSE") == "TRUE":
raise ValueError("FSP API Mode is currently unsupported on Ice Lake Xeon Scalable")
# Build the ACPI AML offset table *.offset.h
print("Info: re-generating PlatformOffset header files")
execute_script = functions.get("execute_script")
# AML offset arch is X64, not sure if it matters.
command = ["build", "-a", "X64", "-t", config["TOOL_CHAIN_TAG"], "-D", "MAX_SOCKET=" + config["MAX_SOCKET"]]
if config["EXT_BUILD_FLAGS"] and config["EXT_BUILD_FLAGS"] != "":
ext_build_flags = config["EXT_BUILD_FLAGS"].split(" ")
ext_build_flags = [x.strip() for x in ext_build_flags]
ext_build_flags = [x for x in ext_build_flags if x != ""]
command.extend(ext_build_flags)
aml_offsets_split = os.path.split(os.path.normpath(config["AML_OFFSETS_PATH"]))
command.append("-p")
command.append(os.path.normpath(config["AML_OFFSETS_PATH"]) + '.dsc')
command.append("-m")
command.append(os.path.join(aml_offsets_split[0], aml_offsets_split[1], aml_offsets_split[1] + '.inf'))
command.append("-y")
command.append(os.path.join(config["WORKSPACE"], "PreBuildReport.txt"))
command.append("--log=" + os.path.join(config["WORKSPACE"], "PreBuild.log"))
shell = True
if os.name == "posix": # linux
shell = False
_, _, _, code = execute_script(command, config, shell=shell)
if code != 0:
print(" ".join(command))
print("Error re-generating PlatformOffset header files")
sys.exit(1)
# Build AmlGenOffset command to consume the *.offset.h and produce AmlOffsetTable.c for StaticSkuDataDxe use.
# Get destination path and filename from config
relative_file_path = os.path.normpath(config["STRIPPED_AML_OFFSETS_FILE_PATH"]) # get path relative to Platform/Intel
out_file_path = os.path.join(config["WORKSPACE_PLATFORM"], relative_file_path) # full path to output file
out_file_dir = os.path.dirname(out_file_path) # remove filename
out_file_root_ext = os.path.splitext(os.path.basename(out_file_path)) # root and extension of output file
# Get relative path for the generated offset.h file
relative_dsdt_file_path = os.path.normpath(config["DSDT_TABLE_FILE_PATH"]) # path relative to Platform/Intel
dsdt_file_root_ext = os.path.splitext(os.path.basename(relative_dsdt_file_path)) # root and extension of generated offset.h file
# Generate output directory if it doesn't exist
if not os.path.exists(out_file_dir):
os.mkdir(out_file_dir)
command = ["python",
os.path.join(config["MIN_PACKAGE_TOOLS"], "AmlGenOffset", "AmlGenOffset.py"),
"-d", "--aml_filter", config["AML_FILTER"],
"-o", out_file_path,
os.path.join(config["BUILD_X64"], aml_offsets_split[0], aml_offsets_split[1], aml_offsets_split[1], "OUTPUT", os.path.dirname(relative_dsdt_file_path), dsdt_file_root_ext[0] + ".offset.h")]
# execute the command
_, _, _, code = execute_script(command, config, shell=shell)
if code != 0:
print(" ".join(command))
print("Error re-generating PlatformOffset header files")
sys.exit(1)
print("GenOffset done")
return None
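# Helper shared by build_ex and post_build_ex: concatenates the given files, in order, into
# ofile. Note that ofile is opened for writing before the inputs are checked, so if any input
# is missing the function returns early and leaves a truncated (possibly empty) output behind.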
def _merge_files(files, ofile):
with open(ofile, 'wb') as of:
for x in files:
if not os.path.exists(x):
return
with open(x, 'rb') as f:
of.write(f.read())
def build_ex(config, functions):
"""Additional BIOS build function
:param config: The environment variables to be used in the build process
:type config: Dictionary
:param functions: A dictionary of function pointers
:type functions: Dictionary
:returns: config dictionary
:rtype: Dictionary
"""
print("build_ex")
fv_path = os.path.join(config["BUILD_DIR_PATH"], "FV")
binary_fd = os.path.join(fv_path, "BINARY.fd")
main_fd = os.path.join(fv_path, "MAIN.fd")
secpei_fd = os.path.join(fv_path, "SECPEI.fd")
board_fd = config["BOARD"].upper()
final_fd = os.path.join(fv_path, "{}.fd".format(board_fd))
_merge_files((binary_fd, main_fd, secpei_fd), final_fd)
return None
def post_build_ex(config, functions):
"""Additional Post BIOS build function
:param config: The environment variables to be used in the post
build process
:type config: Dictionary
:param functions: A dictionary of function pointers
:type functions: Dictionary
:returns: config dictionary
:rtype: Dictionary
"""
print("post_build_ex")
fv_path = os.path.join(config["BUILD_DIR_PATH"], "FV")
board_fd = config["BOARD"].upper()
final_fd = os.path.join(fv_path, "{}.fd".format(board_fd))
final_ifwi = os.path.join(fv_path, "{}.bin".format(board_fd))
ifwi_ingredients_path = os.path.join(config["WORKSPACE_PLATFORM_BIN"], "Ifwi", config["BOARD"])
flash_descriptor = os.path.join(ifwi_ingredients_path, "FlashDescriptor.bin")
intel_me = os.path.join(ifwi_ingredients_path, "Me.bin")
_merge_files((flash_descriptor, intel_me, final_fd), final_ifwi)
    if os.path.isfile(final_ifwi):
print("IFWI image can be found at {}".format(final_ifwi))
return None
def clean_ex(config, functions):
"""Additional clean function
:param config: The environment variables to be used in the build process
:type config: Dictionary
:param functions: A dictionary of function pointers
:type functions: Dictionary
:returns: config dictionary
:rtype: Dictionary
"""
print("clean_ex")
return None
| edk2-platforms-master | Platform/Intel/WhitleyOpenBoardPkg/WilsonCityRvp/build_board.py |
# @ build_board.py
# Extensions for building SuperMicro using build_bios.py
#
# Copyright (c) 2021, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
"""
This module serves as a sample implementation of the build extension
scripts
"""
import os
import sys
def pre_build_ex(config, functions):
"""Additional Pre BIOS build function
:param config: The environment variables to be used in the build process
:type config: Dictionary
:param functions: A dictionary of function pointers
:type functions: Dictionary
:returns: nothing
"""
print("pre_build_ex")
config["BUILD_DIR_PATH"] = os.path.join(config["WORKSPACE"],
'Build',
config["PLATFORM_BOARD_PACKAGE"],
"{}_{}".format(
config["TARGET"],
config["TOOL_CHAIN_TAG"]))
# set BUILD_DIR path
config["BUILD_DIR"] = os.path.join('Build',
config["PLATFORM_BOARD_PACKAGE"],
"{}_{}".format(
config["TARGET"],
config["TOOL_CHAIN_TAG"]))
config["BUILD_X64"] = os.path.join(config["BUILD_DIR_PATH"], 'X64')
config["BUILD_IA32"] = os.path.join(config["BUILD_DIR_PATH"], 'IA32')
if not os.path.isdir(config["BUILD_DIR_PATH"]):
try:
os.makedirs(config["BUILD_DIR_PATH"])
except OSError:
print("Error while creating Build folder")
sys.exit(1)
#@todo: Replace this with PcdFspModeSelection
if config.get("API_MODE_FSP_WRAPPER_BUILD", "FALSE") == "TRUE":
config["EXT_BUILD_FLAGS"] += " -D FSP_MODE=0"
else:
config["EXT_BUILD_FLAGS"] += " -D FSP_MODE=1"
if config.get("API_MODE_FSP_WRAPPER_BUILD", "FALSE") == "TRUE":
raise ValueError("FSP API Mode is currently unsupported on Ice Lake Xeon Scalable")
return None
def _merge_files(files, ofile):
with open(ofile, 'wb') as of:
for x in files:
if not os.path.exists(x):
return
with open(x, 'rb') as f:
of.write(f.read())
def build_ex(config, functions):
"""Additional BIOS build function
:param config: The environment variables to be used in the build process
:type config: Dictionary
:param functions: A dictionary of function pointers
:type functions: Dictionary
:returns: config dictionary
:rtype: Dictionary
"""
print("build_ex")
fv_path = os.path.join(config["BUILD_DIR_PATH"], "FV")
binary_fd = os.path.join(fv_path, "BINARY.fd")
main_fd = os.path.join(fv_path, "MAIN.fd")
secpei_fd = os.path.join(fv_path, "SECPEI.fd")
board_fd = config["BOARD"].upper()
final_fd = os.path.join(fv_path, "{}.fd".format(board_fd))
_merge_files((binary_fd, main_fd, secpei_fd), final_fd)
return None
def post_build_ex(config, functions):
"""Additional Post BIOS build function
:param config: The environment variables to be used in the post
build process
:type config: Dictionary
:param functions: A dictionary of function pointers
:type functions: Dictionary
:returns: config dictionary
:rtype: Dictionary
"""
print("post_build_ex")
fv_path = os.path.join(config["BUILD_DIR_PATH"], "FV")
board_fd = config["BOARD"].upper()
final_fd = os.path.join(fv_path, "{}.fd".format(board_fd))
final_ifwi = os.path.join(fv_path, "{}.bin".format(board_fd))
ifwi_ingredients_path = os.path.join(config["WORKSPACE_PLATFORM_BIN"], "Ifwi", config["BOARD"])
flash_descriptor = os.path.join(ifwi_ingredients_path, "FlashDescriptor.bin")
intel_me = os.path.join(ifwi_ingredients_path, "Me.bin")
_merge_files((flash_descriptor, intel_me, final_fd), final_ifwi)
    if os.path.isfile(final_ifwi):
print("IFWI image can be found at {}".format(final_ifwi))
return None
def clean_ex(config, functions):
"""Additional clean function
:param config: The environment variables to be used in the build process
:type config: Dictionary
:param functions: A dictionary of function pointers
:type functions: Dictionary
:returns: config dictionary
:rtype: Dictionary
"""
print("clean_ex")
return None
| edk2-platforms-master | Platform/Intel/WhitleyOpenBoardPkg/BoardPortTemplate/build_board.py |
# @ build_board.py
# Extensions for building CooperCityRvp using build_bios.py
#
# Copyright (c) 2021, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
"""
This module serves as a sample implementation of the build extension
scripts
"""
import os
import sys
def pre_build_ex(config, functions):
"""Additional Pre BIOS build function
:param config: The environment variables to be used in the build process
:type config: Dictionary
:param functions: A dictionary of function pointers
:type functions: Dictionary
:returns: nothing
"""
print("pre_build_ex")
config["BUILD_DIR_PATH"] = os.path.join(config["WORKSPACE"],
'Build',
config["PLATFORM_BOARD_PACKAGE"],
"{}_{}".format(
config["TARGET"],
config["TOOL_CHAIN_TAG"]))
# set BUILD_DIR path
config["BUILD_DIR"] = os.path.join('Build',
config["PLATFORM_BOARD_PACKAGE"],
"{}_{}".format(
config["TARGET"],
config["TOOL_CHAIN_TAG"]))
config["BUILD_X64"] = os.path.join(config["BUILD_DIR_PATH"], 'X64')
config["BUILD_IA32"] = os.path.join(config["BUILD_DIR_PATH"], 'IA32')
if not os.path.isdir(config["BUILD_DIR_PATH"]):
try:
os.makedirs(config["BUILD_DIR_PATH"])
except OSError:
print("Error while creating Build folder")
sys.exit(1)
#@todo: Replace this with PcdFspModeSelection
if config.get("API_MODE_FSP_WRAPPER_BUILD", "FALSE") == "TRUE":
config["EXT_BUILD_FLAGS"] += " -D FSP_MODE=0"
else:
config["EXT_BUILD_FLAGS"] += " -D FSP_MODE=1"
return None
def _merge_files(files, ofile):
with open(ofile, 'wb') as of:
for x in files:
if not os.path.exists(x):
return
with open(x, 'rb') as f:
of.write(f.read())
def build_ex(config, functions):
"""Additional BIOS build function
:param config: The environment variables to be used in the build process
:type config: Dictionary
:param functions: A dictionary of function pointers
:type functions: Dictionary
:returns: config dictionary
:rtype: Dictionary
"""
print("build_ex")
fv_path = os.path.join(config["BUILD_DIR_PATH"], "FV")
binary_fd = os.path.join(fv_path, "BINARY.fd")
main_fd = os.path.join(fv_path, "MAIN.fd")
secpei_fd = os.path.join(fv_path, "SECPEI.fd")
board_fd = config["BOARD"].upper()
final_fd = os.path.join(fv_path, "{}.fd".format(board_fd))
_merge_files((binary_fd, main_fd, secpei_fd), final_fd)
return None
def post_build_ex(config, functions):
"""Additional Post BIOS build function
:param config: The environment variables to be used in the post
build process
:type config: Dictionary
:param functions: A dictionary of function pointers
:type functions: Dictionary
:returns: config dictionary
:rtype: Dictionary
"""
print("post_build_ex")
fv_path = os.path.join(config["BUILD_DIR_PATH"], "FV")
board_fd = config["BOARD"].upper()
final_fd = os.path.join(fv_path, "{}.fd".format(board_fd))
final_ifwi = os.path.join(fv_path, "{}.bin".format(board_fd))
ifwi_ingredients_path = os.path.join(config["WORKSPACE_PLATFORM_BIN"], "Ifwi", config["BOARD"])
flash_descriptor = os.path.join(ifwi_ingredients_path, "FlashDescriptor.bin")
intel_me = os.path.join(ifwi_ingredients_path, "Me.bin")
_merge_files((flash_descriptor, intel_me, final_fd), final_ifwi)
    if os.path.isfile(final_ifwi):
print("IFWI image can be found at {}".format(final_ifwi))
return None
def clean_ex(config, functions):
"""Additional clean function
:param config: The environment variables to be used in the build process
:type config: Dictionary
:param functions: A dictionary of function pointers
:type functions: Dictionary
:returns: config dictionary
:rtype: Dictionary
"""
print("clean_ex")
return None
| edk2-platforms-master | Platform/Intel/WhitleyOpenBoardPkg/CooperCityRvp/build_board.py |
# @ build_board.py
# Extensions for building JunctionCity using build_bios.py
#
#
# Copyright (c) 2021, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2021, American Megatrends International LLC. <BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
"""
This module serves as a sample implementation of the build extension
scripts
"""
import os
import sys
def pre_build_ex(config, functions):
"""Additional Pre BIOS build function
:param config: The environment variables to be used in the build process
:type config: Dictionary
:param functions: A dictionary of function pointers
:type functions: Dictionary
:returns: nothing
"""
print("pre_build_ex")
config["BUILD_DIR_PATH"] = os.path.join(config["WORKSPACE"],
'Build',
config["PLATFORM_BOARD_PACKAGE"],
"{}_{}".format(
config["TARGET"],
config["TOOL_CHAIN_TAG"]))
# set BUILD_DIR path
config["BUILD_DIR"] = os.path.join('Build',
config["PLATFORM_BOARD_PACKAGE"],
"{}_{}".format(
config["TARGET"],
config["TOOL_CHAIN_TAG"]))
config["BUILD_X64"] = os.path.join(config["BUILD_DIR_PATH"], 'X64')
config["BUILD_IA32"] = os.path.join(config["BUILD_DIR_PATH"], 'IA32')
if not os.path.isdir(config["BUILD_DIR_PATH"]):
try:
os.makedirs(config["BUILD_DIR_PATH"])
except OSError:
print("Error while creating Build folder")
sys.exit(1)
#@todo: Replace this with PcdFspModeSelection
if config.get("API_MODE_FSP_WRAPPER_BUILD", "FALSE") == "TRUE":
config["EXT_BUILD_FLAGS"] += " -D FSP_MODE=0"
else:
config["EXT_BUILD_FLAGS"] += " -D FSP_MODE=1"
if config.get("API_MODE_FSP_WRAPPER_BUILD", "FALSE") == "TRUE":
raise ValueError("FSP API Mode is currently unsupported on Ice Lake Xeon Scalable")
# Build the ACPI AML offset table *.offset.h
print("Info: re-generating PlatformOffset header files")
execute_script = functions.get("execute_script")
# AML offset arch is X64, not sure if it matters.
command = ["build", "-a", "X64", "-t", config["TOOL_CHAIN_TAG"], "-D", "MAX_SOCKET=" + config["MAX_SOCKET"]]
if config["EXT_BUILD_FLAGS"] and config["EXT_BUILD_FLAGS"] != "":
ext_build_flags = config["EXT_BUILD_FLAGS"].split(" ")
ext_build_flags = [x.strip() for x in ext_build_flags]
ext_build_flags = [x for x in ext_build_flags if x != ""]
command.extend(ext_build_flags)
aml_offsets_split = os.path.split(os.path.normpath(config["AML_OFFSETS_PATH"]))
command.append("-p")
command.append(os.path.normpath(config["AML_OFFSETS_PATH"]) + '.dsc')
command.append("-m")
command.append(os.path.join(aml_offsets_split[0], aml_offsets_split[1], aml_offsets_split[1] + '.inf'))
command.append("-y")
command.append(os.path.join(config["WORKSPACE"], "PreBuildReport.txt"))
command.append("--log=" + os.path.join(config["WORKSPACE"], "PreBuild.log"))
shell = True
if os.name == "posix": # linux
shell = False
_, _, _, code = execute_script(command, config, shell=shell)
if code != 0:
print(" ".join(command))
print("Error re-generating PlatformOffset header files")
sys.exit(1)
# Build AmlGenOffset command to consume the *.offset.h and produce AmlOffsetTable.c for StaticSkuDataDxe use.
# Get destination path and filename from config
relative_file_path = os.path.normpath(config["STRIPPED_AML_OFFSETS_FILE_PATH"]) # get path relative to Platform/Intel
out_file_path = os.path.join(config["WORKSPACE_PLATFORM"], relative_file_path) # full path to output file
out_file_dir = os.path.dirname(out_file_path) # remove filename
out_file_root_ext = os.path.splitext(os.path.basename(out_file_path)) # root and extension of output file
# Get relative path for the generated offset.h file
relative_dsdt_file_path = os.path.normpath(config["DSDT_TABLE_FILE_PATH"]) # path relative to Platform/Intel
dsdt_file_root_ext = os.path.splitext(os.path.basename(relative_dsdt_file_path)) # root and extension of generated offset.h file
# Generate output directory if it doesn't exist
if not os.path.exists(out_file_dir):
os.mkdir(out_file_dir)
command = ["python",
os.path.join(config["MIN_PACKAGE_TOOLS"], "AmlGenOffset", "AmlGenOffset.py"),
"-d", "--aml_filter", config["AML_FILTER"],
"-o", out_file_path,
os.path.join(config["BUILD_X64"], aml_offsets_split[0], aml_offsets_split[1], aml_offsets_split[1], "OUTPUT", os.path.dirname(relative_dsdt_file_path), dsdt_file_root_ext[0] + ".offset.h")]
# execute the command
_, _, _, code = execute_script(command, config, shell=shell)
if code != 0:
print(" ".join(command))
print("Error re-generating PlatformOffset header files")
sys.exit(1)
print("GenOffset done")
return None
def _merge_files(files, ofile):
with open(ofile, 'wb') as of:
for x in files:
if not os.path.exists(x):
return
with open(x, 'rb') as f:
of.write(f.read())
def build_ex(config, functions):
"""Additional BIOS build function
:param config: The environment variables to be used in the build process
:type config: Dictionary
:param functions: A dictionary of function pointers
:type functions: Dictionary
:returns: config dictionary
:rtype: Dictionary
"""
print("build_ex")
fv_path = os.path.join(config["BUILD_DIR_PATH"], "FV")
binary_fd = os.path.join(fv_path, "BINARY.fd")
main_fd = os.path.join(fv_path, "MAIN.fd")
secpei_fd = os.path.join(fv_path, "SECPEI.fd")
board_fd = config["BOARD"].upper()
final_fd = os.path.join(fv_path, "{}.fd".format(board_fd))
_merge_files((binary_fd, main_fd, secpei_fd), final_fd)
return None
def post_build_ex(config, functions):
"""Additional Post BIOS build function
:param config: The environment variables to be used in the post
build process
:type config: Dictionary
:param functions: A dictionary of function pointers
:type functions: Dictionary
:returns: config dictionary
:rtype: Dictionary
"""
print("post_build_ex")
fv_path = os.path.join(config["BUILD_DIR_PATH"], "FV")
board_fd = config["BOARD"].upper()
final_fd = os.path.join(fv_path, "{}.fd".format(board_fd))
final_ifwi = os.path.join(fv_path, "{}.bin".format(board_fd))
ifwi_ingredients_path = os.path.join(config["WORKSPACE_PLATFORM_BIN"], "Ifwi", config["BOARD"])
flash_descriptor = os.path.join(ifwi_ingredients_path, "FlashDescriptor.bin")
intel_me = os.path.join(ifwi_ingredients_path, "Me.bin")
_merge_files((flash_descriptor, intel_me, final_fd), final_ifwi)
    if os.path.isfile(final_ifwi):
print("IFWI image can be found at {}".format(final_ifwi))
return None
def clean_ex(config, functions):
"""Additional clean function
:param config: The environment variables to be used in the build process
:type config: Dictionary
:param functions: A dictionary of function pointers
:type functions: Dictionary
:returns: config dictionary
:rtype: Dictionary
"""
print("clean_ex")
return None
| edk2-platforms-master | Platform/Intel/WhitleyOpenBoardPkg/JunctionCity/build_board.py |
# @ build_board.py
# Extensions for building Aowanda using build_bios.py
#
#
# Copyright (c) 2021, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2022, American Megatrends International LLC. <BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
"""
This module serves as a sample implementation of the build extension
scripts
"""
import os
import sys
def pre_build_ex(config, functions):
"""Additional Pre BIOS build function
:param config: The environment variables to be used in the build process
:type config: Dictionary
:param functions: A dictionary of function pointers
:type functions: Dictionary
:returns: nothing
"""
print("pre_build_ex")
config["BUILD_DIR_PATH"] = os.path.join(config["WORKSPACE"],
'Build',
config["PLATFORM_BOARD_PACKAGE"],
"{}_{}".format(
config["TARGET"],
config["TOOL_CHAIN_TAG"]))
# set BUILD_DIR path
config["BUILD_DIR"] = os.path.join('Build',
config["PLATFORM_BOARD_PACKAGE"],
"{}_{}".format(
config["TARGET"],
config["TOOL_CHAIN_TAG"]))
config["BUILD_X64"] = os.path.join(config["BUILD_DIR_PATH"], 'X64')
config["BUILD_IA32"] = os.path.join(config["BUILD_DIR_PATH"], 'IA32')
if not os.path.isdir(config["BUILD_DIR_PATH"]):
try:
os.makedirs(config["BUILD_DIR_PATH"])
except OSError:
print("Error while creating Build folder")
sys.exit(1)
#@todo: Replace this with PcdFspModeSelection
if config.get("API_MODE_FSP_WRAPPER_BUILD", "FALSE") == "TRUE":
config["EXT_BUILD_FLAGS"] += " -D FSP_MODE=0"
else:
config["EXT_BUILD_FLAGS"] += " -D FSP_MODE=1"
if config.get("API_MODE_FSP_WRAPPER_BUILD", "FALSE") == "TRUE":
raise ValueError("FSP API Mode is currently unsupported on Ice Lake Xeon Scalable")
# Build the ACPI AML offset table *.offset.h
print("Info: re-generating PlatformOffset header files")
execute_script = functions.get("execute_script")
# AML offset arch is X64, not sure if it matters.
command = ["build", "-a", "X64", "-t", config["TOOL_CHAIN_TAG"], "-D", "MAX_SOCKET=" + config["MAX_SOCKET"]]
if config["EXT_BUILD_FLAGS"] and config["EXT_BUILD_FLAGS"] != "":
ext_build_flags = config["EXT_BUILD_FLAGS"].split(" ")
ext_build_flags = [x.strip() for x in ext_build_flags]
ext_build_flags = [x for x in ext_build_flags if x != ""]
command.extend(ext_build_flags)
aml_offsets_split = os.path.split(os.path.normpath(config["AML_OFFSETS_PATH"]))
command.append("-p")
command.append(os.path.normpath(config["AML_OFFSETS_PATH"]) + '.dsc')
command.append("-m")
command.append(os.path.join(aml_offsets_split[0], aml_offsets_split[1], aml_offsets_split[1] + '.inf'))
command.append("-y")
command.append(os.path.join(config["WORKSPACE"], "PreBuildReport.txt"))
command.append("--log=" + os.path.join(config["WORKSPACE"], "PreBuild.log"))
shell = True
if os.name == "posix": # linux
shell = False
_, _, _, code = execute_script(command, config, shell=shell)
if code != 0:
print(" ".join(command))
print("Error re-generating PlatformOffset header files")
sys.exit(1)
# Build AmlGenOffset command to consume the *.offset.h and produce AmlOffsetTable.c for StaticSkuDataDxe use.
# Get destination path and filename from config
relative_file_path = os.path.normpath(config["STRIPPED_AML_OFFSETS_FILE_PATH"]) # get path relative to Platform/Intel
out_file_path = os.path.join(config["WORKSPACE_PLATFORM"], relative_file_path) # full path to output file
out_file_dir = os.path.dirname(out_file_path) # remove filename
out_file_root_ext = os.path.splitext(os.path.basename(out_file_path)) # root and extension of output file
# Get relative path for the generated offset.h file
relative_dsdt_file_path = os.path.normpath(config["DSDT_TABLE_FILE_PATH"]) # path relative to Platform/Intel
dsdt_file_root_ext = os.path.splitext(os.path.basename(relative_dsdt_file_path)) # root and extension of generated offset.h file
# Generate output directory if it doesn't exist
if not os.path.exists(out_file_dir):
os.mkdir(out_file_dir)
command = ["python",
os.path.join(config["MIN_PACKAGE_TOOLS"], "AmlGenOffset", "AmlGenOffset.py"),
"-d", "--aml_filter", config["AML_FILTER"],
"-o", out_file_path,
os.path.join(config["BUILD_X64"], aml_offsets_split[0], aml_offsets_split[1], aml_offsets_split[1], "OUTPUT", os.path.dirname(relative_dsdt_file_path), dsdt_file_root_ext[0] + ".offset.h")]
# execute the command
_, _, _, code = execute_script(command, config, shell=shell)
if code != 0:
print(" ".join(command))
print("Error re-generating PlatformOffset header files")
sys.exit(1)
print("GenOffset done")
return None
def _merge_files(files, ofile):
with open(ofile, 'wb') as of:
for x in files:
if not os.path.exists(x):
return
with open(x, 'rb') as f:
of.write(f.read())
def build_ex(config, functions):
"""Additional BIOS build function
:param config: The environment variables to be used in the build process
:type config: Dictionary
:param functions: A dictionary of function pointers
:type functions: Dictionary
:returns: config dictionary
:rtype: Dictionary
"""
print("build_ex")
fv_path = os.path.join(config["BUILD_DIR_PATH"], "FV")
binary_fd = os.path.join(fv_path, "BINARY.fd")
main_fd = os.path.join(fv_path, "MAIN.fd")
secpei_fd = os.path.join(fv_path, "SECPEI.fd")
board_fd = config["BOARD"].upper()
final_fd = os.path.join(fv_path, "{}.fd".format(board_fd))
_merge_files((binary_fd, main_fd, secpei_fd), final_fd)
return None
def post_build_ex(config, functions):
"""Additional Post BIOS build function
:param config: The environment variables to be used in the post
build process
:type config: Dictionary
:param functions: A dictionary of function pointers
:type functions: Dictionary
:returns: config dictionary
:rtype: Dictionary
"""
print("post_build_ex")
fv_path = os.path.join(config["BUILD_DIR_PATH"], "FV")
board_fd = config["BOARD"].upper()
final_fd = os.path.join(fv_path, "{}.fd".format(board_fd))
final_ifwi = os.path.join(fv_path, "{}.bin".format(board_fd))
ifwi_ingredients_path = os.path.join(config["WORKSPACE_PLATFORM_BIN"], "Ifwi", config["BOARD"])
flash_descriptor = os.path.join(ifwi_ingredients_path, "FlashDescriptor.bin")
intel_me = os.path.join(ifwi_ingredients_path, "Me.bin")
_merge_files((flash_descriptor, intel_me, final_fd), final_ifwi)
    if os.path.isfile(final_ifwi):
print("IFWI image can be found at {}".format(final_ifwi))
return None
def clean_ex(config, functions):
"""Additional clean function
:param config: The environment variables to be used in the build process
:type config: Dictionary
:param functions: A dictionary of function pointers
:type functions: Dictionary
:returns: config dictionary
:rtype: Dictionary
"""
print("clean_ex")
return None
| edk2-platforms-master | Platform/Intel/WhitleyOpenBoardPkg/Aowanda/build_board.py |
# @ build_board.py
# This is sample code that provides optional dynamic imports
# of build functions to the BuildBios.py script
#
# Copyright (c) 2019, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
"""
This module serves as a sample implementation of the build extension
scripts
"""
def pre_build_ex(config, functions):
"""Additional Pre BIOS build function
:param config: The environment variables to be used in the build process
:type config: Dictionary
:param functions: A dictionary of function pointers
:type functions: Dictionary
:returns: nothing
"""
print("pre_build_ex")
return None
def build_ex(config, functions):
"""Additional BIOS build function
:param config: The environment variables to be used in the build process
:type config: Dictionary
:param functions: A dictionary of function pointers
:type functions: Dictionary
:returns: config dictionary
:rtype: Dictionary
"""
print("build_ex")
return None
def post_build_ex(config, functions):
"""Additional Post BIOS build function
:param config: The environment variables to be used in the post
build process
:type config: Dictionary
:param functions: A dictionary of function pointers
:type functions: Dictionary
:returns: config dictionary
:rtype: Dictionary
"""
print("post_build_ex")
return None
def clean_ex(config, functions):
"""Additional clean function
:param config: The environment variables to be used in the build process
:type config: Dictionary
:param functions: A dictionary of function pointers
:type functions: Dictionary
:returns: config dictionary
:rtype: Dictionary
"""
print("clean_ex")
return None
| edk2-platforms-master | Platform/Intel/KabylakeOpenBoardPkg/KabylakeRvp3/build_board.py |
# @ build_board.py
# This adds additional functions to the build_bios.py
#
# Copyright (c) 2019, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
"""
This module provides additional build steps for the Mt Olympus board
"""
import os
import sys
def pre_build_ex(config, functions):
"""Additional Pre BIOS build function
:param config: The environment variables to be used in the build process
:type config: Dictionary
:param functions: A dictionary of function pointers
:type functions: Dictionary
:returns: nothing
"""
print("Info: re-generating PlatformOffset header files")
execute_script = functions.get("execute_script")
command = ["build", "-D", "MAX_SOCKET=" + config.get("MAX_SOCKET", "1"),
"-m",
os.path.join(config["PLATFORM_BOARD_PACKAGE"],
"Acpi", "BoardAcpiDxe", "Dsdt.inf"),
"-y",
config.get("PRE_BUILD_REPORT",
os.path.join(config["WORKSPACE"],
"preBuildReport.txt")),
"--log=" + config.get("PRE_BUILD_LOG",
os.path.join(config["WORKSPACE"],
"prebuild.log"))]
_, _, _, code = execute_script(command, config)
if code != 0:
print(" ".join(command))
print("Error re-generating PlatformOffset header files")
sys.exit(1)
config["AML_FILTER"] = "\"PSYS\" .MCTL\" .FIX[0-9,A-Z]\""
print("AML_FILTER= ", config.get("AML_FILTER"))
# build the command with arguments
command = ["python",
os.path.join(config["MIN_PACKAGE_TOOLS"],
"AmlGenOffset",
"AmlGenOffset.py"),
"-d", "--aml_filter", config["AML_FILTER"],
"-o", os.path.join(config["WORKSPACE_PLATFORM"],
config["PLATFORM_BOARD_PACKAGE"],
"Acpi", "BoardAcpiDxe",
"AmlOffsetTable.c"),
os.path.join(config["BUILD_X64"],
"PurleyOpenBoardPkg",
"Acpi",
"BoardAcpiDxe",
"DSDT",
"OUTPUT",
"Dsdt", "WFPPlatform.offset.h")]
# execute the command
_, _, _, code = execute_script(command, config)
if code != 0:
print(" ".join(command))
print("Error re-generating PlatformOffset header files")
sys.exit(1)
print("GenOffset done")
return config
def build_ex(config, functions):
"""Additional BIOS build function
:param config: The environment variables to be used in
the build process
:type config: Dictionary
:param functions: A dictionary of function pointers
:type functions: Dictionary
:returns: config dictionary
:rtype: Dictionary
"""
print("build_ex")
return None
def post_build_ex(config, functions):
"""Additional Post BIOS build function
:param config: The environment variables to be used in the post
build process
:type config: Dictionary
:param functions: A dictionary of function pointers
:type functions: Dictionary
:returns: config dictionary
:rtype: Dictionary
"""
print("post_build_ex")
execute_script = functions.get("execute_script")
if not execute_script:
print("post_build_ex Error")
sys.exit(1)
common_patch_command = [os.path.join(config["PYTHON_HOME"], "python"),
os.path.join(config["MIN_PACKAGE_TOOLS"],
"PatchFv", "PatchBinFv.py"),
config["TARGET"],
os.path.join(config["WORKSPACE_SILICON_BIN"],
"PurleySiliconBinPkg", "FV"),
os.path.join(config["WORKSPACE"],
"BuildReport.log")]
fvs_to_patch = ["FvTempMemorySilicon",
"FvPreMemorySilicon",
"FvPostMemorySilicon",
"FvLateSilicon"]
for fv in fvs_to_patch:
patch_command = common_patch_command + [fv]
_, _, _, code = execute_script(patch_command, config)
if code != 0:
print(" ".join(patch_command))
print("Patch Error!")
sys.exit(1)
common_rebase_command = [os.path.join(config["PYTHON_HOME"], "python"),
os.path.join(config["MIN_PACKAGE_TOOLS"],
"PatchFv", "RebaseBinFv.py"),
config["TARGET"],
os.path.join(config["WORKSPACE_SILICON_BIN"],
"PurleySiliconBinPkg", "FV"),
os.path.join(config["WORKSPACE"],
"BuildReport.log")]
rebase_command = common_rebase_command +\
["FvPreMemorySilicon",
"gMinPlatformPkgTokenSpaceGuid.PcdFlashFvFspMBase"]
_, _, _, code = execute_script(rebase_command, config)
if code != 0:
print(" ".join(rebase_command))
print("Patch Error!")
sys.exit(1)
rebase_command = common_rebase_command +\
["FvPostMemorySilicon",
"gMinPlatformPkgTokenSpaceGuid.PcdFlashFvFspSBase"]
_, _, _, code = execute_script(rebase_command, config)
if code != 0:
print(" ".join(rebase_command))
print("Patch Error!")
sys.exit(1)
return None
def clean_ex(config, functions):
"""Additional clean function
:param config: The environment variables to be used in the build process
:type config: Dictionary
:param functions: A dictionary of function pointers
:type functions: Dictionary
:returns: config dictionary
:rtype: Dictionary
"""
print("clean_ex")
return None
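# --- Illustrative usage sketch (not part of the upstream build flow) ---
# The *_ex hooks above are normally invoked by build_bios.py, which supplies the
# "config" and "functions" dictionaries.  The guarded block below only sketches
# that contract with a stubbed execute_script; every path and config value used
# here is a placeholder assumption, not a real workspace layout.
if __name__ == "__main__":
    def _stub_execute_script(command, config):
        # Stand-in for the real runner: it would spawn the EDK II "build" tool
        # and return a 4-tuple whose last element is the process return code.
        print("would run:", " ".join(command))
        return None, None, None, 0

    _example_config = {
        "MAX_SOCKET": "1",
        "PLATFORM_BOARD_PACKAGE": "PurleyOpenBoardPkg/BoardMtOlympus",
        "WORKSPACE": "/tmp/ws",
        "WORKSPACE_PLATFORM": "/tmp/ws/edk2-platforms/Platform/Intel",
        "MIN_PACKAGE_TOOLS": "/tmp/ws/edk2-platforms/Platform/Intel/MinPlatformPkg/Tools",
        "BUILD_X64": "/tmp/ws/Build/PurleyOpenBoardPkg/DEBUG_GCC5/X64",
    }
    pre_build_ex(_example_config, {"execute_script": _stub_execute_script})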
| edk2-platforms-master | Platform/Intel/PurleyOpenBoardPkg/BoardMtOlympus/build_board.py |
# @ build_board.py
# This adds additional functions to the build_bios.py
#
# Copyright (c) 2018, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2021, American Megatrends International LLC.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
"""
This module serves as an additional build steps for the Tioga Pass board
"""
import os
import sys
def pre_build_ex(config, functions):
"""Additional Pre BIOS build function
:param config: The environment variables to be used in the build process
:type config: Dictionary
:param functions: A dictionary of function pointers
:type functions: Dictionary
:returns: nothing
"""
print("Info: re-generating PlatformOffset header files")
execute_script = functions.get("execute_script")
command = ["build", "-D", "MAX_SOCKET=" + config.get("MAX_SOCKET", "1"),
"-m",
os.path.join(config["PLATFORM_BOARD_PACKAGE"],
"Acpi", "BoardAcpiDxe", "Dsdt.inf"),
"-y",
config.get("PRE_BUILD_REPORT",
os.path.join(config["WORKSPACE"],
"preBuildReport.txt")),
"--log=" + config.get("PRE_BUILD_LOG",
os.path.join(config["WORKSPACE"],
"prebuild.log"))]
_, _, _, code = execute_script(command, config)
if code != 0:
print(" ".join(command))
print("Error re-generating PlatformOffset header files")
sys.exit(1)
config["AML_FILTER"] = "\"PSYS\" .MCTL\" .FIX[0-9,A-Z]\""
print("AML_FILTER= ", config.get("AML_FILTER"))
# build the command with arguments
command = ["python",
os.path.join(config["MIN_PACKAGE_TOOLS"],
"AmlGenOffset",
"AmlGenOffset.py"),
"-d", "--aml_filter", config["AML_FILTER"],
"-o", os.path.join(config["WORKSPACE_PLATFORM"],
config["PLATFORM_BOARD_PACKAGE"],
"Acpi", "BoardAcpiDxe",
"AmlOffsetTable.c"),
os.path.join(config["BUILD_X64"],
"PurleyOpenBoardPkg",
"Acpi",
"BoardAcpiDxe",
"DSDT",
"OUTPUT",
"Dsdt", "WFPPlatform.offset.h")]
# execute the command
_, _, _, code = execute_script(command, config)
if code != 0:
print(" ".join(command))
print("Error re-generating PlatformOffset header files")
sys.exit(1)
print("GenOffset done")
return config
def build_ex(config, functions):
"""Additional BIOS build function
:param config: The environment variables to be used in
the build process
:type config: Dictionary
:param functions: A dictionary of function pointers
:type functions: Dictionary
:returns: config dictionary
:rtype: Dictionary
"""
print("build_ex")
return None
def post_build_ex(config, functions):
"""Additional Post BIOS build function
:param config: The environment variables to be used in the post
build process
:type config: Dictionary
:param functions: A dictionary of function pointers
:type functions: Dictionary
:returns: config dictionary
:rtype: Dictionary
"""
print("post_build_ex")
execute_script = functions.get("execute_script")
if not execute_script:
print("post_build_ex Error")
sys.exit(1)
common_patch_command = [os.path.join(config["PYTHON_HOME"], "python"),
os.path.join(config["MIN_PACKAGE_TOOLS"],
"PatchFv", "PatchBinFv.py"),
config["TARGET"],
os.path.join(config["WORKSPACE_SILICON_BIN"],
"PurleySiliconBinPkg", "FV"),
os.path.join(config["WORKSPACE"],
"BuildReport.log")]
fvs_to_patch = ["FvTempMemorySilicon",
"FvPreMemorySilicon",
"FvPostMemorySilicon",
"FvLateSilicon"]
for fv in fvs_to_patch:
patch_command = common_patch_command + [fv]
_, _, _, code = execute_script(patch_command, config)
if code != 0:
print(" ".join(patch_command))
print("Patch Error!")
sys.exit(1)
common_rebase_command = [os.path.join(config["PYTHON_HOME"], "python"),
os.path.join(config["MIN_PACKAGE_TOOLS"],
"PatchFv", "RebaseBinFv.py"),
config["TARGET"],
os.path.join(config["WORKSPACE_SILICON_BIN"],
"PurleySiliconBinPkg", "FV"),
os.path.join(config["WORKSPACE"],
"BuildReport.log")]
rebase_command = common_rebase_command +\
["FvPreMemorySilicon",
"gMinPlatformPkgTokenSpaceGuid.PcdFlashFvFspMBase"]
_, _, _, code = execute_script(rebase_command, config)
if code != 0:
print(" ".join(rebase_command))
print("Patch Error!")
sys.exit(1)
rebase_command = common_rebase_command +\
["FvPostMemorySilicon",
"gMinPlatformPkgTokenSpaceGuid.PcdFlashFvFspSBase"]
_, _, _, code = execute_script(rebase_command, config)
if code != 0:
print(" ".join(rebase_command))
print("Patch Error!")
sys.exit(1)
common_patchbfv_command = [os.path.join(config["PYTHON_HOME"], "python"),
os.path.join(config["MIN_PACKAGE_TOOLS"],
"PatchFv", "PatchBfv.py"),
os.path.join(config["BUILD_DIR_PATH"],
"FV", "PLATFORM.fd"),
os.path.join(config["WORKSPACE"],
"BuildReport.log")]
patchbfv_command = common_patchbfv_command +\
["gMinPlatformPkgTokenSpaceGuid.PcdFlashFvPreMemoryBase"]
_, _, _, code = execute_script(patchbfv_command, config)
if code != 0:
print(" ".join(patchbfv_command))
print("Patch Error!")
sys.exit(1)
return None
def clean_ex(config, functions):
"""Additional clean function
:param config: The environment variables to be used in the build process
:type config: Dictionary
:param functions: A dictionary of function pointers
:type functions: Dictionary
:returns: config dictionary
:rtype: Dictionary
"""
print("clean_ex")
return None
| edk2-platforms-master | Platform/Intel/PurleyOpenBoardPkg/BoardTiogaPass/build_board.py |
edk2-platforms-master | Platform/Intel/PurleyOpenBoardPkg/BoardTiogaPass/__init__.py |
|
## @file
# This file contains the script to build UniversalPayload with platform payload
#
# Copyright (c) 2022, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import argparse
import subprocess
import os
import shutil
import sys
from ctypes import *
sys.dont_write_bytecode = True
def RunCommand(cmd):
print(cmd)
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,cwd=os.environ['WORKSPACE'])
while True:
line = p.stdout.readline()
if not line:
break
print(line.strip().decode(errors='ignore'))
p.communicate()
if p.returncode != 0:
print("- Failed - error happened when run command: %s"%cmd)
raise Exception("ERROR: when run command: %s"%cmd)
def BuildUniversalPayload(Args, MacroList):
BuildTarget = Args.Target
ToolChain = Args.ToolChain
ElfToolChain = 'CLANGDWARF'
ObjCopyFlag = "elf64-x86-64" if Args.Arch == 'X64' else "elf32-i386"
#
    # Find the universal UEFI payload build script
#
Edk2PayloadBuildScript = os.path.normpath("UefiPayloadPkg/UniversalPayloadBuild.py")
for package_path in os.environ['PACKAGES_PATH'].split(os.pathsep):
if os.path.exists (os.path.join (package_path, Edk2PayloadBuildScript)):
Edk2PayloadBuildScript = os.path.join (package_path, Edk2PayloadBuildScript)
break
if not os.path.exists (Edk2PayloadBuildScript):
raise Exception("Could not find universal UEFI payload build script UniversalPayloadBuild.py")
PlatformFvDscPath = os.path.normpath("PlatformPayloadFeaturePkg/PlatformPayloadFeaturePkg.dsc")
BuildDir = os.path.join(os.environ['WORKSPACE'], os.path.normpath("Build/UefiPayloadPkgX64"))
PlatformFvReportPath = os.path.join(BuildDir, "PlatformPayloadReport.txt")
UniversalUefiPld = os.path.join(BuildDir, 'UniversalPayload.elf')
PlatformFv = os.path.join(os.environ['WORKSPACE'], os.path.normpath("Build/PlatformPayloadFeaturePkg"), f"{BuildTarget}_{ToolChain}", os.path.normpath("FV/PLATFORMPAYLOAD.Fv"))
if "CLANG_BIN" in os.environ:
LlvmObjcopyPath = os.path.join(os.environ["CLANG_BIN"], "llvm-objcopy")
else:
LlvmObjcopyPath = "llvm-objcopy"
try:
RunCommand('"%s" --version'%LlvmObjcopyPath)
except:
print("- Failed - Please check if LLVM is installed or if CLANG_BIN is set correctly")
sys.exit(1)
Defines = ""
for key in MacroList:
Defines +=" -D {0}={1}".format(key, MacroList[key])
#
# Building Universal Payload entry.
#
if not Args.Skip:
BuildPayload = f"python {Edk2PayloadBuildScript} -b {BuildTarget} -t {ToolChain} -a {Args.Arch} {Defines}"
RunCommand(BuildPayload)
#
# Building Platform Payload.
#
BuildPayload = f"build -p {PlatformFvDscPath} -b {BuildTarget} -a X64 -t {ToolChain} -y {PlatformFvReportPath}"
BuildPayload += Defines
RunCommand(BuildPayload)
#
# Copy the Platform Payload as a section in elf format Universal Payload.
#
remove_section = f'"{LlvmObjcopyPath}" -I {ObjCopyFlag} -O {ObjCopyFlag} --remove-section .upld.platform_fv {UniversalUefiPld}'
add_section = f'"{LlvmObjcopyPath}" -I {ObjCopyFlag} -O {ObjCopyFlag} --add-section .upld.platform_fv={PlatformFv} {UniversalUefiPld}'
set_section = f'"{LlvmObjcopyPath}" -I {ObjCopyFlag} -O {ObjCopyFlag} --set-section-alignment .upld.platform_fv=16 {UniversalUefiPld}'
RunCommand(remove_section)
RunCommand(add_section)
RunCommand(set_section)
def main():
parser = argparse.ArgumentParser(description='Build Platform Payload FV and add it to Universal UEFI Payload')
parser.add_argument('-t', '--ToolChain')
parser.add_argument('-b', '--Target', default='DEBUG')
    parser.add_argument('-a', '--Arch', choices=['IA32', 'X64'], help='Specify the ARCH for the payload entry module. Defaults to building an X64 image.', default='X64')
parser.add_argument("-D", "--Macro", action="append")
    parser.add_argument('-s', '--Skip', action='store_true', help='Skip the Universal UEFI payload build (just build the platform FV and add it to the Universal UEFI payload).')
MacroList = {}
args = parser.parse_args()
if args.Macro is not None:
for Argument in args.Macro:
if Argument.count('=') != 1:
print("Unknown variable passed in: %s"%Argument)
raise Exception("ERROR: Unknown variable passed in: %s"%Argument)
tokens = Argument.strip().split('=')
MacroList[tokens[0].upper()] = tokens[1]
BuildUniversalPayload(args, MacroList)
print ("Successfully build Universal Payload with platform FV")
if __name__ == '__main__':
main()
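# Illustrative usage note (a sketch, not upstream documentation): typical
# invocations, with placeholder tool chain and macro values that must be
# adjusted to the local environment:
#   python PlatformPayloadFeaturePkg.py -t GCC5 -b DEBUG -a X64 -D MACRO_NAME=VALUE
#   python PlatformPayloadFeaturePkg.py -t GCC5 -b RELEASE --Skip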
| edk2-platforms-master | Features/Intel/PlatformPayloadFeaturePkg/PlatformPayloadFeaturePkg.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="nvidia-clara-client",
version="0.8.1.7",
author="NVIDIA Clara Deploy",
description="Python package to interact with Clara Platform Server API",
license='Apache Software License (http://www.apache.org/licenses/LICENSE-2.0)',
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/NVIDIA/clara-platform-python-client",
install_requires=['grpcio', 'protobuf'],
packages=setuptools.find_packages('.'),
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
'License :: OSI Approved :: Apache Software License'
],
python_requires='>=3.6',
)
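# Illustrative packaging note (a sketch, not upstream documentation): a typical
# local build/install flow, with an approximate wheel file name derived from the
# name and version declared above:
#   python setup.py sdist bdist_wheel
#   pip install dist/nvidia_clara_client-0.8.1.7-py3-none-any.whl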
| clara-platform-python-client-main | setup.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Mapping
from nvidia_clara.grpc import common_pb2
class PipelineDefinition:
def __init__(self, content: str = None, name: str = None):
self._content = content
self._name = name
@property
def content(self) -> str:
"""Text content of a pipeline definition.
Content is typed as a "System.String" to avoid encoding related issues."""
return self._content
@content.setter
def content(self, content: str):
"""Text content of a pipeline definition.
Content is typed as a "System.String" to avoid encoding related issues."""
self._content = content
@property
def name(self) -> str:
"""The name of the pipeline definition.
Not the name of the pipeline as defined by the definition.
Example: clara/examples/my-pipeline.yml
"""
return self._name
@name.setter
def name(self, name: str):
"""The name of the pipeline definition.
Not the name of the pipeline as defined by the definition.
Example: clara/examples/my-pipeline.yml
"""
self._name = name
class PipelineId:
def __init__(self, value: str):
if (value == "") or (value is None):
raise Exception("Value arguement must be initialized to non-null value")
self._value = value
@property
def value(self):
return self._value
def __eq__(self, other):
return self._value == other._value
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return "%s" % (self._value)
def __str__(self):
return "%s" % (self._value)
def __hash__(self):
return hash(self._value)
def to_grpc_value(self):
id = common_pb2.Identifier()
id.value = self._value
return id
class PipelineDetails:
def __init__(self, pipeline_id: PipelineId = None, name: str = None, definition: List[PipelineDefinition] = None,
metadata: Mapping[str, str] = None):
if definition is None:
definition = []
if metadata is None:
metadata = dict()
self._pipeline_id = pipeline_id
self._name = name
self._definition = definition
self._metadata = metadata
@property
def pipeline_id(self) -> PipelineId:
"""Unique identifier of the pipeline."""
return self._pipeline_id
@pipeline_id.setter
def pipeline_id(self, pipeline_id: PipelineId):
"""Unique identifier of the pipeline."""
self._pipeline_id = pipeline_id
@property
def name(self) -> str:
"""
Human readable name of the pipeline.
Not guaranteed to be unique.
"""
return self._name
@name.setter
def name(self, name: str):
"""
Human readable name of the pipeline.
Not guaranteed to be unique.
"""
self._name = name
@property
def definition(self) -> List[PipelineDefinition]:
"""
The definition of the pipeline.
Clara pipeline definitions can be multi-file.
"""
return self._definition
@definition.setter
def definition(self, definition: List[PipelineDefinition]):
"""
The definition of the pipeline.
Clara pipeline definitions can be multi-file.
"""
self._definition = definition
@property
def metadata(self) -> Mapping[str, str]:
"""
Metadata (set of key/value pairs) associated with the pipeline
"""
return self._metadata
@metadata.setter
def metadata(self, metadata: Mapping[str, str]):
"""
Metadata (set of key/value pairs) associated with the pipeline
"""
self._metadata = metadata
class PipelineInfo:
def __init__(self, pipeline_id: PipelineId = None, name: str = None, metadata: Mapping[str, str] = None):
if metadata is None:
metadata = dict()
self._pipeline_id = pipeline_id
self._name = name
self._metadata = metadata
@property
def pipeline_id(self) -> PipelineId:
"""Unique identifier of the pipeline."""
return self._pipeline_id
@pipeline_id.setter
def pipeline_id(self, pipeline_id: PipelineId):
"""Unique identifier of the pipeline."""
self._pipeline_id = pipeline_id
@property
def name(self) -> str:
"""
Human readable name of the pipeline.
Not guaranteed to be unique.
"""
return self._name
@name.setter
def name(self, name: str):
"""
Human readable name of the pipeline.
Not guaranteed to be unique.
"""
self._name = name
@property
def metadata(self) -> Mapping[str, str]:
"""
Metadata (set of key/value pairs) associated with the pipeline
"""
return self._metadata
@metadata.setter
def metadata(self, metadata: Mapping[str, str]):
"""
Metadata (set of key/value pairs) associated with the pipeline
"""
self._metadata = metadata
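# --- Illustrative usage sketch (not part of the upstream module) ---
# PipelineId is a small value type: equal identifiers compare equal and hash
# identically, so they can serve as dictionary keys.  The identifier string and
# pipeline name below are arbitrary placeholders.
if __name__ == "__main__":
    _id_a = PipelineId("0123456789abcdef0123456789abcdef")
    _id_b = PipelineId("0123456789abcdef0123456789abcdef")
    _details = PipelineDetails(pipeline_id=_id_a, name="my-pipeline")
    print(_id_a == _id_b, {_id_a: _details.name})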
| clara-platform-python-client-main | nvidia_clara/pipeline_types.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Mapping
import grpc
from nvidia_clara.grpc import pipelines_pb2, pipelines_pb2_grpc
import nvidia_clara.pipeline_types as pipeline_types
from nvidia_clara.base_client import BaseClient, RequestIterator
class PipelinesClientStub:
def create_pipeline(self, definition: List[pipeline_types.PipelineDefinition],
pipeline_id: pipeline_types.PipelineId = None,
metadata: Mapping[str, str] = None) -> pipeline_types.PipelineId:
"""
Requests the creation of a new pipeline by Clara.
Args:
definition(List[pipeline_types.PipelineDefinition]): Definition from which to create the new pipeline.
pipeline_id: Optional argument to force a specific pipeline identifier when replicating deployments.
                Use ONLY with a highly available primary-primary fail-over solution in place AND a full
                understanding of what it does.
metadata(Mapping[str, str]): Set of key/value pairs to be appended to the pipeline metadata. If a metadata
key in the request already exists in the pipeline record, or if duplicate keys are passed in the
                request, the pipeline will not be updated and an error will be returned. Keys are compared using
case insensitive comparator functions. The maximum allowed size of a metadata key is 128 bytes,
while the maximum allowed size of a metadata value is 256 bytes. The maximum allowed size for the
overall metadata of an individual job is 4 Megabytes.
Returns:
pipeline_types.PipelineId of newly created pipeline
"""
pass
def list_pipelines(self) -> List[pipeline_types.PipelineInfo]:
"""
Requests a list of pipelines from Clara.
Returns:
List of pipeline_types.PipelineInfo with running pipeline information
"""
pass
def pipeline_details(self, pipeline_id: pipeline_types.PipelineId) -> pipeline_types.PipelineDetails:
"""
Requests details of a pipeline, identified by pipeline_types.PipelineId, from Clara.
Args:
pipeline_id (pipeline_types.PipelineId): Unique identifier of the pipeline.
Return:
A pipeline_types.PipelineDetails instance with details on the pipeline specified by 'pipeline_id'
"""
pass
def remove_pipeline(self, pipeline_id: pipeline_types.PipelineId):
"""
Removes a pipeline, identified by "pipelineId", from Clara.
Args:
pipeline_id (pipeline_types.PipelineId): Unique identifier of the
pipeline
"""
pass
def update_pipeline(self, pipeline_id: pipeline_types.PipelineId,
definition: List[pipeline_types.PipelineDefinition]):
"""
Requests a pipeline, identified by "pipelineId", be updated by Clara.
Args:
pipeline_id (pipeline_types.PipelineId): Unique identifier of the
pipeline.
definition: Definition from which to update the pipeline.
"""
pass
def add_metadata(self, pipeline_id: pipeline_types.PipelineId, metadata: Mapping[str, str]) -> Mapping[str, str]:
"""
Requests the addition of metadata to a pipeline.
Args:
pipeline_id (pipeline_types.PipelineId): Unique identifier of the pipeline whose metadata is to be appended.
metadata(Mapping[str, str]): Set of key/value pairs to be appended to the pipeline metadata. If a metadata
key in the request already exists in the pipeline record, or if duplicate keys are passed in the
                request, the pipeline will not be updated and an error will be returned. Keys are compared using
case insensitive comparator functions. The maximum allowed size of a metadata key is 128 bytes,
while the maximum allowed size of a metadata value is 256 bytes. The maximum allowed size for the
overall metadata of an individual job is 4 Megabytes.
Returns:
A Mapping[str, str] containing the appended metadata
"""
pass
def remove_metadata(self, pipeline_id: pipeline_types.PipelineId, keys: List[str]) -> Mapping[str, str]:
"""
Requests the removal of specified metadata of a pipeline.
Args:
pipeline_id: Unique identifier of the pipeline whose metadata is to be removed.
keys: List of keys to be removed from the pipeline metadata.
Returns:
A Mapping[str, str] containing the updated set of metadata
"""
pass
class PipelinesClient(BaseClient, PipelinesClientStub):
def __init__(self, target: str, port: str = None, stub=None):
"""
Pipelines Client Creation
Args:
target (str): ipv4 address of clara instance
port (str): if specified, port will be appended to the target with a ":"
"""
if target is None:
raise Exception("Target must be initialized to a non-null value")
self._connection = target
if port is not None:
self._connection += ":" + port
self._channel = grpc.insecure_channel(self._connection)
if stub is None:
self._stub = pipelines_pb2_grpc.PipelinesStub(self._channel)
else:
self._stub = stub
def close(self):
"""
Close connection
"""
if self._channel:
self._channel.close()
self._channel = None
else:
print("Connection for client already closed")
def reconnect(self):
"""
Re-open connection with existing channel
"""
if self._channel is None:
self._channel = grpc.insecure_channel(self._connection)
self._stub = pipelines_pb2_grpc.PipelinesStub(self._channel)
else:
print("Connection for client already open")
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self._channel is not None:
self.close()
return False
def create_pipeline(self, definition: List[pipeline_types.PipelineDefinition],
pipeline_id: pipeline_types.PipelineId = None, metadata: Mapping[str, str] = None,
timeout=None) -> pipeline_types.PipelineId:
"""
Requests the creation of a new pipeline by Clara.
Args:
definition(List[pipeline_types.PipelineDefinition]): Definition from which to create the new pipeline.
pipeline_id: Optional argument to force a specific pipeline identifier when replicating deployments.
                Use ONLY with a highly available primary-primary fail-over solution in place AND a full
                understanding of what it does.
metadata(Mapping[str, str]): Set of key/value pairs to be appended to the pipeline metadata. If a metadata
key in the request already exists in the pipeline record, or if duplicate keys are passed in the
                request, the pipeline will not be updated and an error will be returned. Keys are compared using
case insensitive comparator functions. The maximum allowed size of a metadata key is 128 bytes,
while the maximum allowed size of a metadata value is 256 bytes. The maximum allowed size for the
overall metadata of an individual job is 4 Megabytes.
Returns:
pipeline_types.PipelineId of newly created pipeline
"""
if (self._channel is None) or (self._stub is None):
raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")
if definition is None:
raise Exception("Argument 'definition' must be initialized to a non-null list instance")
request_list = []
for item in definition:
item_definition = pipelines_pb2.PipelineDefinitionFile(
content=item.content,
path=item.name
)
# If pipeline identifier set, must first be in GRPC Identifier format
if pipeline_id is not None:
pipeline_id = pipeline_id.to_grpc_value()
request = pipelines_pb2.PipelinesCreateRequest(
definition=item_definition,
pipeline_id=pipeline_id,
header=self.get_request_header()
)
if metadata is not None:
request.metadata.update(metadata)
request_list.append(request)
request_list = RequestIterator(request_list)
response = self._stub.Create(
request_list(),
timeout=timeout
)
self.check_response_header(header=response.header)
return pipeline_types.PipelineId(response.pipeline_id.value)
def list_pipelines(self, timeout=None) -> List[pipeline_types.PipelineInfo]:
"""
Requests a list of pipelines from Clara.
Returns:
List of pipeline_types.PipelineInfo with running pipeline information
"""
if (self._channel is None) or (self._stub is None):
raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")
request = pipelines_pb2.PipelinesListRequest(
header=self.get_request_header()
)
response = self._stub.List(request, timeout=timeout)
info_list = []
responses = [resp for resp in response]
if len(responses) > 0:
for item in responses:
if (item.details is None) or (item.details.pipeline_id.value == ''):
continue
info = pipeline_types.PipelineInfo(
pipeline_id=pipeline_types.PipelineId(item.details.pipeline_id.value),
name=item.details.name,
metadata=item.details.metadata
)
info_list.append(info)
return info_list
def pipeline_details(self, pipeline_id: pipeline_types.PipelineId, timeout=None) -> pipeline_types.PipelineDetails:
"""
Requests details of a pipeline, identified by pipeline_types.PipelineId, from Clara.
Args:
pipeline_id (pipeline_types.PipelineId): Unique identifier of the pipeline.
Return:
A pipeline_types.PipelineDetails instance with details on the pipeline specified by 'pipeline_id'
"""
if (self._channel is None) or (self._stub is None):
raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")
if pipeline_id.value is None or pipeline_id.value == "":
raise Exception("Pipeline identifier argument must be initialized with non-null instance")
request = pipelines_pb2.PipelinesDetailsRequest(
header=self.get_request_header(),
pipeline_id=pipeline_id.to_grpc_value(),
)
response = self._stub.Details(request, timeout=timeout)
responses = [resp for resp in response]
if len(responses) > 0:
self.check_response_header(header=responses[0].header)
result = pipeline_types.PipelineDetails(
name=responses[0].name,
pipeline_id=pipeline_types.PipelineId(responses[0].pipeline_id.value),
metadata=responses[0].metadata
)
result_definition = []
for resp in responses:
result_definition.append(
pipeline_types.PipelineDefinition(
name=resp.name,
content=resp.definition
)
)
result.definition = result_definition
return result
return None
def remove_pipeline(self, pipeline_id: pipeline_types.PipelineId, timeout=None):
"""
Removes a pipeline, identified by "pipelineId", from Clara.
Args:
pipeline_id (pipeline_types.PipelineId): Unique identifier of the
pipeline
"""
if (self._channel is None) or (self._stub is None):
raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")
if pipeline_id.value is None or pipeline_id.value == "":
raise Exception("Pipeline identifier argument must be initialized with non-null instance")
request = pipelines_pb2.PipelinesRemoveRequest(
header=self.get_request_header(),
pipeline_id=pipeline_id.to_grpc_value()
)
response = self._stub.Remove(request, timeout=timeout)
self.check_response_header(header=response.header)
def update_pipeline(self, pipeline_id: pipeline_types.PipelineId,
definition: List[pipeline_types.PipelineDefinition],
timeout=None):
"""
Requests a pipeline, identified by "pipelineId", be updated by Clara.
Args:
pipeline_id (pipeline_types.PipelineId): Unique identifier of the
pipeline.
definition: Definition from which to update the pipeline.
"""
if (self._channel is None) or (self._stub is None):
raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")
if pipeline_id.value is None or pipeline_id.value == "":
raise Exception("Pipeline identifier argument must be initialized with non-null instance")
if definition is None:
raise Exception(
"Pipeline definition argument must be initialized"
"with non-null instnace of List[pipeline_types.PipelineDefinition]")
request_list = []
for item in definition:
request = pipelines_pb2.PipelinesUpdateRequest(
definition=pipelines_pb2.PipelineDefinitionFile(
content=item.content,
path=item.name
),
header=self.get_request_header(),
pipeline_id=pipeline_id.to_grpc_value()
)
request_list.append(request)
request_list = RequestIterator(request_list)
response = self._stub.Update(request_list(), timeout=timeout)
self.check_response_header(header=response.header)
def add_metadata(self, pipeline_id: pipeline_types.PipelineId, metadata: Mapping[str, str], timeout=None) -> \
Mapping[str, str]:
"""
Requests the addition of metadata to a pipeline.
Args:
pipeline_id (pipeline_types.PipelineId): Unique identifier of the pipeline whose metadata is to be appended.
metadata(Mapping[str, str]): Set of key/value pairs to be appended to the pipeline metadata. If a metadata
key in the request already exists in the pipeline record, or if duplicate keys are passed in the
                request, the pipeline will not be updated and an error will be returned. Keys are compared using
case insensitive comparator functions. The maximum allowed size of a metadata key is 128 bytes,
while the maximum allowed size of a metadata value is 256 bytes. The maximum allowed size for the
overall metadata of an individual job is 4 Megabytes.
Returns:
A Mapping[str, str] containing the appended metadata
"""
if (self._channel is None) or (self._stub is None):
raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")
if (pipeline_id.value is None) or (pipeline_id.value == ""):
raise Exception("Pipeline identifier must have instantiated value")
if metadata is None:
raise Exception("Metadata must be an instantiated map")
request = pipelines_pb2.PipelinesAddMetadataRequest(
pipeline_id=pipeline_id.to_grpc_value()
)
request.metadata.update(metadata)
response = self._stub.AddMetadata(request, timeout)
self.check_response_header(header=response.header)
result = response.metadata
return result
def remove_metadata(self, pipeline_id: pipeline_types.PipelineId, keys: List[str], timeout=None) -> Mapping[
str, str]:
"""
Requests the removal of specified metadata of a pipeline.
Args:
pipeline_id: Unique identifier of the pipeline whose metadata is to be removed.
keys: List of keys to be removed from the pipeline metadata.
Returns:
A Mapping[str, str] containing the updated set of metadata
"""
if (self._channel is None) or (self._stub is None):
raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")
if (pipeline_id.value is None) or (pipeline_id.value == ""):
raise Exception("Pipeline identifier must have instantiated value")
if keys is None:
raise Exception("Keys paramater must be valid list of metadata keys")
request = pipelines_pb2.PipelinesRemoveMetadataRequest(
pipeline_id=pipeline_id.to_grpc_value()
)
request.keys.extend(keys)
response = self._stub.RemoveMetadata(request, timeout)
self.check_response_header(header=response.header)
result = response.metadata
return result
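# --- Illustrative usage sketch (not part of the upstream module) ---
# The address, port and pipeline definition below are placeholders; a reachable
# Clara Platform gRPC endpoint is assumed, so this is a sketch rather than a
# guaranteed-working invocation.
if __name__ == "__main__":
    with PipelinesClient(target="10.0.0.1", port="30031") as pipelines_client:
        _pipeline_id = pipelines_client.create_pipeline(
            definition=[
                pipeline_types.PipelineDefinition(
                    name="my-pipeline.yml",
                    content="name: my-pipeline\n",  # placeholder definition text
                )
            ]
        )
        print("created pipeline:", _pipeline_id)
        for _info in pipelines_client.list_pipelines():
            print(_info.pipeline_id, _info.name)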
| clara-platform-python-client-main | nvidia_clara/pipelines_client.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
from nvidia_clara.grpc import common_pb2, jobs_pb2
import nvidia_clara.constants as constants
class BaseClient:
@staticmethod
def check_response_header(header):
if not isinstance(header, common_pb2.ResponseHeader):
raise TypeError("Header arguement must be of type ResponseHeader")
if header.code < 0:
if header.messages is not None:
if len(header.messages) > 0:
message_string_list = [header.messages[i] for i in range(len(header.messages))]
raise Exception('\n'.join(message_string_list))
else:
raise Exception("Internal Server Error " + str(header.code))
else:
raise Exception("Internal Server Error " + str(header.code))
@staticmethod
def get_request_header() -> common_pb2.RequestHeader:
header = common_pb2.RequestHeader(api_version=common_pb2.Version(
major=constants.ClaraVersionMajor,
minor=constants.ClaraVersionMinor,
patch=constants.ClaraVersionPatch),
user_agent="Nvidia.Clara.Platform")
return header
class RequestIterator(object):
def __init__(self, requests):
self._requests_iter = iter(requests)
def __call__(self, handler=None):
while True:
try:
request = next(self._requests_iter)
except StopIteration:
return
yield request
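# --- Illustrative usage sketch (not part of the upstream module) ---
# RequestIterator wraps an already-built list of request messages so it can be
# handed to a gRPC client-streaming call; the plain strings below stand in for
# real protobuf request objects.
if __name__ == "__main__":
    _requests = RequestIterator(["request-1", "request-2"])
    for _request in _requests():
        print(_request)
    print(BaseClient.get_request_header())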
| clara-platform-python-client-main | nvidia_clara/base_client.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import grpc
from typing import BinaryIO, Mapping, List
from nvidia_clara.grpc import payloads_pb2, payloads_pb2_grpc
from nvidia_clara.base_client import BaseClient
import nvidia_clara.payload_types as payload_types
class PayloadsClientStub:
def create_payload(self, metadata: Mapping[str, str] = None) -> payload_types.PayloadDetails:
"""
Creates a static payload.
Payloads created using this API are created with a type of "PayloadType.Reusable"
Returns:
the details of newly created payload.
"""
pass
def delete_payload(self, payload_id: payload_types.PayloadId):
"""
Requests the deletion of a payload, identified by "payload_id" from Clara.
Deleted payloads cannot be recovered.
Args:
payload_id (payload_types.PayloadId): Unique identifier of the payload to delete.
"""
pass
def get_details(self, payload_id: payload_types.PayloadId) -> payload_types.PayloadDetails:
"""
Requests the details of a payload, identified by "payload_id" from Clara.
Args:
payload_id (payload_types.PayloadId): Unique identifier of the payload.
Returns:
A payload_types.PayloadDetails instance containing payload details
"""
pass
def download_from(self, payload_id: payload_types.PayloadId, blob_name: str,
dest_obj: BinaryIO) -> payload_types.PayloadFileDetails:
"""
Downloads a blob, identified by "blob_name", from a payload, identified by its "payload_id", from Clara.
        Data downloaded from the payload is written to 'dest_obj', a caller-provided BinaryIO object opened for writing
Args:
payload_id (payload_types.PayloadId): Unique identifier of the payload.
blob_name (str): The name, or path, of the blob in the payload.
dest_obj (BinaryIO): Target stream object to write to
"""
pass
def remove_from(self, payload_id: payload_types.PayloadId, blob_name: str):
"""
Removes a blob from the payload.
Args:
payload_id (payload_types.PayloadId): Unique identifier of the payload from which to remove the blob.
blob_name (str): The name, or path, of the blob in the payload.
"""
pass
def upload(self, payload_id: payload_types.PayloadId, blob_name: str,
file_object: BinaryIO = None) -> payload_types.PayloadFileDetails:
"""
Uploads a blob from "file_object", to a Clara Payload identified by "payload_id".
        Each uploaded blob must have a unique "blob_name" value within a given payload.
Args:
payload_id (payload_types.PayloadId): Unique identifier of the payload.
blob_name (str): The name, or path, of the blob in the payload.
file_object (BinaryIO): stream to read from and upload
"""
pass
def add_metadata(self, payload_id: payload_types.PayloadId, metadata: Mapping[str, str]) -> Mapping[str, str]:
"""
Requests the addition of metadata to a payload.
Args:
payload_id (payload_types.PayloadId): Unique identifier of the payload.
metadata(Mapping[str, str]): Set of key/value pairs to be appended to the payload metadata. If a metadata
                key in the request already exists in the payload record, or if duplicate keys are passed in the request,
                the payload will not be updated and an error will be returned. Keys are compared using case
insensitive comparator functions. The maximum allowed size of a metadata key is 128 bytes, while
the maximum allowed size of a metadata value is 256 bytes. The maximum allowed size for the overall
metadata of an individual payload is 4 Megabytes.
Returns:
A Mapping[str, str] containing the appended metadata
"""
pass
def remove_metadata(self, payload_id: payload_types.PayloadId, keys: List[str]) -> Mapping[str, str]:
"""
Requests the removal of metadata from a payload.
Args:
payload_id (payload_types.PayloadId): Unique identifier of the payload.
keys: List of keys to be removed from the payload metadata.
Returns:
A Mapping[str, str] containing the updated set of metadata
"""
pass
class PayloadsClient(BaseClient, PayloadsClientStub):
def __init__(self, target: str, port: str = None, stub=None):
"""
Payloads Client Creation
Args:
target (str): ipv4 address of clara instance
port (str): if specified, port will be appended to the target with a ":"
"""
if target is None:
raise Exception("Target must be initialized to a non-null value")
self._connection = target
if port is not None:
self._connection += ":" + port
self._channel = grpc.insecure_channel(self._connection)
if stub is None:
self._stub = payloads_pb2_grpc.PayloadsStub(self._channel)
else:
self._stub = stub
def close(self):
"""
Close connection
"""
if self._channel:
self._channel.close()
self._channel = None
else:
print("Connection for client already closed")
def reconnect(self):
"""
Re-open connection with existing channel
"""
if self._channel is None:
self._channel = grpc.insecure_channel(self._connection)
self._stub = payloads_pb2_grpc.PayloadsStub(self._channel)
else:
print("Connection for client already open")
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self._channel is not None:
self.close()
return False
def create_payload(self, metadata: Mapping[str, str] = None, timeout=None) -> payload_types.PayloadDetails:
"""
Creates a static payload.
Payloads created using this API are created with a type of "PayloadType.Reusable"
Returns:
the details of newly created payload.
"""
if (self._channel is None) or (self._stub is None):
raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")
request = payloads_pb2.PayloadsCreateRequest(header=self.get_request_header())
if metadata is not None:
request.metadata.update(metadata)
response = self._stub.Create(request, timeout=timeout)
self.check_response_header(header=response.header)
payload_details = payload_types.PayloadDetails(
file_details=[],
payload_id=payload_types.PayloadId(response.payload_id.value),
payload_type=response.type
)
return payload_details
def delete_payload(self, payload_id: payload_types.PayloadId, timeout=None):
"""
Requests the deletion of a payload, identified by "payload_id" from Clara.
Deleted payloads cannot be recovered.
Args:
payload_id (payload_types.PayloadId): Unique identifier of the payload to delete.
"""
if (self._channel is None) or (self._stub is None):
raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")
if (payload_id.value is None) or (payload_id.value == ""):
raise Exception("Payload identifier argument must be initialized with non-null instance")
request = payloads_pb2.PayloadsDeleteRequest(
header=self.get_request_header(),
payload_id=payload_id.to_grpc_value()
)
response = self._stub.Delete(request, timeout=timeout)
self.check_response_header(header=response.header)
def get_details(self, payload_id: payload_types.PayloadId, timeout=None) -> payload_types.PayloadDetails:
"""
Requests the details of a payload, identified by "payload_id" from Clara.
Args:
payload_id (payload_types.PayloadId): Unique identifier of the payload.
Returns:
A payload_types.PayloadDetails instance containing payload details
"""
if (self._channel is None) or (self._stub is None):
raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")
if (payload_id.value is None) or (payload_id.value == ""):
raise Exception("Payload identifier argument must be initialized with non-null instance")
request = payloads_pb2.PayloadsDetailsRequest(
header=self.get_request_header(),
payload_id=payload_id.to_grpc_value()
)
response = self._stub.Details(request, timeout=timeout)
responses = [resp for resp in response]
if len(responses) > 0:
self.check_response_header(header=responses[0].header)
file_details = []
for item in responses:
if item.file is None:
continue
detail = payload_types.PayloadFileDetails(
mode=item.file.mode,
name=item.file.name,
size=item.file.size
)
file_details.append(detail)
result = payload_types.PayloadDetails(
payload_id=payload_types.PayloadId(responses[0].payload_id.value),
file_details=file_details,
payload_type=responses[0].type,
metadata=responses[0].metadata
)
return result
return None
def download_from(self, payload_id: payload_types.PayloadId, blob_name: str, dest_obj: BinaryIO = None,
dest_path: str = None, timeout=None) -> payload_types.PayloadFileDetails:
"""
Downloads a blob, identified by "blob_name", from a payload, identified by its "payload_id", from Clara.
        Data downloaded from the payload is written to 'dest_obj', a caller-provided BinaryIO object opened for writing
Args:
payload_id (payload_types.PayloadId): Unique identifier of the payload.
blob_name (str): The name, or path, of the blob in the payload.
dest_obj (BinaryIO): Target stream object to write to with write privileges
            dest_path (str): Alternative to 'dest_obj'; path of a local file to write the downloaded blob to
"""
if (self._channel is None) or (self._stub is None):
raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")
if (payload_id.value is None) or (payload_id.value == ""):
raise Exception("Payload identifier argument must be initialized with non-null instance")
if (blob_name is None) or (blob_name == ""):
raise Exception("Name of source blob must be initialized with non-null string")
file_path_used = False
if dest_obj is None:
if dest_path is None:
raise Exception("Destination object for upload must be initialized with non-null BinaryIO object")
else:
dest_obj = open(dest_path, 'wb')
file_path_used = True
request = payloads_pb2.PayloadsDownloadRequest(
header=self.get_request_header(),
name=blob_name,
payload_id=payload_id.to_grpc_value()
)
responses = self._stub.Download(request, timeout=timeout)
result = None
for resp in responses:
if result is None:
self.check_response_header(header=resp.header)
result = payload_types.PayloadFileDetails(
mode=resp.details.mode,
name=resp.details.name,
size=resp.details.size
)
dest_obj.write(resp.data)
if file_path_used:
dest_obj.close()
return result
def remove_from(self, payload_id: payload_types.PayloadId, blob_name: str, timeout=None):
"""
Removes a blob from the payload.
Args:
payload_id (payload_types.PayloadId): Unique identifier of the payload from which to remove the blob.
blob_name (str): The name, or path, of the blob in the payload.
"""
if (self._channel is None) or (self._stub is None):
raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")
if (payload_id.value is None) or (payload_id.value == ""):
raise Exception("Payload identifier argument must be initialized with non-null instance")
if (blob_name is None) or (blob_name == ""):
raise Exception("Name of blob to remove must be initialized with non-null string")
request = payloads_pb2.PayloadsRemoveRequest(
header=self.get_request_header(),
name=blob_name,
payload_id=payload_id.to_grpc_value()
)
response = self._stub.Remove(request, timeout=timeout)
self.check_response_header(header=response.header)
def upload_request_iterator(self, payload_id: payload_types.PayloadId, file_name: str,
source_object: BinaryIO = None, mode: int = 0):
"""
        Creates a generator of upload requests that stream chunks read from 'source_object'
        Args:
            payload_id (payload_types.PayloadId): Unique identifier of the payload.
            file_name (str): Destination blob name recorded with each uploaded chunk
            source_object (BinaryIO): Stream to read from
            mode (int): File mode (permissions) recorded for the uploaded blob
"""
if (self._channel is None) or (self._stub is None):
raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")
if source_object is None:
raise Exception("Source object must be initialized with a non-null BinaryIO instance")
chunk_size = 64 * 1024
while True:
data = source_object.read(chunk_size)
if not data:
return
details = payloads_pb2.PayloadFileDetails(mode=mode, name=file_name, size=len(data))
request = payloads_pb2.PayloadsUploadRequest(
header=self.get_request_header(),
payload_id=payload_id.to_grpc_value(),
details=details,
data=data
)
yield request
def upload(self, payload_id: payload_types.PayloadId, blob_name: str, file_object: BinaryIO = None,
file_path: str = None,
timeout=None) -> payload_types.PayloadFileDetails:
"""
Uploads a blob from "file_object", to a Clara Payload identified by "payload_id".
        Each uploaded blob must have a unique "blob_name" value within a given payload.
Args:
payload_id (payload_types.PayloadId): Unique identifier of the payload.
blob_name (str): The name, or path, of the blob in the payload.
file_object (BinaryIO): stream to read from and upload with read privileges
            file_path (str): Alternative to 'file_object'; path of a local file to upload
"""
if (self._channel is None) or (self._stub is None):
raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")
if (payload_id.value is None) or (payload_id.value == ""):
raise Exception("Payload identifier argument must be initialized with non-null instance")
if (blob_name is None) or (blob_name == ""):
raise Exception("Name of destination blob must be initialized with non-null string")
file_path_used = False
if file_object is None:
if file_path is None:
raise Exception("File_object of file for upload must be initialized with non-null BinaryIO object")
else:
file_object = open(file_path, 'rb')
file_path_used = True
requests = self.upload_request_iterator(
payload_id=payload_id,
file_name=blob_name,
source_object=file_object
)
response = self._stub.Upload(
requests,
timeout=timeout
)
self.check_response_header(header=response.header)
result = payload_types.PayloadFileDetails(other=response.details)
if file_path_used:
file_object.close()
return result
def add_metadata(self, payload_id: payload_types.PayloadId, metadata: Mapping[str, str], timeout=None) -> Mapping[
str, str]:
"""
Requests the addition of metadata to a payload.
Args:
payload_id (payload_types.PayloadId): Unique identifier of the payload.
metadata(Mapping[str, str]): Set of key/value pairs to be appended to the payload metadata. If a metadata
                key in the request already exists in the payload record, or if duplicate keys are passed in the request,
                the payload will not be updated and an error will be returned. Keys are compared using case
insensitive comparator functions. The maximum allowed size of a metadata key is 128 bytes, while
the maximum allowed size of a metadata value is 256 bytes. The maximum allowed size for the overall
metadata of an individual payload is 4 Megabytes.
Returns:
A Mapping[str, str] containing the appended metadata
"""
if (self._channel is None) or (self._stub is None):
raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")
if (payload_id.value is None) or (payload_id.value == ""):
raise Exception("Payload identifier must have instantiated value")
if metadata is None:
raise Exception("Metadata must be an instantiated map")
request = payloads_pb2.PayloadsAddMetadataRequest(
payload_id=payload_id.to_grpc_value()
)
request.metadata.update(metadata)
response = self._stub.AddMetadata(request, timeout)
self.check_response_header(header=response.header)
result = response.metadata
return result
def remove_metadata(self, payload_id: payload_types.PayloadId, keys: List[str], timeout=None) -> Mapping[str, str]:
"""
Requests the removal of metadata from a payload.
Args:
payload_id (payload_types.PayloadId): Unique identifier of the payload.
keys: List of keys to be removed from the payload metadata.
Returns:
A Mapping[str, str] containing the updated set of metadata
"""
if (self._channel is None) or (self._stub is None):
raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")
if (payload_id.value is None) or (payload_id.value == ""):
raise Exception("Payload identifier must have instantiated value")
if keys is None:
raise Exception("Keys paramater must be valid list of metadata keys")
request = payloads_pb2.PayloadsRemoveMetadataRequest(
payload_id=payload_id.to_grpc_value()
)
request.keys.extend(keys)
response = self._stub.RemoveMetadata(request, timeout)
self.check_response_header(header=response.header)
result = response.metadata
return result
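# --- Illustrative usage sketch (not part of the upstream module) ---
# The address, port and local file names below are placeholders; a reachable
# Clara Platform endpoint and an existing "input.dat" file are assumed.
if __name__ == "__main__":
    with PayloadsClient(target="10.0.0.1", port="30031") as payloads_client:
        _payload = payloads_client.create_payload(metadata={"purpose": "example"})
        payloads_client.upload(
            payload_id=_payload.payload_id,
            blob_name="input/input.dat",
            file_path="input.dat",
        )
        payloads_client.download_from(
            payload_id=_payload.payload_id,
            blob_name="input/input.dat",
            dest_path="copy-of-input.dat",
        )
        payloads_client.delete_payload(_payload.payload_id)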
| clara-platform-python-client-main | nvidia_clara/payloads_client.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ClaraVersionMajor = 0
ClaraVersionMinor = 6
ClaraVersionPatch = 0
GrpcChunkSizeDefault = 1024 * 1024
GrpcChunkSizeMaximum = 4 * 1024 * 1024 - 512
GrpcChunkSizeMinimum = 1024
GrpcChunkSizeName = "GRPC_CHUNK_SIZE"
GrpcParallelStreamsDefault = 8
GrpcParallelStreamsMaximum = 64
GrpcParallelStreamsMinimum = 1
GrpcParallelStreamsName = "GRPC_PARALLEL_STREAMS"
GrpcChannelProviderUnavailable = "GRPC Channel provider is unavailable."
GrpcClientProviderUnavailable = "GRPC client provider is unavailable."
| clara-platform-python-client-main | nvidia_clara/constants.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from typing import List, Mapping, Iterator
import grpc
from nvidia_clara.grpc import common_pb2, clara_pb2, clara_pb2_grpc
from nvidia_clara.base_client import BaseClient
import nvidia_clara.clara_types as clara_types
import nvidia_clara.job_types as job_types
class ClaraClient(BaseClient):
def __init__(self, target: str, port: str = None, stub=None):
"""
Clara Client Creation
Args:
target (str): ipv4 address of clara instance
port (str): if specified, port will be appended to the target with a ":"
"""
if target is None:
raise Exception("Target must be initialized to a non-null value")
self._connection = target
if port is not None:
self._connection += ":" + port
self._channel = grpc.insecure_channel(self._connection)
if stub is None:
self._stub = clara_pb2_grpc.ClaraStub(self._channel)
else:
self._stub = stub
@staticmethod
def get_timestamp(seconds_since_year_one: str) -> datetime.datetime:
"""
Create datetime.datetime object from a string date
Args:
seconds_since_year_one(str): date to parse
Returns:
datetime.datetime object
"""
if (seconds_since_year_one is None) or (seconds_since_year_one == ""):
return None
try:
# Check to see if in form of seconds since year one
seconds_int = float(seconds_since_year_one.value) - 62167219200
except:
# Otherwise parse timestamp
return datetime.datetime.strptime(seconds_since_year_one, "%Y-%m-%d %H:%M:%SZ")
if seconds_int < 0:
return None
result_date = datetime.datetime.fromtimestamp(seconds_int)
return result_date
def stop(self, timeout=None):
"""Sends stop request to instance of Pipeline Services and Triton"""
if (self._channel is None) or (self._stub is None):
raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")
request = clara_pb2.ClaraStopRequest(header=self.get_request_header())
response = self._stub.Stop(request, timeout=timeout)
self.check_response_header(header=response.header)
def list_utilization(self, timeout=None) -> List[clara_types.ClaraUtilizationDetails]:
"""
        Method for acquiring a snapshot of GPU utilization information from Clara as a list
        Returns:
            List[clara_types.ClaraUtilizationDetails] with a snapshot of GPU utilization details for Clara GPUs
"""
if (self._channel is None) or (self._stub is None):
raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")
request = clara_pb2.ClaraUtilizationRequest(header=self.get_request_header(), watch=False)
response = self._stub.Utilization(request, timeout=timeout)
utilization_list = []
header_check = False
for resp in response:
if not header_check:
self.check_response_header(header=resp.header)
header_check = True
metrics = resp.gpu_metrics
clara_utilization_details = clara_types.ClaraUtilizationDetails()
for item in metrics:
gpu_utilization = clara_types.ClaraGpuUtilization(
node_id=item.node_id,
pcie_id=item.pcie_id,
compute_utilization=item.compute_utilization,
memory_free=item.memory_free,
memory_used=item.memory_used,
memory_utilization=item.memory_utilization,
timestamp=self.get_timestamp(item.timestamp),
)
for proc_info in item.process_details:
process_details = clara_types.ClaraProcessDetails(
name=proc_info.name,
)
if proc_info.job_id.value:
process_details.job_id = job_types.JobId(proc_info.job_id.value)
gpu_utilization.process_details.append((process_details))
clara_utilization_details.gpu_metrics.append((gpu_utilization))
utilization_list.append(clara_utilization_details)
return utilization_list
def stream_utilization(self, timeout=None) -> Iterator[clara_types.ClaraUtilizationDetails]:
"""
        Method for acquiring a stream of GPU utilization information from Clara
        Returns:
            Iterator[clara_types.ClaraUtilizationDetails] with a stream of GPU utilization details for Clara GPUs
"""
if (self._channel is None) or (self._stub is None):
raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")
request = clara_pb2.ClaraUtilizationRequest(header=self.get_request_header(), watch=True)
response = self._stub.Utilization(request, timeout=timeout)
header_check = False
for resp in response:
if not header_check:
self.check_response_header(header=resp.header)
header_check = True
metrics = resp.gpu_metrics
clara_utilization_details = clara_types.ClaraUtilizationDetails()
for item in metrics:
gpu_utilization = clara_types.ClaraGpuUtilization(
node_id=item.node_id,
pcie_id=item.pcie_id,
compute_utilization=item.compute_utilization,
memory_free=item.memory_free,
memory_used=item.memory_used,
memory_utilization=item.memory_utilization,
timestamp=self.get_timestamp(item.timestamp),
)
for proc_info in item.process_details:
process_details = clara_types.ClaraProcessDetails(
name=proc_info.name,
job_id=job_types.JobId(proc_info.job_id.value)
)
gpu_utilization.process_details.append((process_details))
clara_utilization_details.gpu_metrics.append((gpu_utilization))
yield clara_utilization_details
def version(self, timeout=None):
"""Get Clara Version"""
request = clara_pb2.ClaraVersionRequest(header=self.get_request_header())
response = self._stub.Version(request, timeout=timeout)
self.check_response_header(header=response.header)
result = clara_types.ClaraVersionInfo(
major=response.version.major,
minor=response.version.minor,
patch=response.version.patch,
label=response.version.label
)
return result
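# --- Illustrative usage sketch (not part of the upstream module) ---
# The address and port below are placeholders for a reachable Clara Platform
# gRPC endpoint; attribute names follow the constructors used above.
if __name__ == "__main__":
    clara_client = ClaraClient(target="10.0.0.1", port="30031")
    print(clara_client.version())
    for _snapshot in clara_client.list_utilization():
        for _gpu in _snapshot.gpu_metrics:
            print(_gpu.node_id, _gpu.pcie_id, _gpu.compute_utilization)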
| clara-platform-python-client-main | nvidia_clara/clara_client.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia_clara.jobs_client import JobsClient
from nvidia_clara.pipelines_client import PipelinesClient
from nvidia_clara.payloads_client import PayloadsClient
from nvidia_clara.models_client import ModelsClient
from nvidia_clara.base_client import BaseClient
from nvidia_clara.clara_client import ClaraClient
import nvidia_clara.pipeline_types as PipelineTypes
import nvidia_clara.job_types as JobTypes
import nvidia_clara.payload_types as PayloadTypes
import nvidia_clara.model_types as ModelTypes
import nvidia_clara.clara_types as ClaraTypes
| clara-platform-python-client-main | nvidia_clara/__init__.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
from typing import List, Mapping
from nvidia_clara.grpc import common_pb2, payloads_pb2
class PayloadType(Enum):
Unknown = payloads_pb2.PAYLOAD_TYPE_UNKNOWN
Pipeline = payloads_pb2.PAYLOAD_TYPE_PIPELINE
Reusable = payloads_pb2.PAYLOAD_TYPE_REUSABLE
Minimum = Pipeline
Maximum = Reusable
class PayloadFileDetails:
def __init__(self, other: payloads_pb2.PayloadFileDetails = None, mode: int = None, name: str = None,
size: int = None):
"""
Args:
mode(int): Permissions
name(str): File Path Location
size(int): Size of File
other(payloads_pb2.PayloadFileDetails): If specified, object information replicated
"""
if other is None:
self._mode = mode
self._name = name
self._size = size
else:
self._mode = other.mode
self._name = other.name
self._size = other.size
@property
def mode(self):
"""
Mode of the file.
See [https://en.wikipedia.org/wiki/Chmod] for additional information.
"""
return self._mode
@mode.setter
def mode(self, mode: int):
"""
Mode of the file.
See [https://en.wikipedia.org/wiki/Chmod] for additional information.
"""
self._mode = mode
@property
def name(self):
"""
        Unique (within a payload) name of the file; in path format.
        File names are relative to the root of the payload, and should not be rooted paths (prefixed with a '/' character).
"""
return self._name
@name.setter
def name(self, name: str):
"""
        Unique (within a payload) name of the file; in path format.
        File names are relative to the root of the payload, and should not be rooted paths (prefixed with a '/' character).
"""
self._name = name
@property
def size(self):
"""Size, in bytes, of the file."""
return self._size
@size.setter
def size(self, size: int):
"""Size, in bytes, of the file."""
self._size = size
def __eq__(self, other):
        return (self._mode == other.mode) and (self._name == other.name) and (
                self._size == other.size)
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return hash((self._mode, self._name, self._size))
class PayloadId:
def __init__(self, value: str = None):
        if value is None or value == "":
            raise Exception("Argument 'value' must be initialized to a non-null, non-empty string")
self._value = value
@property
def value(self):
return self._value
def __eq__(self, other):
return self._value == other._value
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return "%s" % (self._value)
def __str__(self):
return "%s" % (self._value)
def __hash__(self):
return hash(self._value)
def to_grpc_value(self):
id = common_pb2.Identifier()
id.value = self._value
return id
class PayloadDetails:
def __init__(self, payload_id: PayloadId = None, file_details: List[PayloadFileDetails] = None,
payload_type: payloads_pb2.PayloadType = None, metadata: Mapping[str, str] = None):
if file_details is None:
file_details = []
if metadata is None:
metadata = dict()
self._payload_id = payload_id
self._file_details = file_details
self._payload_type = payload_type
self._metadata = metadata
@property
def payload_id(self):
"""Gets the unique identifier of the payload."""
return self._payload_id
@payload_id.setter
def payload_id(self, payload_id: PayloadId):
"""Sets the unique identifier of the payload."""
self._payload_id = payload_id
@property
def file_details(self):
"""Gets list of files contained in the payload."""
return self._file_details
@file_details.setter
def file_details(self, file_details: List[PayloadFileDetails]):
"""Sets a list of files contained in the payload."""
self._file_details = file_details
@property
def payload_type(self):
"""Gets a list of files contained in the payload."""
return self._payload_type
@payload_type.setter
def payload_type(self, payload_type: payloads_pb2.PayloadType):
"""Sets a list of files contained in the payload."""
self._payload_type = payload_type
@property
def metadata(self) -> Mapping[str, str]:
"""
Metadata (set of key/value pairs) associated with the payload
"""
return self._metadata
@metadata.setter
def metadata(self, metadata: Mapping[str, str]):
"""
Metadata (set of key/value pairs) associated with the payload
"""
self._metadata = metadata
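

# ---------------------------------------------------------------------------
# Usage sketch (illustration only): building payload descriptions locally.
# The payload identifier below is a placeholder; real identifiers are issued
# by the Clara Platform server when a payload is created.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    file_details = PayloadFileDetails(mode=0o644, name="input/volume-1.mhd", size=1024)

    payload = PayloadDetails(
        payload_id=PayloadId("0123456789abcdef0123456789abcdef"),
        file_details=[file_details],
        payload_type=PayloadType.Reusable.value,
    )

    print(payload.payload_id, [(f.name, f.size) for f in payload.file_details])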
| clara-platform-python-client-main | nvidia_clara/payload_types.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from enum import Enum
from typing import List, Mapping, TypeVar
from nvidia_clara.grpc import common_pb2, jobs_pb2
import nvidia_clara.payload_types as payload_types
import nvidia_clara.pipeline_types as pipeline_types
T = TypeVar('T')
class JobPriority(Enum):
"""
Priority of a pipeline job.
    A job's priority affects the order in which it will be scheduled once "JobsClient.start_job()" is called.
"""
# The job priority is unknown.
Unkown = 0
# Lower than normal priority.
# Lower priority jobs are queued like "Normal" and "Higher" priority jobs, but are scheduled less frequently.
Lower = jobs_pb2.JOB_PRIORITY_LOWER
# Normal, or default, priority.
# Normal priority jobs are queued like "Lower" and "Higher" priority jobs.
    # Normal priority jobs are scheduled more frequently than lower priority jobs and less frequently than higher priority jobs.
Normal = jobs_pb2.JOB_PRIORITY_NORMAL
# Higher than normal priority.
# Higher priority jobs are queued like "Lower" and "Normal"priority jobs, but are scheduled more frequently.
Higher = jobs_pb2.JOB_PRIORITY_HIGHER
    # Immediate priority jobs are queued in a separate queue which must be emptied before any other priority jobs can be scheduled.
Immediate = jobs_pb2.JOB_PRIORITY_IMMEDIATE
# The maximum supported value for "JobPriority"
Maximum = jobs_pb2.JOB_PRIORITY_IMMEDIATE
# The minimum supported value for "JobPriority"
Minimum = jobs_pb2.JOB_PRIORITY_LOWER
class JobState(Enum):
"""
State of a pipeline job.
Related to "JobStatus"
"""
# The job is unknown or in an unknown state.
Unknown = 0
# The job has been accepted and queued by the server, but has not yet started running.
Pending = jobs_pb2.JOB_STATE_PENDING
# The job is currently running.
Running = jobs_pb2.JOB_STATE_RUNNING
    # The job has stopped running.
Stopped = jobs_pb2.JOB_STATE_STOPPED
# Maximum supported value of "JobState"
Maximum = Stopped
# Minimum supported value of "JobState"
Minimum = Pending
class JobStatus(Enum):
"""
Status of pipeline job.
Related to "JobState"
"""
# The job is unknown or the status of the job is unknown.
Unknown = 0
# The job has been canceled.
Canceled = jobs_pb2.JOB_STATUS_CANCELED
# The job has encountered a terminal error.
Faulted = jobs_pb2.JOB_STATUS_FAULTED
# The job is healthy.
# If stopped, the job has completed successfully.
Healthy = jobs_pb2.JOB_STATUS_HEALTHY
# The job was evicted
Evicted = jobs_pb2.JOB_STATUS_EVICTED
# The job was terminated
Terminated = jobs_pb2.JOB_STATUS_TERMINATED
# Maximum supported value of "JobStatus"
Maximum = Canceled
# Minimum supported value of "JobStatus"
Minimum = Healthy
class JobOperatorStatus(Enum):
"""
Status of an operator in a job.
"""
    # The operator is unknown or the status of the operator is unknown.
Unknown = jobs_pb2.JOB_OPERATOR_STATUS_UNKNOWN
# The operator has been accepted and queued by the server, but has not yet started running.
Pending = jobs_pb2.JOB_OPERATOR_STATUS_PENDING
# The operator is currently running.
Running = jobs_pb2.JOB_OPERATOR_STATUS_RUNNING
# The operator has completed successfully.
Completed = jobs_pb2.JOB_OPERATOR_STATUS_COMPLETED
# The operator has encountered an error.
Faulted = jobs_pb2.JOB_OPERATOR_STATUS_FAULTED
class JobId:
"""
Unique identifier for a Clara Pipeline Job.
"""
def __init__(self, value: str):
"""
Creates Unique Identifier Object for a Clara Pipeline Job.
"""
if (value is None) or (value == ""):
raise Exception("Job identifier value must be intialized.")
self._value = value
@property
def value(self):
return self._value
def __eq__(self, other) -> bool:
return self._value == other.value
def __ne__(self, other) -> bool:
return not (self == other)
def __repr__(self):
return "%s" % self._value
def __str__(self):
return "%s" % self._value
def __hash__(self):
return hash(self._value)
def to_grpc_value(self):
id = common_pb2.Identifier()
id.value = self._value
return id
class JobToken:
def __init__(self, job_id: JobId = None, job_state: JobState = None, job_status: JobStatus = None,
job_priority: JobPriority = None):
self._job_id = job_id
self._job_state = job_state
self._job_status = job_status
self._job_priority = job_priority
@property
def job_id(self) -> JobId:
"""Unique identifier of the job."""
return self._job_id
@job_id.setter
def job_id(self, job_id: JobId):
"""Unique identifier of the job."""
self._job_id = job_id
@property
def job_state(self) -> JobState:
"""Current state of the job."""
return self._job_state
@job_state.setter
def job_state(self, job_state: JobState):
"""Current state of the job."""
self._job_state = job_state
@property
def job_status(self) -> JobStatus:
"""Current status of the job."""
return self._job_status
@job_status.setter
def job_status(self, job_status: JobStatus):
"""Current status of the job."""
self._job_status = job_status
@property
def job_priority(self) -> JobPriority:
"""Priority of the job"""
return self._job_priority
@job_priority.setter
def job_priority(self, job_priority: JobPriority):
"""Priority of the job"""
self._job_priority = job_priority
class JobInfo(JobToken):
def __init__(self, job_id: JobId = None, job_state: JobState = None, job_status: JobStatus = None,
job_priority: JobPriority = None, date_created: datetime = None, date_started: datetime = None,
date_stopped: datetime = None, name: str = None, payload_id: payload_types.PayloadId = None,
pipeline_id: pipeline_types.PipelineId = None, metadata: Mapping[str, str] = None):
super().__init__(
job_id=job_id,
job_state=job_state,
job_status=job_status,
job_priority=job_priority,
)
if metadata is None:
metadata = dict()
self._date_created = date_created
self._date_started = date_started
self._date_stopped = date_stopped
self._name = name
self._payload_id = payload_id
self._pipeline_id = pipeline_id
self._metadata = metadata
@property
def date_created(self) -> datetime:
return self._date_created
@date_created.setter
def date_created(self, date_created: datetime):
self._date_created = date_created
@property
def date_started(self) -> datetime:
return self._date_started
@date_started.setter
def date_started(self, date_started: datetime):
self._date_started = date_started
@property
def date_stopped(self) -> datetime:
return self._date_stopped
@date_stopped.setter
def date_stopped(self, date_stopped: datetime):
self._date_stopped = date_stopped
@property
def name(self) -> str:
return self._name
@name.setter
def name(self, name: str):
self._name = name
@property
def payload_id(self) -> payload_types.PayloadId:
return self._payload_id
@payload_id.setter
def payload_id(self, payload_id: payload_types.PayloadId):
self._payload_id = payload_id
@property
def pipeline_id(self) -> pipeline_types.PipelineId:
return self._pipeline_id
@pipeline_id.setter
def pipeline_id(self, pipeline_id: pipeline_types.PipelineId):
self._pipeline_id = pipeline_id
@property
def metadata(self) -> Mapping[str, str]:
"""
Metadata (set of key/value pairs) associated with the job
"""
return self._metadata
@metadata.setter
def metadata(self, metadata: Mapping[str, str]):
"""
Metadata (set of key/value pairs) associated with the job
"""
self._metadata = metadata
class JobFilter:
def __init__(self, completed_before: datetime = None, created_after: datetime = None,
has_job_state: List[JobState] = None, has_job_status: List[JobStatus] = None,
pipeline_ids: List[pipeline_types.PipelineId] = None):
if has_job_state is None:
has_job_state = []
if has_job_status is None:
has_job_status = []
if pipeline_ids is None:
pipeline_ids = []
self._completed_before = completed_before
self._created_after = created_after
self._has_job_state = has_job_state
self._has_job_status = has_job_status
self._pipeline_ids = pipeline_ids
@property
def completed_before(self) -> datetime:
"""When applied, only jobs completed before the supplied date will be returned."""
return self._completed_before
@completed_before.setter
def completed_before(self, completed_before: datetime):
"""When applied, only jobs completed before the supplied date will be returned."""
self._completed_before = completed_before
@property
def created_after(self) -> datetime:
"""When applied, only jobs created after the supplied date will be returned."""
return self._created_after
@created_after.setter
def created_after(self, created_after: datetime):
"""When applied, only jobs created after the supplied date will be returned."""
self._created_after = created_after
@property
def has_job_state(self) -> List[JobState]:
"""When applied, only jobs having a provided state value will be returned."""
return self._has_job_state
@has_job_state.setter
def has_job_state(self, has_job_state: List[JobState]):
"""When applied, only jobs having a provided state value will be returned."""
self._has_job_state = has_job_state
@property
def has_job_status(self) -> List[JobStatus]:
"""When applied, only jobs having a provided status value will be returned."""
return self._has_job_status
@has_job_status.setter
def has_job_status(self, has_job_status: List[JobStatus]):
"""When applied, only jobs having a provided status value will be returned."""
self._has_job_status = has_job_status
@property
def pipeline_ids(self) -> List[pipeline_types.PipelineId]:
"""When applied, only jobs with matching pipeline identifiers will be returned."""
return self._pipeline_ids
@pipeline_ids.setter
def pipeline_ids(self, pipeline_ids: List[pipeline_types.PipelineId]):
"""When applied, only jobs with matching pipeline identifiers will be returned."""
self._pipeline_ids = pipeline_ids
class JobDetails(JobInfo):
def __init__(self, job_id: JobId = None, job_state: JobState = None, job_status: JobStatus = None,
job_priority: JobPriority = None, date_created: datetime = None, date_started: datetime = None,
date_stopped: datetime = None, name: str = None, payload_id: payload_types.PayloadId = None,
pipeline_id: pipeline_types.PipelineId = None, operator_details: Mapping[str, Mapping[str, T]] = None,
messages: List[str] = None, metadata: Mapping[str, str] = None):
if metadata is None:
metadata = dict()
super().__init__(
job_id=job_id,
job_state=job_state,
job_status=job_status,
job_priority=job_priority,
date_created=date_created,
date_started=date_started,
date_stopped=date_stopped,
name=name,
payload_id=payload_id,
pipeline_id=pipeline_id,
metadata=metadata
)
if messages is None:
messages = []
if operator_details is None:
operator_details = dict()
self._messages = messages
self._operator_details = operator_details
@property
def messages(self) -> List[str]:
"""List of messages reported by the job."""
return self._messages
@messages.setter
def messages(self, messages: List[str]):
"""List of messages reported by the job."""
self._messages = messages
@property
def operator_details(self) -> Mapping[str, Mapping[str, T]]:
"""Dictionary mapping operator names to operator details"""
return self._operator_details
@operator_details.setter
def operator_details(self, operator_details: Mapping[str, Mapping[str, T]]):
"""Dictionary mapping operator names to operator details"""
self._operator_details = operator_details
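

# ---------------------------------------------------------------------------
# Usage sketch (illustration only): constructing identifiers and a job filter.
# The job identifier below is a placeholder; filters built this way are
# consumed by JobsClient.list_jobs() / JobsClient.stream_jobs().
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import datetime as _dt

    job_id = JobId("0123456789abcdef0123456789abcdef")
    print(job_id, job_id.to_grpc_value().value)

    # Match healthy jobs that stopped during the last day.
    recent_healthy = JobFilter(
        created_after=_dt.datetime.now(_dt.timezone.utc) - _dt.timedelta(days=1),
        has_job_state=[JobState.Stopped],
        has_job_status=[JobStatus.Healthy],
    )
    print(recent_healthy.has_job_state, recent_healthy.has_job_status)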
| clara-platform-python-client-main | nvidia_clara/job_types.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from typing import List, Mapping
import grpc
import itertools
from nvidia_clara.grpc import common_pb2, jobs_pb2, jobs_pb2_grpc
from nvidia_clara.base_client import BaseClient
import nvidia_clara.job_types as job_types
import nvidia_clara.pipeline_types as pipeline_types
import nvidia_clara.payload_types as payload_types
class JobsClientStub:
def cancel_job(self, job_id: job_types.JobId, reason=None) -> job_types.JobToken:
"""
Cancels a pipeline job, preventing it from being executed.
        Has no effect on executing or terminated jobs.
Args:
job_id (job_types.JobId): Unique identity of the job to be cancelled.
reason: Optional reason as to why the job was cancelled.
Returns:
job_types.JobToken of cancelled job
"""
pass
def create_job(self, pipeline_id: pipeline_types.PipelineId, job_name: str, job_priority: job_types.JobPriority,
input_payloads: List[payload_types.PayloadId] = None,
metadata: Mapping[str, str] = None) -> job_types.JobInfo:
"""
        Creates a new pipeline job record and associated storage payload.
        Jobs are created in a "JobState.Pending" state.
        Use "start_job(job_id, named_values)" to cause the job to start executing.
Args:
pipeline_id (pipeline_types.PipelineId): Unique identifier of the pipeline which the job should
be instances from.
job_name (str): Human readable name of the job.
job_priority (job_types.JobPriority): Optional Priority of the job.
Affects how and when the server will schedule the job.
            input_payloads (List[payload_types.PayloadId]): [Optional Parameter] List of static payloads to
include as input for the job.
metadata (Mapping[str, str]): [Optional Parameter] Metadata (set of key/value pairs) associated with the
job
Returns:
job_types.JobInfo about the newly created pipeline job.
"""
pass
def get_status(self, job_id: job_types.JobId) -> job_types.JobDetails:
"""
Get status of a job
Args:
job_id (job_types.JobId): job_id Unique identifier of the job to get the status of.
Returns:
job_types.JobDetails including the status of a known job
"""
pass
def list_jobs(self, job_filter: job_types.JobFilter = None) -> List[job_types.JobInfo]:
"""
        Provides a list of pipeline jobs known to the platform
Args:
job_filter (job_types.JobFilter): Optional filter used to limit the number of
pipeline job records return
Returns:
list of job_types.JobInfo with known pipeline job details from the server.
"""
pass
def start_job(self, job_id: job_types.JobId, named_values: Mapping[str, str] = None) -> job_types.JobToken:
"""
Starts a "JobState.Pending" job.
Once started, a job's payload becomes readonly.
Args:
job_id (job_types.JobId): Unique identifier of the job to start.
named_values: Collection of name/value pairs used to populate pipeline
variables.
Returns:
A job_types.JobToken with information on started job
"""
pass
def job_logs(self, job_id: job_types.JobId, operator_name: str) -> List[str]:
"""
Retrieve logs of operator specified with "operator_name" with job associated with "job_id"
Args:
job_id (job_types.JobId): Unique identifier of the job to retrieve logs from
operator_name (str): Operator to retrieve logs from
Returns:
List of operator logs
"""
pass
def add_metadata(self, job_id: job_types.JobId, metadata: Mapping[str, str]) -> Mapping[str, str]:
"""
Requests the addition of metadata to a job.
Args:
job_id (job_types.JobId): Unique identifier of the job whose metadata is to be appended.
metadata(Mapping[str, str]): Set of key/value pairs to be appended to the job metadata. If a metadata
key in the request already exists in the job record, or if duplicate keys are passed in the request,
            the job will not be updated and an error will be returned. Keys are compared using case
insensitive comparator functions. The maximum allowed size of a metadata key is 128 bytes, while
the maximum allowed size of a metadata value is 256 bytes. The maximum allowed size for the overall
metadata of an individual job is 4 Megabytes.
Returns:
A Mapping[str, str] containing the appended metadata
"""
pass
def remove_metadata(self, job_id: job_types.JobId, keys: List[str]) -> Mapping[str, str]:
"""
Requests the removal of metadata from a job.
Args:
job_id: Unique identifier of the job whose metadata is to be removed.
keys: List of keys to be removed from the job metadata.
Returns:
A Mapping[str, str] containing the updated set of metadata
"""
pass
class JobsClient(BaseClient, JobsClientStub):
def __init__(self, target: str, port: str = None, stub=None):
"""
Jobs Client Creation
Args:
target (str): ipv4 address of clara instance
port (str): if specified, port will be appended to the target with a ":"
"""
if target is None:
raise Exception("Target must be initialized to a non-null value")
self._connection = target
if port is not None:
self._connection += ":" + port
self._channel = grpc.insecure_channel(self._connection)
if stub is None:
self._stub = jobs_pb2_grpc.JobsStub(self._channel)
else:
self._stub = stub
def close(self):
"""
Close connection
"""
if self._channel:
self._channel.close()
self._channel = None
self._stub = None
else:
print("Connection for client already closed")
def reconnect(self):
"""
Re-open connection with existing channel
"""
if self._channel is None:
self._channel = grpc.insecure_channel(self._connection)
self._stub = jobs_pb2_grpc.JobsStub(self._channel)
else:
print("Connection for client already open")
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self._channel is not None:
self.close()
return False
@staticmethod
def get_timestamp(seconds_since_year_one: str) -> datetime.datetime:
"""
Create datetime.datetime object from a string date
Args:
seconds_since_year_one(str): date to parse
Returns:
datetime.datetime object
"""
if (seconds_since_year_one is None) or (seconds_since_year_one == ""):
return None
        try:
            # The value may be a protobuf wrapper holding seconds since 0001-01-01;
            # subtract the offset to the Unix epoch (719162 days * 86400 s = 62135596800 s).
            seconds_int = float(seconds_since_year_one.value) - 62135596800
        except (AttributeError, ValueError, TypeError):
            # Otherwise parse the value as a formatted timestamp string
            return datetime.datetime.strptime(seconds_since_year_one, "%Y-%m-%d %H:%M:%SZ").astimezone(
                datetime.timezone.utc)
if seconds_int < 0:
return None
result_date = datetime.datetime.fromtimestamp(seconds_int).astimezone(datetime.timezone.utc)
return result_date
def cancel_job(self, job_id: job_types.JobId, reason=None, timeout=None) -> job_types.JobToken:
"""
Cancels a pipeline job, preventing it from being executed.
        Has no effect on executing or terminated jobs.
Args:
job_id (job_types.JobId): Unique identity of the job to be cancelled.
reason: Optional reason as to why the job was cancelled.
Returns:
job_types.JobToken of cancelled job
"""
if (self._channel is None) or (self._stub is None):
raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")
if (job_id.value is None) or (job_id.value == ""):
raise Exception("Job identifier must have instantiated value")
request = jobs_pb2.JobsCancelRequest(header=self.get_request_header(), job_id=job_id.to_grpc_value(),
reason=reason)
response = self._stub.Cancel(request, timeout=timeout)
self.check_response_header(header=response.header)
result = job_types.JobToken(
job_id=job_types.JobId(response.job_id.value),
job_state=response.job_state,
job_status=response.job_status
)
return result
def create_job(self, pipeline_id: pipeline_types.PipelineId, job_name: str,
input_payloads: List[payload_types.PayloadId] = None,
job_priority: job_types.JobPriority = job_types.JobPriority.Normal,
metadata: Mapping[str, str] = None, timeout=None) -> job_types.JobInfo:
"""
        Creates a new pipeline job record and associated storage payload.
        Jobs are created in a "JobState.Pending" state.
        Use "start_job(job_id, named_values)" to cause the job to start executing.
Args:
pipeline_id (pipeline_types.PipelineId): Unique identifier of the pipeline which the job should
be instances from.
job_name (str): Human readable name of the job.
job_priority (job_types.JobPriority): Optional Priority of the job.
Affects how and when the server will schedule the job.
            input_payloads (List[payload_types.PayloadId]): [Optional Parameter] List of static payloads to
include as input for the job.
metadata (Mapping[str, str]): [Optional Parameter] Metadata (set of key/value pairs) associated with the
job
Returns:
job_types.JobInfo about the newly created pipeline job.
"""
if (self._channel is None) or (self._stub is None):
raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")
if pipeline_id.value is None:
raise Exception("Pipeline identifier must have instantiated non-null instance")
if (job_name is None) or (job_name == ""):
raise Exception("Job name must be initialized to non-null/non-empty string")
if (job_priority.value < job_types.JobPriority.Minimum.value) or (
job_priority.value > job_types.JobPriority.Maximum.value):
raise Exception("Job priority must contain valid value between minimum and maximum job priority bounds")
input_payloads_identifiers = []
if input_payloads is not None:
for pay_id in input_payloads:
input_payloads_identifiers.append(pay_id.to_grpc_value())
else:
input_payloads_identifiers = None
request = jobs_pb2.JobsCreateRequest(
header=self.get_request_header(),
name=job_name,
pipeline_id=pipeline_id.to_grpc_value(),
priority=job_priority.value,
input_payloads=input_payloads_identifiers
)
if metadata is not None:
request.metadata.update(metadata)
response = self._stub.Create(request, timeout=timeout)
self.check_response_header(header=response.header)
result = job_types.JobInfo(
job_id=job_types.JobId(response.job_id.value),
job_priority=job_priority,
job_state=job_types.JobState.Pending,
job_status=job_types.JobStatus.Healthy,
name=job_name,
payload_id=payload_types.PayloadId(value=response.payload_id.value),
pipeline_id=pipeline_id,
metadata=metadata
)
return result
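    # Example (sketch): creating a job from a previously registered pipeline.
    # The target address and pipeline identifier are placeholders; the returned
    # JobInfo carries the job and payload identifiers needed by start_job().
    #
    #   jobs_client = JobsClient(target="10.0.0.1", port="30031")
    #   job_info = jobs_client.create_job(
    #       pipeline_id=pipeline_types.PipelineId("<pipeline-id>"),
    #       job_name="liver-segmentation",
    #       metadata={"study": "abc-123"},
    #   )
    #   print(job_info.job_id, job_info.payload_id)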
def get_status(self, job_id: job_types.JobId, timeout=None) -> job_types.JobDetails:
"""
Get status of a job
Args:
job_id (job_types.JobId): job_id Unique identifier of the job to get the status of.
Returns:
job_types.JobDetails including the status of a known job
"""
if (self._channel is None) or (self._stub is None):
raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")
if job_id.value is None:
raise Exception("Job identifier must have instantiated non-null instance")
request = jobs_pb2.JobsStatusRequest(header=self.get_request_header(), job_id=job_id.to_grpc_value())
response = self._stub.Status(request, timeout=timeout)
self.check_response_header(header=response.header)
resp_operator_details = response.operator_details
operator_details = {}
for item in resp_operator_details:
operator_details[item.name] = {}
operator_details[item.name]["created"] = item.created
operator_details[item.name]["started"] = item.started
operator_details[item.name]["stopped"] = item.stopped
operator_details[item.name]["status"] = item.status
result = job_types.JobDetails(
job_id=job_types.JobId(response.job_id.value),
job_priority=response.priority,
job_state=response.state,
job_status=response.status,
name=response.name,
payload_id=payload_types.PayloadId(response.payload_id.value),
pipeline_id=pipeline_types.PipelineId(response.pipeline_id.value),
date_created=self.get_timestamp(response.created),
date_started=self.get_timestamp(response.started),
date_stopped=self.get_timestamp(response.stopped),
operator_details=operator_details,
messages=response.messages,
metadata=response.metadata
)
return result
def list_jobs(self, job_filter: job_types.JobFilter = None, timeout=None) -> List[job_types.JobInfo]:
"""
        Provides a list of current jobs on the platform
Args:
job_filter (job_types.JobFilter): Optional filter used to limit the number of
pipeline job records return
Returns:
list of job_types.JobInfo with known pipeline job details from the server.
"""
if (self._channel is None) or (self._stub is None):
raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")
empty = job_types.JobFilter()
request = jobs_pb2.JobsListRequest(
header=self.get_request_header()
)
if job_filter != empty and job_filter is not None:
request_filter = jobs_pb2.JobsListRequest.JobFilter
if job_filter.completed_before is not None:
day_one = datetime.datetime(1, 1, 1)
if job_filter.completed_before.tzinfo is not None \
and job_filter.completed_before.tzinfo.utcoffset(job_filter.completed_before) is not None:
day_one = datetime.datetime(1, 1, 1, tzinfo=job_filter.completed_before.tzinfo)
seconds = (job_filter.completed_before - day_one).total_seconds()
request.filter.completed_before.value = int(seconds)
if job_filter.created_after is not None:
day_one = datetime.datetime(1, 1, 1)
if job_filter.created_after.tzinfo is not None \
and job_filter.created_after.tzinfo.utcoffset(job_filter.created_after) is not None:
day_one = datetime.datetime(1, 1, 1, tzinfo=job_filter.created_after.tzinfo)
seconds = (job_filter.created_after - day_one).total_seconds()
request.filter.created_after.value = int(seconds)
if job_filter.has_job_state is not None:
if len(job_filter.has_job_state) > 0:
for state in job_filter.has_job_state:
if (state.value < job_types.JobState.Minimum.value) or (
state.value > job_types.JobState.Maximum.value):
raise Exception("Job states in filter must be within " + str(
job_types.JobState.Minimum) + " and " + str(
job_types.JobState.Maximum) + ", found:" + str(state))
request.filter.has_state.append(state.value)
if job_filter.has_job_status is not None:
if len(job_filter.has_job_status) > 0:
for status in job_filter.has_job_status:
if (status.value < job_types.JobStatus.Minimum.value) or (
status.value > job_types.JobStatus.Maximum.value):
raise Exception("Job status in filter must be within " + str(
job_types.JobStatus.Minimum) + " and " + str(
job_types.JobStatus.Maximum) + ", found:" + str(status))
request.filter.has_status.append(status.value)
if job_filter.pipeline_ids is not None:
if len(job_filter.pipeline_ids) > 0:
for pipe_id in job_filter.pipeline_ids:
request.filter.pipeline_id.append(pipe_id.to_grpc_value())
response = self._stub.List(request, timeout=timeout)
check_header = True
result = []
for item in response:
if check_header:
self.check_response_header(header=item.header)
check_header = False
if (item.job_details is None) or (item.job_details.job_id.value == ''):
continue
info = job_types.JobInfo(
job_id=job_types.JobId(item.job_details.job_id.value),
job_priority=item.job_details.priority,
job_state=item.job_details.state,
job_status=item.job_details.status,
name=item.job_details.job_name,
payload_id=payload_types.PayloadId(item.job_details.payload_id.value),
pipeline_id=pipeline_types.PipelineId(item.job_details.pipeline_id.value),
date_created=self.get_timestamp(item.job_details.created),
date_started=self.get_timestamp(item.job_details.started),
date_stopped=self.get_timestamp(item.job_details.stopped),
metadata=item.job_details.metadata
)
result.append(info)
return result
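    # Example (sketch): listing only stopped, healthy jobs using a filter from
    # nvidia_clara.job_types.
    #
    #   stopped_healthy = job_types.JobFilter(
    #       has_job_state=[job_types.JobState.Stopped],
    #       has_job_status=[job_types.JobStatus.Healthy],
    #   )
    #   for info in jobs_client.list_jobs(job_filter=stopped_healthy):
    #       print(info.job_id, info.name, info.date_stopped)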
def stream_jobs(self, job_filter: job_types.JobFilter = None, timeout=None):
"""
        Provides a generator that streams current jobs on the platform
Args:
job_filter (job_types.JobFilter): Optional filter used to limit the number of
pipeline job records return
Returns:
list of job_types.JobInfo with known pipeline job details from the server.
"""
if (self._channel is None) or (self._stub is None):
raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")
empty = job_types.JobFilter()
request = jobs_pb2.JobsListRequest(
header=self.get_request_header()
)
if job_filter != empty and job_filter is not None:
request_filter = jobs_pb2.JobsListRequest.JobFilter
if job_filter.completed_before is not None:
day_one = datetime.datetime(1, 1, 1)
if job_filter.completed_before.tzinfo is not None \
and job_filter.completed_before.tzinfo.utcoffset(job_filter.completed_before) is not None:
day_one = datetime.datetime(1, 1, 1, tzinfo=job_filter.completed_before.tzinfo)
seconds = (job_filter.completed_before - day_one).total_seconds()
request.filter.completed_before.value = int(seconds)
if job_filter.created_after is not None:
day_one = datetime.datetime(1, 1, 1)
if job_filter.created_after.tzinfo is not None \
and job_filter.created_after.tzinfo.utcoffset(job_filter.created_after) is not None:
day_one = datetime.datetime(1, 1, 1, tzinfo=job_filter.created_after.tzinfo)
seconds = (job_filter.created_after - day_one).total_seconds()
request.filter.created_after.value = int(seconds)
if job_filter.has_job_state is not None:
if len(job_filter.has_job_state) > 0:
for state in job_filter.has_job_state:
if (state.value < job_types.JobState.Minimum.value) or (
state.value > job_types.JobState.Maximum.value):
raise Exception("Job states in filter must be within " + str(
job_types.JobState.Minimum) + " and " + str(
job_types.JobState.Maximum) + ", found:" + str(state))
request.filter.has_state.append(state.value)
if job_filter.has_job_status is not None:
if len(job_filter.has_job_status) > 0:
for status in job_filter.has_job_status:
if (status.value < job_types.JobStatus.Minimum.value) or (
status.value > job_types.JobStatus.Maximum.value):
raise Exception("Job status in filter must be within " + str(
job_types.JobStatus.Minimum) + " and " + str(
job_types.JobStatus.Maximum) + ", found:" + str(status))
request.filter.has_status.append(status.value)
if job_filter.pipeline_ids is not None:
if len(job_filter.pipeline_ids) > 0:
for pipe_id in job_filter.pipeline_ids:
request.filter.pipeline_id.append(pipe_id.to_grpc_value())
response = self._stub.List(request, timeout=timeout)
check_header = True
for item in response:
if check_header:
self.check_response_header(header=item.header)
check_header = False
if (item.job_details is None) or (item.job_details.job_id.value == ''):
continue
info = job_types.JobInfo(
job_id=job_types.JobId(item.job_details.job_id.value),
job_priority=item.job_details.priority,
job_state=item.job_details.state,
job_status=item.job_details.status,
name=item.job_details.job_name,
payload_id=payload_types.PayloadId(item.job_details.payload_id.value),
pipeline_id=pipeline_types.PipelineId(item.job_details.pipeline_id.value),
date_created=self.get_timestamp(item.job_details.created),
date_started=self.get_timestamp(item.job_details.started),
date_stopped=self.get_timestamp(item.job_details.stopped),
metadata=item.job_details.metadata
)
yield info
def start_job(self, job_id: job_types.JobId, named_values: Mapping[str, str] = None,
timeout=None) -> job_types.JobToken:
"""
Starts a "JobState.Pending" job.
Once started, a job's payload becomes readonly.
Args:
job_id (job_types.JobId): Unique identifier of the job to start.
named_values: Collection of name/value pairs used to populate pipeline
variables.
Returns:
A job_types.JobToken with information on started job
"""
if (self._channel is None) or (self._stub is None):
raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")
if (job_id.value is None) or (job_id.value == ""):
raise Exception("Job identifier must have instantiated value")
request = jobs_pb2.JobsStartRequest(
header=self.get_request_header(),
job_id=job_id.to_grpc_value()
)
if named_values is not None:
for item in named_values.keys():
nvp = jobs_pb2.JobsStartRequest.NamedValue(
name=item,
value=named_values.get(item)
)
request.Variables.append(nvp)
response = self._stub.Start(request, timeout=timeout)
self.check_response_header(header=response.header)
result = job_types.JobToken(
job_id=job_id,
job_priority=response.priority,
job_state=response.state,
job_status=response.status
)
return result
def job_logs(self, job_id: job_types.JobId, operator_name: str, timeout=None) -> List[str]:
"""
Retrieve logs of operator specified with "operator_name" with job associated with "job_id"
Args:
job_id (job_types.JobId): Unique identifier of the job to retrieve logs from
operator_name (str): Operator to retrieve logs from
Returns:
List of operator logs
"""
if (self._channel is None) or (self._stub is None):
raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")
if (job_id.value is None) or (job_id.value == ""):
raise Exception("Job identifier must have instantiated value")
if operator_name is None:
raise Exception("Operator must have valid instantiated value")
if operator_name.strip() == "":
raise Exception("Operator must have valid instantiated value")
request = jobs_pb2.JobsReadLogsRequest(
header=self.get_request_header(),
job_id=job_id.to_grpc_value(),
operator_name=operator_name
)
response = self._stub.ReadLogs(request, timeout=timeout)
responses = [resp.logs for resp in response]
logs_list = []
for resp in responses:
for log in resp:
logs_list.append(log)
return logs_list
def add_metadata(self, job_id: job_types.JobId, metadata: Mapping[str, str], timeout=None) -> Mapping[str, str]:
"""
Requests the addition of metadata to a job.
Args:
job_id (job_types.JobId): Unique identifier of the job whose metadata is to be appended.
metadata(Mapping[str, str]): Set of key/value pairs to be appended to the job metadata. If a metadata
key in the request already exists in the job record, or if duplicate keys are passed in the request,
            the job will not be updated and an error will be returned. Keys are compared using case
insensitive comparator functions. The maximum allowed size of a metadata key is 128 bytes, while
the maximum allowed size of a metadata value is 256 bytes. The maximum allowed size for the overall
metadata of an individual job is 4 Megabytes.
Returns:
A Mapping[str, str] object containing the appended metadata
"""
if (self._channel is None) or (self._stub is None):
raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")
if (job_id.value is None) or (job_id.value == ""):
raise Exception("Job identifier must have instantiated value")
if metadata is None:
raise Exception("Metadata must be an instantiated map")
request = jobs_pb2.JobsAddMetadataRequest(
job_id=job_id.to_grpc_value()
)
request.metadata.update(metadata)
response = self._stub.AddMetadata(request, timeout)
self.check_response_header(header=response.header)
result = response.metadata
return result
def remove_metadata(self, job_id: job_types.JobId, keys: List[str], timeout=None) -> Mapping[str, str]:
"""
Requests the removal of metadata from a job.
Args:
job_id: Unique identifier of the job whose metadata is to be removed.
keys: List of keys to be removed from the job metadata.
Returns:
A Mapping[str, str] object containing the updated set of metadata
"""
if (self._channel is None) or (self._stub is None):
raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")
if (job_id.value is None) or (job_id.value == ""):
raise Exception("Job identifier must have instantiated value")
if keys is None:
raise Exception("Keys paramater must be valid list of metadata keys")
request = jobs_pb2.JobsRemoveMetadataRequest(
job_id=job_id.to_grpc_value()
)
request.keys.extend(keys)
response = self._stub.RemoveMetadata(request, timeout)
self.check_response_header(header=response.header)
result = response.metadata
return result
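

# ---------------------------------------------------------------------------
# Usage sketch (illustration only, not part of the client API): a minimal
# create/start/inspect flow. The target address and pipeline identifier are
# placeholders and must be replaced with values from a running Clara Platform
# deployment.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    with JobsClient(target="10.0.0.1", port="30031") as jobs_client:
        # Create a pending job from an existing pipeline definition.
        job_info = jobs_client.create_job(
            pipeline_id=pipeline_types.PipelineId("<pipeline-id>"),
            job_name="example-job",
            metadata={"requested-by": "usage-sketch"},
        )

        # Start the job and poll its status once.
        job_token = jobs_client.start_job(job_id=job_info.job_id)
        details = jobs_client.get_status(job_id=job_token.job_id)
        print(details.job_state, details.job_status, details.operator_details)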
| clara-platform-python-client-main | nvidia_clara/jobs_client.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import BinaryIO, List, Mapping
import grpc
from nvidia_clara.grpc import models_pb2, models_pb2_grpc
from nvidia_clara.base_client import BaseClient
import nvidia_clara.model_types as model_types
class ModelsClientStub:
def create_catalog(self) -> model_types.CatalogId:
"""
Creates a new inference model catalog.
Returns:
model_types.CatalogId of the unique identity of the new catalog.
"""
pass
def create_instance(self) -> model_types.InstanceId:
"""
Creates a new inference model catalog instance.
Returns:
model_types.InstanceId of the unique identity of the new instance.
"""
pass
def delete_catalog(self, catalog_id: model_types.CatalogId):
"""
Deletes the inference catalog associated with "catalog_id"
Deleted catalogs can no longer be used by Clara Platform Server to manage inference server model collections.
Deleted catalogs cannot be recovered.
Args:
catalog_id (model_types.CatalogId): Unique identifier for the inference model catalog to be deleted
"""
pass
def delete_instance(self, instance_id: model_types.InstanceId):
"""
Deletes the inference model catalog instance associated with "instance_id"
Deleted instances can no longer be used by Clara Platform Server to manage inference server model collections.
Deleted instances cannot be recovered.
Args:
instance_id (model_types.InstanceId): Unique identifier for the inference model catalog instance to be deleted.
"""
pass
def delete_model(self, model_id: model_types.ModelId):
"""
        Deletes the inference model associated with "model_id"
Deleted models cannot be used by pipeline jobs for inference.
Deleted models cannot be recovered.
Args:
model_id (model_types.ModelId): Unique identifier of the inference model to be deleted.
"""
pass
def download_model(self, model_id: model_types.ModelId, output_stream: BinaryIO) -> model_types.ModelDetails:
"""
Downloads the model associated with "model_id" to an "output_stream" BinaryIO object
Args:
model_id (model_types.ModelId): Unique identifier of the model to download.
output_stream (BinaryIO): Writable stream use to write the raw model data to.
Returns:
model_types.ModelDetails with details of the downloaded model.
"""
pass
def list_models(self) -> List[model_types.ModelDetails]:
"""
Returns details of all inference models known to the server.
Only inference model details are provided; no model raw data is downloaded.
Returns:
List[model_types.ModelDetails] with each element containing details of all inference models known to the server
"""
pass
def read_catalog(self, catalog_id: model_types.CatalogId) -> List[model_types.ModelDetails]:
"""
Returns details of all inference models included in the catalog associated with "catalog_id"
Args:
catalog_id (model_types.CatalogId): Unique identifier of the inference catalog to read.
Returns:
            List[model_types.ModelDetails] with each element containing details of all inference models associated with the catalog
"""
pass
def read_instance(self, instance_id: model_types.InstanceId) -> List[model_types.ModelDetails]:
"""
Returns details of all inference models included in the catalog instance associated with "instance_id"
Args:
instance_id (model_types.InstanceId): Unique identifier of the inference catalog instance to read.
Returns:
            List[model_types.ModelDetails] with each element containing details of all inference models associated with the instance
"""
pass
def update_catalog(self, catalog_id: model_types.CatalogId, model_ids: List[model_types.ModelId]):
"""
        Updates the inference model catalog associated with "catalog_id" and sets its set of included models to "model_ids"
Any existing list of inference models associated with the catalog is replaced with the new list.
Args:
catalog_id (model_types.CatalogId): Unique identifier of the inference model catalog to update.
model_ids: List of inference model identifiers to replace any existing list with.
"""
pass
def update_instance(self, instance_id: model_types.InstanceId, model_ids: List[model_types.ModelId]):
"""
Updates the inference model catalog instance associated with "instance_id" and sets its set of included models to "model_ids"
Any existing list of inference models associated with the instance is replaced with the new list.
Args:
instance_id (model_types.InstanceId): Unique identifier of the inference model catalog instance to update.
model_ids: List of inference model identifiers to replace any existing list with.
"""
pass
def upload_model(self, details: model_types.ModelDetails, input_stream: BinaryIO):
"""
Uploads an inference model to the model repository.
If a model with the same name exists, it will be overwritten by this operation.
Args:
details (model_types.ModelDetails): provides details, including the name of the model.
input_stream (BinaryIO): Raw model data is read from this stream and persisted into storage by the model repository.
"""
pass
def add_metadata(self, model_id: model_types.ModelId, metadata: Mapping[str, str]) -> Mapping[str, str]:
"""
Requests the addition of metadata to a model.
Args:
model_id (model_types.ModelId): Unique identifier of the model to download.
metadata(Mapping[str, str]): Set of key/value pairs to be appended to the job metadata. If a metadata
key in the request already exists in the model record, or if duplicate keys are passed in the request,
            the model will not be updated and an error will be returned. Keys are compared using case
insensitive comparator functions. The maximum allowed size of a metadata key is 128 bytes, while
the maximum allowed size of a metadata value is 256 bytes. The maximum allowed size for the overall
metadata of an individual model is 4 Megabytes.
Returns:
A Mapping[str, str] containing the appended metadata
"""
pass
def remove_metadata(self, model_id: model_types.ModelId, keys: List[str]) -> Mapping[str, str]:
"""
Requests the removal of metadata from a model.
Args:
model_id (model_types.ModelId): Unique identifier of the model to download.
keys: List of keys to be removed from the model metadata.
Returns:
A Mapping[str, str] containing the updated set of metadata
"""
pass
class ModelsClient(ModelsClientStub, BaseClient):
def __init__(self, target: str, port: str = None, stub=None):
"""
Models Client Creation
Args:
target (str): ipv4 address of clara instance
port (str): if specified, port will be appended to the target with a ":"
"""
if target is None:
raise Exception("Target must be initialized to a non-null value")
self._connection = target
if port is not None:
self._connection += ":" + port
self._channel = grpc.insecure_channel(self._connection)
if stub is None:
self._stub = models_pb2_grpc.ModelsStub(self._channel)
else:
self._stub = stub
def close(self):
"""Close connection"""
if self._channel:
self._channel.close()
self._channel = None
self._stub = None
else:
print("Connection for client already closed")
def reconnect(self):
"""Re-open connection with existing channel"""
if self._channel is None:
self._channel = grpc.insecure_channel(self._connection)
self._stub = models_pb2_grpc.ModelsStub(self._channel)
else:
print("Connection for client already open")
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self._channel is not None:
self.close()
return False
def create_catalog(self, timeout=None) -> model_types.CatalogId:
"""
Creates a new inference model catalog.
Returns:
model_types.CatalogId of the unique identity of the new catalog.
"""
if (self._channel is None) or (self._stub is None):
raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")
request = models_pb2.ModelsCreateCatalogRequest(
header=self.get_request_header()
)
response = self._stub.CreateCatalog(request, timeout=timeout)
self.check_response_header(header=response.header)
result = model_types.CatalogId(value=response.catalog_id.value)
return result
def create_instance(self, timeout=None) -> model_types.InstanceId:
"""
Creates a new inference model catalog instance.
Returns:
model_types.InstanceId of the unique identity of the new instance.
"""
if (self._channel is None) or (self._stub is None):
raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")
request = models_pb2.ModelsCreateInstanceRequest(
header=self.get_request_header()
)
response = self._stub.CreateInstance(request, timeout=timeout)
self.check_response_header(header=response.header)
result = model_types.InstanceId(value=response.instance_id.value)
return result
def delete_catalog(self, catalog_id: model_types.CatalogId, timeout=None):
"""
Deletes the inference catalog associated with "catalog_id"
Deleted catalogs can no longer be used by Clara Platform Server to manage inference server model collections.
Deleted catalogs cannot be recovered.
Args:
catalog_id (model_types.CatalogId): Unique identifier for the inference model catalog to be deleted
"""
if (self._channel is None) or (self._stub is None):
raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")
if (catalog_id.value is None) or (catalog_id.value == ""):
raise Exception("Catalog identifier must be initialized to non-null instance of model_types.CatalogId")
request = models_pb2.ModelsDeleteCatalogRequest(
catalog_id=catalog_id.to_grpc_value(),
header=self.get_request_header()
)
response = self._stub.DeleteCatalog(request, timeout=timeout)
self.check_response_header(header=response.header)
def delete_instance(self, instance_id: model_types.InstanceId, timeout=None):
"""
Deletes the inference model catalog instance associated with "instance_id"
Deleted instances can no longer be used by Clara Platform Server to manage inference server model collections.
Deleted instances cannot be recovered.
Args:
instance_id (model_types.InstanceId): Unique identifier for the inference model catalog instance to be deleted.
"""
if (self._channel is None) or (self._stub is None):
raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")
if (instance_id.value is None) or (instance_id.value == ""):
raise Exception("Instance identifier must be initialized to non-null instance of model_types.InstanceId")
request = models_pb2.ModelsDeleteInstanceRequest(
            instance_id=instance_id.to_grpc_value(),
header=self.get_request_header()
)
response = self._stub.DeleteInstance(request, timeout=timeout)
self.check_response_header(header=response.header)
def delete_model(self, model_id: model_types.ModelId, timeout=None):
"""
        Deletes the inference model associated with "model_id"
Deleted models cannot be used by pipeline jobs for inference.
Deleted models cannot be recovered.
Args:
model_id (model_types.ModelId): Unique identifier of the inference model to be deleted.
"""
if (self._channel is None) or (self._stub is None):
raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")
if (model_id.value is None) or (model_id.value == ""):
raise Exception("Model identifier must be initialized to non-null instance of model_types.ModelId")
request = models_pb2.ModelsDeleteModelRequest(
            model_id=model_id.to_grpc_value(),
header=self.get_request_header()
)
response = self._stub.DeleteModel(request, timeout=timeout)
self.check_response_header(header=response.header)
def download_model(self, model_id: model_types.ModelId, output_stream: BinaryIO,
timeout=None) -> model_types.ModelDetails:
"""
Downloads the model associated with "model_id" to an "output_stream" BinaryIO object
Args:
model_id (model_types.ModelId): Unique identifier of the model to download.
output_stream (BinaryIO): Writable stream use to write the raw model data to.
Returns:
model_types.ModelDetails with details of the downloaded model.
"""
if (self._channel is None) or (self._stub is None):
raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")
if (model_id.value is None) or (model_id.value == ""):
raise Exception("Model identifier must be initialized to non-null instance of model_types.ModelId")
request = models_pb2.ModelsDownloadModelRequest(
header=self.get_request_header(),
model_id=model_id.to_grpc_value()
)
responses = self._stub.Download(request, timeout=timeout)
result = None
for resp in responses:
if result is None:
self.check_response_header(header=resp.header)
result = model_types.ModelDetails(other=resp.details)
output_stream.write(resp.data)
return result
def list_models(self, timeout=None) -> List[model_types.ModelDetails]:
"""
Returns details of all inference models known to the server.
Only inference model details are provided; no model raw data is downloaded.
Returns:
List[model_types.ModelDetails] with each element containing details of all inference models known to the server
"""
if (self._channel is None) or (self._stub is None):
raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")
request = models_pb2.ModelsListModelsRequest(
header=self.get_request_header()
)
response = self._stub.ListModels(request, timeout=timeout)
responses = [resp for resp in response]
if len(responses) > 0:
self.check_response_header(header=responses[0].header)
result = []
for resp in responses:
details = model_types.ModelDetails(other=resp.models)
result.append(details)
return result
return None
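    # Example (sketch): enumerating models known to the server. list_models()
    # returns None when the server reports no models, hence the "or []" guard.
    #
    #   models_client = ModelsClient(target="10.0.0.1", port="30031")
    #   for details in models_client.list_models() or []:
    #       print(details.name)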
def read_catalog(self, catalog_id: model_types.CatalogId, timeout=None) -> List[model_types.ModelDetails]:
"""
Returns details of all inference models included in the catalog associated with "catalog_id"
Args:
catalog_id (model_types.CatalogId): Unique identifier of the inference catalog to read.
Returns:
            List[model_types.ModelDetails] with each element containing details of all inference models associated with the catalog
"""
if (self._channel is None) or (self._stub is None):
raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")
request = models_pb2.ModelsReadCatalogRequest(
catalog_id=catalog_id.to_grpc_value(),
header=self.get_request_header()
)
response = self._stub.ReadCatalog(request, timeout=timeout)
responses = [resp for resp in response]
if len(responses) > 0:
self.check_response_header(header=responses[0].header)
result = []
for resp in responses:
details = model_types.ModelDetails(other=resp.models)
result.append(details)
return result
return None
def read_instance(self, instance_id: model_types.InstanceId, timeout=None) -> List[model_types.ModelDetails]:
"""
Returns details of all inference models included in the catalog instance associated with "instance_id"
Args:
instance_id (model_types.InstanceId): Unique identifier of the inference catalog instance to read.
Returns:
            List[model_types.ModelDetails] with each element containing details of all inference models associated with the instance
"""
if (self._channel is None) or (self._stub is None):
raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")
        request = models_pb2.ModelsReadInstanceRequest(
instance_id=instance_id.to_grpc_value(),
header=self.get_request_header()
)
response = self._stub.ReadInstance(request, timeout=timeout)
responses = [resp for resp in response]
if len(responses) > 0:
self.check_response_header(header=responses[0].header)
result = []
for resp in responses:
details = model_types.ModelDetails(other=resp.models)
result.append(details)
return result
return None
def update_catalog(self, catalog_id: model_types.CatalogId, model_ids: List[model_types.ModelId], timeout=None):
"""
Updates the inference model catalog associated with "catalog_id" and sets its set of included models in "model_ids"
Any existing list of inference models associated with the catalog is replaced with the new list.
Args:
catalog_id (model_types.CatalogId): Unique identifier of the inference model catalog to update.
model_ids: List of inference model identifiers to replace any existing list with.
"""
if (self._channel is None) or (self._stub is None):
raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")
model_ids_req = [model_id.to_grpc_value() for model_id in model_ids]
request = models_pb2.ModelsUpdateCatalogRequest(
catalog_id=catalog_id.to_grpc_value(),
header=self.get_request_header(),
model_ids=model_ids_req
)
response = self._stub.UpdateCatalog([request], timeout=timeout)
self.check_response_header(header=response.header)
def update_instance(self, instance_id: model_types.InstanceId, model_ids: List[model_types.ModelId], timeout=None):
"""
Updates the inference model catalog instance associated with "instance_id" and sets its set of included models to "model_ids"
Any existing list of inference models associated with the instance is replaced with the new list.
Args:
instance_id (model_types.InstanceId): Unique identifier of the inference model catalog instance to update.
model_ids: List of inference model identifiers to replace any existing list with.
"""
if (self._channel is None) or (self._stub is None):
raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")
model_ids_req = [model_id.to_grpc_value() for model_id in model_ids]
        # NOTE: UpdateInstance expects the update-specific request message (assumed here to be
        # ModelsUpdateInstanceRequest in the generated bindings, mirroring ModelsUpdateCatalogRequest).
        request = models_pb2.ModelsUpdateInstanceRequest(
instance_id=instance_id.to_grpc_value(),
header=self.get_request_header(),
model_ids=model_ids_req
)
response = self._stub.UpdateInstance([request], timeout=timeout)
self.check_response_header(header=response.header)
def upload_request_iterator(self, details: models_pb2.ModelDetails, source_object: BinaryIO = None):
"""
        Helper method for upload_model that creates a generator of requests
Args:
details (models_pb2.ModelDetails): details of specified model
source_object (BinaryIO): model source file to read data from
"""
if source_object is None:
raise Exception("Source object must be initialized with a non-null BinaryIO instance")
chunk_size = 64 * 1024
while True:
data = source_object.read(chunk_size)
if not data:
return
request = models_pb2.ModelsUploadModelRequest(
header=self.get_request_header(),
details=details,
data=data
)
yield request
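    # The generator above streams the model payload in 64 KiB chunks. A minimal
    # standalone sketch of the same chunking pattern (hypothetical helper, shown
    # only to illustrate the idea):
    #
    #     def read_in_chunks(stream, chunk_size=64 * 1024):
    #         while True:
    #             data = stream.read(chunk_size)
    #             if not data:
    #                 return
    #             yield data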
def upload_model(self, details: model_types.ModelDetails, input_stream: BinaryIO, timeout=None):
"""
Uploads an inference model to the model repository.
If a model with the same name exists, it will be overwritten by this operation.
Args:
details (model_types.ModelDetails): provides details, including the name of the model.
input_stream (BinaryIO): Raw model data is read from this stream and persisted into storage by the model repository.
"""
if (self._channel is None) or (self._stub is None):
raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")
details = models_pb2.ModelDetails(model_id=details.model_id.to_grpc_value(), name=details.name,
tags=details.tags, model_type=details.model_type)
response = self._stub.UploadModel(
self.upload_request_iterator(details=details, source_object=input_stream),
timeout=timeout
)
self.check_response_header(header=response.header)
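    # Usage sketch (illustrative only; `models_client` and the file name are
    # hypothetical): given a previously constructed model_types.ModelDetails in
    # `details`, streaming a model file from disk might look like:
    #
    #     with open("model_archive.bin", "rb") as stream:
    #         models_client.upload_model(details=details, input_stream=stream)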
def add_metadata(self, model_id: model_types.ModelId, metadata: Mapping[str, str], timeout=None) -> Mapping[
str, str]:
"""
Requests the addition of metadata to a model.
Args:
            model_id (model_types.ModelId): Unique identifier of the model to add metadata to.
            metadata (Mapping[str, str]): Set of key/value pairs to be appended to the model metadata. If a metadata
                key in the request already exists in the model record, or if duplicate keys are passed in the request,
                the model will not be updated and an error will be returned. Keys are compared using case-insensitive
                comparator functions. The maximum allowed size of a metadata key is 128 bytes, while the maximum
                allowed size of a metadata value is 256 bytes. The maximum allowed size for the overall metadata of
                an individual model is 4 megabytes.
Returns:
A Mapping[str, str] containing the appended metadata
"""
if (self._channel is None) or (self._stub is None):
raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")
if (model_id.value is None) or (model_id.value == ""):
raise Exception("Model identifier must have instantiated value")
if metadata is None:
raise Exception("Metadata must be an instantiated map")
request = models_pb2.ModelsAddMetadataRequest(
model_id=model_id.to_grpc_value()
)
request.metadata.update(metadata)
response = self._stub.AddMetadata(request, timeout)
self.check_response_header(header=response.header)
result = response.metadata
return result
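    # Usage sketch (illustrative only; the identifier and keys are made up):
    #
    #     updated = models_client.add_metadata(
    #         model_id=model_types.ModelId("some-model-id"),
    #         metadata={"project": "demo", "owner": "clara"},
    #     )
    #
    # The returned mapping contains the appended metadata, as described above.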
def remove_metadata(self, model_id: model_types.ModelId, keys: List[str], timeout=None) -> Mapping[str, str]:
"""
Requests the removal of metadata from a model.
Args:
            model_id (model_types.ModelId): Unique identifier of the model to remove metadata from.
keys: List of keys to be removed from the model metadata.
Returns:
A Mapping[str, str] containing the updated set of metadata
"""
if (self._channel is None) or (self._stub is None):
raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")
if (model_id.value is None) or (model_id.value == ""):
raise Exception("Model identifier must have instantiated value")
if keys is None:
raise Exception("Keys paramater must be valid list of metadata keys")
request = models_pb2.ModelsRemoveMetadataRequest(
model_id=model_id.to_grpc_value()
)
request.keys.extend(keys)
response = self._stub.RemoveMetadata(request, timeout)
self.check_response_header(header=response.header)
result = response.metadata
return result
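    # Usage sketch for the complementary removal call (illustrative only; the
    # identifier and keys are made up):
    #
    #     remaining = models_client.remove_metadata(
    #         model_id=model_types.ModelId("some-model-id"),
    #         keys=["project", "owner"],
    #     )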
| clara-platform-python-client-main | nvidia_clara/models_client.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from typing import List
from nvidia_clara.job_types import JobId
class ClaraVersionInfo:
def __init__(self, major: int = None, minor: int = None, patch: int = None, label: str = None):
"""Clara version information."""
self._major = major
self._minor = minor
self._patch = patch
self._label = label
@property
def major(self) -> int:
"""Version Major"""
return self._major
@major.setter
def major(self, major: int):
"""Version Major"""
self._major = major
@property
def minor(self) -> int:
"""Version Minor"""
return self._minor
@minor.setter
def minor(self, minor: int):
"""Version Minor"""
self._minor = minor
@property
def patch(self) -> int:
"""Version Patch"""
return self._patch
@patch.setter
def patch(self, patch: int):
"""Version Patch"""
self._patch = patch
@property
def label(self) -> str:
"""Version Label"""
return self._label
@label.setter
def label(self, label: str):
"""Version Label"""
self._label = label
class ClaraProcessDetails:
def __init__(self, name: str = None, job_id: JobId = None):
self._name = name
self._job_id = job_id
@property
def name(self) -> str:
"""
Name of the process utilizing the GPU.
- When job_id is provided, is the unique (to the pipeline-job) name of the pipeline-job operator utilizing the GPU.
- When job_id is not provided, is the name of the Clara Platform managed, non-pipeline process utilizing the GPU.
"""
return self._name
@name.setter
def name(self, name: str):
"""
Name of the process utilizing the GPU.
- When job_id is provided, is the unique (to the pipeline-job) name of the pipeline-job operator utilizing the GPU.
- When job_id is not provided, is the name of the Clara Platform managed, non-pipeline process utilizing the GPU.
"""
self._name = name
@property
def job_id(self) -> JobId:
"""Unique identifier of the pipeline-job utilizing the GPU.
Only provided when the process utilizing the GPU is a pipeline-job.
"""
return self._job_id
@job_id.setter
def job_id(self, job_id: JobId):
"""Unique identifier of the pipeline-job utilizing the GPU.
Only provided when the process utilizing the GPU is a pipeline-job.
"""
self._job_id = job_id
class ClaraGpuUtilization:
def __init__(self, node_id: int = None, pcie_id: int = None, compute_utilization: float = None,
memory_free: int = None, memory_used: int = None, memory_utilization: float = None,
process_details: List[ClaraProcessDetails] = None, timestamp: datetime = None):
"""GPU Utilization details for a Clara process."""
if process_details is None:
process_details = []
self._node_id = node_id
self._pcie_id = pcie_id
self._compute_utilization = compute_utilization
self._memory_free = memory_free
self._memory_used = memory_used
self._memory_utilization = memory_utilization
self._process_details = process_details
self._timestamp = timestamp
@property
def node_id(self) -> int:
"""Unique (to the cluster) name of the node which contains the GPU."""
return self._node_id
@node_id.setter
def node_id(self, node_id: int):
"""Unique (to the cluster) name of the node which contains the GPU."""
self._node_id = node_id
@property
def pcie_id(self) -> int:
"""PCIE device identifier of the GPU."""
return self._pcie_id
@pcie_id.setter
def pcie_id(self, pcie_id: int):
"""PCIE device identifier of the GPU."""
self._pcie_id = pcie_id
@property
def compute_utilization(self) -> float:
"""GPU compute utilization; in the range of zero to one, inclusive [0, 1]."""
return self._compute_utilization
@compute_utilization.setter
def compute_utilization(self, compute_utilization: float):
"""GPU compute utilization; in the range of zero to one, inclusive [0, 1]."""
self._compute_utilization = compute_utilization
@property
def memory_free(self) -> int:
"""Free GPU memory, measured in megabytes."""
return self._memory_free
@memory_free.setter
def memory_free(self, memory_free: int):
"""Free GPU memory, measured in megabytes."""
self._memory_free = memory_free
@property
def memory_used(self) -> int:
"""Used GPU memory, measured in megabytes."""
return self._memory_used
@memory_used.setter
def memory_used(self, memory_used: int):
"""Used GPU memory, measured in megabytes."""
self._memory_used = memory_used
@property
def memory_utilization(self) -> float:
"""GPU memory utilization; in the range of zero to one, inclusive [0, 1]."""
return self._memory_utilization
@memory_utilization.setter
def memory_utilization(self, memory_utilization: float):
"""GPU memory utilization; in the range of zero to one, inclusive [0, 1]."""
self._memory_utilization = memory_utilization
@property
def process_details(self) -> List[ClaraProcessDetails]:
"""List of pipeline-job operators and/or Clara Platform managed process utilizing the GPU."""
return self._process_details
@process_details.setter
def process_details(self, process_details: List[ClaraProcessDetails]):
"""List of pipeline-job operators and/or Clara Platform managed process utilizing the GPU."""
self._process_details = process_details
@property
def timestamp(self) -> datetime:
"""Timestamp when the associated metrics data was collected."""
return self._timestamp
@timestamp.setter
def timestamp(self, timestamp: datetime):
"""Timestamp when the associated metrics data was collected."""
self._timestamp = timestamp
class ClaraUtilizationDetails:
"""Utilization details for a Clara process."""
def __init__(self, gpu_metrics: List[ClaraGpuUtilization] = None):
if gpu_metrics is None:
gpu_metrics = []
self._gpu_metrics = gpu_metrics
@property
def gpu_metrics(self) -> List[ClaraGpuUtilization]:
"""List of Utilization Details of each GPU"""
return self._gpu_metrics
@gpu_metrics.setter
def gpu_metrics(self, gpu_metrics: List[ClaraGpuUtilization]):
"""List of Utilization Details of each GPU"""
self._gpu_metrics = gpu_metrics
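# Usage sketch (illustrative only; every value below is made up): assembling a
# utilization snapshot from these plain data holders might look like:
#
#     gpu = ClaraGpuUtilization(
#         node_id=0,
#         pcie_id=27,
#         compute_utilization=0.42,
#         memory_free=10000,
#         memory_used=6000,
#         memory_utilization=0.375,
#         process_details=[ClaraProcessDetails(name="liver-segmentation")],
#         timestamp=datetime.utcnow(),
#     )
#     snapshot = ClaraUtilizationDetails(gpu_metrics=[gpu])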
| clara-platform-python-client-main | nvidia_clara/clara_types.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
from typing import Mapping as HashMap, List, Mapping
from nvidia_clara.grpc import models_pb2, common_pb2
class CatalogId:
def __init__(self, value: str):
"""
Unique identifier for an Inference Model Catalog.
"""
if (value is None) or (value == ""):
raise Exception("Catalog identifier value must be intialized.")
self._value = value
@property
def value(self):
return self._value
def __eq__(self, other) -> bool:
return self._value == other.value
def __ne__(self, other) -> bool:
return not (self == other)
def __repr__(self):
return "%s" % self._value
def __str__(self):
return "%s" % self._value
def __hash__(self):
return hash(self._value)
def to_grpc_value(self):
id = common_pb2.Identifier()
id.value = self._value
return id
class ModelType(Enum):
Unknown = 0
TensorFlow = 1
TensorRT = 2
PyTorch = 3
Minimum = TensorFlow
Maximum = PyTorch
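# Because Enum members with equal values are aliases, ModelType.Minimum is the
# same member as ModelType.TensorFlow and ModelType.Maximum is the same member as
# ModelType.PyTorch. A quick illustration (for reference only):
#
#     assert ModelType.Minimum is ModelType.TensorFlow
#     assert ModelType.Maximum is ModelType.PyTorch
#     assert ModelType(2) is ModelType.TensorRT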
class ModelId:
def __init__(self, value: str):
if (value is None) or (value == ""):
raise Exception("Model identifier value must be intialized.")
self._value = value
@property
def value(self):
return self._value
def __eq__(self, other) -> bool:
return self._value == other.value
def __ne__(self, other) -> bool:
return not (self == other)
def __repr__(self):
return "%s" % self._value
def __str__(self):
return "%s" % self._value
def __hash__(self):
return hash(self._value)
def to_grpc_value(self):
id = common_pb2.Identifier()
id.value = self._value
return id
class ModelDetails:
def __init__(self, other: models_pb2.ModelDetails = None, model_id: ModelId = None, name: str = None,
tags: Mapping[str, str] = None,
model_type: ModelType = None, metadata: Mapping[str, str] = None):
if other is None:
if tags is None:
tags = dict()
if metadata is None:
metadata = dict()
self._model_id = model_id
self._name = name
self._tags = tags
self._model_type = model_type
self._metadata = metadata
else:
self._model_id = other.model_id
self._name = other.name
self._tags = other.tags
self._model_type = other.type
self._metadata = other.metadata
@property
def model_id(self) -> ModelId:
"""Unique identifier of this inference model."""
return self._model_id
@model_id.setter
def model_id(self, model_id: ModelId = None):
"""Unique identifier of this inference model."""
self._model_id = model_id
@property
def name(self) -> str:
"""The name of this inference model."""
return self._name
@name.setter
def name(self, name: str = None):
"""The name of this inference model."""
self._name = name
@property
def tags(self) -> Mapping[str, str]:
"""The set of tags / meta-data associated with this infrence model."""
return self._tags
@tags.setter
def tags(self, tags: Mapping[str, str] = None):
"""The set of tags / meta-data associated with this infrence model."""
self._tags = tags
@property
    def model_type(self) -> ModelType:
"""The type (inference toolset) of this inference model."""
return self._model_type
@model_type.setter
def model_type(self, model_type: ModelType = None):
"""The type (inference toolset) of this inference model."""
self._model_type = model_type
@property
def metadata(self) -> Mapping[str, str]:
"""
Metadata (set of key/value pairs) associated with the model
"""
return self._metadata
@metadata.setter
def metadata(self, metadata: Mapping[str, str]):
"""
Metadata (set of key/value pairs) associated with the model
"""
self._metadata = metadata
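# Usage sketch (illustrative only; the identifier and tags are made up):
# constructing a ModelDetails by hand, rather than from a gRPC message:
#
#     details = ModelDetails(
#         model_id=ModelId("liver-seg-v1"),
#         name="liver_segmentation",
#         tags={"modality": "CT"},
#         model_type=ModelType.TensorRT,
#     )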
class CatalogDetails:
def __init__(self, other: models_pb2.ModelCatalogDetails = None, catalog_id: CatalogId = None,
models: List[ModelDetails] = None):
if other is None:
if catalog_id is None:
raise Exception("Catalog identifier can not be None and must be initializes")
self._catalog_id = catalog_id
if models is None:
self._models = []
else:
self._models = models
else:
self._catalog_id = None
            # Only wrap the identifier when it is actually populated.
            if (other.catalog_id.value is not None) and (other.catalog_id.value != ""):
self._catalog_id = CatalogId(value=other.catalog_id.value)
self._models = []
if len(other.models) > 0:
for model in other.models:
new_model = ModelDetails(other=model)
self._models.append(new_model)
@property
def catalog_id(self) -> CatalogId:
"""Unique identifier of this inference model catalog."""
return self._catalog_id
@catalog_id.setter
def catalog_id(self, catalog_id: CatalogId = None):
"""Unique identifier of this inference model catalog."""
self._catalog_id = catalog_id
@property
def models(self) -> List[ModelDetails]:
"""List of inference models associated with this inference model catalog."""
return self._models
@models.setter
def models(self, models: List[ModelDetails] = None):
"""List of inference models associated with this inference model catalog."""
self._models = models
class InstanceId:
def __init__(self, value: str):
"""Unique identifier for an Model Catalog Instance."""
if (value is None) or (value == ""):
raise Exception("InstanceId identifier value must be intialized.")
self._value = value
@property
def value(self):
return self._value
def __eq__(self, other) -> bool:
return self._value == other.value
def __ne__(self, other) -> bool:
return not (self == other)
def __repr__(self):
return "%s" % self._value
def __str__(self):
return "%s" % self._value
def __hash__(self):
return hash(self._value)
def to_grpc_value(self):
id = common_pb2.Identifier()
id.value = self._value
return id
class InstanceDetails:
def __init__(self, other: models_pb2.ModelCatalogDetails = None, instance_id: InstanceId = None,
models: List[ModelDetails] = None):
if other is None:
if instance_id is None:
raise Exception("Instance identifier can not be None and must be initializes")
self._instance_id = instance_id
if models is None:
self._models = []
else:
self._models = models
else:
self._instance_id = None
            # Only wrap the identifier when it is actually populated.
            if (other.catalog_id.value is not None) and (other.catalog_id.value != ""):
self._instance_id = InstanceId(value=other.catalog_id.value)
self._models = []
if len(other.models) > 0:
for model in other.models:
new_model = ModelDetails(other=model)
self._models.append(new_model)
@property
def instance_id(self) -> InstanceId:
"""Unqiue identifier of this inference model catalog instance."""
return self._instance_id
@instance_id.setter
def instance_id(self, instance_id: InstanceId = None):
"""Unqiue identifier of this inference model catalog instance."""
self._instance_id = instance_id
@property
def models(self) -> List[ModelDetails]:
"""List of inference models associated with this inference model catalog instance."""
return self._models
@models.setter
def models(self, models: List[ModelDetails] = None):
"""List of inference models associated with this inference model catalog instance."""
self._models = models
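# Usage sketch (illustrative only; `grpc_catalog_details` is a hypothetical
# models_pb2.ModelCatalogDetails message received over gRPC): both CatalogDetails
# and InstanceDetails can be built directly from such a message:
#
#     catalog = CatalogDetails(other=grpc_catalog_details)
#     instance = InstanceDetails(other=grpc_catalog_details)
#     print(len(catalog.models), len(instance.models))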
| clara-platform-python-client-main | nvidia_clara/model_types.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia/clara/platform/payloads.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from nvidia_clara.grpc import common_pb2 as nvidia_dot_clara_dot_platform_dot_common__pb2
from nvidia_clara.grpc.common_pb2 import *
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia/clara/platform/payloads.proto',
package='nvidia.clara.platform',
syntax='proto3',
serialized_options=_b('\n\031com.nvidia.clara.platformZ\004apis\252\002\032Nvidia.Clara.Platform.Grpc'),
serialized_pb=_b('\n$nvidia/clara/platform/payloads.proto\x12\x15nvidia.clara.platform\x1a\"nvidia/clara/platform/common.proto\">\n\x12PayloadFileDetails\x12\x0c\n\x04mode\x18\x01 \x01(\r\x12\x0c\n\x04size\x18\x02 \x01(\x04\x12\x0c\n\x04name\x18\x03 \x01(\t\"\x8d\x02\n\x1aPayloadsAddMetadataRequest\x12\x34\n\x06header\x18\x01 \x01(\x0b\x32$.nvidia.clara.platform.RequestHeader\x12\x35\n\npayload_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12Q\n\x08metadata\x18\x03 \x03(\x0b\x32?.nvidia.clara.platform.PayloadsAddMetadataRequest.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x90\x02\n\x1bPayloadsAddMetadataResponse\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.nvidia.clara.platform.ResponseHeader\x12\x35\n\npayload_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12R\n\x08metadata\x18\x03 \x03(\x0b\x32@.nvidia.clara.platform.PayloadsAddMetadataResponse.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xcc\x01\n\x15PayloadsCreateRequest\x12\x34\n\x06header\x18\x01 \x01(\x0b\x32$.nvidia.clara.platform.RequestHeader\x12L\n\x08metadata\x18\x02 \x03(\x0b\x32:.nvidia.clara.platform.PayloadsCreateRequest.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xb8\x01\n\x16PayloadsCreateResponse\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.nvidia.clara.platform.ResponseHeader\x12\x35\n\npayload_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12\x30\n\x04type\x18\x05 \x01(\x0e\x32\".nvidia.clara.platform.PayloadType\"\x84\x01\n\x15PayloadsDeleteRequest\x12\x34\n\x06header\x18\x01 \x01(\x0b\x32$.nvidia.clara.platform.RequestHeader\x12\x35\n\npayload_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\"O\n\x16PayloadsDeleteResponse\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.nvidia.clara.platform.ResponseHeader\"\x85\x01\n\x16PayloadsDetailsRequest\x12\x34\n\x06header\x18\x01 \x01(\x0b\x32$.nvidia.clara.platform.RequestHeader\x12\x35\n\npayload_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\"\xa6\x03\n\x17PayloadsDetailsResponse\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.nvidia.clara.platform.ResponseHeader\x12\x35\n\npayload_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12\x37\n\x04\x66ile\x18\x03 \x01(\x0b\x32).nvidia.clara.platform.PayloadFileDetails\x12\x30\n\x04type\x18\x04 \x01(\x0e\x32\".nvidia.clara.platform.PayloadType\x12\x31\n\x06job_id\x18\x05 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12N\n\x08metadata\x18\x06 \x03(\x0b\x32<.nvidia.clara.platform.PayloadsDetailsResponse.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x94\x01\n\x17PayloadsDownloadRequest\x12\x34\n\x06header\x18\x01 \x01(\x0b\x32$.nvidia.clara.platform.RequestHeader\x12\x35\n\npayload_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12\x0c\n\x04name\x18\x03 \x01(\t\"\x9b\x01\n\x18PayloadsDownloadResponse\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.nvidia.clara.platform.ResponseHeader\x12:\n\x07\x64\x65tails\x18\x02 \x01(\x0b\x32).nvidia.clara.platform.PayloadFileDetails\x12\x0c\n\x04\x64\x61ta\x18\x03 \x01(\x0c\"\x92\x01\n\x15PayloadsRemoveRequest\x12\x34\n\x06header\x18\x01 \x01(\x0b\x32$.nvidia.clara.platform.RequestHeader\x12\x35\n\npayload_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12\x0c\n\x04name\x18\x03 
\x01(\t\"O\n\x16PayloadsRemoveResponse\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.nvidia.clara.platform.ResponseHeader\"\x9a\x01\n\x1dPayloadsRemoveMetadataRequest\x12\x34\n\x06header\x18\x01 \x01(\x0b\x32$.nvidia.clara.platform.RequestHeader\x12\x35\n\npayload_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12\x0c\n\x04keys\x18\x03 \x03(\t\"\x96\x02\n\x1ePayloadsRemoveMetadataResponse\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.nvidia.clara.platform.ResponseHeader\x12\x35\n\npayload_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12U\n\x08metadata\x18\x03 \x03(\x0b\x32\x43.nvidia.clara.platform.PayloadsRemoveMetadataResponse.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xce\x01\n\x15PayloadsUploadRequest\x12\x34\n\x06header\x18\x01 \x01(\x0b\x32$.nvidia.clara.platform.RequestHeader\x12\x35\n\npayload_id\x18\x02 \x01(\x0b\x32!.nvidia.clara.platform.Identifier\x12:\n\x07\x64\x65tails\x18\x03 \x01(\x0b\x32).nvidia.clara.platform.PayloadFileDetails\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\"\x8b\x01\n\x16PayloadsUploadResponse\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.nvidia.clara.platform.ResponseHeader\x12:\n\x07\x64\x65tails\x18\x02 \x01(\x0b\x32).nvidia.clara.platform.PayloadFileDetails*]\n\x0bPayloadType\x12\x18\n\x14PAYLOAD_TYPE_UNKNOWN\x10\x00\x12\x19\n\x15PAYLOAD_TYPE_PIPELINE\x10\x01\x12\x19\n\x15PAYLOAD_TYPE_REUSABLE\x10\x02\x32\xf8\x06\n\x08Payloads\x12t\n\x0b\x41\x64\x64Metadata\x12\x31.nvidia.clara.platform.PayloadsAddMetadataRequest\x1a\x32.nvidia.clara.platform.PayloadsAddMetadataResponse\x12\x65\n\x06\x43reate\x12,.nvidia.clara.platform.PayloadsCreateRequest\x1a-.nvidia.clara.platform.PayloadsCreateResponse\x12\x65\n\x06\x44\x65lete\x12,.nvidia.clara.platform.PayloadsDeleteRequest\x1a-.nvidia.clara.platform.PayloadsDeleteResponse\x12j\n\x07\x44\x65tails\x12-.nvidia.clara.platform.PayloadsDetailsRequest\x1a..nvidia.clara.platform.PayloadsDetailsResponse0\x01\x12m\n\x08\x44ownload\x12..nvidia.clara.platform.PayloadsDownloadRequest\x1a/.nvidia.clara.platform.PayloadsDownloadResponse0\x01\x12\x65\n\x06Remove\x12,.nvidia.clara.platform.PayloadsRemoveRequest\x1a-.nvidia.clara.platform.PayloadsRemoveResponse\x12}\n\x0eRemoveMetadata\x12\x34.nvidia.clara.platform.PayloadsRemoveMetadataRequest\x1a\x35.nvidia.clara.platform.PayloadsRemoveMetadataResponse\x12g\n\x06Upload\x12,.nvidia.clara.platform.PayloadsUploadRequest\x1a-.nvidia.clara.platform.PayloadsUploadResponse(\x01\x42>\n\x19\x63om.nvidia.clara.platformZ\x04\x61pis\xaa\x02\x1aNvidia.Clara.Platform.GrpcP\x00\x62\x06proto3')
,
dependencies=[nvidia_dot_clara_dot_platform_dot_common__pb2.DESCRIPTOR,],
public_dependencies=[nvidia_dot_clara_dot_platform_dot_common__pb2.DESCRIPTOR,])
_PAYLOADTYPE = _descriptor.EnumDescriptor(
name='PayloadType',
full_name='nvidia.clara.platform.PayloadType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='PAYLOAD_TYPE_UNKNOWN', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PAYLOAD_TYPE_PIPELINE', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PAYLOAD_TYPE_REUSABLE', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=3209,
serialized_end=3302,
)
_sym_db.RegisterEnumDescriptor(_PAYLOADTYPE)
PayloadType = enum_type_wrapper.EnumTypeWrapper(_PAYLOADTYPE)
PAYLOAD_TYPE_UNKNOWN = 0
PAYLOAD_TYPE_PIPELINE = 1
PAYLOAD_TYPE_REUSABLE = 2
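# Although this module is generated, the enum wrapper above is the supported way
# to translate payload type numbers to names (shown for reference only):
#
#     assert PayloadType.Name(PAYLOAD_TYPE_PIPELINE) == 'PAYLOAD_TYPE_PIPELINE'
#     assert PayloadType.Value('PAYLOAD_TYPE_REUSABLE') == 2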
_PAYLOADFILEDETAILS = _descriptor.Descriptor(
name='PayloadFileDetails',
full_name='nvidia.clara.platform.PayloadFileDetails',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='mode', full_name='nvidia.clara.platform.PayloadFileDetails.mode', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='size', full_name='nvidia.clara.platform.PayloadFileDetails.size', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='nvidia.clara.platform.PayloadFileDetails.name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=99,
serialized_end=161,
)
_PAYLOADSADDMETADATAREQUEST_METADATAENTRY = _descriptor.Descriptor(
name='MetadataEntry',
full_name='nvidia.clara.platform.PayloadsAddMetadataRequest.MetadataEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='nvidia.clara.platform.PayloadsAddMetadataRequest.MetadataEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='nvidia.clara.platform.PayloadsAddMetadataRequest.MetadataEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=386,
serialized_end=433,
)
_PAYLOADSADDMETADATAREQUEST = _descriptor.Descriptor(
name='PayloadsAddMetadataRequest',
full_name='nvidia.clara.platform.PayloadsAddMetadataRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.PayloadsAddMetadataRequest.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='payload_id', full_name='nvidia.clara.platform.PayloadsAddMetadataRequest.payload_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metadata', full_name='nvidia.clara.platform.PayloadsAddMetadataRequest.metadata', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_PAYLOADSADDMETADATAREQUEST_METADATAENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=164,
serialized_end=433,
)
_PAYLOADSADDMETADATARESPONSE_METADATAENTRY = _descriptor.Descriptor(
name='MetadataEntry',
full_name='nvidia.clara.platform.PayloadsAddMetadataResponse.MetadataEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='nvidia.clara.platform.PayloadsAddMetadataResponse.MetadataEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='nvidia.clara.platform.PayloadsAddMetadataResponse.MetadataEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=386,
serialized_end=433,
)
_PAYLOADSADDMETADATARESPONSE = _descriptor.Descriptor(
name='PayloadsAddMetadataResponse',
full_name='nvidia.clara.platform.PayloadsAddMetadataResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.PayloadsAddMetadataResponse.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='payload_id', full_name='nvidia.clara.platform.PayloadsAddMetadataResponse.payload_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metadata', full_name='nvidia.clara.platform.PayloadsAddMetadataResponse.metadata', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_PAYLOADSADDMETADATARESPONSE_METADATAENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=436,
serialized_end=708,
)
_PAYLOADSCREATEREQUEST_METADATAENTRY = _descriptor.Descriptor(
name='MetadataEntry',
full_name='nvidia.clara.platform.PayloadsCreateRequest.MetadataEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='nvidia.clara.platform.PayloadsCreateRequest.MetadataEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='nvidia.clara.platform.PayloadsCreateRequest.MetadataEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=386,
serialized_end=433,
)
_PAYLOADSCREATEREQUEST = _descriptor.Descriptor(
name='PayloadsCreateRequest',
full_name='nvidia.clara.platform.PayloadsCreateRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.PayloadsCreateRequest.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metadata', full_name='nvidia.clara.platform.PayloadsCreateRequest.metadata', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_PAYLOADSCREATEREQUEST_METADATAENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=711,
serialized_end=915,
)
_PAYLOADSCREATERESPONSE = _descriptor.Descriptor(
name='PayloadsCreateResponse',
full_name='nvidia.clara.platform.PayloadsCreateResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.PayloadsCreateResponse.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='payload_id', full_name='nvidia.clara.platform.PayloadsCreateResponse.payload_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='nvidia.clara.platform.PayloadsCreateResponse.type', index=2,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=918,
serialized_end=1102,
)
_PAYLOADSDELETEREQUEST = _descriptor.Descriptor(
name='PayloadsDeleteRequest',
full_name='nvidia.clara.platform.PayloadsDeleteRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.PayloadsDeleteRequest.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='payload_id', full_name='nvidia.clara.platform.PayloadsDeleteRequest.payload_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1105,
serialized_end=1237,
)
_PAYLOADSDELETERESPONSE = _descriptor.Descriptor(
name='PayloadsDeleteResponse',
full_name='nvidia.clara.platform.PayloadsDeleteResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.PayloadsDeleteResponse.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1239,
serialized_end=1318,
)
_PAYLOADSDETAILSREQUEST = _descriptor.Descriptor(
name='PayloadsDetailsRequest',
full_name='nvidia.clara.platform.PayloadsDetailsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.PayloadsDetailsRequest.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='payload_id', full_name='nvidia.clara.platform.PayloadsDetailsRequest.payload_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1321,
serialized_end=1454,
)
_PAYLOADSDETAILSRESPONSE_METADATAENTRY = _descriptor.Descriptor(
name='MetadataEntry',
full_name='nvidia.clara.platform.PayloadsDetailsResponse.MetadataEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='nvidia.clara.platform.PayloadsDetailsResponse.MetadataEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='nvidia.clara.platform.PayloadsDetailsResponse.MetadataEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=386,
serialized_end=433,
)
_PAYLOADSDETAILSRESPONSE = _descriptor.Descriptor(
name='PayloadsDetailsResponse',
full_name='nvidia.clara.platform.PayloadsDetailsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.PayloadsDetailsResponse.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='payload_id', full_name='nvidia.clara.platform.PayloadsDetailsResponse.payload_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='file', full_name='nvidia.clara.platform.PayloadsDetailsResponse.file', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='nvidia.clara.platform.PayloadsDetailsResponse.type', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='job_id', full_name='nvidia.clara.platform.PayloadsDetailsResponse.job_id', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metadata', full_name='nvidia.clara.platform.PayloadsDetailsResponse.metadata', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_PAYLOADSDETAILSRESPONSE_METADATAENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1457,
serialized_end=1879,
)
_PAYLOADSDOWNLOADREQUEST = _descriptor.Descriptor(
name='PayloadsDownloadRequest',
full_name='nvidia.clara.platform.PayloadsDownloadRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.PayloadsDownloadRequest.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='payload_id', full_name='nvidia.clara.platform.PayloadsDownloadRequest.payload_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='nvidia.clara.platform.PayloadsDownloadRequest.name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1882,
serialized_end=2030,
)
_PAYLOADSDOWNLOADRESPONSE = _descriptor.Descriptor(
name='PayloadsDownloadResponse',
full_name='nvidia.clara.platform.PayloadsDownloadResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.PayloadsDownloadResponse.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='details', full_name='nvidia.clara.platform.PayloadsDownloadResponse.details', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='nvidia.clara.platform.PayloadsDownloadResponse.data', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2033,
serialized_end=2188,
)
_PAYLOADSREMOVEREQUEST = _descriptor.Descriptor(
name='PayloadsRemoveRequest',
full_name='nvidia.clara.platform.PayloadsRemoveRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.PayloadsRemoveRequest.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='payload_id', full_name='nvidia.clara.platform.PayloadsRemoveRequest.payload_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='nvidia.clara.platform.PayloadsRemoveRequest.name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2191,
serialized_end=2337,
)
_PAYLOADSREMOVERESPONSE = _descriptor.Descriptor(
name='PayloadsRemoveResponse',
full_name='nvidia.clara.platform.PayloadsRemoveResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.PayloadsRemoveResponse.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2339,
serialized_end=2418,
)
_PAYLOADSREMOVEMETADATAREQUEST = _descriptor.Descriptor(
name='PayloadsRemoveMetadataRequest',
full_name='nvidia.clara.platform.PayloadsRemoveMetadataRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.PayloadsRemoveMetadataRequest.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='payload_id', full_name='nvidia.clara.platform.PayloadsRemoveMetadataRequest.payload_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='keys', full_name='nvidia.clara.platform.PayloadsRemoveMetadataRequest.keys', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2421,
serialized_end=2575,
)
_PAYLOADSREMOVEMETADATARESPONSE_METADATAENTRY = _descriptor.Descriptor(
name='MetadataEntry',
full_name='nvidia.clara.platform.PayloadsRemoveMetadataResponse.MetadataEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='nvidia.clara.platform.PayloadsRemoveMetadataResponse.MetadataEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='nvidia.clara.platform.PayloadsRemoveMetadataResponse.MetadataEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=386,
serialized_end=433,
)
_PAYLOADSREMOVEMETADATARESPONSE = _descriptor.Descriptor(
name='PayloadsRemoveMetadataResponse',
full_name='nvidia.clara.platform.PayloadsRemoveMetadataResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.PayloadsRemoveMetadataResponse.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='payload_id', full_name='nvidia.clara.platform.PayloadsRemoveMetadataResponse.payload_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metadata', full_name='nvidia.clara.platform.PayloadsRemoveMetadataResponse.metadata', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_PAYLOADSREMOVEMETADATARESPONSE_METADATAENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2578,
serialized_end=2856,
)
_PAYLOADSUPLOADREQUEST = _descriptor.Descriptor(
name='PayloadsUploadRequest',
full_name='nvidia.clara.platform.PayloadsUploadRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.PayloadsUploadRequest.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='payload_id', full_name='nvidia.clara.platform.PayloadsUploadRequest.payload_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='details', full_name='nvidia.clara.platform.PayloadsUploadRequest.details', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='nvidia.clara.platform.PayloadsUploadRequest.data', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2859,
serialized_end=3065,
)
_PAYLOADSUPLOADRESPONSE = _descriptor.Descriptor(
name='PayloadsUploadResponse',
full_name='nvidia.clara.platform.PayloadsUploadResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='nvidia.clara.platform.PayloadsUploadResponse.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='details', full_name='nvidia.clara.platform.PayloadsUploadResponse.details', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3068,
serialized_end=3207,
)
_PAYLOADSADDMETADATAREQUEST_METADATAENTRY.containing_type = _PAYLOADSADDMETADATAREQUEST
_PAYLOADSADDMETADATAREQUEST.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._REQUESTHEADER
_PAYLOADSADDMETADATAREQUEST.fields_by_name['payload_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_PAYLOADSADDMETADATAREQUEST.fields_by_name['metadata'].message_type = _PAYLOADSADDMETADATAREQUEST_METADATAENTRY
_PAYLOADSADDMETADATARESPONSE_METADATAENTRY.containing_type = _PAYLOADSADDMETADATARESPONSE
_PAYLOADSADDMETADATARESPONSE.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._RESPONSEHEADER
_PAYLOADSADDMETADATARESPONSE.fields_by_name['payload_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_PAYLOADSADDMETADATARESPONSE.fields_by_name['metadata'].message_type = _PAYLOADSADDMETADATARESPONSE_METADATAENTRY
_PAYLOADSCREATEREQUEST_METADATAENTRY.containing_type = _PAYLOADSCREATEREQUEST
_PAYLOADSCREATEREQUEST.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._REQUESTHEADER
_PAYLOADSCREATEREQUEST.fields_by_name['metadata'].message_type = _PAYLOADSCREATEREQUEST_METADATAENTRY
_PAYLOADSCREATERESPONSE.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._RESPONSEHEADER
_PAYLOADSCREATERESPONSE.fields_by_name['payload_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_PAYLOADSCREATERESPONSE.fields_by_name['type'].enum_type = _PAYLOADTYPE
_PAYLOADSDELETEREQUEST.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._REQUESTHEADER
_PAYLOADSDELETEREQUEST.fields_by_name['payload_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_PAYLOADSDELETERESPONSE.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._RESPONSEHEADER
_PAYLOADSDETAILSREQUEST.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._REQUESTHEADER
_PAYLOADSDETAILSREQUEST.fields_by_name['payload_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_PAYLOADSDETAILSRESPONSE_METADATAENTRY.containing_type = _PAYLOADSDETAILSRESPONSE
_PAYLOADSDETAILSRESPONSE.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._RESPONSEHEADER
_PAYLOADSDETAILSRESPONSE.fields_by_name['payload_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_PAYLOADSDETAILSRESPONSE.fields_by_name['file'].message_type = _PAYLOADFILEDETAILS
_PAYLOADSDETAILSRESPONSE.fields_by_name['type'].enum_type = _PAYLOADTYPE
_PAYLOADSDETAILSRESPONSE.fields_by_name['job_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_PAYLOADSDETAILSRESPONSE.fields_by_name['metadata'].message_type = _PAYLOADSDETAILSRESPONSE_METADATAENTRY
_PAYLOADSDOWNLOADREQUEST.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._REQUESTHEADER
_PAYLOADSDOWNLOADREQUEST.fields_by_name['payload_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_PAYLOADSDOWNLOADRESPONSE.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._RESPONSEHEADER
_PAYLOADSDOWNLOADRESPONSE.fields_by_name['details'].message_type = _PAYLOADFILEDETAILS
_PAYLOADSREMOVEREQUEST.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._REQUESTHEADER
_PAYLOADSREMOVEREQUEST.fields_by_name['payload_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_PAYLOADSREMOVERESPONSE.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._RESPONSEHEADER
_PAYLOADSREMOVEMETADATAREQUEST.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._REQUESTHEADER
_PAYLOADSREMOVEMETADATAREQUEST.fields_by_name['payload_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_PAYLOADSREMOVEMETADATARESPONSE_METADATAENTRY.containing_type = _PAYLOADSREMOVEMETADATARESPONSE
_PAYLOADSREMOVEMETADATARESPONSE.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._RESPONSEHEADER
_PAYLOADSREMOVEMETADATARESPONSE.fields_by_name['payload_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_PAYLOADSREMOVEMETADATARESPONSE.fields_by_name['metadata'].message_type = _PAYLOADSREMOVEMETADATARESPONSE_METADATAENTRY
_PAYLOADSUPLOADREQUEST.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._REQUESTHEADER
_PAYLOADSUPLOADREQUEST.fields_by_name['payload_id'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._IDENTIFIER
_PAYLOADSUPLOADREQUEST.fields_by_name['details'].message_type = _PAYLOADFILEDETAILS
_PAYLOADSUPLOADRESPONSE.fields_by_name['header'].message_type = nvidia_dot_clara_dot_platform_dot_common__pb2._RESPONSEHEADER
_PAYLOADSUPLOADRESPONSE.fields_by_name['details'].message_type = _PAYLOADFILEDETAILS
DESCRIPTOR.message_types_by_name['PayloadFileDetails'] = _PAYLOADFILEDETAILS
DESCRIPTOR.message_types_by_name['PayloadsAddMetadataRequest'] = _PAYLOADSADDMETADATAREQUEST
DESCRIPTOR.message_types_by_name['PayloadsAddMetadataResponse'] = _PAYLOADSADDMETADATARESPONSE
DESCRIPTOR.message_types_by_name['PayloadsCreateRequest'] = _PAYLOADSCREATEREQUEST
DESCRIPTOR.message_types_by_name['PayloadsCreateResponse'] = _PAYLOADSCREATERESPONSE
DESCRIPTOR.message_types_by_name['PayloadsDeleteRequest'] = _PAYLOADSDELETEREQUEST
DESCRIPTOR.message_types_by_name['PayloadsDeleteResponse'] = _PAYLOADSDELETERESPONSE
DESCRIPTOR.message_types_by_name['PayloadsDetailsRequest'] = _PAYLOADSDETAILSREQUEST
DESCRIPTOR.message_types_by_name['PayloadsDetailsResponse'] = _PAYLOADSDETAILSRESPONSE
DESCRIPTOR.message_types_by_name['PayloadsDownloadRequest'] = _PAYLOADSDOWNLOADREQUEST
DESCRIPTOR.message_types_by_name['PayloadsDownloadResponse'] = _PAYLOADSDOWNLOADRESPONSE
DESCRIPTOR.message_types_by_name['PayloadsRemoveRequest'] = _PAYLOADSREMOVEREQUEST
DESCRIPTOR.message_types_by_name['PayloadsRemoveResponse'] = _PAYLOADSREMOVERESPONSE
DESCRIPTOR.message_types_by_name['PayloadsRemoveMetadataRequest'] = _PAYLOADSREMOVEMETADATAREQUEST
DESCRIPTOR.message_types_by_name['PayloadsRemoveMetadataResponse'] = _PAYLOADSREMOVEMETADATARESPONSE
DESCRIPTOR.message_types_by_name['PayloadsUploadRequest'] = _PAYLOADSUPLOADREQUEST
DESCRIPTOR.message_types_by_name['PayloadsUploadResponse'] = _PAYLOADSUPLOADRESPONSE
DESCRIPTOR.enum_types_by_name['PayloadType'] = _PAYLOADTYPE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
PayloadFileDetails = _reflection.GeneratedProtocolMessageType('PayloadFileDetails', (_message.Message,), dict(
DESCRIPTOR = _PAYLOADFILEDETAILS,
__module__ = 'nvidia.clara.platform.payloads_pb2'
# @@protoc_insertion_point(class_scope:nvidia.clara.platform.PayloadFileDetails)
))
_sym_db.RegisterMessage(PayloadFileDetails)
PayloadsAddMetadataRequest = _reflection.GeneratedProtocolMessageType('PayloadsAddMetadataRequest', (_message.Message,), dict(
MetadataEntry = _reflection.GeneratedProtocolMessageType('MetadataEntry', (_message.Message,), dict(
DESCRIPTOR = _PAYLOADSADDMETADATAREQUEST_METADATAENTRY,
__module__ = 'nvidia.clara.platform.payloads_pb2'
# @@protoc_insertion_point(class_scope:nvidia.clara.platform.PayloadsAddMetadataRequest.MetadataEntry)
))
,
DESCRIPTOR = _PAYLOADSADDMETADATAREQUEST,
__module__ = 'nvidia.clara.platform.payloads_pb2'
# @@protoc_insertion_point(class_scope:nvidia.clara.platform.PayloadsAddMetadataRequest)
))
_sym_db.RegisterMessage(PayloadsAddMetadataRequest)
_sym_db.RegisterMessage(PayloadsAddMetadataRequest.MetadataEntry)
PayloadsAddMetadataResponse = _reflection.GeneratedProtocolMessageType('PayloadsAddMetadataResponse', (_message.Message,), dict(
MetadataEntry = _reflection.GeneratedProtocolMessageType('MetadataEntry', (_message.Message,), dict(
DESCRIPTOR = _PAYLOADSADDMETADATARESPONSE_METADATAENTRY,
__module__ = 'nvidia.clara.platform.payloads_pb2'
# @@protoc_insertion_point(class_scope:nvidia.clara.platform.PayloadsAddMetadataResponse.MetadataEntry)
))
,
DESCRIPTOR = _PAYLOADSADDMETADATARESPONSE,
__module__ = 'nvidia.clara.platform.payloads_pb2'
# @@protoc_insertion_point(class_scope:nvidia.clara.platform.PayloadsAddMetadataResponse)
))
_sym_db.RegisterMessage(PayloadsAddMetadataResponse)
_sym_db.RegisterMessage(PayloadsAddMetadataResponse.MetadataEntry)
PayloadsCreateRequest = _reflection.GeneratedProtocolMessageType('PayloadsCreateRequest', (_message.Message,), dict(
MetadataEntry = _reflection.GeneratedProtocolMessageType('MetadataEntry', (_message.Message,), dict(
DESCRIPTOR = _PAYLOADSCREATEREQUEST_METADATAENTRY,
__module__ = 'nvidia.clara.platform.payloads_pb2'
# @@protoc_insertion_point(class_scope:nvidia.clara.platform.PayloadsCreateRequest.MetadataEntry)
))
,
DESCRIPTOR = _PAYLOADSCREATEREQUEST,
__module__ = 'nvidia.clara.platform.payloads_pb2'
# @@protoc_insertion_point(class_scope:nvidia.clara.platform.PayloadsCreateRequest)
))
_sym_db.RegisterMessage(PayloadsCreateRequest)
_sym_db.RegisterMessage(PayloadsCreateRequest.MetadataEntry)
PayloadsCreateResponse = _reflection.GeneratedProtocolMessageType('PayloadsCreateResponse', (_message.Message,), dict(
DESCRIPTOR = _PAYLOADSCREATERESPONSE,
__module__ = 'nvidia.clara.platform.payloads_pb2'
# @@protoc_insertion_point(class_scope:nvidia.clara.platform.PayloadsCreateResponse)
))
_sym_db.RegisterMessage(PayloadsCreateResponse)
PayloadsDeleteRequest = _reflection.GeneratedProtocolMessageType('PayloadsDeleteRequest', (_message.Message,), dict(
DESCRIPTOR = _PAYLOADSDELETEREQUEST,
__module__ = 'nvidia.clara.platform.payloads_pb2'
# @@protoc_insertion_point(class_scope:nvidia.clara.platform.PayloadsDeleteRequest)
))
_sym_db.RegisterMessage(PayloadsDeleteRequest)
PayloadsDeleteResponse = _reflection.GeneratedProtocolMessageType('PayloadsDeleteResponse', (_message.Message,), dict(
DESCRIPTOR = _PAYLOADSDELETERESPONSE,
__module__ = 'nvidia.clara.platform.payloads_pb2'
# @@protoc_insertion_point(class_scope:nvidia.clara.platform.PayloadsDeleteResponse)
))
_sym_db.RegisterMessage(PayloadsDeleteResponse)
PayloadsDetailsRequest = _reflection.GeneratedProtocolMessageType('PayloadsDetailsRequest', (_message.Message,), dict(
DESCRIPTOR = _PAYLOADSDETAILSREQUEST,
__module__ = 'nvidia.clara.platform.payloads_pb2'
# @@protoc_insertion_point(class_scope:nvidia.clara.platform.PayloadsDetailsRequest)
))
_sym_db.RegisterMessage(PayloadsDetailsRequest)
PayloadsDetailsResponse = _reflection.GeneratedProtocolMessageType('PayloadsDetailsResponse', (_message.Message,), dict(
MetadataEntry = _reflection.GeneratedProtocolMessageType('MetadataEntry', (_message.Message,), dict(
DESCRIPTOR = _PAYLOADSDETAILSRESPONSE_METADATAENTRY,
__module__ = 'nvidia.clara.platform.payloads_pb2'
# @@protoc_insertion_point(class_scope:nvidia.clara.platform.PayloadsDetailsResponse.MetadataEntry)
))
,
DESCRIPTOR = _PAYLOADSDETAILSRESPONSE,
__module__ = 'nvidia.clara.platform.payloads_pb2'
# @@protoc_insertion_point(class_scope:nvidia.clara.platform.PayloadsDetailsResponse)
))
_sym_db.RegisterMessage(PayloadsDetailsResponse)
_sym_db.RegisterMessage(PayloadsDetailsResponse.MetadataEntry)
PayloadsDownloadRequest = _reflection.GeneratedProtocolMessageType('PayloadsDownloadRequest', (_message.Message,), dict(
DESCRIPTOR = _PAYLOADSDOWNLOADREQUEST,
__module__ = 'nvidia.clara.platform.payloads_pb2'
# @@protoc_insertion_point(class_scope:nvidia.clara.platform.PayloadsDownloadRequest)
))
_sym_db.RegisterMessage(PayloadsDownloadRequest)
PayloadsDownloadResponse = _reflection.GeneratedProtocolMessageType('PayloadsDownloadResponse', (_message.Message,), dict(
DESCRIPTOR = _PAYLOADSDOWNLOADRESPONSE,
__module__ = 'nvidia.clara.platform.payloads_pb2'
# @@protoc_insertion_point(class_scope:nvidia.clara.platform.PayloadsDownloadResponse)
))
_sym_db.RegisterMessage(PayloadsDownloadResponse)
PayloadsRemoveRequest = _reflection.GeneratedProtocolMessageType('PayloadsRemoveRequest', (_message.Message,), dict(
DESCRIPTOR = _PAYLOADSREMOVEREQUEST,
__module__ = 'nvidia.clara.platform.payloads_pb2'
# @@protoc_insertion_point(class_scope:nvidia.clara.platform.PayloadsRemoveRequest)
))
_sym_db.RegisterMessage(PayloadsRemoveRequest)
PayloadsRemoveResponse = _reflection.GeneratedProtocolMessageType('PayloadsRemoveResponse', (_message.Message,), dict(
DESCRIPTOR = _PAYLOADSREMOVERESPONSE,
__module__ = 'nvidia.clara.platform.payloads_pb2'
# @@protoc_insertion_point(class_scope:nvidia.clara.platform.PayloadsRemoveResponse)
))
_sym_db.RegisterMessage(PayloadsRemoveResponse)
PayloadsRemoveMetadataRequest = _reflection.GeneratedProtocolMessageType('PayloadsRemoveMetadataRequest', (_message.Message,), dict(
DESCRIPTOR = _PAYLOADSREMOVEMETADATAREQUEST,
__module__ = 'nvidia.clara.platform.payloads_pb2'
# @@protoc_insertion_point(class_scope:nvidia.clara.platform.PayloadsRemoveMetadataRequest)
))
_sym_db.RegisterMessage(PayloadsRemoveMetadataRequest)
PayloadsRemoveMetadataResponse = _reflection.GeneratedProtocolMessageType('PayloadsRemoveMetadataResponse', (_message.Message,), dict(
MetadataEntry = _reflection.GeneratedProtocolMessageType('MetadataEntry', (_message.Message,), dict(
DESCRIPTOR = _PAYLOADSREMOVEMETADATARESPONSE_METADATAENTRY,
__module__ = 'nvidia.clara.platform.payloads_pb2'
# @@protoc_insertion_point(class_scope:nvidia.clara.platform.PayloadsRemoveMetadataResponse.MetadataEntry)
))
,
DESCRIPTOR = _PAYLOADSREMOVEMETADATARESPONSE,
__module__ = 'nvidia.clara.platform.payloads_pb2'
# @@protoc_insertion_point(class_scope:nvidia.clara.platform.PayloadsRemoveMetadataResponse)
))
_sym_db.RegisterMessage(PayloadsRemoveMetadataResponse)
_sym_db.RegisterMessage(PayloadsRemoveMetadataResponse.MetadataEntry)
PayloadsUploadRequest = _reflection.GeneratedProtocolMessageType('PayloadsUploadRequest', (_message.Message,), dict(
DESCRIPTOR = _PAYLOADSUPLOADREQUEST,
__module__ = 'nvidia.clara.platform.payloads_pb2'
# @@protoc_insertion_point(class_scope:nvidia.clara.platform.PayloadsUploadRequest)
))
_sym_db.RegisterMessage(PayloadsUploadRequest)
PayloadsUploadResponse = _reflection.GeneratedProtocolMessageType('PayloadsUploadResponse', (_message.Message,), dict(
DESCRIPTOR = _PAYLOADSUPLOADRESPONSE,
__module__ = 'nvidia.clara.platform.payloads_pb2'
# @@protoc_insertion_point(class_scope:nvidia.clara.platform.PayloadsUploadResponse)
))
_sym_db.RegisterMessage(PayloadsUploadResponse)
DESCRIPTOR._options = None
_PAYLOADSADDMETADATAREQUEST_METADATAENTRY._options = None
_PAYLOADSADDMETADATARESPONSE_METADATAENTRY._options = None
_PAYLOADSCREATEREQUEST_METADATAENTRY._options = None
_PAYLOADSDETAILSRESPONSE_METADATAENTRY._options = None
_PAYLOADSREMOVEMETADATARESPONSE_METADATAENTRY._options = None
_PAYLOADS = _descriptor.ServiceDescriptor(
name='Payloads',
full_name='nvidia.clara.platform.Payloads',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=3305,
serialized_end=4193,
methods=[
_descriptor.MethodDescriptor(
name='AddMetadata',
full_name='nvidia.clara.platform.Payloads.AddMetadata',
index=0,
containing_service=None,
input_type=_PAYLOADSADDMETADATAREQUEST,
output_type=_PAYLOADSADDMETADATARESPONSE,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='Create',
full_name='nvidia.clara.platform.Payloads.Create',
index=1,
containing_service=None,
input_type=_PAYLOADSCREATEREQUEST,
output_type=_PAYLOADSCREATERESPONSE,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='Delete',
full_name='nvidia.clara.platform.Payloads.Delete',
index=2,
containing_service=None,
input_type=_PAYLOADSDELETEREQUEST,
output_type=_PAYLOADSDELETERESPONSE,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='Details',
full_name='nvidia.clara.platform.Payloads.Details',
index=3,
containing_service=None,
input_type=_PAYLOADSDETAILSREQUEST,
output_type=_PAYLOADSDETAILSRESPONSE,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='Download',
full_name='nvidia.clara.platform.Payloads.Download',
index=4,
containing_service=None,
input_type=_PAYLOADSDOWNLOADREQUEST,
output_type=_PAYLOADSDOWNLOADRESPONSE,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='Remove',
full_name='nvidia.clara.platform.Payloads.Remove',
index=5,
containing_service=None,
input_type=_PAYLOADSREMOVEREQUEST,
output_type=_PAYLOADSREMOVERESPONSE,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='RemoveMetadata',
full_name='nvidia.clara.platform.Payloads.RemoveMetadata',
index=6,
containing_service=None,
input_type=_PAYLOADSREMOVEMETADATAREQUEST,
output_type=_PAYLOADSREMOVEMETADATARESPONSE,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='Upload',
full_name='nvidia.clara.platform.Payloads.Upload',
index=7,
containing_service=None,
input_type=_PAYLOADSUPLOADREQUEST,
output_type=_PAYLOADSUPLOADRESPONSE,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_PAYLOADS)
DESCRIPTOR.services_by_name['Payloads'] = _PAYLOADS
# @@protoc_insertion_point(module_scope)
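# ---------------------------------------------------------------------------
# Illustrative helper (not part of the generated code): a minimal sketch that
# round-trips one of the message classes registered above. Only the 'data'
# bytes field shown in the PayloadsUploadRequest descriptor is populated; the
# header and payload_id sub-messages are defined in common_pb2 and left unset.
def _example_roundtrip_upload_request(chunk=b'example-bytes'):
    """Serialize and re-parse a PayloadsUploadRequest carrying one data chunk."""
    request = PayloadsUploadRequest(data=chunk)
    wire_bytes = request.SerializeToString()
    # FromString is provided by the generated message class and rebuilds the
    # message from its wire-format encoding.
    return PayloadsUploadRequest.FromString(wire_bytes)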
| clara-platform-python-client-main | nvidia_clara/grpc/payloads_pb2.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia/clara/platform/node_monitor/metrics.proto
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from nvidia_clara.grpc import metrics_pb2 as nvidia_dot_clara_dot_platform_dot_node__monitor_dot_metrics__pb2
class MonitorStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GpuMetrics = channel.unary_stream(
'/nvidia.clara.platform.node_monitor.Monitor/GpuMetrics',
request_serializer=nvidia_dot_clara_dot_platform_dot_node__monitor_dot_metrics__pb2.MonitorGpuMetricsRequest.SerializeToString,
response_deserializer=nvidia_dot_clara_dot_platform_dot_node__monitor_dot_metrics__pb2.MonitorGpuMetricsResponse.FromString,
)
class MonitorServicer(object):
# missing associated documentation comment in .proto file
pass
def GpuMetrics(self, request, context):
"""Request GPU metrics
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_MonitorServicer_to_server(servicer, server):
rpc_method_handlers = {
'GpuMetrics': grpc.unary_stream_rpc_method_handler(
servicer.GpuMetrics,
request_deserializer=nvidia_dot_clara_dot_platform_dot_node__monitor_dot_metrics__pb2.MonitorGpuMetricsRequest.FromString,
response_serializer=nvidia_dot_clara_dot_platform_dot_node__monitor_dot_metrics__pb2.MonitorGpuMetricsResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'nvidia.clara.platform.node_monitor.Monitor', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
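# ---------------------------------------------------------------------------
# Illustrative helper (not part of the generated code): a minimal sketch of how
# a client could stream GPU metrics with the stub above. The server address is
# an assumption, and MonitorGpuMetricsRequest is constructed with default field
# values because its fields live in metrics_pb2 rather than in this module.
def _example_stream_gpu_metrics(address='localhost:50051'):
    """Yield MonitorGpuMetricsResponse messages from a Clara Monitor endpoint."""
    with grpc.insecure_channel(address) as channel:
        stub = MonitorStub(channel)
        # GpuMetrics is a unary-stream RPC, so iterating over the call yields
        # one response message at a time until the server closes the stream.
        request = nvidia_dot_clara_dot_platform_dot_node__monitor_dot_metrics__pb2.MonitorGpuMetricsRequest()
        for response in stub.GpuMetrics(request):
            yield response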
| clara-platform-python-client-main | nvidia_clara/grpc/metrics_pb2_grpc.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia/clara/platform/models.proto
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from nvidia_clara.grpc import models_pb2 as nvidia_dot_clara_dot_platform_dot_models__pb2
class ModelsStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.AddMetadata = channel.unary_unary(
'/nvidia.clara.platform.Models/AddMetadata',
request_serializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsAddMetadataRequest.SerializeToString,
response_deserializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsAddMetadataResponse.FromString,
)
self.CreateCatalog = channel.unary_unary(
'/nvidia.clara.platform.Models/CreateCatalog',
request_serializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsCreateCatalogRequest.SerializeToString,
response_deserializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsCreateCatalogResponse.FromString,
)
self.CreateInstance = channel.unary_unary(
'/nvidia.clara.platform.Models/CreateInstance',
request_serializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsCreateInstanceRequest.SerializeToString,
response_deserializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsCreateInstanceResponse.FromString,
)
self.DeleteCatalog = channel.unary_unary(
'/nvidia.clara.platform.Models/DeleteCatalog',
request_serializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsDeleteCatalogRequest.SerializeToString,
response_deserializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsDeleteCatalogResponse.FromString,
)
self.DeleteInstance = channel.unary_unary(
'/nvidia.clara.platform.Models/DeleteInstance',
request_serializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsDeleteInstanceRequest.SerializeToString,
response_deserializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsDeleteInstanceResponse.FromString,
)
self.DeleteModel = channel.unary_unary(
'/nvidia.clara.platform.Models/DeleteModel',
request_serializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsDeleteModelRequest.SerializeToString,
response_deserializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsDeleteModelResponse.FromString,
)
self.DownloadModel = channel.unary_stream(
'/nvidia.clara.platform.Models/DownloadModel',
request_serializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsDownloadModelRequest.SerializeToString,
response_deserializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsDownloadModelResponse.FromString,
)
self.ListCatalogs = channel.unary_stream(
'/nvidia.clara.platform.Models/ListCatalogs',
request_serializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsListCatalogsRequest.SerializeToString,
response_deserializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsListCatalogsResponse.FromString,
)
self.ListInstances = channel.unary_stream(
'/nvidia.clara.platform.Models/ListInstances',
request_serializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsListInstancesRequest.SerializeToString,
response_deserializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsListInstancesResponse.FromString,
)
self.ListModels = channel.unary_stream(
'/nvidia.clara.platform.Models/ListModels',
request_serializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsListModelsRequest.SerializeToString,
response_deserializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsListModelsResponse.FromString,
)
self.ReadCatalog = channel.unary_stream(
'/nvidia.clara.platform.Models/ReadCatalog',
request_serializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsReadCatalogRequest.SerializeToString,
response_deserializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsReadCatalogResponse.FromString,
)
self.ReadInstance = channel.unary_stream(
'/nvidia.clara.platform.Models/ReadInstance',
request_serializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsReadInstanceRequest.SerializeToString,
response_deserializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsReadInstanceResponse.FromString,
)
self.RemoveMetadata = channel.unary_unary(
'/nvidia.clara.platform.Models/RemoveMetadata',
request_serializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsRemoveMetadataRequest.SerializeToString,
response_deserializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsRemoveMetadataResponse.FromString,
)
self.UpdateCatalog = channel.stream_unary(
'/nvidia.clara.platform.Models/UpdateCatalog',
request_serializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsUpdateCatalogRequest.SerializeToString,
response_deserializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsUpdateCatalogResponse.FromString,
)
self.UpdateInstance = channel.stream_unary(
'/nvidia.clara.platform.Models/UpdateInstance',
request_serializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsUpdateInstanceRequest.SerializeToString,
response_deserializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsUpdateInstanceResponse.FromString,
)
self.UploadModel = channel.stream_unary(
'/nvidia.clara.platform.Models/UploadModel',
request_serializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsUploadModelRequest.SerializeToString,
response_deserializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsUploadModelResponse.FromString,
)
class ModelsServicer(object):
# missing associated documentation comment in .proto file
pass
def AddMetadata(self, request, context):
"""Requests the addition of metadata to a model.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateCatalog(self, request, context):
"""Requests the creation of a model catalog from the model repository.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateInstance(self, request, context):
"""Requests the creation of a model catalog instance from the model repository.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteCatalog(self, request, context):
"""Requests the deletion of a model catalog from the model repository.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteInstance(self, request, context):
"""Requests the deletion of a model catalog instance from the model repository.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteModel(self, request, context):
"""Requests the deletion of a model from the model repository.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DownloadModel(self, request, context):
"""Requests the download of an existing model from the model repository.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListCatalogs(self, request, context):
"""Requests the list of all existing catalogs from the model repository.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListInstances(self, request, context):
"""Requests a list of all existing model catalog instances from the model repository.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListModels(self, request, context):
"""Requests a list of available models from the model repository.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ReadCatalog(self, request, context):
"""Requests the contents of a model catalog from the model repository.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ReadInstance(self, request, context):
"""Requests the contents of a model catalog instance from the model repository.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RemoveMetadata(self, request, context):
"""Requests the removal of metadata from a model.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateCatalog(self, request_iterator, context):
"""Requests the update of an existing model catalog in the model repository.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateInstance(self, request_iterator, context):
"""Requests the update of an existing model catalog instance with a new set of models.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UploadModel(self, request_iterator, context):
"""Requests the uploads of a new model to model repository.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ModelsServicer_to_server(servicer, server):
rpc_method_handlers = {
'AddMetadata': grpc.unary_unary_rpc_method_handler(
servicer.AddMetadata,
request_deserializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsAddMetadataRequest.FromString,
response_serializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsAddMetadataResponse.SerializeToString,
),
'CreateCatalog': grpc.unary_unary_rpc_method_handler(
servicer.CreateCatalog,
request_deserializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsCreateCatalogRequest.FromString,
response_serializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsCreateCatalogResponse.SerializeToString,
),
'CreateInstance': grpc.unary_unary_rpc_method_handler(
servicer.CreateInstance,
request_deserializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsCreateInstanceRequest.FromString,
response_serializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsCreateInstanceResponse.SerializeToString,
),
'DeleteCatalog': grpc.unary_unary_rpc_method_handler(
servicer.DeleteCatalog,
request_deserializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsDeleteCatalogRequest.FromString,
response_serializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsDeleteCatalogResponse.SerializeToString,
),
'DeleteInstance': grpc.unary_unary_rpc_method_handler(
servicer.DeleteInstance,
request_deserializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsDeleteInstanceRequest.FromString,
response_serializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsDeleteInstanceResponse.SerializeToString,
),
'DeleteModel': grpc.unary_unary_rpc_method_handler(
servicer.DeleteModel,
request_deserializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsDeleteModelRequest.FromString,
response_serializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsDeleteModelResponse.SerializeToString,
),
'DownloadModel': grpc.unary_stream_rpc_method_handler(
servicer.DownloadModel,
request_deserializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsDownloadModelRequest.FromString,
response_serializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsDownloadModelResponse.SerializeToString,
),
'ListCatalogs': grpc.unary_stream_rpc_method_handler(
servicer.ListCatalogs,
request_deserializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsListCatalogsRequest.FromString,
response_serializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsListCatalogsResponse.SerializeToString,
),
'ListInstances': grpc.unary_stream_rpc_method_handler(
servicer.ListInstances,
request_deserializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsListInstancesRequest.FromString,
response_serializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsListInstancesResponse.SerializeToString,
),
'ListModels': grpc.unary_stream_rpc_method_handler(
servicer.ListModels,
request_deserializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsListModelsRequest.FromString,
response_serializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsListModelsResponse.SerializeToString,
),
'ReadCatalog': grpc.unary_stream_rpc_method_handler(
servicer.ReadCatalog,
request_deserializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsReadCatalogRequest.FromString,
response_serializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsReadCatalogResponse.SerializeToString,
),
'ReadInstance': grpc.unary_stream_rpc_method_handler(
servicer.ReadInstance,
request_deserializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsReadInstanceRequest.FromString,
response_serializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsReadInstanceResponse.SerializeToString,
),
'RemoveMetadata': grpc.unary_unary_rpc_method_handler(
servicer.RemoveMetadata,
request_deserializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsRemoveMetadataRequest.FromString,
response_serializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsRemoveMetadataResponse.SerializeToString,
),
'UpdateCatalog': grpc.stream_unary_rpc_method_handler(
servicer.UpdateCatalog,
request_deserializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsUpdateCatalogRequest.FromString,
response_serializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsUpdateCatalogResponse.SerializeToString,
),
'UpdateInstance': grpc.stream_unary_rpc_method_handler(
servicer.UpdateInstance,
request_deserializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsUpdateInstanceRequest.FromString,
response_serializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsUpdateInstanceResponse.SerializeToString,
),
'UploadModel': grpc.stream_unary_rpc_method_handler(
servicer.UploadModel,
request_deserializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsUploadModelRequest.FromString,
response_serializer=nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsUploadModelResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'nvidia.clara.platform.Models', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
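# ---------------------------------------------------------------------------
# Illustrative helper (not part of the generated code): a minimal sketch of how
# a client could list available models with the stub above. The server address
# is an assumption, and ModelsListModelsRequest is constructed with default
# field values because its fields are defined in models_pb2, not in this module.
def _example_list_models(address='localhost:50051'):
    """Yield ModelsListModelsResponse messages from a Clara Models endpoint."""
    with grpc.insecure_channel(address) as channel:
        stub = ModelsStub(channel)
        # ListModels is a unary-stream RPC; iterating over the call yields the
        # streamed responses until the server completes the stream.
        request = nvidia_dot_clara_dot_platform_dot_models__pb2.ModelsListModelsRequest()
        for response in stub.ListModels(request):
            yield response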
| clara-platform-python-client-main | nvidia_clara/grpc/models_pb2_grpc.py |