python_code | repo_name | file_path |
---|---|---|
from runtime.runner import Runner | DeepLearningExamples-master | TensorFlow/Classification/ConvNets/runtime/__init__.py |
from typing import Any, Dict, List, Optional
import numpy as np
from deployment_toolkit.core import BaseMetricsCalculator
class MetricsCalculator(BaseMetricsCalculator):
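    """Top-1 accuracy calculator: accumulates elementwise equality of predicted and real classes."""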
def __init__(self):
self._equals = []
def update(
self,
*,
ids: List[Any],
y_pred: Dict[str, np.ndarray],
x: Optional[Dict[str, np.ndarray]],
y_real: Optional[Dict[str, np.ndarray]],
):
classes_real = y_real["classes"]
classes_pred = y_pred["classes"]
classes_real = np.squeeze(classes_real)
classes_pred = np.squeeze(classes_pred)
assert classes_real.shape == classes_pred.shape, (
f"classes_pred.shape={classes_pred.shape} != " f"classes_real.shape={classes_real.shape}"
)
self._equals.append(classes_real == classes_pred)
@property
def metrics(self) -> Dict[str, Any]:
return {"accuracy": np.concatenate(self._equals, axis=0).mean()} | DeepLearningExamples-master | TensorFlow/Classification/ConvNets/triton/metrics.py |
#!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Using the `calculate_metrics.py` script, you can obtain model accuracy/error metrics with a user-defined `MetricsCalculator` class.
Data provided to the `MetricsCalculator` are obtained from dump files
stored in the directory pointed to by the `--dump-dir` argument.
These files are prepared by the `run_inference_on_fw.py` and `run_inference_on_triton.py` scripts.
Output data is stored in the CSV file pointed to by the `--csv` argument.
Example call:
```shell script
python ./triton/calculate_metrics.py \
--dump-dir /results/dump_triton \
--csv /results/accuracy_results.csv \
--metrics metrics.py \
--metric-class-param1 value
```
"""
import argparse
import csv
import logging
import string
from pathlib import Path
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
from .deployment_toolkit.args import ArgParserGenerator
from .deployment_toolkit.core import BaseMetricsCalculator, load_from_file
from .deployment_toolkit.dump import JsonDumpReader
LOGGER = logging.getLogger("calculate_metrics")
TOTAL_COLUMN_NAME = "_total_"
def main():
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(description="Run models with given dataloader", allow_abbrev=False)
parser.add_argument("--metrics", help="Path to python module containing metrics calculator", required=True)
parser.add_argument("--csv", help="Path to csv file", required=True)
parser.add_argument("--dump-dir", help="Path to directory with dumped outputs (and labels)", required=True)
args, *_ = parser.parse_known_args()
MetricsCalculator = load_from_file(args.metrics, "metrics", "MetricsCalculator")
ArgParserGenerator(MetricsCalculator).update_argparser(parser)
args = parser.parse_args()
LOGGER.info("args:")
for key, value in vars(args).items():
LOGGER.info(f" {key} = {value}")
MetricsCalculator = load_from_file(args.metrics, "metrics", "MetricsCalculator")
metrics_calculator: BaseMetricsCalculator = ArgParserGenerator(MetricsCalculator).from_args(args)
reader = JsonDumpReader(args.dump_dir)
for ids, x, y_true, y_pred in reader.iterate_over(["ids", "inputs", "labels", "outputs"]):
ids = list(ids["ids"]) if ids is not None else None
metrics_calculator.update(ids=ids, x=x, y_pred=y_pred, y_real=y_true)
metrics = metrics_calculator.metrics
metric_names_with_space = [name for name in metrics if any([c in string.whitespace for c in name])]
if metric_names_with_space:
raise ValueError(f"Metric names shall have no spaces; Incorrect names: {', '.join(metric_names_with_space)}")
csv_path = Path(args.csv)
csv_path.parent.mkdir(parents=True, exist_ok=True)
with csv_path.open("w") as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=list(metrics.keys()))
writer.writeheader()
writer.writerow(metrics)
if __name__ == "__main__":
main()
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/triton/calculate_metrics.py |
#!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
To infer the model deployed on Triton, you can use the `run_inference_on_triton.py` script.
It sends requests with data obtained from the pointed data loader and dumps the received outputs into JSON files.
Those files are stored in the directory pointed to by the `--output-dir` argument.
Currently, the client communicates with the Triton server asynchronously using the gRPC protocol.
Example call:
```shell script
python ./triton/run_inference_on_triton.py \
--server-url localhost:8001 \
--model-name ResNet50 \
--model-version 1 \
--dump-labels \
--output-dir /results/dump_triton
```
"""
import argparse
import functools
import logging
import queue
import threading
import time
from pathlib import Path
from typing import Optional
from tqdm import tqdm
# pytype: disable=import-error
try:
from tritonclient import utils as client_utils # noqa: F401
from tritonclient.grpc import (
InferenceServerClient,
InferInput,
InferRequestedOutput,
)
except ImportError:
import tritongrpcclient as grpc_client
from tritongrpcclient import (
InferenceServerClient,
InferInput,
InferRequestedOutput,
)
# pytype: enable=import-error
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
from .deployment_toolkit.args import ArgParserGenerator
from .deployment_toolkit.core import DATALOADER_FN_NAME, load_from_file
from .deployment_toolkit.dump import JsonDumpWriter
LOGGER = logging.getLogger("run_inference_on_triton")
class AsyncGRPCTritonRunner:
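    """Asynchronous gRPC runner for Triton inference.

    A background thread streams batches from the dataloader to the server,
    bounded by a maximal number of unresponded requests, while ``__iter__``
    yields completed (ids, x, y_pred, y_real) tuples from an internal results queue.
    """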
DEFAULT_MAX_RESP_WAIT_S = 120
DEFAULT_MAX_UNRESP_REQS = 128
DEFAULT_MAX_FINISH_WAIT_S = 900 # 15min
def __init__(
self,
server_url: str,
model_name: str,
model_version: str,
*,
dataloader,
verbose=False,
resp_wait_s: Optional[float] = None,
max_unresponded_reqs: Optional[int] = None,
):
self._server_url = server_url
self._model_name = model_name
self._model_version = model_version
self._dataloader = dataloader
self._verbose = verbose
self._response_wait_t = self.DEFAULT_MAX_RESP_WAIT_S if resp_wait_s is None else resp_wait_s
self._max_unresp_reqs = self.DEFAULT_MAX_UNRESP_REQS if max_unresponded_reqs is None else max_unresponded_reqs
self._results = queue.Queue()
self._processed_all = False
self._errors = []
self._num_waiting_for = 0
self._sync = threading.Condition()
self._req_thread = threading.Thread(target=self.req_loop, daemon=True)
def __iter__(self):
self._req_thread.start()
timeout_s = 0.050 # check flags processed_all and error flags every 50ms
while True:
try:
ids, x, y_pred, y_real = self._results.get(timeout=timeout_s)
yield ids, x, y_pred, y_real
except queue.Empty:
shall_stop = self._processed_all or self._errors
if shall_stop:
break
LOGGER.debug("Waiting for request thread to stop")
self._req_thread.join()
if self._errors:
error_msg = "\n".join(map(str, self._errors))
raise RuntimeError(error_msg)
def _on_result(self, ids, x, y_real, output_names, result, error):
with self._sync:
if error:
self._errors.append(error)
else:
y_pred = {name: result.as_numpy(name) for name in output_names}
self._results.put((ids, x, y_pred, y_real))
self._num_waiting_for -= 1
self._sync.notify_all()
def req_loop(self):
client = InferenceServerClient(self._server_url, verbose=self._verbose)
self._errors = self._verify_triton_state(client)
if self._errors:
return
LOGGER.debug(
f"Triton server {self._server_url} and model {self._model_name}:{self._model_version} " f"are up and ready!"
)
model_config = client.get_model_config(self._model_name, self._model_version)
model_metadata = client.get_model_metadata(self._model_name, self._model_version)
LOGGER.info(f"Model config {model_config}")
LOGGER.info(f"Model metadata {model_metadata}")
inputs = {tm.name: tm for tm in model_metadata.inputs}
outputs = {tm.name: tm for tm in model_metadata.outputs}
output_names = list(outputs)
outputs_req = [InferRequestedOutput(name) for name in outputs]
self._num_waiting_for = 0
for ids, x, y_real in self._dataloader:
infer_inputs = []
for name in inputs:
data = x[name]
infer_input = InferInput(name, data.shape, inputs[name].datatype)
target_np_dtype = client_utils.triton_to_np_dtype(inputs[name].datatype)
data = data.astype(target_np_dtype)
infer_input.set_data_from_numpy(data)
infer_inputs.append(infer_input)
with self._sync:
def _check_can_send():
return self._num_waiting_for < self._max_unresp_reqs
can_send = self._sync.wait_for(_check_can_send, timeout=self._response_wait_t)
if not can_send:
error_msg = f"Runner could not send new requests for {self._response_wait_t}s"
self._errors.append(error_msg)
break
callback = functools.partial(AsyncGRPCTritonRunner._on_result, self, ids, x, y_real, output_names)
client.async_infer(
model_name=self._model_name,
model_version=self._model_version,
inputs=infer_inputs,
outputs=outputs_req,
callback=callback,
)
self._num_waiting_for += 1
        # wait till all requested data is received
with self._sync:
def _all_processed():
LOGGER.debug(f"wait for {self._num_waiting_for} unprocessed jobs")
return self._num_waiting_for == 0
self._processed_all = self._sync.wait_for(_all_processed, self.DEFAULT_MAX_FINISH_WAIT_S)
if not self._processed_all:
error_msg = f"Runner {self._response_wait_t}s timeout received while waiting for results from server"
self._errors.append(error_msg)
LOGGER.debug("Finished request thread")
def _verify_triton_state(self, triton_client):
errors = []
if not triton_client.is_server_live():
errors.append(f"Triton server {self._server_url} is not live")
elif not triton_client.is_server_ready():
errors.append(f"Triton server {self._server_url} is not ready")
elif not triton_client.is_model_ready(self._model_name, self._model_version):
errors.append(f"Model {self._model_name}:{self._model_version} is not ready")
return errors
def _parse_args():
parser = argparse.ArgumentParser(description="Infer model on Triton server", allow_abbrev=False)
parser.add_argument(
"--server-url", type=str, default="localhost:8001", help="Inference server URL (default localhost:8001)"
)
parser.add_argument("--model-name", help="The name of the model used for inference.", required=True)
parser.add_argument("--model-version", help="The version of the model used for inference.", required=True)
parser.add_argument("--dataloader", help="Path to python file containing dataloader.", required=True)
parser.add_argument("--dump-labels", help="Dump labels to output dir", action="store_true", default=False)
parser.add_argument("--dump-inputs", help="Dump inputs to output dir", action="store_true", default=False)
parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False)
parser.add_argument("--output-dir", required=True, help="Path to directory where outputs will be saved")
parser.add_argument(
"--response-wait-time", required=False, help="Maximal time to wait for response", default=120, type=float
)
parser.add_argument(
"--max-unresponded-requests",
required=False,
help="Maximal number of unresponded requests",
default=128,
type=int,
)
args, *_ = parser.parse_known_args()
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
ArgParserGenerator(get_dataloader_fn).update_argparser(parser)
args = parser.parse_args()
return args
def main():
args = _parse_args()
log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
log_level = logging.INFO if not args.verbose else logging.DEBUG
logging.basicConfig(level=log_level, format=log_format)
LOGGER.info(f"args:")
for key, value in vars(args).items():
LOGGER.info(f" {key} = {value}")
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args)
runner = AsyncGRPCTritonRunner(
args.server_url,
args.model_name,
args.model_version,
dataloader=dataloader_fn(),
verbose=False,
resp_wait_s=args.response_wait_time,
max_unresponded_reqs=args.max_unresponded_requests,
)
with JsonDumpWriter(output_dir=args.output_dir) as writer:
start = time.time()
for ids, x, y_pred, y_real in tqdm(runner, unit="batch", mininterval=10):
data = _verify_and_format_dump(args, ids, x, y_pred, y_real)
writer.write(**data)
stop = time.time()
LOGGER.info(f"\nThe inference took {stop - start:0.3f}s")
def _verify_and_format_dump(args, ids, x, y_pred, y_real):
data = {"outputs": y_pred, "ids": {"ids": ids}}
if args.dump_inputs:
data["inputs"] = x
if args.dump_labels:
if not y_real:
raise ValueError(
"Found empty label values. Please provide labels in dataloader_fn or do not use --dump-labels argument"
)
data["labels"] = y_real
return data
if __name__ == "__main__":
main()
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/triton/run_inference_on_triton.py |
#!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
For models with variable-sized inputs you must provide the --input-shape argument so that perf_analyzer knows
what shape tensors to use. For example, for a model that has an input called IMAGE with shape [ 3, N, M ],
where N and M are variable-size dimensions, tell perf_analyzer to send batch-size 4 requests of shape [ 3, 224, 224 ]
with `--shape IMAGE:3,224,224`.
"""
import argparse
import csv
import os
import sys
from pathlib import Path
from typing import Dict, List, Optional
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
from .deployment_toolkit.report import save_results, show_results, sort_results
from .deployment_toolkit.warmup import warmup
def calculate_average_latency(r):
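    # the CSV may contain an aggregate "Server Compute" column or its Input/Infer/Output
    # breakdown; missing columns default to 0, so summing the per-stage columns
    # yields the average end-to-end latency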
avg_sum_fields = [
"Client Send",
"Network+Server Send/Recv",
"Server Queue",
"Server Compute",
"Server Compute Input",
"Server Compute Infer",
"Server Compute Output",
"Client Recv",
]
avg_latency = sum([int(r.get(f, 0)) for f in avg_sum_fields])
return avg_latency
def update_performance_data(results: List, batch_size: int, performance_partial_file: str):
row: Dict = {"batch_size": batch_size}
with open(performance_partial_file, "r") as csvfile:
reader = csv.DictReader(csvfile)
for r in reader:
avg_latency = calculate_average_latency(r)
row = {**row, **r, "avg latency": avg_latency}
results.append(row)
def _parse_batch_sizes(batch_sizes: str):
batches = batch_sizes.split(sep=",")
return list(map(lambda x: int(x.strip()), batches))
def offline_performance(
model_name: str,
batch_sizes: List[int],
result_path: str,
input_shapes: Optional[List[str]] = None,
profiling_data: str = "random",
triton_instances: int = 1,
server_url: str = "localhost",
measurement_window: int = 10000,
shared_memory: bool = False
):
print("\n")
print(f"==== Static batching analysis start ====")
print("\n")
input_shapes = " ".join(map(lambda shape: f" --shape {shape}", input_shapes)) if input_shapes else ""
results: List[Dict] = list()
for batch_size in batch_sizes:
print(f"Running performance tests for batch size: {batch_size}")
performance_partial_file = f"triton_performance_partial_{batch_size}.csv"
exec_args = f"""-max-threads {triton_instances} \
-m {model_name} \
-x 1 \
-c {triton_instances} \
-t {triton_instances} \
-p {measurement_window} \
-v \
-i http \
-u {server_url}:8000 \
-b {batch_size} \
-f {performance_partial_file} \
--input-data {profiling_data} {input_shapes}"""
if shared_memory:
exec_args += " --shared-memory=cuda"
result = os.system(f"perf_client {exec_args}")
if result != 0:
print(f"Failed running performance tests. Perf client failed with exit code {result}")
sys.exit(1)
update_performance_data(results, batch_size, performance_partial_file)
os.remove(performance_partial_file)
results = sort_results(results=results)
save_results(filename=result_path, data=results)
show_results(results=results)
print("Performance results for static batching stored in: {0}".format(result_path))
print("\n")
print(f"==== Analysis done ====")
print("\n")
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--model-name", type=str, required=True, help="Name of the model to test")
parser.add_argument(
"--input-data", type=str, required=False, default="random", help="Input data to perform profiling."
)
parser.add_argument(
"--input-shape",
action="append",
required=False,
help="Input data shape in form INPUT_NAME:<full_shape_without_batch_axis>.",
)
parser.add_argument("--batch-sizes", type=str, required=True, help="List of batch sizes to tests. Comma separated.")
parser.add_argument("--result-path", type=str, required=True, help="Path where result file is going to be stored.")
parser.add_argument("--triton-instances", type=int, default=1, help="Number of Triton Server instances")
parser.add_argument("--server-url", type=str, required=False, default="localhost", help="Url to Triton server")
parser.add_argument(
"--measurement-window", required=False, help="Time which perf_analyzer will wait for results", default=10000
)
parser.add_argument("--shared-memory", help="Use shared memory for communication with Triton", action="store_true",
default=False)
args = parser.parse_args()
warmup(
server_url=args.server_url,
model_name=args.model_name,
batch_sizes=_parse_batch_sizes(args.batch_sizes),
triton_instances=args.triton_instances,
profiling_data=args.input_data,
input_shapes=args.input_shape,
measurement_window=args.measurement_window,
shared_memory=args.shared_memory
)
offline_performance(
server_url=args.server_url,
model_name=args.model_name,
batch_sizes=_parse_batch_sizes(args.batch_sizes),
triton_instances=args.triton_instances,
profiling_data=args.input_data,
input_shapes=args.input_shape,
result_path=args.result_path,
measurement_window=args.measurement_window,
shared_memory=args.shared_memory
)
if __name__ == "__main__":
main()
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/triton/run_offline_performance_test_on_triton.py |
#!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
To infer the model on the framework runtime, you can use the `run_inference_on_fw.py` script.
It runs inference locally on data obtained from the pointed data loader and saves the received outputs into JSON files.
Those files are stored in the directory pointed to by the `--output-dir` argument.
Example call:
```shell script
python ./triton/run_inference_on_fw.py \
--input-path /models/exported/model.onnx \
--input-type onnx \
--dataloader triton/dataloader.py \
--data-dir /data/imagenet \
--batch-size 32 \
--output-dir /results/dump_local \
--dump-labels
```
"""
import argparse
import logging
import os
from pathlib import Path
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
os.environ["TF_ENABLE_DEPRECATION_WARNINGS"] = "0"
from tqdm import tqdm
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
from .deployment_toolkit.args import ArgParserGenerator
from .deployment_toolkit.core import DATALOADER_FN_NAME, BaseLoader, BaseRunner, Format, load_from_file
from .deployment_toolkit.dump import JsonDumpWriter
from .deployment_toolkit.extensions import loaders, runners
LOGGER = logging.getLogger("run_inference_on_fw")
def _verify_and_format_dump(args, ids, x, y_pred, y_real):
data = {"outputs": y_pred, "ids": {"ids": ids}}
if args.dump_inputs:
data["inputs"] = x
if args.dump_labels:
if not y_real:
raise ValueError(
"Found empty label values. Please provide labels in dataloader_fn or do not use --dump-labels argument"
)
data["labels"] = y_real
return data
def _parse_and_validate_args():
supported_inputs = set(runners.supported_extensions) & set(loaders.supported_extensions)
parser = argparse.ArgumentParser(description="Dump local inference output of given model", allow_abbrev=False)
parser.add_argument("--input-path", help="Path to input model", required=True)
parser.add_argument("--input-type", help="Input model type", choices=supported_inputs, required=True)
parser.add_argument("--dataloader", help="Path to python file containing dataloader.", required=True)
parser.add_argument("--output-dir", help="Path to dir where output files will be stored", required=True)
parser.add_argument("--dump-labels", help="Dump labels to output dir", action="store_true", default=False)
parser.add_argument("--dump-inputs", help="Dump inputs to output dir", action="store_true", default=False)
parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False)
args, *_ = parser.parse_known_args()
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
ArgParserGenerator(get_dataloader_fn).update_argparser(parser)
Loader: BaseLoader = loaders.get(args.input_type)
ArgParserGenerator(Loader, module_path=args.input_path).update_argparser(parser)
Runner: BaseRunner = runners.get(args.input_type)
ArgParserGenerator(Runner).update_argparser(parser)
args = parser.parse_args()
types_requiring_io_params = []
if args.input_type in types_requiring_io_params and not all(p for p in [args.inputs, args.outputs]):
parser.error(f"For {args.input_type} input provide --inputs and --outputs parameters")
return args
def main():
args = _parse_and_validate_args()
log_level = logging.INFO if not args.verbose else logging.DEBUG
log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
logging.basicConfig(level=log_level, format=log_format)
LOGGER.info(f"args:")
for key, value in vars(args).items():
LOGGER.info(f" {key} = {value}")
Loader: BaseLoader = loaders.get(args.input_type)
Runner: BaseRunner = runners.get(args.input_type)
loader = ArgParserGenerator(Loader, module_path=args.input_path).from_args(args)
runner = ArgParserGenerator(Runner).from_args(args)
LOGGER.info(f"Loading {args.input_path}")
model = loader.load(args.input_path)
with runner.init_inference(model=model) as runner_session, JsonDumpWriter(args.output_dir) as writer:
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args)
LOGGER.info(f"Data loader initialized; Running inference")
for ids, x, y_real in tqdm(dataloader_fn(), unit="batch", mininterval=10):
y_pred = runner_session(x)
data = _verify_and_format_dump(args, ids=ids, x=x, y_pred=y_pred, y_real=y_real)
writer.write(**data)
LOGGER.info(f"Inference finished")
if __name__ == "__main__":
main()
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/triton/run_inference_on_fw.py |
import logging
from pathlib import Path
from typing import Optional
import numpy as np
from PIL import Image
from rn50_model import HEIGHT, WIDTH
LOGGER = logging.getLogger(__name__)
def get_dataloader_fn(
    *, data_dir: str, batch_size: int = 1, width: int = WIDTH, height: int = HEIGHT, images_num: Optional[int] = None
):
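    """Builds a dataloader over images stored in per-class subdirectories of data_dir (directory name = integer class id)."""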
image_extensions = [".gif", ".png", ".jpeg", ".jpg"]
image_paths = sorted([p for p in Path(data_dir).rglob("*") if p.suffix.lower() in image_extensions])
if images_num is not None:
image_paths = image_paths[:images_num]
LOGGER.info(
f"Creating PIL dataloader on data_dir={data_dir} #images={len(image_paths)} "
f"image_size=({width}, {height}) batch_size={batch_size}"
)
def _dataloader_fn():
batch = []
for image_path in image_paths:
img = Image.open(image_path.as_posix()).convert('RGB')
img = img.resize((width, height))
img = np.array(img).astype(np.float32)
true_class = np.array([int(image_path.parent.name)])
assert tuple(img.shape) == (height, width, 3)
img = img[np.newaxis, ...]
batch.append((img, image_path.as_posix(), true_class))
if len(batch) >= batch_size:
ids = [image_path for _, image_path, *_ in batch]
x = {
"input": np.concatenate([img for img, *_ in batch]),
}
y_real = {"classes": np.concatenate([class_ for *_, class_ in batch])}
batch = []
yield ids, x, y_real
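            # note: images remaining after the loop (an incomplete final batch) are dropped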
return _dataloader_fn
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/triton/dataloader.py |
#!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
For models with variable-sized inputs you must provide the --input-shape argument so that perf_analyzer knows
what shape tensors to use. For example, for a model that has an input called IMAGE with shape [ 3, N, M ],
where N and M are variable-size dimensions, tell perf_analyzer to send batch-size 4 requests of shape [ 3, 224, 224 ]
with `--shape IMAGE:3,224,224`.
"""
import argparse
import csv
import os
import sys
from pathlib import Path
from typing import List, Optional
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
from .deployment_toolkit.report import save_results, show_results, sort_results
from .deployment_toolkit.warmup import warmup
def calculate_average_latency(r):
avg_sum_fields = [
"Client Send",
"Network+Server Send/Recv",
"Server Queue",
"Server Compute",
"Server Compute Input",
"Server Compute Infer",
"Server Compute Output",
"Client Recv",
]
avg_latency = sum([int(r.get(f, 0)) for f in avg_sum_fields])
return avg_latency
def update_performance_data(results: List, performance_file: str):
with open(performance_file, "r") as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
row["avg latency"] = calculate_average_latency(row)
results.append(row)
def _parse_batch_sizes(batch_sizes: str):
batches = batch_sizes.split(sep=",")
return list(map(lambda x: int(x.strip()), batches))
def online_performance(
model_name: str,
batch_sizes: List[int],
result_path: str,
input_shapes: Optional[List[str]] = None,
profiling_data: str = "random",
triton_instances: int = 1,
triton_gpu_engine_count: int = 1,
server_url: str = "localhost",
measurement_window: int = 10000,
shared_memory: bool = False
):
print("\n")
print(f"==== Dynamic batching analysis start ====")
print("\n")
input_shapes = " ".join(map(lambda shape: f" --shape {shape}", input_shapes)) if input_shapes else ""
print(f"Running performance tests for dynamic batching")
performance_file = f"triton_performance_dynamic_partial.csv"
max_batch_size = max(batch_sizes)
max_total_requests = 2 * max_batch_size * triton_instances * triton_gpu_engine_count
max_concurrency = min(256, max_total_requests)
batch_size = max(1, max_total_requests // 256)
step = max(1, max_concurrency // 32)
min_concurrency = step
exec_args = f"""-m {model_name} \
-x 1 \
-p {measurement_window} \
-v \
-i http \
-u {server_url}:8000 \
-b {batch_size} \
-f {performance_file} \
--concurrency-range {min_concurrency}:{max_concurrency}:{step} \
--input-data {profiling_data} {input_shapes}"""
if shared_memory:
exec_args += " --shared-memory=cuda"
result = os.system(f"perf_client {exec_args}")
if result != 0:
print(f"Failed running performance tests. Perf client failed with exit code {result}")
sys.exit(1)
results = list()
update_performance_data(results=results, performance_file=performance_file)
results = sort_results(results=results)
save_results(filename=result_path, data=results)
show_results(results=results)
os.remove(performance_file)
print("Performance results for dynamic batching stored in: {0}".format(result_path))
print("\n")
print(f"==== Analysis done ====")
print("\n")
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--model-name", type=str, required=True, help="Name of the model to test")
parser.add_argument(
"--input-data", type=str, required=False, default="random", help="Input data to perform profiling."
)
parser.add_argument(
"--input-shape",
action="append",
required=False,
help="Input data shape in form INPUT_NAME:<full_shape_without_batch_axis>.",
)
parser.add_argument("--batch-sizes", type=str, required=True, help="List of batch sizes to tests. Comma separated.")
parser.add_argument("--triton-instances", type=int, default=1, help="Number of Triton Server instances")
parser.add_argument(
"--number-of-model-instances", type=int, default=1, help="Number of models instances on Triton Server"
)
parser.add_argument("--result-path", type=str, required=True, help="Path where result file is going to be stored.")
parser.add_argument("--server-url", type=str, required=False, default="localhost", help="Url to Triton server")
parser.add_argument(
"--measurement-window", required=False, help="Time which perf_analyzer will wait for results", default=10000
)
parser.add_argument("--shared-memory", help="Use shared memory for communication with Triton", action="store_true",
default=False)
args = parser.parse_args()
warmup(
server_url=args.server_url,
model_name=args.model_name,
batch_sizes=_parse_batch_sizes(args.batch_sizes),
triton_instances=args.triton_instances,
triton_gpu_engine_count=args.number_of_model_instances,
profiling_data=args.input_data,
input_shapes=args.input_shape,
measurement_window=args.measurement_window,
shared_memory=args.shared_memory
)
online_performance(
server_url=args.server_url,
model_name=args.model_name,
batch_sizes=_parse_batch_sizes(args.batch_sizes),
triton_instances=args.triton_instances,
triton_gpu_engine_count=args.number_of_model_instances,
profiling_data=args.input_data,
input_shapes=args.input_shape,
result_path=args.result_path,
measurement_window=args.measurement_window,
shared_memory=args.shared_memory
)
if __name__ == "__main__":
main()
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/triton/run_online_performance_test_on_triton.py |
#!/usr/bin/env python3
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tarfile
from pathlib import Path
from typing import Tuple, Dict, List
from PIL import Image
from tqdm import tqdm
DATASETS_DIR = os.environ.get("DATASETS_DIR", None)
IMAGENET_DIRNAME = "imagenet"
IMAGE_ARCHIVE_FILENAME = "ILSVRC2012_img_val.tar"
DEVKIT_ARCHIVE_FILENAME = "ILSVRC2012_devkit_t12.tar.gz"
LABELS_REL_PATH = "ILSVRC2012_devkit_t12/data/ILSVRC2012_validation_ground_truth.txt"
META_REL_PATH = "ILSVRC2012_devkit_t12/data/meta.mat"
TARGET_SIZE = (224, 224) # (width, height)
_RESIZE_MIN = 256  # resize preserving aspect ratio so that the shorter side equals this size
def parse_meta_mat(metafile) -> Dict[int, str]:
import scipy.io
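    # each "synsets" entry is (ILSVRC2012_ID, WNID, words, gloss, num_children, children, ...);
    # keep only leaf synsets (num_children == 0), i.e. the low-level classes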
meta = scipy.io.loadmat(metafile, squeeze_me=True)["synsets"]
nums_children = list(zip(*meta))[4]
meta = [meta[idx] for idx, num_children in enumerate(nums_children) if num_children == 0]
idcs, wnids = list(zip(*meta))[:2]
idx_to_wnid = {idx: wnid for idx, wnid in zip(idcs, wnids)}
return idx_to_wnid
def _process_image(image_file, target_size):
image = Image.open(image_file)
original_size = image.size
# scale image to size where minimal size is _RESIZE_MIN
scale_factor = max(_RESIZE_MIN / original_size[0], _RESIZE_MIN / original_size[1])
resize_to = int(original_size[0] * scale_factor), int(original_size[1] * scale_factor)
resized_image = image.resize(resize_to)
# central crop of image to target_size
left, upper = (resize_to[0] - target_size[0]) // 2, (resize_to[1] - target_size[1]) // 2
cropped_image = resized_image.crop((left, upper, left + target_size[0], upper + target_size[1]))
return cropped_image
def main():
import argparse
parser = argparse.ArgumentParser(description="short_description")
parser.add_argument(
"--dataset-dir",
help="Path to dataset directory where imagenet archives are stored and processed files will be saved.",
required=False,
default=DATASETS_DIR,
)
parser.add_argument(
"--target-size",
help="Size of target image. Format it as <width>,<height>.",
required=False,
default=",".join(map(str, TARGET_SIZE)),
)
args = parser.parse_args()
if args.dataset_dir is None:
raise ValueError(
"Please set $DATASETS_DIR env variable to point dataset dir with original dataset archives "
"and where processed files should be stored. Alternatively provide --dataset-dir CLI argument"
)
datasets_dir = Path(args.dataset_dir)
target_size = tuple(map(int, args.target_size.split(",")))
image_archive_path = datasets_dir / IMAGE_ARCHIVE_FILENAME
if not image_archive_path.exists():
raise RuntimeError(
f"There should be {IMAGE_ARCHIVE_FILENAME} file in {datasets_dir}."
f"You need to download the dataset from http://www.image-net.org/download."
)
devkit_archive_path = datasets_dir / DEVKIT_ARCHIVE_FILENAME
if not devkit_archive_path.exists():
raise RuntimeError(
f"There should be {DEVKIT_ARCHIVE_FILENAME} file in {datasets_dir}."
f"You need to download the dataset from http://www.image-net.org/download."
)
with tarfile.open(devkit_archive_path, mode="r") as devkit_archive_file:
labels_file = devkit_archive_file.extractfile(LABELS_REL_PATH)
labels = list(map(int, labels_file.readlines()))
# map validation labels (idxes from LABELS_REL_PATH) into WNID compatible with training set
meta_file = devkit_archive_file.extractfile(META_REL_PATH)
idx_to_wnid = parse_meta_mat(meta_file)
labels_wnid = [idx_to_wnid[idx] for idx in labels]
# remap WNID into index in sorted list of all WNIDs - this is how network outputs class
available_wnids = sorted(set(labels_wnid))
wnid_to_newidx = {wnid: new_cls for new_cls, wnid in enumerate(available_wnids)}
labels = [wnid_to_newidx[wnid] for wnid in labels_wnid]
output_dir = datasets_dir / IMAGENET_DIRNAME
with tarfile.open(image_archive_path, mode="r") as image_archive_file:
image_rel_paths = sorted(image_archive_file.getnames())
for cls, image_rel_path in tqdm(zip(labels, image_rel_paths), total=len(image_rel_paths)):
output_path = output_dir / str(cls) / image_rel_path
original_image_file = image_archive_file.extractfile(image_rel_path)
processed_image = _process_image(original_image_file, target_size)
output_path.parent.mkdir(parents=True, exist_ok=True)
processed_image.save(output_path.as_posix())
if __name__ == "__main__":
main()
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/triton/process_dataset.py |
import logging
import tensorflow as tf
from utils import data_utils
LOGGER = logging.getLogger(__name__)
NCLASSES = 1001
WIDTH = 224
HEIGHT = 224
NCHANNELS = 3
INPUT_FORMAT = "NHWC"
COMPUTE_FORMAT = "NHWC"
def get_model(
*,
model_dir: str,
arch: str = "resnet50",
precision: str = "fp32",
use_xla: bool = True,
use_tf_amp: bool = False,
use_dali: bool = False,
gpu_memory_fraction=0.7,
):
from runtime import Runner
from utils import hvd_wrapper as hvd
hvd.init()
try:
dtype = {"fp16": tf.float16, "fp32": tf.float32}[precision.lower()]
except KeyError:
raise ValueError(f"Uknown precision {precision}. Allowed values: fp16|fp32")
LOGGER.info(
f"Creating model arch={arch} precision={precision} xla={use_xla}"
f"tf_amp={use_tf_amp}, dali={use_dali}, gpu_memory_frac={gpu_memory_fraction}"
)
runner = Runner(
n_classes=NCLASSES,
architecture=arch,
input_format=INPUT_FORMAT,
compute_format=COMPUTE_FORMAT,
dtype=dtype,
n_channels=NCHANNELS,
height=HEIGHT,
width=WIDTH,
use_xla=use_xla,
use_tf_amp=use_tf_amp,
use_dali=use_dali,
gpu_memory_fraction=gpu_memory_fraction,
gpu_id=0,
model_dir=model_dir,
)
# removed params not used in inference
estimator_params = {"use_final_conv": False} # TODO: Why not moved to model constructor?
estimator = runner._get_estimator(
mode="inference",
run_params=estimator_params,
use_xla=use_xla,
use_dali=use_dali,
gpu_memory_fraction=gpu_memory_fraction,
)
return estimator
def get_serving_input_receiver_fn(
batch_size: int = None,
input_dtype: str = "fp32",
width: int = WIDTH,
height: int = HEIGHT,
nchannels: int = NCHANNELS,
):
input_dtype = tf.float16 if input_dtype and "16" in input_dtype else tf.float32
serving_input_receiver_fn = data_utils.get_serving_input_receiver_fn(
batch_size=batch_size,
height=height,
width=width,
num_channels=nchannels,
data_format=INPUT_FORMAT,
dtype=input_dtype,
)
return serving_input_receiver_fn
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/triton/rn50_model.py |
#!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
The `convert_model.py` script allows conversion between model formats, with additional model optimizations
for faster inference.
It converts the model obtained from the `get_model` function.
Currently supported input and output formats are:
- inputs
- `tf-estimator` - `get_model` function returning Tensorflow Estimator
- `tf-keras` - `get_model` function returning Tensorflow Keras Model
- `tf-savedmodel` - Tensorflow SavedModel binary
- `pyt` - `get_model` function returning PyTorch Module
- output
- `tf-savedmodel` - Tensorflow saved model
- `tf-trt` - TF-TRT saved model
- `ts-trace` - PyTorch traced ScriptModule
- `ts-script` - PyTorch scripted ScriptModule
- `onnx` - ONNX
- `trt` - TensorRT plan file
For tf-keras input you can use:
- the `--large-model` flag - helps load models which exceed the maximum protobuf size of 2GB
- the `--tf-allow-growth` flag - controls the GPU memory growth feature
(https://www.tensorflow.org/guide/gpu#limiting_gpu_memory_growth). By default it is disabled.
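Example call (illustrative; additional loader- and converter-specific flags depend on the chosen formats):
```shell script
python ./triton/convert_model.py \
    --input-path ./triton/rn50_model.py \
    --input-type tf-estimator \
    --output-path /results/models/resnet50.savedmodel \
    --output-type tf-savedmodel
```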
"""
import argparse
import logging
import os
from pathlib import Path
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
os.environ["TF_ENABLE_DEPRECATION_WARNINGS"] = "1"
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = Path(__file__).parent.name
from .deployment_toolkit.args import ArgParserGenerator
from .deployment_toolkit.core import (
DATALOADER_FN_NAME,
BaseConverter,
BaseLoader,
BaseSaver,
Format,
Precision,
load_from_file,
)
from .deployment_toolkit.extensions import converters, loaders, savers
LOGGER = logging.getLogger("convert_model")
INPUT_MODEL_TYPES = [Format.TF_ESTIMATOR, Format.TF_KERAS, Format.TF_SAVEDMODEL, Format.PYT]
OUTPUT_MODEL_TYPES = [Format.TF_SAVEDMODEL, Format.TF_TRT, Format.ONNX, Format.TRT, Format.TS_TRACE, Format.TS_SCRIPT]
def _get_args():
parser = argparse.ArgumentParser(description="Script for conversion between model formats.", allow_abbrev=False)
parser.add_argument("--input-path", help="Path to input model file (python module or binary file)", required=True)
parser.add_argument(
"--input-type", help="Input model type", choices=[f.value for f in INPUT_MODEL_TYPES], required=True
)
parser.add_argument("--output-path", help="Path to output model file", required=True)
parser.add_argument(
"--output-type", help="Output model type", choices=[f.value for f in OUTPUT_MODEL_TYPES], required=True
)
parser.add_argument("--dataloader", help="Path to python module containing data loader")
parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False)
parser.add_argument(
"--ignore-unknown-parameters",
help="Ignore unknown parameters (argument often used in CI where set of arguments is constant)",
action="store_true",
default=False,
)
args, unparsed_args = parser.parse_known_args()
Loader: BaseLoader = loaders.get(args.input_type)
ArgParserGenerator(Loader, module_path=args.input_path).update_argparser(parser)
converter_name = f"{args.input_type}--{args.output_type}"
Converter: BaseConverter = converters.get(converter_name)
if Converter is not None:
ArgParserGenerator(Converter).update_argparser(parser)
Saver: BaseSaver = savers.get(args.output_type)
ArgParserGenerator(Saver).update_argparser(parser)
if args.dataloader is not None:
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
ArgParserGenerator(get_dataloader_fn).update_argparser(parser)
if args.ignore_unknown_parameters:
args, unknown_args = parser.parse_known_args()
LOGGER.warning(f"Got additional args {unknown_args}")
else:
args = parser.parse_args()
return args
def main():
args = _get_args()
log_level = logging.INFO if not args.verbose else logging.DEBUG
log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
logging.basicConfig(level=log_level, format=log_format)
LOGGER.info(f"args:")
for key, value in vars(args).items():
LOGGER.info(f" {key} = {value}")
requested_model_precision = Precision(args.precision)
dataloader_fn = None
# if conversion is required, temporary change model load precision to that required by converter
# it is for TensorRT converters which require fp32 models for all requested precisions
converter_name = f"{args.input_type}--{args.output_type}"
Converter: BaseConverter = converters.get(converter_name)
if Converter:
args.precision = Converter.required_source_model_precision(requested_model_precision).value
Loader: BaseLoader = loaders.get(args.input_type)
loader = ArgParserGenerator(Loader, module_path=args.input_path).from_args(args)
model = loader.load(args.input_path)
LOGGER.info("inputs: %s", model.inputs)
LOGGER.info("outputs: %s", model.outputs)
if Converter: # if conversion is needed
        # dataloader must match source model precision - so not recovering it yet
if args.dataloader is not None:
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args)
# recover precision to that requested by user
args.precision = requested_model_precision.value
if Converter:
converter = ArgParserGenerator(Converter).from_args(args)
model = converter.convert(model, dataloader_fn=dataloader_fn)
Saver: BaseSaver = savers.get(args.output_type)
saver = ArgParserGenerator(Saver).from_args(args)
saver.save(model, args.output_path)
return 0
if __name__ == "__main__":
main()
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/triton/convert_model.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | DeepLearningExamples-master | TensorFlow/Classification/ConvNets/triton/deployment_toolkit/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import importlib
import logging
import os
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union
import numpy as np
LOGGER = logging.getLogger(__name__)
DATALOADER_FN_NAME = "get_dataloader_fn"
GET_MODEL_FN_NAME = "get_model"
GET_SERVING_INPUT_RECEIVER_FN = "get_serving_input_receiver_fn"
GET_ARGPARSER_FN_NAME = "update_argparser"
class TensorSpec(NamedTuple):
name: str
dtype: str
shape: Tuple
class Parameter(Enum):
def __lt__(self, other: "Parameter") -> bool:
return self.value < other.value
def __str__(self):
return self.value
class Accelerator(Parameter):
AMP = "amp"
NONE = "none"
TRT = "trt"
class Precision(Parameter):
FP16 = "fp16"
FP32 = "fp32"
TF32 = "tf32" # Deprecated
class Format(Parameter):
TF_GRAPHDEF = "tf-graphdef"
TF_SAVEDMODEL = "tf-savedmodel"
TF_TRT = "tf-trt"
TF_ESTIMATOR = "tf-estimator"
TF_KERAS = "tf-keras"
ONNX = "onnx"
TRT = "trt"
TS_SCRIPT = "ts-script"
TS_TRACE = "ts-trace"
PYT = "pyt"
class Model(NamedTuple):
handle: object
precision: Optional[Precision]
inputs: Dict[str, TensorSpec]
outputs: Dict[str, TensorSpec]
def load_from_file(file_path, label, target):
spec = importlib.util.spec_from_file_location(name=label, location=file_path)
my_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(my_module) # pytype: disable=attribute-error
return getattr(my_module, target, None)
class BaseLoader(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def load(self, model_path: Union[str, Path], **kwargs) -> Model:
"""
Loads and process model from file based on given set of args
"""
pass
class BaseSaver(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def save(self, model: Model, model_path: Union[str, Path]) -> None:
"""
Save model to file
"""
pass
class BaseRunner(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def init_inference(self, model: Model):
raise NotImplementedError
class BaseRunnerSession(abc.ABC):
def __init__(self, model: Model):
self._model = model
@abc.abstractmethod
def __enter__(self):
raise NotImplementedError()
@abc.abstractmethod
def __exit__(self, exc_type, exc_value, traceback):
raise NotImplementedError()
@abc.abstractmethod
def __call__(self, x: Dict[str, object]):
raise NotImplementedError()
def _set_env_variables(self) -> Dict[str, object]:
"""this method not remove values; fix it if needed"""
to_set = {}
old_values = {k: os.environ.pop(k, None) for k in to_set}
os.environ.update(to_set)
return old_values
def _recover_env_variables(self, old_envs: Dict[str, object]):
for name, value in old_envs.items():
if value is None:
del os.environ[name]
else:
os.environ[name] = str(value)
class BaseConverter(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def convert(self, model: Model, dataloader_fn) -> Model:
raise NotImplementedError()
@staticmethod
def required_source_model_precision(requested_model_precision: Precision) -> Precision:
return requested_model_precision
class BaseMetricsCalculator(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
def calc(
self,
*,
ids: List[Any],
y_pred: Dict[str, np.ndarray],
x: Optional[Dict[str, np.ndarray]],
y_real: Optional[Dict[str, np.ndarray]],
) -> Dict[str, float]:
"""
Calculates error/accuracy metrics
Args:
ids: List of ids identifying each sample in the batch
y_pred: model output as dict where key is output name and value is output value
x: model input as dict where key is input name and value is input value
y_real: input ground truth as dict where key is output name and value is output value
Returns:
dictionary where key is metric name and value is its value
"""
pass
@abc.abstractmethod
def update(
self,
ids: List[Any],
y_pred: Dict[str, np.ndarray],
x: Optional[Dict[str, np.ndarray]],
y_real: Optional[Dict[str, np.ndarray]],
):
pass
@property
@abc.abstractmethod
def metrics(self) -> Dict[str, Any]:
pass
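# min/opt/max shapes in the style of TensorRT optimization profiles for dynamic-shape models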
class ShapeSpec(NamedTuple):
min: Tuple
opt: Tuple
max: Tuple
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/triton/deployment_toolkit/core.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import json
import pickle
import threading
from pathlib import Path
from typing import Dict, Iterator, List, Union
import numpy as np
MB2B = 2 ** 20
B2MB = 1 / MB2B
FLUSH_THRESHOLD_B = 256 * MB2B  # flush cached batches to disk once any prefix buffer exceeds this size
def _validate_batch(name: str, value: Union[list, np.ndarray]):
if not isinstance(value, (list, np.ndarray)):
raise ValueError(f"Values shall be lists or np.ndarrays; current type {type(value)}")
def _validate_prefix_data(prefix_data: Dict[str, List[np.ndarray]]):
batch_sizes_per_io_name = {name: [len(batch) for batch in batches] for name, batches in prefix_data.items()}
names = list(batch_sizes_per_io_name)
for io_name in names:
for batch_idx, batch_size in enumerate(batch_sizes_per_io_name[io_name]):
if not all([batch_sizes_per_io_name[other_name][batch_idx] == batch_size for other_name in names]):
non_equal_batch_sizes = {
other_name: batch_sizes_per_io_name[other_name][batch_idx] for other_name in names
}
non_equal_batch_sizes_str = ", ".join(
[f"{name}={batch_size}" for name, batch_size in non_equal_batch_sizes.items()]
)
raise ValueError(
"All inputs/outputs should have same number of batches with equal batch_size. "
f"At batch_idx={batch_idx} there are batch_sizes: {non_equal_batch_sizes_str}"
)
    # the checks above ensure each io has the same number of batches with equal batch sizes
def _get_nitems_and_batches(prefix_data: Dict[str, List[np.ndarray]]):
nitems = 0
nbatches = 0
if prefix_data:
nitems_per_io_name = {name: sum(len(batch) for batch in batches) for name, batches in prefix_data.items()}
nbatches_per_io_name = {name: len(batches) for name, batches in prefix_data.items()}
nitems = list(nitems_per_io_name.values())[0]
nbatches = list(nbatches_per_io_name.values())[0]
return nitems, nbatches
class BaseDumpWriter(abc.ABC):
FILE_SUFFIX = ".abstract"
def __init__(self, output_dir: Union[str, Path]):
self._output_dir = Path(output_dir)
# outer dict key is prefix (i.e. input/output/labels/...), inner dict key is input/output name
# list is list of batches
self._items_cache: Dict[str, Dict[str, List[np.ndarray]]] = {}
# key is prefix
self._items_counters: Dict[str, int] = {}
self._cache_lock = threading.RLock()
self._flush_threshold_b = FLUSH_THRESHOLD_B
@property
def cache_size(self):
def _get_bytes_size(name, batch):
_validate_batch(name, batch)
if not isinstance(batch, np.ndarray):
                batch = np.array(batch)
return batch.nbytes
with self._cache_lock:
return {
prefix: sum(_get_bytes_size(name, batch) for name, batches in data.items() for batch in batches)
for prefix, data in self._items_cache.items()
}
def _append_to_cache(self, prefix, prefix_data):
if prefix_data is None:
return
if not isinstance(prefix_data, dict):
raise ValueError(f"{prefix} data to store shall be dict")
with self._cache_lock:
cached_prefix_data = self._items_cache.setdefault(prefix, {})
for name, batch in prefix_data.items():
_validate_batch(name, batch)
if not isinstance(batch, np.ndarray):
batch = np.array(batch)
cached_batches = cached_prefix_data.setdefault(name, [])
cached_batches += [batch]
def write(self, **kwargs):
with self._cache_lock:
for prefix, prefix_data in kwargs.items():
self._append_to_cache(prefix, prefix_data)
biggest_prefix_data_size = max(self.cache_size.values())
if biggest_prefix_data_size > self._flush_threshold_b:
self.flush()
def flush(self):
with self._cache_lock:
for prefix, prefix_data in self._items_cache.items():
_validate_prefix_data(prefix_data)
output_path = self._output_dir / self._get_filename(prefix)
self._dump(prefix_data, output_path)
nitems, nbatches = _get_nitems_and_batches(prefix_data)
self._items_counters[prefix] += nitems
self._items_cache = {}
def _get_filename(self, prefix):
idx = self._items_counters.setdefault(prefix, 0)
return f"{prefix}-{idx:012d}{self.FILE_SUFFIX}"
@abc.abstractmethod
def _dump(self, prefix_data: Dict[str, List[np.ndarray]], output_path: Path):
pass
def __enter__(self):
if self._output_dir.exists() and len(list(self._output_dir.iterdir())):
raise ValueError(f"{self._output_dir.as_posix()} is not empty")
self._output_dir.mkdir(parents=True, exist_ok=True)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.flush()
class PickleDumpWriter(BaseDumpWriter):
FILE_SUFFIX = ".pkl"
def _dump(self, prefix_data: Dict[str, List[np.ndarray]], output_path: Path):
output_path.parent.mkdir(parents=True, exist_ok=True)
with output_path.open("wb") as pickle_file:
pickle.dump(prefix_data, pickle_file)
class JsonDumpWriter(BaseDumpWriter):
FILE_SUFFIX = ".json"
def _dump(self, prefix_data: Dict[str, List[np.ndarray]], output_path: Path):
repacked_prefix_data = self._format_data(prefix_data)
output_path.parent.mkdir(parents=True, exist_ok=True)
with output_path.open("w") as json_file:
json.dump(repacked_prefix_data, json_file)
def _format_data(self, prefix_data: Dict[str, List[np.ndarray]]) -> Dict:
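        # produces {"data": [{io_name: {"content": [...], "shape": [...], "dtype": "..."}, ...}, ...]},
        # i.e. the JSON input format accepted by perf_analyzer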
def _format_batch_for_perf_analyzer_json_format(batch: np.ndarray):
return {
"content": batch.flatten().tolist(),
"shape": list(batch.shape),
"dtype": str(batch.dtype),
}
_, nbatches = _get_nitems_and_batches(prefix_data)
batches = [{} for _ in range(nbatches)]
for io_name, batches_per_io in prefix_data.items():
for batch_idx, batch in enumerate(batches_per_io):
batches[batch_idx][io_name] = _format_batch_for_perf_analyzer_json_format(batch)
return {"data": batches}
class BaseDumpReader(abc.ABC):
FILE_SUFFIX = ".abstract"
def __init__(self, dump_dir: Union[Path, str]):
self._dump_dir = Path(dump_dir)
def get(self, prefix: str) -> Iterator[Dict[str, np.ndarray]]:
dump_files_paths = sorted(self._dump_dir.glob(f"{prefix}*{self.FILE_SUFFIX}"))
for dump_file_path in dump_files_paths:
prefix_data = self._load_file(dump_file_path)
nitems, nbatches = _get_nitems_and_batches(prefix_data)
for batch_idx in range(nbatches):
yield {io_name: prefix_data[io_name][batch_idx] for io_name in prefix_data}
@abc.abstractmethod
def _load_file(self, dump_file_path: Path) -> Dict[str, List[np.ndarray]]:
pass
    def iterate_over(self, prefix_list: List[str]) -> Iterator:
        # zip the per-prefix iterators; prefixes with fewer batches contribute None
        # placeholders until all iterators are exhausted
iterators = [self.get(prefix) for prefix in prefix_list]
empty_iterators = [False] * len(iterators)
while not all(empty_iterators):
values = [None] * len(iterators)
for idx, iterator in enumerate(iterators):
if empty_iterators[idx]:
continue
try:
values[idx] = next(iterator)
except StopIteration:
empty_iterators[idx] = True
            if all(empty_iterators):
                break
            yield values
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
class PickleDumpReader(BaseDumpReader):
FILE_SUFFIX = ".pkl"
def _load_file(self, dump_file_path: Path) -> Dict[str, List[np.ndarray]]:
with dump_file_path.open("rb") as pickle_file:
return pickle.load(pickle_file)
class JsonDumpReader(BaseDumpReader):
FILE_SUFFIX = ".json"
def _load_file(self, dump_file_path: Path) -> Dict[str, List[np.ndarray]]:
with dump_file_path.open("rb") as json_file:
data = json.load(json_file)
return self._repack_data(data)
def _repack_data(self, data: Dict) -> Dict[str, List[np.ndarray]]:
result: Dict[str, List[np.ndarray]] = {}
batches = data["data"]
for batch in batches:
for io_name, batch_as_dict in batch.items():
io_batches = result.setdefault(io_name, [])
flat_array = batch_as_dict["content"]
shape = batch_as_dict["shape"]
dtype = batch_as_dict["dtype"]
batch_as_array = np.array(flat_array).reshape(shape).astype(dtype)
io_batches.append(batch_as_array)
return result
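# Example (illustrative sketch, not executed): reading the dumped batches back, e.g. to
# pair inputs with outputs batch by batch; exhausted prefixes yield None placeholders:
#
#   with JsonDumpReader("/results/dump") as reader:
#       for inputs_batch, outputs_batch in reader.iterate_over(["inputs", "outputs"]):
#           image = inputs_batch["image"]  # np.ndarray restored from {"content", "shape", "dtype"}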
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/triton/deployment_toolkit/dump.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import logging
import re
from pathlib import Path
from typing import List
LOGGER = logging.getLogger(__name__)
class ExtensionManager:
def __init__(self, name: str):
self._name = name
self._registry = {}
def register_extension(self, extension: str, clazz):
already_registered_class = self._registry.get(extension, None)
if already_registered_class and already_registered_class.__module__ != clazz.__module__:
raise RuntimeError(
f"Conflicting extension {self._name}/{extension}; "
f"{already_registered_class.__module__}.{already_registered_class.__name} "
f"and "
f"{clazz.__module__}.{clazz.__name__}"
)
elif already_registered_class is None:
clazz_full_name = f"{clazz.__module__}.{clazz.__name__}" if clazz is not None else "None"
LOGGER.debug(f"Registering extension {self._name}/{extension}: {clazz_full_name}")
self._registry[extension] = clazz
def get(self, extension):
if extension not in self._registry:
raise RuntimeError(f"Missing extension {self._name}/{extension}")
return self._registry[extension]
@property
def supported_extensions(self):
return list(self._registry)
@staticmethod
def scan_for_extensions(extension_dirs: List[Path]):
register_pattern = r".*\.register_extension\(.*"
for extension_dir in extension_dirs:
for python_path in extension_dir.rglob("*.py"):
if not python_path.is_file():
continue
payload = python_path.read_text()
if re.findall(register_pattern, payload):
import_path = python_path.relative_to(toolkit_root_dir.parent)
                    package = import_path.parent.as_posix().replace("/", ".")
package_with_module = f"{package}.{import_path.stem}"
spec = importlib.util.spec_from_file_location(name=package_with_module, location=python_path)
my_module = importlib.util.module_from_spec(spec)
my_module.__package__ = package
try:
spec.loader.exec_module(my_module) # pytype: disable=attribute-error
except ModuleNotFoundError as e:
LOGGER.error(
f"Could not load extensions from {import_path} due to missing python packages; {e}"
)
runners = ExtensionManager("runners")
loaders = ExtensionManager("loaders")
savers = ExtensionManager("savers")
converters = ExtensionManager("converters")
toolkit_root_dir = (Path(__file__).parent / "..").resolve()
ExtensionManager.scan_for_extensions([toolkit_root_dir])
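# Example (illustrative sketch): a plugin module only needs to contain a literal
# `.register_extension(` call in its source for scan_for_extensions to import it:
#
#   from ..extensions import runners  # the import path depends on the plugin's location
#
#   class MyRunner:
#       ...
#
#   runners.register_extension("my-format", MyRunner)
#   assert "my-format" in runners.supported_extensions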
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/triton/deployment_toolkit/extensions.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from typing import List, Optional
def warmup(
model_name: str,
batch_sizes: List[int],
triton_gpu_engine_count: int = 1,
triton_instances: int = 1,
profiling_data: str = "random",
input_shapes: Optional[List[str]] = None,
server_url: str = "localhost",
measurement_window: int = 10000,
shared_memory: bool = False
):
print("\n")
print(f"==== Warmup start ====")
print("\n")
input_shapes = " ".join(map(lambda shape: f" --shape {shape}", input_shapes)) if input_shapes else ""
measurement_window = 6 * measurement_window
max_batch_size = max(batch_sizes)
max_total_requests = 2 * max_batch_size * triton_instances * triton_gpu_engine_count
max_concurrency = min(256, max_total_requests)
batch_size = max(1, max_total_requests // 256)
step = max(1, max_concurrency // 2)
min_concurrency = step
exec_args = f"""-m {model_name} \
-x 1 \
-p {measurement_window} \
-v \
-i http \
-u {server_url}:8000 \
-b {batch_size} \
--concurrency-range {min_concurrency}:{max_concurrency}:{step} \
--input-data {profiling_data} {input_shapes}"""
if shared_memory:
exec_args += " --shared-memory=cuda"
result = os.system(f"perf_client {exec_args}")
if result != 0:
print(f"Failed running performance tests. Perf client failed with exit code {result}")
sys.exit(1)
print("\n")
print(f"==== Warmup done ====")
print("\n")
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/triton/deployment_toolkit/warmup.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import inspect
import logging
from typing import Any, Callable, Dict, Optional, Union
from .core import GET_ARGPARSER_FN_NAME, load_from_file
LOGGER = logging.getLogger(__name__)
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
def filter_fn_args(args: Union[dict, argparse.Namespace], fn: Callable) -> dict:
signature = inspect.signature(fn)
parameters_names = list(signature.parameters)
if isinstance(args, argparse.Namespace):
args = vars(args)
args = {k: v for k, v in args.items() if k in parameters_names}
return args
def add_args_for_fn_signature(parser, fn) -> argparse.ArgumentParser:
parser.conflict_handler = "resolve"
signature = inspect.signature(fn)
for parameter in signature.parameters.values():
if parameter.name in ["self", "args", "kwargs"]:
continue
argument_kwargs = {}
if parameter.annotation != inspect.Parameter.empty:
if parameter.annotation == bool:
argument_kwargs["type"] = str2bool
argument_kwargs["choices"] = [0, 1]
elif isinstance(parameter.annotation, type(Optional[Any])):
types = [type_ for type_ in parameter.annotation.__args__ if not isinstance(None, type_)]
if len(types) != 1:
raise RuntimeError(
f"Could not prepare argument parser for {parameter.name}: {parameter.annotation} in {fn}"
)
argument_kwargs["type"] = types[0]
else:
argument_kwargs["type"] = parameter.annotation
if parameter.default != inspect.Parameter.empty:
if parameter.annotation == bool:
argument_kwargs["default"] = str2bool(parameter.default)
else:
argument_kwargs["default"] = parameter.default
else:
argument_kwargs["required"] = True
name = parameter.name.replace("_", "-")
LOGGER.debug(f"Adding argument {name} with {argument_kwargs}")
parser.add_argument(f"--{name}", **argument_kwargs)
return parser
class ArgParserGenerator:
def __init__(self, cls_or_fn, module_path: Optional[str] = None):
self._cls_or_fn = cls_or_fn
self._handle = cls_or_fn if inspect.isfunction(cls_or_fn) else getattr(cls_or_fn, "__init__")
input_is_python_file = module_path and module_path.endswith(".py")
self._input_path = module_path if input_is_python_file else None
self._required_fn_name_for_signature_parsing = getattr(
cls_or_fn, "required_fn_name_for_signature_parsing", None
)
def update_argparser(self, parser):
name = self._handle.__name__
group_parser = parser.add_argument_group(name)
add_args_for_fn_signature(group_parser, fn=self._handle)
self._update_argparser(group_parser)
def get_args(self, args: argparse.Namespace):
filtered_args = filter_fn_args(args, fn=self._handle)
tmp_parser = argparse.ArgumentParser(allow_abbrev=False)
self._update_argparser(tmp_parser)
custom_names = [
p.dest.replace("-", "_") for p in tmp_parser._actions if not isinstance(p, argparse._HelpAction)
]
custom_params = {n: getattr(args, n) for n in custom_names}
filtered_args = {**filtered_args, **custom_params}
return filtered_args
def from_args(self, args: Union[argparse.Namespace, Dict]):
args = self.get_args(args)
LOGGER.info(f"Initializing {self._cls_or_fn.__name__}({args})")
return self._cls_or_fn(**args)
def _update_argparser(self, parser):
label = "argparser_update"
if self._input_path:
update_argparser_handle = load_from_file(self._input_path, label=label, target=GET_ARGPARSER_FN_NAME)
if update_argparser_handle:
update_argparser_handle(parser)
elif self._required_fn_name_for_signature_parsing:
fn_handle = load_from_file(
self._input_path, label=label, target=self._required_fn_name_for_signature_parsing
)
if fn_handle:
add_args_for_fn_signature(parser, fn_handle)
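# Example (illustrative sketch): CLI flags are generated from a function signature, so the
# hypothetical loader below gets a required --data-dir and an optional --batch-size flag:
#
#   def get_dataloader_fn(data_dir: str, batch_size: int = 32):
#       ...
#
#   parser = argparse.ArgumentParser()
#   ArgParserGenerator(get_dataloader_fn).update_argparser(parser)
#   args = parser.parse_args(["--data-dir", "/data", "--batch-size", "64"])
#   dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args)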
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/triton/deployment_toolkit/args.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import re
from typing import Dict, List
from natsort import natsorted
from tabulate import tabulate
def sort_results(results: List):
    results = natsorted(results, key=lambda item: list(item.values()))
return results
def save_results(filename: str, data: List, formatted: bool = False):
data = format_data(data=data) if formatted else data
with open(filename, "a") as csvfile:
fieldnames = data[0].keys()
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for row in data:
writer.writerow(row)
def format_data(data: List[Dict]) -> List[Dict]:
formatted_data = list()
for item in data:
formatted_item = format_keys(data=item)
formatted_data.append(formatted_item)
return formatted_data
def format_keys(data: Dict) -> Dict:
keys = {format_key(key=key): value for key, value in data.items()}
return keys
def format_key(key: str) -> str:
key = " ".join([k.capitalize() for k in re.split("_| ", key)])
return key
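# Example: format_key("avg_latency_ms") and format_key("avg latency ms") both
# return "Avg Latency Ms".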
def show_results(results: List[Dict]):
headers = list(results[0].keys())
    summary = [list(item.values()) for item in results]
print(tabulate(summary, headers=headers))
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/triton/deployment_toolkit/report.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from pathlib import Path
from typing import Dict, Iterable, Optional, Tuple, Union
import numpy as np
# pytype: disable=import-error
import tensorflow as tf
from tensorflow.python.eager import wrap_function
from tf2onnx.shape_inference import infer_shape
from tf2onnx.tf_loader import (
freeze_session,
from_function,
inputs_without_resource,
is_function,
remove_redundant_inputs,
tf_optimize,
)
# pytype: enable=import-error
from ..args import filter_fn_args
from ..core import (
GET_MODEL_FN_NAME,
GET_SERVING_INPUT_RECEIVER_FN,
BaseConverter,
BaseLoader,
BaseRunner,
BaseRunnerSession,
BaseSaver,
Format,
Model,
Precision,
TensorSpec,
load_from_file,
)
from ..extensions import converters, loaders, runners, savers
from .utils import infer_precision
LOGGER = logging.getLogger(__name__)
def is_tf2():
return tf.__version__.startswith("2.")
def create_session_config(*, allow_growth=False, use_xla=False, gpu_memory_fraction=1.0):
gpu_options = tf.compat.v1.GPUOptions(
per_process_gpu_memory_fraction=gpu_memory_fraction, allow_growth=allow_growth
)
config = tf.compat.v1.ConfigProto(gpu_options=gpu_options)
if use_xla:
        config.graph_options.optimizer_options.global_jit_level = tf.compat.v1.OptimizerOptions.ON_1
LOGGER.debug(
f"Using gpu memory fraction: allow_growth={allow_growth} "
f"gpu_memory_fraction={gpu_memory_fraction} "
f"use_xla={use_xla}"
)
return config
class TFTRTConverter(BaseConverter):
def __init__(
self,
*,
is_dynamic_op: bool = False,
minimum_segment_size: int = 3,
max_batch_size: int = 1,
max_workspace_size: int = (4 << 30) - 1000, # ~3.999GB
maximum_cached_engines: int = 1000,
precision: str,
):
self._is_dynamic_op = is_dynamic_op
self._minimum_segment_size = minimum_segment_size
self._max_batch_size = max_batch_size
self._max_workspace_size = max_workspace_size
self._maximum_cached_engines = maximum_cached_engines
self._precision = Precision(precision)
def convert(self, model: Model, dataloader_fn) -> Model:
# https://docs.nvidia.com/deeplearning/frameworks/tf-trt-user-guide/index.html
# converting graph_def is not supported in TF2
from tensorflow.python.compiler.tensorrt import trt_convert # pytype: disable=import-error
assert isinstance(model.handle, tf.compat.v1.GraphDef)
session_config = create_session_config(allow_growth=True)
output_node_names = [spec.name.split(":")[0] for spec in model.outputs.values()]
converter = trt_convert.TrtGraphConverter(
input_graph_def=model.handle,
session_config=session_config,
nodes_blacklist=output_node_names,
is_dynamic_op=self._is_dynamic_op,
precision_mode=self._precision.value,
max_workspace_size_bytes=self._max_workspace_size,
maximum_cached_engines=self._maximum_cached_engines,
max_batch_size=self._max_batch_size,
minimum_segment_size=self._minimum_segment_size,
)
graph_def = converter.convert()
return model._replace(handle=graph_def)
@staticmethod
def required_source_model_precision(requested_model_precision: Precision) -> Precision:
# TensorRT requires source models to be in FP32 precision
return Precision.FP32
def _from_saved_model_v1(sess, model_path, tag, signatures):
"""
    Load a TensorFlow graph from a saved_model.
    NOTICE: modified version of the loader from the tf2onnx project
"""
wrn_no_tag = "'--tag' not specified for saved_model. Using --tag serve"
wrn_empty_tag = "'--tag' value is empty string. Using tag =[[]]"
if tag is None:
tag = [tf.saved_model.SERVING]
LOGGER.warning(wrn_no_tag)
if tag == "":
tag = [[]]
LOGGER.warning(wrn_empty_tag)
if not isinstance(tag, list):
tag = [tag]
imported = tf.compat.v1.saved_model.loader.load(sess, tag, model_path)
for k in imported.signature_def.keys():
if k.startswith("_"):
# consider signatures starting with '_' private
continue
signatures.append(k)
try:
from tensorflow.contrib.saved_model.python.saved_model import ( # pytype: disable=import-error
signature_def_utils,
)
# pylint: disable=unnecessary-lambda
get_signature_def = lambda meta_graph_def, k: signature_def_utils.get_signature_def_by_key(meta_graph_def, k)
except ImportError:
# TF1.12 changed the api
get_signature_def = lambda meta_graph_def, k: meta_graph_def.signature_def[k]
inputs = {}
outputs = {}
for k in signatures:
inputs_tensor_info = get_signature_def(imported, k).inputs
for name, input_tensor in inputs_tensor_info.items():
inputs[name] = input_tensor.name
outputs_tensor_info = get_signature_def(imported, k).outputs
for name, output_tensor in outputs_tensor_info.items():
outputs[name] = output_tensor.name
frozen_graph = freeze_session(sess, input_names=list(inputs.values()), output_names=list(outputs.values()))
return frozen_graph, inputs, outputs
def _infer_model_precision(
tf_graph: tf.compat.v1.GraphDef, inputs_dict: Dict[str, TensorSpec], outputs_dict: Dict[str, TensorSpec]
) -> Optional[Precision]:
import networkx as nx
def _get_dtype(node_def):
node_type = node_def.attr.get("T", None) or node_def.attr.get("dtype", None)
if node_type:
if node_type.list.type:
assert len(set(node_type.list.type)) == 1
node_type = tf.dtypes.DType(node_type.list.type[0])
else:
node_type = tf.dtypes.DType(node_type.type)
return np.dtype(node_type.as_numpy_dtype()) if node_type and node_type.is_numpy_compatible else node_type
# build directed graph
nx_graph = nx.DiGraph()
for node_def in tf_graph.node:
nx_graph.add_node(
node_def.name,
op=node_def.op,
**{key: value for key, value in node_def.attr.items() if key not in ["value", "dtype"]},
dtype=_get_dtype(node_def),
)
for input in node_def.input:
nx_graph.add_edge(input, node_def.name)
input_names = [spec.name.split(":")[0] for spec in inputs_dict.values()]
output_names = [spec.name.split(":")[0] for spec in outputs_dict.values()]
most_common_dtype = infer_precision(nx_graph, input_names, output_names, _get_dtype)
if most_common_dtype is not None:
precision = {np.dtype("float32"): Precision.FP32, np.dtype("float16"): Precision.FP16}[most_common_dtype]
else:
precision = None
return precision
class TFEstimatorLoader(BaseLoader):
required_fn_name_for_signature_parsing: Optional[str] = GET_MODEL_FN_NAME
def __init__(self, **kwargs):
self._model_args = kwargs
def load(self, model_path: Union[str, Path], **_) -> Model:
if isinstance(model_path, Path):
model_path = model_path.as_posix()
get_model = load_from_file(model_path, "model", GET_MODEL_FN_NAME)
get_serving_input_receiver_fn = load_from_file(model_path, "model", GET_SERVING_INPUT_RECEIVER_FN)
if get_model is None:
raise RuntimeError(f"Could not find {GET_MODEL_FN_NAME} in {model_path}")
if get_serving_input_receiver_fn is None:
raise RuntimeError(f"Could not find {GET_SERVING_INPUT_RECEIVER_FN} in {model_path}")
model_args = filter_fn_args(self._model_args, fn=get_model)
serving_input_receiver_args = filter_fn_args(self._model_args, fn=get_serving_input_receiver_fn)
session_config = create_session_config(allow_growth=True)
tf.compat.v1.reset_default_graph()
with tf.compat.v1.Session(config=session_config) as sess:
estimator = get_model(**model_args)
serving_input_receiver_fn = get_serving_input_receiver_fn(**serving_input_receiver_args)
input_receiver = serving_input_receiver_fn()
estimator_spec = estimator.model_fn(
features=input_receiver.features,
labels=None,
mode=tf.estimator.ModeKeys.PREDICT,
config=estimator.config,
)
input_tensors_dict = input_receiver.receiver_tensors
output_tensors_dict = estimator_spec.predictions
inputs_dict = {k: tensor2tensor_spec(tensor) for k, tensor in input_tensors_dict.items()}
outputs_dict = {k: tensor2tensor_spec(tensor) for k, tensor in output_tensors_dict.items()}
input_tensor_names = [t.name for t in inputs_dict.values()]
output_tensor_names = [t.name for t in outputs_dict.values()]
graph_saver = estimator_spec.scaffold.saver or tf.compat.v1.train.Saver(sharded=True)
graph_saver.restore(sess, estimator.latest_checkpoint())
input_tensor_names = inputs_without_resource(sess, input_tensor_names)
frozen_graph = freeze_session(sess, input_names=input_tensor_names, output_names=output_tensor_names)
input_tensor_names = remove_redundant_inputs(frozen_graph, input_tensor_names)
tf.compat.v1.reset_default_graph()
with tf.compat.v1.Session(config=estimator.config.session_config):
frozen_graph = tf_optimize(input_tensor_names, output_tensor_names, frozen_graph)
tf.compat.v1.reset_default_graph()
precision = _infer_model_precision(frozen_graph, inputs_dict, outputs_dict)
return Model(frozen_graph, precision, inputs_dict, outputs_dict)
class TFKerasLoader(BaseLoader):
"""
    Loads a Keras model from source code.

    The large-model flag helps load models which exceed the 2GB protobuf size limit. It is disabled by default.
    The tf-allow-growth flag controls the GPU memory growth feature
    (https://www.tensorflow.org/guide/gpu#limiting_gpu_memory_growth). It is disabled by default.
"""
required_fn_name_for_signature_parsing: Optional[str] = GET_MODEL_FN_NAME
def __init__(self, large_model: bool = False, tf_allow_growth: bool = False, **kwargs):
self._large_model = large_model
self._allow_growth = tf_allow_growth
self._model_args = kwargs
def load(self, model_path: Union[str, Path], **_) -> Model:
if isinstance(model_path, Path):
model_path = model_path.as_posix()
get_model = load_from_file(model_path, "model", GET_MODEL_FN_NAME)
if get_model is None:
raise RuntimeError(f"Could not find {GET_MODEL_FN_NAME} in {model_path}")
model_args = filter_fn_args(self._model_args, fn=get_model)
if self._allow_growth:
physical_devices = tf.config.experimental.list_physical_devices("GPU")
for device in physical_devices:
tf.config.experimental.set_memory_growth(device, True)
tf.keras.backend.clear_session()
tf.keras.backend.set_learning_phase(False)
eager_model, call_fn = get_model(**model_args)
inputs_dict: Dict[str, TensorSpec] = {
input_name: TensorSpec(t.name, t.dtype.name, tuple(t.shape.as_list()))
for input_name, t in zip(eager_model.input_names, eager_model.inputs)
}
concrete_func = call_fn.get_concrete_function(
*[tf.TensorSpec(shape=spec.shape, dtype=spec.dtype, name=name) for name, spec in inputs_dict.items()]
)
input_tensors_names = [tensor.name for tensor in concrete_func.inputs if tensor.dtype != tf.dtypes.resource]
output_tensors_names = [tensor.name for tensor in concrete_func.outputs]
graph_def = from_function(
concrete_func, input_tensors_names, output_tensors_names, large_model=self._large_model
)
# tensor names changes after wrapping with call_fn, thus need to use those from concrete_func
outputs_dict: Dict[str, TensorSpec] = {
output_name: TensorSpec(output_tensor_name, t.dtype.name, tuple(t.shape.as_list()))
for output_name, output_tensor_name, t in zip(
eager_model.output_names, output_tensors_names, eager_model.outputs
)
}
precision = _infer_model_precision(graph_def, inputs_dict, outputs_dict)
tf.keras.backend.clear_session()
tf.keras.backend.set_learning_phase(False)
def _add_suffix_as_quickfix_for_tf24_func_refactor(spec):
if not spec.name.endswith(":0"):
spec = spec._replace(name=spec.name + ":0")
return spec
inputs_dict = {name: _add_suffix_as_quickfix_for_tf24_func_refactor(spec) for name, spec in inputs_dict.items()}
return Model(graph_def, precision, inputs_dict, outputs_dict)
class TFSavedModelLoader(BaseLoader):
def load(self, model_path: Union[str, Path], **kwargs) -> Model:
if isinstance(model_path, Path):
model_path = model_path.as_posix()
tf.compat.v1.reset_default_graph()
if is_tf2():
from tf2onnx.tf_loader import _from_saved_model_v2 # pytype: disable=import-error
graph_def, input_names, output_names, concrete_func, imported, initialized_tables = _from_saved_model_v2(
model_path=model_path,
input_names=None,
output_names=None,
tag=None,
signature_def=[],
concrete_function_index=None,
large_model=False,
)
# inspired by https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tools/saved_model_cli.py#L205
if concrete_func.structured_input_signature:
input_args, input_kwargs = concrete_func.structured_input_signature
input_names = list(input_kwargs)
assert (
not input_args
), f"Not supported args in concrete function signature args={input_args}, kwargs={input_kwargs}"
elif concrete_func._arg_keywords: # pylint: disable=protected-access
# For pure ConcreteFunctions we might have nothing better than _arg_keywords.
assert concrete_func._num_positional_args in [0, 1]
input_names = concrete_func._arg_keywords
input_tensors = [tensor for tensor in concrete_func.inputs if tensor.dtype != tf.dtypes.resource]
inputs = {name: tensor.name for name, tensor in zip(input_names, input_tensors)}
# they are already flattened
output_tensors = [tensor for tensor in concrete_func.outputs if tensor.dtype != tf.dtypes.resource]
            output_names = sorted(concrete_func.structured_outputs)  # outputs are in flattened form
outputs = {name: tensor.name for name, tensor in zip(output_names, output_tensors)}
else:
session_config = create_session_config(allow_growth=True)
with tf.compat.v1.Session(config=session_config) as sess:
graph_def, inputs, outputs = _from_saved_model_v1(sess, model_path, tag=None, signatures=[])
inputs, outputs = handle_tensor_specs(graph_def, inputs, outputs)
precision = _infer_model_precision(graph_def, inputs, outputs)
return Model(graph_def, precision, inputs, outputs)
class TFRunner(BaseRunner):
def __init__(self):
pass
def init_inference(self, model: Model):
if is_tf2():
return TF2RunnerSession(model=model)
else:
return TF1RunnerSession(model=model)
class TF1RunnerSession(BaseRunnerSession):
def __init__(self, model: Model):
super().__init__(model)
assert isinstance(model.handle, tf.compat.v1.GraphDef)
self._inputs = None
self._outputs = None
self._session = None
self._old_env_values = {}
def __enter__(self):
self._old_env_values = self._set_env_variables()
tf.compat.v1.reset_default_graph()
session_config = create_session_config(allow_growth=True)
self._session = tf.compat.v1.Session(config=session_config)
self._session.__enter__()
tf.import_graph_def(self._model.handle, name="")
self._inputs = {
name: self._session.graph.get_tensor_by_name(spec.name) for name, spec in self._model.inputs.items()
}
self._outputs = {
name: self._session.graph.get_tensor_by_name(spec.name) for name, spec in self._model.outputs.items()
}
return self
def __exit__(self, exc_type, exc_value, traceback):
self._session.__exit__(exc_type, exc_value, traceback)
tf.compat.v1.reset_default_graph()
self._inputs = None
self._outputs = None
self._session = None
self._recover_env_variables(self._old_env_values)
def __call__(self, x: Dict[str, object]):
feed_dict = {placeholder: x[name] for name, placeholder in self._inputs.items()}
return self._session.run(self._outputs, feed_dict=feed_dict)
class TF2RunnerSession(BaseRunnerSession):
def __init__(self, model: Model):
super().__init__(model)
assert isinstance(model.handle, tf.compat.v1.GraphDef)
self._concrete_func = None
def __enter__(self):
tf.compat.v1.reset_default_graph()
input_tensor_names = [spec.name for spec in self._model.inputs.values()]
output_tensor_names = [spec.name for spec in self._model.outputs.values()]
self._concrete_func = wrap_function.function_from_graph_def(
self._model.handle, input_tensor_names, output_tensor_names
)
self._concrete_func._signature = [
tf.TensorSpec(shape=spec.shape, dtype=spec.dtype, name=name) for name, spec in self._model.inputs.items()
]
return self
def __exit__(self, exc_type, exc_value, traceback):
self._concrete_func = None
tf.compat.v1.reset_default_graph()
def __call__(self, x: Dict[str, object]):
x = tf.nest.map_structure(tf.convert_to_tensor, x)
y_pred = self._concrete_func(**x)
output_struct = {name: spec.name for name, spec in self._model.outputs.items()}
y_pred = tf.nest.map_structure(lambda t: t.numpy(), y_pred)
y_pred = tf.nest.pack_sequence_as(output_struct, y_pred)
return y_pred
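# Example (illustrative sketch; the input name and shape are hypothetical):
#
#   runner = TFRunner()
#   with runner.init_inference(model) as session:
#       y_pred = session({"input": np.zeros((1, 224, 224, 3), dtype=np.float32)})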
class TFSavedModelSaver(BaseSaver):
def save(self, model: Model, model_path: Union[str, Path]) -> None:
if isinstance(model_path, Path):
model_path = model_path.as_posix()
session_config = create_session_config(allow_growth=True)
with tf.compat.v1.Session(config=session_config) as sess:
tf.import_graph_def(model.handle, name="")
is_func = is_function(sess.graph)
if not is_func:
infer_shape(sess.graph, {})
inputs = {name: sess.graph.get_tensor_by_name(spec.name) for name, spec in model.inputs.items()}
outputs = {name: sess.graph.get_tensor_by_name(spec.name) for name, spec in model.outputs.items()}
def _ensure_shape(tensors_dict, tensors_specs):
for name, tensor in tensors_dict.items():
if tensor.shape.rank is None:
tensor.set_shape(tensors_specs[name].shape)
return tensors_dict
inputs = _ensure_shape(inputs, model.inputs)
outputs = _ensure_shape(outputs, model.outputs)
LOGGER.info(inputs)
LOGGER.info(outputs)
tf.compat.v1.saved_model.simple_save(sess, model_path, inputs, outputs, legacy_init_op=None)
def handle_tensor_specs(
graph_def, inputs: Dict[str, str], outputs: Dict[str, str]
) -> Tuple[Dict[str, TensorSpec], Dict[str, TensorSpec]]:
session_config = tf.compat.v1.ConfigProto(graph_options=tf.compat.v1.GraphOptions(infer_shapes=True))
tf.compat.v1.reset_default_graph()
with tf.compat.v1.Session(config=session_config) as sess:
tf.import_graph_def(graph_def, name="")
def _get_spec(tensors_dict):
tensors_dict = {name: sess.graph.get_tensor_by_name(tname) for name, tname in tensors_dict.items()}
return {name: tensor2tensor_spec(tensor) for name, tensor in tensors_dict.items()}
inputs = _get_spec(inputs)
outputs = _get_spec(outputs)
tf.compat.v1.reset_default_graph()
return inputs, outputs
def tensor2tensor_spec(tensor):
shape = tuple([s.value if hasattr(s, "value") else s for s in tensor.shape])
return TensorSpec(tensor.name, tensor.dtype.name, shape)
loaders.register_extension(Format.TF_ESTIMATOR.value, TFEstimatorLoader)
loaders.register_extension(Format.TF_KERAS.value, TFKerasLoader)
loaders.register_extension(Format.TF_SAVEDMODEL.value, TFSavedModelLoader)
loaders.register_extension(Format.TF_TRT.value, TFSavedModelLoader)
converters.register_extension(f"{Format.TF_ESTIMATOR.value}--{Format.TF_SAVEDMODEL.value}", None)
converters.register_extension(f"{Format.TF_KERAS.value}--{Format.TF_SAVEDMODEL.value}", None)
converters.register_extension(f"{Format.TF_SAVEDMODEL.value}--{Format.TF_SAVEDMODEL.value}", None)
converters.register_extension(f"{Format.TF_ESTIMATOR.value}--{Format.TF_TRT.value}", TFTRTConverter)
converters.register_extension(f"{Format.TF_KERAS.value}--{Format.TF_TRT.value}", TFTRTConverter)
converters.register_extension(f"{Format.TF_SAVEDMODEL.value}--{Format.TF_TRT.value}", TFTRTConverter)
savers.register_extension(Format.TF_SAVEDMODEL.value, TFSavedModelSaver)
savers.register_extension(Format.TF_TRT.value, TFSavedModelSaver)
runners.register_extension(Format.TF_ESTIMATOR.value, TFRunner)
runners.register_extension(Format.TF_KERAS.value, TFRunner)
runners.register_extension(Format.TF_SAVEDMODEL.value, TFRunner)
runners.register_extension(Format.TF_TRT.value, TFRunner)
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/triton/deployment_toolkit/library/tf.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Dict, Iterable, Optional
# pytype: disable=import-error
import onnx
import tensorrt as trt
from ..core import BaseConverter, Format, Model, Precision, ShapeSpec
from ..extensions import converters
from .utils import get_input_shapes
# pytype: enable=import-error
LOGGER = logging.getLogger(__name__)
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
class Onnx2TRTConverter(BaseConverter):
def __init__(self, *, max_batch_size: int, max_workspace_size: int, precision: str):
self._max_batch_size = max_batch_size
self._max_workspace_size = max_workspace_size
self._precision = Precision(precision)
def convert(self, model: Model, dataloader_fn) -> Model:
input_shapes = get_input_shapes(dataloader_fn(), self._max_batch_size)
cuda_engine = onnx2trt(
model.handle,
shapes=input_shapes,
max_workspace_size=self._max_workspace_size,
max_batch_size=self._max_batch_size,
model_precision=self._precision.value,
)
return model._replace(handle=cuda_engine)
@staticmethod
def required_source_model_precision(requested_model_precision: Precision) -> Precision:
# TensorRT requires source models to be in FP32 precision
return Precision.FP32
def onnx2trt(
onnx_model: onnx.ModelProto,
*,
shapes: Dict[str, ShapeSpec],
max_workspace_size: int,
max_batch_size: int,
model_precision: str,
) -> "trt.ICudaEngine":
"""
Converts onnx model to TensorRT ICudaEngine
Args:
        onnx_model: onnx.ModelProto to convert
shapes: dictionary containing min shape, max shape, opt shape for each input name
max_workspace_size: The maximum GPU temporary memory which the CudaEngine can use at execution time.
max_batch_size: The maximum batch size which can be used at execution time,
and also the batch size for which the CudaEngine will be optimized.
model_precision: precision of kernels (possible values: fp16, fp32)
Returns: TensorRT ICudaEngine
"""
# Whether or not 16-bit kernels are permitted.
# During :class:`ICudaEngine` build fp16 kernels will also be tried when this mode is enabled.
fp16_mode = "16" in model_precision
builder = trt.Builder(TRT_LOGGER)
builder.fp16_mode = fp16_mode
builder.max_batch_size = max_batch_size
builder.max_workspace_size = max_workspace_size
# In TensorRT 7.0, the ONNX parser only supports full-dimensions mode,
# meaning that your network definition must be created with the explicitBatch flag set.
# For more information, see
# https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html#work_dynamic_shapes
flags = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
network = builder.create_network(flags)
with trt.OnnxParser(network, TRT_LOGGER) as parser:
# onnx model parsing
if not parser.parse(onnx_model.SerializeToString()):
for i in range(parser.num_errors):
LOGGER.error(f"OnnxParser error {i}/{parser.num_errors}: {parser.get_error(i)}")
raise RuntimeError("Error during parsing ONNX model (see logs for details)")
# optimization
config = builder.create_builder_config()
config.flags |= bool(fp16_mode) << int(trt.BuilderFlag.FP16)
config.max_workspace_size = max_workspace_size
profile = builder.create_optimization_profile()
for name, spec in shapes.items():
profile.set_shape(name, **spec._asdict())
config.add_optimization_profile(profile)
engine = builder.build_engine(network, config=config)
return engine
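# Example (illustrative sketch; the input name and shapes are hypothetical):
#
#   shapes = {"input": ShapeSpec(min=(1, 3, 224, 224), opt=(8, 3, 224, 224), max=(8, 3, 224, 224))}
#   engine = onnx2trt(
#       onnx_model,
#       shapes=shapes,
#       max_workspace_size=(4 << 30),
#       max_batch_size=8,
#       model_precision="fp16",
#   )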
converters.register_extension(f"{Format.ONNX.value}--{Format.TRT.value}", Onnx2TRTConverter)
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/triton/deployment_toolkit/library/onnx2trt_conv.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from pathlib import Path
from typing import Dict, Optional, Union
import numpy as np
# pytype: disable=import-error
import onnx
import onnx.optimizer
import onnx.shape_inference
import onnxruntime
from google.protobuf import text_format
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
# pytype: enable=import-error
from ..core import BaseLoader, BaseRunner, BaseRunnerSession, BaseSaver, Format, Model, Precision, TensorSpec
from ..extensions import loaders, runners, savers
from .utils import infer_precision
LOGGER = logging.getLogger(__name__)
def _value_info2tensor_spec(value_info: onnx.ValueInfoProto):
onnx_data_type_map = {"float": "float32", "double": "float64"}
elem_type_name = onnx.TensorProto.DataType.Name(value_info.type.tensor_type.elem_type).lower()
dtype = onnx_data_type_map.get(elem_type_name, elem_type_name)
def _get_dim(dim):
which = dim.WhichOneof("value")
if which is not None: # which is None when dim is None
dim = getattr(dim, which)
return None if isinstance(dim, (str, bytes)) else dim
shape = value_info.type.tensor_type.shape
shape = tuple([_get_dim(d) for d in shape.dim])
return TensorSpec(value_info.name, dtype=dtype, shape=shape)
def _infer_graph_precision(onnx_graph: onnx.GraphProto) -> Optional[Precision]:
import networkx as nx
# build directed graph
nx_graph = nx.DiGraph()
def _get_dtype(vi):
t = vi.type
if hasattr(t, "tensor_type"):
type_id = t.tensor_type.elem_type
else:
raise NotImplementedError("Not implemented yet")
return TENSOR_TYPE_TO_NP_TYPE[type_id]
node_output2type = {vi.name: _get_dtype(vi) for vi in onnx_graph.value_info}
node_outputs2node = {output_name: node for node in onnx_graph.node for output_name in node.output}
node_inputs2node = {input_name: node for node in onnx_graph.node for input_name in node.input}
for node in onnx_graph.node:
        node_dtype = node_output2type.get("+".join(node.output), None)  # multi-output nodes fall back to None
nx_graph.add_node(
node.name,
op=node.op_type,
attr={a.name: a for a in node.attribute},
dtype=node_dtype,
)
for input_name in node.input:
prev_node = node_outputs2node.get(input_name, None)
if prev_node:
nx_graph.add_edge(prev_node.name, node.name)
for input_node in onnx_graph.input:
input_name = input_node.name
nx_graph.add_node(input_name, op="input", dtype=_get_dtype(input_node))
next_node = node_inputs2node.get(input_name, None)
if next_node:
nx_graph.add_edge(input_name, next_node.name)
for output in onnx_graph.output:
output_name = output.name
nx_graph.add_node(output_name, op="output", dtype=_get_dtype(output))
prev_node = node_outputs2node.get(output_name, None)
if prev_node:
nx_graph.add_edge(prev_node.name, output_name)
else:
LOGGER.warning(f"Could not find previous node for {output_name}")
input_names = [n.name for n in onnx_graph.input]
output_names = [n.name for n in onnx_graph.output]
most_common_dtype = infer_precision(nx_graph, input_names, output_names, lambda node: node.get("dtype", None))
if most_common_dtype is not None:
precision = {np.dtype("float32"): Precision.FP32, np.dtype("float16"): Precision.FP16}[most_common_dtype]
else:
precision = None
return precision
class OnnxLoader(BaseLoader):
def load(self, model_path: Union[str, Path], **_) -> Model:
if isinstance(model_path, Path):
model_path = model_path.as_posix()
model = onnx.load(model_path)
onnx.checker.check_model(model)
onnx.helper.strip_doc_string(model)
model = onnx.shape_inference.infer_shapes(model)
# TODO: probably modification of onnx model ios causes error on optimize
# from onnx.utils import polish_model
# model = polish_model(model) # run checker, docs strip, optimizer and shape inference
inputs = {vi.name: _value_info2tensor_spec(vi) for vi in model.graph.input}
outputs = {vi.name: _value_info2tensor_spec(vi) for vi in model.graph.output}
precision = _infer_graph_precision(model.graph)
return Model(model, precision, inputs, outputs)
class OnnxSaver(BaseSaver):
def __init__(self, as_text: bool = False):
self._as_text = as_text
def save(self, model: Model, model_path: Union[str, Path]) -> None:
model_path = Path(model_path)
LOGGER.debug(f"Saving ONNX model to {model_path.as_posix()}")
model_path.parent.mkdir(parents=True, exist_ok=True)
onnx_model: onnx.ModelProto = model.handle
if self._as_text:
with model_path.open("w") as f:
f.write(text_format.MessageToString(onnx_model))
else:
with model_path.open("wb") as f:
f.write(onnx_model.SerializeToString())
"""
ExecutionProviders on onnxruntime 1.4.0
['TensorrtExecutionProvider',
'CUDAExecutionProvider',
'MIGraphXExecutionProvider',
'NGRAPHExecutionProvider',
'OpenVINOExecutionProvider',
'DnnlExecutionProvider',
'NupharExecutionProvider',
'VitisAIExecutionProvider',
'ArmNNExecutionProvider',
'ACLExecutionProvider',
'CPUExecutionProvider']
"""
def _check_providers(providers):
providers = providers or []
if not isinstance(providers, (list, tuple)):
providers = [providers]
available_providers = onnxruntime.get_available_providers()
unavailable = set(providers) - set(available_providers)
if unavailable:
raise RuntimeError(f"Unavailable providers {unavailable}")
return providers
class OnnxRunner(BaseRunner):
def __init__(self, verbose_runtime_logs: bool = False):
self._providers = None
self._verbose_runtime_logs = verbose_runtime_logs
def init_inference(self, model: Model):
assert isinstance(model.handle, onnx.ModelProto)
return OnnxRunnerSession(
model=model, providers=self._providers, verbose_runtime_logs=self._verbose_runtime_logs
)
class OnnxRunnerSession(BaseRunnerSession):
def __init__(self, model: Model, providers, verbose_runtime_logs: bool = False):
super().__init__(model)
self._input_names = None
self._output_names = None
self._session = None
self._providers = providers
self._verbose_runtime_logs = verbose_runtime_logs
self._old_env_values = {}
def __enter__(self):
self._old_env_values = self._set_env_variables()
sess_options = onnxruntime.SessionOptions() # default session options
if self._verbose_runtime_logs:
sess_options.log_severity_level = 0
sess_options.log_verbosity_level = 1
LOGGER.info(
f"Starting inference session for onnx model providers={self._providers} sess_options={sess_options}"
)
self._input_names = list(self._model.inputs)
self._output_names = list(self._model.outputs)
model_payload = self._model.handle.SerializeToString()
self._session = onnxruntime.InferenceSession(
model_payload, providers=self._providers, sess_options=sess_options
)
return self
def __exit__(self, exc_type, exc_value, traceback):
self._input_names = None
self._output_names = None
self._session = None
self._recover_env_variables(self._old_env_values)
def __call__(self, x: Dict[str, object]):
feed_dict = {k: x[k] for k in self._input_names}
y_pred = self._session.run(self._output_names, feed_dict)
y_pred = dict(zip(self._output_names, y_pred))
return y_pred
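# Example (illustrative sketch; the input name and shape are hypothetical):
#
#   with OnnxRunner().init_inference(model) as session:
#       y_pred = session({"input": np.zeros((1, 3, 224, 224), dtype=np.float32)})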
loaders.register_extension(Format.ONNX.value, OnnxLoader)
runners.register_extension(Format.ONNX.value, OnnxRunner)
savers.register_extension(Format.ONNX.value, OnnxSaver)
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/triton/deployment_toolkit/library/onnx.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | DeepLearningExamples-master | TensorFlow/Classification/ConvNets/triton/deployment_toolkit/library/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pytype: disable=import-error
import onnx
import onnx.shape_inference
import tensorflow as tf
from tf2onnx import optimizer, tfonnx
# pytype: enable=import-error
from ..core import BaseConverter, Format, Model
from ..extensions import converters
from .tf import create_session_config
def _replace_io_names(graph_proto, io_type, name2tensor):
tensor2name = {v: k for k, v in name2tensor.items()}
tensor_value_info_list = {"inputs": graph_proto.input, "outputs": graph_proto.output}[io_type]
for tensor_value_info in tensor_value_info_list:
old_name = tensor_value_info.name
new_name = tensor2name.get(old_name)
if new_name is not None and new_name != old_name:
tensor_value_info.name = new_name
# replace other graph nodes I/O
for node in graph_proto.node:
if old_name in node.input:
idx = list(node.input).index(old_name)
node.input[idx] = new_name
if old_name in node.output:
idx = list(node.output).index(old_name)
node.output[idx] = new_name
def tfgraph2onnx(graph_def, inputnames2tensornames, outputnames2tensornames, *, onnx_opset, onnx_optimized=True):
with tf.Graph().as_default() as tf_graph:
tf.import_graph_def(graph_def, name="")
session_config = create_session_config(allow_growth=True)
with tf.compat.v1.Session(graph=tf_graph, config=session_config):
input_tensor_names = list(inputnames2tensornames.values())
output_tensor_names = list(outputnames2tensornames.values())
onnx_graph = tfonnx.process_tf_graph(
tf_graph,
input_names=input_tensor_names,
output_names=output_tensor_names,
opset=onnx_opset,
)
if onnx_optimized:
onnx_graph = optimizer.optimize_graph(onnx_graph)
graph_doc: str = "triton export"
onnx_model = onnx_graph.make_model(graph_doc)
# to match tensorflow savedmodel signature
_replace_io_names(onnx_model.graph, "inputs", inputnames2tensornames)
_replace_io_names(onnx_model.graph, "outputs", outputnames2tensornames)
onnx.checker.check_model(onnx_model)
onnx.helper.strip_doc_string(onnx_model)
onnx_model = onnx.shape_inference.infer_shapes(onnx_model)
return onnx_model
class TFGraphDef2ONNXConverter(BaseConverter):
def __init__(self, *, onnx_opset: int, onnx_optimized: bool = True):
self._onnx_opset = onnx_opset
self._onnx_optimized = onnx_optimized
def convert(self, model: Model, dataloader_fn) -> Model:
assert isinstance(model.handle, tf.compat.v1.GraphDef)
inputnames2tensorname = {name: spec.name for name, spec in model.inputs.items()}
outputnames2tensorname = {name: spec.name for name, spec in model.outputs.items()}
onnx_model = tfgraph2onnx(
model.handle,
inputnames2tensorname,
outputnames2tensorname,
onnx_opset=self._onnx_opset,
onnx_optimized=self._onnx_optimized,
)
from .onnx import _infer_graph_precision
precision = _infer_graph_precision(onnx_model.graph)
assert precision == model.precision # for testing precision inference function
return model._replace(handle=onnx_model)
converters.register_extension(f"{Format.TF_ESTIMATOR.value}--{Format.ONNX.value}", TFGraphDef2ONNXConverter)
converters.register_extension(f"{Format.TF_KERAS.value}--{Format.ONNX.value}", TFGraphDef2ONNXConverter)
converters.register_extension(f"{Format.TF_SAVEDMODEL.value}--{Format.ONNX.value}", TFGraphDef2ONNXConverter)
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/triton/deployment_toolkit/library/tf2onnx_conv.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import Counter
from typing import Callable, Dict, List
import networkx as nx
from ..core import ShapeSpec
def infer_precision(
nx_graph: nx.Graph,
input_names: List[str],
output_names: List[str],
get_node_dtype_fn: Callable,
):
    # dtypes are read from the "dtype" attribute set on each node by the caller;
    # the get_node_dtype_fn argument is currently unused but kept for API compatibility
    node_dtypes = [nx_graph.nodes[node_name].get("dtype", None) for node_name in nx_graph.nodes]
    node_dtypes = [dt for dt in node_dtypes if dt is None or dt.kind not in ["i", "b"]]
    dtypes_counter = Counter(node_dtypes)
    most_common = dtypes_counter.most_common()
    return most_common[0][0] if most_common else None
def get_shapes_with_dynamic_axes(dataloader, batch_size_dim=0):
def _set_dynamic_shapes(t, shapes):
for k, v in t.items():
shape = list(v.shape)
for dim, s in enumerate(shape):
if shapes[k][dim] != -1 and shapes[k][dim] != s:
shapes[k][dim] = -1
## get all shapes from input and output tensors
input_shapes = {}
output_shapes = {}
for batch in dataloader:
_, x, y = batch
for k, v in x.items():
input_shapes[k] = list(v.shape)
for k, v in y.items():
output_shapes[k] = list(v.shape)
break
# based on max <max_num_iters> iterations, check which
# dimensions differ to determine dynamic_axes
max_num_iters = 100
for idx, batch in enumerate(dataloader):
if idx >= max_num_iters:
break
_, x, y = batch
_set_dynamic_shapes(x, input_shapes)
_set_dynamic_shapes(y, output_shapes)
return input_shapes, output_shapes
def get_dynamic_axes(dataloader, batch_size_dim=0):
input_shapes, output_shapes = get_shapes_with_dynamic_axes(dataloader, batch_size_dim)
all_shapes = {**input_shapes, **output_shapes}
dynamic_axes = {}
for k, shape in all_shapes.items():
for idx, s in enumerate(shape):
if s == -1:
                dynamic_axes.setdefault(k, {})[idx] = k + "_" + str(idx)  # collect every dynamic axis, not only the last one
for k, v in all_shapes.items():
if k in dynamic_axes:
dynamic_axes[k].update({batch_size_dim: "batch_size_" + str(batch_size_dim)})
else:
dynamic_axes[k] = {batch_size_dim: "batch_size_" + str(batch_size_dim)}
return dynamic_axes
def get_input_shapes(dataloader, max_batch_size=1) -> Dict[str, ShapeSpec]:
def init_counters_and_shapes(x, counters, min_shapes, max_shapes):
for k, v in x.items():
counters[k] = Counter()
min_shapes[k] = [float("inf")] * v.ndim
max_shapes[k] = [float("-inf")] * v.ndim
counters = {}
min_shapes: Dict[str, tuple] = {}
max_shapes: Dict[str, tuple] = {}
for idx, batch in enumerate(dataloader):
ids, x, y = batch
if idx == 0:
init_counters_and_shapes(x, counters, min_shapes, max_shapes)
for k, v in x.items():
shape = v.shape
counters[k][shape] += 1
min_shapes[k] = tuple([min(a, b) for a, b in zip(min_shapes[k], shape)])
max_shapes[k] = tuple([max(a, b) for a, b in zip(max_shapes[k], shape)])
opt_shapes: Dict[str, tuple] = {}
for k, v in counters.items():
opt_shapes[k] = v.most_common(1)[0][0]
shapes = {}
for k in opt_shapes.keys(): # same keys in min_shapes and max_shapes
shapes[k] = ShapeSpec(
min=(1,) + min_shapes[k][1:],
max=(max_batch_size,) + max_shapes[k][1:],
opt=(max_batch_size,) + opt_shapes[k][1:],
)
return shapes
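# Example (illustrative sketch): for a dataloader yielding (ids, x, y) batches in which
# x["image"] always has shape (8, 224, 224, 3), get_input_shapes(dataloader, max_batch_size=32)
# returns {"image": ShapeSpec(min=(1, 224, 224, 3), opt=(32, 224, 224, 3), max=(32, 224, 224, 3))}.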
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/triton/deployment_toolkit/library/utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Iterable
from ..core import BaseConverter, Format, Model, Precision, ShapeSpec
from ..extensions import converters
from .onnx2trt_conv import onnx2trt
from .tf2onnx_conv import tfgraph2onnx
from .utils import get_input_shapes
class TFGraphDef2TRTConverter(BaseConverter):
def __init__(
self,
*,
max_batch_size: int,
max_workspace_size: int,
onnx_opset: int,
onnx_optimized: bool = True,
precision: str,
):
self._max_batch_size = max_batch_size
self._max_workspace_size = max_workspace_size
self._onnx_opset = onnx_opset
self._onnx_optimized = onnx_optimized
self._precision = Precision(precision)
def convert(self, model: Model, dataloader_fn) -> Model:
inputnames2tensorname = {name: spec.name for name, spec in model.inputs.items()}
outputnames2tensorname = {name: spec.name for name, spec in model.outputs.items()}
onnx_model = tfgraph2onnx(
model.handle,
inputnames2tensorname,
outputnames2tensorname,
onnx_opset=self._onnx_opset,
onnx_optimized=self._onnx_optimized,
)
from .onnx import _infer_graph_precision
precision = _infer_graph_precision(onnx_model.graph)
assert precision == model.precision # for testing precision inference function
input_shapes = get_input_shapes(dataloader_fn(), self._max_batch_size)
cuda_engine = onnx2trt(
onnx_model,
shapes=input_shapes,
max_workspace_size=self._max_workspace_size,
max_batch_size=self._max_batch_size,
model_precision=self._precision.value,
)
return model._replace(handle=cuda_engine)
@staticmethod
def required_source_model_precision(requested_model_precision: Precision) -> Precision:
# TensorRT requires source models to be in FP32 precision
return Precision.FP32
converters.register_extension(f"{Format.TF_ESTIMATOR.value}--{Format.TRT.value}", TFGraphDef2TRTConverter)
converters.register_extension(f"{Format.TF_KERAS.value}--{Format.TRT.value}", TFGraphDef2TRTConverter)
converters.register_extension(f"{Format.TF_SAVEDMODEL.value}--{Format.TRT.value}", TFGraphDef2TRTConverter)
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/triton/deployment_toolkit/library/tf2trt_conv.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
from pathlib import Path
from typing import Dict, NamedTuple, Optional, Union
import numpy as np
# pytype: disable=import-error
try:
import pycuda.autoinit
import pycuda.driver as cuda
except Exception as e:  # covers ImportError and pycuda initialization errors
logging.getLogger(__name__).warning(f"Problems with importing pycuda package; {e}")
# pytype: enable=import-error
import tensorrt as trt # pytype: disable=import-error
from ..core import BaseLoader, BaseRunner, BaseRunnerSession, BaseSaver, Format, Model, Precision, TensorSpec
from ..extensions import loaders, runners, savers
LOGGER = logging.getLogger(__name__)
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
"""
documentation:
https://docs.nvidia.com/deeplearning/tensorrt/api/python_api/index.html
https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html#python_samples_section
"""
class TensorRTLoader(BaseLoader):
def load(self, model_path: Union[str, Path], **_) -> Model:
model_path = Path(model_path)
LOGGER.debug(f"Loading TensorRT engine from {model_path}")
with model_path.open("rb") as fh, trt.Runtime(TRT_LOGGER) as runtime:
engine = runtime.deserialize_cuda_engine(fh.read())
if engine is None:
raise RuntimeError(f"Could not load ICudaEngine from {model_path}")
inputs = {}
outputs = {}
for binding_idx in range(engine.num_bindings):
name = engine.get_binding_name(binding_idx)
is_input = engine.binding_is_input(binding_idx)
dtype = engine.get_binding_dtype(binding_idx)
shape = engine.get_binding_shape(binding_idx)
if is_input:
inputs[name] = TensorSpec(name, dtype, shape)
else:
outputs[name] = TensorSpec(name, dtype, shape)
return Model(engine, None, inputs, outputs)
class TensorRTSaver(BaseSaver):
def __init__(self):
pass
def save(self, model: Model, model_path: Union[str, Path]) -> None:
model_path = Path(model_path)
LOGGER.debug(f"Saving TensorRT engine to {model_path.as_posix()}")
model_path.parent.mkdir(parents=True, exist_ok=True)
engine: "trt.ICudaEngine" = model.handle
with model_path.open("wb") as fh:
fh.write(engine.serialize())
class TRTBuffers(NamedTuple):
x_host: Optional[Dict[str, object]]
x_dev: Dict[str, object]
y_pred_host: Dict[str, object]
y_pred_dev: Dict[str, object]
class TensorRTRunner(BaseRunner):
def __init__(self):
pass
def init_inference(self, model: Model):
return TensorRTRunnerSession(model=model)
class TensorRTRunnerSession(BaseRunnerSession):
def __init__(self, model: Model):
super().__init__(model)
assert isinstance(model.handle, trt.ICudaEngine)
self._model = model
self._has_dynamic_shapes = None
self._context = None
self._engine: trt.ICudaEngine = self._model.handle
self._cuda_context = pycuda.autoinit.context
self._input_names = None
self._output_names = None
self._buffers = None
def __enter__(self):
self._context = self._engine.create_execution_context()
self._context.__enter__()
self._input_names = [
self._engine[idx] for idx in range(self._engine.num_bindings) if self._engine.binding_is_input(idx)
]
self._output_names = [
self._engine[idx] for idx in range(self._engine.num_bindings) if not self._engine.binding_is_input(idx)
]
# all_binding_shapes_specified is True for models without dynamic shapes
# so initially this variable is False for models with dynamic shapes
self._has_dynamic_shapes = not self._context.all_binding_shapes_specified
return self
def __exit__(self, exc_type, exc_value, traceback):
self._context.__exit__(exc_type, exc_value, traceback)
self._input_names = None
self._output_names = None
        # TODO: are CUDA buffers deallocated automatically?
self._buffers = None
def __call__(self, x):
buffers = self._prepare_buffers_if_needed(x)
bindings = self._update_bindings(buffers)
for name in self._input_names:
cuda.memcpy_htod(buffers.x_dev[name], buffers.x_host[name])
self._cuda_context.push()
self._context.execute_v2(bindings=bindings)
self._cuda_context.pop()
for name in self._output_names:
cuda.memcpy_dtoh(buffers.y_pred_host[name], buffers.y_pred_dev[name])
return buffers.y_pred_host
def _update_bindings(self, buffers: TRTBuffers):
bindings = [None] * self._engine.num_bindings
for name in buffers.y_pred_dev:
binding_idx: int = self._engine[name]
bindings[binding_idx] = buffers.y_pred_dev[name]
for name in buffers.x_dev:
binding_idx: int = self._engine[name]
bindings[binding_idx] = buffers.x_dev[name]
return bindings
def _set_dynamic_input_shapes(self, x_host):
def _is_shape_dynamic(input_shape):
return any([dim is None or dim == -1 for dim in input_shape])
for name in self._input_names:
bindings_idx = self._engine[name]
data_shape = x_host[name].shape # pytype: disable=attribute-error
if self._engine.is_shape_binding(bindings_idx):
input_shape = self._context.get_shape(bindings_idx)
if _is_shape_dynamic(input_shape):
self._context.set_shape_input(bindings_idx, data_shape)
else:
input_shape = self._engine.get_binding_shape(bindings_idx)
if _is_shape_dynamic(input_shape):
self._context.set_binding_shape(bindings_idx, data_shape)
assert self._context.all_binding_shapes_specified and self._context.all_shape_inputs_specified
def _prepare_buffers_if_needed(self, x_host: Dict[str, object]):
# pytype: disable=attribute-error
new_batch_size = list(x_host.values())[0].shape[0]
current_batch_size = list(self._buffers.y_pred_host.values())[0].shape[0] if self._buffers else 0
# pytype: enable=attribute-error
if self._has_dynamic_shapes or new_batch_size != current_batch_size:
            # TODO: are CUDA buffers deallocated automatically?
self._set_dynamic_input_shapes(x_host)
y_pred_host = {}
for name in self._output_names:
shape = self._context.get_binding_shape(self._engine[name])
y_pred_host[name] = np.zeros(shape, dtype=trt.nptype(self._model.outputs[name].dtype))
y_pred_dev = {name: cuda.mem_alloc(data.nbytes) for name, data in y_pred_host.items()}
x_dev = {
name: cuda.mem_alloc(host_input.nbytes)
for name, host_input in x_host.items()
if name in self._input_names # pytype: disable=attribute-error
}
self._buffers = TRTBuffers(None, x_dev, y_pred_host, y_pred_dev)
return self._buffers._replace(x_host=x_host)
if "pycuda.driver" in sys.modules:
loaders.register_extension(Format.TRT.value, TensorRTLoader)
runners.register_extension(Format.TRT.value, TensorRTRunner)
savers.register_extension(Format.TRT.value, TensorRTSaver)
else:
LOGGER.warning("Do not register TensorRT extension due problems with importing pycuda.driver package.")
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/triton/deployment_toolkit/library/tensorrt.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import tensorflow as tf
from utils import image_processing
from utils import hvd_wrapper as hvd
from nvidia import dali
import nvidia.dali.plugin.tf as dali_tf
__all__ = ["get_synth_input_fn", "normalized_inputs"]
class HybridPipe(dali.pipeline.Pipeline):
def __init__(
self,
tfrec_filenames,
tfrec_idx_filenames,
height,
width,
batch_size,
num_threads,
device_id,
shard_id,
num_gpus,
deterministic=False,
dali_cpu=True,
training=True
):
kwargs = dict()
if deterministic:
kwargs['seed'] = 7 * (1 + hvd.rank())
super(HybridPipe, self).__init__(batch_size, num_threads, device_id, **kwargs)
self.training = training
self.input = dali.ops.TFRecordReader(
path=tfrec_filenames,
index_path=tfrec_idx_filenames,
random_shuffle=True,
shard_id=shard_id,
num_shards=num_gpus,
initial_fill=10000,
features={
'image/encoded': dali.tfrecord.FixedLenFeature((), dali.tfrecord.string, ""),
'image/class/label': dali.tfrecord.FixedLenFeature([1], dali.tfrecord.int64, -1),
'image/class/text': dali.tfrecord.FixedLenFeature([], dali.tfrecord.string, ''),
'image/object/bbox/xmin': dali.tfrecord.VarLenFeature(dali.tfrecord.float32, 0.0),
'image/object/bbox/ymin': dali.tfrecord.VarLenFeature(dali.tfrecord.float32, 0.0),
'image/object/bbox/xmax': dali.tfrecord.VarLenFeature(dali.tfrecord.float32, 0.0),
'image/object/bbox/ymax': dali.tfrecord.VarLenFeature(dali.tfrecord.float32, 0.0)
}
)
if self.training:
self.decode = dali.ops.ImageDecoderRandomCrop(
device="cpu" if dali_cpu else "mixed",
output_type=dali.types.RGB,
random_aspect_ratio=[0.75, 1.33],
random_area=[0.05, 1.0],
num_attempts=100
)
self.resize = dali.ops.Resize(device="cpu" if dali_cpu else "gpu", resize_x=width, resize_y=height)
else:
self.decode = dali.ops.ImageDecoder(device="cpu" if dali_cpu else "mixed", output_type=dali.types.RGB)
            # Resize the shorter side to 256 so every image is larger than the 224 crop used by CropMirrorNormalize
self.resize = dali.ops.Resize(device="cpu" if dali_cpu else "gpu", resize_shorter=256)
self.normalize = dali.ops.CropMirrorNormalize(
device="gpu",
output_dtype=dali.types.FLOAT,
crop=(height, width),
image_type=dali.types.RGB,
mean=[123.68, 116.28, 103.53],
std=[58.395, 57.120, 57.385],
output_layout=dali.types.NHWC
)
self.cast_float = dali.ops.Cast(device="gpu", dtype=dali.types.FLOAT)
self.mirror = dali.ops.CoinFlip()
self.iter = 0
def define_graph(self):
# Read images and labels
inputs = self.input(name="Reader")
images = inputs["image/encoded"]
labels = inputs["image/class/label"].gpu()
# Decode and augmentation
images = self.decode(images)
images = self.resize(images)
images = self.normalize(images.gpu(), mirror=self.mirror() if self.training else None)
return (images, labels)
class DALIPreprocessor(object):
def __init__(
self,
filenames,
idx_filenames,
height,
width,
batch_size,
num_threads,
dtype=tf.uint8,
dali_cpu=True,
deterministic=False,
training=False
):
device_id = hvd.local_rank()
shard_id = hvd.rank()
num_gpus = hvd.size()
pipe = HybridPipe(
tfrec_filenames=filenames,
tfrec_idx_filenames=idx_filenames,
height=height,
width=width,
batch_size=batch_size,
num_threads=num_threads,
device_id=device_id,
shard_id=shard_id,
num_gpus=num_gpus,
deterministic=deterministic,
dali_cpu=dali_cpu,
training=training
)
daliop = dali_tf.DALIIterator()
with tf.device("/gpu:0"):
self.images, self.labels = daliop(
pipeline=pipe,
shapes=[(batch_size, height, width, 3), (batch_size, 1)],
dtypes=[tf.float32, tf.int64],
device_id=device_id
)
def get_device_minibatches(self):
with tf.device("/gpu:0"):
self.labels -= 1 # Change to 0-based (don't use background class)
self.labels = tf.squeeze(self.labels, axis=-1)
return self.images, self.labels
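if __name__ == "__main__":
    # Minimal usage sketch; the TFRecord and index paths below are placeholders
    # and running it requires DALI and a GPU. The Horovod wrapper must be
    # initialized before building the pipeline. This only builds the graph;
    # tensors are materialized once a session runs them.
    hvd.init(use_horovod=False)
    preprocessor = DALIPreprocessor(
        filenames=["/data/tfrecords/train-00000-of-01024"],
        idx_filenames=["/data/dali_idx/train-00000-of-01024.idx"],
        height=224,
        width=224,
        batch_size=64,
        num_threads=4,
        training=True,
    )
    images, labels = preprocessor.get_device_minibatches()  # GPU-resident tensors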
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/utils/dali_utils.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
class ArgumentParserUtil(object):
def __init__(self, parser: argparse.ArgumentParser = None):
self.parser = parser
def build_data_parser_group(self):
data_group = self.parser.add_argument_group("Dataset arguments")
data_group.add_argument(
"--data_dir",
required=False,
default=None,
type=str,
help="Path to dataset in TFRecord format. Files should be named 'train-*' and 'validation-*'.")
data_group.add_argument("--data_idx_dir",
required=False,
default=None,
type=str,
help="Path to index files for DALI. Files should be named 'train-*' and 'validation-*'.")
data_group.add_argument("--dali",
action="store_true",
default=False,
required=False,
help="Enable DALI data input.")
data_group.add_argument("--synthetic_data_size",
required=False,
default=224,
type=int,
help="Dimension of image for synthetic dataset")
def build_training_parser_group(self):
train_group = self.parser.add_argument_group("Training arguments")
train_group.add_argument("--lr_init",
default=0.1,
type=float,
required=False,
help="Initial value for the learning rate.")
train_group.add_argument("--lr_warmup_epochs",
default=5,
type=int,
required=False,
help="Number of warmup epochs for learning rate schedule.")
train_group.add_argument("--weight_decay",
default=1e-4,
type=float,
required=False,
help="Weight Decay scale factor.")
train_group.add_argument("--weight_init",
default="fan_out",
choices=["fan_in", "fan_out"],
type=str,
required=False,
help="Model weight initialization method.")
train_group.add_argument("--momentum",
default=0.9,
type=float,
required=False,
help="SGD momentum value for the Momentum optimizer.")
train_group.add_argument("--label_smoothing",
type=float,
default=0.0,
required=False,
help="The value of label smoothing.")
train_group.add_argument("--mixup",
type=float,
default=0.0,
required=False,
help="The alpha parameter for mixup (if 0 then mixup is not applied).")
train_group.add_argument("--cosine_lr",
"--use_cosine",
"--use_cosine_lr"
"--cosine",
action="store_true",
default=False,
required=False,
help="Use cosine learning rate schedule.")
def build_generic_optimization_parser_group(self):
goptim_group = self.parser.add_argument_group("Generic optimization arguments")
goptim_group.add_argument("--xla",
"--use_xla",
action="store_true",
default=False,
required=False,
help="Enable XLA (Accelerated Linear Algebra) computation for improved performance.")
goptim_group.add_argument("--data_format",
choices=['NHWC', 'NCHW'],
type=str,
default='NHWC',
required=False,
help="Data format used to do calculations")
goptim_group.add_argument("--amp",
"--use_tf_amp",
action="store_true",
dest="amp",
default=False,
required=False,
help="Enable Automatic Mixed Precision to speedup computation using tensor cores.")
goptim_group.add_argument("--cpu",
action="store_true",
dest="cpu",
default=False,
required=False,
help="Run model on CPU instead of GPU")
amp_group = self.parser.add_argument_group("Automatic Mixed Precision arguments")
amp_group.add_argument("--static_loss_scale",
"--loss_scale",
default=-1,
required=False,
help="Use static loss scaling in FP32 AMP.")
amp_group.add_argument("--use_static_loss_scaling", required=False, action="store_true", help=argparse.SUPPRESS)
def parse_cmdline(available_arch):
p = argparse.ArgumentParser(description="JoC-RN50v1.5-TF")
p.add_argument('--arch',
choices=available_arch,
type=str,
default='resnet50',
required=False,
help="""Architecture of model to run""")
p.add_argument('--mode',
choices=[
'train', 'train_and_evaluate', 'evaluate', 'predict', 'training_benchmark', 'inference_benchmark'
],
type=str,
default='train_and_evaluate',
required=False,
help="""The execution mode of the script.""")
p.add_argument('--export_dir',
required=False,
default=None,
type=str,
help="Directory in which to write exported SavedModel.")
p.add_argument('--to_predict',
required=False,
default=None,
type=str,
help="Path to file or directory of files to run prediction on.")
p.add_argument('--batch_size', type=int, required=True, help="""Size of each minibatch per GPU.""")
p.add_argument('--num_iter', type=int, required=False, default=1, help="""Number of iterations to run.""")
p.add_argument('--run_iter',
type=int,
required=False,
default=-1,
help="""Number of training iterations to run on single run.""")
p.add_argument('--iter_unit',
choices=['epoch', 'batch'],
type=str,
required=False,
default='epoch',
help="""Unit of iterations.""")
p.add_argument(
'--warmup_steps',
default=50,
type=int,
required=False,
help="""Number of steps considered as warmup and not taken into account for performance measurements.""")
p.add_argument('--model_dir',
type=str,
required=False,
default=None,
help="""Directory in which to write model. If undefined, results dir will be used.""")
p.add_argument('--results_dir',
type=str,
required=False,
default='.',
help="""Directory in which to write training logs, summaries and checkpoints.""")
p.add_argument('--log_filename',
type=str,
required=False,
default='log.json',
help="Name of the JSON file to which write the training log")
p.add_argument('--display_every',
default=10,
type=int,
required=False,
help="""How often (in batches) to print out running information.""")
p.add_argument('--seed', type=int, default=None, help="""Random seed.""")
p.add_argument('--gpu_memory_fraction',
type=float,
default=0.7,
help="""Limit memory fraction used by training script for DALI""")
p.add_argument('--gpu_id',
type=int,
default=0,
help="""Specify ID of the target GPU on multi-device platform. Effective only for single-GPU mode.""")
p.add_argument('--finetune_checkpoint',
required=False,
default=None,
type=str,
help="Path to pre-trained checkpoint which will be used for fine-tuning")
p.add_argument("--use_final_conv",
default=False,
required=False,
action="store_true",
help="Use convolution operator instead of MLP as last layer.")
p.add_argument('--quant_delay',
type=int,
default=0,
required=False,
help="Number of steps to be run before quantization starts to happen")
p.add_argument("--quantize",
default=False,
required=False,
action="store_true",
help="Quantize weights and activations during training. (Defaults to Assymmetric quantization)")
p.add_argument("--use_qdq",
default=False,
required=False,
action="store_true",
help="Use QDQV3 op instead of FakeQuantWithMinMaxVars op for quantization. QDQv3 does only scaling")
p.add_argument("--symmetric",
default=False,
required=False,
action="store_true",
help="Quantize weights and activations during training using symmetric quantization.")
parser_util = ArgumentParserUtil(p)
parser_util.build_data_parser_group()
parser_util.build_training_parser_group()
parser_util.build_generic_optimization_parser_group()
FLAGS, unknown_args = p.parse_known_args()
if len(unknown_args) > 0:
for bad_arg in unknown_args:
print("ERROR: Unknown command line arg: %s" % bad_arg)
raise ValueError("Invalid command line arg(s)")
return FLAGS
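if __name__ == "__main__":
    # Smoke-test sketch: parse an illustrative command line for the default
    # ResNet-50 architecture and print a few of the resulting flags.
    import sys
    sys.argv = ["cmdline_helper.py", "--batch_size", "64", "--mode", "training_benchmark"]
    flags = parse_cmdline(available_arch=["resnet50"])
    print(flags.arch, flags.batch_size, flags.lr_init, flags.cosine_lr)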
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/utils/cmdline_helper.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
__all__ = ['learning_rate_scheduler']
def learning_rate_scheduler(lr_init, lr_warmup_epochs, global_step, batch_size,
num_batches_per_epoch, num_decay_steps, num_gpus, use_cosine_lr):
def get_scaled_base_learning_rate():
"""Calculates base learning rate for creating lr schedule.
In replicated mode, gradients are summed rather than averaged which, with
the sgd and momentum optimizers, increases the effective learning rate by
lr * num_gpus. Dividing the base lr by num_gpus negates the increase.
Args:
batch_size: Total batch-size.
Returns:
Base learning rate to use to create lr schedule.
"""
base_lr = lr_init * num_gpus
# Starting LR = 0.1 with BS = 256, else linearly scale
return base_lr * (batch_size / 256.0)
rescaled_lr = get_scaled_base_learning_rate()
if use_cosine_lr:
print("Using cosine learning rate schedule")
lr = tf.train.cosine_decay(rescaled_lr, global_step, num_decay_steps)
else:
print("Using step learning rate schedule")
boundaries = [int(num_batches_per_epoch * x) for x in [30, 60, 80, 90]]
values = [1e0, 1e-1, 1e-2, 1e-3, 1e-4]
values = [rescaled_lr * v for v in values]
lr = tf.train.piecewise_constant(global_step, boundaries, values)
warmup_steps = int(num_batches_per_epoch * lr_warmup_epochs)
warmup_lr = (rescaled_lr * tf.cast(global_step, tf.float32) / tf.cast(warmup_steps, tf.float32))
return tf.cond(global_step < warmup_steps, lambda: warmup_lr, lambda: lr)
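if __name__ == "__main__":
    # Usage sketch with illustrative hyper-parameters: build the schedule for a
    # single-GPU run and print the learning rate at a few global steps.
    with tf.Graph().as_default():
        global_step = tf.train.get_or_create_global_step()
        lr = learning_rate_scheduler(
            lr_init=0.1, lr_warmup_epochs=5, global_step=global_step,
            batch_size=256, num_batches_per_epoch=5000, num_decay_steps=450000,
            num_gpus=1, use_cosine_lr=False)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            for step in (0, 1000, 200000):
                sess.run(tf.assign(global_step, step))
                print(step, sess.run(lr))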
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/utils/learning_rate.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from utils import hooks
from utils import var_storage
from utils import cmdline_helper
from utils import data_utils
from utils import image_processing
from utils import learning_rate
from utils import dali_utils | DeepLearningExamples-master | TensorFlow/Classification/ConvNets/utils/__init__.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import print_function
import tensorflow as tf
__all__ = ['FixedLossScalerOptimizer']
class FixedLossScalerOptimizer(tf.compat.v1.train.Optimizer):
"""An optimizer that scales loss and un-scales gradients for FP16 training."""
def __init__(self, optimizer, scale=None, name="LossScalingOptimizer", use_locking=False):
super(FixedLossScalerOptimizer, self).__init__(name=name, use_locking=use_locking)
self._optimizer = optimizer
self._scale = float(scale) if scale is not None else 1.0
def compute_gradients(self, loss, var_list=None, *args, **kwargs):
if self._scale != 1.0:
loss = tf.scalar_mul(self._scale, loss)
gradvar = self._optimizer.compute_gradients(loss, var_list, *args, **kwargs)
gradvar = [(tf.scalar_mul(1. / self._scale, g), v) for g, v in gradvar]
return gradvar
def apply_gradients(self, *args, **kwargs):
return self._optimizer.apply_gradients(*args, **kwargs)
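if __name__ == "__main__":
    # Minimal sketch (illustrative values): the wrapper scales the loss by a
    # fixed factor so FP16 gradients do not underflow, then unscales the
    # gradients before the wrapped optimizer applies them. minimize() routes
    # through the overridden compute_gradients/apply_gradients above.
    base_optimizer = tf.compat.v1.train.MomentumOptimizer(learning_rate=0.1, momentum=0.9)
    optimizer = FixedLossScalerOptimizer(base_optimizer, scale=128)
    x = tf.Variable(3.0, name="x")
    train_op = optimizer.minimize(tf.square(x))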
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/utils/optimizers.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import tensorflow as tf
from utils import image_processing
from utils import dali_utils
from utils import hvd_wrapper as hvd
__all__ = ["get_synth_input_fn", "normalized_inputs"]
_R_MEAN = 123.68
_G_MEAN = 116.28
_B_MEAN = 103.53
_CHANNEL_MEANS = [_R_MEAN, _G_MEAN, _B_MEAN]
_CHANNEL_STDS = [58.395, 57.120, 57.385]
_NUM_CHANNELS = 3
def get_synth_input_fn(batch_size, height, width, num_channels, data_format, num_classes, dtype=tf.float32):
"""Returns an input function that returns a dataset with random data.
This input_fn returns a data set that iterates over a set of random data and
bypasses all preprocessing, e.g. jpeg decode and copy. The host to device
copy is still included. This used to find the upper throughput bound when
tunning the full input pipeline.
Args:
height: Integer height that will be used to create a fake image tensor.
width: Integer width that will be used to create a fake image tensor.
num_channels: Integer depth that will be used to create a fake image tensor.
num_classes: Number of classes that should be represented in the fake labels
tensor
dtype: Data type for features/images.
Returns:
An input_fn that can be used in place of a real one to return a dataset
that can be used for iteration.
"""
if data_format not in ["NHWC", "NCHW"]:
raise ValueError("Unknown data_format: %s" % str(data_format))
if data_format == "NHWC":
input_shape = [batch_size, height, width, num_channels]
else:
input_shape = [batch_size, num_channels, height, width]
# Convert the inputs to a Dataset.
inputs = tf.truncated_normal(input_shape, dtype=dtype, mean=127, stddev=60, name='synthetic_inputs')
    # maxval is exclusive for integer dtypes, so num_classes covers labels 0..num_classes-1
    labels = tf.random_uniform([batch_size], minval=0, maxval=num_classes, dtype=tf.int32, name='synthetic_labels')
data = tf.data.Dataset.from_tensors((inputs, labels))
data = data.repeat()
data = data.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
return data
def get_tfrecords_input_fn(filenames, batch_size, height, width, training, distort_color, num_threads, deterministic):
shuffle_buffer_size = 4096
if deterministic:
seed = 13 * hvd.rank()
else:
seed = None
ds = tf.data.Dataset.from_tensor_slices(filenames)
if hvd.size() > 1 and training:
ds = ds.shard(hvd.size(), hvd.rank())
ds = ds.interleave(tf.data.TFRecordDataset, cycle_length=10, block_length=8)
def preproc_func(record):
return image_processing.preprocess_image_record(record, height, width, _NUM_CHANNELS, training)
if training:
ds = ds.shuffle(buffer_size=shuffle_buffer_size, seed=seed)
ds = ds.repeat().map(preproc_func, num_parallel_calls=num_threads)
ds = ds.batch(batch_size=batch_size, drop_remainder=True)
ds = ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
return ds
def get_inference_input_fn(filenames, height, width, num_threads):
ds = tf.data.Dataset.from_tensor_slices(filenames)
counter = tf.data.Dataset.range(sys.maxsize)
ds = tf.data.Dataset.zip((ds, counter))
def preproc_func(record, counter_):
return image_processing.preprocess_image_file(record, height, width, _NUM_CHANNELS, is_training=False)
ds = ds.apply(
tf.data.experimental.map_and_batch(map_func=preproc_func, num_parallel_calls=num_threads, batch_size=1)
)
ds = ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
return ds
def get_dali_input_fn(
filenames, idx_filenames, batch_size, height, width, training, distort_color, num_threads, deterministic
):
if idx_filenames is None:
raise ValueError("Must provide idx_filenames for DALI's reader")
preprocessor = dali_utils.DALIPreprocessor(
filenames,
idx_filenames,
height,
width,
batch_size,
num_threads,
dali_cpu=False,
deterministic=deterministic,
training=training
)
images, labels = preprocessor.get_device_minibatches()
return (images, labels)
def normalized_inputs(inputs):
num_channels = inputs.get_shape()[-1]
if inputs.get_shape().ndims != 4:
raise ValueError('Input must be of size [batch_size, height, width, C>0]')
if len(_CHANNEL_MEANS) != num_channels:
raise ValueError('len(means) must match the number of channels')
# We have a 1-D tensor of means; convert to 3-D.
means_per_channel = tf.reshape(_CHANNEL_MEANS, [1, 1, num_channels])
means_per_channel = tf.cast(means_per_channel, dtype=inputs.dtype)
stds_per_channel = tf.reshape(_CHANNEL_STDS, [1, 1, num_channels])
stds_per_channel = tf.cast(stds_per_channel, dtype=inputs.dtype)
inputs = tf.subtract(inputs, means_per_channel)
return tf.divide(inputs, stds_per_channel)
def get_serving_input_receiver_fn(batch_size, height, width, num_channels, data_format, dtype=tf.float32):
if data_format not in ["NHWC", "NCHW"]:
raise ValueError("Unknown data_format: %s" % str(data_format))
if data_format == "NHWC":
input_shape = [batch_size] + [height, width, num_channels]
else:
input_shape = [batch_size] + [num_channels, height, width]
def serving_input_receiver_fn():
features = tf.placeholder(dtype=dtype, shape=input_shape, name='input_tensor')
return tf.estimator.export.TensorServingInputReceiver(features=features, receiver_tensors=features)
return serving_input_receiver_fn
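if __name__ == "__main__":
    # Sanity-check sketch: iterate one synthetic batch; no dataset is required.
    ds = get_synth_input_fn(batch_size=8, height=224, width=224, num_channels=3,
                            data_format="NHWC", num_classes=1001)
    images, labels = tf.compat.v1.data.make_one_shot_iterator(ds).get_next()
    with tf.compat.v1.Session() as sess:
        imgs, lbls = sess.run([images, labels])
        print(imgs.shape, lbls.shape)  # (8, 224, 224, 3) (8,)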
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/utils/data_utils.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
_RESIZE_MIN = 256
_DEFAULT_IMAGE_SIZE = 224
__all__ = ['preprocess_image_record', 'preprocess_image_file']
def _deserialize_image_record(record):
feature_map = {
'image/encoded': tf.io.FixedLenFeature([], tf.string, ''),
'image/class/label': tf.io.FixedLenFeature([], tf.int64, -1),
'image/class/text': tf.io.FixedLenFeature([], tf.string, ''),
'image/object/bbox/xmin': tf.io.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymin': tf.io.VarLenFeature(dtype=tf.float32),
'image/object/bbox/xmax': tf.io.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymax': tf.io.VarLenFeature(dtype=tf.float32)
}
with tf.name_scope('deserialize_image_record'):
obj = tf.io.parse_single_example(record, feature_map)
imgdata = obj['image/encoded']
label = tf.cast(obj['image/class/label'], tf.int32)
bbox = tf.stack([obj['image/object/bbox/%s' % x].values for x in ['ymin', 'xmin', 'ymax', 'xmax']])
bbox = tf.transpose(tf.expand_dims(bbox, 0), [0, 2, 1])
text = obj['image/class/text']
return imgdata, label, bbox, text
def _decode_jpeg(imgdata, channels=3):
return tf.image.decode_jpeg(imgdata, channels=channels, fancy_upscaling=False, dct_method='INTEGER_FAST')
def _crop_and_flip(image, bbox, num_channels):
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
tf.shape(image),
bounding_boxes=bbox,
min_object_covered=0.1,
aspect_ratio_range=[0.75, 1.33],
area_range=[0.05, 1.0],
max_attempts=100,
use_image_if_no_bounding_boxes=True
)
bbox_begin, bbox_size, _ = sample_distorted_bounding_box
offset_y, offset_x, _ = tf.unstack(bbox_begin)
target_height, target_width, _ = tf.unstack(bbox_size)
cropped = tf.image.crop_to_bounding_box(image, offset_y, offset_x, target_height, target_width)
cropped = tf.image.random_flip_left_right(cropped)
return cropped
def _central_crop(image, crop_height, crop_width):
shape = tf.shape(image)
height, width = shape[0], shape[1]
amount_to_be_cropped_h = (height - crop_height)
crop_top = amount_to_be_cropped_h // 2
amount_to_be_cropped_w = (width - crop_width)
crop_left = amount_to_be_cropped_w // 2
return tf.slice(image, [crop_top, crop_left, 0], [crop_height, crop_width, -1])
def _smallest_size_at_least(height, width, resize_min):
resize_min = tf.cast(resize_min, tf.float32)
# Convert to floats to make subsequent calculations go smoothly.
height, width = tf.cast(height, tf.float32), tf.cast(width, tf.float32)
smaller_dim = tf.minimum(height, width)
scale_ratio = resize_min / smaller_dim
# Convert back to ints to make heights and widths that TF ops will accept.
new_height = tf.cast(height * scale_ratio, tf.int32)
new_width = tf.cast(width * scale_ratio, tf.int32)
return new_height, new_width
def _aspect_preserving_resize(image, resize_min):
"""Resize images preserving the original aspect ratio.
Args:
image: A 3-D image `Tensor`.
resize_min: A python integer or scalar `Tensor` indicating the size of
the smallest side after resize.
Returns:
resized_image: A 3-D tensor containing the resized image.
"""
shape = tf.shape(image)
height, width = shape[0], shape[1]
new_height, new_width = _smallest_size_at_least(height, width, resize_min)
return _resize_image(image, new_height, new_width)
def _resize_image(image, height, width):
"""Simple wrapper around tf.resize_images.
This is primarily to make sure we use the same `ResizeMethod` and other
details each time.
Args:
image: A 3-D image `Tensor`.
height: The target height for the resized image.
width: The target width for the resized image.
Returns:
resized_image: A 3-D tensor containing the resized image. The first two
dimensions have the shape [height, width].
"""
return tf.image.resize(image, [height, width], method=tf.image.ResizeMethod.BILINEAR, preserve_aspect_ratio=False)
def preprocess_image_record(record, height, width, num_channels, is_training=False):
imgdata, label, bbox, text = _deserialize_image_record(record)
label -= 1
try:
image = _decode_jpeg(imgdata, channels=3)
    except Exception:
image = tf.image.decode_image(imgdata, channels=3)
if is_training:
# For training, we want to randomize some of the distortions.
        image = _crop_and_flip(image, bbox, num_channels)
image = _resize_image(image, height, width)
else:
image = _aspect_preserving_resize(image, _RESIZE_MIN)
image = _central_crop(image, height, width)
return image, label
def preprocess_image_file(filename, height, width, num_channels, is_training=False):
    imgdata = tf.read_file(filename)
    try:
        image = _decode_jpeg(imgdata, channels=3)
    except Exception:
        image = tf.image.decode_image(imgdata, channels=3)
    if is_training:
        # For training, we want to randomize some of the distortions. Raw image
        # files carry no ground-truth box, so assume the whole image as the
        # bounding box for the random crop.
        bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
        image = _crop_and_flip(image, bbox, num_channels)
        image = _resize_image(image, height, width)
    else:
        image = _aspect_preserving_resize(image, _RESIZE_MIN)
        image = _central_crop(image, height, width)
    return image, filename
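if __name__ == "__main__":
    # Usage sketch ("/tmp/example.jpg" is a placeholder): run the evaluation
    # preprocessing path, i.e. resize the shorter side to 256 and take a
    # central 224x224 crop.
    image, filename = preprocess_image_file("/tmp/example.jpg", 224, 224, 3, is_training=False)
    with tf.compat.v1.Session() as sess:
        print(sess.run(image).shape)  # (224, 224, 3)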
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/utils/image_processing.py |
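"""Thin wrapper around Horovod that degrades gracefully to single-process mode.
When `init(use_horovod=False)` is called, a dummy object exposing the same
rank/size interface is installed, so calling code does not need to branch on
whether Horovod is available.
"""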
hvd_global_object = None
def init(use_horovod: bool = False):
global hvd_global_object
if use_horovod:
import horovod.tensorflow as hvd
hvd.init()
hvd_global_object = hvd
else:
class _DummyWrapper:
def rank(self): return 0
def size(self): return 1
def local_rank(self): return 0
def local_size(self): return 1
hvd_global_object = _DummyWrapper()
def size():
global hvd_global_object
return hvd_global_object.size()
def rank():
global hvd_global_object
return hvd_global_object.rank()
def local_rank():
global hvd_global_object
return hvd_global_object.local_rank()
def local_size():
global hvd_global_object
return hvd_global_object.local_size() | DeepLearningExamples-master | TensorFlow/Classification/ConvNets/utils/hvd_wrapper.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
__all__ = ['model_variable_scope']
def model_variable_scope(name, reuse=False, dtype=tf.float32, *args, **kwargs):
"""Returns a variable scope that the model should be created under.
If self.dtype is a castable type, model variable will be created in fp32
then cast to self.dtype before being used.
Returns:
A variable scope for the model.
"""
def _custom_dtype_getter(getter, name, shape=None, dtype=None, trainable=True, regularizer=None, *args, **kwargs):
"""Creates variables in fp32, then casts to fp16 if necessary.
This function is a custom getter. A custom getter is a function with the
same signature as tf.get_variable, except it has an additional getter
parameter. Custom getters can be passed as the `custom_getter` parameter of
tf.variable_scope. Then, tf.get_variable will call the custom getter,
instead of directly getting a variable itself. This can be used to change
the types of variables that are retrieved with tf.get_variable.
The `getter` parameter is the underlying variable getter, that would have
been called if no custom getter was used. Custom getters typically get a
variable with `getter`, then modify it in some way.
This custom getter will create an fp32 variable. If a low precision
(e.g. float16) variable was requested it will then cast the variable to the
requested dtype. The reason we do not directly create variables in low
precision dtypes is that applying small gradients to such variables may
cause the variable not to change.
Args:
getter: The underlying variable getter, that has the same signature as
tf.get_variable and returns a variable.
name: The name of the variable to get.
shape: The shape of the variable to get.
*args: Additional arguments to pass unmodified to getter.
**kwargs: Additional keyword arguments to pass unmodified to getter.
Returns:
A variable which is cast to fp16 if necessary.
"""
storage_dtype = tf.float32 if dtype in [tf.float32, tf.float16] else dtype
variable = getter(
name,
shape,
dtype=storage_dtype,
trainable=trainable,
regularizer=(
regularizer if
(trainable and not any(l_name.lower() in name.lower()
for l_name in ['batchnorm', 'batch_norm'])) else None
),
*args,
**kwargs
)
if dtype != tf.float32:
cast_name = name + '/fp16_cast'
try:
cast_variable = tf.get_default_graph().get_tensor_by_name(cast_name + ':0')
except KeyError:
cast_variable = tf.cast(variable, dtype, name=cast_name)
cast_variable._ref = variable._ref
variable = cast_variable
return variable
return tf.variable_scope(name, reuse=reuse, dtype=dtype, custom_getter=_custom_dtype_getter, *args, **kwargs)
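if __name__ == "__main__":
    # Usage sketch: under the scope below, `w` is stored in FP32 but read back
    # as an FP16 cast, so small gradient updates are not lost to FP16 rounding.
    with model_variable_scope("demo", dtype=tf.float16):
        w = tf.get_variable("w", shape=[3, 3], dtype=tf.float16)
        print(w.dtype)  # float16 tensor backed by an fp32 storage variable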
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/utils/var_storage.py |
#! /usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import tensorflow as tf
import numpy as np
import random
import dllogger
import signal
from utils import hvd_wrapper as hvd
__all__ = ['TrainingLoggingHook', 'TrainingPartitionHook']
class MeanAccumulator:
def __init__(self):
self.sum = 0
self.count = 0
def consume(self, value):
self.sum += value
self.count += 1
def value(self):
if self.count:
return self.sum / self.count
else:
return 0
class TrainingLoggingHook(tf.estimator.SessionRunHook):
def __init__(
self, global_batch_size, num_steps, num_samples, num_epochs, steps_per_epoch, warmup_steps=20, logging_steps=1, seed=None
):
self.global_batch_size = global_batch_size
self.num_steps = num_steps
self.num_samples = num_samples
self.num_epochs = num_epochs
self.steps_per_epoch = steps_per_epoch
self.warmup_steps = warmup_steps
self.logging_steps = logging_steps
self.current_step = 0
self.current_epoch = 0
self.t0 = None
self.mean_throughput = MeanAccumulator()
self.seed = seed
    # Determines whether this is the last step of the epoch
def _last_step_of_epoch(self, global_step):
return (global_step + 1) // self.steps_per_epoch > (global_step // self.steps_per_epoch)
def before_run(self, run_context):
run_args = tf.train.SessionRunArgs(
fetches=[
tf.train.get_global_step(), 'cross_entropy_loss_ref:0', 'l2_loss_ref:0', 'total_loss_ref:0',
'learning_rate_ref:0'
]
)
self.t0 = time.time()
return run_args
def after_run(self, run_context, run_values):
global_step, cross_entropy, l2_loss, total_loss, learning_rate = run_values.results
batch_time = time.time() - self.t0
ips = self.global_batch_size / batch_time
if global_step // self.steps_per_epoch < (global_step + 1) // self.steps_per_epoch and self.seed is not None:
tf.set_random_seed(self.seed + global_step)
np.random.seed(self.seed + global_step)
random.seed(self.seed + global_step)
metrics = {
"imgs_per_sec": ips,
"cross_entropy": cross_entropy,
"l2_loss": l2_loss,
"total_loss": total_loss,
"learning_rate": learning_rate
}
if self.current_step >= self.warmup_steps:
self.mean_throughput.consume(metrics['imgs_per_sec'])
if (self.current_step % self.logging_steps) == 0:
metrics = {k: float(v) for k, v in metrics.items()}
dllogger.log(data=metrics, step=(
int(global_step // self.steps_per_epoch), int(global_step)))
self.current_step += 1
if self._last_step_of_epoch(global_step):
metrics = {
"cross_entropy": cross_entropy,
"l2_loss": l2_loss,
"total_loss": total_loss,
"learning_rate": learning_rate
}
metrics = {k: float(v) for k, v in metrics.items()}
dllogger.log(data=metrics, step=(
int(global_step // self.steps_per_epoch), ))
self.current_epoch += 1
class TrainingPartitionHook(tf.estimator.SessionRunHook):
def __init__(self, sync_freq=10):
super().__init__()
        self.signal_received = False
self.sync_freq = sync_freq
self.global_step = 0
signal.signal(signal.SIGUSR1, self._signal_handler)
signal.signal(signal.SIGTERM, self._signal_handler)
def begin(self):
if hvd.size() > 1:
with tf.device("/cpu:0"):
self.input_op = tf.placeholder(tf.int32, shape=())
self.allreduce_op = hvd.hvd_global_object.allreduce(
self.input_op, op=hvd.hvd_global_object.Sum, name="signal_handler_all_reduce")
def before_run(self, run_context):
fetches = [tf.train.get_global_step()]
feed_dict = None
if hvd.size() > 1 and (self.global_step % self.sync_freq) == 0:
fetches += [self.allreduce_op]
            feed_dict = {self.input_op: int(self.signal_received)}
return tf.train.SessionRunArgs(fetches, feed_dict=feed_dict)
def after_run(self, run_context, run_values):
self.global_step = run_values.results[0] + 1
if hvd.size() > 1 and len(run_values.results) == 2:
if run_values.results[1] > 0:
                self.signal_received = True
run_context.request_stop()
        elif self.signal_received:
run_context.request_stop()
def _signal_handler(self, signum, frame):
        self.signal_received = True
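# Usage sketch: attach the hook to an Estimator so that SIGUSR1/SIGTERM (e.g.
# sent by a cluster scheduler ahead of preemption) stops training cleanly and,
# in multi-GPU runs, on all ranks at the next allreduce synchronization point.
#
#   estimator.train(input_fn=..., hooks=[TrainingPartitionHook(sync_freq=10)])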
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/utils/hooks/training_hooks.py |
#! /usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import tensorflow as tf
import dllogger
from .training_hooks import MeanAccumulator
__all__ = ['BenchmarkLoggingHook']
class BenchmarkLoggingHook(tf.estimator.SessionRunHook):
def __init__(self, global_batch_size, warmup_steps=20, logging_steps=1):
self.latencies = []
self.warmup_steps = warmup_steps
self.global_batch_size = global_batch_size
self.current_step = 0
self.t0 = None
self.mean_throughput = MeanAccumulator()
self.logging_steps = logging_steps
def before_run(self, run_context):
self.t0 = time.time()
def after_run(self, run_context, run_values):
batch_time = time.time() - self.t0
ips = self.global_batch_size / batch_time
if self.current_step >= self.warmup_steps:
self.latencies.append(batch_time)
self.mean_throughput.consume(ips)
if (self.current_step % self.logging_steps) == 0:
dllogger.log(data={"total_ips": ips}, step=(0, self.current_step))
self.current_step += 1
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/utils/hooks/benchmark_hooks.py |
#! /usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import numpy as np
import tensorflow as tf
__all__ = ['PrefillStagingAreasHook']
class PrefillStagingAreasHook(tf.estimator.SessionRunHook):
def after_create_session(self, session, coord):
# TODO: This assumes TF collections are ordered; is this safe?
enqueue_ops = tf.get_collection('STAGING_AREA_PUTS')
for i in range(len(enqueue_ops)):
session.run(enqueue_ops[:i + 1])
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/utils/hooks/prefill_hook.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from utils.hooks.training_hooks import *
from utils.hooks.benchmark_hooks import *
from utils.hooks.prefill_hook import *
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/utils/hooks/__init__.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from model import layers
from model import blocks
from model import resnet | DeepLearningExamples-master | TensorFlow/Classification/ConvNets/model/__init__.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import tensorflow as tf
from utils import hvd_wrapper as hvd
import dllogger
from model import layers
from model import blocks
from utils import var_storage
from utils.data_utils import normalized_inputs
from utils.learning_rate import learning_rate_scheduler
from utils.optimizers import FixedLossScalerOptimizer
__all__ = [
'ResnetModel',
]
class ResnetModel(object):
"""Resnet cnn network configuration."""
def __init__(
self,
model_name,
n_classes,
layers_count,
layers_depth,
expansions,
compute_format='NCHW',
input_format='NHWC',
weight_init='fan_out',
dtype=tf.float32,
use_dali=False,
use_cpu=False,
cardinality=1,
use_se=False,
se_ratio=1,
):
self.model_hparams = tf.contrib.training.HParams(
n_classes=n_classes,
compute_format=compute_format,
input_format=input_format,
dtype=dtype,
layers_count=layers_count,
layers_depth=layers_depth,
expansions=expansions,
model_name=model_name,
use_dali=use_dali,
use_cpu=use_cpu,
cardinality=cardinality,
use_se=use_se,
se_ratio=se_ratio
)
self.batch_norm_hparams = tf.contrib.training.HParams(
decay=0.9,
epsilon=1e-5,
scale=True,
center=True,
param_initializers={
'beta': tf.constant_initializer(0.0),
'gamma': tf.constant_initializer(1.0),
'moving_mean': tf.constant_initializer(0.0),
'moving_variance': tf.constant_initializer(1.0)
},
)
self.conv2d_hparams = tf.contrib.training.HParams(
kernel_initializer=tf.compat.v1.variance_scaling_initializer(
scale=2.0, distribution='truncated_normal', mode=weight_init
),
bias_initializer=tf.constant_initializer(0.0)
)
self.dense_hparams = tf.contrib.training.HParams(
kernel_initializer=tf.compat.v1.variance_scaling_initializer(
scale=2.0, distribution='truncated_normal', mode=weight_init
),
bias_initializer=tf.constant_initializer(0.0)
)
if hvd.rank() == 0:
print("Model HParams:")
print("Name", model_name)
print("Number of classes", n_classes)
print("Compute_format", compute_format)
print("Input_format", input_format)
print("dtype", str(dtype))
def __call__(self, features, labels, mode, params):
if mode == tf.estimator.ModeKeys.TRAIN:
mandatory_params = [
"batch_size", "lr_init", "num_gpus", "steps_per_epoch", "momentum", "weight_decay", "loss_scale",
"label_smoothing"
]
for p in mandatory_params:
if p not in params:
raise RuntimeError("Parameter {} is missing.".format(p))
if mode == tf.estimator.ModeKeys.TRAIN and not self.model_hparams.use_dali:
with tf.device('/cpu:0'):
# Stage inputs on the host
cpu_prefetch_op, (features, labels) = self._stage([features, labels])
if not self.model_hparams.use_cpu:
with tf.device('/gpu:0'):
# Stage inputs to the device
gpu_prefetch_op, (features, labels) = self._stage([features, labels])
main_device = "/gpu:0" if not self.model_hparams.use_cpu else "/cpu:0"
with tf.device(main_device):
if features.dtype != self.model_hparams.dtype:
features = tf.cast(features, self.model_hparams.dtype)
# Subtract mean per channel
# and enforce values between [-1, 1]
if not self.model_hparams.use_dali:
features = normalized_inputs(features)
mixup = 0
eta = 0
if mode == tf.estimator.ModeKeys.TRAIN:
eta = params['label_smoothing']
mixup = params['mixup']
if mode != tf.estimator.ModeKeys.PREDICT:
n_cls = self.model_hparams.n_classes
one_hot_smoothed_labels = tf.one_hot(labels, n_cls,
on_value=1 - eta + eta / n_cls, off_value=eta / n_cls)
if mixup != 0:
print("Using mixup training with beta=", params['mixup'])
beta_distribution = tf.distributions.Beta(params['mixup'], params['mixup'])
feature_coefficients = beta_distribution.sample(sample_shape=[params['batch_size'], 1, 1, 1])
reversed_feature_coefficients = tf.subtract(
tf.ones(shape=feature_coefficients.shape), feature_coefficients
)
rotated_features = tf.reverse(features, axis=[0])
features = feature_coefficients * features + reversed_feature_coefficients * rotated_features
label_coefficients = tf.squeeze(feature_coefficients, axis=[2, 3])
rotated_labels = tf.reverse(one_hot_smoothed_labels, axis=[0])
reversed_label_coefficients = tf.subtract(
tf.ones(shape=label_coefficients.shape), label_coefficients
)
one_hot_smoothed_labels = label_coefficients * one_hot_smoothed_labels + reversed_label_coefficients * rotated_labels
# Update Global Step
global_step = tf.train.get_or_create_global_step()
tf.identity(global_step, name="global_step_ref")
tf.identity(features, name="features_ref")
if mode == tf.estimator.ModeKeys.TRAIN:
tf.identity(labels, name="labels_ref")
probs, logits = self.build_model(
features,
training=mode == tf.estimator.ModeKeys.TRAIN,
reuse=False,
use_final_conv=params['use_final_conv']
)
if params['use_final_conv']:
logits = tf.squeeze(logits, axis=[-2, -1])
y_preds = tf.argmax(logits, axis=1, output_type=tf.int32)
            # Check the output dtypes; they must be FP32 in training
assert (probs.dtype == tf.float32)
assert (logits.dtype == tf.float32)
assert (y_preds.dtype == tf.int32)
tf.identity(logits, name="logits_ref")
tf.identity(probs, name="probs_ref")
tf.identity(y_preds, name="y_preds_ref")
#if mode == tf.estimator.ModeKeys.TRAIN:
#
# assert (len(tf.trainable_variables()) == 161)
#
#else:
#
# assert (len(tf.trainable_variables()) == 0)
if mode == tf.estimator.ModeKeys.TRAIN and params['quantize']:
dllogger.log(data={"QUANTIZATION AWARE TRAINING ENABLED": True}, step=tuple())
if params['symmetric']:
dllogger.log(data={"MODE": "USING SYMMETRIC MODE"}, step=tuple())
tf.contrib.quantize.experimental_create_training_graph(
tf.get_default_graph(),
symmetric=True,
use_qdq=params['use_qdq'],
quant_delay=params['quant_delay']
)
else:
dllogger.log(data={"MODE": "USING ASSYMETRIC MODE"}, step=tuple())
tf.contrib.quantize.create_training_graph(
tf.get_default_graph(), quant_delay=params['quant_delay'], use_qdq=params['use_qdq']
)
# Fix for restoring variables during fine-tuning of Resnet
if 'finetune_checkpoint' in params.keys():
train_vars = tf.trainable_variables()
train_var_dict = {}
for var in train_vars:
train_var_dict[var.op.name] = var
dllogger.log(data={"Restoring variables from checkpoint": params['finetune_checkpoint']}, step=tuple())
tf.train.init_from_checkpoint(params['finetune_checkpoint'], train_var_dict)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {'classes': y_preds, 'probabilities': probs}
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
export_outputs={'predict': tf.estimator.export.PredictOutput(predictions)}
)
else:
with tf.device(main_device):
if mode == tf.estimator.ModeKeys.TRAIN:
acc_top1 = tf.nn.in_top_k(predictions=logits, targets=labels, k=1)
acc_top5 = tf.nn.in_top_k(predictions=logits, targets=labels, k=5)
else:
acc_top1, acc_top1_update_op = tf.metrics.mean(
tf.nn.in_top_k(predictions=logits, targets=labels, k=1)
)
acc_top5, acc_top5_update_op = tf.metrics.mean(
tf.nn.in_top_k(predictions=logits, targets=labels, k=5)
)
tf.identity(acc_top1, name="acc_top1_ref")
tf.identity(acc_top5, name="acc_top5_ref")
predictions = {
'classes': y_preds,
'probabilities': probs,
'accuracy_top1': acc_top1,
'accuracy_top5': acc_top5
}
cross_entropy = tf.losses.softmax_cross_entropy(logits=logits, onehot_labels=one_hot_smoothed_labels)
assert (cross_entropy.dtype == tf.float32)
tf.identity(cross_entropy, name='cross_entropy_loss_ref')
def loss_filter_fn(name):
"""we don't need to compute L2 loss for BN and bias (eq. to add a cste)"""
return all(
[
tensor_name not in name.lower()
# for tensor_name in ["batchnorm", "batch_norm", "batch_normalization", "bias"]
for tensor_name in ["batchnorm", "batch_norm", "batch_normalization"]
]
)
filtered_params = [tf.cast(v, tf.float32) for v in tf.trainable_variables() if loss_filter_fn(v.name)]
if len(filtered_params) != 0:
l2_loss_per_vars = [tf.nn.l2_loss(v) for v in filtered_params]
l2_loss = tf.multiply(tf.add_n(l2_loss_per_vars), params["weight_decay"])
else:
l2_loss = tf.zeros(shape=(), dtype=tf.float32)
assert (l2_loss.dtype == tf.float32)
tf.identity(l2_loss, name='l2_loss_ref')
total_loss = tf.add(cross_entropy, l2_loss, name="total_loss")
assert (total_loss.dtype == tf.float32)
tf.identity(total_loss, name='total_loss_ref')
tf.summary.scalar('cross_entropy', cross_entropy)
tf.summary.scalar('l2_loss', l2_loss)
tf.summary.scalar('total_loss', total_loss)
if mode == tf.estimator.ModeKeys.TRAIN:
with tf.device("/cpu:0"):
learning_rate = learning_rate_scheduler(
lr_init=params["lr_init"],
lr_warmup_epochs=params["lr_warmup_epochs"],
global_step=global_step,
batch_size=params["batch_size"],
num_batches_per_epoch=params["steps_per_epoch"],
num_decay_steps=params["num_decay_steps"],
num_gpus=params["num_gpus"],
use_cosine_lr=params["use_cosine_lr"]
)
tf.identity(learning_rate, name='learning_rate_ref')
tf.summary.scalar('learning_rate', learning_rate)
optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=params["momentum"])
if params["apply_loss_scaling"]:
optimizer = FixedLossScalerOptimizer(optimizer, scale=params["loss_scale"])
if hvd.size() > 1:
optimizer = hvd.hvd_global_object.DistributedOptimizer(optimizer)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
if mode != tf.estimator.ModeKeys.TRAIN:
update_ops += [acc_top1_update_op, acc_top5_update_op]
deterministic = True
gate_gradients = (tf.compat.v1.train.Optimizer.GATE_OP if deterministic else tf.compat.v1.train.Optimizer.GATE_NONE)
backprop_op = optimizer.minimize(total_loss, gate_gradients=gate_gradients, global_step=global_step)
if self.model_hparams.use_dali:
train_ops = tf.group(backprop_op, update_ops, name='train_ops')
elif self.model_hparams.use_cpu:
train_ops = tf.group(
backprop_op, cpu_prefetch_op, update_ops, name='train_ops'
)
else:
train_ops = tf.group(
backprop_op, cpu_prefetch_op, gpu_prefetch_op, update_ops, name='train_ops'
)
return tf.estimator.EstimatorSpec(mode=mode, loss=total_loss, train_op=train_ops)
elif mode == tf.estimator.ModeKeys.EVAL:
eval_metrics = {
"top1_accuracy": (acc_top1, acc_top1_update_op),
"top5_accuracy": (acc_top5, acc_top5_update_op)
}
return tf.estimator.EstimatorSpec(
mode=mode, predictions=predictions, loss=total_loss, eval_metric_ops=eval_metrics
)
else:
raise NotImplementedError('Unknown mode {}'.format(mode))
@staticmethod
def _stage(tensors):
"""Stages the given tensors in a StagingArea for asynchronous put/get.
"""
stage_area = tf.contrib.staging.StagingArea(
dtypes=[tensor.dtype for tensor in tensors], shapes=[tensor.get_shape() for tensor in tensors]
)
put_op = stage_area.put(tensors)
get_tensors = stage_area.get()
tf.add_to_collection('STAGING_AREA_PUTS', put_op)
return put_op, get_tensors
def build_model(self, inputs, training=True, reuse=False, use_final_conv=False):
with var_storage.model_variable_scope(
self.model_hparams.model_name, reuse=reuse, dtype=self.model_hparams.dtype
):
with tf.variable_scope("input_reshape"):
if self.model_hparams.input_format == 'NHWC' and self.model_hparams.compute_format == 'NCHW':
# Transpose inputs: NHWC => NCHW
inputs = tf.transpose(inputs, [0, 3, 1, 2])
elif self.model_hparams.input_format == 'NCHW' and self.model_hparams.compute_format == 'NHWC':
# Transpose inputs: NCHW => NHWC
inputs = tf.transpose(inputs, [0, 2, 3, 1])
if self.model_hparams.dtype != inputs.dtype:
inputs = tf.cast(inputs, self.model_hparams.dtype)
net = blocks.conv2d_block(
inputs,
n_channels=64,
kernel_size=(7, 7),
strides=(2, 2),
mode='SAME',
use_batch_norm=True,
activation='relu',
is_training=training,
data_format=self.model_hparams.compute_format,
conv2d_hparams=self.conv2d_hparams,
batch_norm_hparams=self.batch_norm_hparams,
name='conv2d'
)
net = layers.max_pooling2d(
net,
pool_size=(3, 3),
strides=(2, 2),
padding='SAME',
data_format=self.model_hparams.compute_format,
name="max_pooling2d",
)
model_bottlenecks = self.model_hparams.layers_depth
for block_id, block_bottleneck in enumerate(model_bottlenecks):
for layer_id in range(self.model_hparams.layers_count[block_id]):
stride = 2 if (layer_id == 0 and block_id != 0) else 1
net = blocks.bottleneck_block(
inputs=net,
depth=block_bottleneck * self.model_hparams.expansions,
depth_bottleneck=block_bottleneck,
cardinality=self.model_hparams.cardinality,
stride=stride,
training=training,
data_format=self.model_hparams.compute_format,
conv2d_hparams=self.conv2d_hparams,
batch_norm_hparams=self.batch_norm_hparams,
block_name="btlnck_block_%d_%d" % (block_id, layer_id),
use_se=self.model_hparams.use_se,
ratio=self.model_hparams.se_ratio
)
with tf.variable_scope("output"):
net = layers.reduce_mean(
net, keepdims=False, data_format=self.model_hparams.compute_format, name='spatial_mean'
)
if use_final_conv:
logits = layers.conv2d(
net,
n_channels=self.model_hparams.n_classes,
kernel_size=(1, 1),
strides=(1, 1),
padding='SAME',
data_format=self.model_hparams.compute_format,
dilation_rate=(1, 1),
use_bias=True,
kernel_initializer=self.dense_hparams.kernel_initializer,
bias_initializer=self.dense_hparams.bias_initializer,
trainable=training,
name='dense'
)
else:
logits = layers.dense(
inputs=net,
units=self.model_hparams.n_classes,
use_bias=True,
trainable=training,
kernel_initializer=self.dense_hparams.kernel_initializer,
bias_initializer=self.dense_hparams.bias_initializer
)
if logits.dtype != tf.float32:
logits = tf.cast(logits, tf.float32)
axis = 3 if self.model_hparams.compute_format == "NHWC" and use_final_conv else 1
probs = layers.softmax(logits, name="softmax", axis=axis)
return probs, logits
model_architectures = {
'resnet50': {
'layers': [3, 4, 6, 3],
'widths': [64, 128, 256, 512],
'expansions': 4,
},
'resnext101-32x4d': {
'layers': [3, 4, 23, 3],
'widths': [128, 256, 512, 1024],
'expansions': 2,
'cardinality': 32,
},
'se-resnext101-32x4d': {
'cardinality': 32,
'layers': [3, 4, 23, 3],
'widths': [128, 256, 512, 1024],
'expansions': 2,
'use_se': True,
'se_ratio': 16,
},
}
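# --- Hedged sketch (not part of the original file) ---------------------------
# The table above fully determines the topology consumed by build_model:
# stage i stacks layers[i] bottleneck blocks whose output depth is
# widths[i] * expansions. The helper below only illustrates that arithmetic.
def _sketch_stage_depths(arch):
    """Per-stage bottleneck output depths for a model_architectures entry."""
    return [width * arch['expansions'] for width in arch['widths']]

# e.g. _sketch_stage_depths(model_architectures['resnet50']) == [256, 512, 1024, 2048]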
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/model/resnet.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from model.blocks.conv2d_block import conv2d_block
from model.blocks.resnet_bottleneck_block import bottleneck_block
__all__ = [
# conv + bn + act block
'conv2d_block',
# resnet blocks
'bottleneck_block'
]
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/model/blocks/__init__.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from model import layers
__all__ = ['conv2d_block']
def conv2d_block(
inputs,
n_channels,
kernel_size=(3, 3),
strides=(2, 2),
mode='SAME',
use_batch_norm=True,
activation='relu',
is_training=True,
data_format='NHWC',
conv2d_hparams=None,
batch_norm_hparams=None,
name='conv2d',
cardinality=1,
):
if not isinstance(conv2d_hparams, tf.contrib.training.HParams):
raise ValueError("The paramater `conv2d_hparams` is not of type `HParams`")
if not isinstance(batch_norm_hparams, tf.contrib.training.HParams) and use_batch_norm:
raise ValueError("The paramater `conv2d_hparams` is not of type `HParams`")
with tf.variable_scope(name):
if cardinality == 1:
net = layers.conv2d(
inputs,
n_channels=n_channels,
kernel_size=kernel_size,
strides=strides,
padding=mode,
data_format=data_format,
use_bias=not use_batch_norm,
trainable=is_training,
kernel_initializer=conv2d_hparams.kernel_initializer,
bias_initializer=conv2d_hparams.bias_initializer)
else:
# Grouped-convolution path; note the filter shape is hard-coded to 3x3,
# which matches the only grouped call site in this repo (the ResNeXt
# bottleneck's middle convolution).
group_filter = tf.get_variable(
name=name + 'group_filter',
shape=[3, 3, n_channels // cardinality, n_channels],
trainable=is_training,
dtype=tf.float32)
net = tf.nn.conv2d(inputs,
group_filter,
strides=strides,
padding='SAME',
data_format=data_format)
if use_batch_norm:
net = layers.batch_norm(
net,
decay=batch_norm_hparams.decay,
epsilon=batch_norm_hparams.epsilon,
scale=batch_norm_hparams.scale,
center=batch_norm_hparams.center,
is_training=is_training,
data_format=data_format,
param_initializers=batch_norm_hparams.param_initializers
)
if activation == 'relu':
net = layers.relu(net, name='relu')
elif activation == 'tanh':
net = layers.tanh(net, name='tanh')
elif activation != 'linear' and activation is not None:
raise KeyError('Invalid activation type: `%s`' % activation)
return net
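# --- Hedged usage sketch (not part of the original file) ---------------------
# conv2d_block only reads the HParams attributes shown below, so a minimal
# configuration can be assembled directly. The concrete values are
# illustrative assumptions, not the repository's training defaults.
def _sketch_conv2d_block_usage():
    conv2d_hparams = tf.contrib.training.HParams(
        kernel_initializer=tf.compat.v1.variance_scaling_initializer(),
        bias_initializer=tf.zeros_initializer(),
    )
    batch_norm_hparams = tf.contrib.training.HParams(
        decay=0.9,
        epsilon=1e-5,
        scale=True,
        center=True,
        # Allowed keys (validated in layers.batch_norm): beta, gamma,
        # moving_mean, moving_variance.
        param_initializers={'gamma': tf.ones_initializer(),
                            'beta': tf.zeros_initializer()},
    )
    images = tf.placeholder(tf.float32, shape=[None, 224, 224, 3])
    return conv2d_block(
        images,
        n_channels=64,
        kernel_size=(7, 7),
        strides=(2, 2),
        data_format='NHWC',
        conv2d_hparams=conv2d_hparams,
        batch_norm_hparams=batch_norm_hparams,
    )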
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/model/blocks/conv2d_block.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
import model.layers
import model.blocks
__all__ = ['bottleneck_block']
def bottleneck_block(
inputs,
depth,
depth_bottleneck,
stride,
cardinality=1,
training=True,
data_format='NCHW',
conv2d_hparams=None,
batch_norm_hparams=None,
block_name="bottleneck_block",
use_se=False,
ratio=1
):
if data_format not in ['NHWC', 'NCHW']:
raise ValueError("Unknown data format: `%s` (accepted: ['NHWC', 'NCHW'])" % data_format)
if not isinstance(conv2d_hparams, tf.contrib.training.HParams):
raise ValueError("The paramater `conv2d_hparams` is not of type `HParams`")
if not isinstance(batch_norm_hparams, tf.contrib.training.HParams):
raise ValueError("The paramater `batch_norm_hparams` is not of type `HParams`")
in_shape = inputs.get_shape()
in_size = in_shape[1] if data_format == "NCHW" else in_shape[-1]
with tf.variable_scope(block_name):
with tf.variable_scope("shortcut"):
if depth == in_size:
if stride == 1:
shortcut = tf.identity(inputs)
else:
shortcut = model.layers.average_pooling2d(
inputs,
pool_size=(1, 1),
strides=(stride, stride),
padding='valid',
data_format='channels_first' if data_format == 'NCHW' else 'channels_last',
name="average_pooling2d")
else:
shortcut = model.blocks.conv2d_block(
inputs,
n_channels=depth,
kernel_size=(1, 1),
strides=(stride, stride),
mode='SAME',
use_batch_norm=True,
activation=None, # Applied at the end after addition with bottleneck
is_training=training,
data_format=data_format,
conv2d_hparams=conv2d_hparams,
batch_norm_hparams=batch_norm_hparams
)
#cardinality_to_bottleneck_width = { 1:64, 2:40, 4:24, 8:14, 32:4, 64:4 }
#cardinality_to_grouped_conv_width = { 1:64, 2:80, 4:96, 8:112, 32:128, 64:256 }
#per_group_ck = cardinality_to_bottleneck_width[cardinality] * depth_bottleneck / 64
bottleneck = model.blocks.conv2d_block(
inputs,
#n_channels=per_group_ck * cardinality if cardinality != 1 else depth_bottleneck,
n_channels=depth_bottleneck,
kernel_size=(1, 1),
strides=(1, 1),
mode='SAME',
use_batch_norm=True,
activation='relu',
is_training=training,
data_format=data_format,
conv2d_hparams=conv2d_hparams,
batch_norm_hparams=batch_norm_hparams,
name='bottleneck_1')
bottleneck = model.blocks.conv2d_block(
bottleneck,
n_channels=depth_bottleneck,
kernel_size=(3, 3),
strides=(stride, stride),
mode='SAME',
use_batch_norm=True,
activation='relu',
is_training=training,
data_format=data_format,
conv2d_hparams=conv2d_hparams,
batch_norm_hparams=batch_norm_hparams,
name='bottleneck_2',
cardinality=cardinality)
bottleneck = model.blocks.conv2d_block(
bottleneck,
n_channels=depth,
kernel_size=(1, 1),
strides=(1, 1),
mode='SAME',
use_batch_norm=True,
activation=None, # Applied at the end after addition with shortcut
is_training=training,
data_format=data_format,
conv2d_hparams=conv2d_hparams,
batch_norm_hparams=batch_norm_hparams,
name='bottleneck_3'
)
if use_se:
bottleneck = model.layers.squeeze_excitation_layer(
inputs=bottleneck,
ratio=ratio,
training=training,
data_format=data_format,
name='bottleneck_se_layer')
return model.layers.relu(shortcut + bottleneck, name='relu')
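# --- Hedged sketch (not part of the original file) ---------------------------
# The shortcut branch above takes one of three forms: identity when neither
# depth nor spatial size changes, a strided 1x1 average pool when only the
# spatial size changes, and a strided 1x1 conv + BN projection when the
# channel depth changes.
def _sketch_shortcut_kind(in_depth, out_depth, stride):
    """Mirrors the shortcut selection logic in bottleneck_block."""
    if out_depth == in_depth:
        return 'identity' if stride == 1 else 'strided_average_pool'
    return 'conv_projection'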
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/model/blocks/resnet_bottleneck_block.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
__all__ = ['dense']
def dense(
inputs,
units,
use_bias=True,
trainable=True,
kernel_initializer=tf.compat.v1.variance_scaling_initializer(),
bias_initializer=tf.zeros_initializer()
):
net = tf.layers.dense(
inputs,
units=units,
activation=None,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
trainable=trainable
)
return net
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/model/layers/dense.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tensorflow as tf
__all__ = ['average_pooling2d', 'max_pooling2d']
def average_pooling2d(inputs, pool_size=(2, 2), strides=None, padding='valid', data_format=None, name="avg_pooling2d"):
if data_format not in ['NHWC', 'NCHW']:
raise ValueError("Unknown data format: `%s` (accepted: ['NHWC', 'NCHW'])" % data_format)
if padding.lower() not in ['same', 'valid']:
raise ValueError("Unknown padding: `%s` (accepted: ['same', 'valid'])" % padding)
net = tf.layers.average_pooling2d(
inputs,
pool_size=pool_size,
strides=strides,
padding=padding,
data_format='channels_first' if data_format == 'NCHW' else 'channels_last',
name=name
)
return net
def max_pooling2d(inputs, pool_size=(2, 2), strides=None, padding='valid', data_format=None, name="max_pooling2d"):
if data_format not in ['NHWC', 'NCHW']:
raise ValueError("Unknown data format: `%s` (accepted: ['NHWC', 'NCHW'])" % data_format)
if padding.lower() not in ['same', 'valid']:
raise ValueError("Unknown padding: `%s` (accepted: ['same', 'valid'])" % padding)
net = tf.layers.max_pooling2d(
inputs,
pool_size=pool_size,
strides=strides,
padding=padding,
data_format='channels_first' if data_format == 'NCHW' else 'channels_last',
name=name
)
return net
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/model/layers/pooling.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from model.layers.activation import relu
from model.layers.activation import softmax
from model.layers.activation import tanh
from model.layers.activation import sigmoid
from model.layers.conv2d import conv2d
from model.layers.dense import dense
from model.layers.math_ops import reduce_mean
from model.layers.normalization import batch_norm
from model.layers.padding import pad
from model.layers.pooling import average_pooling2d
from model.layers.pooling import max_pooling2d
from model.layers.squeeze_excitation_layer import squeeze_excitation_layer
__all__ = [
# activation layers
'relu',
'softmax',
'tanh',
'sigmoid',
# conv layers
'conv2d',
# dense layers
'dense',
# math_ops layers
'reduce_mean',
# normalization layers
'batch_norm',
# padding layers
'pad',
# pooling layers
'average_pooling2d',
'max_pooling2d',
'squeeze_excitation_layer'
]
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/model/layers/__init__.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
from model import layers
from model import blocks
__all__ = ['squeeze_excitation_layer']
def squeeze_excitation_layer(
inputs,
ratio,
training=True,
data_format='NCHW',
kernel_initializer=tf.compat.v1.variance_scaling_initializer(),
bias_initializer=tf.zeros_initializer(),
name="squeeze_excitation_layer"
):
if data_format not in ['NHWC', 'NCHW']:
raise ValueError("Unknown data format: `%s` (accepted: ['NHWC', 'NCHW'])" % data_format)
in_shape = inputs.get_shape()
num_channels = in_shape[1] if data_format == "NCHW" else in_shape[-1]
with tf.variable_scope(name):
net = inputs
# squeeze
squeeze = layers.reduce_mean(
net,
keepdims=False,
data_format=data_format,
name='squeeze_spatial_mean'
)
# fc + relu
excitation = layers.dense(
inputs=squeeze,
units=num_channels // ratio,
use_bias=True,
trainable=training,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer
)
excitation = layers.relu(excitation)
# fc + sigmoid
excitation = layers.dense(
inputs=excitation,
units=num_channels,
use_bias=True,
trainable=training,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer
)
excitation = layers.sigmoid(excitation)
out_shape = [-1, num_channels, 1, 1] if data_format == "NCHW" else [-1, 1, 1, num_channels]
excitation = tf.reshape(excitation, out_shape)
net = net * excitation
return net
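# --- Hedged sketch (not part of the original file) ---------------------------
# Channel arithmetic of the SE block above: global average pool (squeeze),
# a dense reduction to num_channels // ratio, a dense expansion back to
# num_channels, then a sigmoid gate that rescales the input channel-wise.
def _sketch_se_dense_units(num_channels, ratio):
    """Units of the two dense layers inside squeeze_excitation_layer."""
    return num_channels // ratio, num_channels

# e.g. with ratio=16 (the se-resnext101-32x4d setting), a 256-channel input is
# gated through a 16-unit bottleneck: _sketch_se_dense_units(256, 16) == (16, 256)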
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/model/layers/squeeze_excitation_layer.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tensorflow as tf
__all__ = ['relu', 'softmax', 'tanh', 'sigmoid']
def relu(inputs, name='relu'):
net = tf.nn.relu(inputs, name=name)
return net
def softmax(inputs, axis=None, name="softmax"):
net = tf.nn.softmax(
inputs,
axis=axis,
name=name,
)
return net
def tanh(inputs, name='tanh'):
net = tf.math.tanh(inputs, name=name)
return net
def sigmoid(inputs, name='sigmoid'):
net = tf.math.sigmoid(inputs, name=name)
return net
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/model/layers/activation.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tensorflow as tf
__all__ = ['conv2d']
def conv2d(
inputs,
n_channels=8,
kernel_size=(3, 3),
strides=(1, 1),
padding='VALID',
data_format='NHWC',
dilation_rate=(1, 1),
use_bias=True,
kernel_initializer=tf.compat.v1.variance_scaling_initializer(),
bias_initializer=tf.zeros_initializer(),
trainable=True,
name=None
):
if data_format not in ['NHWC', 'NCHW']:
raise ValueError("Unknown data format: `%s` (accepted: ['NHWC', 'NCHW'])" % data_format)
if padding.upper() not in ['SAME', 'VALID']:
raise ValueError("Unknown padding: `%s` (accepted: ['SAME', 'VALID'])" % padding.upper())
net = tf.layers.conv2d(
inputs,
filters=n_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation_rate=dilation_rate,
data_format='channels_last' if data_format == 'NHWC' else 'channels_first',
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
trainable=trainable,
activation=None,
name=name
)
return net
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/model/layers/conv2d.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tensorflow as tf
__all__ = ['reduce_mean']
def reduce_mean(inputs, keepdims=None, data_format='NHWC', name='spatial_mean'):
if data_format not in ['NHWC', 'NCHW']:
raise ValueError("Unknown data format: `%s` (accepted: ['NHWC', 'NCHW'])" % data_format)
axes = [1, 2] if data_format == 'NHWC' else [2, 3]
net = tf.math.reduce_mean(inputs, axis=axes, keepdims=keepdims, name=name)
return net
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/model/layers/math_ops.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import tensorflow as tf
from tensorflow.python.training import moving_averages
__all__ = ['batch_norm']
def batch_norm(
inputs,
decay=0.999,
epsilon=0.001,
scale=False,
center=True,
is_training=True,
data_format='NHWC',
param_initializers=None
):
"""Adds a Batch Normalization layer."""
if data_format not in ['NHWC', 'NCHW']:
raise ValueError("Unknown data format: `%s` (accepted: ['NHWC', 'NCHW'])" % data_format)
if param_initializers is not None:
for key, initializer in param_initializers.items():
if key not in ['beta', 'gamma', 'moving_mean', 'moving_variance']:
raise ValueError("Unknown key received: `%s`" % key)
if inspect.isclass(initializer):
initializer = initializer()
param_initializers[key] = initializer  # was setattr(), which does not update a dict
if initializer.__class__.__module__ != 'tensorflow.python.ops.init_ops':
raise ValueError("The object `%s` is not a Tensor initializer" % str(initializer))
input_shape = inputs.get_shape()
input_rank = input_shape.ndims
input_channels = input_shape[1]
if input_rank == 2:
if data_format == 'NCHW':
new_shape = [-1, input_channels, 1, 1]
else:
new_shape = [-1, 1, 1, input_channels]
inputs = tf.reshape(inputs, new_shape)
net = tf.contrib.layers.batch_norm(
inputs,
decay=decay,
scale=scale,
epsilon=epsilon,
is_training=is_training,
trainable=is_training,
fused=True,
data_format=data_format,
center=center,
param_initializers=param_initializers
)
if input_rank == 2:
net = tf.reshape(net, [-1, input_channels])
return net
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/model/layers/normalization.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tensorflow as tf
__all__ = ['pad']
def pad(inputs, paddings, mode='CONSTANT', name='padding', constant_values=0):
if mode.upper() not in ['CONSTANT', 'REFLECT', 'SYMMETRIC']:
raise ValueError("Unknown padding mode: `%s` (accepted: ['CONSTANT', 'REFLECT', 'SYMMETRIC'])" % mode)
net = tf.pad(inputs, paddings=paddings, mode=mode, name=name, constant_values=constant_values)
return net
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/model/layers/padding.py |
#!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Process the ImageNet Challenge bounding boxes for TensorFlow model training.
Associate the ImageNet 2012 Challenge validation data set with labels.
The raw ImageNet validation data set is expected to reside in JPEG files
located in the following directory structure.
data_dir/ILSVRC2012_val_00000001.JPEG
data_dir/ILSVRC2012_val_00000002.JPEG
...
data_dir/ILSVRC2012_val_00050000.JPEG
This script moves the files into a directory structure like such:
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
...
where 'n01440764' is the unique synset label associated with
these images.
This directory reorganization requires a mapping from validation image
number (i.e. suffix of the original file) to the associated label. This
is provided in the ImageNet development kit via a Matlab file.
In order to make life easier and divorce ourselves from Matlab, we instead
supply a custom text file that provides this mapping for us.
Sample usage:
./preprocess_imagenet_validation_data.py ILSVRC2012_img_val \
imagenet_2012_validation_synset_labels.txt
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import errno
import os.path
import sys
if __name__ == '__main__':
if len(sys.argv) < 3:
print('Invalid usage\n'
'usage: preprocess_imagenet_validation_data.py '
'<validation data dir> <validation labels file>')
sys.exit(-1)
data_dir = sys.argv[1]
validation_labels_file = sys.argv[2]
# Read in the 50000 synsets associated with the validation data set.
labels = [l.strip() for l in open(validation_labels_file).readlines()]
unique_labels = set(labels)
# Make all sub-directories in the validation data dir.
for label in unique_labels:
labeled_data_dir = os.path.join(data_dir, label)
# Catch error if sub-directory exists
try:
os.makedirs(labeled_data_dir)
except OSError as e:
# Raise all errors but 'EEXIST'
if e.errno != errno.EEXIST:
raise
# Move all of the image to the appropriate sub-directory.
for i in range(len(labels)):
basename = 'ILSVRC2012_val_000%.5d.JPEG' % (i + 1)
original_filename = os.path.join(data_dir, basename)
if not os.path.exists(original_filename):
print('Failed to find: %s' % original_filename)
sys.exit(-1)
new_filename = os.path.join(data_dir, labels[i], basename)
os.rename(original_filename, new_filename)
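# --- Hedged sketch (not part of the original script) --------------------------
# The reorganization above hinges on one naming convention, mirrored here:
# line i (0-based) of the labels file names the synset of validation image
# i + 1.
def _sketch_val_basename(zero_based_index):
    return 'ILSVRC2012_val_000%.5d.JPEG' % (zero_based_index + 1)
# e.g. _sketch_val_basename(0) == 'ILSVRC2012_val_00000001.JPEG'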
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/dataprep/preprocess_imagenet_validation_data.py |
#!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts ImageNet data to TFRecords file format with Example protos.
The raw ImageNet data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
...
where 'n01440764' is the unique synset label associated with
these images.
The training data set consists of 1000 sub-directories (i.e. labels)
each containing 1200 JPEG images for a total of 1.2M JPEG images.
The evaluation data set consists of 1000 sub-directories (i.e. labels)
each containing 50 JPEG images for a total of 50K JPEG images.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of 1024 and 128 TFRecord files, respectively.
train_directory/train-00000-of-01024
train_directory/train-00001-of-01024
...
train_directory/train-01023-of-01024
and
validation_directory/validation-00000-of-00128
validation_directory/validation-00001-of-00128
...
validation_directory/validation-00127-of-00128
Each validation TFRecord file contains ~390 records. Each training TFRecord
file contains ~1250 records. Each record within the TFRecord file is a
serialized Example proto. The Example proto contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
image/format: string, specifying the format, always 'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
image/class/label: integer specifying the index in a classification layer.
The label ranges from [1, 1000] where 0 is not used.
image/class/synset: string specifying the unique ID of the label,
e.g. 'n01440764'
image/class/text: string specifying the human-readable version of the label
e.g. 'red fox, Vulpes vulpes'
image/object/bbox/xmin: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/xmax: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/ymin: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/ymax: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/label: integer specifying the index in a classification
layer. The label ranges from [1, 1000] where 0 is not used. Note this is
always identical to the image label.
Note that the length of xmin is identical to the length of xmax, ymin and ymax
for each example.
Running this script using 16 threads may take around 2.5 hours on an HP Z420.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
import six
import tensorflow as tf
tf.app.flags.DEFINE_string('train_directory', '/tmp/',
'Training data directory')
tf.app.flags.DEFINE_string('validation_directory', '/tmp/',
'Validation data directory')
tf.app.flags.DEFINE_string('output_directory', '/tmp/',
'Output data directory')
tf.app.flags.DEFINE_integer('train_shards', 1024,
'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 128,
'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 8,
'Number of threads to preprocess the images.')
# The labels file contains the list of valid labels.
# Assumes that the file contains entries as such:
# n01440764
# n01443537
# n01484850
# where each line corresponds to a label expressed as a synset. We map
# each synset contained in the file to an integer (based on the alphabetical
# ordering). See below for details.
tf.app.flags.DEFINE_string('labels_file',
'imagenet_lsvrc_2015_synsets.txt',
'Labels file')
# This file contains the mapping from synset to human-readable label.
# Assumes each line of the file looks like:
#
# n02119247 black fox
# n02119359 silver fox
# n02119477 red fox, Vulpes fulva
#
# where each line corresponds to a unique mapping. Note that each line is
# formatted as <synset>\t<human readable label>.
tf.app.flags.DEFINE_string('imagenet_metadata_file',
'imagenet_metadata.txt',
'ImageNet metadata file')
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _float_feature(value):
"""Wrapper for inserting float features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
if six.PY3 and isinstance(value, six.text_type):
value = six.binary_type(value, encoding='utf-8')
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename, image_buffer, label, synset, human, bbox,
height, width):
"""Build an Example proto for an example.
Args:
filename: string, path to an image file, e.g., '/path/to/example.JPG'
image_buffer: string, JPEG encoding of RGB image
label: integer, identifier for the ground truth for the network
synset: string, unique WordNet ID specifying the label, e.g., 'n02323233'
human: string, human-readable label, e.g., 'red fox, Vulpes vulpes'
bbox: list of bounding boxes; each box is a list of integers
specifying [xmin, ymin, xmax, ymax]. All boxes are assumed to belong to
the same label as the image label.
height: integer, image height in pixels
width: integer, image width in pixels
Returns:
Example proto
"""
xmin = []
ymin = []
xmax = []
ymax = []
for b in bbox:
assert len(b) == 4
# pylint: disable=expression-not-assigned
[l.append(point) for l, point in zip([xmin, ymin, xmax, ymax], b)]
# pylint: enable=expression-not-assigned
colorspace = 'RGB'
channels = 3
image_format = 'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/colorspace': _bytes_feature(colorspace),
'image/channels': _int64_feature(channels),
'image/class/label': _int64_feature(label),
'image/class/synset': _bytes_feature(synset),
'image/class/text': _bytes_feature(human),
'image/object/bbox/xmin': _float_feature(xmin),
'image/object/bbox/xmax': _float_feature(xmax),
'image/object/bbox/ymin': _float_feature(ymin),
'image/object/bbox/ymax': _float_feature(ymax),
'image/object/bbox/label': _int64_feature([label] * len(xmin)),
'image/format': _bytes_feature(image_format),
'image/filename': _bytes_feature(os.path.basename(filename)),
'image/encoded': _bytes_feature(image_buffer)}))
return example
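# --- Hedged companion sketch (not part of the original script) ----------------
# How a record written by _convert_to_example can be parsed back. The feature
# spec mirrors the keys written above; dtypes follow the writer helpers
# (_int64_feature / _float_feature / _bytes_feature). Shown for the commonly
# consumed subset of keys only.
def _sketch_parse_example(serialized_example):
    feature_map = {
        'image/encoded': tf.FixedLenFeature([], tf.string),
        'image/class/label': tf.FixedLenFeature([], tf.int64),
        'image/class/synset': tf.FixedLenFeature([], tf.string),
        'image/height': tf.FixedLenFeature([], tf.int64),
        'image/width': tf.FixedLenFeature([], tf.int64),
        'image/object/bbox/xmin': tf.VarLenFeature(tf.float32),
        'image/object/bbox/ymin': tf.VarLenFeature(tf.float32),
        'image/object/bbox/xmax': tf.VarLenFeature(tf.float32),
        'image/object/bbox/ymax': tf.VarLenFeature(tf.float32),
    }
    return tf.parse_single_example(serialized_example, feature_map)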
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that converts CMYK JPEG data to RGB JPEG data.
self._cmyk_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_jpeg(self._cmyk_data, channels=0)
self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def png_to_jpeg(self, image_data):
return self._sess.run(self._png_to_jpeg,
feed_dict={self._png_data: image_data})
def cmyk_to_rgb(self, image_data):
return self._sess.run(self._cmyk_to_rgb,
feed_dict={self._cmyk_data: image_data})
def decode_jpeg(self, image_data):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
# File list from:
# https://groups.google.com/forum/embed/?place=forum/torch7#!topic/torch7/fOSTXHIESSU
return 'n02105855_2933.JPEG' in filename
def _is_cmyk(filename):
"""Determine if file contains a CMYK JPEG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a JPEG encoded with CMYK color space.
"""
# File list from:
# https://github.com/cytsai/ilsvrc-cmyk-image-list
blacklist = ['n01739381_1309.JPEG', 'n02077923_14822.JPEG',
'n02447366_23489.JPEG', 'n02492035_15739.JPEG',
'n02747177_10752.JPEG', 'n03018349_4028.JPEG',
'n03062245_4620.JPEG', 'n03347037_9675.JPEG',
'n03467068_12171.JPEG', 'n03529860_11437.JPEG',
'n03544143_17228.JPEG', 'n03633091_5218.JPEG',
'n03710637_5125.JPEG', 'n03961711_5286.JPEG',
'n04033995_2932.JPEG', 'n04258138_17003.JPEG',
'n04264628_27969.JPEG', 'n04336792_7448.JPEG',
'n04371774_5854.JPEG', 'n04596742_4225.JPEG',
'n07583066_647.JPEG', 'n13037406_4650.JPEG']
return filename.split('/')[-1] in blacklist
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
with tf.gfile.FastGFile(filename, 'rb') as f:
image_data = f.read()
# Clean the dirty data.
if _is_png(filename):
# 1 image is a PNG.
print('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
elif _is_cmyk(filename):
# 22 JPEG images are in CMYK colorspace.
print('Converting CMYK to RGB for %s' % filename)
image_data = coder.cmyk_to_rgb(image_data)
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
synsets, labels, humans, bboxes, num_shards):
"""Processes and saves list of images as TFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding utils.
thread_index: integer, unique batch to run index is within [0, len(ranges)).
ranges: list of pairs of integers specifying ranges of each batches to
analyze in parallel.
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
humans: list of strings; each string is a human-readable label
bboxes: list of bounding boxes for each image. Note that each entry in this
list might contain from 0+ entries corresponding to the number of bounding
box annotations for the image.
num_shards: integer number of shards for this data set.
"""
# Each thread produces N shards where N = int(num_shards / num_threads).
# For instance, if num_shards = 128, and the num_threads = 2, then the first
# thread would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in range(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_directory, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in files_in_shard:
filename = filenames[i]
label = labels[i]
synset = synsets[i]
human = humans[i]
#bbox = bboxes[i]
image_buffer, height, width = _process_image(filename, coder)
example = _convert_to_example(filename, image_buffer, label,
synset, human, [[0, 0, 1, 1]],
height, width)
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print('%s [thread %d]: Processed %d of %d images in thread batch.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
writer.close()
print('%s [thread %d]: Wrote %d images to %s' %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print('%s [thread %d]: Wrote %d images to %d shards.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
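# --- Hedged sketch (not part of the original script) --------------------------
# Shard partitioning used above: each thread splits its [start, end) file
# range into num_shards / num_threads equal shards via np.linspace.
def _sketch_shard_ranges(start, end, num_shards_per_thread):
    return np.linspace(start, end, num_shards_per_thread + 1).astype(int)
# e.g. thread 0 of 2 threads over 128 shards and files [0, 640) gets
# boundaries _sketch_shard_ranges(0, 640, 64).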
def _process_image_files(name, filenames, synsets, labels, humans,
bboxes, num_shards):
"""Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
humans: list of strings; each string is a human-readable label
bboxes: list of bounding boxes for each image. Note that each entry in this
list might contain from 0+ entries corresponding to the number of bounding
box annotations for the image.
num_shards: integer number of shards for this data set.
"""
assert len(filenames) == len(synsets)
assert len(filenames) == len(labels)
assert len(filenames) == len(humans)
#assert len(filenames) == len(bboxes)
# Break all images into batches with a [ranges[i][0], ranges[i][1]].
spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(int)
ranges = []
threads = []
for i in range(len(spacing) - 1):
ranges.append([spacing[i], spacing[i + 1]])
# Launch a thread for each batch.
print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
sys.stdout.flush()
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a generic TensorFlow-based utility for converting all image codings.
coder = ImageCoder()
threads = []
for thread_index in range(len(ranges)):
args = (coder, thread_index, ranges, name, filenames,
synsets, labels, humans, bboxes, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(filenames)))
sys.stdout.flush()
def _find_image_files(data_dir, labels_file):
"""Build a list of all images files and labels in the data set.
Args:
data_dir: string, path to the root directory of images.
Assumes that the ImageNet data set resides in JPEG files located in
the following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
where 'n01440764' is the unique synset label associated with these images.
labels_file: string, path to the labels file.
The list of valid labels are held in this file. Assumes that the file
contains entries as such:
n01440764
n01443537
n01484850
where each line corresponds to a label expressed as a synset. We map
each synset contained in the file to an integer (based on the alphabetical
ordering) starting with the integer 1 corresponding to the synset
contained in the first line.
The reason we start the integer labels at 1 is to reserve label 0 as an
unused background class.
Returns:
filenames: list of strings; each string is a path to an image file.
synsets: list of strings; each string is a unique WordNet ID.
labels: list of integer; each integer identifies the ground truth.
"""
print('Determining list of input files and labels from %s.' % data_dir)
challenge_synsets = [l.strip() for l in
tf.gfile.FastGFile(labels_file, 'r').readlines()]
labels = []
filenames = []
synsets = []
# Leave label index 0 empty as a background class.
label_index = 1
# Construct the list of JPEG files and labels.
for synset in challenge_synsets:
jpeg_file_path = '%s/%s/*.JPEG' % (data_dir, synset)
matching_files = tf.gfile.Glob(jpeg_file_path)
labels.extend([label_index] * len(matching_files))
synsets.extend([synset] * len(matching_files))
filenames.extend(matching_files)
if not label_index % 100:
print('Finished finding files in %d of %d classes.' % (
label_index, len(challenge_synsets)))
label_index += 1
# Shuffle the ordering of all image files in order to guarantee
# random ordering of the images with respect to label in the
# saved TFRecord files. Make the randomization repeatable.
shuffled_index = list(range(len(filenames)))
random.seed(12345)
random.shuffle(shuffled_index)
filenames = [filenames[i] for i in shuffled_index]
synsets = [synsets[i] for i in shuffled_index]
labels = [labels[i] for i in shuffled_index]
print('Found %d JPEG files across %d labels inside %s.' %
(len(filenames), len(challenge_synsets), data_dir))
return filenames, synsets, labels
def _find_human_readable_labels(synsets, synset_to_human):
"""Build a list of human-readable labels.
Args:
synsets: list of strings; each string is a unique WordNet ID.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
Returns:
List of human-readable strings corresponding to each synset.
"""
humans = []
for s in synsets:
assert s in synset_to_human, ('Failed to find: %s' % s)
humans.append(synset_to_human[s])
return humans
def _process_dataset(name, directory, num_shards, synset_to_human,
image_to_bboxes):
"""Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
num_shards: integer number of shards for this data set.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
"""
filenames, synsets, labels = _find_image_files(directory, FLAGS.labels_file)
humans = _find_human_readable_labels(synsets, synset_to_human)
#bboxes = _find_image_bounding_boxes(filenames, image_to_bboxes)
bboxes = []
_process_image_files(name, filenames, synsets, labels,
humans, bboxes, num_shards)
def _build_synset_lookup(imagenet_metadata_file):
"""Build lookup for synset to human-readable label.
Args:
imagenet_metadata_file: string, path to file containing mapping from
synset to human-readable label.
Assumes each line of the file looks like:
n02119247 black fox
n02119359 silver fox
n02119477 red fox, Vulpes fulva
where each line corresponds to a unique mapping. Note that each line is
formatted as <synset>\t<human readable label>.
Returns:
Dictionary of synset to human labels, such as:
'n02119022' --> 'red fox, Vulpes vulpes'
"""
lines = tf.gfile.FastGFile(imagenet_metadata_file, 'r').readlines()
synset_to_human = {}
for l in lines:
if l:
parts = l.strip().split('\t')
assert len(parts) == 2
synset = parts[0]
human = parts[1]
synset_to_human[synset] = human
return synset_to_human
def main(unused_argv):
assert not FLAGS.train_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
assert not FLAGS.validation_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with '
'FLAGS.validation_shards')
print('Saving results to %s' % FLAGS.output_directory)
# Build a map from synset to human-readable label.
synset_to_human = _build_synset_lookup(FLAGS.imagenet_metadata_file)
# Run it!
_process_dataset('validation', FLAGS.validation_directory,
FLAGS.validation_shards, synset_to_human, None)
_process_dataset('train', FLAGS.train_directory, FLAGS.train_shards,
synset_to_human, None)
if __name__ == '__main__':
tf.app.run()
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/dataprep/build_imagewoof_data.py |
#!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts ImageNet data to TFRecords file format with Example protos.
The raw ImageNet data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
...
where 'n01440764' is the unique synset label associated with
these images.
The training data set consists of 1000 sub-directories (i.e. labels)
each containing 1200 JPEG images for a total of 1.2M JPEG images.
The evaluation data set consists of 1000 sub-directories (i.e. labels)
each containing 50 JPEG images for a total of 50K JPEG images.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of 1024 and 128 TFRecord files, respectively.
train_directory/train-00000-of-01024
train_directory/train-00001-of-01024
...
train_directory/train-01023-of-01024
and
validation_directory/validation-00000-of-00128
validation_directory/validation-00001-of-00128
...
validation_directory/validation-00127-of-00128
Each validation TFRecord file contains ~390 records. Each training TFRecord
file contains ~1250 records. Each record within the TFRecord file is a
serialized Example proto. The Example proto contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
image/format: string, specifying the format, always 'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
image/class/label: integer specifying the index in a classification layer.
The label ranges from [1, 1000] where 0 is not used.
image/class/synset: string specifying the unique ID of the label,
e.g. 'n01440764'
image/class/text: string specifying the human-readable version of the label
e.g. 'red fox, Vulpes vulpes'
image/object/bbox/xmin: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/xmax: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/ymin: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/ymax: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/label: integer specifying the index in a classification
layer. The label ranges from [1, 1000] where 0 is not used. Note this is
always identical to the image label.
Note that the length of xmin is identical to the length of xmax, ymin and ymax
for each example.
Running this script using 16 threads may take around 2.5 hours on an HP Z420.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
import six
import tensorflow as tf
tf.app.flags.DEFINE_string('train_directory', '/tmp/',
'Training data directory')
tf.app.flags.DEFINE_string('validation_directory', '/tmp/',
'Validation data directory')
tf.app.flags.DEFINE_string('output_directory', '/tmp/',
'Output data directory')
tf.app.flags.DEFINE_integer('train_shards', 1024,
'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 128,
'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 8,
'Number of threads to preprocess the images.')
# The labels file contains the list of valid labels.
# Assumes that the file contains entries as such:
# n01440764
# n01443537
# n01484850
# where each line corresponds to a label expressed as a synset. We map
# each synset contained in the file to an integer (based on the alphabetical
# ordering). See below for details.
tf.app.flags.DEFINE_string('labels_file',
'imagenet_lsvrc_2015_synsets.txt',
'Labels file')
# This file contains the mapping from synset to human-readable label.
# Assumes each line of the file looks like:
#
# n02119247 black fox
# n02119359 silver fox
# n02119477 red fox, Vulpes fulva
#
# where each line corresponds to a unique mapping. Note that each line is
# formatted as <synset>\t<human readable label>.
tf.app.flags.DEFINE_string('imagenet_metadata_file',
'imagenet_metadata.txt',
'ImageNet metadata file')
# This file is the output of process_bounding_box.py
# Assumes each line of the file looks like:
#
# n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
#
# where each line corresponds to one bounding box annotation associated
# with an image. Each line can be parsed as:
#
# <JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
#
# Note that there might exist multiple bounding box annotations associated
# with an image file.
tf.app.flags.DEFINE_string('bounding_box_file',
'./imagenet_2012_bounding_boxes.csv',
'Bounding box file')
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _float_feature(value):
"""Wrapper for inserting float features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
if six.PY3 and isinstance(value, six.text_type):
value = six.binary_type(value, encoding='utf-8')
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename, image_buffer, label, synset, human, bbox,
height, width):
"""Build an Example proto for an example.
Args:
filename: string, path to an image file, e.g., '/path/to/example.JPG'
image_buffer: string, JPEG encoding of RGB image
label: integer, identifier for the ground truth for the network
synset: string, unique WordNet ID specifying the label, e.g., 'n02323233'
human: string, human-readable label, e.g., 'red fox, Vulpes vulpes'
bbox: list of bounding boxes; each box is a list of integers
specifying [xmin, ymin, xmax, ymax]. All boxes are assumed to belong to
the same label as the image label.
height: integer, image height in pixels
width: integer, image width in pixels
Returns:
Example proto
"""
xmin = []
ymin = []
xmax = []
ymax = []
for b in bbox:
assert len(b) == 4
# pylint: disable=expression-not-assigned
[l.append(point) for l, point in zip([xmin, ymin, xmax, ymax], b)]
# pylint: enable=expression-not-assigned
colorspace = 'RGB'
channels = 3
image_format = 'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/colorspace': _bytes_feature(colorspace),
'image/channels': _int64_feature(channels),
'image/class/label': _int64_feature(label),
'image/class/synset': _bytes_feature(synset),
'image/class/text': _bytes_feature(human),
'image/object/bbox/xmin': _float_feature(xmin),
'image/object/bbox/xmax': _float_feature(xmax),
'image/object/bbox/ymin': _float_feature(ymin),
'image/object/bbox/ymax': _float_feature(ymax),
'image/object/bbox/label': _int64_feature([label] * len(xmin)),
'image/format': _bytes_feature(image_format),
'image/filename': _bytes_feature(os.path.basename(filename)),
'image/encoded': _bytes_feature(image_buffer)}))
return example
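
# --- Illustrative sketch (editor's addition, not part of the original) ---
# A minimal round trip for _convert_to_example: build one Example with
# hypothetical values (the bytes below are placeholders, not real JPEG data)
# and read a field back after serialization.
def _convert_to_example_sketch():
  example = _convert_to_example(
      filename='/tmp/n01440764_0.JPEG',
      image_buffer=b'\xff\xd8placeholder-jpeg-bytes',
      label=1,
      synset='n01440764',
      human='tench, Tinca tinca',
      bbox=[[0.1, 0.2, 0.8, 0.9]],
      height=224,
      width=224)
  parsed = tf.train.Example.FromString(example.SerializeToString())
  return parsed.features.feature['image/class/label'].int64_list.value[0]  # 1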
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that converts CMYK JPEG data to RGB JPEG data.
self._cmyk_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_jpeg(self._cmyk_data, channels=0)
self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def png_to_jpeg(self, image_data):
return self._sess.run(self._png_to_jpeg,
feed_dict={self._png_data: image_data})
def cmyk_to_rgb(self, image_data):
return self._sess.run(self._cmyk_to_rgb,
feed_dict={self._cmyk_data: image_data})
def decode_jpeg(self, image_data):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
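
# --- Illustrative sketch (editor's addition, not part of the original) ---
# Assumed usage of ImageCoder: read one JPEG from disk (the path is
# hypothetical) and decode it into an RGB uint8 array of shape (H, W, 3).
def _image_coder_sketch(path='/tmp/example.JPEG'):
  coder = ImageCoder()
  with tf.gfile.FastGFile(path, 'rb') as f:
    image_data = f.read()
  image = coder.decode_jpeg(image_data)
  return image.shape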
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
# File list from:
# https://groups.google.com/forum/embed/?place=forum/torch7#!topic/torch7/fOSTXHIESSU
return 'n02105855_2933.JPEG' in filename
def _is_cmyk(filename):
"""Determine if file contains a CMYK JPEG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a JPEG encoded with CMYK color space.
"""
# File list from:
# https://github.com/cytsai/ilsvrc-cmyk-image-list
blacklist = ['n01739381_1309.JPEG', 'n02077923_14822.JPEG',
'n02447366_23489.JPEG', 'n02492035_15739.JPEG',
'n02747177_10752.JPEG', 'n03018349_4028.JPEG',
'n03062245_4620.JPEG', 'n03347037_9675.JPEG',
'n03467068_12171.JPEG', 'n03529860_11437.JPEG',
'n03544143_17228.JPEG', 'n03633091_5218.JPEG',
'n03710637_5125.JPEG', 'n03961711_5286.JPEG',
'n04033995_2932.JPEG', 'n04258138_17003.JPEG',
'n04264628_27969.JPEG', 'n04336792_7448.JPEG',
'n04371774_5854.JPEG', 'n04596742_4225.JPEG',
'n07583066_647.JPEG', 'n13037406_4650.JPEG']
return filename.split('/')[-1] in blacklist
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
with tf.gfile.FastGFile(filename, 'rb') as f:
image_data = f.read()
# Clean the dirty data.
if _is_png(filename):
# 1 image is a PNG.
print('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
elif _is_cmyk(filename):
# 22 JPEG images are in CMYK colorspace.
print('Converting CMYK to RGB for %s' % filename)
image_data = coder.cmyk_to_rgb(image_data)
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
synsets, labels, humans, bboxes, num_shards):
"""Processes and saves list of images as TFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding utils.
    thread_index: integer, unique thread identifier within [0, len(ranges)).
    ranges: list of pairs of integers specifying the ranges of batches to
      analyze in parallel.
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
humans: list of strings; each string is a human-readable label
    bboxes: list of bounding boxes for each image. Note that each entry in this
      list may contain zero or more boxes, one per bounding box annotation
      for the image.
num_shards: integer number of shards for this data set.
"""
# Each thread produces N shards where N = int(num_shards / num_threads).
# For instance, if num_shards = 128, and the num_threads = 2, then the first
# thread would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in range(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_directory, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in files_in_shard:
filename = filenames[i]
label = labels[i]
synset = synsets[i]
human = humans[i]
bbox = bboxes[i]
image_buffer, height, width = _process_image(filename, coder)
example = _convert_to_example(filename, image_buffer, label,
synset, human, bbox,
height, width)
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print('%s [thread %d]: Processed %d of %d images in thread batch.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
writer.close()
print('%s [thread %d]: Wrote %d images to %s' %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print('%s [thread %d]: Wrote %d images to %d shards.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
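
# --- Illustrative sketch (editor's addition, not part of the original) ---
# The shard bookkeeping above in miniature: 100 files, 2 threads, 4 shards.
# Each thread owns 2 shards, and each shard covers an equal slice of that
# thread's file range, e.g. thread 0 writes files [0, 25) and [25, 50).
def _shard_ranges_sketch(num_files=100, num_threads=2, num_shards=4):
  spacing = np.linspace(0, num_files, num_threads + 1).astype(int)
  ranges = [[spacing[i], spacing[i + 1]] for i in range(num_threads)]
  shards_per_thread = num_shards // num_threads
  return [np.linspace(r[0], r[1], shards_per_thread + 1).astype(int)
          for r in ranges]  # [array([0, 25, 50]), array([50, 75, 100])]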
def _process_image_files(name, filenames, synsets, labels, humans,
bboxes, num_shards):
"""Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
humans: list of strings; each string is a human-readable label
    bboxes: list of bounding boxes for each image. Note that each entry in this
      list may contain zero or more boxes, one per bounding box annotation
      for the image.
num_shards: integer number of shards for this data set.
"""
assert len(filenames) == len(synsets)
assert len(filenames) == len(labels)
assert len(filenames) == len(humans)
assert len(filenames) == len(bboxes)
# Break all images into batches with a [ranges[i][0], ranges[i][1]].
  spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(int)
ranges = []
for i in range(len(spacing) - 1):
ranges.append([spacing[i], spacing[i + 1]])
# Launch a thread for each batch.
print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
sys.stdout.flush()
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a generic TensorFlow-based utility for converting all image codings.
coder = ImageCoder()
threads = []
for thread_index in range(len(ranges)):
args = (coder, thread_index, ranges, name, filenames,
synsets, labels, humans, bboxes, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(filenames)))
sys.stdout.flush()
def _find_image_files(data_dir, labels_file):
"""Build a list of all images files and labels in the data set.
Args:
data_dir: string, path to the root directory of images.
Assumes that the ImageNet data set resides in JPEG files located in
the following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
where 'n01440764' is the unique synset label associated with these images.
labels_file: string, path to the labels file.
      The list of valid labels is held in this file. Assumes that the file
contains entries as such:
n01440764
n01443537
n01484850
where each line corresponds to a label expressed as a synset. We map
each synset contained in the file to an integer (based on the alphabetical
ordering) starting with the integer 1 corresponding to the synset
contained in the first line.
The reason we start the integer labels at 1 is to reserve label 0 as an
unused background class.
Returns:
filenames: list of strings; each string is a path to an image file.
synsets: list of strings; each string is a unique WordNet ID.
labels: list of integer; each integer identifies the ground truth.
"""
print('Determining list of input files and labels from %s.' % data_dir)
challenge_synsets = [l.strip() for l in
tf.gfile.FastGFile(labels_file, 'r').readlines()]
labels = []
filenames = []
synsets = []
# Leave label index 0 empty as a background class.
label_index = 1
# Construct the list of JPEG files and labels.
for synset in challenge_synsets:
jpeg_file_path = '%s/%s/*.JPEG' % (data_dir, synset)
matching_files = tf.gfile.Glob(jpeg_file_path)
labels.extend([label_index] * len(matching_files))
synsets.extend([synset] * len(matching_files))
filenames.extend(matching_files)
if not label_index % 100:
print('Finished finding files in %d of %d classes.' % (
label_index, len(challenge_synsets)))
label_index += 1
# Shuffle the ordering of all image files in order to guarantee
# random ordering of the images with respect to label in the
# saved TFRecord files. Make the randomization repeatable.
shuffled_index = list(range(len(filenames)))
random.seed(12345)
random.shuffle(shuffled_index)
filenames = [filenames[i] for i in shuffled_index]
synsets = [synsets[i] for i in shuffled_index]
labels = [labels[i] for i in shuffled_index]
print('Found %d JPEG files across %d labels inside %s.' %
(len(filenames), len(challenge_synsets), data_dir))
return filenames, synsets, labels
def _find_human_readable_labels(synsets, synset_to_human):
"""Build a list of human-readable labels.
Args:
synsets: list of strings; each string is a unique WordNet ID.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
Returns:
List of human-readable strings corresponding to each synset.
"""
humans = []
for s in synsets:
assert s in synset_to_human, ('Failed to find: %s' % s)
humans.append(synset_to_human[s])
return humans
def _find_image_bounding_boxes(filenames, image_to_bboxes):
"""Find the bounding boxes for a given image file.
Args:
filenames: list of strings; each string is a path to an image file.
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
Returns:
    List of bounding boxes for each image. Note that each entry in this
      list may contain zero or more boxes, one per bounding box annotation
      for the image.
"""
num_image_bbox = 0
bboxes = []
for f in filenames:
basename = os.path.basename(f)
if basename in image_to_bboxes:
bboxes.append(image_to_bboxes[basename])
num_image_bbox += 1
else:
bboxes.append([])
print('Found %d images with bboxes out of %d images' % (
num_image_bbox, len(filenames)))
return bboxes
def _process_dataset(name, directory, num_shards, synset_to_human,
image_to_bboxes):
"""Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
num_shards: integer number of shards for this data set.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
"""
filenames, synsets, labels = _find_image_files(directory, FLAGS.labels_file)
humans = _find_human_readable_labels(synsets, synset_to_human)
bboxes = _find_image_bounding_boxes(filenames, image_to_bboxes)
_process_image_files(name, filenames, synsets, labels,
humans, bboxes, num_shards)
def _build_synset_lookup(imagenet_metadata_file):
"""Build lookup for synset to human-readable label.
Args:
imagenet_metadata_file: string, path to file containing mapping from
synset to human-readable label.
Assumes each line of the file looks like:
n02119247 black fox
n02119359 silver fox
n02119477 red fox, Vulpes fulva
where each line corresponds to a unique mapping. Note that each line is
formatted as <synset>\t<human readable label>.
Returns:
Dictionary of synset to human labels, such as:
'n02119022' --> 'red fox, Vulpes vulpes'
"""
lines = tf.gfile.FastGFile(imagenet_metadata_file, 'r').readlines()
synset_to_human = {}
for l in lines:
if l:
parts = l.strip().split('\t')
assert len(parts) == 2
synset = parts[0]
human = parts[1]
synset_to_human[synset] = human
return synset_to_human
def _build_bounding_box_lookup(bounding_box_file):
"""Build a lookup from image file to bounding boxes.
Args:
bounding_box_file: string, path to file with bounding boxes annotations.
Assumes each line of the file looks like:
n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
where each line corresponds to one bounding box annotation associated
with an image. Each line can be parsed as:
<JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
    Note that there might exist multiple bounding box annotations associated
with an image file. This file is the output of process_bounding_boxes.py.
Returns:
Dictionary mapping image file names to a list of bounding boxes. This list
contains 0+ bounding boxes.
"""
lines = tf.gfile.FastGFile(bounding_box_file, 'r').readlines()
images_to_bboxes = {}
num_bbox = 0
num_image = 0
for l in lines:
if l:
parts = l.split(',')
assert len(parts) == 5, ('Failed to parse: %s' % l)
filename = parts[0]
xmin = float(parts[1])
ymin = float(parts[2])
xmax = float(parts[3])
ymax = float(parts[4])
box = [xmin, ymin, xmax, ymax]
if filename not in images_to_bboxes:
images_to_bboxes[filename] = []
num_image += 1
images_to_bboxes[filename].append(box)
num_bbox += 1
print('Successfully read %d bounding boxes '
'across %d images.' % (num_bbox, num_image))
return images_to_bboxes
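
# --- Illustrative sketch (editor's addition, not part of the original) ---
# Shape of the lookup built above, for one hypothetical CSV line: the image
# file name maps to a list of [xmin, ymin, xmax, ymax] boxes.
def _bbox_lookup_sketch():
  line = 'n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940'
  parts = line.split(',')
  return {parts[0]: [[float(v) for v in parts[1:]]]}
  # {'n00007846_64193.JPEG': [[0.006, 0.262, 0.7545, 0.994]]}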
def main(unused_argv):
assert not FLAGS.train_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
assert not FLAGS.validation_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with '
'FLAGS.validation_shards')
print('Saving results to %s' % FLAGS.output_directory)
# Build a map from synset to human-readable label.
synset_to_human = _build_synset_lookup(FLAGS.imagenet_metadata_file)
image_to_bboxes = _build_bounding_box_lookup(FLAGS.bounding_box_file)
# Run it!
_process_dataset('validation', FLAGS.validation_directory,
FLAGS.validation_shards, synset_to_human, image_to_bboxes)
_process_dataset('train', FLAGS.train_directory, FLAGS.train_shards,
synset_to_human, image_to_bboxes)
if __name__ == '__main__':
tf.app.run()
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/dataprep/build_imagenet_data.py |
#!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Process the ImageNet Challenge bounding boxes for TensorFlow model training.
This script is called as
process_bounding_boxes.py <dir> [synsets-file]
Where <dir> is a directory containing the downloaded and unpacked bounding box
data. If [synsets-file] is supplied, then only the bounding boxes whose
synsets are contained within this file are returned. Note that the
[synsets-file] file contains synset ids, one per line.
The script dumps out a CSV text file in which each line contains an entry.
n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
The entry can be read as:
<JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
The bounding box for <JPEG file name> contains two points (xmin, ymin) and
(xmax, ymax) specifying the lower-left corner and upper-right corner of a
bounding box in *relative* coordinates.
The user supplies a directory where the XML files reside. The directory
structure in the directory <dir> is assumed to look like this:
<dir>/nXXXXXXXX/nXXXXXXXX_YYYY.xml
Each XML file contains a bounding box annotation. The script:
(1) Parses the XML file and extracts the filename, label and bounding box info.
(2) The bounding box is specified in the XML files as integer (xmin, ymin) and
(xmax, ymax) *relative* to image size displayed to the human annotator. The
size of the image displayed to the human annotator is stored in the XML file
as integer (height, width).
Note that the displayed size will differ from the actual size of the image
downloaded from image-net.org. To make the bounding box annotation usable,
we convert the bounding box to floating point numbers relative to the
displayed height and width of the image.
Note that each XML file might contain N bounding box annotations.
Note that the points are all clamped at a range of [0.0, 1.0] because some
human annotations extend outside the range of the supplied image.
See details here: http://image-net.org/download-bboxes
(3) By default, the script outputs all valid bounding boxes. If a
[synsets-file] is supplied, only the subset of bounding boxes associated
with those synsets is output. Importantly, one can supply a list of
synsets in the ImageNet Challenge and output the list of bounding boxes
associated with the training images of the ILSVRC.
We use these bounding boxes to inform the random distortion of images
supplied to the network.
If you run this script successfully, you will see the following output
to stderr:
> Finished processing 544546 XML files.
> Skipped 0 XML files not in ImageNet Challenge.
> Skipped 0 bounding boxes not in ImageNet Challenge.
> Wrote 615299 bounding boxes from 544546 annotated images.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os.path
import sys
import xml.etree.ElementTree as ET
class BoundingBox(object):
pass
def GetItem(name, root, index=0):
count = 0
for item in root.iter(name):
if count == index:
return item.text
count += 1
# Failed to find "index" occurrence of item.
return -1
def GetInt(name, root, index=0):
# In some XML annotation files, the point values are not integers, but floats.
# So we add a float function to avoid ValueError.
return int(float(GetItem(name, root, index)))
def FindNumberBoundingBoxes(root):
index = 0
while True:
if GetInt('xmin', root, index) == -1:
break
index += 1
return index
def ProcessXMLAnnotation(xml_file):
"""Process a single XML file containing a bounding box."""
# pylint: disable=broad-except
try:
tree = ET.parse(xml_file)
except Exception:
print('Failed to parse: ' + xml_file, file=sys.stderr)
return None
# pylint: enable=broad-except
root = tree.getroot()
num_boxes = FindNumberBoundingBoxes(root)
boxes = []
for index in range(num_boxes):
box = BoundingBox()
# Grab the 'index' annotation.
box.xmin = GetInt('xmin', root, index)
box.ymin = GetInt('ymin', root, index)
box.xmax = GetInt('xmax', root, index)
box.ymax = GetInt('ymax', root, index)
box.width = GetInt('width', root)
box.height = GetInt('height', root)
box.filename = GetItem('filename', root) + '.JPEG'
box.label = GetItem('name', root)
xmin = float(box.xmin) / float(box.width)
xmax = float(box.xmax) / float(box.width)
ymin = float(box.ymin) / float(box.height)
ymax = float(box.ymax) / float(box.height)
# Some images contain bounding box annotations that
# extend outside of the supplied image. See, e.g.
# n03127925/n03127925_147.xml
# Additionally, for some bounding boxes, the min > max
# or the box is entirely outside of the image.
min_x = min(xmin, xmax)
max_x = max(xmin, xmax)
box.xmin_scaled = min(max(min_x, 0.0), 1.0)
box.xmax_scaled = min(max(max_x, 0.0), 1.0)
min_y = min(ymin, ymax)
max_y = max(ymin, ymax)
box.ymin_scaled = min(max(min_y, 0.0), 1.0)
box.ymax_scaled = min(max(max_y, 0.0), 1.0)
boxes.append(box)
return boxes
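
# --- Illustrative sketch (editor's addition, not part of the original) ---
# The normalize-and-clamp step above, on one hypothetical box whose
# annotation spills outside a 500-pixel-wide image: coordinates are divided
# by the displayed width and then clipped into [0.0, 1.0].
def ClampSketch(xmin=-12, xmax=510, width=500):
  lo = min(xmin, xmax) / float(width)
  hi = max(xmin, xmax) / float(width)
  return (min(max(lo, 0.0), 1.0), min(max(hi, 0.0), 1.0))  # (0.0, 1.0)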
if __name__ == '__main__':
if len(sys.argv) < 2 or len(sys.argv) > 3:
print('Invalid usage\n'
'usage: process_bounding_boxes.py <dir> [synsets-file]',
file=sys.stderr)
sys.exit(-1)
xml_files = glob.glob(sys.argv[1] + '/*/*.xml')
print('Identified %d XML files in %s' % (len(xml_files), sys.argv[1]),
file=sys.stderr)
if len(sys.argv) == 3:
labels = set([l.strip() for l in open(sys.argv[2]).readlines()])
print('Identified %d synset IDs in %s' % (len(labels), sys.argv[2]),
file=sys.stderr)
else:
labels = None
skipped_boxes = 0
skipped_files = 0
saved_boxes = 0
saved_files = 0
for file_index, one_file in enumerate(xml_files):
# Example: <...>/n06470073/n00141669_6790.xml
label = os.path.basename(os.path.dirname(one_file))
# Determine if the annotation is from an ImageNet Challenge label.
if labels is not None and label not in labels:
skipped_files += 1
continue
bboxes = ProcessXMLAnnotation(one_file)
assert bboxes is not None, 'No bounding boxes found in ' + one_file
found_box = False
for bbox in bboxes:
if labels is not None:
if bbox.label != label:
# Note: There is a slight bug in the bounding box annotation data.
# Many of the dog labels have the human label 'Scottish_deerhound'
# instead of the synset ID 'n02092002' in the bbox.label field. As a
# simple hack to overcome this issue, we only exclude bbox labels
# *which are synset ID's* that do not match original synset label for
# the XML file.
if bbox.label in labels:
skipped_boxes += 1
continue
# Guard against improperly specified boxes.
if (bbox.xmin_scaled >= bbox.xmax_scaled or
bbox.ymin_scaled >= bbox.ymax_scaled):
skipped_boxes += 1
continue
# Note bbox.filename occasionally contains '%s' in the name. This is
# data set noise that is fixed by just using the basename of the XML file.
image_filename = os.path.splitext(os.path.basename(one_file))[0]
print('%s.JPEG,%.4f,%.4f,%.4f,%.4f' %
(image_filename,
bbox.xmin_scaled, bbox.ymin_scaled,
bbox.xmax_scaled, bbox.ymax_scaled))
saved_boxes += 1
found_box = True
if found_box:
saved_files += 1
else:
skipped_files += 1
if not file_index % 5000:
print('--> processed %d of %d XML files.' %
(file_index + 1, len(xml_files)),
file=sys.stderr)
print('--> skipped %d boxes and %d XML files.' %
(skipped_boxes, skipped_files), file=sys.stderr)
print('Finished processing %d XML files.' % len(xml_files), file=sys.stderr)
print('Skipped %d XML files not in ImageNet Challenge.' % skipped_files,
file=sys.stderr)
print('Skipped %d bounding boxes not in ImageNet Challenge.' % skipped_boxes,
file=sys.stderr)
print('Wrote %d bounding boxes from %d annotated images.' %
(saved_boxes, saved_files),
file=sys.stderr)
print('Finished.', file=sys.stderr)
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/dataprep/process_bounding_boxes.py |
#!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts image data to TFRecords file format with Example protos.
The image data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/label_0/image0.jpeg
data_dir/label_0/image1.jpg
...
data_dir/label_1/weird-image.jpeg
data_dir/label_1/my-image.jpeg
...
where the sub-directory is the unique label associated with these images.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of TFRecord files
train_directory/train-00000-of-01024
train_directory/train-00001-of-01024
...
train_directory/train-01023-of-01024
and
validation_directory/validation-00000-of-00128
validation_directory/validation-00001-of-00128
...
validation_directory/validation-00127-of-00128
where we have selected 1024 and 128 shards for each data set. Each record
within the TFRecord file is a serialized Example proto. The Example proto
contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
image/format: string, specifying the format, always 'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
image/class/label: integer specifying the index in a classification layer.
    The label ranges over [0, num_labels], where 0 is unused and left as
    the background class.
image/class/text: string specifying the human-readable version of the label
e.g. 'dog'
If your data set involves bounding boxes, please look at build_imagenet_data.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
import tensorflow as tf
tf.app.flags.DEFINE_string('train_directory', '/tmp/',
'Training data directory')
tf.app.flags.DEFINE_string('validation_directory', '/tmp/',
'Validation data directory')
tf.app.flags.DEFINE_string('output_directory', '/tmp/',
'Output data directory')
tf.app.flags.DEFINE_integer('train_shards', 2,
'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 2,
'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 2,
'Number of threads to preprocess the images.')
# The labels file contains the list of valid labels, one per line.
# Assumes that the file contains entries as such:
# dog
# cat
# flower
# where each line corresponds to a label. We map each label contained in
# the file to an integer starting from 1 (label index 0 is left unused as
# a background class; see _find_image_files below).
tf.app.flags.DEFINE_string('labels_file', '', 'Labels file')
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename, image_buffer, label, text, height, width):
"""Build an Example proto for an example.
Args:
filename: string, path to an image file, e.g., '/path/to/example.JPG'
image_buffer: string, JPEG encoding of RGB image
label: integer, identifier for the ground truth for the network
text: string, unique human-readable, e.g. 'dog'
height: integer, image height in pixels
width: integer, image width in pixels
Returns:
Example proto
"""
colorspace = 'RGB'
channels = 3
image_format = 'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/colorspace': _bytes_feature(tf.compat.as_bytes(colorspace)),
'image/channels': _int64_feature(channels),
'image/class/label': _int64_feature(label),
'image/class/text': _bytes_feature(tf.compat.as_bytes(text)),
'image/format': _bytes_feature(tf.compat.as_bytes(image_format)),
'image/filename': _bytes_feature(tf.compat.as_bytes(os.path.basename(filename))),
'image/encoded': _bytes_feature(tf.compat.as_bytes(image_buffer))}))
return example
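
# --- Illustrative sketch (editor's addition, not part of the original) ---
# One assumed way to read back a record written by this script; the feature
# spec mirrors _convert_to_example above (TF1-style parsing, subset of keys).
def _parse_record_sketch(serialized):
  features = tf.parse_single_example(serialized, {
      'image/encoded': tf.FixedLenFeature([], tf.string),
      'image/class/label': tf.FixedLenFeature([], tf.int64),
      'image/height': tf.FixedLenFeature([], tf.int64),
      'image/width': tf.FixedLenFeature([], tf.int64),
  })
  image = tf.image.decode_jpeg(features['image/encoded'], channels=3)
  return image, features['image/class/label']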
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def png_to_jpeg(self, image_data):
return self._sess.run(self._png_to_jpeg,
feed_dict={self._png_data: image_data})
def decode_jpeg(self, image_data):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
return filename.endswith('.png')
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
with tf.gfile.FastGFile(filename, 'rb') as f:
image_data = f.read()
# Convert any PNG to JPEG's for consistency.
if _is_png(filename):
print('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
texts, labels, num_shards):
"""Processes and saves list of images as TFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding utils.
    thread_index: integer, unique thread identifier within [0, len(ranges)).
    ranges: list of pairs of integers specifying the ranges of batches to
      analyze in parallel.
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
texts: list of strings; each string is human readable, e.g. 'dog'
labels: list of integer; each integer identifies the ground truth
num_shards: integer number of shards for this data set.
"""
# Each thread produces N shards where N = int(num_shards / num_threads).
# For instance, if num_shards = 128, and the num_threads = 2, then the first
# thread would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in range(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_directory, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in files_in_shard:
filename = filenames[i]
label = labels[i]
text = texts[i]
try:
image_buffer, height, width = _process_image(filename, coder)
except Exception as e:
print(e)
print('SKIPPED: Unexpected error while decoding %s.' % filename)
continue
example = _convert_to_example(filename, image_buffer, label,
text, height, width)
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print('%s [thread %d]: Processed %d of %d images in thread batch.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
writer.close()
print('%s [thread %d]: Wrote %d images to %s' %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print('%s [thread %d]: Wrote %d images to %d shards.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
def _process_image_files(name, filenames, texts, labels, num_shards):
"""Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
texts: list of strings; each string is human readable, e.g. 'dog'
labels: list of integer; each integer identifies the ground truth
num_shards: integer number of shards for this data set.
"""
assert len(filenames) == len(texts)
assert len(filenames) == len(labels)
# Break all images into batches with a [ranges[i][0], ranges[i][1]].
  spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(int)
ranges = []
for i in range(len(spacing) - 1):
ranges.append([spacing[i], spacing[i + 1]])
# Launch a thread for each batch.
print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
sys.stdout.flush()
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a generic TensorFlow-based utility for converting all image codings.
coder = ImageCoder()
threads = []
for thread_index in range(len(ranges)):
args = (coder, thread_index, ranges, name, filenames,
texts, labels, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(filenames)))
sys.stdout.flush()
def _find_image_files(data_dir, labels_file):
"""Build a list of all images files and labels in the data set.
Args:
data_dir: string, path to the root directory of images.
Assumes that the image data set resides in JPEG files located in
the following directory structure.
data_dir/dog/another-image.JPEG
data_dir/dog/my-image.jpg
where 'dog' is the label associated with these images.
labels_file: string, path to the labels file.
      The list of valid labels is held in this file. Assumes that the file
contains entries as such:
dog
cat
flower
      where each line corresponds to a label. We map each label contained in
      the file to an integer starting with the integer 1 corresponding to the
      label in the first line (index 0 is left as a background class).
Returns:
filenames: list of strings; each string is a path to an image file.
texts: list of strings; each string is the class, e.g. 'dog'
labels: list of integer; each integer identifies the ground truth.
"""
print('Determining list of input files and labels from %s.' % data_dir)
unique_labels = [l.strip() for l in tf.gfile.FastGFile(
labels_file, 'r').readlines()]
labels = []
filenames = []
texts = []
# Leave label index 0 empty as a background class.
label_index = 1
# Construct the list of JPEG files and labels.
for text in unique_labels:
jpeg_file_path = '%s/%s/*' % (data_dir, text)
matching_files = tf.gfile.Glob(jpeg_file_path)
labels.extend([label_index] * len(matching_files))
texts.extend([text] * len(matching_files))
filenames.extend(matching_files)
if not label_index % 100:
print('Finished finding files in %d of %d classes.' % (
          label_index, len(unique_labels)))
label_index += 1
# Shuffle the ordering of all image files in order to guarantee
# random ordering of the images with respect to label in the
# saved TFRecord files. Make the randomization repeatable.
shuffled_index = list(range(len(filenames)))
random.seed(12345)
random.shuffle(shuffled_index)
filenames = [filenames[i] for i in shuffled_index]
texts = [texts[i] for i in shuffled_index]
labels = [labels[i] for i in shuffled_index]
print('Found %d JPEG files across %d labels inside %s.' %
(len(filenames), len(unique_labels), data_dir))
return filenames, texts, labels
def _process_dataset(name, directory, num_shards, labels_file):
"""Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
num_shards: integer number of shards for this data set.
labels_file: string, path to the labels file.
"""
filenames, texts, labels = _find_image_files(directory, labels_file)
_process_image_files(name, filenames, texts, labels, num_shards)
def main(unused_argv):
assert not FLAGS.train_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
assert not FLAGS.validation_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with '
'FLAGS.validation_shards')
print('Saving results to %s' % FLAGS.output_directory)
# Run it!
_process_dataset('validation', FLAGS.validation_directory,
FLAGS.validation_shards, FLAGS.labels_file)
_process_dataset('train', FLAGS.train_directory,
FLAGS.train_shards, FLAGS.labels_file)
if __name__ == '__main__':
tf.app.run()
| DeepLearningExamples-master | TensorFlow/Classification/ConvNets/dataprep/build_image_data.py |
# -----------------------------------------------------------------------
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import cupy as cp
def generate_negatives(neg_users, true_mat, item_range, sort=False, use_trick=False):
"""
Generate negative samples for data augmentation
"""
neg_u = []
neg_i = []
# If using the shortcut, generate negative items without checking if the associated
# user has interacted with it. Speeds up training significantly with very low impact
# on accuracy.
if use_trick:
neg_items = cp.random.randint(0, high=item_range, size=neg_users.shape[0])
return neg_users, neg_items
# Otherwise, generate negative items, check if associated user has interacted with it,
# then generate a new one if true
while len(neg_users) > 0:
neg_items = cp.random.randint(0, high=item_range, size=neg_users.shape[0])
neg_mask = true_mat[neg_users, neg_items]
neg_u.append(neg_users[neg_mask])
neg_i.append(neg_items[neg_mask])
neg_users = neg_users[cp.logical_not(neg_mask)]
neg_users = cp.concatenate(neg_u)
neg_items = cp.concatenate(neg_i)
if not sort:
return neg_users, neg_items
sorted_users = cp.sort(neg_users)
sort_indices = cp.argsort(neg_users)
return sorted_users, neg_items[sort_indices]
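
# --- Illustrative sketch (editor's addition, not part of the original) ---
# generate_negatives on a toy 2-user x 4-item problem. True in true_mat marks
# (user, item) pairs that are valid negatives (no prior interaction), so the
# rejection loop above resamples until every drawn item is allowed.
def _generate_negatives_sketch():
    true_mat = cp.array([[False, True, True, True],
                         [True, False, True, True]])
    neg_users = cp.array([0, 0, 1, 1])
    return generate_negatives(neg_users, true_mat, item_range=4)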
class DataGenerator:
"""
Class to handle data augmentation
"""
def __init__(self,
seed,
hvd_rank,
num_users, # type: int
num_items, # type: int
neg_mat, # type: np.ndarray
train_users, # type: np.ndarray
train_items, # type: np.ndarray
train_labels, # type: np.ndarray
train_batch_size, # type: int
train_negative_samples, # type: int
pos_eval_users, # type: np.ndarray
pos_eval_items, # type: np.ndarray
eval_users_per_batch, # type: int
eval_negative_samples, # type: int
):
# Check input data
if train_users.shape != train_items.shape:
raise ValueError(
"Train shapes mismatch! {} Users vs {} Items!".format(
train_users.shape, train_items.shape))
if pos_eval_users.shape != pos_eval_items.shape:
raise ValueError(
"Eval shapes mismatch! {} Users vs {} Items!".format(
pos_eval_users.shape, pos_eval_items.shape))
np.random.seed(seed)
cp.random.seed(seed)
# Use GPU assigned to the horovod rank
self.hvd_rank = hvd_rank
cp.cuda.Device(self.hvd_rank).use()
self.num_users = num_users
self.num_items = num_items
self._neg_mat = neg_mat
self._train_users = cp.array(train_users)
self._train_items = cp.array(train_items)
self._train_labels = cp.array(train_labels)
self.train_batch_size = train_batch_size
self._train_negative_samples = train_negative_samples
self._pos_eval_users = pos_eval_users
self._pos_eval_items = pos_eval_items
self.eval_users_per_batch = eval_users_per_batch
self._eval_negative_samples = eval_negative_samples
# Eval data
self.eval_users = None
self.eval_items = None
self.dup_mask = None
# Training data
self.train_users_batches = None
self.train_items_batches = None
self.train_labels_batches = None
# Augment test data with negative samples
def prepare_eval_data(self):
pos_eval_users = cp.array(self._pos_eval_users)
pos_eval_items = cp.array(self._pos_eval_items)
neg_mat = cp.array(self._neg_mat)
neg_eval_users_base = cp.repeat(pos_eval_users, self._eval_negative_samples)
# Generate negative samples
test_u_neg, test_i_neg = generate_negatives(neg_users=neg_eval_users_base, true_mat=neg_mat,
item_range=self.num_items, sort=True, use_trick=False)
test_u_neg = test_u_neg.reshape((-1, self._eval_negative_samples)).get()
test_i_neg = test_i_neg.reshape((-1, self._eval_negative_samples)).get()
test_users = self._pos_eval_users.reshape((-1, 1))
test_items = self._pos_eval_items.reshape((-1, 1))
# Combine positive and negative samples
test_users = np.concatenate((test_u_neg, test_users), axis=1)
test_items = np.concatenate((test_i_neg, test_items), axis=1)
# Generate duplicate mask
## Stable sort indices by incrementing all values with fractional position
indices = np.arange(test_users.shape[1]).reshape((1, -1)).repeat(test_users.shape[0], axis=0)
summed_items = np.add(test_items, indices/test_users.shape[1])
sorted_indices = np.argsort(summed_items, axis=1)
sorted_order = np.argsort(sorted_indices, axis=1)
sorted_items = np.sort(test_items, axis=1)
## Generate duplicate mask
dup_mask = np.equal(sorted_items[:,0:-1], sorted_items[:,1:])
dup_mask = np.concatenate((dup_mask, np.zeros((test_users.shape[0], 1))), axis=1)
r_indices = np.arange(test_users.shape[0]).reshape((-1, 1)).repeat(test_users.shape[1], axis=1)
dup_mask = dup_mask[r_indices, sorted_order].astype(np.float32)
# Reshape all to (-1) and split into chunks
batch_size = self.eval_users_per_batch * test_users.shape[1]
split_indices = np.arange(batch_size, test_users.shape[0]*test_users.shape[1], batch_size)
self.eval_users = np.split(test_users.reshape(-1), split_indices)
self.eval_items = np.split(test_items.reshape(-1), split_indices)
self.dup_mask = np.split(dup_mask.reshape(-1), split_indices)
# Free GPU memory to make space for Tensorflow
cp.get_default_memory_pool().free_all_blocks()
# Augment training data with negative samples
def prepare_train_data(self):
batch_size = self.train_batch_size
is_neg = cp.logical_not(self._train_labels)
# Do not store verification matrix if using the negatives generation shortcut
neg_mat = None
# If there are no negative samples in the local portion of the training data, do nothing
any_neg = cp.any(is_neg)
if any_neg:
self._train_users[is_neg], self._train_items[is_neg] = generate_negatives(
self._train_users[is_neg], neg_mat, self.num_items, use_trick=True
)
shuffled_order = cp.random.permutation(self._train_users.shape[0])
self._train_users = self._train_users[shuffled_order]
self._train_items = self._train_items[shuffled_order]
self._train_labels = self._train_labels[shuffled_order]
# Manually create batches
split_indices = np.arange(batch_size, self._train_users.shape[0], batch_size)
self.train_users_batches = np.split(self._train_users, split_indices)
self.train_items_batches = np.split(self._train_items, split_indices)
self.train_labels_batches = np.split(self._train_labels, split_indices)
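
# --- Illustrative sketch (editor's addition, not part of the original) ---
# The duplicate-mask trick from prepare_eval_data on a single toy row. The
# fractional offset keeps the argsort stable, so among equal items the
# right-most column (where the positive item is stored) sorts last and keeps
# a zero flag, while earlier duplicates are masked.
def _dup_mask_sketch():
    test_items = np.array([[5, 3, 5, 7]])  # last column = positive item
    indices = np.arange(test_items.shape[1]).reshape((1, -1))
    summed = test_items + indices / test_items.shape[1]
    sorted_order = np.argsort(np.argsort(summed, axis=1), axis=1)
    sorted_items = np.sort(test_items, axis=1)
    dup = np.concatenate(
        (np.equal(sorted_items[:, :-1], sorted_items[:, 1:]),
         np.zeros((test_items.shape[0], 1))), axis=1)
    rows = np.arange(test_items.shape[0]).reshape((-1, 1))
    return dup[rows, sorted_order]  # [[1., 0., 0., 0.]]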
| DeepLearningExamples-master | TensorFlow/Recommendation/NCF/input_pipeline.py |
# Copyright (c) 2018, deepakn94, codyaustun, robieta. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------
#
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import time
from argparse import ArgumentParser
import tensorflow as tf
import pandas as pd
import numpy as np
import cupy as cp
import horovod.tensorflow as hvd
from mpi4py import MPI
from neumf import ncf_model_ops
from input_pipeline import DataGenerator
import dllogger
def parse_args():
"""
Parse command line arguments.
"""
parser = ArgumentParser(description="Train a Neural Collaborative"
" Filtering model")
parser.add_argument('--data', type=str,
help='path to test and training data files')
parser.add_argument('-e', '--epochs', type=int, default=30,
help='number of epochs to train for')
parser.add_argument('-b', '--batch-size', type=int, default=1048576,
help='number of examples for each iteration')
parser.add_argument('--valid-users-per-batch', type=int, default=5000,
help='Number of users tested in each evaluation batch')
parser.add_argument('-f', '--factors', type=int, default=64,
help='number of predictive factors')
parser.add_argument('--layers', nargs='+', type=int,
default=[256, 256, 128, 64],
help='size of hidden layers for MLP')
parser.add_argument('-n', '--negative-samples', type=int, default=4,
help='number of negative examples per interaction')
parser.add_argument('-l', '--learning-rate', type=float, default=0.0045,
help='learning rate for optimizer')
parser.add_argument('-k', '--topk', type=int, default=10,
help='rank for test examples to be considered a hit')
parser.add_argument('--seed', '-s', type=int, default=None,
help='manually set random seed for random number generation')
parser.add_argument('--target', '-t', type=float, default=0.9562,
help='stop training early at target')
parser.add_argument('--amp', action='store_true', dest='amp', default=False,
help='enable half-precision computations using automatic mixed precision \
(only available in supported containers)')
parser.add_argument('--xla', action='store_true',
help='enable TensorFlow XLA (Accelerated Linear Algebra)')
parser.add_argument('--valid-negative', type=int, default=100,
help='Number of negative samples for each positive test example')
parser.add_argument('--beta1', '-b1', type=float, default=0.25,
help='beta1 for Adam')
parser.add_argument('--beta2', '-b2', type=float, default=0.5,
help='beta2 for Adam')
parser.add_argument('--eps', type=float, default=1e-8,
help='epsilon for Adam')
parser.add_argument('--dropout', type=float, default=0.5,
help='Dropout probability, if equal to 0 will not use dropout at all')
parser.add_argument('--loss-scale', default=8192, type=int,
help='Loss scale value to use when manually enabling mixed precision')
parser.add_argument('--checkpoint-dir', default=None, type=str,
help='Path to the store the result checkpoint file for training')
parser.add_argument('--load-checkpoint-path', default=None, type=str,
help='Path to the checkpoint for initialization. If None will initialize with random weights')
parser.add_argument('--mode', choices=['train', 'test'], default='train', type=str,
help='Passing "test" will only run a single evaluation, \
otherwise full training will be performed')
parser.add_argument('--eval-after', type=int, default=8,
help='Perform evaluations only after this many epochs')
parser.add_argument('--log-path', default='log.json', type=str,
help='Path for the JSON training log')
return parser.parse_args()
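
# --- Illustrative sketch (editor's addition, not part of the original) ---
# Example invocation of this script (script name, data path, and worker
# count are hypothetical); all flags are defined in parse_args above:
#
#   mpirun -np 8 python ncf.py --data /data/cache/ml-20m --epochs 30 --amp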
def hvd_init():
"""
Initialize Horovod
"""
# Reduce logging
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf.logging.set_verbosity(tf.logging.ERROR)
# Initialize horovod
hvd.init()
if hvd.rank() == 0:
print('PY', sys.version)
print('TF', tf.__version__)
def get_local_train_data(pos_train_users, pos_train_items, negative_samples):
"""
For distributed, split up the train data and only keep the local portion
"""
num_pos_samples = pos_train_users.shape[0]
# Create the entire train set
all_train_users = np.tile(pos_train_users, negative_samples+1)
all_train_items = np.tile(pos_train_items, negative_samples+1)
all_train_labels = np.zeros_like(all_train_users, dtype=np.float32)
all_train_labels[:num_pos_samples] = 1.0
# Get local training set
split_size = all_train_users.shape[0] // hvd.size() + 1
split_indices = np.arange(split_size, all_train_users.shape[0], split_size)
all_train_users_splits = np.split(all_train_users, split_indices)
all_train_items_splits = np.split(all_train_items, split_indices)
all_train_labels_splits = np.split(all_train_labels, split_indices)
assert len(all_train_users_splits) == hvd.size()
local_train_users = all_train_users_splits[hvd.rank()]
local_train_items = all_train_items_splits[hvd.rank()]
local_train_labels = all_train_labels_splits[hvd.rank()]
return local_train_users, local_train_items, local_train_labels
def get_local_test_data(pos_test_users, pos_test_items):
"""
For distributed, split up the test data and only keep the local portion
"""
split_size = pos_test_users.shape[0] // hvd.size() + 1
split_indices = np.arange(split_size, pos_test_users.shape[0], split_size)
test_users_splits = np.split(pos_test_users, split_indices)
test_items_splits = np.split(pos_test_items, split_indices)
assert len(test_users_splits) == hvd.size()
local_test_users = test_users_splits[hvd.rank()]
local_test_items = test_items_splits[hvd.rank()]
return local_test_users, local_test_items
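
# --- Illustrative sketch (editor's addition, not part of the original) ---
# The per-rank split used by both helpers above, in plain numpy: 10 samples
# across a hypothetical 3-worker job become chunks of size 4/4/2, and each
# rank keeps exactly one chunk.
def _local_split_sketch(num_samples=10, world_size=3, rank=1):
    data = np.arange(num_samples)
    split_size = num_samples // world_size + 1
    chunks = np.split(data, np.arange(split_size, num_samples, split_size))
    assert len(chunks) == world_size
    return chunks[rank]  # array([4, 5, 6, 7])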
def main():
script_start = time.time()
hvd_init()
mpi_comm = MPI.COMM_WORLD
args = parse_args()
if hvd.rank() == 0:
dllogger.init(backends=[dllogger.JSONStreamBackend(verbosity=dllogger.Verbosity.VERBOSE,
filename=args.log_path),
dllogger.StdOutBackend(verbosity=dllogger.Verbosity.VERBOSE)])
else:
dllogger.init(backends=[])
dllogger.metadata("best_epoch", {"unit": None})
dllogger.metadata("first_epoch_to_hit", {"unit": None})
dllogger.metadata("best_hr", {"unit": None})
dllogger.metadata("average_eval_time_per_epoch", {"unit": "s"})
dllogger.metadata("average_train_time_per_epoch", {"unit": "s"})
dllogger.metadata("average_train_throughput", {"unit": "samples/s"})
dllogger.metadata("average_eval_throughput", {"unit": "samples/s"})
args.world_size = hvd.size()
dllogger.log(data=vars(args), step='PARAMETER')
if args.seed is None:
if hvd.rank() == 0:
seed = int(time.time())
else:
seed = None
seed = mpi_comm.bcast(seed, root=0)
else:
seed = args.seed
tf.random.set_random_seed(seed)
np.random.seed(seed)
cp.random.seed(seed)
if args.amp:
os.environ["TF_ENABLE_AUTO_MIXED_PRECISION"] = "1"
if args.checkpoint_dir is not None:
os.makedirs(args.checkpoint_dir, exist_ok=True)
final_checkpoint_path = os.path.join(args.checkpoint_dir, 'model.ckpt')
else:
final_checkpoint_path = None
# Load converted data and get statistics
train_df = pd.read_pickle(args.data+'/train_ratings.pickle')
test_df = pd.read_pickle(args.data+'/test_ratings.pickle')
nb_users, nb_items = train_df.max() + 1
# Extract train and test feature tensors from dataframe
pos_train_users = train_df.iloc[:, 0].values.astype(np.int32)
pos_train_items = train_df.iloc[:, 1].values.astype(np.int32)
pos_test_users = test_df.iloc[:, 0].values.astype(np.int32)
pos_test_items = test_df.iloc[:, 1].values.astype(np.int32)
# Negatives indicator for negatives generation
    neg_mat = np.ones((nb_users, nb_items), dtype=bool)
neg_mat[pos_train_users, pos_train_items] = 0
# Get the local training/test data
train_users, train_items, train_labels = get_local_train_data(
pos_train_users, pos_train_items, args.negative_samples
)
test_users, test_items = get_local_test_data(
pos_test_users, pos_test_items
)
# Create and run Data Generator in a separate thread
data_generator = DataGenerator(
        seed,  # use the per-run seed computed above (broadcast from rank 0)
hvd.local_rank(),
nb_users,
nb_items,
neg_mat,
train_users,
train_items,
train_labels,
args.batch_size // hvd.size(),
args.negative_samples,
test_users,
test_items,
args.valid_users_per_batch,
args.valid_negative,
)
# Create tensorflow session and saver
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = str(hvd.local_rank())
if args.xla:
config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
sess = tf.Session(config=config)
# Input tensors
users = tf.placeholder(tf.int32, shape=(None,))
items = tf.placeholder(tf.int32, shape=(None,))
labels = tf.placeholder(tf.int32, shape=(None,))
is_dup = tf.placeholder(tf.float32, shape=(None,))
dropout = tf.placeholder_with_default(args.dropout, shape=())
# Model ops and saver
hit_rate, ndcg, eval_op, train_op = ncf_model_ops(
users,
items,
labels,
is_dup,
params={
'val_batch_size': args.valid_negative+1,
'top_k': args.topk,
'learning_rate': args.learning_rate,
'beta_1': args.beta1,
'beta_2': args.beta2,
'epsilon': args.eps,
'num_users': nb_users,
'num_items': nb_items,
'num_factors': args.factors,
'mf_reg': 0,
'layer_sizes': args.layers,
'layer_regs': [0. for i in args.layers],
'dropout': dropout,
'sigmoid': True,
'loss_scale': args.loss_scale
},
mode='TRAIN' if args.mode == 'train' else 'EVAL'
)
saver = tf.train.Saver()
# Accuracy metric tensors
hr_sum = tf.get_default_graph().get_tensor_by_name('neumf/hit_rate/total:0')
hr_cnt = tf.get_default_graph().get_tensor_by_name('neumf/hit_rate/count:0')
ndcg_sum = tf.get_default_graph().get_tensor_by_name('neumf/ndcg/total:0')
ndcg_cnt = tf.get_default_graph().get_tensor_by_name('neumf/ndcg/count:0')
# Prepare evaluation data
data_generator.prepare_eval_data()
if args.load_checkpoint_path:
saver.restore(sess, args.load_checkpoint_path)
else:
# Manual initialize weights
sess.run(tf.global_variables_initializer())
# If test mode, run one eval
if args.mode == 'test':
sess.run(tf.local_variables_initializer())
eval_start = time.time()
for user_batch, item_batch, dup_batch \
in zip(data_generator.eval_users, data_generator.eval_items, data_generator.dup_mask):
sess.run(
eval_op,
feed_dict={
users: user_batch,
items: item_batch,
                    is_dup: dup_batch,
                    dropout: 0.0
}
)
eval_duration = time.time() - eval_start
# Report results
hit_rate_sum = sess.run(hvd.allreduce(hr_sum, average=False))
hit_rate_cnt = sess.run(hvd.allreduce(hr_cnt, average=False))
ndcg_sum = sess.run(hvd.allreduce(ndcg_sum, average=False))
ndcg_cnt = sess.run(hvd.allreduce(ndcg_cnt, average=False))
hit_rate = hit_rate_sum / hit_rate_cnt
ndcg = ndcg_sum / ndcg_cnt
if hvd.rank() == 0:
eval_throughput = pos_test_users.shape[0] * (args.valid_negative + 1) / eval_duration
dllogger.log(step=tuple(), data={'eval_throughput': eval_throughput,
'eval_time': eval_duration,
'hr@10': float(hit_rate),
'ndcg': float(ndcg)})
return
# Performance Metrics
train_times = list()
eval_times = list()
# Accuracy Metrics
first_to_target = None
best_hr = 0
best_epoch = 0
# Buffers for global metrics
global_hr_sum = np.ones(1)
global_hr_count = np.ones(1)
global_ndcg_sum = np.ones(1)
global_ndcg_count = np.ones(1)
# Buffers for local metrics
local_hr_sum = np.ones(1)
local_hr_count = np.ones(1)
local_ndcg_sum = np.ones(1)
local_ndcg_count = np.ones(1)
# Begin training
for epoch in range(args.epochs):
# Train for one epoch
train_start = time.time()
data_generator.prepare_train_data()
for user_batch, item_batch, label_batch \
in zip(data_generator.train_users_batches,
data_generator.train_items_batches,
data_generator.train_labels_batches):
sess.run(
train_op,
feed_dict={
users: user_batch.get(),
items: item_batch.get(),
labels: label_batch.get()
}
)
train_duration = time.time() - train_start
# Only log "warm" epochs
if epoch >= 1:
train_times.append(train_duration)
# Evaluate
if epoch > args.eval_after:
eval_start = time.time()
sess.run(tf.local_variables_initializer())
for user_batch, item_batch, dup_batch \
in zip(data_generator.eval_users,
data_generator.eval_items,
data_generator.dup_mask):
sess.run(
eval_op,
feed_dict={
users: user_batch,
items: item_batch,
is_dup: dup_batch,
dropout: 0.0
}
)
# Compute local metrics
local_hr_sum[0] = sess.run(hr_sum)
local_hr_count[0] = sess.run(hr_cnt)
local_ndcg_sum[0] = sess.run(ndcg_sum)
local_ndcg_count[0] = sess.run(ndcg_cnt)
# Reduce metrics across all workers
mpi_comm.Reduce(local_hr_count, global_hr_count)
mpi_comm.Reduce(local_hr_sum, global_hr_sum)
mpi_comm.Reduce(local_ndcg_count, global_ndcg_count)
mpi_comm.Reduce(local_ndcg_sum, global_ndcg_sum)
# Calculate metrics
hit_rate = global_hr_sum[0] / global_hr_count[0]
ndcg = global_ndcg_sum[0] / global_ndcg_count[0]
eval_duration = time.time() - eval_start
# Only log "warm" epochs
if epoch >= 1:
eval_times.append(eval_duration)
if hvd.rank() == 0:
dllogger.log(step=(epoch,), data={
'train_time': train_duration,
'eval_time': eval_duration,
'hr@10': hit_rate,
'ndcg': ndcg})
# Update summary metrics
if hit_rate > args.target and first_to_target is None:
first_to_target = epoch
if hit_rate > best_hr:
best_hr = hit_rate
best_epoch = epoch
if hit_rate > args.target and final_checkpoint_path:
saver.save(sess, final_checkpoint_path)
# Final Summary
if hvd.rank() == 0:
train_times = np.array(train_times)
train_throughputs = pos_train_users.shape[0]*(args.negative_samples+1) / train_times
eval_times = np.array(eval_times)
eval_throughputs = pos_test_users.shape[0]*(args.valid_negative+1) / eval_times
dllogger.log(step=tuple(), data={
'average_train_time_per_epoch': np.mean(train_times),
'average_train_throughput': np.mean(train_throughputs),
'average_eval_time_per_epoch': np.mean(eval_times),
'average_eval_throughput': np.mean(eval_throughputs),
'first_epoch_to_hit': first_to_target,
'best_hr': best_hr,
'best_epoch': best_epoch})
dllogger.flush()
sess.close()
return
if __name__ == '__main__':
main()
| DeepLearningExamples-master | TensorFlow/Recommendation/NCF/ncf.py |
# Copyright (c) 2018, deepakn94, codyaustun, robieta. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------
#
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
import pandas as pd
from load import implicit_load
import tensorflow as tf
MIN_RATINGS = 20
USER_COLUMN = 'user_id'
ITEM_COLUMN = 'item_id'
def parse_args():
parser = ArgumentParser()
parser.add_argument('--path', type=str, default='/data/ml-20m/ratings.csv',
help='Path to reviews CSV file from MovieLens')
parser.add_argument('--output', type=str, default='/data',
help='Output directory for train and test files')
return parser.parse_args()
def main():
args = parse_args()
print("Loading raw data from {}".format(args.path))
df = implicit_load(args.path, sort=False)
print("Filtering out users with less than {} ratings".format(MIN_RATINGS))
grouped = df.groupby(USER_COLUMN)
df = grouped.filter(lambda x: len(x) >= MIN_RATINGS)
print("Mapping original user and item IDs to new sequential IDs")
df[USER_COLUMN] = pd.factorize(df[USER_COLUMN])[0]
df[ITEM_COLUMN] = pd.factorize(df[ITEM_COLUMN])[0]
print("Creating list of items for each user")
# Need to sort before popping to get last item
df.sort_values(by='timestamp', inplace=True)
# clean up data
del df['rating'], df['timestamp']
df = df.drop_duplicates() # assuming it keeps order
# now we have filtered and sorted by time data, we can split test data out
grouped_sorted = df.groupby(USER_COLUMN, group_keys=False)
test_data = grouped_sorted.tail(1).sort_values(by='user_id')
# need to pop for each group
train_data = grouped_sorted.apply(lambda x: x.iloc[:-1])
train_data = train_data.sort_values([USER_COLUMN, ITEM_COLUMN])
train_data.to_pickle(args.output + '/train_ratings.pickle')
test_data.to_pickle(args.output + '/test_ratings.pickle')
if __name__ == '__main__':
main()
| DeepLearningExamples-master | TensorFlow/Recommendation/NCF/convert.py |
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import os
import json
import argparse
import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
from neumf import ncf_model_ops
import dllogger
def parse_args():
parser = argparse.ArgumentParser(description="Benchmark inference performance of the NCF model")
parser.add_argument('--load_checkpoint_path', default=None, type=str,
help='Path to the checkpoint file to be loaded. If None will use random weights')
parser.add_argument('--n_users', default=138493, type=int,
help='Number of users. Defaults to the number of users in the ml-20m dataset after preprocessing')
    parser.add_argument('--n_items', default=26744, type=int,
                        help='Number of items. Defaults to the number of items in the ml-20m dataset after preprocessing')
parser.add_argument('-f', '--factors', type=int, default=64,
help='Number of predictive factors')
parser.add_argument('--layers', nargs='+', type=int,
default=[256, 256, 128, 64],
help='Sizes of hidden layers for MLP')
parser.add_argument('--batch_sizes', default='1,4,16,64,256,1024,4096,16384,65536,262144,1048576', type=str,
help='A list of comma-separated batch size values to benchmark')
parser.add_argument('--num_batches', default=200, type=int,
help='Number of batches for which to measure latency and throughput')
parser.add_argument('--amp', action='store_true', default=False,
help='Enable automatic mixed precision')
parser.add_argument('--xla', dest='xla', action='store_true', default=False,
help='Enable XLA')
    parser.add_argument('--log_path', default='log.json', type=str,
                        help='Path to the file in which to store benchmark results')
return parser.parse_args()
def main():
args = parse_args()
if args.amp:
os.environ["TF_ENABLE_AUTO_MIXED_PRECISION"] = "1"
dllogger.init(backends=[dllogger.JSONStreamBackend(verbosity=dllogger.Verbosity.VERBOSE,
filename=args.log_path),
dllogger.StdOutBackend(verbosity=dllogger.Verbosity.VERBOSE)])
dllogger.log(data=vars(args), step='PARAMETER')
batch_sizes = args.batch_sizes.split(',')
batch_sizes = [int(s) for s in batch_sizes]
result_data = {}
for batch_size in batch_sizes:
print('Benchmarking batch size', batch_size)
tf.reset_default_graph()
# Input tensors
users = tf.placeholder(tf.int32, shape=(None,))
items = tf.placeholder(tf.int32, shape=(None,))
dropout = tf.placeholder_with_default(0.0, shape=())
# Model ops and saver
logits_op = ncf_model_ops(users=users, items=items, labels=None, dup_mask=None, mode='INFERENCE',
params={'fp16': False, 'val_batch_size': batch_size, 'num_users': args.n_users,
'num_items': args.n_items, 'num_factors': args.factors, 'mf_reg': 0,
'layer_sizes': args.layers, 'layer_regs': [0. for i in args.layers],
'dropout': 0.0, 'sigmoid': True, 'top_k': None, 'learning_rate': None,
'beta_1': None, 'beta_2': None, 'epsilon': None, 'loss_scale': None, })
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
if args.xla:
config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
sess = tf.Session(config=config)
saver = tf.train.Saver()
if args.load_checkpoint_path:
saver.restore(sess, args.load_checkpoint_path)
else:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
users_batch = np.random.randint(size=batch_size, low=0, high=args.n_users)
items_batch = np.random.randint(size=batch_size, low=0, high=args.n_items)
latencies = []
for i in range(args.num_batches):
start = time.time()
_ = sess.run(logits_op, feed_dict={users: users_batch, items: items_batch, dropout: 0.0 })
end = time.time()
if i < 10: # warmup iterations
continue
latencies.append(end - start)
result_data[f'batch_{batch_size}_mean_throughput'] = batch_size / np.mean(latencies)
result_data[f'batch_{batch_size}_mean_latency'] = np.mean(latencies)
result_data[f'batch_{batch_size}_p90_latency'] = np.percentile(latencies, 90)
result_data[f'batch_{batch_size}_p95_latency'] = np.percentile(latencies, 95)
result_data[f'batch_{batch_size}_p99_latency'] = np.percentile(latencies, 99)
dllogger.log(data=result_data, step=tuple())
dllogger.flush()
if __name__ == '__main__':
main()
| DeepLearningExamples-master | TensorFlow/Recommendation/NCF/inference.py |
# Copyright (c) 2018, deepakn94. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
import pandas as pd
RatingData = namedtuple('RatingData',
['items', 'users', 'ratings', 'min_date', 'max_date'])
def describe_ratings(ratings):
info = RatingData(items=len(ratings['item_id'].unique()),
users=len(ratings['user_id'].unique()),
ratings=len(ratings),
min_date=ratings['timestamp'].min(),
max_date=ratings['timestamp'].max())
print("{ratings} ratings on {items} items from {users} users"
" from {min_date} to {max_date}"
.format(**(info._asdict())))
return info
def process_movielens(ratings, sort=True):
ratings['timestamp'] = pd.to_datetime(ratings['timestamp'], unit='s')
if sort:
ratings.sort_values(by='timestamp', inplace=True)
describe_ratings(ratings)
return ratings
def load_ml_1m(filename, sort=True):
names = ['user_id', 'item_id', 'rating', 'timestamp']
ratings = pd.read_csv(filename, sep='::', names=names, engine='python')
return process_movielens(ratings, sort=sort)
def load_ml_20m(filename, sort=True):
ratings = pd.read_csv(filename)
ratings['timestamp'] = pd.to_datetime(ratings['timestamp'], unit='s')
names = {'userId': 'user_id', 'movieId': 'item_id'}
ratings.rename(columns=names, inplace=True)
return process_movielens(ratings, sort=sort)
DATASETS = [k.replace('load_', '') for k in locals().keys() if "load_" in k]
def get_dataset_name(filename):
for dataset in DATASETS:
if dataset in filename.replace('-', '_').lower():
return dataset
raise NotImplementedError
def implicit_load(filename, sort=True):
func = globals()["load_" + get_dataset_name(filename)]
return func(filename, sort=sort)
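# Usage sketch (path is illustrative): the loader is dispatched from the
# filename, so any path containing 'ml-20m' resolves to load_ml_20m.
#   ratings = implicit_load('/data/ml-20m/ratings.csv', sort=True)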
| DeepLearningExamples-master | TensorFlow/Recommendation/NCF/load.py |
# Copyright (c) 2018. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------
#
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import horovod.tensorflow as hvd
def float32_variable_storage_getter(getter, name, shape=None, dtype=None,
initializer=None, regularizer=None,
trainable=True,
*args, **kwargs):
"""
Custom variable getter that forces trainable variables to be stored in
float32 precision and then casts them to the half-precision
"""
storage_dtype = tf.float32 if trainable else dtype
variable = getter(name, shape, dtype=storage_dtype,
initializer=initializer, regularizer=regularizer,
trainable=trainable,
*args, **kwargs)
if trainable and dtype != tf.float32:
variable = tf.cast(variable, dtype)
return variable
def neural_mf(users,
items,
model_dtype,
nb_users,
nb_items,
mf_dim,
mf_reg,
mlp_layer_sizes,
mlp_layer_regs,
dropout_rate,
sigmoid=False):
"""
Constructs the model graph
"""
# Check params
if len(mlp_layer_sizes) != len(mlp_layer_regs):
        raise RuntimeError('u dummy, layer_sizes != layer_regs')
if mlp_layer_sizes[0] % 2 != 0:
raise RuntimeError('u dummy, mlp_layer_sizes[0] % 2 != 0')
nb_mlp_layers = len(mlp_layer_sizes)
# Embeddings
user_embed = tf.get_variable(
"user_embeddings",
shape=[nb_users, mf_dim + mlp_layer_sizes[0] // 2],
initializer=tf.initializers.random_normal(mean=0.0, stddev=0.01))
item_embed = tf.get_variable(
"item_embeddings",
shape=[nb_items, mf_dim + mlp_layer_sizes[0] // 2],
initializer=tf.initializers.random_normal(mean=0.0, stddev=0.01))
# Matrix Factorization Embeddings
xmfu = tf.nn.embedding_lookup(user_embed[:, :mf_dim], users, partition_strategy='div')
xmfi = tf.nn.embedding_lookup(item_embed[:, :mf_dim], items, partition_strategy='div')
# MLP Network Embeddings
xmlpu = tf.nn.embedding_lookup(user_embed[:, mf_dim:], users, partition_strategy='div')
xmlpi = tf.nn.embedding_lookup(item_embed[:, mf_dim:], items, partition_strategy='div')
    # Force the model to use fp16 data types when manually enabling mixed precision
    # (TensorFlow ops automatically adopt the data type of their first input)
if model_dtype == tf.float16:
xmfu = tf.cast(xmfu, model_dtype)
xmfi = tf.cast(xmfi, model_dtype)
xmlpu = tf.cast(xmlpu, model_dtype)
xmlpi = tf.cast(xmlpi, model_dtype)
# Matrix Factorization
xmf = tf.math.multiply(xmfu, xmfi)
# MLP Layers
xmlp = tf.concat((xmlpu, xmlpi), 1)
for i in range(1, nb_mlp_layers):
xmlp = tf.layers.Dense(
mlp_layer_sizes[i],
activation=tf.nn.relu,
kernel_initializer=tf.glorot_uniform_initializer()
).apply(xmlp)
xmlp = tf.layers.Dropout(rate=dropout_rate).apply(xmlp)
# Final fully-connected layer
logits = tf.concat((xmf, xmlp), 1)
logits = tf.layers.Dense(
1,
kernel_initializer=tf.keras.initializers.lecun_uniform()
).apply(logits)
if sigmoid:
logits = tf.math.sigmoid(logits)
# Cast model outputs back to float32 if manually enabling mixed precision for loss calculation
if model_dtype == tf.float16:
logits = tf.cast(logits, tf.float32)
return logits
def compute_eval_metrics(logits, dup_mask, val_batch_size, K):
"""
Constructs the graph to compute Hit Rate and NDCG
"""
# Replace duplicate (uid, iid) pairs with -inf
logits = logits * (1. - dup_mask)
logits = logits + (dup_mask * logits.dtype.min)
# Reshape tensors so that each row corresponds with a user
logits_by_user = tf.reshape(logits, [-1, val_batch_size])
    dup_mask_by_user = tf.cast(tf.reshape(dup_mask, [-1, val_batch_size]), tf.bool)
# Get the topk items for each user
top_item_indices = tf.math.top_k(logits_by_user, K)[1]
# Check that the positive sample (last index) is in the top K
is_positive = tf.cast(tf.equal(top_item_indices, val_batch_size-1), tf.int32)
found_positive = tf.reduce_sum(is_positive, axis=1)
# Extract the rankings of the positive samples
positive_ranks = tf.reduce_sum(is_positive * tf.expand_dims(tf.range(K), 0), axis=1)
dcg = tf.log(2.) / tf.log(tf.cast(positive_ranks, tf.float32) + 2)
dcg *= tf.cast(found_positive, dcg.dtype)
return found_positive, dcg
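# Worked example of the DCG term above (numbers are illustrative): with K=10
# and the positive item ranked third (zero-based rank 2), the contribution is
# log(2) / log(2 + 2) = 0.5, i.e. 1 / log2(rank + 2); hit rate contributes 1
# because the positive landed inside the top K.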
def ncf_model_ops(users,
items,
labels,
dup_mask,
params,
mode='TRAIN'):
"""
Constructs the training and evaluation graphs
"""
# Validation params
val_batch_size = params['val_batch_size']
K = params['top_k']
# Training params
learning_rate = params['learning_rate']
beta_1 = params['beta_1']
beta_2 = params['beta_2']
epsilon = params['epsilon']
# Model params
fp16 = False
nb_users = params['num_users']
nb_items = params['num_items']
mf_dim = params['num_factors']
mf_reg = params['mf_reg']
mlp_layer_sizes = params['layer_sizes']
mlp_layer_regs = params['layer_regs']
dropout = params['dropout']
    sigmoid = False  # params['sigmoid'] is intentionally ignored: the sigmoid cross-entropy loss below expects raw logits
loss_scale = params['loss_scale']
model_dtype = tf.float16 if fp16 else tf.float32
# If manually enabling mixed precision, use the custom variable getter
custom_getter = None if not fp16 else float32_variable_storage_getter
# Allow soft device placement
with tf.device(None), \
tf.variable_scope('neumf', custom_getter=custom_getter):
# Model graph
logits = neural_mf(
users,
items,
model_dtype,
nb_users,
nb_items,
mf_dim,
mf_reg,
mlp_layer_sizes,
mlp_layer_regs,
dropout,
sigmoid
)
logits = tf.squeeze(logits)
if mode == 'INFERENCE':
return logits
# Evaluation Ops
found_positive, dcg = compute_eval_metrics(logits, dup_mask, val_batch_size, K)
# Metrics
hit_rate = tf.metrics.mean(found_positive, name='hit_rate')
ndcg = tf.metrics.mean(dcg, name='ndcg')
eval_op = tf.group(hit_rate[1], ndcg[1])
if mode == 'EVAL':
return hit_rate[0], ndcg[0], eval_op, None
# Labels
labels = tf.reshape(labels, [-1, 1])
logits = tf.reshape(logits, [-1, 1])
# Use adaptive momentum optimizer
optimizer = tf.train.AdamOptimizer(
learning_rate=learning_rate,
beta1=beta_1, beta2=beta_2,
epsilon=epsilon)
loss = tf.losses.sigmoid_cross_entropy(
labels,
logits,
reduction=tf.losses.Reduction.MEAN)
# Apply loss scaling if manually enabling mixed precision
if fp16:
if loss_scale is None:
loss_scale_manager = tf.contrib.mixed_precision.ExponentialUpdateLossScaleManager(2**32, 1000)
else:
loss_scale_manager = tf.contrib.mixed_precision.FixedLossScaleManager(loss_scale)
optimizer = tf.contrib.mixed_precision.LossScaleOptimizer(optimizer, loss_scale_manager)
# Horovod wrapper for distributed training
optimizer = hvd.DistributedOptimizer(optimizer)
# Update ops
global_step = tf.train.get_global_step()
train_op = optimizer.minimize(loss, global_step=global_step)
return hit_rate[0], ndcg[0], eval_op, train_op
| DeepLearningExamples-master | TensorFlow/Recommendation/NCF/neumf.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| DeepLearningExamples-master | TensorFlow/Recommendation/WideAndDeep/__init__.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import setuptools
NAME = 'trainer'
VERSION = '1.0'
TENSORFLOW_TRANSFORM = 'tensorflow-transform==0.1.8'
if __name__ == '__main__':
setuptools.setup(name=NAME, version=VERSION, packages=['trainer'],
install_requires=[TENSORFLOW_TRANSFORM])
| DeepLearningExamples-master | TensorFlow/Recommendation/WideAndDeep/setup.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from trainer import features
# Rough approximation of the MAP metric for measuring ad quality.
# The roughness comes from batch boundaries falling between groups of display_ids.
# The `features` argument of the metric functions below shadows the imported
# `features` module, so the needed column names are bound at module level here.
DISPLAY_ID_COLUMN = features.DISPLAY_ID_COLUMN
def map_custom_metric(features, labels, predictions):
display_ids = tf.reshape(features[DISPLAY_ID_COLUMN], [-1])
predictions = predictions['probabilities'][:, 1]
labels = labels[:, 0]
# Processing unique display_ids, indexes and counts
# Sorting needed in case the same display_id occurs in two different places
sorted_ids = tf.argsort(display_ids)
display_ids = tf.gather(display_ids, indices=sorted_ids)
predictions = tf.gather(predictions, indices=sorted_ids)
labels = tf.gather(labels, indices=sorted_ids)
_, display_ids_idx, display_ids_ads_count = tf.unique_with_counts(
display_ids, out_idx=tf.int64)
pad_length = 30 - tf.reduce_max(display_ids_ads_count)
pad_fn = lambda x: tf.pad(x, [(0, 0), (0, pad_length)])
preds = tf.RaggedTensor.from_value_rowids(
predictions, display_ids_idx).to_tensor()
labels = tf.RaggedTensor.from_value_rowids(
labels, display_ids_idx).to_tensor()
labels = tf.argmax(labels, axis=1)
return {
'map': tf.compat.v1.metrics.average_precision_at_k(
predictions=pad_fn(preds),
labels=labels,
k=12,
name="streaming_map")}
IS_LEAK_COLUMN = features.IS_LEAK_COLUMN
def map_custom_metric_with_leak(features, labels, predictions):
display_ids = features[DISPLAY_ID_COLUMN]
display_ids = tf.reshape(display_ids, [-1])
is_leak_tf = features[IS_LEAK_COLUMN]
is_leak_tf = tf.reshape(is_leak_tf, [-1])
predictions = predictions['probabilities'][:, 1]
predictions = predictions + tf.cast(is_leak_tf, tf.float32)
labels = labels[:, 0]
# Processing unique display_ids, indexes and counts
# Sorting needed in case the same display_id occurs in two different places
sorted_ids = tf.argsort(display_ids)
display_ids = tf.gather(display_ids, indices=sorted_ids)
predictions = tf.gather(predictions, indices=sorted_ids)
labels = tf.gather(labels, indices=sorted_ids)
_, display_ids_idx, display_ids_ads_count = tf.unique_with_counts(
display_ids, out_idx=tf.int64)
pad_length = 30 - tf.reduce_max(display_ids_ads_count)
pad_fn = lambda x: tf.pad(x, [(0, 0), (0, pad_length)])
preds = tf.RaggedTensor.from_value_rowids(predictions, display_ids_idx).to_tensor()
labels = tf.RaggedTensor.from_value_rowids(labels, display_ids_idx).to_tensor()
labels = tf.argmax(labels, axis=1)
return {
'map_with_leak': tf.compat.v1.metrics.average_precision_at_k(
predictions=pad_fn(preds),
labels=labels,
k=12,
name="streaming_map_with_leak")}
| DeepLearningExamples-master | TensorFlow/Recommendation/WideAndDeep/utils/metrics.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow.compat.v1 import logging
def separate_input_fn(
tf_transform_output,
transformed_examples,
create_batches,
mode,
reader_num_threads=1,
parser_num_threads=2,
shuffle_buffer_size=10,
prefetch_buffer_size=1,
print_display_ids=False):
"""
A version of the training + eval input function that uses dataset operations.
(For more straightforward tweaking.)
"""
logging.warn('Shuffle buffer size: {}'.format(shuffle_buffer_size))
filenames_dataset = tf.data.Dataset.list_files(
transformed_examples,
shuffle=False
)
raw_dataset = tf.data.TFRecordDataset(
filenames_dataset,
num_parallel_reads=reader_num_threads
)
if mode == tf.estimator.ModeKeys.TRAIN and shuffle_buffer_size > 1:
raw_dataset = raw_dataset.shuffle(shuffle_buffer_size)
raw_dataset = raw_dataset.repeat()
raw_dataset = raw_dataset.batch(create_batches)
    # parse_example_dataset appears to require each element to be a vector;
    # batching above should guarantee this. One possible alternative for any
    # problematic case is tf.io.parse_single_example.
parsed_dataset = raw_dataset.apply(
tf.data.experimental.parse_example_dataset(
tf_transform_output.transformed_feature_spec(),
num_parallel_calls=parser_num_threads
)
)
    # Mapped over each dataset element: separates the label, reshapes features
    # to two dimensions (batch size, elements per record), and optionally
    # injects display_id/ad_id/is_leak printing for debugging.
def consolidate_batch(elem):
label = elem.pop('label')
reshaped_label = tf.reshape(label, [-1, label.shape[-1]])
reshaped_elem = {
key: tf.reshape(elem[key], [-1, elem[key].shape[-1]])
for key in elem
}
if print_display_ids:
elem['ad_id'] = tf.Print(input_=elem['ad_id'],
data=[tf.reshape(elem['display_id'], [-1])],
message='display_id', name='print_display_ids',
summarize=elem['ad_id'].shape[1])
elem['ad_id'] = tf.Print(input_=elem['ad_id'],
data=[tf.reshape(elem['ad_id'], [-1])],
message='ad_id', name='print_ad_ids',
summarize=elem['ad_id'].shape[1])
elem['ad_id'] = tf.Print(input_=elem['ad_id'],
data=[tf.reshape(elem['is_leak'], [-1])],
message='is_leak', name='print_is_leak',
summarize=elem['ad_id'].shape[1])
return reshaped_elem, reshaped_label
if mode == tf.estimator.ModeKeys.EVAL:
parsed_dataset = parsed_dataset.map(
consolidate_batch,
num_parallel_calls=None
)
else:
parsed_dataset = parsed_dataset.map(
consolidate_batch,
num_parallel_calls=parser_num_threads
)
parsed_dataset = parsed_dataset.prefetch(prefetch_buffer_size)
return parsed_dataset
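# Usage sketch (paths and sizes are assumptions, not part of this module):
#   tf_transform_output = tft.TFTransformOutput('/outbrain/tfrecords')
#   train_ds = separate_input_fn(tf_transform_output,
#                                '/outbrain/tfrecords/train/part*',
#                                create_batches=16384,
#                                mode=tf.estimator.ModeKeys.TRAIN)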
| DeepLearningExamples-master | TensorFlow/Recommendation/WideAndDeep/utils/dataloader.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
__all__ = ['learning_rate_scheduler']
def learning_rate_scheduler(lr_init, warmup_steps, global_step):
warmup_lr = (lr_init * tf.cast(global_step, tf.float32) / tf.cast(warmup_steps, tf.float32))
return tf.cond(global_step < warmup_steps, lambda: warmup_lr, lambda: lr_init)
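# Usage sketch (values are illustrative): with lr_init=0.001 and
# warmup_steps=100 the schedule ramps linearly, e.g. 0.0005 at step 50,
# then holds 0.001 from step 100 onward.
#   global_step = tf.train.get_or_create_global_step()
#   lr = learning_rate_scheduler(0.001, 100, global_step)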
| DeepLearningExamples-master | TensorFlow/Recommendation/WideAndDeep/utils/schedulers.py |
#! /usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class MeanAccumulator:
def __init__(self):
self.sum = 0
self.count = 0
def consume(self, value):
self.sum += value
self.count += 1
    def value(self):
        # Guard against division by zero when no samples were consumed
        # (e.g. a run shorter than the warmup period)
        return self.sum / self.count if self.count else 0
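# Usage sketch (values are illustrative): the accumulator keeps a running mean
# without storing individual samples.
#   acc = MeanAccumulator()
#   for throughput in (100.0, 110.0, 120.0):
#       acc.consume(throughput)
#   acc.value()  # -> 110.0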
| DeepLearningExamples-master | TensorFlow/Recommendation/WideAndDeep/utils/hooks/training_hooks.py |
#! /usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dllogger
import tensorflow as tf
import time
from .training_hooks import MeanAccumulator
__all__ = ['BenchmarkLoggingHook']
class BenchmarkLoggingHook(tf.train.SessionRunHook):
def __init__(self, global_batch_size, warmup_steps=100):
self.warmup_steps = warmup_steps
self.global_batch_size = global_batch_size
self.current_step = 0
self.t0 = None
self.mean_throughput = MeanAccumulator()
def before_run(self, run_context):
self.t0 = time.time()
def after_run(self, run_context, run_values):
batch_time = time.time() - self.t0
samplesps = self.global_batch_size / batch_time
if self.current_step >= self.warmup_steps:
self.mean_throughput.consume(samplesps)
dllogger.log(data={"samplesps": samplesps}, step=(0, self.current_step))
self.current_step += 1
| DeepLearningExamples-master | TensorFlow/Recommendation/WideAndDeep/utils/hooks/benchmark_hooks.py |
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import math
import pickle
import pyspark.sql.functions as F
import time
from collections import defaultdict
from pyspark.context import SparkContext, SparkConf
from pyspark.sql.session import SparkSession
from pyspark.sql.types import IntegerType, StringType, StructType, StructField, TimestampType, FloatType, ArrayType, \
MapType
OUTPUT_BUCKET_FOLDER = "/outbrain/preprocessed/"
DATA_BUCKET_FOLDER = "/outbrain/orig/"
SPARK_TEMP_FOLDER = "/outbrain/spark-temp/"
parser = argparse.ArgumentParser()
parser.add_argument(
'--submission',
action='store_true',
default=False
)
args = parser.parse_args()
evaluation = not args.submission
conf = SparkConf().setMaster('local[*]').set('spark.executor.memory', '40g').set('spark.driver.memory', '200g').set(
"spark.local.dir", SPARK_TEMP_FOLDER)
sc = SparkContext(conf=conf)
spark = SparkSession(sc)
start_time = time.time()
print('Loading data...')
truncate_day_from_timestamp_udf = F.udf(lambda ts: int(ts / 1000 / 60 / 60 / 24), IntegerType())
extract_country_udf = F.udf(lambda geo: geo.strip()[:2] if geo is not None else '', StringType())
documents_meta_schema = StructType(
[StructField("document_id_doc", IntegerType(), True),
StructField("source_id", IntegerType(), True),
StructField("publisher_id", IntegerType(), True),
StructField("publish_time", TimestampType(), True)]
)
documents_meta_df = spark.read.schema(documents_meta_schema) \
.options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER + "documents_meta.csv") \
.withColumn('dummyDocumentsMeta', F.lit(1)).alias('documents_meta')
documents_meta_df.count()
print('Drop rows with empty "source_id"...')
documents_meta_df = documents_meta_df.dropna(subset="source_id")
documents_meta_df.count()
source_publishers_df = documents_meta_df.select(["source_id", "publisher_id"]).dropDuplicates()
source_publishers_df.count()
print('Get list of source_ids without publisher_id...')
rows_no_pub = source_publishers_df.filter("publisher_id is NULL")
source_ids_without_publisher = [row['source_id'] for row in rows_no_pub.collect()]
len(source_ids_without_publisher)
print('Maximum value of publisher_id used so far...')
max_pub = max(source_publishers_df.select(["publisher_id"]).dropna().collect())['publisher_id']
max_pub
print('Rows filled with new publisher_ids')
new_publishers = [(source, max_pub + 1 + nr) for nr, source in enumerate(source_ids_without_publisher)]
new_publishers_df = spark.createDataFrame(new_publishers, ("source_id", "publisher_id"))
new_publishers_df.take(10)
# old and new publishers merged
fixed_source_publishers_df = source_publishers_df.dropna().union(new_publishers_df)
fixed_source_publishers_df.collect()[-30:]
print('Update documents_meta with new publishers...')
documents_meta_df = documents_meta_df.drop('publisher_id').join(fixed_source_publishers_df, on='source_id')
documents_meta_df.count()
documents_categories_schema = StructType(
[StructField("document_id_cat", IntegerType(), True),
StructField("category_id", IntegerType(), True),
StructField("confidence_level_cat", FloatType(), True)]
)
documents_categories_df = spark.read.schema(documents_categories_schema) \
.options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER + "documents_categories.csv") \
.alias('documents_categories')
documents_categories_grouped_df = documents_categories_df.groupBy('document_id_cat') \
.agg(F.collect_list('category_id').alias('category_id_list'),
F.collect_list('confidence_level_cat').alias('cat_confidence_level_list')) \
.withColumn('dummyDocumentsCategory', F.lit(1)) \
.alias('documents_categories_grouped')
documents_topics_schema = StructType(
[StructField("document_id_top", IntegerType(), True),
StructField("topic_id", IntegerType(), True),
StructField("confidence_level_top", FloatType(), True)]
)
documents_topics_df = spark.read.schema(documents_topics_schema) \
.options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER + "documents_topics.csv") \
.alias('documents_topics')
documents_topics_grouped_df = documents_topics_df.groupBy('document_id_top') \
.agg(F.collect_list('topic_id').alias('topic_id_list'),
F.collect_list('confidence_level_top').alias('top_confidence_level_list')) \
.withColumn('dummyDocumentsTopics', F.lit(1)) \
.alias('documents_topics_grouped')
documents_entities_schema = StructType(
[StructField("document_id_ent", IntegerType(), True),
StructField("entity_id", StringType(), True),
StructField("confidence_level_ent", FloatType(), True)]
)
documents_entities_df = spark.read.schema(documents_entities_schema) \
.options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER + "documents_entities.csv") \
.alias('documents_entities')
documents_entities_grouped_df = documents_entities_df.groupBy('document_id_ent') \
.agg(F.collect_list('entity_id').alias('entity_id_list'),
F.collect_list('confidence_level_ent').alias('ent_confidence_level_list')) \
.withColumn('dummyDocumentsEntities', F.lit(1)) \
.alias('documents_entities_grouped')
documents_df = documents_meta_df.join(
documents_categories_grouped_df,
on=F.col("document_id_doc") == F.col("documents_categories_grouped.document_id_cat"),
how='left') \
.join(documents_topics_grouped_df,
on=F.col("document_id_doc") == F.col("documents_topics_grouped.document_id_top"),
how='left') \
.join(documents_entities_grouped_df,
on=F.col("document_id_doc") == F.col("documents_entities_grouped.document_id_ent"),
how='left') \
.cache()
documents_df.count()
if evaluation:
validation_set_df = spark.read.parquet(OUTPUT_BUCKET_FOLDER + "validation_set.parquet") \
.alias('validation_set')
validation_set_df.select('uuid_event').distinct().createOrReplaceTempView('users_to_profile')
validation_set_df.select('uuid_event', 'document_id_promo').distinct() \
.createOrReplaceTempView('validation_users_docs_to_ignore')
else:
events_schema = StructType(
[StructField("display_id", IntegerType(), True),
StructField("uuid_event", StringType(), True),
StructField("document_id_event", IntegerType(), True),
StructField("timestamp_event", IntegerType(), True),
StructField("platform_event", IntegerType(), True),
StructField("geo_location_event", StringType(), True)]
)
events_df = spark.read.schema(events_schema) \
.options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER + "events.csv") \
.withColumn('dummyEvents', F.lit(1)) \
.withColumn('day_event', truncate_day_from_timestamp_udf('timestamp_event')) \
.withColumn('event_country', extract_country_udf('geo_location_event')) \
.alias('events')
# Drop rows with empty "geo_location"
events_df = events_df.dropna(subset="geo_location_event")
# Drop rows with empty "platform"
events_df = events_df.dropna(subset="platform_event")
events_df.createOrReplaceTempView('events')
promoted_content_schema = StructType(
[StructField("ad_id", IntegerType(), True),
StructField("document_id_promo", IntegerType(), True),
StructField("campaign_id", IntegerType(), True),
StructField("advertiser_id", IntegerType(), True)]
)
promoted_content_df = spark.read.schema(promoted_content_schema) \
.options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER + "promoted_content.csv") \
.withColumn('dummyPromotedContent', F.lit(1)).alias('promoted_content')
clicks_test_schema = StructType(
[StructField("display_id", IntegerType(), True),
StructField("ad_id", IntegerType(), True)]
)
clicks_test_df = spark.read.schema(clicks_test_schema) \
.options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER + "clicks_test.csv") \
.withColumn('dummyClicksTest', F.lit(1)).alias('clicks_test')
test_set_df = clicks_test_df.join(promoted_content_df, on='ad_id', how='left') \
.join(events_df, on='display_id', how='left')
test_set_df.select('uuid_event').distinct().createOrReplaceTempView('users_to_profile')
test_set_df.select('uuid_event', 'document_id_promo', 'timestamp_event').distinct() \
.createOrReplaceTempView('test_users_docs_timestamp_to_ignore')
page_views_schema = StructType(
[StructField("uuid_pv", StringType(), True),
StructField("document_id_pv", IntegerType(), True),
StructField("timestamp_pv", IntegerType(), True),
StructField("platform_pv", IntegerType(), True),
StructField("geo_location_pv", StringType(), True),
StructField("traffic_source_pv", IntegerType(), True)]
)
page_views_df = spark.read.schema(page_views_schema) \
.options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER + "page_views.csv") \
.alias('page_views')
page_views_df.createOrReplaceTempView('page_views')
additional_filter = ''
if evaluation:
additional_filter = '''
AND NOT EXISTS (SELECT uuid_event FROM validation_users_docs_to_ignore
WHERE uuid_event = p.uuid_pv
AND document_id_promo = p.document_id_pv)
'''
else:
additional_filter = '''
AND NOT EXISTS (SELECT uuid_event FROM test_users_docs_timestamp_to_ignore
WHERE uuid_event = p.uuid_pv
AND document_id_promo = p.document_id_pv
AND p.timestamp_pv >= timestamp_event)
'''
page_views_train_df = spark.sql('''
SELECT * FROM page_views p
WHERE EXISTS (SELECT uuid_event FROM users_to_profile
WHERE uuid_event = p.uuid_pv)
''' + additional_filter).alias('views') \
.join(documents_df, on=F.col("document_id_pv") == F.col("document_id_doc"), how='left') \
.filter(
'dummyDocumentsEntities is not null OR dummyDocumentsTopics is not null OR dummyDocumentsCategory is not null')
print('Processing document frequencies...')
documents_total = documents_meta_df.count()
documents_total
categories_docs_counts = documents_categories_df.groupBy('category_id').count().rdd.collectAsMap()
len(categories_docs_counts)
df_filenames_suffix = ''
if evaluation:
df_filenames_suffix = '_eval'
with open(OUTPUT_BUCKET_FOLDER + 'categories_docs_counts' + df_filenames_suffix + '.pickle', 'wb') as output:
pickle.dump(categories_docs_counts, output)
topics_docs_counts = documents_topics_df.groupBy('topic_id').count().rdd.collectAsMap()
len(topics_docs_counts)
with open(OUTPUT_BUCKET_FOLDER + 'topics_docs_counts' + df_filenames_suffix + '.pickle', 'wb') as output:
pickle.dump(topics_docs_counts, output)
entities_docs_counts = documents_entities_df.groupBy('entity_id').count().rdd.collectAsMap()
len(entities_docs_counts)
with open(OUTPUT_BUCKET_FOLDER + 'entities_docs_counts' + df_filenames_suffix + '.pickle', 'wb') as output:
pickle.dump(entities_docs_counts, output)
print('Processing user profiles...')
int_null_to_minus_one_udf = F.udf(lambda x: x if x is not None else -1, IntegerType())
int_list_null_to_empty_list_udf = F.udf(lambda x: x if x is not None else [], ArrayType(IntegerType()))
float_list_null_to_empty_list_udf = F.udf(lambda x: x if x is not None else [], ArrayType(FloatType()))
str_list_null_to_empty_list_udf = F.udf(lambda x: x if x is not None else [], ArrayType(StringType()))
page_views_by_user_df = page_views_train_df.select(
'uuid_pv',
'document_id_pv',
int_null_to_minus_one_udf('timestamp_pv').alias('timestamp_pv'),
int_list_null_to_empty_list_udf('category_id_list').alias('category_id_list'),
float_list_null_to_empty_list_udf('cat_confidence_level_list').alias('cat_confidence_level_list'),
int_list_null_to_empty_list_udf('topic_id_list').alias('topic_id_list'),
float_list_null_to_empty_list_udf('top_confidence_level_list').alias('top_confidence_level_list'),
str_list_null_to_empty_list_udf('entity_id_list').alias('entity_id_list'),
float_list_null_to_empty_list_udf('ent_confidence_level_list').alias('ent_confidence_level_list')) \
.groupBy('uuid_pv') \
.agg(F.collect_list('document_id_pv').alias('document_id_pv_list'),
F.collect_list('timestamp_pv').alias('timestamp_pv_list'),
F.collect_list('category_id_list').alias('category_id_lists'),
F.collect_list('cat_confidence_level_list').alias('cat_confidence_level_lists'),
F.collect_list('topic_id_list').alias('topic_id_lists'),
F.collect_list('top_confidence_level_list').alias('top_confidence_level_lists'),
F.collect_list('entity_id_list').alias('entity_id_lists'),
F.collect_list('ent_confidence_level_list').alias('ent_confidence_level_lists'))
def get_user_aspects(docs_aspects, aspect_docs_counts):
docs_aspects_merged_lists = defaultdict(list)
for doc_aspects in docs_aspects:
for key in doc_aspects.keys():
docs_aspects_merged_lists[key].append(doc_aspects[key])
docs_aspects_stats = {}
for key in docs_aspects_merged_lists.keys():
aspect_list = docs_aspects_merged_lists[key]
tf = len(aspect_list)
idf = math.log(documents_total / float(aspect_docs_counts[key]))
confid_mean = sum(aspect_list) / float(len(aspect_list))
docs_aspects_stats[key] = [tf * idf, confid_mean]
return docs_aspects_stats
def generate_user_profile(docs_aspects_list, docs_aspects_confidence_list, aspect_docs_counts):
docs_aspects = []
for doc_aspects_list, doc_aspects_confidence_list in zip(docs_aspects_list, docs_aspects_confidence_list):
doc_aspects = dict(zip(doc_aspects_list, doc_aspects_confidence_list))
docs_aspects.append(doc_aspects)
user_aspects = get_user_aspects(docs_aspects, aspect_docs_counts)
return user_aspects
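# Worked example of the TF-IDF weighting above (numbers are illustrative):
# if a user viewed 3 documents tagged with category 42 and 1,000 of the
# corpus documents carry that category, the profile entry becomes
# [tf * idf, confid_mean] = [3 * log(documents_total / 1000.0), confid_mean].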
get_list_len_udf = F.udf(lambda docs_list: len(docs_list), IntegerType())
generate_categories_user_profile_map_udf = F.udf(
lambda docs_aspects_list, docs_aspects_confidence_list:
generate_user_profile(docs_aspects_list,
docs_aspects_confidence_list,
categories_docs_counts),
MapType(IntegerType(), ArrayType(FloatType()), False))
generate_topics_user_profile_map_udf = F.udf(
lambda docs_aspects_list, docs_aspects_confidence_list:
generate_user_profile(docs_aspects_list,
docs_aspects_confidence_list,
topics_docs_counts),
MapType(IntegerType(), ArrayType(FloatType()), False))
generate_entities_user_profile_map_udf = F.udf(
lambda docs_aspects_list, docs_aspects_confidence_list:
generate_user_profile(docs_aspects_list,
docs_aspects_confidence_list,
entities_docs_counts),
MapType(StringType(), ArrayType(FloatType()), False))
users_profile_df = page_views_by_user_df \
.withColumn('views', get_list_len_udf('document_id_pv_list')) \
.withColumn('categories', generate_categories_user_profile_map_udf('category_id_lists',
'cat_confidence_level_lists')) \
.withColumn('topics', generate_topics_user_profile_map_udf('topic_id_lists',
'top_confidence_level_lists')) \
.withColumn('entities', generate_entities_user_profile_map_udf('entity_id_lists',
'ent_confidence_level_lists')) \
.select(
F.col('uuid_pv').alias('uuid'),
F.col('document_id_pv_list').alias('doc_ids'),
'views', 'categories', 'topics', 'entities')
if evaluation:
table_name = 'user_profiles_eval'
else:
table_name = 'user_profiles'
users_profile_df.write.parquet(OUTPUT_BUCKET_FOLDER + table_name, mode='overwrite')
finish_time = time.time()
print("Elapsed min: ", (finish_time - start_time) / 60)
spark.stop()
| DeepLearningExamples-master | TensorFlow/Recommendation/WideAndDeep/preproc/preproc2.py |
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import _pickle as cPickle
import argparse
import datetime
import hashlib
import math
import numpy as np
import pandas as pd
import pyspark.sql.functions as F
import time
from pyspark.context import SparkContext, SparkConf
from pyspark.ml.linalg import SparseVector, VectorUDT
from pyspark.sql.session import SparkSession
from pyspark.sql.types import IntegerType, StringType, StructType, StructField, TimestampType, FloatType, ArrayType
evaluation_verbose = False
OUTPUT_BUCKET_FOLDER = "/outbrain/preprocessed/"
DATA_BUCKET_FOLDER = "/outbrain/orig/"
SPARK_TEMP_FOLDER = "/outbrain/spark-temp/"
conf = SparkConf().setMaster('local[*]').set('spark.executor.memory', '40g').set('spark.driver.memory', '200g').set(
"spark.local.dir", SPARK_TEMP_FOLDER)
sc = SparkContext(conf=conf)
spark = SparkSession(sc)
start_time = time.time()
def hashstr(s, nr_bins):
return int(hashlib.md5(s.encode('utf8')).hexdigest(), 16) % (nr_bins - 1) + 1
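# Illustrative behaviour of the hashing above: buckets land in [1, nr_bins - 1],
# which reserves 0 (e.g. for padding/missing values); hashstr('some_id', 100)
# is a stable bucket in 1..99 derived from the MD5 digest.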
parser = argparse.ArgumentParser()
parser.add_argument(
'--submission',
action='store_true',
default=False
)
args = parser.parse_args()
evaluation = not args.submission
# ## UDFs
def date_time_to_unix_epoch(date_time):
return int(time.mktime(date_time.timetuple()))
def date_time_to_unix_epoch_treated(dt):
if dt is not None:
try:
epoch = date_time_to_unix_epoch(dt)
return epoch
except Exception as e:
print("Error processing dt={}".format(dt), e)
return 0
else:
return 0
timestamp_null_to_zero_int_udf = F.udf(lambda x: date_time_to_unix_epoch_treated(x), IntegerType())
INT_DEFAULT_NULL_VALUE = -1
int_null_to_minus_one_udf = F.udf(lambda x: x if x is not None else INT_DEFAULT_NULL_VALUE, IntegerType())
int_list_null_to_empty_list_udf = F.udf(lambda x: x if x is not None else [], ArrayType(IntegerType()))
float_list_null_to_empty_list_udf = F.udf(lambda x: x if x is not None else [], ArrayType(FloatType()))
str_list_null_to_empty_list_udf = F.udf(lambda x: x if x is not None else [], ArrayType(StringType()))
def truncate_day_from_timestamp(ts):
return int(ts / 1000 / 60 / 60 / 24)
truncate_day_from_timestamp_udf = F.udf(lambda ts: truncate_day_from_timestamp(ts), IntegerType())
extract_country_udf = F.udf(lambda geo: geo.strip()[:2] if geo is not None else '', StringType())
extract_country_state_udf = F.udf(lambda geo: geo.strip()[:5] if geo is not None else '', StringType())
list_len_udf = F.udf(lambda x: len(x) if x is not None else 0, IntegerType())
def convert_odd_timestamp(timestamp_ms_relative):
TIMESTAMP_DELTA = 1465876799998
return datetime.datetime.fromtimestamp((int(timestamp_ms_relative) + TIMESTAMP_DELTA) // 1000)
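# Illustrative: the competition timestamps are milliseconds relative to an
# offset, so adding TIMESTAMP_DELTA recovers an absolute local datetime;
# convert_odd_timestamp(0) falls in mid-June 2016, around the start of the
# dataset's collection window.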
# # Loading Files
# ## Loading UTC/BST for each country and US / CA states (local time)
country_utc_dst_df = pd.read_csv('preproc/data/country_codes_utc_dst_tz_delta.csv', keep_default_na=False)
countries_utc_dst_dict = dict(
zip(country_utc_dst_df['country_code'].tolist(), country_utc_dst_df['utc_dst_time_offset_cleaned'].tolist()))
countries_utc_dst_broad = sc.broadcast(countries_utc_dst_dict)
us_states_utc_dst_df = pd.read_csv('preproc/data/us_states_abbrev_bst.csv', keep_default_na=False)
us_states_utc_dst_dict = dict(
zip(us_states_utc_dst_df['state_abb'].tolist(), us_states_utc_dst_df['utc_dst_time_offset_cleaned'].tolist()))
us_states_utc_dst_broad = sc.broadcast(us_states_utc_dst_dict)
ca_states_utc_dst_df = pd.read_csv('preproc/data/ca_states_abbrev_bst.csv', keep_default_na=False)
ca_countries_utc_dst_dict = dict(
zip(ca_states_utc_dst_df['state_abb'].tolist(), ca_states_utc_dst_df['utc_dst_time_offset_cleaned'].tolist()))
ca_countries_utc_dst_broad = sc.broadcast(ca_countries_utc_dst_dict)
# ## Loading competition csvs
events_schema = StructType(
[StructField("display_id", IntegerType(), True),
StructField("uuid_event", StringType(), True),
StructField("document_id_event", IntegerType(), True),
StructField("timestamp_event", IntegerType(), True),
StructField("platform_event", IntegerType(), True),
StructField("geo_location_event", StringType(), True)]
)
events_df = spark.read.schema(events_schema) \
.options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER + "events.csv") \
.withColumn('dummyEvents', F.lit(1)) \
.withColumn('day_event', truncate_day_from_timestamp_udf('timestamp_event')) \
.withColumn('event_country', extract_country_udf('geo_location_event')) \
.withColumn('event_country_state', extract_country_state_udf('geo_location_event')) \
.alias('events')
events_df.count()
# Drop rows with empty "geo_location"
events_df = events_df.dropna(subset="geo_location_event")
events_df.count()
# Drop rows with empty "platform"
events_df = events_df.dropna(subset="platform_event")
events_df.count()
page_views_schema = StructType(
[StructField("uuid_pv", StringType(), True),
StructField("document_id_pv", IntegerType(), True),
StructField("timestamp_pv", IntegerType(), True),
StructField("platform_pv", IntegerType(), True),
StructField("geo_location_pv", StringType(), True),
StructField("traffic_source_pv", IntegerType(), True)]
)
page_views_df = spark.read.schema(page_views_schema) \
.options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER + "page_views.csv") \
.withColumn('day_pv', truncate_day_from_timestamp_udf('timestamp_pv')) \
.alias('page_views')
page_views_df.createOrReplaceTempView('page_views')
page_views_users_df = spark.sql('''
SELECT uuid_pv, document_id_pv, max(timestamp_pv) as max_timestamp_pv, 1 as dummyPageView
FROM page_views p
GROUP BY uuid_pv, document_id_pv
''').alias('page_views_users')
promoted_content_schema = StructType(
[StructField("ad_id", IntegerType(), True),
StructField("document_id_promo", IntegerType(), True),
StructField("campaign_id", IntegerType(), True),
StructField("advertiser_id", IntegerType(), True)]
)
promoted_content_df = spark.read.schema(promoted_content_schema) \
.options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER + "promoted_content.csv") \
.withColumn('dummyPromotedContent', F.lit(1)).alias('promoted_content').cache()
documents_meta_schema = StructType(
[StructField("document_id_doc", IntegerType(), True),
StructField("source_id", IntegerType(), True),
StructField("publisher_id", IntegerType(), True),
StructField("publish_time", TimestampType(), True)]
)
documents_meta_df = spark.read.schema(documents_meta_schema) \
.options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER + "documents_meta.csv") \
.withColumn('dummyDocumentsMeta', F.lit(1)).alias('documents_meta').cache()
documents_meta_df.count()
# Drop rows with empty "source_id"
documents_meta_df = documents_meta_df.dropna(subset="source_id")
documents_meta_df.count()
source_publishers_df = documents_meta_df.select(["source_id", "publisher_id"]).dropDuplicates()
source_publishers_df.count()
# get list of source_ids without publisher_id
rows_no_pub = source_publishers_df.filter("publisher_id is NULL")
source_ids_without_publisher = [row['source_id'] for row in rows_no_pub.collect()]
len(source_ids_without_publisher)
# maximum value of publisher_id used so far
max_pub = max(source_publishers_df.select(["publisher_id"]).dropna().collect())['publisher_id']
max_pub
# rows filled with new publisher_ids
new_publishers = [(source, max_pub + 1 + nr) for nr, source in enumerate(source_ids_without_publisher)]
new_publishers_df = spark.createDataFrame(new_publishers, ("source_id", "publisher_id"))
new_publishers_df.take(10)
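# Sanity-check sketch (hypothetical numbers, pure Python): each orphan
# source_id is assigned a fresh publisher_id above the current maximum.
_toy_sources, _toy_max_pub = [7, 9, 11], 1000
assert [(s, _toy_max_pub + 1 + nr) for nr, s in enumerate(_toy_sources)] \
    == [(7, 1001), (9, 1002), (11, 1003)]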
# old and new publishers merged
fixed_source_publishers_df = source_publishers_df.dropna().union(new_publishers_df)
fixed_source_publishers_df.collect()[-30:]
# update documents_meta with new publishers
documents_meta_df = documents_meta_df.drop('publisher_id').join(fixed_source_publishers_df, on='source_id')
documents_meta_df.count()
# Joining with Page Views to get traffic_source_pv
events_joined_df = events_df.join(documents_meta_df
.withColumnRenamed('source_id', 'source_id_doc_event')
.withColumnRenamed('publisher_id', 'publisher_doc_event')
.withColumnRenamed('publish_time', 'publish_time_doc_event'),
on=F.col("document_id_event") == F.col("document_id_doc"), how='left') \
.join(page_views_df,
on=[F.col('uuid_event') == F.col('uuid_pv'),
F.col('document_id_event') == F.col('document_id_pv'),
F.col('platform_event') == F.col('platform_pv'),
F.col('geo_location_event') == F.col('geo_location_pv'),
F.col('day_event') == F.col('day_pv')],
how='left') \
.alias('events').cache()
documents_categories_schema = StructType(
[StructField("document_id_cat", IntegerType(), True),
StructField("category_id", IntegerType(), True),
StructField("confidence_level_cat", FloatType(), True)]
)
documents_categories_df = spark.read.schema(documents_categories_schema) \
.options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER + "documents_categories.csv") \
.alias('documents_categories').cache()
documents_categories_grouped_df = documents_categories_df.groupBy('document_id_cat') \
.agg(F.collect_list('category_id').alias('category_id_list'),
F.collect_list('confidence_level_cat').alias('confidence_level_cat_list')) \
.withColumn('dummyDocumentsCategory', F.lit(1)) \
.alias('documents_categories_grouped')
documents_topics_schema = StructType(
[StructField("document_id_top", IntegerType(), True),
StructField("topic_id", IntegerType(), True),
StructField("confidence_level_top", FloatType(), True)]
)
documents_topics_df = spark.read.schema(documents_topics_schema) \
.options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER + "documents_topics.csv") \
.alias('documents_topics').cache()
documents_topics_grouped_df = documents_topics_df.groupBy('document_id_top') \
.agg(F.collect_list('topic_id').alias('topic_id_list'),
F.collect_list('confidence_level_top').alias('confidence_level_top_list')) \
.withColumn('dummyDocumentsTopics', F.lit(1)) \
.alias('documents_topics_grouped')
documents_entities_schema = StructType(
[StructField("document_id_ent", IntegerType(), True),
StructField("entity_id", StringType(), True),
StructField("confidence_level_ent", FloatType(), True)]
)
documents_entities_df = spark.read.schema(documents_entities_schema) \
.options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER + "documents_entities.csv") \
.alias('documents_entities').cache()
documents_entities_grouped_df = documents_entities_df.groupBy('document_id_ent') \
.agg(F.collect_list('entity_id').alias('entity_id_list'),
F.collect_list('confidence_level_ent').alias('confidence_level_ent_list')) \
.withColumn('dummyDocumentsEntities', F.lit(1)) \
.alias('documents_entities_grouped')
clicks_train_schema = StructType(
[StructField("display_id", IntegerType(), True),
StructField("ad_id", IntegerType(), True),
StructField("clicked", IntegerType(), True)]
)
clicks_train_df = spark.read.schema(clicks_train_schema) \
.options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER + "clicks_train.csv") \
.withColumn('dummyClicksTrain', F.lit(1)).alias('clicks_train')
clicks_train_joined_df = clicks_train_df \
.join(promoted_content_df, on='ad_id', how='left') \
.join(documents_meta_df,
on=F.col("promoted_content.document_id_promo") == F.col("documents_meta.document_id_doc"),
how='left') \
.join(events_joined_df, on='display_id', how='left')
clicks_train_joined_df.createOrReplaceTempView('clicks_train_joined')
if evaluation:
table_name = 'user_profiles_eval'
else:
table_name = 'user_profiles'
user_profiles_df = spark.read.parquet(OUTPUT_BUCKET_FOLDER + table_name) \
.withColumn('dummyUserProfiles', F.lit(1)).alias('user_profiles')
# # Splitting Train/Validation set | Test set
if evaluation:
validation_set_exported_df = spark.read.parquet(
OUTPUT_BUCKET_FOLDER + "validation_set.parquet") \
.alias('validation_set')
validation_set_exported_df.select('display_id').distinct() \
.createOrReplaceTempView("validation_display_ids")
validation_set_df = spark.sql('''
SELECT * FROM clicks_train_joined t
WHERE EXISTS (SELECT display_id FROM validation_display_ids
WHERE display_id = t.display_id)''').alias('clicks') \
.join(documents_categories_grouped_df,
on=F.col("document_id_promo") == F.col("documents_categories_grouped.document_id_cat"),
how='left') \
.join(documents_topics_grouped_df,
on=F.col("document_id_promo") == F.col("documents_topics_grouped.document_id_top"),
how='left') \
.join(documents_entities_grouped_df,
on=F.col("document_id_promo") == F.col("documents_entities_grouped.document_id_ent"),
how='left') \
.join(documents_categories_grouped_df
.withColumnRenamed('category_id_list', 'doc_event_category_id_list')
.withColumnRenamed('confidence_level_cat_list', 'doc_event_confidence_level_cat_list')
.alias('documents_event_categories_grouped'),
on=F.col("document_id_event") == F.col("documents_event_categories_grouped.document_id_cat"),
how='left') \
.join(documents_topics_grouped_df
.withColumnRenamed('topic_id_list', 'doc_event_topic_id_list')
.withColumnRenamed('confidence_level_top_list', 'doc_event_confidence_level_top_list')
.alias('documents_event_topics_grouped'),
on=F.col("document_id_event") == F.col("documents_event_topics_grouped.document_id_top"),
how='left') \
.join(documents_entities_grouped_df
.withColumnRenamed('entity_id_list', 'doc_event_entity_id_list')
.withColumnRenamed('confidence_level_ent_list', 'doc_event_confidence_level_ent_list')
.alias('documents_event_entities_grouped'),
on=F.col("document_id_event") == F.col("documents_event_entities_grouped.document_id_ent"),
how='left') \
.join(page_views_users_df,
on=[F.col("clicks.uuid_event") == F.col("page_views_users.uuid_pv"),
F.col("clicks.document_id_promo") == F.col("page_views_users.document_id_pv")],
how='left')
# print("validation_set_df.count() =", validation_set_df.count())
# Added to validation set information about the event and the user for statistics of the error (avg ctr)
validation_set_ground_truth_df = validation_set_df.filter('clicked = 1') \
.join(user_profiles_df,
on=[F.col("user_profiles.uuid") == F.col("uuid_event")],
how='left') \
.withColumn('user_categories_count', list_len_udf('category_id_list')) \
.withColumn('user_topics_count', list_len_udf('topic_id_list')) \
.withColumn('user_entities_count', list_len_udf('entity_id_list')) \
.select('display_id', 'ad_id', 'platform_event', 'day_event', 'timestamp_event',
'geo_location_event', 'event_country', 'event_country_state', 'views',
'user_categories_count', 'user_topics_count', 'user_entities_count') \
.withColumnRenamed('ad_id', 'ad_id_gt') \
.withColumnRenamed('views', 'user_views_count') \
.cache()
# print("validation_set_ground_truth_df.count() =", validation_set_ground_truth_df.count())
train_set_df = spark.sql('''
SELECT * FROM clicks_train_joined t
WHERE NOT EXISTS (SELECT display_id FROM validation_display_ids
WHERE display_id = t.display_id)''').cache()
print("train_set_df.count() =", train_set_df.count())
# validation_display_ids_df.groupBy("day_event").count().show()
else:
clicks_test_schema = StructType(
[StructField("display_id", IntegerType(), True),
StructField("ad_id", IntegerType(), True)]
)
clicks_test_df = spark.read.schema(clicks_test_schema) \
.options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER + "clicks_test.csv") \
.withColumn('dummyClicksTest', F.lit(1)) \
.withColumn('clicked', F.lit(-999)) \
.alias('clicks_test')
test_set_df = clicks_test_df \
.join(promoted_content_df, on='ad_id', how='left') \
.join(documents_meta_df,
on=F.col("promoted_content.document_id_promo") == F.col("documents_meta.document_id_doc"),
how='left') \
.join(documents_categories_grouped_df,
on=F.col("document_id_promo") == F.col("documents_categories_grouped.document_id_cat"),
how='left') \
.join(documents_topics_grouped_df,
on=F.col("document_id_promo") == F.col("documents_topics_grouped.document_id_top"),
how='left') \
.join(documents_entities_grouped_df,
on=F.col("document_id_promo") == F.col("documents_entities_grouped.document_id_ent"),
how='left') \
.join(events_joined_df, on='display_id', how='left') \
.join(documents_categories_grouped_df
.withColumnRenamed('category_id_list', 'doc_event_category_id_list')
.withColumnRenamed('confidence_level_cat_list', 'doc_event_confidence_level_cat_list')
.alias('documents_event_categories_grouped'),
on=F.col("document_id_event") == F.col("documents_event_categories_grouped.document_id_cat"),
how='left') \
.join(documents_topics_grouped_df
.withColumnRenamed('topic_id_list', 'doc_event_topic_id_list')
.withColumnRenamed('confidence_level_top_list', 'doc_event_confidence_level_top_list')
.alias('documents_event_topics_grouped'),
on=F.col("document_id_event") == F.col("documents_event_topics_grouped.document_id_top"),
how='left') \
.join(documents_entities_grouped_df
.withColumnRenamed('entity_id_list', 'doc_event_entity_id_list')
.withColumnRenamed('confidence_level_ent_list', 'doc_event_confidence_level_ent_list')
.alias('documents_event_entities_grouped'),
on=F.col("document_id_event") == F.col("documents_event_entities_grouped.document_id_ent"),
how='left') \
.join(page_views_users_df,
on=[F.col("events.uuid_event") == F.col("page_views_users.uuid_pv"),
F.col("promoted_content.document_id_promo") == F.col("page_views_users.document_id_pv")],
how='left')
train_set_df = clicks_train_joined_df.cache()
print("train_set_df.count() =", train_set_df.count())
# # Training models
def is_null(value):
return value is None or len(str(value).strip()) == 0
LESS_SPECIAL_CAT_VALUE = 'less'
def get_category_field_values_counts(field, df, min_threshold=10):
category_counts = dict(list(filter(lambda x: not is_null(x[0]) and x[1] >= min_threshold,
df.select(field).groupBy(field).count().rdd.map(
lambda x: (x[0], x[1])).collect())))
# Adding a special value to create a feature for values in this category that are less than min_threshold
category_counts[LESS_SPECIAL_CAT_VALUE] = -1
return category_counts
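# Pure-Python sketch of the thresholding above (hypothetical counts): rare
# and null values are dropped, and the reserved LESS_SPECIAL_CAT_VALUE bucket
# later absorbs anything that was filtered out.
_toy_counts = [('US', 500), ('BR', 12), ('XX', 3), (None, 7)]
_kept = dict(filter(lambda x: not is_null(x[0]) and x[1] >= 10, _toy_counts))
_kept[LESS_SPECIAL_CAT_VALUE] = -1
assert _kept == {'US': 500, 'BR': 12, LESS_SPECIAL_CAT_VALUE: -1}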
# ## Building category values counters and indexers
event_country_values_counts = get_category_field_values_counts('event_country', events_df, min_threshold=10)
len(event_country_values_counts)
# All non-null categories: 230
event_country_state_values_counts = get_category_field_values_counts('event_country_state', events_df, min_threshold=10)
len(event_country_state_values_counts)
event_geo_location_values_counts = get_category_field_values_counts('geo_location_event', events_df, min_threshold=10)
len(event_geo_location_values_counts)
# All non-null categories: 2988
doc_entity_id_values_counts = get_category_field_values_counts('entity_id', documents_entities_df, min_threshold=10)
len(doc_entity_id_values_counts)
# All non-null categories: 1326009
# ## Processing average CTR by categories
def get_percentiles(df, field, quantiles_levels=None, max_error_rate=0.0):
if quantiles_levels is None:
quantiles_levels = np.arange(0.0, 1.1, 0.1).tolist()
quantiles = df.approxQuantile(field, quantiles_levels, max_error_rate)
return dict(zip(quantiles_levels, quantiles))
# REG = 10
REG = 0
ctr_udf = F.udf(lambda clicks, views: clicks / float(views + REG), FloatType())
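# Quick numeric check of the CTR formula (pure Python, no Spark): with
# REG = 0 this is plain clicks/views; a positive REG would shrink the CTR
# of low-view items toward zero (additive smoothing on the denominator).
assert abs(3 / float(100 + REG) - 0.03) < 1e-9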
# ### Average CTR by ad_id
ad_id_popularity_df = train_set_df.groupby('ad_id').agg(F.sum('clicked').alias('clicks'),
F.count('*').alias('views')) \
.withColumn('ctr', ctr_udf('clicks', 'views'))
# ad_id_popularity_df.count()
# get_percentiles(ad_id_popularity_df, 'clicks')
# get_percentiles(ad_id_popularity_df, 'views')
ad_id_popularity = ad_id_popularity_df.filter('views > 5').select('ad_id', 'ctr', 'views') \
.rdd.map(lambda x: (x['ad_id'], (x['ctr'], x['views'], 1, 1))).collectAsMap()
ad_id_popularity_broad = sc.broadcast(ad_id_popularity)
list(ad_id_popularity.values())[:3]
len(ad_id_popularity)
# get_ad_id_ctr_udf = F.udf(lambda ad_id: ad_id_popularity[ad_id] if ad_id in ad_id_popularity else -1, FloatType())
ad_id_avg_ctr = sum(map(lambda x: x[0], ad_id_popularity.values())) / float(len(ad_id_popularity))
ad_id_avg_ctr
ad_id_weighted_avg_ctr = sum(map(lambda x: x[0] * x[1], ad_id_popularity.values())) / float(
sum(map(lambda x: x[1], ad_id_popularity.values())))
ad_id_weighted_avg_ctr
ad_id_views_median = np.median(np.array(list(map(lambda x: x[1], ad_id_popularity.values()))))
ad_id_views_median
ad_id_views_mean = sum(map(lambda x: x[1], ad_id_popularity.values())) / float(len(ad_id_popularity))
ad_id_views_mean
# ### Average CTR by document_id (promoted_content)
document_id_popularity_df = train_set_df \
.groupby('document_id_promo') \
.agg(F.sum('clicked').alias('clicks'), F.count('*').alias('views'),
F.countDistinct('ad_id').alias('distinct_ad_ids')) \
.withColumn('ctr', ctr_udf('clicks', 'views'))
document_id_popularity = document_id_popularity_df.filter('views > 5') \
.select('document_id_promo', 'ctr', 'views', 'distinct_ad_ids') \
.rdd.map(lambda x: (x['document_id_promo'],
(x['ctr'], x['views'], x['distinct_ad_ids'], 1))).collectAsMap()
len(document_id_popularity)
document_id_popularity_broad = sc.broadcast(document_id_popularity)
# document_id_popularity_df.count()
# get_percentiles(document_id_popularity_df, 'clicks')
# get_percentiles(document_id_popularity_df, 'views')
document_id_avg_ctr = sum(map(lambda x: x[0], document_id_popularity.values())) / float(len(document_id_popularity))
document_id_avg_ctr
document_id_weighted_avg_ctr = sum(list(map(lambda x: x[0] * x[1], document_id_popularity.values()))) / float(
sum(list(map(lambda x: x[1], document_id_popularity.values()))))
document_id_weighted_avg_ctr
document_id_views_median = np.median(np.array(list(map(lambda x: x[1], document_id_popularity.values()))))
document_id_views_median
document_id_views_mean = sum(map(lambda x: x[1], document_id_popularity.values())) / float(len(document_id_popularity))
document_id_views_mean
# ### Average CTR by (doc_event, doc_ad)
doc_event_doc_ad_avg_ctr_df = train_set_df.groupBy('document_id_event', 'document_id_promo') \
.agg(F.sum('clicked').alias('clicks'),
F.count('*').alias('views'), F.countDistinct('ad_id').alias('distinct_ad_ids')) \
.withColumn('ctr', ctr_udf('clicks', 'views'))
doc_event_doc_ad_avg_ctr = doc_event_doc_ad_avg_ctr_df.filter('views > 5') \
.select('document_id_event', 'document_id_promo', 'ctr', 'views', 'distinct_ad_ids') \
.rdd.map(lambda x: ((x['document_id_event'], x['document_id_promo']),
(x['ctr'], x['views'], x['distinct_ad_ids'], 1))).collectAsMap()
len(doc_event_doc_ad_avg_ctr)
doc_event_doc_ad_avg_ctr_broad = sc.broadcast(doc_event_doc_ad_avg_ctr)
# ### Average CTR by country, source_id
source_id_by_country_popularity_df = train_set_df \
.select('clicked', 'source_id', 'event_country', 'ad_id') \
.groupby('event_country', 'source_id') \
.agg(F.sum('clicked').alias('clicks'), F.count('*').alias('views'),
F.countDistinct('ad_id').alias('distinct_ad_ids')) \
.withColumn('ctr', ctr_udf('clicks', 'views'))
# source_id_popularity = source_id_popularity_df
# .filter('views > 100 and source_id is not null')
# .select('source_id', 'ctr')
# .rdd.collectAsMap()
source_id_by_country_popularity = source_id_by_country_popularity_df \
    .filter('views > 5 and source_id is not null and event_country <> ""') \
    .select('event_country', 'source_id', 'ctr', 'views', 'distinct_ad_ids') \
    .rdd.map(lambda x: ((x['event_country'], x['source_id']),
                        (x['ctr'], x['views'], x['distinct_ad_ids'], 1))) \
    .collectAsMap()
len(source_id_by_country_popularity)
source_id_by_country_popularity_broad = sc.broadcast(source_id_by_country_popularity)
source_id_by_country_avg_ctr = sum(map(lambda x: x[0], source_id_by_country_popularity.values())) / float(
len(source_id_by_country_popularity))
source_id_by_country_avg_ctr
source_id_by_country_weighted_avg_ctr = sum(
map(lambda x: x[0] * x[1], source_id_by_country_popularity.values())) / float(
sum(map(lambda x: x[1], source_id_by_country_popularity.values())))
source_id_by_country_weighted_avg_ctr
source_id_by_country_views_median = np.median(
np.array(list(map(lambda x: x[1], source_id_by_country_popularity.values()))))
source_id_by_country_views_median
source_id_by_country_views_mean = sum(map(lambda x: x[1], source_id_by_country_popularity.values())) / float(
len(source_id_by_country_popularity))
source_id_by_country_views_mean
# ### Average CTR by source_id
source_id_popularity_df = train_set_df.select('clicked', 'source_id', 'ad_id') \
.groupby('source_id').agg(F.sum('clicked').alias('clicks'), F.count('*').alias('views'),
F.countDistinct('ad_id').alias('distinct_ad_ids')) \
.withColumn('ctr', ctr_udf('clicks', 'views'))
source_id_popularity = source_id_popularity_df \
.filter('views > 10 and source_id is not null') \
.select('source_id', 'ctr', 'views', 'distinct_ad_ids') \
.rdd.map(lambda x: (x['source_id'], (x['ctr'], x['views'], x['distinct_ad_ids'], 1))) \
.collectAsMap()
len(source_id_popularity)
source_id_popularity_broad = sc.broadcast(source_id_popularity)
# source_id_popularity_df.count()
# get_percentiles(source_id_popularity_df, 'clicks')
# get_percentiles(source_id_popularity_df, 'views')
# source_id_popularity = source_id_popularity_df
# .filter('views > 100 and source_id is not null')
# .select('source_id', 'ctr')
# .rdd.collectAsMap()
# ### Average CTR by publisher_id
publisher_popularity_df = train_set_df.select('clicked', 'publisher_id', 'ad_id') \
.groupby('publisher_id').agg(F.sum('clicked').alias('clicks'), F.count('*').alias('views'),
F.countDistinct('ad_id').alias('distinct_ad_ids')) \
.withColumn('ctr', ctr_udf('clicks', 'views'))
publisher_popularity = publisher_popularity_df \
.filter('views > 10 and publisher_id is not null') \
.select('publisher_id', 'ctr', 'views', 'distinct_ad_ids') \
.rdd.map(lambda x: (x['publisher_id'], (x['ctr'], x['views'], x['distinct_ad_ids'], 1))) \
.collectAsMap()
len(publisher_popularity)
publisher_popularity_broad = sc.broadcast(publisher_popularity)
# publisher_popularity_df.count()
# ##863
# get_percentiles(publisher_popularity_df, 'clicks')
# get_percentiles(publisher_popularity_df, 'views')
# publisher_id_popularity = publisher_popularity_df
# .filter('views > 100 and publisher_id is not null')
# .select('publisher_id', 'ctr')
# .rdd.collectAsMap()
# len(publisher_id_popularity)
# ##639
# ### Average CTR by advertiser_id
advertiser_id_popularity_df = train_set_df.select('clicked', 'advertiser_id', 'ad_id') \
.groupby('advertiser_id').agg(F.sum('clicked').alias('clicks'), F.count('*').alias('views'),
F.countDistinct('ad_id').alias('distinct_ad_ids')) \
.withColumn('ctr', ctr_udf('clicks', 'views'))
advertiser_id_popularity = advertiser_id_popularity_df \
.filter('views > 10 and advertiser_id is not null') \
.select('advertiser_id', 'ctr', 'views', 'distinct_ad_ids') \
.rdd.map(lambda x: (x['advertiser_id'],
(x['ctr'], x['views'], x['distinct_ad_ids'], 1))).collectAsMap()
len(advertiser_id_popularity)
advertiser_id_popularity_broad = sc.broadcast(advertiser_id_popularity)
# advertiser_id_popularity_df.count()
# ##4063
# get_percentiles(advertiser_id_popularity_df, 'clicks')
# get_percentiles(advertiser_id_popularity_df, 'views')
# advertiser_id_popularity = advertiser_id_popularity_df
# .filter('views > 100 and advertiser_id is not null')
# .select('advertiser_id', 'ctr')
# .rdd.collectAsMap()
# len(advertiser_id_popularity)
# ##3129
# ### Average CTR by campaign_id
campaign_id_popularity_df = train_set_df.select('clicked', 'campaign_id', 'ad_id') \
.groupby('campaign_id').agg(F.sum('clicked').alias('clicks'), F.count('*').alias('views'),
F.countDistinct('ad_id').alias('distinct_ad_ids')) \
.withColumn('ctr', ctr_udf('clicks', 'views'))
campaign_id_popularity = campaign_id_popularity_df \
.filter('views > 10 and campaign_id is not null') \
.select('campaign_id', 'ctr', 'views', 'distinct_ad_ids') \
.rdd.map(lambda x: (x['campaign_id'], (x['ctr'], x['views'], x['distinct_ad_ids'], 1))) \
.collectAsMap()
len(campaign_id_popularity)
campaign_id_popularity_broad = sc.broadcast(campaign_id_popularity)
# campaign_id_popularity_df.count()
# ##31390
# get_percentiles(campaign_id_popularity_df, 'clicks')
# get_percentiles(campaign_id_popularity_df, 'views')
# campaign_id_popularity = campaign_id_popularity_df
# .filter('views > 100 and campaign_id is not null')
# .select('campaign_id', 'ctr')
# .rdd.collectAsMap()
# len(campaign_id_popularity)
# ##16097
# ### Average CTR by category
category_id_popularity_df = train_set_df.join(
documents_categories_df.alias('cat_local'),
on=F.col("document_id_promo") == F.col("cat_local.document_id_cat"), how='inner') \
.select('clicked', 'category_id', 'confidence_level_cat', 'ad_id') \
.groupby('category_id').agg(F.sum('clicked').alias('clicks'),
F.count('*').alias('views'),
F.mean('confidence_level_cat').alias('avg_confidence_level_cat'),
F.countDistinct('ad_id').alias('distinct_ad_ids')) \
.withColumn('ctr', ctr_udf('clicks', 'views'))
category_id_popularity = category_id_popularity_df.filter('views > 10') \
.select('category_id', 'ctr', 'views', 'avg_confidence_level_cat', 'distinct_ad_ids') \
.rdd.map(lambda x: (x['category_id'],
(x['ctr'], x['views'], x['distinct_ad_ids'], x['avg_confidence_level_cat']))).collectAsMap()
len(category_id_popularity)
category_id_popularity_broad = sc.broadcast(category_id_popularity)
list(category_id_popularity.values())[:10]
np.median(np.array(list(map(lambda x: x[1], category_id_popularity.values()))))
sum(map(lambda x: x[1], category_id_popularity.values())) / float(len(category_id_popularity))
# There seems to be a hierarchy in the categories, judging by the pattern of the codes...
# category_id_popularity
# ### Average CTR by (country, category)
category_id_by_country_popularity_df = train_set_df \
.join(documents_categories_df.alias('cat_local'),
on=F.col("document_id_promo") == F.col("cat_local.document_id_cat"), how='inner') \
.select('clicked', 'category_id', 'confidence_level_cat', 'event_country', 'ad_id') \
.groupby('event_country', 'category_id').agg(F.sum('clicked').alias('clicks'),
F.count('*').alias('views'),
F.mean('confidence_level_cat').alias('avg_confidence_level_cat'),
F.countDistinct('ad_id').alias('distinct_ad_ids')) \
.withColumn('ctr', ctr_udf('clicks', 'views'))
category_id_by_country_popularity = category_id_by_country_popularity_df \
.filter('views > 10 and event_country <> ""') \
.select('event_country', 'category_id', 'ctr', 'views', 'avg_confidence_level_cat',
'distinct_ad_ids') \
.rdd.map(lambda x: ((x['event_country'], x['category_id']),
(x['ctr'], x['views'], x['distinct_ad_ids'], x['avg_confidence_level_cat']))).collectAsMap()
len(category_id_by_country_popularity)
category_id_by_country_popularity_broad = sc.broadcast(category_id_by_country_popularity)
# ### Average CTR by Topic
topic_id_popularity_df = train_set_df.join(
documents_topics_df.alias('top_local'),
on=F.col("document_id_promo") == F.col("top_local.document_id_top"), how='inner') \
.select('clicked', 'topic_id', 'confidence_level_top', 'ad_id') \
.groupby('topic_id').agg(F.sum('clicked').alias('clicks'), F.count('*').alias('views'),
F.mean('confidence_level_top').alias('avg_confidence_level_top'),
F.countDistinct('ad_id').alias('distinct_ad_ids')) \
.withColumn('ctr', ctr_udf('clicks', 'views'))
topic_id_popularity = topic_id_popularity_df.filter('views > 10') \
.select('topic_id', 'ctr', 'views', 'avg_confidence_level_top', 'distinct_ad_ids') \
.rdd.map(lambda x: (x['topic_id'],
(x['ctr'], x['views'], x['distinct_ad_ids'], x['avg_confidence_level_top']))).collectAsMap()
len(topic_id_popularity)
topic_id_popularity_broad = sc.broadcast(topic_id_popularity)
sum(map(lambda x: x[1], topic_id_popularity.values())) / float(len(topic_id_popularity))
sum(map(lambda x: x[2] * x[1], topic_id_popularity.values())) / float(len(topic_id_popularity))
# ### Average CTR by (country, topic)
topic_id_by_country_popularity_df = train_set_df.join(
documents_topics_df.alias('top_local'),
on=F.col("document_id_promo") == F.col("top_local.document_id_top"), how='inner') \
.select('clicked', 'topic_id', 'confidence_level_top', 'event_country', 'ad_id') \
.groupby('event_country', 'topic_id').agg(F.sum('clicked').alias('clicks'),
F.count('*').alias('views'),
F.mean('confidence_level_top').alias('avg_confidence_level_top'),
F.countDistinct('ad_id').alias('distinct_ad_ids')) \
.withColumn('ctr', ctr_udf('clicks', 'views'))
topic_id_id_by_country_popularity = topic_id_by_country_popularity_df \
.filter('views > 10 and event_country <> ""') \
.select('event_country', 'topic_id', 'ctr', 'views', 'avg_confidence_level_top',
'distinct_ad_ids') \
.rdd.map(lambda x: ((x['event_country'], x['topic_id']),
(x['ctr'], x['views'], x['distinct_ad_ids'], x['avg_confidence_level_top']))).collectAsMap()
len(topic_id_id_by_country_popularity)
topic_id_id_by_country_popularity_broad = sc.broadcast(topic_id_id_by_country_popularity)
# ### Average CTR by Entity
entity_id_popularity_df = train_set_df.join(
documents_entities_df.alias('ent_local'),
on=F.col("document_id_promo") == F.col("ent_local.document_id_ent"), how='inner') \
.select('clicked', 'entity_id', 'confidence_level_ent', 'ad_id') \
.groupby('entity_id').agg(F.sum('clicked').alias('clicks'), F.count('*').alias('views'),
F.mean('confidence_level_ent').alias('avg_confidence_level_ent'),
F.countDistinct('ad_id').alias('distinct_ad_ids')) \
.withColumn('ctr', ctr_udf('clicks', 'views'))
entity_id_popularity = entity_id_popularity_df.filter('views > 5') \
.select('entity_id', 'ctr', 'views', 'avg_confidence_level_ent', 'distinct_ad_ids') \
.rdd.map(lambda x: (x['entity_id'],
(x['ctr'], x['views'], x['distinct_ad_ids'], x['avg_confidence_level_ent']))).collectAsMap()
len(entity_id_popularity)
entity_id_popularity_broad = sc.broadcast(entity_id_popularity)
np.median(np.array(list(map(lambda x: x[1], entity_id_popularity.values()))))
sum(map(lambda x: x[1], entity_id_popularity.values())) / float(len(entity_id_popularity))
# ### Average CTR by (country, entity)
entity_id_by_country_popularity_df = train_set_df.join(
documents_entities_df.alias('ent_local'),
on=F.col("document_id_promo") == F.col("ent_local.document_id_ent"), how='inner') \
.select('clicked', 'entity_id', 'event_country', 'confidence_level_ent', 'ad_id') \
.groupby('event_country', 'entity_id').agg(F.sum('clicked').alias('clicks'),
F.count('*').alias('views'),
F.mean('confidence_level_ent').alias('avg_confidence_level_ent'),
F.countDistinct('ad_id').alias('distinct_ad_ids')) \
.withColumn('ctr', ctr_udf('clicks', 'views'))
entity_id_by_country_popularity = entity_id_by_country_popularity_df \
.filter('views > 5 and event_country <> ""') \
.select('event_country', 'entity_id', 'ctr', 'views', 'avg_confidence_level_ent',
'distinct_ad_ids') \
.rdd.map(lambda x: ((x['event_country'], x['entity_id']),
(x['ctr'], x['views'], x['distinct_ad_ids'], x['avg_confidence_level_ent']))).collectAsMap()
len(entity_id_by_country_popularity)
entity_id_by_country_popularity_broad = sc.broadcast(entity_id_by_country_popularity)
# ### Loading # docs by categories, topics, entities
df_filenames_suffix = ''
if evaluation:
df_filenames_suffix = '_eval'
with open(OUTPUT_BUCKET_FOLDER + 'categories_docs_counts' + df_filenames_suffix + '.pickle', 'rb') as input_file:
categories_docs_counts = cPickle.load(input_file)
len(categories_docs_counts)
with open(OUTPUT_BUCKET_FOLDER + 'topics_docs_counts' + df_filenames_suffix + '.pickle', 'rb') as input_file:
topics_docs_counts = cPickle.load(input_file)
len(topics_docs_counts)
with open(OUTPUT_BUCKET_FOLDER + 'entities_docs_counts' + df_filenames_suffix + '.pickle', 'rb') as input_file:
entities_docs_counts = cPickle.load(input_file)
len(entities_docs_counts)
documents_total = documents_meta_df.count()
documents_total
# ## Exploring Publish Time
publish_times_df = train_set_df \
    .filter('publish_time is not null') \
    .select('document_id_promo', 'publish_time') \
    .distinct() \
    .select(F.col('publish_time').cast(IntegerType()))
publish_time_percentiles = get_percentiles(publish_times_df, 'publish_time', quantiles_levels=[0.5],
max_error_rate=0.001)
publish_time_percentiles
publish_time_median = int(publish_time_percentiles[0.5])
datetime.datetime.utcfromtimestamp(publish_time_median)
def get_days_diff(newer_timestamp, older_timestamp):
sec_diff = newer_timestamp - older_timestamp
days_diff = sec_diff / 60 / 60 / 24
return days_diff
def get_time_decay_factor(timestamp, timestamp_ref=None, alpha=0.001):
if timestamp_ref is None:
timestamp_ref = time.time()
days_diff = get_days_diff(timestamp_ref, timestamp)
denominator = math.pow(1 + alpha, days_diff)
if denominator != 0:
return 1.0 / denominator
else:
return 0.0
TIME_DECAY_ALPHA = 0.0005
ref_dates = [
1476714880, # 7 days
1474727680, # 30 days
1469370880, # 90 days
1461508480, # 180 days
1445697280, # 1 year
1414161280 # 2 years
]
for d in ref_dates:
print(datetime.datetime.utcfromtimestamp(d), get_time_decay_factor(d, alpha=TIME_DECAY_ALPHA))
# ### Get local time
DEFAULT_TZ_EST = -4.0
def get_local_utc_bst_tz(event_country, event_country_state):
local_tz = DEFAULT_TZ_EST
if len(event_country) > 0:
if event_country in countries_utc_dst_broad.value:
local_tz = countries_utc_dst_broad.value[event_country]
if len(event_country_state) > 2:
state = event_country_state[3:5]
if event_country == 'US':
if state in us_states_utc_dst_broad.value:
local_tz = us_states_utc_dst_broad.value[state]
elif event_country == 'CA':
if state in ca_countries_utc_dst_broad.value:
local_tz = ca_countries_utc_dst_broad.value[state]
return float(local_tz)
hour_bins_dict = {'EARLY_MORNING': 0,
'MORNING': 1,
'MIDDAY': 2,
'AFTERNOON': 3,
'EVENING': 4,
'NIGHT': 5}
hour_bins_values = sorted(hour_bins_dict.values())
def get_hour_bin(hour):
if hour >= 5 and hour < 8:
hour_bin = hour_bins_dict['EARLY_MORNING']
elif hour >= 8 and hour < 11:
hour_bin = hour_bins_dict['MORNING']
elif hour >= 11 and hour < 14:
hour_bin = hour_bins_dict['MIDDAY']
elif hour >= 14 and hour < 19:
hour_bin = hour_bins_dict['AFTERNOON']
elif hour >= 19 and hour < 22:
hour_bin = hour_bins_dict['EVENING']
else:
hour_bin = hour_bins_dict['NIGHT']
return hour_bin
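# Spot-check of the binning (hours assumed to be local): 6 -> EARLY_MORNING,
# 12 -> MIDDAY, 23 falls through to NIGHT.
assert [get_hour_bin(h) for h in (6, 12, 23)] == [0, 2, 5]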
def get_local_datetime(dt, event_country, event_country_state):
local_tz = get_local_utc_bst_tz(event_country, event_country_state)
tz_delta = local_tz - DEFAULT_TZ_EST
local_time = dt + datetime.timedelta(hours=tz_delta)
return local_time
get_local_datetime(datetime.datetime.now(), 'US', 'US>CA')
def is_weekend(dt):
return dt.weekday() >= 5
is_weekend(datetime.datetime(2016, 6, 14))
# ## Average CTR functions
timestamp_ref = date_time_to_unix_epoch(datetime.datetime(2016, 6, 29, 3, 59, 59))
decay_factor_default = get_time_decay_factor(publish_time_median, timestamp_ref, alpha=TIME_DECAY_ALPHA)
print("decay_factor_default", decay_factor_default)
def get_confidence_sample_size(sample, max_for_reference=100000):
# Avoiding overflow for large sample size
if sample >= max_for_reference:
return 1.0
    # Curiously, taking the reference log in base 2 gives a slightly
    # higher score, so it is kept that way.
    ref_log = math.log(1 + max_for_reference, 2)
    return math.log(1 + sample) / float(ref_log)
for i in [0, 0.5, 1, 2, 3, 4, 5, 10, 20, 30, 100, 200, 300, 1000, 2000, 3000, 10000, 20000, 30000, 50000, 90000, 100000,
500000, 900000, 1000000, 2171607]:
print(i, get_confidence_sample_size(i))
def get_popularity(an_id, a_dict):
    if an_id not in a_dict:
        return None, None
    ctr, views, distinct_ad_ids, extra_weight = a_dict[an_id]
    return ctr, get_confidence_sample_size(views / float(distinct_ad_ids)) * extra_weight
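# Toy check (hypothetical dict): the broadcast popularity dicts map
# id -> (ctr, views, distinct_ad_ids, extra_weight); confidence grows with
# views-per-distinct-ad and is scaled by the fourth tuple element.
_toy_pop = {42: (0.05, 1000, 10, 1.0)}
_ctr, _conf = get_popularity(42, _toy_pop)
assert _ctr == 0.05 and 0.0 < _conf <= 1.0
assert get_popularity(-1, _toy_pop) == (None, None)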
def get_weighted_avg_popularity_from_list(ids_list, confidence_ids_list, pop_dict):
pops = list(filter(lambda x: x[0][0] is not None,
[(get_popularity(an_id, pop_dict), confidence) for an_id, confidence in
zip(ids_list, confidence_ids_list)]))
# print("pops",pops)
if len(pops) > 0:
weighted_avg = sum(map(lambda x: x[0][0] * x[0][1] * x[1], pops)) / float(
sum(map(lambda x: x[0][1] * x[1], pops)))
confidence = max(map(lambda x: x[0][1] * x[1], pops))
return weighted_avg, confidence
else:
return None, None
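# Toy check (hypothetical ids/confidences): with equal confidences the
# weighted average of two CTRs lands between them.
_toy_pop2 = {1: (0.10, 100, 1, 1.0), 2: (0.30, 100, 1, 1.0)}
_avg, _conf2 = get_weighted_avg_popularity_from_list([1, 2], [1.0, 1.0], _toy_pop2)
assert 0.10 < _avg < 0.30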
def get_weighted_avg_country_popularity_from_list(event_country, ids_list, confidence_ids_list, pop_dict):
pops = list(filter(lambda x: x[0][0] is not None,
[(get_popularity((event_country, an_id), pop_dict), confidence) for an_id, confidence in
zip(ids_list, confidence_ids_list)]))
if len(pops) > 0:
weighted_avg = sum(map(lambda x: x[0][0] * x[0][1] * x[1], pops)) / float(
sum(map(lambda x: x[0][1] * x[1], pops)))
confidence = max(map(lambda x: x[0][1] * x[1], pops))
return weighted_avg, confidence
else:
return None, None
def get_popularity_score(event_country, ad_id, document_id, source_id,
publisher_id, advertiser_id, campaign_id, document_id_event,
category_ids_by_doc, cat_confidence_level_by_doc,
topic_ids_by_doc, top_confidence_level_by_doc,
entity_ids_by_doc, ent_confidence_level_by_doc,
output_detailed_list=False):
probs = []
avg_ctr, confidence = get_popularity(ad_id, ad_id_popularity_broad.value)
if avg_ctr is not None:
probs.append(('pop_ad_id', avg_ctr, confidence))
avg_ctr, confidence = get_popularity(document_id, document_id_popularity_broad.value)
if avg_ctr is not None:
probs.append(('pop_document_id', avg_ctr, confidence))
avg_ctr, confidence = get_popularity((document_id_event, document_id), doc_event_doc_ad_avg_ctr_broad.value)
if avg_ctr is not None:
probs.append(('pop_doc_event_doc_ad', avg_ctr, confidence))
if source_id != -1:
avg_ctr = None
if event_country != '':
avg_ctr, confidence = get_popularity((event_country, source_id),
source_id_by_country_popularity_broad.value)
if avg_ctr is not None:
probs.append(('pop_source_id_country', avg_ctr, confidence))
avg_ctr, confidence = get_popularity(source_id, source_id_popularity_broad.value)
if avg_ctr is not None:
probs.append(('pop_source_id', avg_ctr, confidence))
if publisher_id is not None:
avg_ctr, confidence = get_popularity(publisher_id, publisher_popularity_broad.value)
if avg_ctr is not None:
probs.append(('pop_publisher_id', avg_ctr, confidence))
if advertiser_id is not None:
avg_ctr, confidence = get_popularity(advertiser_id, advertiser_id_popularity_broad.value)
if avg_ctr is not None:
probs.append(('pop_advertiser_id', avg_ctr, confidence))
if campaign_id is not None:
avg_ctr, confidence = get_popularity(campaign_id, campaign_id_popularity_broad.value)
if avg_ctr is not None:
probs.append(('pop_campain_id', avg_ctr, confidence))
if len(entity_ids_by_doc) > 0:
avg_ctr = None
if event_country != '':
avg_ctr, confidence = get_weighted_avg_country_popularity_from_list(
event_country, entity_ids_by_doc, ent_confidence_level_by_doc,
entity_id_by_country_popularity_broad.value)
if avg_ctr is not None:
probs.append(('pop_entity_id_country', avg_ctr, confidence))
avg_ctr, confidence = get_weighted_avg_popularity_from_list(
entity_ids_by_doc, ent_confidence_level_by_doc,
entity_id_popularity_broad.value)
if avg_ctr is not None:
probs.append(('pop_entity_id', avg_ctr, confidence))
if len(topic_ids_by_doc) > 0:
avg_ctr = None
if event_country != '':
avg_ctr, confidence = get_weighted_avg_country_popularity_from_list(
event_country, topic_ids_by_doc, top_confidence_level_by_doc,
topic_id_id_by_country_popularity_broad.value)
if avg_ctr is not None:
probs.append(('pop_topic_id_country', avg_ctr, confidence))
avg_ctr, confidence = get_weighted_avg_popularity_from_list(
topic_ids_by_doc, top_confidence_level_by_doc,
topic_id_popularity_broad.value)
if avg_ctr is not None:
probs.append(('pop_topic_id', avg_ctr, confidence))
if len(category_ids_by_doc) > 0:
avg_ctr = None
if event_country != '':
avg_ctr, confidence = get_weighted_avg_country_popularity_from_list(
event_country, category_ids_by_doc, cat_confidence_level_by_doc,
category_id_by_country_popularity_broad.value)
if avg_ctr is not None:
probs.append(('pop_category_id_country', avg_ctr, confidence))
avg_ctr, confidence = get_weighted_avg_popularity_from_list(
category_ids_by_doc, cat_confidence_level_by_doc,
category_id_popularity_broad.value)
if avg_ctr is not None:
probs.append(('pop_category_id', avg_ctr, confidence))
# print("[get_popularity_score] probs", probs)
if output_detailed_list:
return probs
else:
if len(probs) > 0:
# weighted_avg_probs_by_confidence = sum(map(lambda x: x[1] * math.log(1+x[2],2), probs)) \
# / float(sum(map(lambda x: math.log(1+x[2],2), probs)))
weighted_avg_probs_by_confidence = sum(map(lambda x: x[1] * x[2], probs)) / float(
sum(map(lambda x: x[2], probs)))
confidence = max(map(lambda x: x[2], probs))
return weighted_avg_probs_by_confidence, confidence
else:
return None, None
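# When output_detailed_list=True (as used for the feature vector export
# below), the result is a list of (feature_name, avg_ctr, confidence)
# triples, e.g. [('pop_ad_id', 0.21, 0.43)] (values hypothetical); the
# feature names must match entries of float_feature_names defined later.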
# ## Content-Based similarity functions
def cosine_similarity_dicts(dict1, dict2):
dict1_norm = math.sqrt(sum([v ** 2 for v in dict1.values()]))
dict2_norm = math.sqrt(sum([v ** 2 for v in dict2.values()]))
sum_common_aspects = 0.0
intersections = 0
for key in dict1:
if key in dict2:
sum_common_aspects += dict1[key] * dict2[key]
intersections += 1
return sum_common_aspects / (dict1_norm * dict2_norm), intersections
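# Worked example (hypothetical vectors): identical single-key dicts have
# cosine similarity 1.0 with one intersecting key.
_sim, _n = cosine_similarity_dicts({'a': 2.0}, {'a': 2.0})
assert abs(_sim - 1.0) < 1e-9 and _n == 1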
def cosine_similarity_user_docs_aspects(user_aspect_profile, doc_aspect_ids, doc_aspects_confidence,
aspect_docs_counts):
if user_aspect_profile is None or len(user_aspect_profile) == 0 or doc_aspect_ids is None or len(
doc_aspect_ids) == 0:
return None, None
doc_aspects = dict(zip(doc_aspect_ids, doc_aspects_confidence))
doc_aspects_tfidf_confid = {}
for key in doc_aspects:
tf = 1.0
        # Note: a double log is applied here, strongly damping the IDF term.
        idf = math.log(math.log(documents_total / float(aspect_docs_counts[key])))
confidence = doc_aspects[key]
doc_aspects_tfidf_confid[key] = tf * idf * confidence
user_aspects_tfidf_confid = {}
for key in user_aspect_profile:
tfidf = user_aspect_profile[key][0]
confidence = user_aspect_profile[key][1]
user_aspects_tfidf_confid[key] = tfidf * confidence
similarity, intersections = cosine_similarity_dicts(doc_aspects_tfidf_confid, user_aspects_tfidf_confid)
if intersections > 0:
# P(A intersect B)_intersections = P(A)^intersections * P(B)^intersections
random_error = math.pow(len(doc_aspects) / float(len(aspect_docs_counts)),
intersections) * math.pow(len(user_aspect_profile) / float(len(aspect_docs_counts)),
intersections)
else:
# P(A not intersect B) = 1 - P(A intersect B)
random_error = 1 - ((len(doc_aspects) / float(len(aspect_docs_counts))) *
(len(user_aspect_profile) / float(len(aspect_docs_counts))))
confidence = 1.0 - random_error
return similarity, confidence
def cosine_similarity_doc_event_doc_ad_aspects(doc_event_aspect_ids, doc_event_aspects_confidence,
doc_ad_aspect_ids, doc_ad_aspects_confidence,
aspect_docs_counts):
if doc_event_aspect_ids is None or len(doc_event_aspect_ids) == 0 \
or doc_ad_aspect_ids is None or len(doc_ad_aspect_ids) == 0:
return None, None
doc_event_aspects = dict(zip(doc_event_aspect_ids, doc_event_aspects_confidence))
doc_event_aspects_tfidf_confid = {}
for key in doc_event_aspect_ids:
tf = 1.0
idf = math.log(math.log(documents_total / float(aspect_docs_counts[key])))
confidence = doc_event_aspects[key]
doc_event_aspects_tfidf_confid[key] = tf * idf * confidence
doc_ad_aspects = dict(zip(doc_ad_aspect_ids, doc_ad_aspects_confidence))
doc_ad_aspects_tfidf_confid = {}
for key in doc_ad_aspect_ids:
tf = 1.0
idf = math.log(math.log(documents_total / float(aspect_docs_counts[key])))
confidence = doc_ad_aspects[key]
doc_ad_aspects_tfidf_confid[key] = tf * idf * confidence
similarity, intersections = cosine_similarity_dicts(doc_event_aspects_tfidf_confid, doc_ad_aspects_tfidf_confid)
if intersections > 0:
# P(A intersect B)_intersections = P(A)^intersections * P(B)^intersections
random_error = math.pow(len(doc_event_aspect_ids) / float(len(aspect_docs_counts)),
intersections) * math.pow(len(doc_ad_aspect_ids) / float(len(aspect_docs_counts)),
intersections)
else:
# P(A not intersect B) = 1 - P(A intersect B)
random_error = 1 - ((len(doc_event_aspect_ids) / float(len(aspect_docs_counts))) *
(len(doc_ad_aspect_ids) / float(len(aspect_docs_counts))))
confidence = 1.0 - random_error
return similarity, confidence
def get_user_cb_interest_score(user_views_count, user_categories, user_topics, user_entities,
timestamp_event, category_ids_by_doc, cat_confidence_level_by_doc,
topic_ids_by_doc, top_confidence_level_by_doc,
entity_ids_by_doc, ent_confidence_level_by_doc,
output_detailed_list=False):
# Content-Based
sims = []
categories_similarity, cat_sim_confidence = cosine_similarity_user_docs_aspects(user_categories,
category_ids_by_doc,
cat_confidence_level_by_doc,
categories_docs_counts)
if categories_similarity is not None:
sims.append(('user_doc_ad_sim_categories', categories_similarity, cat_sim_confidence))
topics_similarity, top_sim_confidence = cosine_similarity_user_docs_aspects(user_topics, topic_ids_by_doc,
top_confidence_level_by_doc,
topics_docs_counts)
if topics_similarity is not None:
sims.append(('user_doc_ad_sim_topics', topics_similarity, top_sim_confidence))
entities_similarity, entity_sim_confid = cosine_similarity_user_docs_aspects(user_entities, entity_ids_by_doc,
ent_confidence_level_by_doc,
entities_docs_counts)
if entities_similarity is not None:
sims.append(('user_doc_ad_sim_entities', entities_similarity, entity_sim_confid))
if output_detailed_list:
return sims
else:
if len(sims) > 0:
weighted_avg_sim_by_confidence = sum(map(lambda x: x[1] * x[2], sims)) / float(
sum(map(lambda x: x[2], sims)))
confidence = sum(map(lambda x: x[2], sims)) / float(len(sims))
# print("[get_user_cb_interest_score] sims: {} | \
# Avg: {} - Confid: {}".format(sims, weighted_avg_sim_by_confidence, confidence))
return weighted_avg_sim_by_confidence, confidence
else:
return None, None
def get_doc_event_doc_ad_cb_similarity_score(doc_event_category_ids, doc_event_cat_confidence_levels,
doc_event_topic_ids, doc_event_top_confidence_levels,
doc_event_entity_ids, doc_event_ent_confidence_levels,
doc_ad_category_ids, doc_ad_cat_confidence_levels,
doc_ad_topic_ids, doc_ad_top_confidence_levels,
doc_ad_entity_ids, doc_ad_ent_confidence_levels,
output_detailed_list=False):
# Content-Based
sims = []
categories_similarity, cat_sim_confidence = cosine_similarity_doc_event_doc_ad_aspects(
doc_event_category_ids, doc_event_cat_confidence_levels,
doc_ad_category_ids, doc_ad_cat_confidence_levels,
categories_docs_counts)
if categories_similarity is not None:
sims.append(('doc_event_doc_ad_sim_categories', categories_similarity, cat_sim_confidence))
topics_similarity, top_sim_confidence = cosine_similarity_doc_event_doc_ad_aspects(
doc_event_topic_ids, doc_event_top_confidence_levels,
doc_ad_topic_ids, doc_ad_top_confidence_levels,
topics_docs_counts)
if topics_similarity is not None:
sims.append(('doc_event_doc_ad_sim_topics', topics_similarity, top_sim_confidence))
entities_similarity, entity_sim_confid = cosine_similarity_doc_event_doc_ad_aspects(
doc_event_entity_ids, doc_event_ent_confidence_levels,
doc_ad_entity_ids, doc_ad_ent_confidence_levels,
entities_docs_counts)
if entities_similarity is not None:
sims.append(('doc_event_doc_ad_sim_entities', entities_similarity, entity_sim_confid))
if output_detailed_list:
return sims
else:
if len(sims) > 0:
weighted_avg_sim_by_confidence = sum(map(lambda x: x[1] * x[2], sims)) / float(
sum(map(lambda x: x[2], sims)))
confidence = sum(map(lambda x: x[2], sims)) / float(len(sims))
# print("[get_user_cb_interest_score] sims: {} | \
# Avg: {} - Confid: {}".format(sims, weighted_avg_sim_by_confidence, confidence))
return weighted_avg_sim_by_confidence, confidence
else:
return None, None
# # Feature Vector export
bool_feature_names = ['event_weekend',
'user_has_already_viewed_doc']
int_feature_names = ['user_views',
'ad_views',
'doc_views',
'doc_event_days_since_published',
'doc_event_hour',
'doc_ad_days_since_published',
]
float_feature_names = [
'pop_ad_id',
'pop_ad_id_conf',
'pop_ad_id_conf_multipl',
'pop_document_id',
'pop_document_id_conf',
'pop_document_id_conf_multipl',
'pop_publisher_id',
'pop_publisher_id_conf',
'pop_publisher_id_conf_multipl',
'pop_advertiser_id',
'pop_advertiser_id_conf',
'pop_advertiser_id_conf_multipl',
'pop_campain_id',
'pop_campain_id_conf',
'pop_campain_id_conf_multipl',
'pop_doc_event_doc_ad',
'pop_doc_event_doc_ad_conf',
'pop_doc_event_doc_ad_conf_multipl',
'pop_source_id',
'pop_source_id_conf',
'pop_source_id_conf_multipl',
'pop_source_id_country',
'pop_source_id_country_conf',
'pop_source_id_country_conf_multipl',
'pop_entity_id',
'pop_entity_id_conf',
'pop_entity_id_conf_multipl',
'pop_entity_id_country',
'pop_entity_id_country_conf',
'pop_entity_id_country_conf_multipl',
'pop_topic_id',
'pop_topic_id_conf',
'pop_topic_id_conf_multipl',
'pop_topic_id_country',
'pop_topic_id_country_conf',
'pop_topic_id_country_conf_multipl',
'pop_category_id',
'pop_category_id_conf',
'pop_category_id_conf_multipl',
'pop_category_id_country',
'pop_category_id_country_conf',
'pop_category_id_country_conf_multipl',
'user_doc_ad_sim_categories',
'user_doc_ad_sim_categories_conf',
'user_doc_ad_sim_categories_conf_multipl',
'user_doc_ad_sim_topics',
'user_doc_ad_sim_topics_conf',
'user_doc_ad_sim_topics_conf_multipl',
'user_doc_ad_sim_entities',
'user_doc_ad_sim_entities_conf',
'user_doc_ad_sim_entities_conf_multipl',
'doc_event_doc_ad_sim_categories',
'doc_event_doc_ad_sim_categories_conf',
'doc_event_doc_ad_sim_categories_conf_multipl',
'doc_event_doc_ad_sim_topics',
'doc_event_doc_ad_sim_topics_conf',
'doc_event_doc_ad_sim_topics_conf_multipl',
'doc_event_doc_ad_sim_entities',
'doc_event_doc_ad_sim_entities_conf',
'doc_event_doc_ad_sim_entities_conf_multipl'
]
TRAFFIC_SOURCE_FV = 'traffic_source'
EVENT_HOUR_FV = 'event_hour'
EVENT_COUNTRY_FV = 'event_country'
EVENT_COUNTRY_STATE_FV = 'event_country_state'
EVENT_GEO_LOCATION_FV = 'event_geo_location'
EVENT_PLATFORM_FV = 'event_platform'
AD_ADVERTISER_FV = 'ad_advertiser'
DOC_AD_SOURCE_ID_FV = 'doc_ad_source_id'
DOC_AD_PUBLISHER_ID_FV = 'doc_ad_publisher_id'
DOC_EVENT_SOURCE_ID_FV = 'doc_event_source_id'
DOC_EVENT_PUBLISHER_ID_FV = 'doc_event_publisher_id'
DOC_AD_CATEGORY_ID_FV = 'doc_ad_category_id'
DOC_AD_TOPIC_ID_FV = 'doc_ad_topic_id'
DOC_AD_ENTITY_ID_FV = 'doc_ad_entity_id'
DOC_EVENT_CATEGORY_ID_FV = 'doc_event_category_id'
DOC_EVENT_TOPIC_ID_FV = 'doc_event_topic_id'
DOC_EVENT_ENTITY_ID_FV = 'doc_event_entity_id'
# ### Configuring feature vector
category_feature_names_integral = ['ad_advertiser',
'doc_ad_category_id_1',
'doc_ad_category_id_2',
'doc_ad_category_id_3',
'doc_ad_topic_id_1',
'doc_ad_topic_id_2',
'doc_ad_topic_id_3',
'doc_ad_entity_id_1',
'doc_ad_entity_id_2',
'doc_ad_entity_id_3',
'doc_ad_entity_id_4',
'doc_ad_entity_id_5',
'doc_ad_entity_id_6',
'doc_ad_publisher_id',
'doc_ad_source_id',
'doc_event_category_id_1',
'doc_event_category_id_2',
'doc_event_category_id_3',
'doc_event_topic_id_1',
'doc_event_topic_id_2',
'doc_event_topic_id_3',
'doc_event_entity_id_1',
'doc_event_entity_id_2',
'doc_event_entity_id_3',
'doc_event_entity_id_4',
'doc_event_entity_id_5',
'doc_event_entity_id_6',
'doc_event_publisher_id',
'doc_event_source_id',
'event_country',
'event_country_state',
'event_geo_location',
'event_hour',
'event_platform',
'traffic_source']
feature_vector_labels_integral = bool_feature_names \
+ int_feature_names \
+ float_feature_names \
+ category_feature_names_integral
feature_vector_labels_integral_dict = dict([(key, idx) for idx, key in enumerate(feature_vector_labels_integral)])
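# Each feature name maps to a fixed slot in the sparse vector; booleans come
# first, so e.g. 'event_weekend' occupies index 0.
assert feature_vector_labels_integral_dict['event_weekend'] == 0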
with open(OUTPUT_BUCKET_FOLDER + 'feature_vector_labels_integral.txt', 'w') as output:
output.writelines('\n'.join(feature_vector_labels_integral))
# ### Building feature vectors
def set_feature_vector_cat_value_integral(field_name, field_value, feature_vector):
if not is_null(field_value): # and str(field_value) != '-1':
feature_vector[feature_vector_labels_integral_dict[field_name]] = float(field_value)
def set_feature_vector_cat_top_multi_values_integral(
field_name, values, confidences, feature_vector, top=5):
top_values = list(filter(lambda z: z != -1,
map(lambda y: y[0], sorted(zip(values, confidences), key=lambda x: -x[1]))))[:top]
for idx, field_value in list(enumerate(top_values)):
set_feature_vector_cat_value_integral(
'{}_{}'.format(field_name, idx + 1), field_value, feature_vector)
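# Toy check of the ranking logic above (hypothetical values): entries are
# sorted by descending confidence, -1 placeholders are dropped, and the top-N
# survivors land in numbered slots '<field>_1', '<field>_2', ...
_vals, _confs = [10, -1, 30], [0.2, 0.9, 0.5]
_ranked = list(filter(lambda z: z != -1,
                      map(lambda y: y[0],
                          sorted(zip(_vals, _confs), key=lambda x: -x[1]))))[:2]
assert _ranked == [30, 10]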
def get_ad_feature_vector_integral(
user_doc_ids_viewed, user_views_count, user_categories, user_topics, user_entities,
event_country, event_country_state,
ad_id, document_id, source_id, doc_ad_publish_time, timestamp_event, platform_event,
geo_location_event,
doc_event_source_id, doc_event_publisher_id, doc_event_publish_time,
traffic_source_pv, advertiser_id, publisher_id,
campaign_id, document_id_event,
doc_ad_category_ids, doc_ad_cat_confidence_levels,
doc_ad_topic_ids, doc_ad_top_confidence_levels,
doc_ad_entity_ids, doc_ad_ent_confidence_levels,
doc_event_category_ids, doc_event_cat_confidence_levels,
doc_event_topic_ids, doc_event_top_confidence_levels,
doc_event_entity_ids, doc_event_ent_confidence_levels):
try:
feature_vector = {}
if user_views_count is not None:
feature_vector[feature_vector_labels_integral_dict['user_views']] = float(user_views_count)
if user_doc_ids_viewed is not None:
feature_vector[feature_vector_labels_integral_dict['user_has_already_viewed_doc']] = float(
document_id in user_doc_ids_viewed)
if ad_id in ad_id_popularity_broad.value:
feature_vector[feature_vector_labels_integral_dict['ad_views']] = float(
ad_id_popularity_broad.value[ad_id][1])
if document_id in document_id_popularity_broad.value:
feature_vector[feature_vector_labels_integral_dict['doc_views']] = float(
document_id_popularity_broad.value[document_id][1])
if timestamp_event > -1:
dt_timestamp_event = convert_odd_timestamp(timestamp_event)
if doc_ad_publish_time is not None:
delta_days = (dt_timestamp_event - doc_ad_publish_time).days
if 0 <= delta_days <= 365 * 10: # 10 years
feature_vector[feature_vector_labels_integral_dict['doc_ad_days_since_published']] = float(
delta_days)
if doc_event_publish_time is not None:
delta_days = (dt_timestamp_event - doc_event_publish_time).days
if 0 <= delta_days <= 365 * 10: # 10 years
feature_vector[feature_vector_labels_integral_dict['doc_event_days_since_published']] = float(
delta_days)
# Local period of the day (hours)
dt_local_timestamp_event = get_local_datetime(dt_timestamp_event, event_country, event_country_state)
local_hour_bin = get_hour_bin(dt_local_timestamp_event.hour)
feature_vector[feature_vector_labels_integral_dict['doc_event_hour']] = float(
local_hour_bin) # Hour for Decision Trees
set_feature_vector_cat_value_integral(EVENT_HOUR_FV, local_hour_bin,
feature_vector) # Period of day for FFM
# Weekend
weekend = int(is_weekend(dt_local_timestamp_event))
feature_vector[feature_vector_labels_integral_dict['event_weekend']] = float(weekend)
conf_field_suffix = '_conf'
conf_multiplied_field_suffix = '_conf_multipl'
# Setting Popularity fields
pop_scores = get_popularity_score(event_country, ad_id, document_id, source_id,
publisher_id, advertiser_id, campaign_id, document_id_event,
doc_ad_category_ids, doc_ad_cat_confidence_levels,
doc_ad_topic_ids, doc_ad_top_confidence_levels,
doc_ad_entity_ids, doc_ad_ent_confidence_levels,
output_detailed_list=True)
for score in pop_scores:
feature_vector[feature_vector_labels_integral_dict[score[0]]] = score[1]
feature_vector[feature_vector_labels_integral_dict[score[0] + conf_field_suffix]] = score[2]
feature_vector[feature_vector_labels_integral_dict[score[0] + conf_multiplied_field_suffix]] = \
score[1] * score[2]
# Setting User-Doc_ad CB Similarity fields
user_doc_ad_cb_sim_scores = get_user_cb_interest_score(
user_views_count, user_categories, user_topics, user_entities,
timestamp_event,
doc_ad_category_ids, doc_ad_cat_confidence_levels,
doc_ad_topic_ids, doc_ad_top_confidence_levels,
doc_ad_entity_ids, doc_ad_ent_confidence_levels,
output_detailed_list=True)
for score in user_doc_ad_cb_sim_scores:
feature_vector[feature_vector_labels_integral_dict[score[0]]] = score[1]
feature_vector[feature_vector_labels_integral_dict[score[0] + conf_field_suffix]] = score[2]
feature_vector[feature_vector_labels_integral_dict[score[0] + conf_multiplied_field_suffix]] = \
score[1] * score[2]
# Setting Doc_event-doc_ad CB Similarity fields
doc_event_doc_ad_cb_sim_scores = get_doc_event_doc_ad_cb_similarity_score(
doc_event_category_ids, doc_event_cat_confidence_levels,
doc_event_topic_ids, doc_event_top_confidence_levels,
doc_event_entity_ids, doc_event_ent_confidence_levels,
doc_ad_category_ids, doc_ad_cat_confidence_levels,
doc_ad_topic_ids, doc_ad_top_confidence_levels,
doc_ad_entity_ids, doc_ad_ent_confidence_levels,
output_detailed_list=True)
for score in doc_event_doc_ad_cb_sim_scores:
feature_vector[feature_vector_labels_integral_dict[score[0]]] = score[1]
feature_vector[feature_vector_labels_integral_dict[score[0] + conf_field_suffix]] = score[2]
feature_vector[feature_vector_labels_integral_dict[score[0] + conf_multiplied_field_suffix]] = \
score[1] * score[2]
# Process code for event_country
if event_country in event_country_values_counts:
event_country_code = event_country_values_counts[event_country]
else:
event_country_code = event_country_values_counts[LESS_SPECIAL_CAT_VALUE]
set_feature_vector_cat_value_integral(EVENT_COUNTRY_FV, event_country_code, feature_vector)
# Process code for event_country_state
if event_country_state in event_country_state_values_counts:
event_country_state_code = event_country_state_values_counts[event_country_state]
else:
event_country_state_code = event_country_state_values_counts[LESS_SPECIAL_CAT_VALUE]
set_feature_vector_cat_value_integral(EVENT_COUNTRY_STATE_FV, event_country_state_code, feature_vector)
# Process code for geo_location_event
if geo_location_event in event_geo_location_values_counts:
geo_location_event_code = event_geo_location_values_counts[geo_location_event]
else:
geo_location_event_code = event_geo_location_values_counts[LESS_SPECIAL_CAT_VALUE]
        # Shift traffic_source and platform_event by -1 so they become 0-based
if traffic_source_pv is not None:
feature_vector[feature_vector_labels_integral_dict[TRAFFIC_SOURCE_FV]] = int(traffic_source_pv - 1)
if platform_event is not None:
feature_vector[feature_vector_labels_integral_dict[EVENT_PLATFORM_FV]] = int(platform_event - 1)
set_feature_vector_cat_value_integral(EVENT_GEO_LOCATION_FV, geo_location_event_code, feature_vector)
# set_feature_vector_cat_value_integral(TRAFFIC_SOURCE_FV, traffic_source_pv - 1, feature_vector)
# set_feature_vector_cat_value_integral(EVENT_PLATFORM_FV, platform_event - 1, feature_vector)
set_feature_vector_cat_value_integral(AD_ADVERTISER_FV, advertiser_id, feature_vector)
set_feature_vector_cat_value_integral(DOC_AD_SOURCE_ID_FV, source_id, feature_vector)
set_feature_vector_cat_value_integral(DOC_AD_PUBLISHER_ID_FV, publisher_id, feature_vector)
set_feature_vector_cat_value_integral(DOC_EVENT_SOURCE_ID_FV, doc_event_source_id, feature_vector)
set_feature_vector_cat_value_integral(DOC_EVENT_PUBLISHER_ID_FV, doc_event_publisher_id, feature_vector)
set_feature_vector_cat_top_multi_values_integral(DOC_AD_CATEGORY_ID_FV, doc_ad_category_ids,
doc_ad_cat_confidence_levels, feature_vector, top=3)
set_feature_vector_cat_top_multi_values_integral(DOC_AD_TOPIC_ID_FV, doc_ad_topic_ids,
doc_ad_top_confidence_levels, feature_vector, top=3)
set_feature_vector_cat_top_multi_values_integral(DOC_EVENT_CATEGORY_ID_FV, doc_event_category_ids,
doc_event_cat_confidence_levels, feature_vector, top=3)
set_feature_vector_cat_top_multi_values_integral(DOC_EVENT_TOPIC_ID_FV, doc_event_topic_ids,
doc_event_top_confidence_levels, feature_vector, top=3)
# Process codes for doc_ad_entity_ids
doc_ad_entity_ids_codes = [doc_entity_id_values_counts[x]
if x in doc_entity_id_values_counts
else doc_entity_id_values_counts[LESS_SPECIAL_CAT_VALUE]
for x in doc_ad_entity_ids]
set_feature_vector_cat_top_multi_values_integral(DOC_AD_ENTITY_ID_FV, doc_ad_entity_ids_codes,
doc_ad_ent_confidence_levels, feature_vector, top=6)
# Process codes for doc_event_entity_ids
doc_event_entity_ids_codes = [doc_entity_id_values_counts[x]
if x in doc_entity_id_values_counts
else doc_entity_id_values_counts[LESS_SPECIAL_CAT_VALUE]
for x in doc_event_entity_ids]
set_feature_vector_cat_top_multi_values_integral(DOC_EVENT_ENTITY_ID_FV, doc_event_entity_ids_codes,
doc_event_ent_confidence_levels, feature_vector, top=6)
        # Creating a dummy column as the last column,
        # because xgboost has a problem if the last column is undefined for all rows,
        # complaining that the dimensions of data and feature_names do not match.
        # feature_vector[feature_vector_labels_dict[DUMMY_FEATURE_COLUMN]] = float(0)
# Ensuring that all elements are floats for compatibility with UDF output (ArrayType(FloatType()))
# feature_vector = list([float(x) for x in feature_vector])
except Exception as e:
raise Exception("[get_ad_feature_vector_integral] ERROR PROCESSING FEATURE VECTOR! Params: {}"
.format([user_doc_ids_viewed, user_views_count, user_categories, user_topics, user_entities,
event_country, event_country_state,
ad_id, document_id, source_id, doc_ad_publish_time, timestamp_event, platform_event,
geo_location_event,
doc_event_source_id, doc_event_publisher_id, doc_event_publish_time,
traffic_source_pv, advertiser_id, publisher_id,
campaign_id, document_id_event,
doc_ad_category_ids, doc_ad_cat_confidence_levels,
doc_ad_topic_ids, doc_ad_top_confidence_levels,
doc_ad_entity_ids, doc_ad_ent_confidence_levels,
doc_event_category_ids, doc_event_cat_confidence_levels,
doc_event_topic_ids, doc_event_top_confidence_levels,
doc_event_entity_ids, doc_event_ent_confidence_levels]),
e)
return SparseVector(len(feature_vector_labels_integral_dict), feature_vector)
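# A note on the return value above (an inference from the assignment pattern,
# not original documentation): feature_vector maps integral feature indices to
# values, and SparseVector(size, {index: value}) stores only the entries that
# were actually set. For example, SparseVector(3, {0: 1.0, 2: 5.0}) represents
# the dense vector [1.0, 0.0, 5.0].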
get_ad_feature_vector_integral_udf = F.udf(
lambda user_doc_ids_viewed, user_views_count, user_categories, user_topics,
user_entities, event_country, event_country_state, ad_id, document_id, source_id,
doc_ad_publish_time, timestamp_event, platform_event,
geo_location_event,
doc_event_source_id, doc_event_publisher_id, doc_event_publish_time,
traffic_source_pv, advertiser_id, publisher_id,
campaign_id, document_id_event,
category_ids_by_doc, cat_confidence_level_by_doc,
topic_ids_by_doc, top_confidence_level_by_doc,
entity_ids_by_doc, ent_confidence_level_by_doc,
doc_event_category_id_list, doc_event_confidence_level_cat_list,
doc_event_topic_id_list, doc_event_confidence_level_top,
doc_event_entity_id_list, doc_event_confidence_level_ent:
get_ad_feature_vector_integral(user_doc_ids_viewed, user_views_count, user_categories, user_topics,
user_entities,
event_country, event_country_state,
ad_id, document_id, source_id, doc_ad_publish_time, timestamp_event,
platform_event,
geo_location_event,
doc_event_source_id, doc_event_publisher_id, doc_event_publish_time,
traffic_source_pv, advertiser_id, publisher_id,
campaign_id, document_id_event,
category_ids_by_doc, cat_confidence_level_by_doc,
topic_ids_by_doc, top_confidence_level_by_doc,
entity_ids_by_doc, ent_confidence_level_by_doc,
doc_event_category_id_list, doc_event_confidence_level_cat_list,
doc_event_topic_id_list, doc_event_confidence_level_top,
doc_event_entity_id_list, doc_event_confidence_level_ent),
VectorUDT())
# ## Export Train set feature vectors
train_set_enriched_df = train_set_df \
.join(documents_categories_grouped_df,
on=F.col("document_id_promo") == F.col("documents_categories_grouped.document_id_cat"),
how='left') \
.join(documents_topics_grouped_df,
on=F.col("document_id_promo") == F.col("documents_topics_grouped.document_id_top"),
how='left') \
.join(documents_entities_grouped_df,
on=F.col("document_id_promo") == F.col("documents_entities_grouped.document_id_ent"),
how='left') \
.join(documents_categories_grouped_df
.withColumnRenamed('category_id_list', 'doc_event_category_id_list')
.withColumnRenamed('confidence_level_cat_list', 'doc_event_confidence_level_cat_list')
.alias('documents_event_categories_grouped'),
on=F.col("document_id_event") == F.col("documents_event_categories_grouped.document_id_cat"),
how='left') \
.join(documents_topics_grouped_df
.withColumnRenamed('topic_id_list', 'doc_event_topic_id_list')
.withColumnRenamed('confidence_level_top_list', 'doc_event_confidence_level_top_list')
.alias('documents_event_topics_grouped'),
on=F.col("document_id_event") == F.col("documents_event_topics_grouped.document_id_top"),
how='left') \
.join(documents_entities_grouped_df
.withColumnRenamed('entity_id_list', 'doc_event_entity_id_list')
.withColumnRenamed('confidence_level_ent_list', 'doc_event_confidence_level_ent_list')
.alias('documents_event_entities_grouped'),
on=F.col("document_id_event") == F.col("documents_event_entities_grouped.document_id_ent"),
how='left') \
.select('display_id', 'uuid_event', 'event_country', 'event_country_state', 'platform_event',
'source_id_doc_event', 'publisher_doc_event', 'publish_time_doc_event',
'publish_time', 'ad_id', 'document_id_promo', 'clicked',
'geo_location_event', 'advertiser_id', 'publisher_id',
'campaign_id', 'document_id_event',
'traffic_source_pv',
int_list_null_to_empty_list_udf('doc_event_category_id_list')
.alias('doc_event_category_id_list'),
float_list_null_to_empty_list_udf('doc_event_confidence_level_cat_list')
.alias('doc_event_confidence_level_cat_list'),
int_list_null_to_empty_list_udf('doc_event_topic_id_list')
.alias('doc_event_topic_id_list'),
float_list_null_to_empty_list_udf('doc_event_confidence_level_top_list')
.alias('doc_event_confidence_level_top_list'),
str_list_null_to_empty_list_udf('doc_event_entity_id_list')
.alias('doc_event_entity_id_list'),
float_list_null_to_empty_list_udf('doc_event_confidence_level_ent_list')
.alias('doc_event_confidence_level_ent_list'),
int_null_to_minus_one_udf('source_id').alias('source_id'),
int_null_to_minus_one_udf('timestamp_event').alias('timestamp_event'),
int_list_null_to_empty_list_udf('category_id_list').alias('category_id_list'),
float_list_null_to_empty_list_udf('confidence_level_cat_list')
.alias('confidence_level_cat_list'),
int_list_null_to_empty_list_udf('topic_id_list').alias('topic_id_list'),
float_list_null_to_empty_list_udf('confidence_level_top_list')
.alias('confidence_level_top_list'),
str_list_null_to_empty_list_udf('entity_id_list').alias('entity_id_list'),
float_list_null_to_empty_list_udf('confidence_level_ent_list')
.alias('confidence_level_ent_list')) \
.join(user_profiles_df, on=[F.col("user_profiles.uuid") == F.col("uuid_event")], how='left') \
.withColumnRenamed('categories', 'user_categories') \
.withColumnRenamed('topics', 'user_topics') \
.withColumnRenamed('entities', 'user_entities') \
.withColumnRenamed('doc_ids', 'user_doc_ids_viewed') \
.withColumnRenamed('views', 'user_views_count')
train_set_feature_vectors_df = train_set_enriched_df \
.withColumn('feature_vector',
get_ad_feature_vector_integral_udf(
'user_doc_ids_viewed',
'user_views_count',
'user_categories',
'user_topics',
'user_entities',
'event_country',
'event_country_state',
'ad_id',
'document_id_promo',
'source_id',
'publish_time',
'timestamp_event',
'platform_event',
'geo_location_event',
'source_id_doc_event',
'publisher_doc_event',
'publish_time_doc_event',
'traffic_source_pv',
'advertiser_id',
'publisher_id',
'campaign_id',
'document_id_event',
'category_id_list',
'confidence_level_cat_list',
'topic_id_list',
'confidence_level_top_list',
'entity_id_list',
'confidence_level_ent_list',
'doc_event_category_id_list',
'doc_event_confidence_level_cat_list',
'doc_event_topic_id_list',
'doc_event_confidence_level_top_list',
'doc_event_entity_id_list',
'doc_event_confidence_level_ent_list')) \
.select(F.col('uuid_event').alias('uuid'), 'display_id', 'ad_id', 'document_id_event',
F.col('document_id_promo').alias('document_id'), F.col('clicked').alias('label'),
'feature_vector')
if evaluation:
train_feature_vector_gcs_folder_name = 'train_feature_vectors_integral_eval'
else:
train_feature_vector_gcs_folder_name = 'train_feature_vectors_integral'
train_set_feature_vectors_df.write.parquet(OUTPUT_BUCKET_FOLDER + train_feature_vector_gcs_folder_name,
mode='overwrite')
# # Export Validation/Test set feature vectors
def is_leak(max_timestamp_pv_leak, timestamp_event):
return max_timestamp_pv_leak >= 0 and max_timestamp_pv_leak >= timestamp_event
is_leak_udf = F.udf(lambda max_timestamp_pv_leak, timestamp_event: int(is_leak(max_timestamp_pv_leak, timestamp_event)),
IntegerType())
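# Worked examples for the leak rule above (values are hypothetical):
# is_leak(100, 50) -> True, because the user's latest page view of the promoted
# document (timestamp 100) happened at or after the click event (timestamp 50);
# is_leak(-1, 50) -> False, since -1 encodes "no page view of this document".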
if evaluation:
data_df = validation_set_df
else:
data_df = test_set_df
test_validation_set_enriched_df = data_df.select(
'display_id', 'uuid_event', 'event_country', 'event_country_state', 'platform_event',
'source_id_doc_event', 'publisher_doc_event', 'publish_time_doc_event',
'publish_time',
'ad_id', 'document_id_promo', 'clicked',
'geo_location_event', 'advertiser_id', 'publisher_id',
'campaign_id', 'document_id_event',
'traffic_source_pv',
int_list_null_to_empty_list_udf('doc_event_category_id_list')
.alias('doc_event_category_id_list'),
float_list_null_to_empty_list_udf('doc_event_confidence_level_cat_list')
.alias('doc_event_confidence_level_cat_list'),
int_list_null_to_empty_list_udf('doc_event_topic_id_list')
.alias('doc_event_topic_id_list'),
float_list_null_to_empty_list_udf('doc_event_confidence_level_top_list')
.alias('doc_event_confidence_level_top_list'),
str_list_null_to_empty_list_udf('doc_event_entity_id_list')
.alias('doc_event_entity_id_list'),
float_list_null_to_empty_list_udf('doc_event_confidence_level_ent_list')
.alias('doc_event_confidence_level_ent_list'),
int_null_to_minus_one_udf('source_id')
.alias('source_id'),
int_null_to_minus_one_udf('timestamp_event').alias('timestamp_event'),
int_list_null_to_empty_list_udf('category_id_list').alias('category_id_list'),
float_list_null_to_empty_list_udf('confidence_level_cat_list')
.alias('confidence_level_cat_list'),
int_list_null_to_empty_list_udf('topic_id_list').alias('topic_id_list'),
float_list_null_to_empty_list_udf('confidence_level_top_list')
.alias('confidence_level_top_list'),
str_list_null_to_empty_list_udf('entity_id_list').alias('entity_id_list'),
float_list_null_to_empty_list_udf('confidence_level_ent_list')
.alias('confidence_level_ent_list'),
int_null_to_minus_one_udf('max_timestamp_pv').alias('max_timestamp_pv_leak')) \
.join(user_profiles_df, on=[F.col("user_profiles.uuid") == F.col("uuid_event")], how='left') \
.withColumnRenamed('categories', 'user_categories') \
.withColumnRenamed('topics', 'user_topics') \
.withColumnRenamed('entities', 'user_entities') \
.withColumnRenamed('doc_ids', 'user_doc_ids_viewed') \
.withColumnRenamed('views', 'user_views_count')
test_validation_set_feature_vectors_df = test_validation_set_enriched_df \
.withColumn('feature_vector',
get_ad_feature_vector_integral_udf(
'user_doc_ids_viewed',
'user_views_count',
'user_categories',
'user_topics',
'user_entities',
'event_country',
'event_country_state',
'ad_id',
'document_id_promo',
'source_id',
'publish_time',
'timestamp_event',
'platform_event',
'geo_location_event',
'source_id_doc_event',
'publisher_doc_event',
'publish_time_doc_event',
'traffic_source_pv',
'advertiser_id',
'publisher_id',
'campaign_id',
'document_id_event',
'category_id_list',
'confidence_level_cat_list',
'topic_id_list',
'confidence_level_top_list',
'entity_id_list',
'confidence_level_ent_list',
'doc_event_category_id_list',
'doc_event_confidence_level_cat_list',
'doc_event_topic_id_list',
'doc_event_confidence_level_top_list',
'doc_event_entity_id_list',
'doc_event_confidence_level_ent_list')) \
.select(F.col('uuid').alias('uuid'), 'display_id', 'ad_id', 'document_id_event',
F.col('document_id_promo').alias('document_id'), F.col('clicked').alias('label'),
is_leak_udf('max_timestamp_pv_leak', 'timestamp_event').alias('is_leak'),
'feature_vector')
if evaluation:
test_validation_feature_vector_gcs_folder_name = 'validation_feature_vectors_integral'
else:
test_validation_feature_vector_gcs_folder_name = 'test_feature_vectors_integral'
test_validation_set_feature_vectors_df.write.parquet(
OUTPUT_BUCKET_FOLDER + test_validation_feature_vector_gcs_folder_name, mode='overwrite')
spark.stop()
| DeepLearningExamples-master | TensorFlow/Recommendation/WideAndDeep/preproc/preproc3.py |
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import datetime
import numpy as np
import pandas as pd
import pyspark.sql.functions as F
import tensorflow as tf
import trainer
from pyspark import TaskContext
from pyspark.context import SparkContext, SparkConf
from pyspark.sql.functions import col, udf
from pyspark.sql.session import SparkSession
from pyspark.sql.types import ArrayType, DoubleType
from tensorflow_transform.tf_metadata import dataset_metadata
from tensorflow_transform.tf_metadata import dataset_schema
from tensorflow_transform.tf_metadata import metadata_io
from trainer.features import LABEL_COLUMN, DISPLAY_ID_COLUMN, IS_LEAK_COLUMN, DISPLAY_ID_AND_IS_LEAK_ENCODED_COLUMN, \
CATEGORICAL_COLUMNS, DOC_CATEGORICAL_MULTIVALUED_COLUMNS, BOOL_COLUMNS, INT_COLUMNS, FLOAT_COLUMNS, \
FLOAT_COLUMNS_LOG_BIN_TRANSFORM, FLOAT_COLUMNS_SIMPLE_BIN_TRANSFORM
evaluation = True
evaluation_verbose = False
OUTPUT_BUCKET_FOLDER = "/outbrain/preprocessed/"
DATA_BUCKET_FOLDER = "/outbrain/orig/"
SPARK_TEMP_FOLDER = "/outbrain/spark-temp/"
LOCAL_DATA_TFRECORDS_DIR = "/outbrain/tfrecords"
TEST_SET_MODE = False
TENSORFLOW_HADOOP = "preproc/data/tensorflow-hadoop-1.5.0.jar"
conf = SparkConf().setMaster('local[*]').set('spark.executor.memory', '40g').set('spark.driver.memory', '200g').set(
"spark.local.dir", SPARK_TEMP_FOLDER)
conf.set("spark.jars", TENSORFLOW_HADOOP)
conf.set("spark.sql.files.maxPartitionBytes", 805306368)
sc = SparkContext(conf=conf)
spark = SparkSession(sc)
parser = argparse.ArgumentParser()
parser.add_argument(
'--prebatch_size',
help='Prebatch size in created tfrecords',
type=int,
default=4096)
parser.add_argument(
'--submission',
action='store_true',
default=False
)
args = parser.parse_args()
batch_size = args.prebatch_size
# # Feature Vector export
bool_feature_names = ['event_weekend',
'user_has_already_viewed_doc']
int_feature_names = ['user_views',
'ad_views',
'doc_views',
'doc_event_days_since_published',
'doc_event_hour',
'doc_ad_days_since_published',
]
float_feature_names = [
'pop_ad_id',
'pop_ad_id_conf',
'pop_ad_id_conf_multipl',
'pop_document_id',
'pop_document_id_conf',
'pop_document_id_conf_multipl',
'pop_publisher_id',
'pop_publisher_id_conf',
'pop_publisher_id_conf_multipl',
'pop_advertiser_id',
'pop_advertiser_id_conf',
'pop_advertiser_id_conf_multipl',
'pop_campain_id',
'pop_campain_id_conf',
'pop_campain_id_conf_multipl',
'pop_doc_event_doc_ad',
'pop_doc_event_doc_ad_conf',
'pop_doc_event_doc_ad_conf_multipl',
'pop_source_id',
'pop_source_id_conf',
'pop_source_id_conf_multipl',
'pop_source_id_country',
'pop_source_id_country_conf',
'pop_source_id_country_conf_multipl',
'pop_entity_id',
'pop_entity_id_conf',
'pop_entity_id_conf_multipl',
'pop_entity_id_country',
'pop_entity_id_country_conf',
'pop_entity_id_country_conf_multipl',
'pop_topic_id',
'pop_topic_id_conf',
'pop_topic_id_conf_multipl',
'pop_topic_id_country',
'pop_topic_id_country_conf',
'pop_topic_id_country_conf_multipl',
'pop_category_id',
'pop_category_id_conf',
'pop_category_id_conf_multipl',
'pop_category_id_country',
'pop_category_id_country_conf',
'pop_category_id_country_conf_multipl',
'user_doc_ad_sim_categories',
'user_doc_ad_sim_categories_conf',
'user_doc_ad_sim_categories_conf_multipl',
'user_doc_ad_sim_topics',
'user_doc_ad_sim_topics_conf',
'user_doc_ad_sim_topics_conf_multipl',
'user_doc_ad_sim_entities',
'user_doc_ad_sim_entities_conf',
'user_doc_ad_sim_entities_conf_multipl',
'doc_event_doc_ad_sim_categories',
'doc_event_doc_ad_sim_categories_conf',
'doc_event_doc_ad_sim_categories_conf_multipl',
'doc_event_doc_ad_sim_topics',
'doc_event_doc_ad_sim_topics_conf',
'doc_event_doc_ad_sim_topics_conf_multipl',
'doc_event_doc_ad_sim_entities',
'doc_event_doc_ad_sim_entities_conf',
'doc_event_doc_ad_sim_entities_conf_multipl'
]
# ### Configuring feature vector
category_feature_names_integral = ['ad_advertiser',
'doc_ad_category_id_1',
'doc_ad_category_id_2',
'doc_ad_category_id_3',
'doc_ad_topic_id_1',
'doc_ad_topic_id_2',
'doc_ad_topic_id_3',
'doc_ad_entity_id_1',
'doc_ad_entity_id_2',
'doc_ad_entity_id_3',
'doc_ad_entity_id_4',
'doc_ad_entity_id_5',
'doc_ad_entity_id_6',
'doc_ad_publisher_id',
'doc_ad_source_id',
'doc_event_category_id_1',
'doc_event_category_id_2',
'doc_event_category_id_3',
'doc_event_topic_id_1',
'doc_event_topic_id_2',
'doc_event_topic_id_3',
'doc_event_entity_id_1',
'doc_event_entity_id_2',
'doc_event_entity_id_3',
'doc_event_entity_id_4',
'doc_event_entity_id_5',
'doc_event_entity_id_6',
'doc_event_publisher_id',
'doc_event_source_id',
'event_country',
'event_country_state',
'event_geo_location',
'event_hour',
'event_platform',
'traffic_source']
feature_vector_labels_integral = bool_feature_names \
+ int_feature_names \
+ float_feature_names \
+ category_feature_names_integral
if args.submission:
train_feature_vector_gcs_folder_name = 'train_feature_vectors_integral'
else:
train_feature_vector_gcs_folder_name = 'train_feature_vectors_integral_eval'
# ## Exporting integral feature vectors to CSV
train_feature_vectors_exported_df = spark.read.parquet(OUTPUT_BUCKET_FOLDER + train_feature_vector_gcs_folder_name)
train_feature_vectors_exported_df.take(3)
integral_headers = ['label', 'display_id', 'ad_id', 'doc_id', 'doc_event_id',
'is_leak'] + feature_vector_labels_integral
CSV_ORDERED_COLUMNS = ['label', 'display_id', 'ad_id', 'doc_id', 'doc_event_id', 'is_leak', 'event_weekend',
'user_has_already_viewed_doc', 'user_views', 'ad_views', 'doc_views',
'doc_event_days_since_published', 'doc_event_hour', 'doc_ad_days_since_published',
'pop_ad_id', 'pop_ad_id_conf',
'pop_ad_id_conf_multipl', 'pop_document_id', 'pop_document_id_conf',
'pop_document_id_conf_multipl', 'pop_publisher_id', 'pop_publisher_id_conf',
'pop_publisher_id_conf_multipl', 'pop_advertiser_id', 'pop_advertiser_id_conf',
'pop_advertiser_id_conf_multipl', 'pop_campain_id', 'pop_campain_id_conf',
'pop_campain_id_conf_multipl', 'pop_doc_event_doc_ad', 'pop_doc_event_doc_ad_conf',
'pop_doc_event_doc_ad_conf_multipl', 'pop_source_id', 'pop_source_id_conf',
'pop_source_id_conf_multipl', 'pop_source_id_country', 'pop_source_id_country_conf',
'pop_source_id_country_conf_multipl', 'pop_entity_id', 'pop_entity_id_conf',
'pop_entity_id_conf_multipl', 'pop_entity_id_country', 'pop_entity_id_country_conf',
'pop_entity_id_country_conf_multipl', 'pop_topic_id', 'pop_topic_id_conf',
'pop_topic_id_conf_multipl', 'pop_topic_id_country', 'pop_topic_id_country_conf',
'pop_topic_id_country_conf_multipl', 'pop_category_id', 'pop_category_id_conf',
'pop_category_id_conf_multipl', 'pop_category_id_country', 'pop_category_id_country_conf',
'pop_category_id_country_conf_multipl', 'user_doc_ad_sim_categories',
'user_doc_ad_sim_categories_conf', 'user_doc_ad_sim_categories_conf_multipl',
'user_doc_ad_sim_topics', 'user_doc_ad_sim_topics_conf', 'user_doc_ad_sim_topics_conf_multipl',
'user_doc_ad_sim_entities', 'user_doc_ad_sim_entities_conf',
'user_doc_ad_sim_entities_conf_multipl',
'doc_event_doc_ad_sim_categories', 'doc_event_doc_ad_sim_categories_conf',
'doc_event_doc_ad_sim_categories_conf_multipl', 'doc_event_doc_ad_sim_topics',
'doc_event_doc_ad_sim_topics_conf', 'doc_event_doc_ad_sim_topics_conf_multipl',
'doc_event_doc_ad_sim_entities', 'doc_event_doc_ad_sim_entities_conf',
'doc_event_doc_ad_sim_entities_conf_multipl', 'ad_advertiser', 'doc_ad_category_id_1',
'doc_ad_category_id_2', 'doc_ad_category_id_3', 'doc_ad_topic_id_1', 'doc_ad_topic_id_2',
'doc_ad_topic_id_3', 'doc_ad_entity_id_1', 'doc_ad_entity_id_2', 'doc_ad_entity_id_3',
'doc_ad_entity_id_4', 'doc_ad_entity_id_5', 'doc_ad_entity_id_6', 'doc_ad_publisher_id',
'doc_ad_source_id', 'doc_event_category_id_1', 'doc_event_category_id_2',
'doc_event_category_id_3',
'doc_event_topic_id_1', 'doc_event_topic_id_2', 'doc_event_topic_id_3', 'doc_event_entity_id_1',
'doc_event_entity_id_2', 'doc_event_entity_id_3', 'doc_event_entity_id_4',
'doc_event_entity_id_5',
'doc_event_entity_id_6', 'doc_event_publisher_id', 'doc_event_source_id', 'event_country',
'event_country_state', 'event_geo_location', 'event_hour', 'event_platform', 'traffic_source']
FEAT_CSV_ORDERED_COLUMNS = ['event_weekend',
'user_has_already_viewed_doc', 'user_views', 'ad_views', 'doc_views',
'doc_event_days_since_published', 'doc_event_hour', 'doc_ad_days_since_published',
'pop_ad_id', 'pop_ad_id_conf',
'pop_ad_id_conf_multipl', 'pop_document_id', 'pop_document_id_conf',
'pop_document_id_conf_multipl', 'pop_publisher_id', 'pop_publisher_id_conf',
'pop_publisher_id_conf_multipl', 'pop_advertiser_id', 'pop_advertiser_id_conf',
'pop_advertiser_id_conf_multipl', 'pop_campain_id', 'pop_campain_id_conf',
'pop_campain_id_conf_multipl', 'pop_doc_event_doc_ad', 'pop_doc_event_doc_ad_conf',
'pop_doc_event_doc_ad_conf_multipl', 'pop_source_id', 'pop_source_id_conf',
'pop_source_id_conf_multipl', 'pop_source_id_country', 'pop_source_id_country_conf',
'pop_source_id_country_conf_multipl', 'pop_entity_id', 'pop_entity_id_conf',
'pop_entity_id_conf_multipl', 'pop_entity_id_country', 'pop_entity_id_country_conf',
'pop_entity_id_country_conf_multipl', 'pop_topic_id', 'pop_topic_id_conf',
'pop_topic_id_conf_multipl', 'pop_topic_id_country', 'pop_topic_id_country_conf',
'pop_topic_id_country_conf_multipl', 'pop_category_id', 'pop_category_id_conf',
'pop_category_id_conf_multipl', 'pop_category_id_country', 'pop_category_id_country_conf',
'pop_category_id_country_conf_multipl', 'user_doc_ad_sim_categories',
'user_doc_ad_sim_categories_conf', 'user_doc_ad_sim_categories_conf_multipl',
'user_doc_ad_sim_topics', 'user_doc_ad_sim_topics_conf',
'user_doc_ad_sim_topics_conf_multipl',
'user_doc_ad_sim_entities', 'user_doc_ad_sim_entities_conf',
'user_doc_ad_sim_entities_conf_multipl',
'doc_event_doc_ad_sim_categories', 'doc_event_doc_ad_sim_categories_conf',
'doc_event_doc_ad_sim_categories_conf_multipl', 'doc_event_doc_ad_sim_topics',
'doc_event_doc_ad_sim_topics_conf', 'doc_event_doc_ad_sim_topics_conf_multipl',
'doc_event_doc_ad_sim_entities', 'doc_event_doc_ad_sim_entities_conf',
'doc_event_doc_ad_sim_entities_conf_multipl', 'ad_advertiser', 'doc_ad_category_id_1',
'doc_ad_category_id_2', 'doc_ad_category_id_3', 'doc_ad_topic_id_1', 'doc_ad_topic_id_2',
'doc_ad_topic_id_3', 'doc_ad_entity_id_1', 'doc_ad_entity_id_2', 'doc_ad_entity_id_3',
'doc_ad_entity_id_4', 'doc_ad_entity_id_5', 'doc_ad_entity_id_6', 'doc_ad_publisher_id',
'doc_ad_source_id', 'doc_event_category_id_1', 'doc_event_category_id_2',
'doc_event_category_id_3',
'doc_event_topic_id_1', 'doc_event_topic_id_2', 'doc_event_topic_id_3',
'doc_event_entity_id_1',
'doc_event_entity_id_2', 'doc_event_entity_id_3', 'doc_event_entity_id_4',
'doc_event_entity_id_5',
'doc_event_entity_id_6', 'doc_event_publisher_id', 'doc_event_source_id', 'event_country',
'event_country_state', 'event_geo_location', 'event_hour', 'event_platform',
'traffic_source']
def to_array(col):
def to_array_(v):
return v.toArray().tolist()
# Important: asNondeterministic requires Spark 2.3 or later
# It can be safely removed i.e.
# return udf(to_array_, ArrayType(DoubleType()))(col)
# but at the cost of decreased performance
return udf(to_array_, ArrayType(DoubleType())).asNondeterministic()(col)
CONVERT_TO_INT = ['doc_ad_category_id_1',
'doc_ad_category_id_2', 'doc_ad_category_id_3', 'doc_ad_topic_id_1', 'doc_ad_topic_id_2',
'doc_ad_topic_id_3', 'doc_ad_entity_id_1', 'doc_ad_entity_id_2', 'doc_ad_entity_id_3',
'doc_ad_entity_id_4', 'doc_ad_entity_id_5', 'doc_ad_entity_id_6',
'doc_ad_source_id', 'doc_event_category_id_1', 'doc_event_category_id_2', 'doc_event_category_id_3',
'doc_event_topic_id_1', 'doc_event_topic_id_2', 'doc_event_topic_id_3', 'doc_event_entity_id_1',
'doc_event_entity_id_2', 'doc_event_entity_id_3', 'doc_event_entity_id_4', 'doc_event_entity_id_5',
'doc_event_entity_id_6']
def format_number(element, name):
if name in BOOL_COLUMNS + CATEGORICAL_COLUMNS:
return element.cast("int")
elif name in CONVERT_TO_INT:
return element.cast("int")
else:
return element
def to_array_with_none(col):
def to_array_with_none_(v):
tmp = np.full((v.size,), fill_value=None, dtype=np.float64)
tmp[v.indices] = v.values
return tmp.tolist()
# Important: asNondeterministic requires Spark 2.3 or later
# It can be safely removed i.e.
    # return udf(to_array_with_none_, ArrayType(DoubleType()))(col)
# but at the cost of decreased performance
return udf(to_array_with_none_, ArrayType(DoubleType())).asNondeterministic()(col)
@udf
def count_value(x):
from collections import Counter
tmp = Counter(x).most_common(2)
if not tmp or np.isnan(tmp[0][0]):
return 0
return float(tmp[0][0])
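# Sketch of the logic above on a plain Python list (ignoring the @udf wrapper;
# values are hypothetical): Counter([1.0, 1.0, 2.0]).most_common(2) returns
# [(1.0, 2), (2.0, 1)], so count_value yields 1.0, the most frequent value,
# and returns 0 for an empty input or when the top value is NaN.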
def replace_with_most_frequent(most_value):
return udf(lambda x: most_value if not x or np.isnan(x) else x)
train_feature_vectors_integral_csv_rdd_df = train_feature_vectors_exported_df.select('label', 'display_id', 'ad_id',
'document_id', 'document_id_event',
'feature_vector').withColumn(
'is_leak', F.lit(-1)).withColumn("featvec", to_array("feature_vector")).select(
['label'] + ['display_id'] + ['ad_id'] + ['document_id'] + ['document_id_event'] + ['is_leak'] + [
format_number(element, FEAT_CSV_ORDERED_COLUMNS[index]).alias(FEAT_CSV_ORDERED_COLUMNS[index]) for
index, element in enumerate([col("featvec")[i] for i in range(len(feature_vector_labels_integral))])]).replace(
float('nan'), 0)
if args.submission:
test_validation_feature_vector_gcs_folder_name = 'test_feature_vectors_integral'
else:
test_validation_feature_vector_gcs_folder_name = 'validation_feature_vectors_integral'
# ## Exporting integral feature vectors
test_validation_feature_vectors_exported_df = spark.read.parquet(
OUTPUT_BUCKET_FOLDER + test_validation_feature_vector_gcs_folder_name)
test_validation_feature_vectors_exported_df.take(3)
test_validation_feature_vectors_integral_csv_rdd_df = test_validation_feature_vectors_exported_df.select(
'label', 'display_id', 'ad_id', 'document_id', 'document_id_event',
'is_leak', 'feature_vector').withColumn("featvec", to_array("feature_vector")).select(
['label'] + ['display_id'] + ['ad_id'] + ['document_id'] + ['document_id_event'] + ['is_leak'] + [
format_number(element, FEAT_CSV_ORDERED_COLUMNS[index]).alias(FEAT_CSV_ORDERED_COLUMNS[index]) for
index, element in enumerate([col("featvec")[i] for i in range(len(feature_vector_labels_integral))])]).replace(
float('nan'), 0)
def make_spec(output_dir, batch_size=None):
fixed_shape = [batch_size, 1] if batch_size is not None else []
spec = {}
spec[LABEL_COLUMN] = tf.FixedLenFeature(shape=fixed_shape, dtype=tf.int64, default_value=None)
spec[DISPLAY_ID_COLUMN] = tf.FixedLenFeature(shape=fixed_shape, dtype=tf.int64, default_value=None)
spec[IS_LEAK_COLUMN] = tf.FixedLenFeature(shape=fixed_shape, dtype=tf.int64, default_value=None)
spec[DISPLAY_ID_AND_IS_LEAK_ENCODED_COLUMN] = tf.FixedLenFeature(shape=fixed_shape, dtype=tf.int64,
default_value=None)
for name in BOOL_COLUMNS:
spec[name] = tf.FixedLenFeature(shape=fixed_shape, dtype=tf.int64, default_value=None)
for name in FLOAT_COLUMNS_LOG_BIN_TRANSFORM + FLOAT_COLUMNS_SIMPLE_BIN_TRANSFORM:
spec[name] = tf.FixedLenFeature(shape=fixed_shape, dtype=tf.float32, default_value=None)
for name in FLOAT_COLUMNS_SIMPLE_BIN_TRANSFORM:
spec[name + '_binned'] = tf.FixedLenFeature(shape=fixed_shape, dtype=tf.int64, default_value=None)
for name in FLOAT_COLUMNS_LOG_BIN_TRANSFORM:
spec[name + '_binned'] = tf.FixedLenFeature(shape=fixed_shape, dtype=tf.int64, default_value=None)
spec[name + '_log_01scaled'] = tf.FixedLenFeature(shape=fixed_shape, dtype=tf.float32, default_value=None)
for name in INT_COLUMNS:
spec[name + '_log_int'] = tf.FixedLenFeature(shape=fixed_shape, dtype=tf.int64, default_value=None)
spec[name + '_log_01scaled'] = tf.FixedLenFeature(shape=fixed_shape, dtype=tf.float32, default_value=None)
for name in BOOL_COLUMNS + CATEGORICAL_COLUMNS:
spec[name] = tf.FixedLenFeature(shape=fixed_shape, dtype=tf.int64, default_value=None)
for multi_category in DOC_CATEGORICAL_MULTIVALUED_COLUMNS:
shape = fixed_shape[:-1] + [len(DOC_CATEGORICAL_MULTIVALUED_COLUMNS[multi_category])]
spec[multi_category] = tf.FixedLenFeature(shape=shape, dtype=tf.int64)
metadata = dataset_metadata.DatasetMetadata(dataset_schema.from_feature_spec(spec))
metadata_io.write_metadata(metadata, output_dir)
# write out tfrecords meta
make_spec(LOCAL_DATA_TFRECORDS_DIR + '/transformed_metadata', batch_size=batch_size)
def log2_1p(x):
return np.log1p(x) / np.log(2.0)
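# Worked values for the transform above: log2_1p(0) == 0.0, log2_1p(1) == 1.0,
# log2_1p(3) == 2.0, log2_1p(7) == 3.0, i.e. log base 2 of (1 + x).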
# calculate min and max stats for the given dataframes all in one go
def compute_min_max_logs(df):
print(str(datetime.datetime.now()) + '\tComputing min and max')
min_logs = {}
max_logs = {}
float_expr = []
for name in trainer.features.FLOAT_COLUMNS_LOG_BIN_TRANSFORM + trainer.features.INT_COLUMNS:
float_expr.append(F.min(name))
float_expr.append(F.max(name))
    floatDf = df.agg(*float_expr).collect()
for name in trainer.features.FLOAT_COLUMNS_LOG_BIN_TRANSFORM:
minAgg = floatDf[0]["min(" + name + ")"]
maxAgg = floatDf[0]["max(" + name + ")"]
min_logs[name + '_log_01scaled'] = log2_1p(minAgg * 1000)
max_logs[name + '_log_01scaled'] = log2_1p(maxAgg * 1000)
for name in trainer.features.INT_COLUMNS:
minAgg = floatDf[0]["min(" + name + ")"]
maxAgg = floatDf[0]["max(" + name + ")"]
min_logs[name + '_log_01scaled'] = log2_1p(minAgg)
max_logs[name + '_log_01scaled'] = log2_1p(maxAgg)
return min_logs, max_logs
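# The min/max logs computed above feed the 0-1 scaling in create_tf_example_spark
# below: scaled = (log2_1p(x) - min_log) / (max_log - min_log). As a hypothetical
# example, with min_log = 0 and max_log = 10 a raw log value of 5 scales to 0.5.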
all_df = test_validation_feature_vectors_integral_csv_rdd_df.union(train_feature_vectors_integral_csv_rdd_df)
min_logs, max_logs = compute_min_max_logs(all_df)
if args.submission:
train_output_string = '/sub_train'
eval_output_string = '/test'
else:
train_output_string = '/train'
eval_output_string = '/eval'
path = LOCAL_DATA_TFRECORDS_DIR
def create_tf_example_spark(df, min_logs, max_logs):
result = {}
result[LABEL_COLUMN] = tf.train.Feature(int64_list=tf.train.Int64List(value=df[LABEL_COLUMN].to_list()))
result[DISPLAY_ID_COLUMN] = tf.train.Feature(int64_list=tf.train.Int64List(value=df[DISPLAY_ID_COLUMN].to_list()))
result[IS_LEAK_COLUMN] = tf.train.Feature(int64_list=tf.train.Int64List(value=df[IS_LEAK_COLUMN].to_list()))
encoded_value = df[DISPLAY_ID_COLUMN].multiply(10).add(df[IS_LEAK_COLUMN].clip(lower=0)).to_list()
result[DISPLAY_ID_AND_IS_LEAK_ENCODED_COLUMN] = tf.train.Feature(int64_list=tf.train.Int64List(value=encoded_value))
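    # Illustrative encoding (hypothetical values): display_id 123 with is_leak 1
    # becomes 1231; is_leak -1 is clipped to 0, giving 1230. Presumably the
    # consumer recovers the pair as (value // 10, value % 10).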
for name in FLOAT_COLUMNS:
value = df[name].to_list()
result[name] = tf.train.Feature(float_list=tf.train.FloatList(value=value))
for name in FLOAT_COLUMNS_SIMPLE_BIN_TRANSFORM:
value = df[name].multiply(10).astype('int64').to_list()
result[name + '_binned'] = tf.train.Feature(int64_list=tf.train.Int64List(value=value))
for name in FLOAT_COLUMNS_LOG_BIN_TRANSFORM:
value_prelim = df[name].multiply(1000).apply(np.log1p).multiply(1. / np.log(2.0))
value = value_prelim.astype('int64').to_list()
result[name + '_binned'] = tf.train.Feature(int64_list=tf.train.Int64List(value=value))
nn = name + '_log_01scaled'
value = value_prelim.add(-min_logs[nn]).multiply(1. / (max_logs[nn] - min_logs[nn])).to_list()
result[nn] = tf.train.Feature(float_list=tf.train.FloatList(value=value))
for name in INT_COLUMNS:
value_prelim = df[name].apply(np.log1p).multiply(1. / np.log(2.0))
value = value_prelim.astype('int64').to_list()
result[name + '_log_int'] = tf.train.Feature(int64_list=tf.train.Int64List(value=value))
nn = name + '_log_01scaled'
value = value_prelim.add(-min_logs[nn]).multiply(1. / (max_logs[nn] - min_logs[nn])).to_list()
result[nn] = tf.train.Feature(float_list=tf.train.FloatList(value=value))
for name in BOOL_COLUMNS + CATEGORICAL_COLUMNS:
value = df[name].fillna(0).astype('int64').to_list()
result[name] = tf.train.Feature(int64_list=tf.train.Int64List(value=value))
for multi_category in DOC_CATEGORICAL_MULTIVALUED_COLUMNS:
values = []
for category in DOC_CATEGORICAL_MULTIVALUED_COLUMNS[multi_category]:
values = values + [df[category].to_numpy()]
# need to transpose the series so they will be parsed correctly by the FixedLenFeature
# we can pass in a single series here; they'll be reshaped to [batch_size, num_values]
# when parsed from the TFRecord
value = np.stack(values, axis=1).flatten().tolist()
result[multi_category] = tf.train.Feature(int64_list=tf.train.Int64List(value=value))
tf_example = tf.train.Example(features=tf.train.Features(feature=result))
return tf_example
def _transform_to_tfrecords(rdds):
csv = pd.DataFrame(list(rdds), columns=CSV_ORDERED_COLUMNS)
num_rows = len(csv.index)
examples = []
for start_ind in range(0, num_rows, batch_size if batch_size is not None else 1): # for each batch
if start_ind + batch_size - 1 > num_rows: # if we'd run out of rows
csv_slice = csv.iloc[start_ind:]
# drop the remainder
print("last Example has: ", len(csv_slice))
examples.append((create_tf_example_spark(csv_slice, min_logs, max_logs), len(csv_slice)))
return examples
else:
csv_slice = csv.iloc[start_ind:start_ind + (batch_size if batch_size is not None else 1)]
examples.append((create_tf_example_spark(csv_slice, min_logs, max_logs), batch_size))
return examples
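# Each element produced above is one tf.train.Example that packs an entire
# prebatch of rows (up to batch_size rows per Example) rather than a single row,
# which is what gives the '--prebatch_size' flag its meaning downstream.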
max_partition_num = 30
def _transform_to_slices(rdds):
taskcontext = TaskContext.get()
partitionid = taskcontext.partitionId()
csv = pd.DataFrame(list(rdds), columns=CSV_ORDERED_COLUMNS)
num_rows = len(csv.index)
print("working with partition: ", partitionid, max_partition_num, num_rows)
examples = []
for start_ind in range(0, num_rows, batch_size if batch_size is not None else 1): # for each batch
if start_ind + batch_size - 1 > num_rows: # if we'd run out of rows
csv_slice = csv.iloc[start_ind:]
print("last Example has: ", len(csv_slice), partitionid)
examples.append((csv_slice, len(csv_slice)))
return examples
else:
csv_slice = csv.iloc[start_ind:start_ind + (batch_size if batch_size is not None else 1)]
examples.append((csv_slice, len(csv_slice)))
return examples
def _transform_to_tfrecords_from_slices(rdds):
examples = []
for slice in rdds:
if len(slice[0]) != batch_size:
print("slice size is not correct, dropping: ", len(slice[0]))
else:
examples.append(
(bytearray((create_tf_example_spark(slice[0], min_logs, max_logs)).SerializeToString()), None))
return examples
def _transform_to_tfrecords_from_reslice(rdds):
examples = []
all_dataframes = pd.DataFrame([])
for slice in rdds:
all_dataframes = all_dataframes.append(slice[0])
num_rows = len(all_dataframes.index)
examples = []
for start_ind in range(0, num_rows, batch_size if batch_size is not None else 1): # for each batch
if start_ind + batch_size - 1 > num_rows: # if we'd run out of rows
csv_slice = all_dataframes.iloc[start_ind:]
if TEST_SET_MODE:
remain_len = batch_size - len(csv_slice)
(m, n) = divmod(remain_len, len(csv_slice))
print("remainder: ", len(csv_slice), remain_len, m, n)
                if m:
                    # append the original slice m times; appending csv_slice to
                    # itself would double it each iteration and over-pad for m >= 2
                    base_slice = csv_slice
                    for i in range(m):
                        csv_slice = csv_slice.append(base_slice)
csv_slice = csv_slice.append(csv_slice.iloc[:n])
print("after fill remainder: ", len(csv_slice))
examples.append(
(bytearray((create_tf_example_spark(csv_slice, min_logs, max_logs)).SerializeToString()), None))
return examples
# drop the remainder
print("dropping remainder: ", len(csv_slice))
return examples
else:
csv_slice = all_dataframes.iloc[start_ind:start_ind + (batch_size if batch_size is not None else 1)]
examples.append(
(bytearray((create_tf_example_spark(csv_slice, min_logs, max_logs)).SerializeToString()), None))
return examples
TEST_SET_MODE = False
train_features = train_feature_vectors_integral_csv_rdd_df.coalesce(30).rdd.mapPartitions(_transform_to_slices)
cached_train_features = train_features.cache()
cached_train_features.count()
train_full = cached_train_features.filter(lambda x: x[1] == batch_size)
# split out slices where we don't have a full batch so that we can reslice them and drop only a minimal number of rows
train_not_full = cached_train_features.filter(lambda x: x[1] < batch_size)
train_examples_full = train_full.mapPartitions(_transform_to_tfrecords_from_slices)
train_left = train_not_full.coalesce(1).mapPartitions(_transform_to_tfrecords_from_reslice)
all_train = train_examples_full.union(train_left)
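# Pipeline recap for the steps above: partitions are sliced into prebatches;
# slices with exactly batch_size rows are serialized directly, while undersized
# remainders are coalesced into a single partition and resliced so that only a
# minimal number of rows is dropped (or, in TEST_SET_MODE, repeated to pad the
# final batch).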
TEST_SET_MODE = True
valid_features = test_validation_feature_vectors_integral_csv_rdd_df.coalesce(30).rdd.mapPartitions(
_transform_to_slices)
cached_valid_features = valid_features.cache()
cached_valid_features.count()
valid_full = cached_valid_features.filter(lambda x: x[1] == batch_size)
valid_not_full = cached_valid_features.filter(lambda x: x[1] < batch_size)
valid_examples_full = valid_full.mapPartitions(_transform_to_tfrecords_from_slices)
valid_left = valid_not_full.coalesce(1).mapPartitions(_transform_to_tfrecords_from_reslice)
all_valid = valid_examples_full.union(valid_left)
all_train.saveAsNewAPIHadoopFile(LOCAL_DATA_TFRECORDS_DIR + train_output_string,
"org.tensorflow.hadoop.io.TFRecordFileOutputFormat",
keyClass="org.apache.hadoop.io.BytesWritable",
valueClass="org.apache.hadoop.io.NullWritable")
all_valid.saveAsNewAPIHadoopFile(LOCAL_DATA_TFRECORDS_DIR + eval_output_string,
"org.tensorflow.hadoop.io.TFRecordFileOutputFormat",
keyClass="org.apache.hadoop.io.BytesWritable",
valueClass="org.apache.hadoop.io.NullWritable")
spark.stop()
| DeepLearningExamples-master | TensorFlow/Recommendation/WideAndDeep/preproc/preproc4.py |
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyspark.context import SparkContext, SparkConf
from pyspark.sql.functions import col
from pyspark.sql.session import SparkSession
from pyspark.sql.types import IntegerType, StringType, StructType, StructField
OUTPUT_BUCKET_FOLDER = "/outbrain/preprocessed/"
DATA_BUCKET_FOLDER = "/outbrain/orig/"
SPARK_TEMP_FOLDER = "/outbrain/spark-temp/"
conf = SparkConf().setMaster('local[*]').set('spark.executor.memory', '40g').set('spark.driver.memory', '200g').set(
"spark.local.dir", SPARK_TEMP_FOLDER)
sc = SparkContext(conf=conf)
spark = SparkSession(sc)
print('Loading data...')
events_schema = StructType(
[StructField("display_id", IntegerType(), True),
StructField("uuid_event", StringType(), True),
StructField("document_id_event", IntegerType(), True),
StructField("timestamp_event", IntegerType(), True),
StructField("platform_event", IntegerType(), True),
StructField("geo_location_event", StringType(), True)]
)
events_df = spark.read.schema(events_schema) \
.options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER + "events.csv") \
.withColumn('day_event', (col('timestamp_event') / 1000 / 60 / 60 / 24).cast("int")) \
.alias('events')
events_df.count()
print('Drop rows with empty "geo_location"...')
events_df = events_df.dropna(subset="geo_location_event")
events_df.count()
print('Drop rows with empty "platform"...')
events_df = events_df.dropna(subset="platform_event")
events_df.count()
promoted_content_schema = StructType(
[StructField("ad_id", IntegerType(), True),
StructField("document_id_promo", IntegerType(), True),
StructField("campaign_id", IntegerType(), True),
StructField("advertiser_id", IntegerType(), True)]
)
promoted_content_df = spark.read.schema(promoted_content_schema) \
.options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER + "promoted_content.csv") \
.alias('promoted_content')
clicks_train_schema = StructType(
[StructField("display_id", IntegerType(), True),
StructField("ad_id", IntegerType(), True),
StructField("clicked", IntegerType(), True)]
)
clicks_train_df = spark.read.schema(clicks_train_schema) \
.options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER + "clicks_train.csv") \
.alias('clicks_train')
clicks_train_joined_df = clicks_train_df \
.join(promoted_content_df, on='ad_id', how='left') \
.join(events_df, on='display_id', how='left')
clicks_train_joined_df.createOrReplaceTempView('clicks_train_joined')
validation_display_ids_df = clicks_train_joined_df.select('display_id', 'day_event') \
.distinct() \
.sampleBy("day_event", fractions={0: 0.2, 1: 0.2, 2: 0.2, 3: 0.2, 4: 0.2,
5: 0.2, 6: 0.2, 7: 0.2, 8: 0.2, 9: 0.2, 10: 0.2, 11: 1.0, 12: 1.0}, seed=0)
validation_display_ids_df.createOrReplaceTempView("validation_display_ids")
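# Day-stratified holdout: the fractions above keep 20% of display_ids from days
# 0-10 and all display_ids from days 11-12 for validation, presumably to mimic
# the time-based train/test split of the original competition data.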
validation_set_df = spark.sql('''SELECT display_id, ad_id, uuid_event, day_event,
timestamp_event, document_id_promo, platform_event, geo_location_event
FROM clicks_train_joined t
WHERE EXISTS (SELECT display_id FROM validation_display_ids
WHERE display_id = t.display_id)''')
validation_set_gcs_output = "validation_set.parquet"
validation_set_df.write.parquet(OUTPUT_BUCKET_FOLDER + validation_set_gcs_output, mode='overwrite')
print(validation_set_df.take(5))
spark.stop()
| DeepLearningExamples-master | TensorFlow/Recommendation/WideAndDeep/preproc/preproc1.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import dllogger
import horovod.tensorflow as hvd
import json
import numpy as np
import os
import tensorflow as tf
import tensorflow_transform as tft
from tensorflow.core.protobuf import rewriter_config_pb2
from trainer import features
from utils.dataloader import separate_input_fn
from utils.hooks.benchmark_hooks import BenchmarkLoggingHook
from utils.metrics import map_custom_metric, map_custom_metric_with_leak
from utils.schedulers import learning_rate_scheduler
MODEL_TYPES = ['wide', 'deep', 'wide_n_deep']
WIDE, DEEP, WIDE_N_DEEP = MODEL_TYPES
# Default train dataset size
TRAIN_DATASET_SIZE = 59761827
def create_parser():
"""Initialize command line parser using arparse.
Returns:
An argparse.ArgumentParser.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_type',
help='Model type to train on',
choices=MODEL_TYPES,
default=WIDE_N_DEEP)
parser.add_argument(
'--train_data_pattern',
help='Pattern of training file names. For example if training files are train_000.tfrecord, \
train_001.tfrecord then --train_data_pattern is train_*',
type=str,
default='/outbrain/tfrecords/train/part*',
nargs='+')
parser.add_argument(
'--eval_data_pattern',
help='Pattern of eval file names. For example if eval files are eval_000.tfrecord, \
eval_001.tfrecord then --eval_data_pattern is eval_*',
type=str,
default='/outbrain/tfrecords/eval/part*',
nargs='+')
parser.add_argument(
'--model_dir',
help='Model Checkpoint will be saved here',
type=str,
default='/outbrain/checkpoints')
parser.add_argument(
'--transformed_metadata_path',
help='Path to transformed_metadata.',
type=str,
default='/outbrain/tfrecords')
parser.add_argument(
'--deep_hidden_units',
help='Hidden units per layer, separated by spaces',
default=[1024, 1024, 1024, 1024, 1024],
type=int,
nargs="+")
parser.add_argument(
'--prebatch_size',
help='Size of the pre-batches in the tfrecords',
default=4096,
type=int)
parser.add_argument(
'--global_batch_size',
help='Total training batch size',
default=131072,
type=int)
parser.add_argument(
'--eval_batch_size',
help='Evaluation batch size',
default=32768,
type=int)
parser.add_argument(
'--eval_steps',
help='Number of evaluation steps to perform',
default=8,
type=int)
parser.add_argument(
'--training_set_size',
help='Number of samples in the training set',
default=TRAIN_DATASET_SIZE,
type=int)
parser.add_argument(
'--num_epochs',
help='Number of epochs',
default=120,
type=int)
parser.add_argument(
'--eval_epoch_interval',
help='Perform evaluation during training after this many epochs',
default=2,
type=float)
parser.add_argument(
'--xla',
help='Enable XLA',
default=False,
action='store_true')
parser.add_argument(
'--gpu',
help='Run computations on the GPU',
default=False,
action='store_true')
parser.add_argument(
'--amp',
help='Attempt automatic mixed precision conversion',
default=False,
action='store_true')
parser.add_argument(
'--hvd',
help='Use Horovod',
action='store_true',
default=False)
parser.add_argument(
'--linear_l1_regularization',
help='L1 regularization for linear model',
type=float,
default=0.0)
parser.add_argument(
'--linear_l2_regularization',
help='L2 regularization for linear model',
type=float,
default=0.0)
parser.add_argument(
'--linear_learning_rate',
help='Learning rate for linear model',
type=float,
default=0.2)
parser.add_argument(
'--deep_learning_rate',
help='Learning rate for deep model',
type=float,
default=1.0)
parser.add_argument(
'--deep_dropout',
help='Dropout regularization for deep model',
type=float,
default=0.0)
parser.add_argument(
'--deep_warmup_epochs',
help='Number of epochs for deep LR warmup',
type=float,
default=0)
parser.add_argument(
'--log_device_placement',
help='Ask Tensorflow (via ConfigProto) to print device placement of nodes',
default=False,
action='store_true')
parser.add_argument(
'--predict',
help='Only perform a prediction on the validation dataset, don\'t train',
default=False,
action='store_true')
parser.add_argument(
'--evaluate',
help='Only perform an evaluation on the validation dataset, don\'t train',
default=False,
action='store_true')
parser.add_argument(
'--results_dir',
type=str,
help='Directory to store training results',
default='/results')
parser.add_argument(
'--log_filename',
type=str,
        help='Name of the file to store dllogger output',
default='log.json')
parser.add_argument(
'--shuffle_percentage',
type=float,
default=0.0,
help='Size of the shuffle buffer from 0 to 1. \
1 means that the shuffle buffer size will be equal to the size of the entire batch.')
parser.add_argument(
'--print_display_ids',
help='Print the display ids processed by the input pipeline',
default=False,
action='store_true')
parser.add_argument(
'--reader_num_threads',
default=12,
type=int)
parser.add_argument(
'--parser_num_threads',
default=3,
type=int)
parser.add_argument(
'--prefetch_buffer_size',
default=1,
type=int)
parser.add_argument(
'--submission',
action='store_true',
default=False)
parser.add_argument(
'--benchmark',
help='Collect performance metrics during training',
action='store_true',
default=False)
parser.add_argument(
'--benchmark_warmup_steps',
        help='Warmup before start of benchmarking the training',
type=int,
default=50)
parser.add_argument(
'--benchmark_steps',
help='Number of steps for train performance benchmark',
type=int,
default=100)
return parser
def construct_estimator(model_type, run_config,
wide_columns, wide_optimizer,
deep_columns, deep_hidden_units, deep_dropout, deep_optimizer):
assert model_type in [WIDE, DEEP, WIDE_N_DEEP], 'Canned estimator only supports basic wide, deep, wnd'
if model_type == WIDE:
estimator = tf.estimator.LinearClassifier(
feature_columns=wide_columns,
config=run_config,
optimizer=wide_optimizer)
elif model_type == DEEP:
estimator = tf.estimator.DNNClassifier(
feature_columns=deep_columns,
hidden_units=deep_hidden_units,
dropout=deep_dropout,
config=run_config,
optimizer=deep_optimizer)
elif model_type == WIDE_N_DEEP:
estimator = tf.estimator.DNNLinearCombinedClassifier(
config=run_config,
linear_feature_columns=wide_columns,
linear_optimizer=wide_optimizer,
dnn_feature_columns=deep_columns,
dnn_optimizer=deep_optimizer,
dnn_hidden_units=deep_hidden_units,
dnn_dropout=deep_dropout,
linear_sparse_combiner='sum',
loss_reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE)
return estimator
def main(FLAGS):
if FLAGS.hvd:
hvd.init()
if hvd.local_rank() == 0:
tf.logging.set_verbosity(tf.logging.INFO)
log_path = os.path.join(FLAGS.results_dir, FLAGS.log_filename)
os.makedirs(FLAGS.results_dir, exist_ok=True)
dllogger.init(backends=[
dllogger.JSONStreamBackend(verbosity=dllogger.Verbosity.VERBOSE,
filename=log_path),
dllogger.StdOutBackend(verbosity=dllogger.Verbosity.VERBOSE)])
else:
tf.logging.set_verbosity(tf.logging.ERROR)
dllogger.init(backends=[])
num_gpus = hvd.size()
else:
tf.logging.set_verbosity(tf.logging.INFO)
log_path = os.path.join(FLAGS.results_dir, FLAGS.log_filename)
os.makedirs(FLAGS.results_dir, exist_ok=True)
dllogger.init(backends=[
dllogger.JSONStreamBackend(verbosity=dllogger.Verbosity.VERBOSE,
filename=log_path),
dllogger.StdOutBackend(verbosity=dllogger.Verbosity.VERBOSE)])
num_gpus = 1
dllogger.log(data=vars(FLAGS), step='PARAMETER')
dllogger.metadata('train_throughput', {'unit': 'samples/s'})
dllogger.metadata('infer_throughput', {'unit': 'samples/s'})
dllogger.metadata('map', {'unit': None})
dllogger.metadata('map_infer', {'unit': None})
dllogger.metadata('map_with_leak', {'unit': None})
dllogger.metadata('map_with_leak_infer', {'unit': None})
local_batch_size = FLAGS.global_batch_size // num_gpus
create_batches = local_batch_size // FLAGS.prebatch_size
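    # With the default flags on a single GPU this gives, for example,
    # local_batch_size = 131072 // 1 = 131072 and
    # create_batches = 131072 // 4096 = 32 prebatched records per training step.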
wide_columns, deep_columns = features.get_feature_columns()
tf_transform_output = tft.TFTransformOutput(FLAGS.transformed_metadata_path)
if not FLAGS.hvd or hvd.local_rank() == 0:
tf.compat.v1.logging.warn('command line arguments: {}'.format(json.dumps(vars(FLAGS))))
if not os.path.exists(FLAGS.results_dir):
os.mkdir(FLAGS.results_dir)
with open('{}/args.json'.format(FLAGS.results_dir), 'w') as f:
json.dump(vars(FLAGS), f, indent=4)
if FLAGS.gpu:
if FLAGS.amp:
rewrite_options = rewriter_config_pb2.RewriterConfig(auto_mixed_precision=True)
session_config = tf.compat.v1.ConfigProto(
graph_options=tf.compat.v1.GraphOptions(rewrite_options=rewrite_options),
log_device_placement=FLAGS.log_device_placement
)
else:
session_config = tf.compat.v1.ConfigProto(
log_device_placement=FLAGS.log_device_placement
)
else:
session_config = tf.compat.v1.ConfigProto(
device_count={'GPU': 0},
log_device_placement=FLAGS.log_device_placement
)
if FLAGS.hvd:
session_config.gpu_options.visible_device_list = str(hvd.local_rank())
if FLAGS.xla:
session_config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
if FLAGS.benchmark:
model_dir = None
else:
model_dir = FLAGS.model_dir
steps_per_epoch = FLAGS.training_set_size / FLAGS.global_batch_size
print('Steps per epoch: {}'.format(steps_per_epoch))
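    # With the default flags: 59,761,827 samples / 131,072 global batch size
    # is roughly 455.9 steps per epoch, so 120 epochs yield max_steps = 54713.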
max_steps = int(FLAGS.num_epochs * steps_per_epoch)
save_checkpoints_steps = FLAGS.benchmark_steps + 1 if FLAGS.benchmark else \
int(FLAGS.eval_epoch_interval * steps_per_epoch)
count_steps = FLAGS.benchmark_steps + 1 if FLAGS.benchmark else 100
run_config = tf.estimator.RunConfig(model_dir=model_dir) \
.replace(session_config=session_config,
save_checkpoints_steps=save_checkpoints_steps,
save_summary_steps=count_steps,
log_step_count_steps=count_steps,
keep_checkpoint_max=1)
def wide_optimizer():
opt = tf.compat.v1.train.FtrlOptimizer(
learning_rate=FLAGS.linear_learning_rate,
l1_regularization_strength=FLAGS.linear_l1_regularization,
l2_regularization_strength=FLAGS.linear_l2_regularization)
if FLAGS.hvd:
opt = hvd.DistributedOptimizer(opt)
if FLAGS.amp:
loss_scale = tf.train.experimental.DynamicLossScale()
opt = tf.compat.v1.train.experimental.MixedPrecisionLossScaleOptimizer(opt, loss_scale)
return opt
def deep_optimizer():
with tf.device("/cpu:0"):
learning_rate_fn = learning_rate_scheduler(
lr_init=FLAGS.deep_learning_rate,
warmup_steps=int(steps_per_epoch * FLAGS.deep_warmup_epochs),
global_step=tf.compat.v1.train.get_global_step()
)
opt = tf.compat.v1.train.AdagradOptimizer(
learning_rate=learning_rate_fn,
initial_accumulator_value=0.1,
use_locking=False)
if FLAGS.hvd:
opt = hvd.DistributedOptimizer(opt)
if FLAGS.amp:
loss_scale = tf.train.experimental.DynamicLossScale()
opt = tf.compat.v1.train.experimental.MixedPrecisionLossScaleOptimizer(opt, loss_scale)
return opt
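    # Note: both optimizers share the same wrapping order when the flags are set:
    # base optimizer -> hvd.DistributedOptimizer (cross-GPU gradient averaging)
    # -> dynamic loss-scale wrapper for AMP.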
# input functions to read data from disk
train_input_fn = lambda: separate_input_fn(
tf_transform_output,
FLAGS.train_data_pattern,
create_batches,
tf.estimator.ModeKeys.TRAIN,
reader_num_threads=FLAGS.reader_num_threads,
parser_num_threads=FLAGS.parser_num_threads,
shuffle_buffer_size=int(FLAGS.shuffle_percentage * create_batches),
prefetch_buffer_size=FLAGS.prefetch_buffer_size,
print_display_ids=FLAGS.print_display_ids)
eval_input_fn = lambda: separate_input_fn(
tf_transform_output,
FLAGS.eval_data_pattern,
(FLAGS.eval_batch_size // FLAGS.prebatch_size),
tf.estimator.ModeKeys.EVAL,
reader_num_threads=1,
parser_num_threads=1,
shuffle_buffer_size=int(FLAGS.shuffle_percentage * create_batches),
prefetch_buffer_size=FLAGS.prefetch_buffer_size,
print_display_ids=FLAGS.print_display_ids)
estimator = construct_estimator(FLAGS.model_type, run_config,
wide_columns, wide_optimizer,
deep_columns, FLAGS.deep_hidden_units, FLAGS.deep_dropout, deep_optimizer)
estimator = tf.estimator.add_metrics(estimator, map_custom_metric)
estimator = tf.estimator.add_metrics(estimator, map_custom_metric_with_leak)
hooks = []
if FLAGS.hvd:
hooks.append(hvd.BroadcastGlobalVariablesHook(0))
if FLAGS.predict or FLAGS.evaluate: # inference
if FLAGS.benchmark:
benchmark_hook = BenchmarkLoggingHook(global_batch_size=FLAGS.eval_batch_size,
warmup_steps=FLAGS.benchmark_warmup_steps)
hooks.append(benchmark_hook)
eval_steps = FLAGS.benchmark_steps
else:
eval_steps = FLAGS.eval_steps
predict_result_iter = estimator.predict(input_fn=eval_input_fn, hooks=hooks, yield_single_examples=False)
results = []
for i, r in enumerate(predict_result_iter):
print('predicting batch: ', i)
results.append(r)
# TODO: use eval_steps
if i >= eval_steps - 1:
break
if FLAGS.benchmark:
infer_throughput = benchmark_hook.mean_throughput.value()
if FLAGS.benchmark:
dllogger.log(data={'infer_throughput': infer_throughput}, step=tuple())
elif FLAGS.evaluate:
print('evaluating using estimator.evaluate with eval_batch_size = ',
FLAGS.eval_batch_size, ' and eval_steps = ', FLAGS.eval_steps)
result = estimator.evaluate(eval_input_fn, hooks=hooks, steps=FLAGS.eval_steps)
dllogger.log(step=(), data={'map_infer': float(result['map']),
'map_with_leak_infer': float(result['map_with_leak'])})
elif FLAGS.predict:
scores = [r['probabilities'][:, 1] for r in results]
scores = np.hstack(scores)
scores_path = os.path.join(FLAGS.model_dir, 'scores.txt')
print('saving the numpy scores array to: ', scores_path)
np.savetxt(scores_path, scores, fmt="%f", delimiter='\n')
else: # training
if FLAGS.benchmark:
benchmark_hook = BenchmarkLoggingHook(global_batch_size=FLAGS.global_batch_size,
warmup_steps=FLAGS.benchmark_warmup_steps)
hooks.append(benchmark_hook)
estimator.train(train_input_fn, hooks=hooks, steps=FLAGS.benchmark_steps)
train_throughput = benchmark_hook.mean_throughput.value()
dllogger.log(data={'train_throughput': train_throughput}, step=tuple())
else:
train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn,
max_steps=max_steps,
hooks=hooks)
eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn,
throttle_secs=0,
steps=FLAGS.eval_steps)
result = tf.estimator.train_and_evaluate(estimator=estimator,
train_spec=train_spec,
eval_spec=eval_spec)
if result != (None, None):
dllogger.log(step=(), data={'map': float(result[0]['map']),
'map_with_leak': float(result[0]['map_with_leak'])})
if __name__ == '__main__':
FLAGS = create_parser().parse_args()
main(FLAGS)
| DeepLearningExamples-master | TensorFlow/Recommendation/WideAndDeep/trainer/task.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| DeepLearningExamples-master | TensorFlow/Recommendation/WideAndDeep/trainer/__init__.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
LABEL_COLUMN = "label"
DISPLAY_ID_COLUMN = 'display_id'
IS_LEAK_COLUMN = 'is_leak'
DISPLAY_ID_AND_IS_LEAK_ENCODED_COLUMN = 'display_ad_and_is_leak'
CATEGORICAL_COLUMNS = [
'ad_id',
'doc_id',
'doc_event_id',
'ad_advertiser',
'doc_ad_source_id',
'doc_ad_publisher_id',
'doc_event_publisher_id',
'doc_event_source_id',
'event_country',
'event_country_state',
'event_geo_location',
'event_hour',
'event_platform',
'traffic_source']
DOC_CATEGORICAL_MULTIVALUED_COLUMNS = {
'doc_ad_category_id': ['doc_ad_category_id_1',
'doc_ad_category_id_2',
'doc_ad_category_id_3'],
'doc_ad_topic_id': ['doc_ad_topic_id_1',
'doc_ad_topic_id_2',
'doc_ad_topic_id_3'],
'doc_ad_entity_id': ['doc_ad_entity_id_1',
'doc_ad_entity_id_2',
'doc_ad_entity_id_3',
'doc_ad_entity_id_4',
'doc_ad_entity_id_5',
'doc_ad_entity_id_6'],
'doc_event_category_id': ['doc_event_category_id_1',
'doc_event_category_id_2',
'doc_event_category_id_3'],
'doc_event_topic_id': ['doc_event_topic_id_1',
'doc_event_topic_id_2',
'doc_event_topic_id_3'],
'doc_event_entity_id': ['doc_event_entity_id_1',
'doc_event_entity_id_2',
'doc_event_entity_id_3',
'doc_event_entity_id_4',
'doc_event_entity_id_5',
'doc_event_entity_id_6']
}
BOOL_COLUMNS = [
'event_weekend',
'user_has_already_viewed_doc']
INT_COLUMNS = [
'user_views',
'ad_views',
'doc_views',
'doc_event_days_since_published',
'doc_event_hour',
'doc_ad_days_since_published']
FLOAT_COLUMNS_LOG_BIN_TRANSFORM = [
'pop_ad_id',
'pop_ad_id_conf_multipl',
'pop_document_id',
'pop_document_id_conf_multipl',
'pop_publisher_id',
'pop_publisher_id_conf_multipl',
'pop_advertiser_id',
'pop_advertiser_id_conf_multipl',
'pop_campain_id',
'pop_campain_id_conf_multipl',
'pop_doc_event_doc_ad',
'pop_doc_event_doc_ad_conf_multipl',
'pop_source_id',
'pop_source_id_conf_multipl',
'pop_source_id_country',
'pop_source_id_country_conf_multipl',
'pop_entity_id',
'pop_entity_id_conf_multipl',
'pop_entity_id_country',
'pop_entity_id_country_conf_multipl',
'pop_topic_id',
'pop_topic_id_conf_multipl',
'pop_topic_id_country',
'pop_topic_id_country_conf_multipl',
'pop_category_id',
'pop_category_id_conf_multipl',
'pop_category_id_country',
'pop_category_id_country_conf_multipl',
'user_doc_ad_sim_categories',
'user_doc_ad_sim_categories_conf_multipl',
'user_doc_ad_sim_topics',
'user_doc_ad_sim_topics_conf_multipl',
'user_doc_ad_sim_entities',
'user_doc_ad_sim_entities_conf_multipl',
'doc_event_doc_ad_sim_categories',
'doc_event_doc_ad_sim_categories_conf_multipl',
'doc_event_doc_ad_sim_topics',
'doc_event_doc_ad_sim_topics_conf_multipl',
'doc_event_doc_ad_sim_entities',
'doc_event_doc_ad_sim_entities_conf_multipl']
FLOAT_COLUMNS_SIMPLE_BIN_TRANSFORM = [
'pop_ad_id_conf',
'pop_document_id_conf',
'pop_publisher_id_conf',
'pop_advertiser_id_conf',
'pop_campain_id_conf',
'pop_doc_event_doc_ad_conf',
'pop_source_id_conf',
'pop_source_id_country_conf',
'pop_entity_id_conf',
'pop_entity_id_country_conf',
'pop_topic_id_conf',
'pop_topic_id_country_conf',
'pop_category_id_conf',
'pop_category_id_country_conf',
'user_doc_ad_sim_categories_conf',
'user_doc_ad_sim_topics_conf',
'user_doc_ad_sim_entities_conf',
'doc_event_doc_ad_sim_categories_conf',
'doc_event_doc_ad_sim_topics_conf',
'doc_event_doc_ad_sim_entities_conf']
FLOAT_COLUMNS = FLOAT_COLUMNS_LOG_BIN_TRANSFORM + FLOAT_COLUMNS_SIMPLE_BIN_TRANSFORM
# Let's define the columns we're actually going to use
# during training
REQUEST_SINGLE_HOT_COLUMNS = [
"doc_event_id",
"doc_id",
"doc_event_source_id",
"event_geo_location",
"event_country_state",
"doc_event_publisher_id",
"event_country",
"event_hour",
"event_platform",
"traffic_source",
"event_weekend",
"user_has_already_viewed_doc"]
REQUEST_MULTI_HOT_COLUMNS = [
"doc_event_entity_id",
"doc_event_topic_id",
"doc_event_category_id"]
REQUEST_NUMERIC_COLUMNS = [
"pop_document_id_conf",
"pop_publisher_id_conf",
"pop_source_id_conf",
"pop_entity_id_conf",
"pop_topic_id_conf",
"pop_category_id_conf",
"pop_document_id",
"pop_publisher_id",
"pop_source_id",
"pop_entity_id",
"pop_topic_id",
"pop_category_id",
"user_views",
"doc_views",
"doc_event_days_since_published",
"doc_event_hour"]
ITEM_SINGLE_HOT_COLUMNS = [
"ad_id",
"doc_ad_source_id",
"ad_advertiser",
"doc_ad_publisher_id"]
ITEM_MULTI_HOT_COLUMNS = [
"doc_ad_topic_id",
"doc_ad_entity_id",
"doc_ad_category_id"]
ITEM_NUMERIC_COLUMNS = [
"pop_ad_id_conf",
"user_doc_ad_sim_categories_conf",
"user_doc_ad_sim_topics_conf",
"pop_advertiser_id_conf",
"pop_campain_id_conf_multipl",
"pop_ad_id",
"pop_advertiser_id",
"pop_campain_id",
"user_doc_ad_sim_categories",
"user_doc_ad_sim_topics",
"user_doc_ad_sim_entities",
"doc_event_doc_ad_sim_categories",
"doc_event_doc_ad_sim_topics",
"doc_event_doc_ad_sim_entities",
"ad_views",
"doc_ad_days_since_published"]
NV_TRAINING_COLUMNS = (
REQUEST_SINGLE_HOT_COLUMNS +
REQUEST_MULTI_HOT_COLUMNS +
REQUEST_NUMERIC_COLUMNS +
ITEM_SINGLE_HOT_COLUMNS +
ITEM_MULTI_HOT_COLUMNS +
ITEM_NUMERIC_COLUMNS)
HASH_BUCKET_SIZES = {
'doc_event_id': 300000,
'ad_id': 250000,
'doc_id': 100000,
'doc_ad_entity_id': 10000,
'doc_event_entity_id': 10000,
'doc_ad_source_id': 4000,
'doc_event_source_id': 4000,
'event_geo_location': 2500,
'ad_advertiser': 2500,
'event_country_state': 2000,
'doc_ad_publisher_id': 1000,
'doc_event_publisher_id': 1000,
'doc_ad_topic_id': 350,
'doc_event_topic_id': 350,
'event_country': 300,
'doc_ad_category_id': 100,
'doc_event_category_id': 100}
IDENTITY_NUM_BUCKETS = {
'event_hour': 6,
'event_platform': 3,
'traffic_source': 3,
'event_weekend': 2,
'user_has_already_viewed_doc': 2}
EMBEDDING_DIMENSIONS = {
'doc_event_id': 128,
'ad_id': 128,
'doc_id': 128,
'doc_ad_entity_id': 64,
'doc_event_entity_id': 64,
'doc_ad_source_id': 64,
'doc_event_source_id': 64,
'event_geo_location': 64,
'ad_advertiser': 64,
'event_country_state': 64,
'doc_ad_publisher_id': 64,
'doc_event_publisher_id': 64,
'doc_ad_topic_id': 64,
'doc_event_topic_id': 64,
'event_country': 64,
'doc_ad_category_id': 64,
'doc_event_category_id': 64}
def get_feature_columns(force_subset=None):
# adding the force_subset as a way to directly pass in column changes for testing/profiling
deep_columns, wide_columns = [], []
if force_subset is not None:
training_columns = force_subset
else:
training_columns = NV_TRAINING_COLUMNS
tf.compat.v1.logging.warn('number of features: {}'.format(len(training_columns)))
for column_name in training_columns:
if column_name in HASH_BUCKET_SIZES:
categorical_column = tf.feature_column.categorical_column_with_hash_bucket(
column_name,
hash_bucket_size=HASH_BUCKET_SIZES[column_name],
dtype=tf.int32)
wide_columns.append(categorical_column)
elif column_name in IDENTITY_NUM_BUCKETS:
categorical_column = tf.feature_column.categorical_column_with_identity(
column_name, num_buckets=IDENTITY_NUM_BUCKETS[column_name])
wide_columns.append(categorical_column)
else:
columns = []
if column_name in FLOAT_COLUMNS_SIMPLE_BIN_TRANSFORM:
# add a categorical_column for column_name + "_binned"
# just add the regular float column for now
columns.append(tf.feature_column.numeric_column(
column_name, shape=(1,)))
elif column_name in FLOAT_COLUMNS_LOG_BIN_TRANSFORM:
# add a categorical_column for column_name + "_log_binned")
columns.append(tf.feature_column.numeric_column(
column_name + "_log_01scaled", shape=(1,)))
elif column_name in INT_COLUMNS:
# add a categorical_column for column_name + "_log_int"
columns.append(tf.feature_column.numeric_column(
column_name + "_log_01scaled", shape=(1,)))
for column in columns:
wide_columns.append(column)
deep_columns.append(column)
continue
if column_name in EMBEDDING_DIMENSIONS:
column = tf.feature_column.embedding_column(
categorical_column,
dimension=EMBEDDING_DIMENSIONS[column_name],
combiner='mean')
else:
column = tf.feature_column.indicator_column(categorical_column)
deep_columns.append(column)
tf.compat.v1.logging.warn('deep columns: {}'.format(len(deep_columns)))
tf.compat.v1.logging.warn('wide columns: {}'.format(len(wide_columns)))
tf.compat.v1.logging.warn(
'wide&deep intersection: {}'.format(len(set(wide_columns).intersection(set(deep_columns)))))
return wide_columns, deep_columns
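
if __name__ == '__main__':
    # Quick sanity-check sketch (our addition, not part of the training
    # pipeline): build both column sets and print their sizes. `force_subset`
    # accepts any subset of NV_TRAINING_COLUMNS and is handy when profiling
    # the cost of individual features.
    wide, deep = get_feature_columns()
    print('wide columns:', len(wide))
    print('deep columns:', len(deep))
    wide_s, deep_s = get_feature_columns(force_subset=['ad_id', 'ad_views'])
    print('subset wide/deep:', len(wide_s), len(deep_s))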
| DeepLearningExamples-master | TensorFlow/Recommendation/WideAndDeep/trainer/features.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from vae.load.preprocessing import load_and_parse_ML_20M
import numpy as np
parser = ArgumentParser(description="Prepare data for VAE training")
parser.add_argument('--data_dir', default='/data', type=str,
help='Directory for storing the training data')
parser.add_argument('--seed', default=0, type=int,
help='Random seed')
args = parser.parse_args()
print('Preprocessing seed: ', args.seed)
np.random.seed(args.seed)
# load dataset
(train_data,
validation_data_input,
validation_data_true,
test_data_input,
test_data_true) = load_and_parse_ML_20M(args.data_dir)
| DeepLearningExamples-master | TensorFlow/Recommendation/VAE-CF/prepare_dataset.py |
#!/usr/bin/python3
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from functools import partial
import json
import logging
from argparse import ArgumentParser
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
import numpy as np
import horovod.tensorflow as hvd
from mpi4py import MPI
import dllogger
import time
from vae.utils.round import round_8
from vae.metrics.recall import recall
from vae.metrics.ndcg import ndcg
from vae.models.train import VAE
from vae.load.preprocessing import load_and_parse_ML_20M
def main():
hvd.init()
mpi_comm = MPI.COMM_WORLD
parser = ArgumentParser(description="Train a Variational Autoencoder for Collaborative Filtering in TensorFlow")
parser.add_argument('--train', action='store_true',
help='Run training of VAE')
parser.add_argument('--test', action='store_true',
help='Run validation of VAE')
parser.add_argument('--inference_benchmark', action='store_true',
help='Measure inference latency and throughput on a variety of batch sizes')
parser.add_argument('--amp', action='store_true', default=False,
help='Enable Automatic Mixed Precision')
parser.add_argument('--epochs', type=int, default=400,
help='Number of epochs to train')
parser.add_argument('--batch_size_train', type=int, default=24576,
help='Global batch size for training')
parser.add_argument('--batch_size_validation', type=int, default=10000,
help='Used both for validation and testing')
    parser.add_argument('--validation_step', type=int, default=50,
                        help='Run validation once every this many training epochs')
parser.add_argument('--warm_up_epochs', type=int, default=5,
help='Number of epochs to omit during benchmark')
parser.add_argument('--total_anneal_steps', type=int, default=15000,
help='Number of annealing steps')
parser.add_argument('--anneal_cap', type=float, default=0.1,
help='Annealing cap')
parser.add_argument('--lam', type=float, default=1.00,
help='Regularization parameter')
parser.add_argument('--lr', type=float, default=0.004,
help='Learning rate')
parser.add_argument('--beta1', type=float, default=0.90,
help='Adam beta1')
parser.add_argument('--beta2', type=float, default=0.90,
help='Adam beta2')
parser.add_argument('--top_results', type=int, default=100,
help='Number of results to be recommended')
parser.add_argument('--xla', action='store_true', default=False,
help='Enable XLA')
parser.add_argument('--trace', action='store_true', default=False,
help='Save profiling traces')
parser.add_argument('--activation', type=str, default='tanh',
help='Activation function')
parser.add_argument('--log_path', type=str, default='./vae_cf.log',
help='Path to the detailed training log to be created')
parser.add_argument('--seed', type=int, default=0,
help='Random seed for TensorFlow and numpy')
parser.add_argument('--data_dir', default='/data', type=str,
help='Directory for storing the training data')
parser.add_argument('--checkpoint_dir', type=str,
default=None,
help='Path for saving a checkpoint after the training')
args = parser.parse_args()
args.world_size = hvd.size()
if args.batch_size_train % hvd.size() != 0:
raise ValueError('Global batch size should be a multiple of the number of workers')
args.local_batch_size = args.batch_size_train // hvd.size()
logger = logging.getLogger("VAE")
if hvd.rank() == 0:
logger.setLevel(logging.INFO)
dllogger.init(backends=[dllogger.JSONStreamBackend(verbosity=dllogger.Verbosity.VERBOSE,
filename=args.log_path),
dllogger.StdOutBackend(verbosity=dllogger.Verbosity.VERBOSE)])
else:
dllogger.init(backends=[])
logger.setLevel(logging.ERROR)
dllogger.metadata("final_ndcg@100", {"unit": None})
dllogger.metadata("mean_inference_throughput", {"unit": "samples/s"})
dllogger.metadata("mean_training_throughput", {"unit": "samples/s"})
if args.seed is None:
if hvd.rank() == 0:
seed = int(time.time())
else:
seed = None
seed = mpi_comm.bcast(seed, root=0)
else:
seed = args.seed
tf.random.set_random_seed(seed)
np.random.seed(seed)
args.seed = seed
dllogger.log(data=vars(args), step='PARAMETER')
# Suppress TF warnings
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# set AMP
os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '1' if args.amp else '0'
# load dataset
(train_data,
validation_data_input,
validation_data_true,
test_data_input,
test_data_true) = load_and_parse_ML_20M(args.data_dir)
# make sure all dims and sizes are divisible by 8
number_of_train_users, number_of_items = train_data.shape
number_of_items = round_8(number_of_items)
for data in [train_data,
validation_data_input,
validation_data_true,
test_data_input,
test_data_true]:
number_of_users, _ = data.shape
data.resize(number_of_users, number_of_items)
number_of_users, number_of_items = train_data.shape
encoder_dims = [number_of_items, 600, 200]
vae = VAE(train_data, encoder_dims, total_anneal_steps=args.total_anneal_steps,
anneal_cap=args.anneal_cap, batch_size_train=args.local_batch_size,
batch_size_validation=args.batch_size_validation, lam=args.lam,
lr=args.lr, beta1=args.beta1, beta2=args.beta2, activation=args.activation,
xla=args.xla, checkpoint_dir=args.checkpoint_dir, trace=args.trace,
top_results=args.top_results)
metrics = {'ndcg@100': partial(ndcg, R=100),
'recall@20': partial(recall, R=20),
'recall@50': partial(recall, R=50)}
if args.train:
vae.train(n_epochs=args.epochs, validation_data_input=validation_data_input,
validation_data_true=validation_data_true, metrics=metrics,
validation_step=args.validation_step)
if args.test and hvd.size() <= 1:
test_results = vae.test(test_data_input=test_data_input,
test_data_true=test_data_true, metrics=metrics)
for k, v in test_results.items():
print("{}:\t{}".format(k, v))
    elif args.test and hvd.size() > 1:
        print("Testing is not supported with horovod multigpu yet")
if args.inference_benchmark:
items_per_user = 10
item_indices = np.random.randint(low=0, high=10000, size=items_per_user)
user_indices = np.zeros(len(item_indices))
indices = np.stack([user_indices, item_indices], axis=1)
num_batches = 200
latencies = []
for i in range(num_batches):
start_time = time.time()
_ = vae.query(indices=indices)
end_time = time.time()
if i < 10:
                # skip the first 10 iterations as warmup
continue
latencies.append(end_time - start_time)
        result_data = {
            'batch_1_mean_throughput': 1 / np.mean(latencies),
            'batch_1_mean_latency': np.mean(latencies),
            'batch_1_p90_latency': np.percentile(latencies, 90),
            'batch_1_p95_latency': np.percentile(latencies, 95),
            'batch_1_p99_latency': np.percentile(latencies, 99),
        }
dllogger.log(data=result_data, step=tuple())
vae.close_session()
dllogger.flush()
if __name__ == '__main__':
main()
| DeepLearningExamples-master | TensorFlow/Recommendation/VAE-CF/main.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
LOG = logging.getLogger("VAE")
_log_format = logging.Formatter("[%(name)s| %(levelname)s]: %(message)s")
_log_handler = logging.StreamHandler()
_log_handler.setFormatter(_log_format)
LOG.addHandler(_log_handler)
| DeepLearningExamples-master | TensorFlow/Recommendation/VAE-CF/vae/__init__.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Recall@R counts the number of relevant items among the top-R recommendations
and normalizes it by the minimum of R and the number of items the user clicked:
Recall@R(u,ω) := Σ_{r=1}^{R} I[ω(r) ∈ I_u] / min(R,|I_u|)
https://arxiv.org/pdf/1802.05814.pdf, chapter 4.2
"""
import numpy as np
from scipy.sparse import csr_matrix
def recall(X_true: csr_matrix, X_top_k: np.array, R=100) -> np.array:
""" Calculates recall@R for each users in X_true and X_top_k matrices
Args:
X_true: Matrix containing True values for user-item interactions
X_top_k: Matrix containing indices picked by model
R: Number of elements taken into consideration
Returns:
Numpy array containing calculated recall@R for each user
"""
selected = np.take_along_axis(X_true, X_top_k[:, :R], axis=-1)
hit = selected.sum(axis=-1)
maxhit = np.minimum(X_true.getnnz(axis=1), R)
return np.squeeze(np.asarray(hit)) / maxhit
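
if __name__ == '__main__':
    # Tiny worked example (our addition): one user clicked items 1 and 3,
    # the model ranks items [3, 0, 2]. With R=2 only item 3 is a hit, so
    # recall@2 = 1 / min(2, 2) = 0.5.
    X_true = csr_matrix(np.array([[0., 1., 0., 1.]]))
    X_top_k = np.array([[3, 0, 2]])
    print(recall(X_true, X_top_k, R=2))  # -> [0.5]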
| DeepLearningExamples-master | TensorFlow/Recommendation/VAE-CF/vae/metrics/recall.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Normalized Discounted Cumulative Gain @ R is
NDCG@R(u,ω) := DCG@R(u,ω) / IDCG@R(u)
DCG@R(u,ω) := Σ_{r=1}^{R} (2^{I[ω(r) ∈ I_u]} − 1) / log(r + 1)
IDCG@R(u) := Σ_{r=1}^{min(R,|I_u|)} 1 / log(r + 1)
https://en.wikipedia.org/wiki/Discounted_cumulative_gain#Normalized_DCG
https://arxiv.org/pdf/1802.05814.pdf, chapter 4.2
"""
import numpy as np
from scipy.sparse import csr_matrix
def ndcg(X_true: csr_matrix, X_top_k: np.array, R=100) -> np.array:
""" Calculate ndcg@R for each users in X_true and X_pred matrices
Args:
X_true: Matrix containing True values for user-item interactions
        X_top_k: Matrix containing indices picked by model
R: Number of elements taken into consideration
Returns:
Numpy array containing calculated ndcg@R for each user
"""
penalties = 1. / np.log2(np.arange(2, R + 2))
selected = np.take_along_axis(X_true, X_top_k[:, :R], axis=-1)
DCG = selected * penalties
cpenalties = np.empty(R + 1)
np.cumsum(penalties, out=cpenalties[1:])
cpenalties[0] = 0
maxhit = np.minimum(X_true.getnnz(axis=1), R)
IDCG = cpenalties[maxhit]
return DCG / IDCG
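
if __name__ == '__main__':
    # Tiny worked example (our addition): true items are {1, 3}, the model
    # ranks [3, 0, 2]. At R=3 only rank 1 is a hit: DCG = 1/log2(2) = 1 and
    # IDCG = 1/log2(2) + 1/log2(3) ~= 1.631, so ndcg@3 ~= 0.613.
    X_true = csr_matrix(np.array([[0., 1., 0., 1.]]))
    X_top_k = np.array([[3, 0, 2]])
    print(ndcg(X_true, X_top_k, R=3))  # -> [~0.613]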
| DeepLearningExamples-master | TensorFlow/Recommendation/VAE-CF/vae/metrics/ndcg.py |
DeepLearningExamples-master | TensorFlow/Recommendation/VAE-CF/vae/metrics/__init__.py |
|
DeepLearningExamples-master | TensorFlow/Recommendation/VAE-CF/vae/utils/__init__.py |
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from functools import partial
def round_n(x, n=8):
return n * int(np.ceil(x / n))
round_8 = partial(round_n, n=8)
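
if __name__ == '__main__':
    # Quick illustration (our addition): sizes are padded up to the nearest
    # multiple of n so that tensor dims map cleanly onto Tensor Cores.
    assert round_8(13) == 16
    assert round_8(16) == 16
    assert round_n(13, n=64) == 64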
| DeepLearningExamples-master | TensorFlow/Recommendation/VAE-CF/vae/utils/round.py |
DeepLearningExamples-master | TensorFlow/Recommendation/VAE-CF/vae/models/__init__.py |
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import horovod.tensorflow as hvd
import scipy.sparse as sparse
import tensorflow as tf
import numpy as np
import time
import logging
import dllogger
from sklearn.preprocessing import normalize
from collections import defaultdict
from vae.models.vae import _VAEGraph, TRAINING, QUERY, VALIDATION
from vae.utils.round import round_8
LOG = logging.getLogger("VAE")
class VAE:
def __init__(self,
train_data,
encoder_dims,
decoder_dims=None,
batch_size_train=500,
batch_size_validation=2000,
lam=3e-2,
lr=1e-3,
beta1=0.9,
beta2=0.999,
total_anneal_steps=200000,
anneal_cap=0.2,
xla=True,
activation='tanh',
checkpoint_dir=None,
trace=False,
top_results=100):
if decoder_dims is None:
decoder_dims = encoder_dims[::-1]
for i in encoder_dims + decoder_dims + [batch_size_train, batch_size_validation]:
if i != round_8(i):
raise ValueError("all dims and batch sizes should be divisible by 8")
self.metrics_history = None
self.batch_size_train = batch_size_train
self.batch_size_validation = batch_size_validation
self.lam = lam
self.lr = lr
self.beta1 = beta1
self.beta2 = beta2
self.xla = xla
self.total_anneal_steps = total_anneal_steps
self.anneal_cap = anneal_cap
self.activation = activation
self.encoder_dims = encoder_dims
self.decoder_dims = decoder_dims
self.trace = trace
self.top_results = top_results
self.checkpoint_dir = checkpoint_dir if hvd.rank() == 0 else None
self._create_dataset(train_data,
batch_size_train,
encoder_dims)
self._setup_model()
self.metrics_history = defaultdict(lambda: [])
self.time_elapsed_training_history = []
self.time_elapsed_validation_history = []
self.training_throughputs = []
self.inference_throughputs = []
def _create_dataset(self, train_data, batch_size_train, encoder_dims):
generator, self.n_batch_per_train = self.batch_iterator(train_data,
None,
batch_size_train,
thread_idx=hvd.rank(),
thread_num=hvd.size())
dataset = tf.data.Dataset \
.from_generator(generator, output_types=(tf.int64, tf.float32)) \
.map(lambda i, v: tf.SparseTensor(i, v, (batch_size_train, encoder_dims[0]))) \
.prefetch(10)
self.iter = dataset.make_initializable_iterator()
self.inputs_train = self.iter.get_next()
def _setup_model(self):
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = str(hvd.local_rank())
hooks = [hvd.BroadcastGlobalVariablesHook(0)]
if self.trace:
hooks.append(tf.train.ProfilerHook(save_steps=1, output_dir='.'))
if self.xla:
LOG.info('Enabling XLA')
config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
else:
LOG.info('XLA disabled')
self._build_graph()
self.session = tf.train.MonitoredTrainingSession(config=config,
checkpoint_dir=self.checkpoint_dir,
save_checkpoint_secs=10,
hooks=hooks)
def _build_optimizer(self, loss):
        optimizer = tf.train.AdamOptimizer(learning_rate=self.lr, beta1=self.beta1, beta2=self.beta2)
return hvd.DistributedOptimizer(optimizer).minimize(
loss, global_step=tf.train.get_or_create_global_step())
def close_session(self):
if self.session is not None:
self.session.close()
def batch_iterator(self, data_input, data_true=None, batch_size=500, thread_idx=0, thread_num=1):
training = data_true is None
data_input = normalize(data_input)
indices = np.arange(data_input.shape[0])
global_batch_size = batch_size * hvd.size()
if training:
# crop the data so that each gpu has the same number of batches
stop = data_input.shape[0] // global_batch_size * global_batch_size
LOG.info('Cropping each epoch from: {} to {} samples'.format(data_input.shape[0], stop))
else:
stop = data_input.shape[0]
def generator():
data_in = data_input
epoch = 0
while True:
if training:
# deterministic shuffle necessary for multigpu
np.random.seed(epoch)
np.random.shuffle(indices)
data_in = data_in[indices]
for st_idx in range(thread_idx * batch_size, stop, thread_num * batch_size):
batch = data_in[st_idx:st_idx + batch_size].copy()
batch = batch.tocoo()
idxs = np.stack([batch.row, batch.col], axis=1)
vals = batch.data
if training:
np.random.seed(epoch * thread_num + thread_idx)
nnz = vals.shape[0]
# dropout with keep_prob=0.5
vals *= (2 * np.random.randint(2, size=nnz))
yield (idxs, vals)
else:
yield idxs, vals, data_true[st_idx:st_idx + batch_size]
if not training:
break
epoch += 1
be = thread_idx * batch_size
st = thread_num * batch_size
return generator, int(np.ceil((stop - be) / st))
def _build_graph(self):
self.vae = _VAEGraph(self.encoder_dims, self.decoder_dims, self.activation)
self.inputs_validation = tf.sparse.placeholder(
dtype=tf.float32,
shape=np.array([self.batch_size_validation, self.vae.input_dim], dtype=np.int32))
self.inputs_query = tf.sparse.placeholder(
dtype=tf.float32,
shape=np.array([1, self.vae.input_dim], dtype=np.int32))
self.top_k_validation = self._gen_handlers(mode=VALIDATION)
self.logits_train, self.loss_train, self.optimizer = self._gen_handlers(mode=TRAINING)
self.top_k_query = self._gen_handlers(mode=QUERY)
global_step = tf.train.get_or_create_global_step()
self.increment_global_step = tf.assign(global_step, global_step + 1)
def _gen_handlers(self, mode):
# model input
if mode is TRAINING:
inputs = self.inputs_train
elif mode is VALIDATION:
inputs = self.inputs_validation
elif mode is QUERY:
inputs = self.inputs_query
else:
assert False
if mode is TRAINING:
batch_size = self.batch_size_train
elif mode is VALIDATION:
batch_size = self.batch_size_validation
elif mode is QUERY:
batch_size = 1
else:
assert False
# model output
logits, latent_mean, latent_log_var = self.vae(inputs, mode=mode)
if mode in [VALIDATION, QUERY]:
mask = tf.ones_like(inputs.values) * (-np.inf)
logits = tf.tensor_scatter_nd_update(logits, inputs.indices, mask)
top_k_values, top_k_indices = tf.math.top_k(logits, sorted=True, k=self.top_results)
return top_k_indices
softmax = tf.nn.log_softmax(logits)
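        # KL annealing (warm-up): the weight of the KL term grows linearly
        # with the global step and saturates at anneal_cap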
anneal = tf.math.minimum(
tf.cast(tf.train.get_or_create_global_step(), tf.float32) /
self.total_anneal_steps, self.anneal_cap)
# KL divergence
KL = tf.reduce_mean(
tf.reduce_sum(
(-latent_log_var + tf.exp(latent_log_var) + latent_mean ** 2 - 1)
/ 2,
axis=1))
# per-user average negative log-likelihood part of loss
ll_loss = -tf.reduce_sum(tf.gather_nd(softmax, inputs.indices)) / batch_size
# regularization part of loss
reg_loss = 2 * tf.reduce_sum(
tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
loss = ll_loss + self.lam * reg_loss + anneal * KL
train_op = self._build_optimizer(loss)
return logits, ll_loss, train_op
def train(
self,
n_epochs: int,
validation_data_input: sparse.csr_matrix,
validation_data_true: sparse.csr_matrix,
metrics: dict, # Dict[str, matrix -> matrix -> float]
        validation_step: int = 10,
):
"""
Train the model
:param n_epochs: number of epochs
        :param validation_data_input: held-out input interactions, shape users count x items count
        :param validation_data_true: held-out true interactions used to score the predictions
:param metrics: Dictionary of metric names to metric functions
:param validation_step: If it's set to n then validation is run once every n epochs
"""
self.total_time_start = time.time()
self.session.run(self.iter.initializer)
num_workers = hvd.size()
for epoch in range(1, n_epochs + 1):
init_time = time.time()
for i in range(self.n_batch_per_train):
self.session.run(self.optimizer)
batches_per_epoch = i + 1
training_duration = time.time() - init_time
self.time_elapsed_training_history.append(training_duration)
training_throughput = num_workers * batches_per_epoch * self.batch_size_train / training_duration
self.training_throughputs.append(training_throughput)
dllogger.log(data={"train_epoch_time" : training_duration,
"train_throughput" : training_throughput},
step=(epoch,))
if (epoch % validation_step == 0 or epoch == n_epochs) and hvd.rank() == 0:
init_time = time.time()
metrics_scores = self.test(validation_data_input,
validation_data_true,
metrics,
epoch=epoch)
for name, score in metrics_scores.items():
self.metrics_history[name].append(score)
validation_duration = time.time() - init_time
self.time_elapsed_validation_history.append(validation_duration)
dllogger.log(data={"valid_time" : validation_duration},
step=(epoch,))
self.log_metrics(epoch, metrics_scores, n_epochs)
self.total_time = time.time() - self.total_time_start
if hvd.rank() == 0:
self.log_final_stats()
def test(
self,
test_data_input,
test_data_true,
metrics,
epoch=0,
):
"""
Test the performance of the model
:param metrics: Dictionary of metric names to metric functions
"""
metrics_scores = defaultdict(lambda: [])
gen = self.batch_iterator_val(test_data_input, test_data_true)
for idxs, vals, X_true in gen():
inference_begin = time.time()
if self.trace:
pred_val, _ = self.session.run([self.top_k_validation, self.increment_global_step],
feed_dict={self.inputs_validation: (idxs, vals)})
else:
pred_val = self.session.run(self.top_k_validation,
feed_dict={self.inputs_validation: (idxs, vals)})
elapsed = time.time() - inference_begin
pred_val = np.copy(pred_val)
inference_throughput = self.batch_size_validation / elapsed
self.inference_throughputs.append(inference_throughput)
dllogger.log(data={"inference_throughput" : inference_throughput},
step=(epoch,))
for name, metric in metrics.items():
metrics_scores[name].append(metric(X_true, pred_val))
# For some random seeds passed to the data preprocessing script
# the test set might contain samples that have no true items to be predicted.
# At least one such sample is present in about 7% of all possible test sets.
# We decided not to change the preprocessing to remain comparable to the original implementation.
# Therefore we're using the nan-aware mean from numpy to ignore users with no items to be predicted.
return {name: np.nanmean(scores) for name, scores in metrics_scores.items()}
def query(self, indices: np.ndarray):
"""
        Inference for batch size 1
        :param indices: (row, col) index pairs of the items the user interacted with
        :return: indices of the top_results recommended items
"""
values = np.ones(shape=(1, len(indices)))
values = normalize(values)
values = values.reshape(-1)
res = self.session.run(
self.top_k_query,
feed_dict={self.inputs_query: (indices,
values)})
return res
def _increment_global_step(self):
res = self.session.run(self.increment_global_step)
print('increment global step result: ', res)
def batch_iterator_train(self, data_input):
"""
:return: iterator of consecutive batches and its length
"""
data_input = normalize(data_input)
indices = np.arange(data_input.shape[0])
np.random.shuffle(indices)
data_input = data_input[list(indices)]
nsize, _ = data_input.shape
csize = nsize // self.batch_size_train * self.batch_size_train
def generator():
while True:
for st_idx in range(0, csize, self.batch_size_train):
idxs, vals = self.next_batch(data_input,st_idx, self.batch_size_train)
nnz = vals.shape[0]
vals *= (2 * np.random.randint(2, size=nnz))
yield (idxs, vals)
return generator, int(np.ceil(csize / self.batch_size_train))
def batch_iterator_val(self, data_input, data_true):
"""
:return: iterator of consecutive batches and its length
"""
data_input = normalize(data_input)
nsize, _ = data_input.shape
csize = nsize // self.batch_size_validation * self.batch_size_validation
def generator():
for st_idx in range(0, csize, self.batch_size_validation):
idxs, vals = self.next_batch(data_input, st_idx, self.batch_size_validation)
yield idxs, vals, data_true[st_idx:st_idx + self.batch_size_validation]
return generator
def next_batch(self, data_input, st_idx, batch_size):
batch = data_input[st_idx:st_idx + batch_size].copy()
batch = batch.tocoo()
idxs = np.stack([batch.row, batch.col], axis=1)
vals = batch.data
return idxs,vals
def log_metrics(self, epoch, metrics_scores, n_epochs):
dllogger.log(data=metrics_scores, step=(epoch,))
def log_final_stats(self):
data = {"mean_training_throughput": np.mean(self.training_throughputs[10:]),
"mean_inference_throughput": np.mean(self.inference_throughputs[2:])}
for metric_name, metric_values in self.metrics_history.items():
data["final_" + metric_name] = metric_values[-1]
dllogger.log(data=data, step=tuple())
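
# A minimal end-to-end sketch (our addition; assumes preprocessed csr matrices
# and an initialized horovod runtime, as in main.py):
#
#   vae = VAE(train_data, encoder_dims=[n_items, 600, 200])
#   vae.train(n_epochs=400,
#             validation_data_input=vad_input, validation_data_true=vad_true,
#             metrics={'ndcg@100': partial(ndcg, R=100)},
#             validation_step=50)
#   top_items = vae.query(indices=np.stack([user_rows, item_cols], axis=1))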
| DeepLearningExamples-master | TensorFlow/Recommendation/VAE-CF/vae/models/train.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow.keras.layers import Dense
class DenseFromSparse(Dense):
def call(self, inputs):
if type(inputs) != tf.sparse.SparseTensor:
raise ValueError("input should be of type " + str(tf.sparse.SparseTensor))
rank = len(inputs.get_shape().as_list())
if rank != 2:
raise NotImplementedError("input should be rank 2")
else:
outputs = tf.sparse.sparse_dense_matmul(inputs, self.kernel)
if self.use_bias:
outputs = tf.nn.bias_add(outputs, self.bias)
if self.activation is not None:
return self.activation(outputs) # pylint: disable=not-callable
return outputs
| DeepLearningExamples-master | TensorFlow/Recommendation/VAE-CF/vae/models/layers.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from vae.models.layers import DenseFromSparse
TRAINING = 0
VALIDATION = 1
QUERY = 2
class _VAEGraph(tf.keras.Model):
def __init__(self, encoder_dims, decoder_dims, activation='tanh'):
super(_VAEGraph, self).__init__()
if encoder_dims[-1] != decoder_dims[0]:
raise Exception("encoder/decoder dims mismatch")
self.input_dim = encoder_dims[0]
self.output_dim = decoder_dims[-1]
self.activation = tf.nn.tanh if activation == 'tanh' else tf.nn.relu
self.encoder = self.encoder_model(encoder_dims[1:])
self.decoder = self.decoder_model(decoder_dims[1:])
def call(self, inputs: tf.SparseTensor, mode):
""" Get handlers to VAE output
:param inputs: batch_size * items_count as sparse tensor.
        :param mode: one of TRAINING (0), VALIDATION (1) or QUERY (2)
:return: Tuple of 3 tensors:
1. decoder output: batch_size * items_count tensor
2. latent_mean: mean tensor between encoder and decoder. It has size batch_size * size_of_mean_vector
        3. latent_log_var: tensor containing logarithms of variances. It has size batch_size * size_of_var_vector
"""
latent_all = self.encoder(inputs, training=(mode is TRAINING))
latent_mean = latent_all[:, 0]
latent_log_var = latent_all[:, 1]
latent_std = tf.exp(0.5 * latent_log_var)
# reparametrization trick
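        # z = latent_mean + latent_std * epsilon keeps the sample differentiable
        # w.r.t. the encoder outputs; outside training the noise term is
        # dropped and the mean is decoded directly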
batch = tf.shape(latent_mean)[0]
dim = tf.shape(latent_mean)[1]
epsilon = tf.random_normal(shape=(batch, dim))
decoder_input = latent_mean + (int(mode is TRAINING)) * latent_std * epsilon
decoder_output = self.decoder(decoder_input, training=(mode is TRAINING))
return decoder_output, latent_mean, latent_log_var
def encoder_model(self, dims):
assert dims
last = dims[-1]
dims[-1] = 2 * last
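        # the final encoder layer therefore emits 2*last units, which the
        # closing Reshape splits into rows (latent mean, latent log-variance)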
layers = tf.keras.layers
return tf.keras.Sequential(
[DenseFromSparse(
dims[0],
activation=self.activation,
name="encoder_{}".format(dims[0]),
kernel_initializer=tf.contrib.layers.xavier_initializer(),
bias_initializer=tf.truncated_normal_initializer(stddev=0.001),
kernel_regularizer=tf.contrib.layers.l2_regularizer)
] + [
layers.Dense(
d,
activation=self.activation,
name="encoder_{}".format(d),
kernel_initializer=tf.contrib.layers.xavier_initializer(),
bias_initializer=tf.truncated_normal_initializer(stddev=0.001),
kernel_regularizer=tf.contrib.layers.l2_regularizer)
for d in dims[1:-1]
] + [
layers.Dense(
dims[-1],
name="encoder_{}".format(dims[-1]),
kernel_initializer=tf.contrib.layers.xavier_initializer(),
bias_initializer=tf.truncated_normal_initializer(stddev=0.001),
kernel_regularizer=tf.contrib.layers.l2_regularizer)
] + [layers.Reshape(target_shape=(2, last))])
def decoder_model(self, dims):
assert dims
layers = tf.keras.layers
return tf.keras.Sequential([
layers.Dense(
d,
activation=self.activation,
name="decoder_{}".format(d),
kernel_initializer=tf.contrib.layers.xavier_initializer(),
bias_initializer=tf.truncated_normal_initializer(stddev=0.001),
kernel_regularizer=tf.contrib.layers.l2_regularizer) for d in dims[:-1]
] + [
layers.Dense(
dims[-1],
name="decoder_{}".format(dims[-1]),
kernel_initializer=tf.contrib.layers.xavier_initializer(),
bias_initializer=tf.truncated_normal_initializer(stddev=0.001),
kernel_regularizer=tf.contrib.layers.l2_regularizer)
])
| DeepLearningExamples-master | TensorFlow/Recommendation/VAE-CF/vae/models/vae.py |
DeepLearningExamples-master | TensorFlow/Recommendation/VAE-CF/vae/load/__init__.py |
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from collections import defaultdict
from glob import glob
import pandas as pd
from scipy import sparse
import scipy.sparse as sp
import numpy as np
from scipy.sparse import load_npz, csr_matrix
import logging
import json
LOG = logging.getLogger("VAE")
def save_as_npz(m_sp, path):
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
sp.save_npz(path, m_sp)
def get_count(tp, id):
playcount_groupbyid = tp[[id]].groupby(id, as_index=False)
count = playcount_groupbyid.size()
return count
def filter_triplets(tp, min_uc=5, min_sc=0):
# Only keep the triplets for items which were clicked on by at least min_sc users.
if min_sc > 0:
itemcount = get_count(tp, 'movieId')
tp = tp[tp['movieId'].isin(itemcount.index[itemcount >= min_sc])]
# Only keep the triplets for users who clicked on at least min_uc items
# After doing this, some of the items will have less than min_uc users, but should only be a small proportion
if min_uc > 0:
usercount = get_count(tp, 'userId')
tp = tp[tp['userId'].isin(usercount.index[usercount >= min_uc])]
# Update both usercount and itemcount after filtering
usercount, itemcount = get_count(tp, 'userId'), get_count(tp, 'movieId')
return tp, usercount, itemcount
def save_id_mappings(cache_dir, show2id, profile2id):
if not os.path.isdir(cache_dir):
os.makedirs(cache_dir)
for d, filename in [(show2id, 'show2id.json'),
(profile2id, 'profile2id.json')]:
with open(os.path.join(cache_dir, filename), 'w') as f:
d = {str(k): v for k, v in d.items()}
json.dump(d, f, indent=4)
def load_and_parse_ML_20M(data_dir, threshold=4, parse=True):
"""
Original way of processing ml-20m dataset from VAE for CF paper
Copyright [2018] [Dawen Liang, Rahul G. Krishnan, Matthew D. Hoffman, and Tony Jebara]
SPDX-License-Identifier: Apache-2.0
Modifications copyright (C) 2019 Michał Filipiuk, Albert Cieślak, Frederic Grabowski, Radosław Rowicki
"""
cache_dir = os.path.join(data_dir, "ml-20m/preprocessed")
train_data_file = os.path.join(cache_dir, "train_data.npz")
vad_data_true_file = os.path.join(cache_dir, "vad_data_true.npz")
vad_data_test_file = os.path.join(cache_dir, "vad_data_test.npz")
test_data_true_file = os.path.join(cache_dir, "test_data_true.npz")
test_data_test_file = os.path.join(cache_dir, "test_data_test.npz")
if (os.path.isfile(train_data_file)
and os.path.isfile(vad_data_true_file)
and os.path.isfile(vad_data_test_file)
and os.path.isfile(test_data_true_file)
and os.path.isfile(test_data_test_file)):
LOG.info("Already processed, skipping.")
return load_npz(train_data_file), \
load_npz(vad_data_true_file), \
load_npz(vad_data_test_file), \
load_npz(test_data_true_file), \
load_npz(test_data_test_file),
if not parse:
raise ValueError('Dataset not preprocessed. Please run python3 prepare_dataset.py first.')
LOG.info("Parsing movielens.")
source_file = os.path.join(data_dir, "ml-20m/extracted/ml-20m", "ratings.csv")
if not glob(source_file):
        raise ValueError('Dataset not downloaded. Please download the ML-20m dataset from '
                         'https://grouplens.org/datasets/movielens/20m/, unzip it and put it in ' + source_file)
raw_data = pd.read_csv(source_file)
raw_data.drop('timestamp', axis=1, inplace=True)
raw_data = raw_data[raw_data['rating'] >= threshold]
raw_data, user_activity, item_popularity = filter_triplets(raw_data)
unique_uid = user_activity.index
idx_perm = np.random.permutation(unique_uid.size)
unique_uid = unique_uid[idx_perm]
n_users = unique_uid.size
n_heldout_users = 10000
true_users = unique_uid[:(n_users - n_heldout_users * 2)]
vd_users = unique_uid[(n_users - n_heldout_users * 2): (n_users - n_heldout_users)]
test_users = unique_uid[(n_users - n_heldout_users):]
train_plays = raw_data.loc[raw_data['userId'].isin(true_users)]
unique_sid = pd.unique(train_plays['movieId'])
show2id = dict((sid, i) for (i, sid) in enumerate(unique_sid))
profile2id = dict((pid, i) for (i, pid) in enumerate(unique_uid))
save_id_mappings(cache_dir, show2id, profile2id)
def split_train_test_proportion(data, test_prop=0.2):
data_grouped_by_user = data.groupby('userId')
true_list, test_list = list(), list()
for i, (_, group) in enumerate(data_grouped_by_user):
n_items_u = len(group)
if n_items_u >= 5:
idx = np.zeros(n_items_u, dtype='bool')
idx[np.random.choice(n_items_u, size=int(test_prop * n_items_u), replace=False).astype('int64')] = True
true_list.append(group[np.logical_not(idx)])
test_list.append(group[idx])
else:
true_list.append(group)
data_true = pd.concat(true_list)
data_test = pd.concat(test_list)
return data_true, data_test
vad_plays = raw_data.loc[raw_data['userId'].isin(vd_users)]
vad_plays = vad_plays.loc[vad_plays['movieId'].isin(unique_sid)]
vad_plays_true, vad_plays_test = split_train_test_proportion(vad_plays)
test_plays = raw_data.loc[raw_data['userId'].isin(test_users)]
test_plays = test_plays.loc[test_plays['movieId'].isin(unique_sid)]
test_plays_true, test_plays_test = split_train_test_proportion(test_plays)
def numerize(tp):
uid = tp['userId'].map(lambda x: profile2id[x])
sid = tp['movieId'].map(lambda x: show2id[x])
return pd.DataFrame(data={'uid': uid, 'sid': sid}, columns=['uid', 'sid'])
train_data = numerize(train_plays)
vad_data_true = numerize(vad_plays_true)
vad_data_test = numerize(vad_plays_test)
test_data_true = numerize(test_plays_true)
test_data_test = numerize(test_plays_test)
n_items = len(unique_sid)
def load_train_data(tp):
n_users = tp['uid'].max() + 1
rows, cols = tp['uid'], tp['sid']
data = sparse.csr_matrix((np.ones_like(rows),
(rows, cols)), dtype='float64',
shape=(n_users, n_items))
return data
train_data = load_train_data(train_data)
def load_true_test_data(tp_true, tp_test):
start_idx = min(tp_true['uid'].min(), tp_test['uid'].min())
end_idx = max(tp_true['uid'].max(), tp_test['uid'].max())
rows_true, cols_true = tp_true['uid'] - start_idx, tp_true['sid']
rows_test, cols_test = tp_test['uid'] - start_idx, tp_test['sid']
data_true = sparse.csr_matrix((np.ones_like(rows_true),
(rows_true, cols_true)), dtype='float64', shape=(end_idx - start_idx + 1, n_items))
data_test = sparse.csr_matrix((np.ones_like(rows_test),
(rows_test, cols_test)), dtype='float64', shape=(end_idx - start_idx + 1, n_items))
return data_true, data_test
vad_data_true, vad_data_test = load_true_test_data(vad_data_true, vad_data_test)
test_data_true, test_data_test = load_true_test_data(test_data_true, test_data_test)
save_as_npz(train_data, train_data_file)
save_as_npz(vad_data_true, vad_data_true_file)
save_as_npz(vad_data_test, vad_data_test_file)
save_as_npz(test_data_true, test_data_true_file)
save_as_npz(test_data_test, test_data_test_file)
return train_data, vad_data_true, vad_data_test, test_data_true, test_data_test
def filter_data(data, min_users=1, min_items=5):
"""
:param data: input matrix
    :param min_users: only keep items that were clicked by at least min_users users
    :param min_items: only keep users that clicked at least min_items items
:return: filtered matrix
"""
col_count = defaultdict(lambda: 0)
for col in data.nonzero()[1]:
col_count[col] += 1
filtered_col = [k for k, v in col_count.items() if v >= min_users]
filtered_data_c = data[:, filtered_col]
del data
row_count = defaultdict(lambda: 0)
for row in filtered_data_c.nonzero()[0]:
row_count[row] += 1
filtered_row = [k for k, v in row_count.items() if v >= min_items]
filtered_data_r = filtered_data_c[filtered_row, :]
del filtered_data_c
return filtered_data_r
def split_into_train_val_test(data, val_ratio, test_ratio):
"""
:param data: input matrix
:param val_ratio: Ratio of validation users to all users
:param test_ratio: Ratio of test users to all users
:return: Tuple of 3 matrices : {train_matrix, val_matrix, test_matrix}
"""
assert val_ratio + test_ratio < 1
train_ratio = 1 - val_ratio - test_ratio
rows_count = data.shape[0]
idx = np.random.permutation(range(rows_count))
train_users_count = int(np.rint(rows_count * train_ratio))
val_users_count = int(np.rint(rows_count * val_ratio))
    separator = train_users_count + val_users_count
    train_matrix = data[idx[:train_users_count]]
    val_matrix = data[idx[train_users_count:separator]]
    test_matrix = data[idx[separator:]]
return train_matrix, val_matrix, test_matrix
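
# e.g. (our addition): split_into_train_val_test(data, val_ratio=0.1,
# test_ratio=0.1) shuffles the users and keeps 80% of the rows for training
# and 10% each for validation and test.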
def split_movies_into_train_test(data, train_ratio):
"""
Splits data into 2 matrices. The users stay the same, but the items are being split by train_ratio
:param data: input matrix
:param train_ratio: Ratio of input items to all items
:return: tuple of 2 matrices: {train_matrix, test_matrix}
"""
rows_count, columns_count = data.shape
train_rows = list()
train_columns = list()
test_rows = list()
test_columns = list()
for i in range(rows_count):
user_movies = data.getrow(i).nonzero()[1]
np.random.shuffle(user_movies)
movies_count = len(user_movies)
train_count = int(np.floor(movies_count * train_ratio))
test_count = movies_count - train_count
train_movies = user_movies[:train_count]
test_movies = user_movies[train_count:]
train_rows += ([i] * train_count)
train_columns += list(train_movies)
test_rows += ([i] * test_count)
test_columns += list(test_movies)
train_matrix = csr_matrix(([1] * len(train_rows), (train_rows, train_columns)), shape=(rows_count, columns_count))
test_matrix = csr_matrix(([1] * len(test_rows), (test_rows, test_columns)), shape=(rows_count, columns_count))
return train_matrix, test_matrix
def remove_items_that_doesnt_occure_in_train(train_matrix, val_matrix, test_matrix):
"""
    Remove items that don't occur in the train matrix
:param train_matrix: training data
:param val_matrix: validation data
:param test_matrix: test data
:return: Input matrices without some items
"""
item_occure = defaultdict(lambda: False)
for col in train_matrix.nonzero()[1]:
item_occure[col] = True
    non_empty_items = [k for k, v in item_occure.items() if v]
return train_matrix[:, non_empty_items], val_matrix[:, non_empty_items], test_matrix[:, non_empty_items]
| DeepLearningExamples-master | TensorFlow/Recommendation/VAE-CF/vae/load/preprocessing.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import numpy as np
import tensorflow as tf
__all__ = ['BenchmarkHook']
class BenchmarkHook(tf.train.SessionRunHook):
latencies = ['avg', 50, 90, 95, 99, 100]
def __init__(self, global_batch_size, warmup_steps=10):
self.warmup_steps = warmup_steps
self.global_batch_size = global_batch_size
self.iter_times = []
def before_run(self, run_context):
self.t0 = time.time()
def after_run(self, run_context, run_values):
batch_time = time.time() - self.t0
self.iter_times.append(batch_time)
def get_average_speed_and_latencies(self):
if len(self.iter_times) > self.warmup_steps + 5:
warmup_steps = self.warmup_steps
elif len(self.iter_times) > 15:
warmup_steps = 10
elif len(self.iter_times) > 10:
warmup_steps = 5
elif len(self.iter_times) > 4:
warmup_steps = 2
elif len(self.iter_times) > 1:
warmup_steps = 1
else:
warmup_steps = 0
times = self.iter_times[warmup_steps:]
avg_time = np.mean(times)
speed = self.global_batch_size / avg_time
latencies = {}
for lat in self.latencies:
if lat == 'avg':
val = avg_time
else:
val = np.percentile(times, lat)
latencies[str(lat)] = val
return speed, latencies
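
# A minimal usage sketch (our addition): attach the hook to any tf.estimator
# run and read back the mean throughput and latency percentiles afterwards.
#
#   hook = BenchmarkHook(global_batch_size=512, warmup_steps=10)
#   estimator.train(input_fn=train_input_fn, hooks=[hook], steps=100)
#   speed, latencies = hook.get_average_speed_and_latencies()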
| DeepLearningExamples-master | TensorFlow/Translation/GNMT/benchmark_hooks.py |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Basic sequence-to-sequence model with dynamic RNN support."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import os
import tensorflow as tf
import numpy as np
from tensorflow.python.framework import function
from tensorflow.python.ops import math_ops
import attention_wrapper
import model_helper
import beam_search_decoder
from utils import iterator_utils
from utils import math_utils
from utils import misc_utils as utils
from utils import vocab_utils
utils.check_tensorflow_version()
__all__ = ["BaseModel"]
def create_attention_mechanism(
num_units, memory, source_sequence_length, dtype=None):
"""Create attention mechanism based on the attention_option."""
# Mechanism
attention_mechanism = attention_wrapper.BahdanauAttention(
num_units,
memory,
memory_sequence_length=tf.to_int64(source_sequence_length),
normalize=True, dtype=dtype)
return attention_mechanism
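
# Illustrative wiring (our addition; the names are placeholders): the
# mechanism expects batch-major memory of shape [batch, time, num_units]
# together with the per-example source lengths.
#
#   mechanism = create_attention_mechanism(
#       num_units=1024, memory=encoder_outputs,
#       source_sequence_length=source_lengths, dtype=tf.float32)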
class BaseModel(object):
"""Sequence-to-sequence base class.
"""
def __init__(self, hparams, mode, features, scope=None, extra_args=None):
"""Create the model.
Args:
hparams: Hyperparameter configurations.
mode: TRAIN | EVAL | INFER
features: a dict of input features.
scope: scope of the model.
extra_args: model_helper.ExtraArgs, for passing customizable functions.
"""
self.hparams = hparams
# Set params
self._set_params_initializer(hparams, mode, features, scope, extra_args)
# Train graph
res = self.build_graph(hparams, scope=scope)
self._set_train_or_infer(res, hparams)
def _set_params_initializer(self,
hparams,
mode,
features,
scope,
extra_args=None):
"""Set various params for self and initialize."""
self.mode = mode
self.src_vocab_size = hparams.src_vocab_size
self.tgt_vocab_size = hparams.tgt_vocab_size
self.features = features
self.time_major = hparams.time_major
if hparams.use_char_encode:
assert (not self.time_major), ("Can't use time major for"
" char-level inputs.")
self.dtype = tf.float16 if hparams.use_fp16 else tf.float32
# extra_args: to make it flexible for adding external customizable code
self.single_cell_fn = None
if extra_args:
self.single_cell_fn = extra_args.single_cell_fn
# Set num units
self.num_units = hparams.num_units
# Set num layers
self.num_encoder_layers = hparams.num_encoder_layers
self.num_decoder_layers = hparams.num_decoder_layers
assert self.num_encoder_layers
assert self.num_decoder_layers
# Set num residual layers
if hasattr(hparams, "num_residual_layers"): # compatible common_test_utils
self.num_encoder_residual_layers = hparams.num_residual_layers
self.num_decoder_residual_layers = hparams.num_residual_layers
else:
self.num_encoder_residual_layers = hparams.num_encoder_residual_layers
self.num_decoder_residual_layers = hparams.num_decoder_residual_layers
# Batch size
self.batch_size = tf.size(self.features["source_sequence_length"])
# Global step
global_step = tf.train.get_global_step()
if global_step is not None:
utils.print_out("global_step already created!")
self.global_step = tf.train.get_or_create_global_step()
utils.print_out("model.global_step.name: %s" % self.global_step.name)
# Initializer
self.random_seed = hparams.random_seed
initializer = model_helper.get_initializer(
hparams.init_op, self.random_seed, hparams.init_weight)
tf.get_variable_scope().set_initializer(initializer)
# Embeddings
self.encoder_emb_lookup_fn = tf.nn.embedding_lookup
self.init_embeddings(hparams, scope)
def _set_train_or_infer(self, res, hparams):
"""Set up training."""
loss = res[1]
if self.mode == tf.contrib.learn.ModeKeys.TRAIN:
self.train_loss = loss
self.word_count = tf.reduce_sum(
self.features["source_sequence_length"]) + tf.reduce_sum(
self.features["target_sequence_length"])
elif self.mode == tf.contrib.learn.ModeKeys.EVAL:
self.eval_loss = loss
elif self.mode == tf.contrib.learn.ModeKeys.INFER:
self.infer_logits = res[0]
self.infer_loss = loss
self.sample_id = res[2]
if self.mode != tf.contrib.learn.ModeKeys.INFER:
      ## Count the number of predicted words for computing ppl.
self.predict_count = tf.reduce_sum(
self.features["target_sequence_length"])
# Gradients and SGD update operation for training the model.
# Arrange for the embedding vars to appear at the beginning.
    # Only build bprop here when using dist_strategy in train mode; otherwise
    # the learning rate, grads and train_op are created in the estimator
    # model function.
with tf.name_scope("learning_rate"):
self.learning_rate = tf.constant(hparams.learning_rate)
# warm-up
self.learning_rate = self._get_learning_rate_warmup(hparams)
# decay
self.learning_rate = self._get_learning_rate_decay(hparams)
if (hparams.use_dist_strategy and
self.mode == tf.contrib.learn.ModeKeys.TRAIN):
# Gradients
params = tf.trainable_variables()
# Print trainable variables
utils.print_out("# Trainable variables")
utils.print_out(
"Format: <name>, <shape>, <dtype>, <(soft) device placement>")
for param in params:
utils.print_out(
" %s, %s, %s, %s" % (param.name, str(param.get_shape()),
param.dtype.name, param.op.device))
utils.print_out("Total params size: %.2f GB" % (4. * np.sum([
p.get_shape().num_elements()
for p in params
if p.shape.is_fully_defined()
]) / 2**30))
# Optimizer
if hparams.optimizer == "sgd":
opt = tf.train.GradientDescentOptimizer(self.learning_rate)
elif hparams.optimizer == "adam":
opt = tf.train.AdamOptimizer(self.learning_rate)
else:
raise ValueError("Unknown optimizer type %s" % hparams.optimizer)
assert opt is not None
grads_and_vars = opt.compute_gradients(
self.train_loss,
params,
colocate_gradients_with_ops=hparams.colocate_gradients_with_ops)
gradients = [x for (x, _) in grads_and_vars]
clipped_grads, grad_norm = model_helper.gradient_clip(
gradients, max_gradient_norm=hparams.max_gradient_norm)
self.grad_norm = grad_norm
self.params = params
self.grads = clipped_grads
self.update = opt.apply_gradients(
list(zip(clipped_grads, params)), global_step=self.global_step)
else:
self.grad_norm = None
self.update = None
self.params = None
self.grads = None
def _get_learning_rate_warmup(self, hparams):
"""Get learning rate warmup."""
warmup_steps = hparams.warmup_steps
warmup_scheme = hparams.warmup_scheme
utils.print_out(" learning_rate=%g, warmup_steps=%d, warmup_scheme=%s" %
(hparams.learning_rate, warmup_steps, warmup_scheme))
if not warmup_scheme:
return self.learning_rate
    # Apply inverse decay while the global step is less than warmup steps.
    # Inspired by https://arxiv.org/pdf/1706.03762.pdf (Section 5.3)
    # When step < warmup_steps,
    #   learning_rate *= warmup_factor ** (warmup_steps - step)
    # (A plain-Python sketch of this schedule appears near the end of this
    # file.)
if warmup_scheme == "t2t":
      # 0.01^(1/warmup_steps): we start with an lr 100 times smaller
warmup_factor = tf.exp(tf.log(0.01) / warmup_steps)
inv_decay = warmup_factor**(tf.to_float(warmup_steps - self.global_step))
else:
raise ValueError("Unknown warmup scheme %s" % warmup_scheme)
return tf.cond(
self.global_step < hparams.warmup_steps,
lambda: inv_decay * self.learning_rate,
lambda: self.learning_rate,
name="learning_rate_warump_cond")
def _get_decay_info(self, hparams):
"""Return decay info based on decay_scheme."""
if hparams.decay_scheme in [
"luong5", "luong10", "luong234", "jamesqin1616"
]:
epoch_size, _, _ = iterator_utils.get_effective_epoch_size(hparams)
num_train_steps = int(hparams.max_train_epochs * epoch_size / hparams.batch_size)
decay_factor = 0.5
if hparams.decay_scheme == "luong5":
start_decay_step = int(num_train_steps / 2)
decay_times = 5
remain_steps = num_train_steps - start_decay_step
elif hparams.decay_scheme == "luong10":
start_decay_step = int(num_train_steps / 2)
decay_times = 10
remain_steps = num_train_steps - start_decay_step
elif hparams.decay_scheme == "luong234":
start_decay_step = int(num_train_steps * 2 / 3)
decay_times = 4
remain_steps = num_train_steps - start_decay_step
elif hparams.decay_scheme == "jamesqin1616":
        # dehao@ reported a TPU setting of max_epoch = 2 with luong234,
        # i.e. decay starts after 2 * 2/3 epochs and halves 4 times.
        # If we keep max_epochs = 8, decay should instead start at
        # 8 * 2/(3 * 4) epochs and halve (4 * 4 = 16) times.
decay_times = 16
start_decay_step = int(num_train_steps / 16.)
remain_steps = num_train_steps - start_decay_step
decay_steps = int(remain_steps / decay_times)
elif not hparams.decay_scheme: # no decay
start_decay_step = num_train_steps
decay_steps = 0
decay_factor = 1.0
elif hparams.decay_scheme:
raise ValueError("Unknown decay scheme %s" % hparams.decay_scheme)
return start_decay_step, decay_steps, decay_factor
def _get_learning_rate_decay(self, hparams):
"""Get learning rate decay."""
start_decay_step, decay_steps, decay_factor = self._get_decay_info(hparams)
utils.print_out(" decay_scheme=%s, start_decay_step=%d, decay_steps %d, "
"decay_factor %g" % (hparams.decay_scheme, start_decay_step,
decay_steps, decay_factor))
return tf.cond(
self.global_step < start_decay_step,
lambda: self.learning_rate,
lambda: tf.train.exponential_decay( # pylint: disable=g-long-lambda
self.learning_rate,
(self.global_step - start_decay_step),
decay_steps, decay_factor, staircase=True),
name="learning_rate_decay_cond")
def init_embeddings(self, hparams, scope):
"""Init embeddings."""
self.embedding_encoder, self.embedding_decoder = (
model_helper.create_emb_for_encoder_and_decoder(
share_vocab=hparams.share_vocab,
src_vocab_size=self.src_vocab_size,
tgt_vocab_size=self.tgt_vocab_size,
src_embed_size=self.num_units,
tgt_embed_size=self.num_units,
dtype=self.dtype,
num_enc_partitions=hparams.num_enc_emb_partitions,
num_dec_partitions=hparams.num_dec_emb_partitions,
src_vocab_file=hparams.src_vocab_file,
tgt_vocab_file=hparams.tgt_vocab_file,
src_embed_file=hparams.src_embed_file,
tgt_embed_file=hparams.tgt_embed_file,
use_char_encode=hparams.use_char_encode,
scope=scope,
))
def build_graph(self, hparams, scope=None):
"""Subclass must implement this method.
Creates a sequence-to-sequence model with dynamic RNN decoder API.
Args:
hparams: Hyperparameter configurations.
scope: VariableScope for the created subgraph; default "dynamic_seq2seq".
Returns:
A tuple of the form (logits, loss_tuple, final_context_state, sample_id),
where:
logits: float32 Tensor [batch_size x num_decoder_symbols].
        loss: the total loss divided by batch_size.
final_context_state: the final state of decoder RNN.
sample_id: sampling indices.
Raises:
      ValueError: if encoder_type is neither mono nor bi, or
        attention_option is not one of (luong | scaled_luong |
        bahdanau | normed_bahdanau).
"""
utils.print_out("# Creating %s graph ..." % self.mode)
# Projection
with tf.variable_scope(scope or "build_network"):
with tf.variable_scope("decoder/output_projection"):
self.output_layer = tf.layers.Dense(
self.tgt_vocab_size, use_bias=False, name="output_projection",
dtype=self.dtype)
with tf.variable_scope(scope or "dynamic_seq2seq", dtype=self.dtype):
# Encoder
if hparams.language_model: # no encoder for language modeling
utils.print_out(" language modeling: no encoder")
self.encoder_outputs = None
encoder_state = None
else:
self.encoder_outputs, encoder_state = self._build_encoder(hparams)
## Decoder
logits, sample_id = (
self._build_decoder(self.encoder_outputs, encoder_state, hparams))
## Loss
if self.mode != tf.contrib.learn.ModeKeys.INFER:
loss = self._compute_loss(logits, hparams.label_smoothing)
else:
loss = tf.constant(0.0)
return logits, loss, sample_id
@abc.abstractmethod
def _build_encoder(self, hparams):
"""Subclass must implement this.
Build and run an RNN encoder.
Args:
hparams: Hyperparameters configurations.
Returns:
A tuple of encoder_outputs and encoder_state.
"""
pass
def _get_infer_maximum_iterations(self, hparams, source_sequence_length):
"""Maximum decoding steps at inference time."""
if hparams.tgt_max_len_infer:
maximum_iterations = hparams.tgt_max_len_infer
utils.print_out(" decoding maximum_iterations %d" % maximum_iterations)
else:
# TODO(thangluong): add decoding_length_factor flag
decoding_length_factor = 2.0
max_encoder_length = tf.reduce_max(source_sequence_length)
maximum_iterations = tf.to_int32(
tf.round(tf.to_float(max_encoder_length) * decoding_length_factor))
return maximum_iterations
def _build_decoder(self, encoder_outputs, encoder_state, hparams):
"""Build and run a RNN decoder with a final projection layer.
Args:
encoder_outputs: The outputs of encoder for every time step.
encoder_state: The final state of the encoder.
hparams: The Hyperparameters configurations.
Returns:
A tuple of final logits and final decoder state:
logits: size [time, batch_size, vocab_size] when time_major=True.
"""
## Decoder.
with tf.variable_scope("decoder") as decoder_scope:
## Train or eval
if self.mode != tf.contrib.learn.ModeKeys.INFER:
# [batch, time]
target_input = self.features["target_input"]
if self.time_major:
# If using time_major mode, then target_input should be [time, batch]
# then the decoder_emb_inp would be [time, batch, dim]
target_input = tf.transpose(target_input)
decoder_emb_inp = tf.cast(
tf.nn.embedding_lookup(self.embedding_decoder, target_input),
self.dtype)
if not hparams.use_fused_lstm_dec:
cell, decoder_initial_state = self._build_decoder_cell(
hparams, encoder_outputs, encoder_state,
self.features["source_sequence_length"])
if hparams.use_dynamic_rnn:
final_rnn_outputs, _ = tf.nn.dynamic_rnn(
cell,
decoder_emb_inp,
sequence_length=self.features["target_sequence_length"],
initial_state=decoder_initial_state,
dtype=self.dtype,
scope=decoder_scope,
parallel_iterations=hparams.parallel_iterations,
time_major=self.time_major)
else:
final_rnn_outputs, _ = tf.contrib.recurrent.functional_rnn(
cell,
decoder_emb_inp,
sequence_length=tf.to_int32(
self.features["target_sequence_length"]),
initial_state=decoder_initial_state,
dtype=self.dtype,
scope=decoder_scope,
time_major=self.time_major,
use_tpu=False)
else:
if hparams.pass_hidden_state:
decoder_initial_state = encoder_state
else:
decoder_initial_state = tuple((tf.nn.rnn_cell.LSTMStateTuple(
tf.zeros_like(s[0]), tf.zeros_like(s[1])) for s in encoder_state))
final_rnn_outputs = self._build_decoder_fused_for_training(
encoder_outputs, decoder_initial_state, decoder_emb_inp, self.hparams)
# We chose to apply the output_layer to all timesteps for speed:
# 10% improvements for small models & 20% for larger ones.
# If memory is a concern, we should apply output_layer per timestep.
logits = self.output_layer(final_rnn_outputs)
sample_id = None
## Inference
else:
cell, decoder_initial_state = self._build_decoder_cell(
hparams, encoder_outputs, encoder_state,
self.features["source_sequence_length"])
assert hparams.infer_mode == "beam_search"
_, tgt_vocab_table = vocab_utils.create_vocab_tables(
hparams.src_vocab_file, hparams.tgt_vocab_file, hparams.share_vocab)
tgt_sos_id = tf.cast(
tgt_vocab_table.lookup(tf.constant(hparams.sos)), tf.int32)
tgt_eos_id = tf.cast(
tgt_vocab_table.lookup(tf.constant(hparams.eos)), tf.int32)
start_tokens = tf.fill([self.batch_size], tgt_sos_id)
end_token = tgt_eos_id
beam_width = hparams.beam_width
length_penalty_weight = hparams.length_penalty_weight
coverage_penalty_weight = hparams.coverage_penalty_weight
my_decoder = beam_search_decoder.BeamSearchDecoder(
cell=cell,
embedding=self.embedding_decoder,
start_tokens=start_tokens,
end_token=end_token,
initial_state=decoder_initial_state,
beam_width=beam_width,
output_layer=self.output_layer,
length_penalty_weight=length_penalty_weight,
coverage_penalty_weight=coverage_penalty_weight)
        # maximum_iterations: the maximum number of decoding steps.
maximum_iterations = self._get_infer_maximum_iterations(
hparams, self.features["source_sequence_length"])
# Dynamic decoding
outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(
my_decoder,
maximum_iterations=maximum_iterations,
output_time_major=self.time_major,
swap_memory=True,
scope=decoder_scope)
logits = tf.no_op()
sample_id = outputs.predicted_ids
return logits, sample_id
def get_max_time(self, tensor):
time_axis = 0 if self.time_major else 1
return tensor.shape[time_axis].value or tf.shape(tensor)[time_axis]
@abc.abstractmethod
def _build_decoder_cell(self, hparams, encoder_outputs, encoder_state,
source_sequence_length):
"""Subclass must implement this.
Args:
hparams: Hyperparameters configurations.
encoder_outputs: The outputs of encoder for every time step.
encoder_state: The final state of the encoder.
source_sequence_length: sequence length of encoder_outputs.
Returns:
A tuple of a multi-layer RNN cell used by decoder and the initial state of
the decoder RNN.
"""
pass
def _softmax_cross_entropy_loss(self, logits, labels, label_smoothing):
"""Compute softmax loss or sampled softmax loss."""
use_defun = os.environ["use_defun"] == "true"
use_xla = os.environ["use_xla"] == "true"
# @function.Defun(noinline=True, compiled=use_xla)
def ComputePositiveCrossent(labels, logits):
crossent = math_utils.sparse_softmax_crossent_with_logits(
labels=labels, logits=logits)
return crossent
crossent = ComputePositiveCrossent(labels, logits)
assert crossent.dtype == tf.float32
def _safe_shape_div(x, y):
"""Divides `x / y` assuming `x, y >= 0`, treating `0 / 0 = 0`."""
return x // tf.maximum(y, 1)
@function.Defun(tf.float32, tf.float32, compiled=use_xla)
def ReduceSumGrad(x, grad):
"""docstring."""
input_shape = tf.shape(x)
# TODO(apassos) remove this once device placement for eager ops makes more
# sense.
with tf.colocate_with(input_shape):
output_shape_kept_dims = math_ops.reduced_shape(input_shape, -1)
tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims)
grad = tf.reshape(grad, output_shape_kept_dims)
return tf.tile(grad, tile_scaling)
def ReduceSum(x):
"""docstring."""
return tf.reduce_sum(x, axis=-1)
if use_defun:
ReduceSum = function.Defun(
tf.float32,
compiled=use_xla,
noinline=True,
grad_func=ReduceSumGrad)(ReduceSum)
if abs(label_smoothing) > 1e-3:
# pylint:disable=invalid-name
def ComputeNegativeCrossentFwd(logits):
"""docstring."""
# [time, batch, dim]
# [time, batch]
max_logits = tf.reduce_max(logits, axis=-1)
# [time, batch, dim]
shifted_logits = logits - tf.expand_dims(max_logits, axis=-1)
# Always compute loss in fp32
shifted_logits = tf.to_float(shifted_logits)
# [time, batch]
log_sum_exp = tf.log(ReduceSum(tf.exp(shifted_logits)))
# [time, batch, dim] - [time, batch, 1] --> reduce_sum(-1) -->
# [time, batch]
neg_crossent = ReduceSum(
shifted_logits - tf.expand_dims(log_sum_exp, axis=-1))
return neg_crossent
def ComputeNegativeCrossent(logits):
return ComputeNegativeCrossentFwd(logits)
if use_defun:
ComputeNegativeCrossent = function.Defun(
compiled=use_xla)(ComputeNegativeCrossent)
neg_crossent = ComputeNegativeCrossent(logits)
neg_crossent = tf.to_float(neg_crossent)
num_labels = logits.shape[-1].value
crossent = (1.0 - label_smoothing) * crossent - (
label_smoothing / tf.to_float(num_labels) * neg_crossent)
# pylint:enable=invalid-name
return crossent
def _compute_loss(self, logits, label_smoothing):
"""Compute optimization loss."""
target_output = self.features["target_output"]
if self.time_major:
target_output = tf.transpose(target_output)
max_time = self.get_max_time(target_output)
self.batch_seq_len = max_time
crossent = self._softmax_cross_entropy_loss(
logits, target_output, label_smoothing)
assert crossent.dtype == tf.float32
target_weights = tf.sequence_mask(
self.features["target_sequence_length"], max_time, dtype=crossent.dtype)
if self.time_major:
# [time, batch] if time_major, since the crossent is [time, batch] in this
# case.
target_weights = tf.transpose(target_weights)
loss = tf.reduce_sum(crossent * target_weights) / tf.to_float(
self.batch_size)
return loss
def build_encoder_states(self, include_embeddings=False):
"""Stack encoder states and return tensor [batch, length, layer, size]."""
assert self.mode == tf.contrib.learn.ModeKeys.INFER
if include_embeddings:
stack_state_list = tf.stack(
[self.encoder_emb_inp] + self.encoder_state_list, 2)
else:
stack_state_list = tf.stack(self.encoder_state_list, 2)
# transform from [length, batch, ...] -> [batch, length, ...]
if self.time_major:
stack_state_list = tf.transpose(stack_state_list, [1, 0, 2, 3])
return stack_state_list
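# ---------------------------------------------------------------------------
# Illustrative sketches, not part of the original file. They restate, in
# plain Python/NumPy, the schedules and loss that BaseModel above computes in
# graph mode (_get_learning_rate_warmup, _get_learning_rate_decay, and
# _softmax_cross_entropy_loss), so the formulas can be sanity-checked in
# isolation. All argument values are hypothetical examples.
# ---------------------------------------------------------------------------
def _example_t2t_warmup(base_lr, warmup_steps, step):
  """The t2t warmup schedule: the lr starts 100x smaller at step 0."""
  warmup_factor = np.exp(np.log(0.01) / warmup_steps)
  if step < warmup_steps:
    return base_lr * warmup_factor ** (warmup_steps - step)
  return base_lr
# e.g. _example_t2t_warmup(5e-4, 200, 0) == 5e-6, and at step 200 it is 5e-4.
def _example_staircase_decay(base_lr, step, start_decay_step, decay_steps,
                             decay_factor):
  """The staircase exponential decay: halve every decay_steps after start."""
  if step < start_decay_step or decay_steps == 0:
    return base_lr
  return base_lr * decay_factor ** ((step - start_decay_step) // decay_steps)
def _example_smoothed_crossent(logits, labels, eps):
  """Label-smoothed cross entropy in NumPy, matching the Defun-based code."""
  # smoothed = (1 - eps) * xent - eps / num_classes * sum_k(log softmax_k)
  shifted = logits - logits.max(axis=-1, keepdims=True)
  log_z = np.log(np.exp(shifted).sum(axis=-1))
  log_probs = shifted - log_z[..., None]
  positive = -np.take_along_axis(
      log_probs, labels[..., None], axis=-1)[..., 0]
  negative = log_probs.sum(axis=-1)
  return (1.0 - eps) * positive - eps / logits.shape[-1] * negative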
| DeepLearningExamples-master | TensorFlow/Translation/GNMT/model.py |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow NMT model implementation."""
from __future__ import print_function
import argparse
import os
import random
import sys
import subprocess
# import matplotlib.image as mpimg
import numpy as np
import time
import tensorflow as tf
import dllogger
import estimator
from utils import evaluation_utils
from utils import iterator_utils
from utils import misc_utils as utils
from utils import vocab_utils
from variable_mgr import constants
utils.check_tensorflow_version()
FLAGS = None
# LINT.IfChange
def add_arguments(parser):
"""Build ArgumentParser."""
parser.register("type", "bool", lambda v: v.lower() == "true")
# network
parser.add_argument(
"--num_units", type=int, default=1024, help="Network size.")
parser.add_argument(
"--num_layers", type=int, default=4, help="Network depth.")
parser.add_argument("--num_encoder_layers", type=int, default=None,
help="Encoder depth, equal to num_layers if None.")
parser.add_argument("--num_decoder_layers", type=int, default=None,
help="Decoder depth, equal to num_layers if None.")
parser.add_argument(
"--encoder_type",
type=str,
default="gnmt",
help="""\
uni | bi | gnmt.
For bi, we build num_encoder_layers/2 bi-directional layers.
For gnmt, we build 1 bi-directional layer, and (num_encoder_layers - 1)
uni-directional layers.\
""")
parser.add_argument(
"--residual",
type="bool",
nargs="?",
const=True,
default=True,
help="Whether to add residual connections.")
parser.add_argument("--time_major", type="bool", nargs="?", const=True,
default=True,
help="Whether to use time-major mode for dynamic RNN.")
parser.add_argument("--num_embeddings_partitions", type=int, default=0,
help="Number of partitions for embedding vars.")
# attention mechanisms
parser.add_argument(
"--attention",
type=str,
default="normed_bahdanau",
help="""\
luong | scaled_luong | bahdanau | normed_bahdanau or set to "" for no
attention\
""")
parser.add_argument(
"--attention_architecture",
type=str,
default="gnmt_v2",
help="""\
standard | gnmt | gnmt_v2.
standard: use top layer to compute attention.
gnmt: GNMT style of computing attention, use previous bottom layer to
compute attention.
gnmt_v2: similar to gnmt, but use current bottom layer to compute
attention.\
""")
parser.add_argument(
"--output_attention", type="bool", nargs="?", const=True,
default=True,
help="""\
      Only used in standard attention_architecture. Whether to use attention
      as the cell output at each timestep.\
      """)
parser.add_argument(
"--pass_hidden_state", type="bool", nargs="?", const=True,
default=True,
help="""\
Whether to pass encoder's hidden state to decoder when using an attention
based model.\
""")
# optimizer
parser.add_argument(
"--optimizer", type=str, default="adam", help="sgd | adam")
parser.add_argument(
"--learning_rate",
type=float,
default=5e-4,
help="Learning rate. Adam: 0.001 | 0.0001")
parser.add_argument("--warmup_steps", type=int, default=200,
help="How many steps we inverse-decay learning.")
parser.add_argument("--warmup_scheme", type=str, default="t2t", help="""\
How to warmup learning rates. Options include:
t2t: Tensor2Tensor's way, start with lr 100 times smaller, then
exponentiate until the specified lr.\
""")
parser.add_argument(
"--decay_scheme", type=str, default="luong234", help="""\
How we decay learning rate. Options include:
luong234: after 2/3 num train steps, we start halving the learning rate
for 4 times before finishing.
luong5: after 1/2 num train steps, we start halving the learning rate
          for 5 times before finishing.
luong10: after 1/2 num train steps, we start halving the learning rate
for 10 times before finishing.\
""")
parser.add_argument(
"--max_train_epochs", type=int, default=6, help="Max number of epochs.")
parser.add_argument(
"--target_bleu", type=float, default=None, help="Target bleu.")
parser.add_argument("--colocate_gradients_with_ops", type="bool", nargs="?",
const=True,
default=True,
help=("Whether try colocating gradients with "
"corresponding op"))
parser.add_argument("--label_smoothing", type=float, default=0.1,
help=("If nonzero, smooth the labels towards "
"1/num_classes."))
# initializer
parser.add_argument("--init_op", type=str, default="uniform",
help="uniform | glorot_normal | glorot_uniform")
parser.add_argument("--init_weight", type=float, default=0.1,
help=("for uniform init_op, initialize weights "
"between [-this, this]."))
# data
parser.add_argument(
"--src", type=str, default="en", help="Source suffix, e.g., en.")
parser.add_argument(
"--tgt", type=str, default="de", help="Target suffix, e.g., de.")
parser.add_argument(
"--data_dir", type=str, default="data/wmt16_de_en",
help="Training/eval data directory.")
parser.add_argument(
"--train_prefix",
type=str,
default="train.tok.clean.bpe.32000",
help="Train prefix, expect files with src/tgt suffixes.")
parser.add_argument(
"--test_prefix",
type=str,
default="newstest2014.tok.bpe.32000",
help="Test prefix, expect files with src/tgt suffixes.")
parser.add_argument(
"--translate_file",
type=str,
help="File to translate, works only with translate mode")
parser.add_argument(
"--output_dir", type=str, default="results",
help="Store log/model files.")
# Vocab
parser.add_argument(
"--vocab_prefix",
type=str,
default="vocab.bpe.32000",
help="""\
Vocab prefix, expect files with src/tgt suffixes.\
""")
parser.add_argument(
"--embed_prefix",
type=str,
default=None,
help="""\
Pretrained embedding prefix, expect files with src/tgt suffixes.
The embedding files should be Glove formatted txt files.\
""")
parser.add_argument("--sos", type=str, default="<s>",
help="Start-of-sentence symbol.")
parser.add_argument("--eos", type=str, default="</s>",
help="End-of-sentence symbol.")
parser.add_argument(
"--share_vocab",
type="bool",
nargs="?",
const=True,
default=True,
help="""\
Whether to use the source vocab and embeddings for both source and
target.\
""")
parser.add_argument("--check_special_token", type="bool", default=True,
help="""\
                      Whether to check that the special sos, eos, unk tokens
                      exist in the vocab files.\
""")
# Sequence lengths
parser.add_argument(
"--src_max_len",
type=int,
default=50,
help="Max length of src sequences during training (including EOS).")
parser.add_argument(
"--tgt_max_len",
type=int,
default=50,
help="Max length of tgt sequences during training (including BOS).")
parser.add_argument("--src_max_len_infer", type=int, default=None,
help="Max length of src sequences during inference (including EOS).")
parser.add_argument("--tgt_max_len_infer", type=int, default=80,
help="""\
      Max length of tgt sequences during inference (including BOS). Also used to restrict the
maximum decoding length.\
""")
# Default settings works well (rarely need to change)
parser.add_argument("--unit_type", type=str, default="lstm",
help="lstm | gru | layer_norm_lstm | nas")
parser.add_argument("--forget_bias", type=float, default=0.0,
help="Forget bias for BasicLSTMCell.")
parser.add_argument("--dropout", type=float, default=0.2,
help="Dropout rate (not keep_prob)")
parser.add_argument("--max_gradient_norm", type=float, default=5.0,
help="Clip gradients to this norm.")
parser.add_argument("--batch_size", type=int, default=128, help="Total batch size.")
parser.add_argument(
"--num_buckets",
type=int,
default=5,
help="Put data into similar-length buckets (only for training).")
# SPM
parser.add_argument("--subword_option", type=str, default="bpe",
choices=["", "bpe", "spm"],
help="""\
Set to bpe or spm to activate subword desegmentation.\
""")
# Experimental encoding feature.
parser.add_argument("--use_char_encode", type="bool", default=False,
help="""\
                      Whether to split each word or bpe token into characters,
                      and then generate the word-level representation from the
                      character representation.
""")
# Misc
parser.add_argument(
"--save_checkpoints_steps", type=int, default=2000,
help="save_checkpoints_steps")
parser.add_argument(
"--log_step_count_steps", type=int, default=10,
help=("The frequency, in number of global steps, that the global step "
"and the loss will be logged during training"))
parser.add_argument(
"--num_gpus", type=int, default=1, help="Number of gpus in each worker.")
parser.add_argument("--hparams_path", type=str, default=None,
help=("Path to standard hparams json file that overrides"
"hparams values from FLAGS."))
parser.add_argument(
"--random_seed",
type=int,
default=1,
help="Random seed (>0, set a specific seed).")
parser.add_argument("--language_model", type="bool", nargs="?",
const=True, default=False,
help="True to train a language model, ignoring encoder")
# Inference
parser.add_argument("--ckpt", type=str, default=None,
help="Checkpoint file to load a model for inference. (defaults to newest checkpoint)")
parser.add_argument(
"--infer_batch_size",
type=int,
default=128,
help="Batch size for inference mode.")
parser.add_argument("--detokenizer_file", type=str,
default=None,
help=("""Detokenizer script file. Default: DATA_DIR/mosesdecoder/scripts/tokenizer/detokenizer.perl"""))
parser.add_argument("--tokenizer_file", type=str,
default=None,
help=("""Tokenizer script file. Default: DATA_DIR/mosesdecoder/scripts/tokenizer/tokenizer.perl"""))
# Advanced inference arguments
parser.add_argument("--infer_mode", type=str, default="beam_search",
choices=["greedy", "beam_search"],
help="Which type of decoder to use during inference.")
parser.add_argument("--beam_width", type=int, default=5,
help=("""\
beam width when using beam search decoder. If 0, use standard
decoder with greedy helper.\
"""))
parser.add_argument(
"--length_penalty_weight",
type=float,
default=0.6,
help="Length penalty for beam search.")
parser.add_argument(
"--coverage_penalty_weight",
type=float,
default=0.1,
help="Coverage penalty for beam search.")
# Job info
parser.add_argument("--num_workers", type=int, default=1,
help="Number of workers (inference only).")
parser.add_argument("--amp", action='store_true',
help="use amp for training and inference")
parser.add_argument("--use_fastmath", type="bool", default=False,
help="use_fastmath for training and inference")
parser.add_argument("--use_fp16", type="bool", default=False,
help="use_fp16 for training and inference")
parser.add_argument(
"--fp16_loss_scale",
type=float,
default=128,
help="If fp16 is enabled, the loss is multiplied by this amount "
"right before gradients are computed, then each gradient "
"is divided by this amount. Mathematically, this has no "
"effect, but it helps avoid fp16 underflow. Set to 1 to "
"effectively disable.")
parser.add_argument(
"--enable_auto_loss_scale",
type="bool",
default=True,
help="If True and use_fp16 is True, automatically adjust the "
"loss scale during training.")
parser.add_argument(
"--fp16_inc_loss_scale_every_n",
type=int,
default=128,
help="If fp16 is enabled and enable_auto_loss_scale is "
"True, increase the loss scale every n steps.")
parser.add_argument(
"--check_tower_loss_numerics",
type="bool",
default=False, # Set to false for xla.compile()
help="whether to check tower loss numerics")
parser.add_argument(
"--use_fp32_batch_matmul",
type="bool",
default=False,
help="Whether to use fp32 batch matmul")
# Performance
# XLA
parser.add_argument(
"--force_inputs_padding",
type="bool",
default=False,
help="Force padding input batch to src_max_len and tgt_max_len")
parser.add_argument(
"--use_xla",
type="bool",
default=False,
help="Use xla to compile a few selected locations, mostly Defuns.")
parser.add_argument(
"--xla_compile",
type="bool",
default=False,
help="Use xla.compile() for each tower's fwd and bak pass.")
parser.add_argument(
"--use_autojit_xla",
type="bool",
default=False,
help="Use auto jit xla.")
# GPU knobs
parser.add_argument(
"--use_pintohost_optimizer",
type="bool",
default=False,
help="whether to use PinToHost optimizer")
parser.add_argument(
"--use_cudnn_lstm",
type="bool",
default=False,
help="whether to use cudnn_lstm for encoder, non residual layers")
parser.add_argument(
"--use_loose_bidi_cudnn_lstm",
type="bool",
default=False,
help="whether to use loose bidi cudnn_lstm")
parser.add_argument(
"--use_fused_lstm",
type="bool",
default=True,
help="whether to use fused lstm and variant. If enabled, training will "
"use LSTMBlockFusedCell, infer will use LSTMBlockCell when appropriate.")
parser.add_argument(
"--use_fused_lstm_dec",
type="bool",
default=False,
help="whether to use fused lstm for decoder (training only).")
parser.add_argument(
"--gpu_indices",
type=str,
default="",
help="Indices of worker GPUs in ring order")
# Graph knobs
parser.add_argument("--parallel_iterations", type=int, default=10,
help="number of parallel iterations in dynamic_rnn")
parser.add_argument("--use_dist_strategy", type="bool", default=False,
help="whether to use distribution strategy")
parser.add_argument(
"--hierarchical_copy",
type="bool",
default=False,
help="Use hierarchical copies. Currently only optimized for "
"use on a DGX-1 with 8 GPUs and may perform poorly on "
"other hardware. Requires --num_gpus > 1, and only "
"recommended when --num_gpus=8")
parser.add_argument(
"--network_topology",
type=constants.NetworkTopology,
default=constants.NetworkTopology.DGX1,
choices=list(constants.NetworkTopology))
parser.add_argument(
"--use_block_lstm",
type="bool",
default=False,
help="whether to use block lstm")
parser.add_argument(
"--use_defun",
type="bool",
default=False,
help="whether to use Defun")
# Gradient tricks
parser.add_argument(
"--gradient_repacking",
type=int,
default=0,
help="Use gradient repacking. It"
"currently only works with replicated mode. At the end of"
"of each step, it repacks the gradients for more efficient"
"cross-device transportation. A non-zero value specifies"
"the number of split packs that will be formed.")
parser.add_argument(
"--compact_gradient_transfer",
type="bool",
default=True,
help="Compact gradient as much as possible for cross-device transfer and "
"aggregation.")
parser.add_argument(
"--all_reduce_spec",
type=str,
default="nccl",
help="A specification of the all_reduce algorithm to be used "
"for reducing gradients. For more details, see "
"parse_all_reduce_spec in variable_mgr.py. An "
"all_reduce_spec has BNF form:\n"
"int ::= positive whole number\n"
"g_int ::= int[KkMGT]?\n"
"alg_spec ::= alg | alg#int\n"
"range_spec ::= alg_spec | alg_spec/alg_spec\n"
"spec ::= range_spec | range_spec:g_int:range_spec\n"
"NOTE: not all syntactically correct constructs are "
"supported.\n\n"
"Examples:\n "
"\"xring\" == use one global ring reduction for all "
"tensors\n"
"\"pscpu\" == use CPU at worker 0 to reduce all tensors\n"
"\"nccl\" == use NCCL to locally reduce all tensors. "
"Limited to 1 worker.\n"
"\"nccl/xring\" == locally (to one worker) reduce values "
"using NCCL then ring reduce across workers.\n"
"\"pscpu:32k:xring\" == use pscpu algorithm for tensors of "
"size up to 32kB, then xring for larger tensors.")
parser.add_argument(
"--agg_small_grads_max_bytes",
type=int,
default=0,
help="If > 0, try to aggregate tensors of less than this "
"number of bytes prior to all-reduce.")
parser.add_argument(
"--agg_small_grads_max_group",
type=int,
default=10,
help="When aggregating small tensors for all-reduce do not "
"aggregate more than this many into one new tensor.")
parser.add_argument(
"--allreduce_merge_scope",
type=int,
default=1,
help="Establish a name scope around this many "
"gradients prior to creating the all-reduce operations. "
"It may affect the ability of the backend to merge "
"parallel ops.")
# Other knobs
parser.add_argument(
"--local_parameter_device",
type=str,
default="gpu",
help="Device to use as parameter server: cpu or gpu. For "
"distributed training, it can affect where caching of "
"variables happens.")
parser.add_argument(
"--use_resource_vars",
type="bool",
default=False,
help="Use resource variables instead of normal variables. "
"Resource variables are slower, but this option is useful "
"for debugging their performance.")
parser.add_argument("--debug", type="bool", default=False,
help="Debug train and eval")
parser.add_argument(
"--debug_num_train_steps", type=int, default=None, help="Num steps to train.")
parser.add_argument("--show_metrics", type="bool", default=True,
help="whether to show detailed metrics")
parser.add_argument("--clip_grads", type="bool", default=True,
help="whether to clip gradients")
parser.add_argument("--profile", type="bool", default=False,
help="If generate profile")
parser.add_argument("--profile_save_steps", type=int, default=10,
help="Save timeline every N steps.")
parser.add_argument("--use_dynamic_rnn", type="bool", default=True)
parser.add_argument("--use_synthetic_data", type="bool", default=False)
parser.add_argument(
"--mode", type=str, default="train_and_eval",
choices=("train_and_eval", "infer", "translate"))
def create_hparams(flags):
"""Create training hparams."""
return tf.contrib.training.HParams(
# Data
src=flags.src,
tgt=flags.tgt,
train_prefix=os.path.join(flags.data_dir, flags.train_prefix),
test_prefix=os.path.join(flags.data_dir, flags.test_prefix),
translate_file=flags.translate_file,
vocab_prefix=os.path.join(flags.data_dir, flags.vocab_prefix),
embed_prefix=flags.embed_prefix,
output_dir=flags.output_dir,
# Networks
num_units=flags.num_units,
num_encoder_layers=(flags.num_encoder_layers or flags.num_layers),
num_decoder_layers=(flags.num_decoder_layers or flags.num_layers),
dropout=flags.dropout,
unit_type=flags.unit_type,
encoder_type=flags.encoder_type,
residual=flags.residual,
time_major=flags.time_major,
num_embeddings_partitions=flags.num_embeddings_partitions,
# Attention mechanisms
attention=flags.attention,
attention_architecture=flags.attention_architecture,
output_attention=flags.output_attention,
pass_hidden_state=flags.pass_hidden_state,
# Train
optimizer=flags.optimizer,
max_train_epochs=flags.max_train_epochs,
target_bleu=flags.target_bleu,
label_smoothing=flags.label_smoothing,
batch_size=flags.batch_size,
init_op=flags.init_op,
init_weight=flags.init_weight,
max_gradient_norm=flags.max_gradient_norm,
learning_rate=flags.learning_rate,
warmup_steps=flags.warmup_steps,
warmup_scheme=flags.warmup_scheme,
decay_scheme=flags.decay_scheme,
colocate_gradients_with_ops=flags.colocate_gradients_with_ops,
# Data constraints
num_buckets=flags.num_buckets,
src_max_len=flags.src_max_len,
tgt_max_len=flags.tgt_max_len,
# Inference
src_max_len_infer=flags.src_max_len_infer,
tgt_max_len_infer=flags.tgt_max_len_infer,
ckpt=flags.ckpt,
infer_batch_size=flags.infer_batch_size,
detokenizer_file=flags.detokenizer_file if flags.detokenizer_file is not None \
else os.path.join(flags.data_dir, 'mosesdecoder/scripts/tokenizer/detokenizer.perl'),
tokenizer_file=flags.tokenizer_file if flags.tokenizer_file is not None \
else os.path.join(flags.data_dir, 'mosesdecoder/scripts/tokenizer/tokenizer.perl'),
# Advanced inference arguments
infer_mode=flags.infer_mode,
beam_width=flags.beam_width,
length_penalty_weight=flags.length_penalty_weight,
coverage_penalty_weight=flags.coverage_penalty_weight,
# Vocab
sos=flags.sos if flags.sos else vocab_utils.SOS,
eos=flags.eos if flags.eos else vocab_utils.EOS,
subword_option=flags.subword_option,
check_special_token=flags.check_special_token,
use_char_encode=flags.use_char_encode,
# Misc
forget_bias=flags.forget_bias,
num_gpus=flags.num_gpus,
save_checkpoints_steps=flags.save_checkpoints_steps,
log_step_count_steps=flags.log_step_count_steps,
epoch_step=0, # record where we were within an epoch.
share_vocab=flags.share_vocab,
random_seed=flags.random_seed,
language_model=flags.language_model,
amp=flags.amp,
use_fastmath=flags.use_fastmath,
use_fp16=flags.use_fp16,
fp16_loss_scale=flags.fp16_loss_scale,
enable_auto_loss_scale=flags.enable_auto_loss_scale,
fp16_inc_loss_scale_every_n=flags.fp16_inc_loss_scale_every_n,
check_tower_loss_numerics=flags.check_tower_loss_numerics,
use_fp32_batch_matmul=flags.use_fp32_batch_matmul,
# Performance
      # GPU knobs
force_inputs_padding=flags.force_inputs_padding,
use_xla=flags.use_xla,
xla_compile=flags.xla_compile,
use_autojit_xla=flags.use_autojit_xla,
use_pintohost_optimizer=flags.use_pintohost_optimizer,
use_cudnn_lstm=flags.use_cudnn_lstm,
use_loose_bidi_cudnn_lstm=flags.use_loose_bidi_cudnn_lstm,
use_fused_lstm=flags.use_fused_lstm,
use_fused_lstm_dec=flags.use_fused_lstm_dec,
gpu_indices=flags.gpu_indices,
# Graph knobs
parallel_iterations=flags.parallel_iterations,
use_dynamic_rnn=flags.use_dynamic_rnn,
use_dist_strategy=flags.use_dist_strategy,
hierarchical_copy=flags.hierarchical_copy,
network_topology=flags.network_topology,
use_block_lstm=flags.use_block_lstm,
# Grad tricks
gradient_repacking=flags.gradient_repacking,
compact_gradient_transfer=flags.compact_gradient_transfer,
all_reduce_spec=flags.all_reduce_spec,
agg_small_grads_max_bytes=flags.agg_small_grads_max_bytes,
agg_small_grads_max_group=flags.agg_small_grads_max_group,
allreduce_merge_scope=flags.allreduce_merge_scope,
# Other knobs
local_parameter_device=("cpu" if flags.num_gpus ==0
else flags.local_parameter_device),
use_resource_vars=flags.use_resource_vars,
debug=flags.debug,
debug_num_train_steps=flags.debug_num_train_steps,
clip_grads=flags.clip_grads,
profile=flags.profile,
profile_save_steps=flags.profile_save_steps,
show_metrics=flags.show_metrics,
use_synthetic_data=flags.use_synthetic_data,
mode=flags.mode,
)
def _add_argument(hparams, key, value, update=True):
"""Add an argument to hparams; if exists, change the value if update==True."""
if hasattr(hparams, key):
if update:
setattr(hparams, key, value)
else:
hparams.add_hparam(key, value)
def extend_hparams(hparams):
"""Add new arguments to hparams."""
# Sanity checks
if hparams.encoder_type == "bi" and hparams.num_encoder_layers % 2 != 0:
raise ValueError("For bi, num_encoder_layers %d should be even" %
hparams.num_encoder_layers)
if (hparams.attention_architecture in ["gnmt"] and
hparams.num_encoder_layers < 2):
raise ValueError("For gnmt attention architecture, "
"num_encoder_layers %d should be >= 2" %
hparams.num_encoder_layers)
if hparams.subword_option and hparams.subword_option not in ["spm", "bpe"]:
raise ValueError("subword option must be either spm, or bpe")
if hparams.infer_mode == "beam_search" and hparams.beam_width <= 0:
raise ValueError("beam_width must greater than 0 when using beam_search"
"decoder.")
if hparams.mode == "translate" and not hparams.translate_file:
raise ValueError("--translate_file flag must be specified in translate mode")
# Different number of encoder / decoder layers
assert hparams.num_encoder_layers and hparams.num_decoder_layers
if hparams.num_encoder_layers != hparams.num_decoder_layers:
hparams.pass_hidden_state = False
utils.print_out("Num encoder layer %d is different from num decoder layer"
" %d, so set pass_hidden_state to False" % (
hparams.num_encoder_layers,
hparams.num_decoder_layers))
# Set residual layers
num_encoder_residual_layers = 0
num_decoder_residual_layers = 0
if hparams.residual:
if hparams.num_encoder_layers > 1:
num_encoder_residual_layers = hparams.num_encoder_layers - 1
if hparams.num_decoder_layers > 1:
num_decoder_residual_layers = hparams.num_decoder_layers - 1
if hparams.encoder_type == "gnmt":
      # The first unidirectional layer (after the bi-directional layer) in
      # the GNMT encoder can't have a residual connection because its input
      # is the concatenation of fw_cell and bw_cell's outputs.
num_encoder_residual_layers = hparams.num_encoder_layers - 2
# Compatible for GNMT models
if hparams.num_encoder_layers == hparams.num_decoder_layers:
num_decoder_residual_layers = num_encoder_residual_layers
_add_argument(hparams, "num_encoder_residual_layers",
num_encoder_residual_layers)
_add_argument(hparams, "num_decoder_residual_layers",
num_decoder_residual_layers)
# Language modeling
if hparams.language_model:
hparams.attention = ""
hparams.attention_architecture = ""
hparams.pass_hidden_state = False
hparams.share_vocab = True
hparams.src = hparams.tgt
utils.print_out("For language modeling, we turn off attention and "
"pass_hidden_state; turn on share_vocab; set src to tgt.")
## Vocab
# Get vocab file names first
if hparams.vocab_prefix:
src_vocab_file = hparams.vocab_prefix + "." + hparams.src
tgt_vocab_file = hparams.vocab_prefix + "." + hparams.tgt
else:
raise ValueError("hparams.vocab_prefix must be provided.")
# Source vocab
src_vocab_size, src_vocab_file = vocab_utils.check_vocab(
src_vocab_file,
hparams.output_dir,
check_special_token=hparams.check_special_token,
sos=hparams.sos,
eos=hparams.eos,
unk=vocab_utils.UNK,
pad_vocab=True)
# Target vocab
if hparams.share_vocab:
utils.print_out(" using source vocab for target")
tgt_vocab_file = src_vocab_file
tgt_vocab_size = src_vocab_size
else:
tgt_vocab_size, tgt_vocab_file = vocab_utils.check_vocab(
tgt_vocab_file,
hparams.output_dir,
check_special_token=hparams.check_special_token,
sos=hparams.sos,
eos=hparams.eos,
unk=vocab_utils.UNK)
_add_argument(hparams, "src_vocab_size", src_vocab_size)
_add_argument(hparams, "tgt_vocab_size", tgt_vocab_size)
_add_argument(hparams, "src_vocab_file", src_vocab_file)
_add_argument(hparams, "tgt_vocab_file", tgt_vocab_file)
# Num embedding partitions
_add_argument(
hparams, "num_enc_emb_partitions", hparams.num_embeddings_partitions)
_add_argument(
hparams, "num_dec_emb_partitions", hparams.num_embeddings_partitions)
# Pretrained Embeddings
_add_argument(hparams, "src_embed_file", "")
_add_argument(hparams, "tgt_embed_file", "")
if hparams.embed_prefix:
src_embed_file = hparams.embed_prefix + "." + hparams.src
tgt_embed_file = hparams.embed_prefix + "." + hparams.tgt
if tf.gfile.Exists(src_embed_file):
utils.print_out(" src_embed_file %s exist" % src_embed_file)
hparams.src_embed_file = src_embed_file
utils.print_out(
"For pretrained embeddings, set num_enc_emb_partitions to 1")
hparams.num_enc_emb_partitions = 1
else:
utils.print_out(" src_embed_file %s doesn't exist" % src_embed_file)
if tf.gfile.Exists(tgt_embed_file):
utils.print_out(" tgt_embed_file %s exist" % tgt_embed_file)
hparams.tgt_embed_file = tgt_embed_file
utils.print_out(
"For pretrained embeddings, set num_dec_emb_partitions to 1")
hparams.num_dec_emb_partitions = 1
else:
utils.print_out(" tgt_embed_file %s doesn't exist" % tgt_embed_file)
# Evaluation
metric = "bleu"
best_metric_dir = os.path.join(hparams.output_dir, "best_" + metric)
tf.gfile.MakeDirs(best_metric_dir)
_add_argument(hparams, "best_" + metric, 0, update=False)
_add_argument(hparams, "best_" + metric + "_dir", best_metric_dir)
return hparams
def create_or_load_hparams(default_hparams, hparams_path):
"""Create hparams or load hparams from output_dir."""
hparams = utils.maybe_parse_standard_hparams(default_hparams, hparams_path)
hparams = extend_hparams(hparams)
# Print HParams
utils.print_hparams(hparams)
return hparams
def run_main(flags, default_hparams, estimator_fn):
"""Run main."""
# Random
random_seed = flags.random_seed
if random_seed is not None and random_seed > 0:
utils.print_out("# Set random seed to %d" % random_seed)
random.seed(random_seed)
np.random.seed(random_seed)
tf.set_random_seed(random_seed)
# Model output directory
output_dir = flags.output_dir
if output_dir and not tf.gfile.Exists(output_dir):
utils.print_out("# Creating output directory %s ..." % output_dir)
tf.gfile.MakeDirs(output_dir)
# Load hparams.
hparams = create_or_load_hparams(default_hparams, flags.hparams_path)
# Train or Evaluation
estimator_fn(hparams)
return hparams
def tokenize(hparams, file, tokenized_file):
utils.print_out("tokenizing {} -> {}".format(file, tokenized_file))
with open(file, 'rb') as input_file:
with open(tokenized_file, 'wb') as output_file:
subprocess.run([hparams.tokenizer_file, '-l', hparams.src], stdin=input_file, stdout=output_file)
def detokenize(hparams, file, detokenized_file):
utils.print_out("detokenizing {} -> {}".format(file, detokenized_file))
with open(file, 'rb') as input_file:
with open(detokenized_file, 'wb') as output_file:
subprocess.run([hparams.detokenizer_file, '-l', hparams.tgt], stdin=input_file, stdout=output_file)
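# Illustrative sketch, not part of the original file: a hypothetical
# pre/post-processing round trip through the Moses scripts configured in
# hparams. The file names are placeholders.
def _example_moses_round_trip(hparams):
  tokenize(hparams, "input.en", "input.en.tok")
  # ... run translation on input.en.tok, producing output.de.tok ...
  detokenize(hparams, "output.de.tok", "output.de")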
def main(unused_argv):
experiment_start = time.time()
tf.logging.set_verbosity(tf.logging.INFO)
if FLAGS.use_fp16 and FLAGS.use_dist_strategy:
raise ValueError("use_fp16 and use_dist_strategy aren't compatible")
if FLAGS.use_fp16 + FLAGS.amp + FLAGS.use_fastmath > 1:
raise ValueError("Only one of use_fp16, amp, use_fastmath can be set")
if FLAGS.amp:
utils.print_out('Enabling TF-AMP')
os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '1'
if FLAGS.use_fastmath:
utils.print_out('Enabling FastMath')
os.environ["TF_ENABLE_CUBLAS_TENSOR_OP_MATH_FP32"] = '1'
os.environ["TF_ENABLE_CUDNN_TENSOR_OP_MATH_FP32"] = '1'
os.environ["TF_ENABLE_CUDNN_RNN_TENSOR_OP_MATH_FP32"] = '1'
# Set up hacky envvars.
# Hack that affects Defun in attention_wrapper.py
active_xla_option_nums = np.sum([FLAGS.use_xla, FLAGS.use_autojit_xla,
FLAGS.xla_compile])
if active_xla_option_nums > 1:
raise ValueError(
"Only one of use_xla, xla_compile, use_autojit_xla can be set")
os.environ["use_xla"] = str(FLAGS.use_xla).lower()
if FLAGS.use_xla:
os.environ["use_defun"] = str(True).lower()
else:
os.environ["use_defun"] = str(FLAGS.use_defun).lower()
utils.print_out("use_defun is %s for attention" % os.environ["use_defun"])
# TODO(jamesqin): retire this config after Cuda9.1
os.environ["use_fp32_batch_matmul"] = ("true" if FLAGS.use_fp32_batch_matmul
else "false")
os.environ["xla_compile"] = "true" if FLAGS.xla_compile else "false"
os.environ["force_inputs_padding"] = (
"true" if FLAGS.force_inputs_padding else "false")
if FLAGS.mode == "train":
utils.print_out("Running training mode.")
default_hparams = create_hparams(FLAGS)
run_main(FLAGS, default_hparams, estimator.train_fn)
elif FLAGS.mode == "infer" or FLAGS.mode == "translate":
if FLAGS.mode == "infer":
utils.print_out("Running inference mode.")
translate_mode = False
else:
utils.print_out("Running translate mode on file {}.".format(FLAGS.translate_file))
translate_mode = True
# Random
random_seed = FLAGS.random_seed
if random_seed is not None and random_seed > 0:
utils.print_out("# Set random seed to %d" % random_seed)
random.seed(random_seed)
np.random.seed(random_seed)
tf.set_random_seed(random_seed)
# Model output directory
output_dir = FLAGS.output_dir
if output_dir and not tf.gfile.Exists(output_dir):
utils.print_out("# Creating output directory %s ..." % output_dir)
tf.gfile.MakeDirs(output_dir)
dllogger.init(backends=[
dllogger.StdOutBackend(dllogger.Verbosity.DEFAULT),
dllogger.JSONStreamBackend(dllogger.Verbosity.VERBOSE, os.path.join(FLAGS.output_dir, FLAGS.mode + '-report.json')),
])
dllogger.log('PARAMETER', vars(FLAGS))
# Load hparams.
default_hparams = create_hparams(FLAGS)
default_hparams.num_buckets = 1
# The estimator model_fn is written in a way allowing train hparams to be
# passed in infer mode.
hparams = create_or_load_hparams(default_hparams, FLAGS.hparams_path)
utils.print_out("infer_hparams:")
utils.print_hparams(hparams)
if translate_mode:
tokenize(hparams, hparams.translate_file, hparams.translate_file + ".tok")
eval_sentences, eval_src_tokens, _ = iterator_utils.get_effective_epoch_size(hparams, train=False)
# Run evaluation when there's a new checkpoint
tf.logging.info("Starting to evaluate...")
eval_start = time.time()
_, (eval_speed, eval_latencies), eval_output_tokens = estimator.eval_fn(hparams, hparams.ckpt, only_translate=translate_mode)
eval_end = time.time()
eval_delta = eval_end - eval_start
utils.print_out("eval time for ckpt: %.2f mins (%.2f sent/sec, %.2f tokens/sec)" %
(eval_delta / 60., eval_speed, eval_speed * (eval_src_tokens + eval_output_tokens) / eval_sentences), f=sys.stderr)
logging_data = {
'infer_speed_sent': eval_speed,
'infer_speed_toks': eval_speed * (eval_src_tokens + eval_output_tokens) / eval_sentences,
}
for lat in sorted(eval_latencies):
utils.print_out("eval latency_%s for ckpt: %.2f ms" % (lat, eval_latencies[lat] * 1000))
logging_data['infer_latency_{}'.format(lat)] = eval_latencies[lat] * 1000
dllogger.log((), logging_data)
dllogger.flush()
if translate_mode:
detokenize(hparams, hparams.translate_file + ".trans.tok", hparams.translate_file + ".trans")
else:
assert FLAGS.mode == "train_and_eval"
utils.print_out("Running train and eval mode.")
# Random
random_seed = FLAGS.random_seed
if random_seed is not None and random_seed > 0:
utils.print_out("# Set random seed to %d" % random_seed)
random.seed(random_seed)
np.random.seed(random_seed)
tf.set_random_seed(random_seed)
# Model output directory
output_dir = FLAGS.output_dir
if output_dir and not tf.gfile.Exists(output_dir):
utils.print_out("# Creating output directory %s ..." % output_dir)
tf.gfile.MakeDirs(output_dir)
dllogger.init(backends=[
dllogger.StdOutBackend(dllogger.Verbosity.DEFAULT),
dllogger.JSONStreamBackend(dllogger.Verbosity.VERBOSE, os.path.join(FLAGS.output_dir, FLAGS.mode + '-report.json')),
])
dllogger.log('PARAMETER', vars(FLAGS))
dllogger.metadata("bleu", {"unit": None})
dllogger.metadata("train_speed_sent", {"unit": "sequences/s"})
dllogger.metadata("train_speed_toks", {"unit": "tokens/s"})
# Load hparams.
default_hparams = create_hparams(FLAGS)
hparams = create_or_load_hparams(default_hparams, FLAGS.hparams_path)
utils.print_out("training hparams:")
utils.print_hparams(hparams)
with tf.gfile.GFile(os.path.join(output_dir, "train_hparams.txt"), "w") as f:
f.write(utils.serialize_hparams(hparams) + "\n")
# The estimator model_fn is written in a way allowing train hparams to be
# passed in infer mode.
infer_hparams = tf.contrib.training.HParams(**hparams.values())
infer_hparams.num_buckets = 1
utils.print_out("infer_hparams:")
utils.print_hparams(infer_hparams)
with tf.gfile.GFile(os.path.join(output_dir, "infer_hparams.txt"), "w") as f:
f.write(utils.serialize_hparams(infer_hparams) + "\n")
epochs = 0
should_stop = epochs >= FLAGS.max_train_epochs
train_sentences, train_src_tokens, train_tgt_tokens = iterator_utils.get_effective_epoch_size(hparams)
eval_sentences, eval_src_tokens, _ = iterator_utils.get_effective_epoch_size(hparams, train=False)
while not should_stop:
utils.print_out("Starting epoch %d" % epochs)
try:
train_start = time.time()
train_speed, _ = estimator.train_fn(hparams)
except tf.errors.OutOfRangeError:
utils.print_out("training hits OutOfRangeError", f=sys.stderr)
train_end = time.time()
train_delta = train_end - train_start
utils.print_out("training time for epoch %d: %.2f mins (%.2f sent/sec, %.2f tokens/sec)" %
(epochs + 1, train_delta / 60., train_speed, train_speed * (train_src_tokens + train_tgt_tokens) / train_sentences), f=sys.stderr)
logging_data = {
'train_speed_sent': train_speed,
'train_speed_toks': train_speed * (train_src_tokens + train_tgt_tokens) / train_sentences,
}
# This is probably sub-optimal, doing eval per-epoch
eval_start = time.time()
bleu_score, (eval_speed, eval_latencies), eval_output_tokens = estimator.eval_fn(infer_hparams)
eval_end = time.time()
eval_delta = eval_end - eval_start
utils.print_out("eval time for epoch %d: %.2f mins (%.2f sent/sec, %.2f tokens/sec)" %
(epochs + 1, eval_delta / 60., eval_speed, eval_speed * (eval_src_tokens + eval_output_tokens) / eval_sentences), f=sys.stderr)
logging_data.update({
'bleu': bleu_score,
'infer_speed_sent': eval_speed,
'infer_speed_toks': eval_speed * (eval_src_tokens + eval_output_tokens) / eval_sentences,
})
for lat in sorted(eval_latencies):
utils.print_out("eval latency_%s for epoch %d: %.2f ms" % (lat, epochs + 1, eval_latencies[lat] * 1000))
logging_data['eval_latency_{}'.format(lat)] = eval_latencies[lat] * 1000
dllogger.log((epochs,), logging_data)
dllogger.flush()
if FLAGS.debug or (FLAGS.target_bleu is not None and bleu_score > FLAGS.target_bleu):
should_stop = True
utils.print_out(
"Stop job since target bleu is reached at epoch %d ." % epochs,
f=sys.stderr)
epochs += 1
if epochs >= FLAGS.max_train_epochs:
should_stop = True
utils.print_out("Stop job since max_train_epochs is reached.",
f=sys.stderr)
dllogger.log((), logging_data)
dllogger.flush()
experiment_end = time.time()
utils.print_out('Experiment took {} min'.format((experiment_end - experiment_start) / 60))
if __name__ == "__main__":
nmt_parser = argparse.ArgumentParser()
add_arguments(nmt_parser)
FLAGS, unparsed = nmt_parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| DeepLearningExamples-master | TensorFlow/Translation/GNMT/nmt.py |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for building models."""
from __future__ import print_function
import collections
import os
import time
import numpy as np
import six
import tensorflow as tf
from utils import math_utils
from utils import misc_utils as utils
from utils import vocab_utils
__all__ = [
"get_initializer", "create_emb_for_encoder_and_decoder", "create_rnn_cell",
"gradient_clip", "create_or_load_model", "load_model", "avg_checkpoints",
]
# If a vocab size is greater than this value, put the embedding on cpu instead
VOCAB_SIZE_THRESHOLD_CPU = 50000
def get_initializer(init_op, seed=None, init_weight=0):
"""Create an initializer. init_weight is only for uniform."""
if init_op == "uniform":
assert init_weight
return tf.random_uniform_initializer(
-init_weight, init_weight, seed=seed)
elif init_op == "glorot_normal":
return tf.keras.initializers.glorot_normal(
seed=seed)
elif init_op == "glorot_uniform":
return tf.keras.initializers.glorot_uniform(
seed=seed)
elif init_op.isdigit():
# dtype is default fp32 for variables.
val = int(init_op)
return tf.constant_initializer(val)
else:
raise ValueError("Unknown init_op %s" % init_op)
class ExtraArgs(collections.namedtuple(
"ExtraArgs", ("single_cell_fn", "model_device_fn",
"attention_mechanism_fn", "encoder_emb_lookup_fn"))):
pass
class TrainModel(
collections.namedtuple("TrainModel", ("graph", "model", "iterator",
"skip_count_placeholder"))):
pass
def _get_embed_device(vocab_size):
"""Decide on which device to place an embed matrix given its vocab size."""
if vocab_size > VOCAB_SIZE_THRESHOLD_CPU:
return "/cpu:0"
else:
return "/gpu:0"
def _create_pretrained_emb_from_txt(
vocab_file, embed_file, num_trainable_tokens=3, dtype=tf.float32,
scope=None):
"""Load pretrain embeding from embed_file, and return an embedding matrix.
Args:
vocab_file: Path to vocab file.
    embed_file: Path to a Glove formatted embedding txt file.
    num_trainable_tokens: Make the first n tokens in the vocab file trainable
      variables. Default is 3, which is "<unk>", "<s>" and "</s>".
dtype: data type.
scope: tf scope name.
Returns:
pretrained embedding table variable.
"""
vocab, _ = vocab_utils.load_vocab(vocab_file)
trainable_tokens = vocab[:num_trainable_tokens]
utils.print_out("# Using pretrained embedding: %s." % embed_file)
utils.print_out(" with trainable tokens: ")
emb_dict, emb_size = vocab_utils.load_embed_txt(embed_file)
for token in trainable_tokens:
utils.print_out(" %s" % token)
if token not in emb_dict:
emb_dict[token] = [0.0] * emb_size
emb_mat = np.array(
[emb_dict[token] for token in vocab], dtype=dtype.as_numpy_dtype())
emb_mat = tf.constant(emb_mat)
emb_mat_const = tf.slice(emb_mat, [num_trainable_tokens, 0], [-1, -1])
with tf.variable_scope(scope or "pretrain_embeddings", dtype=dtype) as scope:
emb_mat_var = tf.get_variable(
"emb_mat_var", [num_trainable_tokens, emb_size])
return tf.concat([emb_mat_var, emb_mat_const], 0)
def _create_or_load_embed(embed_name, vocab_file, embed_file,
vocab_size, embed_size, dtype):
"""Create a new or load an existing embedding matrix."""
if vocab_file and embed_file:
embedding = _create_pretrained_emb_from_txt(vocab_file, embed_file)
else:
embedding = tf.get_variable(
embed_name, [vocab_size, embed_size], dtype)
return embedding
def create_emb_for_encoder_and_decoder(share_vocab,
src_vocab_size,
tgt_vocab_size,
src_embed_size,
tgt_embed_size,
dtype=tf.float32,
num_enc_partitions=0,
num_dec_partitions=0,
src_vocab_file=None,
tgt_vocab_file=None,
src_embed_file=None,
tgt_embed_file=None,
use_char_encode=False,
scope=None):
"""Create embedding matrix for both encoder and decoder.
Args:
share_vocab: A boolean. Whether to share embedding matrix for both
encoder and decoder.
src_vocab_size: An integer. The source vocab size.
tgt_vocab_size: An integer. The target vocab size.
src_embed_size: An integer. The embedding dimension for the encoder's
embedding.
tgt_embed_size: An integer. The embedding dimension for the decoder's
embedding.
dtype: dtype of the embedding matrix. Default to float32.
num_enc_partitions: number of partitions used for the encoder's embedding
vars.
num_dec_partitions: number of partitions used for the decoder's embedding
vars.
src_vocab_file: A string. The source vocabulary file.
tgt_vocab_file: A string. The target vocabulary file.
src_embed_file: A string. The source embedding file.
tgt_embed_file: A string. The target embedding file.
use_char_encode: A boolean. If true, use char encoder.
scope: VariableScope for the created subgraph. Default to "embedding".
Returns:
embedding_encoder: Encoder's embedding matrix.
embedding_decoder: Decoder's embedding matrix.
Raises:
ValueError: if use share_vocab but source and target have different vocab
size.
"""
if num_enc_partitions <= 1:
enc_partitioner = None
else:
    # Note: num_partitions > 1 is required for distributed training because
    # embedding_lookup tries to colocate a single-partition embedding variable
    # with its lookup ops, which may cause embedding variables to be placed on
    # worker jobs.
enc_partitioner = tf.fixed_size_partitioner(num_enc_partitions)
if num_dec_partitions <= 1:
dec_partitioner = None
else:
    # Note: num_partitions > 1 is required for distributed training because
    # embedding_lookup tries to colocate a single-partition embedding variable
    # with its lookup ops, which may cause embedding variables to be placed on
    # worker jobs.
dec_partitioner = tf.fixed_size_partitioner(num_dec_partitions)
if src_embed_file and enc_partitioner:
raise ValueError(
"Can't set num_enc_partitions > 1 when using pretrained encoder "
"embedding")
if tgt_embed_file and dec_partitioner:
raise ValueError(
"Can't set num_dec_partitions > 1 when using pretrained decdoer "
"embedding")
with tf.variable_scope(
scope or "embeddings", dtype=dtype, partitioner=enc_partitioner) as scope:
# Share embedding
if share_vocab:
if src_vocab_size != tgt_vocab_size:
raise ValueError("Share embedding but different src/tgt vocab sizes"
" %d vs. %d" % (src_vocab_size, tgt_vocab_size))
assert src_embed_size == tgt_embed_size
utils.print_out("# Use the same embedding for source and target")
vocab_file = src_vocab_file or tgt_vocab_file
embed_file = src_embed_file or tgt_embed_file
embedding_encoder = _create_or_load_embed(
"embedding_share", vocab_file, embed_file,
src_vocab_size, src_embed_size, dtype)
embedding_decoder = embedding_encoder
else:
if not use_char_encode:
with tf.variable_scope("encoder", partitioner=enc_partitioner):
embedding_encoder = _create_or_load_embed(
"embedding_encoder", src_vocab_file, src_embed_file,
src_vocab_size, src_embed_size, dtype)
else:
embedding_encoder = None
with tf.variable_scope("decoder", partitioner=dec_partitioner):
embedding_decoder = _create_or_load_embed(
"embedding_decoder", tgt_vocab_file, tgt_embed_file,
tgt_vocab_size, tgt_embed_size, dtype)
return embedding_encoder, embedding_decoder
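# Illustrative usage of create_emb_for_encoder_and_decoder (a minimal sketch;
# the function name and the vocab/embedding sizes are made up): create one
# [32000, 512] embedding matrix shared by encoder and decoder.
def _example_create_embeddings():
  return create_emb_for_encoder_and_decoder(
      share_vocab=True,
      src_vocab_size=32000,
      tgt_vocab_size=32000,
      src_embed_size=512,
      tgt_embed_size=512)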
def build_cell(cell, input_shape):
if isinstance(cell, tf.contrib.rnn.MultiRNNCell):
assert isinstance(input_shape, collections.Sequence)
for i, c in enumerate(cell._cells):
if i == 0:
c.build((None, input_shape))
else:
c.build((None, c.num_units))
return
if isinstance(cell, tf.nn.rnn_cell.DropoutWrapper):
build_cell(cell._cell, input_shape)
elif isinstance(cell, tf.nn.rnn_cell.ResidualWrapper):
build_cell(cell._cell, input_shape)
elif isinstance(cell, tf.nn.rnn_cell.LSTMCell):
cell.build(input_shape)
else:
raise ValueError("%s not supported" % type(cell))
def _single_cell(unit_type, num_units, forget_bias, dropout, mode,
dtype=None, residual_connection=False, residual_fn=None,
use_block_lstm=False):
"""Create an instance of a single RNN cell."""
# dropout (= 1 - keep_prob) is set to 0 during eval and infer
dropout = dropout if mode == tf.contrib.learn.ModeKeys.TRAIN else 0.0
# Cell Type
if unit_type == "lstm":
utils.print_out(" LSTM, forget_bias=%g" % forget_bias, new_line=False)
if not use_block_lstm:
single_cell = tf.nn.rnn_cell.LSTMCell(
num_units, dtype=dtype, forget_bias=forget_bias)
else:
single_cell = tf.contrib.rnn.LSTMBlockCell(
num_units, forget_bias=forget_bias)
elif unit_type == "gru":
utils.print_out(" GRU", new_line=False)
single_cell = tf.contrib.rnn.GRUCell(num_units)
elif unit_type == "layer_norm_lstm":
utils.print_out(" Layer Normalized LSTM, forget_bias=%g" % forget_bias,
new_line=False)
single_cell = tf.contrib.rnn.LayerNormBasicLSTMCell(
num_units,
forget_bias=forget_bias,
layer_norm=True)
elif unit_type == "nas":
utils.print_out(" NASCell", new_line=False)
single_cell = tf.contrib.rnn.NASCell(num_units)
else:
raise ValueError("Unknown unit type %s!" % unit_type)
# Dropout (= 1 - keep_prob)
if dropout > 0.0:
single_cell = tf.contrib.rnn.DropoutWrapper(
cell=single_cell, input_keep_prob=(1.0 - dropout))
utils.print_out(" %s, dropout=%g " %(type(single_cell).__name__, dropout),
new_line=False)
# Residual
if residual_connection:
single_cell = tf.contrib.rnn.ResidualWrapper(
single_cell, residual_fn=residual_fn)
utils.print_out(" %s" % type(single_cell).__name__, new_line=False)
return single_cell
def _cell_list(unit_type, num_units, num_layers, num_residual_layers,
forget_bias, dropout, mode, dtype=None,
single_cell_fn=None, residual_fn=None, use_block_lstm=False):
"""Create a list of RNN cells."""
if not single_cell_fn:
single_cell_fn = _single_cell
# Multi-GPU
cell_list = []
for i in range(num_layers):
utils.print_out(" cell %d" % i, new_line=False)
single_cell = single_cell_fn(
unit_type=unit_type,
num_units=num_units,
forget_bias=forget_bias,
dropout=dropout,
mode=mode,
dtype=dtype,
residual_connection=(i >= num_layers - num_residual_layers),
residual_fn=residual_fn,
use_block_lstm=use_block_lstm
)
utils.print_out("")
cell_list.append(single_cell)
return cell_list
def create_rnn_cell(unit_type, num_units, num_layers, num_residual_layers,
forget_bias, dropout, mode, dtype=None,
single_cell_fn=None, use_block_lstm=False):
"""Create multi-layer RNN cell.
Args:
unit_type: string representing the unit type, i.e. "lstm".
num_units: the depth of each unit.
num_layers: number of cells.
num_residual_layers: Number of residual layers from top to bottom. For
example, if `num_layers=4` and `num_residual_layers=2`, the last 2 RNN
cells in the returned list will be wrapped with `ResidualWrapper`.
forget_bias: the initial forget bias of the RNNCell(s).
dropout: floating point value between 0.0 and 1.0:
      the probability of dropout. This is ignored if `mode != TRAIN`.
mode: either tf.contrib.learn.TRAIN/EVAL/INFER
single_cell_fn: allow for adding customized cell.
When not specified, we default to model_helper._single_cell
Returns:
An `RNNCell` instance.
"""
cell_list = _cell_list(unit_type=unit_type,
num_units=num_units,
num_layers=num_layers,
num_residual_layers=num_residual_layers,
forget_bias=forget_bias,
dropout=dropout,
mode=mode,
dtype=dtype,
single_cell_fn=single_cell_fn,
use_block_lstm=use_block_lstm)
if len(cell_list) == 1: # Single layer.
return cell_list[0]
else: # Multi layers
return tf.contrib.rnn.MultiRNNCell(cell_list)
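# Illustrative usage of create_rnn_cell (a minimal sketch; all sizes are made
# up): a 4-layer LSTM where the top 2 layers get residual connections and
# 20% dropout is applied at training time only.
def _example_create_rnn_cell():
  return create_rnn_cell(
      unit_type="lstm",
      num_units=512,
      num_layers=4,
      num_residual_layers=2,
      forget_bias=1.0,
      dropout=0.2,
      mode=tf.contrib.learn.ModeKeys.TRAIN)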
def gradient_clip(gradients, max_gradient_norm):
"""Clipping gradients of a model."""
clipped_gradients, gradient_norm = math_utils.clip_by_global_norm(
gradients, max_gradient_norm)
return clipped_gradients, gradient_norm
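# Illustrative usage of gradient_clip (a minimal sketch; the function name and
# hyperparameters are made up): the usual clip-then-apply training pattern.
def _example_gradient_clip(loss, params, learning_rate=1.0):
  gradients = tf.gradients(loss, params)
  clipped_gradients, _ = gradient_clip(gradients, max_gradient_norm=5.0)
  optimizer = tf.train.GradientDescentOptimizer(learning_rate)
  return optimizer.apply_gradients(zip(clipped_gradients, params))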
def print_variables_in_ckpt(ckpt_path):
"""Print a list of variables in a checkpoint together with their shapes."""
utils.print_out("# Variables in ckpt %s" % ckpt_path)
reader = tf.train.NewCheckpointReader(ckpt_path)
variable_map = reader.get_variable_to_shape_map()
for key in sorted(variable_map.keys()):
utils.print_out(" %s: %s" % (key, variable_map[key]))
def load_model(model, ckpt_path, session, name):
"""Load model from a checkpoint."""
start_time = time.time()
try:
model.saver.restore(session, ckpt_path)
except tf.errors.NotFoundError as e:
utils.print_out("Can't load checkpoint")
print_variables_in_ckpt(ckpt_path)
utils.print_out("%s" % str(e))
session.run(tf.tables_initializer())
utils.print_out(
" loaded %s model parameters from %s, time %.2fs" %
(name, ckpt_path, time.time() - start_time))
return model
def avg_checkpoints(model_dir, num_last_checkpoints, global_step_name):
"""Average the last N checkpoints in the model_dir."""
checkpoint_state = tf.train.get_checkpoint_state(model_dir)
if not checkpoint_state:
utils.print_out("# No checkpoint file found in directory: %s" % model_dir)
return None
# Checkpoints are ordered from oldest to newest.
checkpoints = (
checkpoint_state.all_model_checkpoint_paths[-num_last_checkpoints:])
if len(checkpoints) < num_last_checkpoints:
utils.print_out(
"# Skipping averaging checkpoints because not enough checkpoints is "
"available.")
return None
avg_model_dir = os.path.join(model_dir, "avg_checkpoints")
if not tf.gfile.Exists(avg_model_dir):
utils.print_out(
"# Creating new directory %s for saving averaged checkpoints." %
avg_model_dir)
tf.gfile.MakeDirs(avg_model_dir)
utils.print_out("# Reading and averaging variables in checkpoints:")
var_list = tf.contrib.framework.list_variables(checkpoints[0])
var_values, var_dtypes = {}, {}
for (name, shape) in var_list:
if name != global_step_name:
var_values[name] = np.zeros(shape)
for checkpoint in checkpoints:
utils.print_out(" %s" % checkpoint)
reader = tf.contrib.framework.load_checkpoint(checkpoint)
for name in var_values:
tensor = reader.get_tensor(name)
var_dtypes[name] = tensor.dtype
var_values[name] += tensor
for name in var_values:
var_values[name] /= len(checkpoints)
# Build a graph with same variables in the checkpoints, and save the averaged
# variables into the avg_model_dir.
with tf.Graph().as_default():
tf_vars = [
        tf.get_variable(v, shape=var_values[v].shape, dtype=var_dtypes[v])
for v in var_values
]
placeholders = [tf.placeholder(v.dtype, shape=v.shape) for v in tf_vars]
assign_ops = [tf.assign(v, p) for (v, p) in zip(tf_vars, placeholders)]
    saver = tf.train.Saver(tf.global_variables(), save_relative_paths=True)
with tf.Session() as sess:
      sess.run(tf.global_variables_initializer())
for p, assign_op, (name, value) in zip(placeholders, assign_ops,
six.iteritems(var_values)):
sess.run(assign_op, {p: value})
# Use the built saver to save the averaged checkpoint. Only keep 1
# checkpoint and the best checkpoint will be moved to avg_best_metric_dir.
saver.save(
sess,
os.path.join(avg_model_dir, "translate.ckpt"))
return avg_model_dir
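# Illustrative usage of avg_checkpoints (a minimal sketch; the directory path
# is hypothetical): average the 5 newest checkpoints, getting back the
# directory holding the averaged "translate.ckpt", or None if fewer than 5
# checkpoints exist.
def _example_avg_checkpoints():
  return avg_checkpoints(
      model_dir="/tmp/gnmt_model",
      num_last_checkpoints=5,
      global_step_name="global_step")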
def create_or_load_model(model, model_dir, session, name):
"""Create translation model and initialize or load parameters in session."""
latest_ckpt = tf.train.latest_checkpoint(model_dir)
if latest_ckpt:
model = load_model(model, latest_ckpt, session, name)
else:
start_time = time.time()
session.run(tf.global_variables_initializer())
session.run(tf.tables_initializer())
utils.print_out(" created %s model with fresh parameters, time %.2fs" %
(name, time.time() - start_time))
global_step = model.global_step.eval(session=session)
return model, global_step
| DeepLearningExamples-master | TensorFlow/Translation/GNMT/model_helper.py |
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A powerful dynamic attention wrapper object."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import numpy as np
import tensorflow as tf
from utils import math_utils
from utils import misc_utils
from tensorflow.contrib.framework.python.framework import tensor_util
from tensorflow.python.framework import function
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import rnn_cell_impl
_zero_state_tensors = rnn_cell_impl._zero_state_tensors # pylint: disable=protected-access
nest = tf.contrib.framework.nest
class AttentionMechanism(object):
@property
def alignments_size(self):
raise NotImplementedError
@property
def state_size(self):
raise NotImplementedError
def _prepare_memory(memory, memory_sequence_length, check_inner_dims_defined):
"""Convert to tensor and possibly mask `memory`.
Args:
memory: `Tensor`, shaped `[batch_size, max_time, ...]`.
memory_sequence_length: `int32` `Tensor`, shaped `[batch_size]`.
check_inner_dims_defined: Python boolean. If `True`, the `memory`
argument's shape is checked to ensure all but the two outermost
dimensions are fully defined.
Returns:
A (possibly masked), checked, new `memory`.
Raises:
ValueError: If `check_inner_dims_defined` is `True` and not
`memory.shape[2:].is_fully_defined()`.
"""
memory = nest.map_structure(
lambda m: tf.convert_to_tensor(m, name="memory"), memory)
if memory_sequence_length is not None:
memory_sequence_length = tf.convert_to_tensor(
memory_sequence_length, name="memory_sequence_length")
if check_inner_dims_defined:
def _check_dims(m):
if not m.get_shape()[2:].is_fully_defined():
raise ValueError("Expected memory %s to have fully defined inner dims, "
"but saw shape: %s" % (m.name, m.get_shape()))
nest.map_structure(_check_dims, memory)
if memory_sequence_length is None:
seq_len_mask = None
else:
seq_len_mask = tf.sequence_mask(
memory_sequence_length,
maxlen=tf.shape(nest.flatten(memory)[0])[1],
dtype=nest.flatten(memory)[0].dtype)
seq_len_batch_size = (
memory_sequence_length.shape[0].value
or tf.shape(memory_sequence_length)[0])
def _maybe_mask(m, seq_len_mask):
rank = m.get_shape().ndims
rank = rank if rank is not None else tf.rank(m)
extra_ones = tf.ones(rank - 2, dtype=tf.int32)
m_batch_size = m.shape[0].value or tf.shape(m)[0]
if memory_sequence_length is not None:
seq_len_mask = tf.reshape(
seq_len_mask,
tf.concat((tf.shape(seq_len_mask), extra_ones), 0))
return m * seq_len_mask
else:
return m
return nest.map_structure(lambda m: _maybe_mask(m, seq_len_mask), memory)
def _maybe_mask_score(score, memory_sequence_length, score_mask_value):
if memory_sequence_length is None:
return score
if score_mask_value is None:
score_mask_value = tf.as_dtype(score.dtype).as_numpy_dtype(-np.inf)
score_mask = tf.sequence_mask(
memory_sequence_length, maxlen=tf.shape(score)[1])
score_mask_values = score_mask_value * tf.ones_like(score)
return tf.where(score_mask, score, score_mask_values)
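# Illustrative check of _maybe_mask_score (a minimal sketch with made-up
# shapes): positions past each sequence length receive -inf, so a softmax over
# the masked scores assigns them exactly zero probability.
def _example_maybe_mask_score():
  score = tf.zeros([2, 4])       # [batch, memory_time]
  lengths = tf.constant([4, 2])  # valid memory length per batch entry
  masked = _maybe_mask_score(score, lengths, score_mask_value=None)
  return tf.nn.softmax(masked)   # second row: [0.5, 0.5, 0.0, 0.0]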
class _BaseAttentionMechanism(AttentionMechanism):
"""A base AttentionMechanism class providing common functionality.
Common functionality includes:
1. Storing the query and memory layers.
2. Preprocessing and storing the memory.
"""
def __init__(self,
query_layer,
memory,
probability_fn,
memory_sequence_length=None,
memory_layer=None,
check_inner_dims_defined=True,
score_mask_value=None,
name=None):
"""Construct base AttentionMechanism class.
Args:
query_layer: Callable. Instance of `tf.layers.Layer`. The layer's depth
must match the depth of `memory_layer`. If `query_layer` is not
provided, the shape of `query` must match that of `memory_layer`.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
probability_fn: A `callable`. Converts the score and previous alignments
to probabilities. Its signature should be:
`probabilities = probability_fn(score, state)`.
memory_sequence_length (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
memory_layer: Instance of `tf.layers.Layer` (may be None). The layer's
depth must match the depth of `query_layer`.
If `memory_layer` is not provided, the shape of `memory` must match
that of `query_layer`.
check_inner_dims_defined: Python boolean. If `True`, the `memory`
argument's shape is checked to ensure all but the two outermost
dimensions are fully defined.
score_mask_value: (optional): The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
name: Name to use when creating ops.
"""
if (query_layer is not None
and not isinstance(query_layer, tf.layers.Layer)):
raise TypeError(
"query_layer is not a Layer: %s" % type(query_layer).__name__)
if (memory_layer is not None
and not isinstance(memory_layer, tf.layers.Layer)):
raise TypeError(
"memory_layer is not a Layer: %s" % type(memory_layer).__name__)
self._query_layer = query_layer
self._memory_layer = memory_layer
self.dtype = memory_layer.dtype
if not callable(probability_fn):
raise TypeError("probability_fn must be callable, saw type: %s" %
type(probability_fn).__name__)
self._probability_fn = lambda score, prev: ( # pylint:disable=g-long-lambda
probability_fn(
_maybe_mask_score(score, memory_sequence_length, score_mask_value),
prev))
with tf.name_scope(
name, "BaseAttentionMechanismInit", nest.flatten(memory)):
self._values = _prepare_memory(
memory, memory_sequence_length,
check_inner_dims_defined=check_inner_dims_defined)
self._keys = (
self.memory_layer(self._values) if self.memory_layer # pylint: disable=not-callable
else self._values)
self._batch_size = (
self._keys.shape[0].value or tf.shape(self._keys)[0])
self._alignments_size = (self._keys.shape[1].value or
tf.shape(self._keys)[1])
def build(self, query_shape):
self._query_layer.build((None, query_shape[-1]))
# memory_layer is built in the constructor.
self.built = True
@property
def memory_layer(self):
return self._memory_layer
@property
def query_layer(self):
return self._query_layer
@property
def values(self):
return self._values
@property
def keys(self):
return self._keys
@property
def batch_size(self):
return self._batch_size
@property
def alignments_size(self):
return self._alignments_size
@property
def state_size(self):
return self._alignments_size
def initial_alignments(self, batch_size, dtype):
"""Creates the initial alignment values for the `AttentionWrapper` class.
This is important for AttentionMechanisms that use the previous alignment
to calculate the alignment at the next time step (e.g. monotonic attention).
The default behavior is to return a tensor of all zeros.
Args:
batch_size: `int32` scalar, the batch_size.
dtype: The `dtype`.
Returns:
A `dtype` tensor shaped `[batch_size, alignments_size]`
(`alignments_size` is the values' `max_time`).
"""
max_time = self._alignments_size
return _zero_state_tensors(max_time, batch_size, dtype)
def initial_state(self, batch_size, dtype):
"""Creates the initial state values for the `AttentionWrapper` class.
This is important for AttentionMechanisms that use the previous alignment
to calculate the alignment at the next time step (e.g. monotonic attention).
The default behavior is to return the same output as initial_alignments.
Args:
batch_size: `int32` scalar, the batch_size.
dtype: The `dtype`.
Returns:
A structure of all-zero tensors with shapes as described by `state_size`.
"""
return self.initial_alignments(batch_size, dtype)
def _bahdanau_score(processed_query, keys, normalize, v, g, b):
"""Implements Bahdanau-style (additive) scoring function.
  This attention has two forms. The first is Bahdanau attention,
as described in:
Dzmitry Bahdanau, Kyunghyun Cho, Yoshua Bengio.
"Neural Machine Translation by Jointly Learning to Align and Translate."
ICLR 2015. https://arxiv.org/abs/1409.0473
The second is the normalized form. This form is inspired by the
weight normalization article:
Tim Salimans, Diederik P. Kingma.
"Weight Normalization: A Simple Reparameterization to Accelerate
Training of Deep Neural Networks."
https://arxiv.org/abs/1602.07868
To enable the second form, set `normalize=True`.
Args:
processed_query: Tensor, shape `[batch_size, num_units]` to compare to keys.
keys: Processed memory, shape `[batch_size, max_time, num_units]`.
    normalize: Whether to normalize the score function.
    v: The attention vector `attention_v`, shape `[num_units]`.
    g: Scalar used for weight normalization; only used if `normalize` is True.
    b: Bias added prior to the nonlinearity; only used if `normalize` is True.
Returns:
A `[batch_size, max_time]` tensor of unnormalized score values.
"""
# Reshape from [batch_size, ...] to [batch_size, 1, ...] for broadcasting.
processed_query = tf.expand_dims(processed_query, 1)
if normalize:
# normed_v = g * v / ||v||
def NormalizedAttenFwd(keys, processed_query, g, v, b):
"""normalized atten."""
normed_v = g * v * tf.rsqrt(
tf.reduce_sum(tf.square(v)))
batch = tf.shape(keys)[0]
max_time = tf.shape(keys)[1]
units = tf.shape(keys)[-1]
# [batch, time, dim]
activation = tf.tanh(keys + processed_query + b)
# [batch * time, dim]
activation = tf.reshape(activation, [batch * max_time, units])
# [dim, 1]
v = tf.expand_dims(normed_v, -1)
# [batch * time, 1] -> [batch * time]
y = tf.squeeze(tf.matmul(activation, v), axis=1)
y = tf.reshape(y, [batch, max_time])
return y
use_xla = os.environ["use_xla"] == "true"
def NormalizedAtten(keys, processed_query, g, v, b):
return NormalizedAttenFwd(keys, processed_query, g, v, b)
fn = NormalizedAtten
if os.environ["use_defun"] == "true":
fn = function.Defun(compiled=use_xla)(fn)
res = fn(keys, processed_query, g, v, b)
res.set_shape((None, keys.shape[1]))
return res
else:
def _Atten(keys, processed_query, v):
"""atten."""
batch = tf.shape(keys)[0]
max_time = tf.shape(keys)[1]
units = tf.shape(keys)[-1]
activation = tf.tanh(keys + processed_query)
activation = tf.reshape(activation, [batch * max_time, units])
v = tf.expand_dims(v, -1)
y = tf.squeeze(tf.matmul(activation, v), axis=1)
y = tf.reshape(y, [batch, max_time])
return y
fn = _Atten
if os.environ["use_defun"] == "true":
fn = function.Defun()(fn)
return fn(keys, processed_query, v)
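# Illustrative shape check of _bahdanau_score (a minimal sketch with made-up
# shapes): in the unnormalized path the score is v . tanh(keys + query) per
# memory time step, giving a [batch, max_time] tensor. The Defun/XLA toggles
# are read from the environment by this module, so they are defaulted off here.
def _example_bahdanau_score():
  os.environ.setdefault("use_defun", "false")
  os.environ.setdefault("use_xla", "false")
  processed_query = tf.zeros([2, 8])  # [batch, num_units]
  keys = tf.zeros([2, 5, 8])          # [batch, max_time, num_units]
  v = tf.ones([8])
  return _bahdanau_score(processed_query, keys, normalize=False,
                         v=v, g=None, b=None)  # -> [2, 5]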
class BahdanauAttention(_BaseAttentionMechanism):
"""Implements Bahdanau-style (additive) attention.
This attention has two forms. The first is Bahdanau attention,
as described in:
Dzmitry Bahdanau, Kyunghyun Cho, Yoshua Bengio.
"Neural Machine Translation by Jointly Learning to Align and Translate."
ICLR 2015. https://arxiv.org/abs/1409.0473
The second is the normalized form. This form is inspired by the
weight normalization article:
Tim Salimans, Diederik P. Kingma.
"Weight Normalization: A Simple Reparameterization to Accelerate
Training of Deep Neural Networks."
https://arxiv.org/abs/1602.07868
To enable the second form, construct the object with parameter
`normalize=True`.
"""
def __init__(self,
num_units,
memory,
memory_sequence_length=None,
normalize=False,
probability_fn=None,
score_mask_value=None,
dtype=None,
name="BahdanauAttention"):
"""Construct the Attention mechanism.
Args:
num_units: The depth of the query mechanism.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
normalize: Python boolean. Whether to normalize the energy term.
probability_fn: (optional) A `callable`. Converts the score to
probabilities. The default is `tf.nn.softmax`. Other options include
`tf.contrib.seq2seq.hardmax` and `tf.contrib.sparsemax.sparsemax`.
Its signature should be: `probabilities = probability_fn(score)`.
score_mask_value: (optional): The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
dtype: The data type for the query and memory layers of the attention
mechanism.
name: Name to use when creating ops.
"""
if probability_fn is None:
probability_fn = tf.nn.softmax
if dtype is None:
dtype = tf.float32
wrapped_probability_fn = lambda score, _: probability_fn(score)
super(BahdanauAttention, self).__init__(
query_layer=tf.layers.Dense(
num_units, name="query_layer", use_bias=False, dtype=dtype),
memory_layer=tf.layers.Dense(
num_units, name="memory_layer", use_bias=False, dtype=dtype),
memory=memory,
probability_fn=wrapped_probability_fn,
memory_sequence_length=memory_sequence_length,
score_mask_value=score_mask_value,
name=name)
self._num_units = num_units
self._normalize = normalize
self._name = name
self.v = tf.get_variable("attention_v", [num_units], dtype=dtype)
# TODO(jamesqin): I have no idea why the following is happening.
# If not doing read_value(), later in backprop it would complain about
# AddN op is fed with "resource" dtype input, which shouldn't be happening.
if isinstance(self.v, resource_variable_ops.ResourceVariable):
self.v = self.v.read_value()
self.g, self.b = None, None
if self._normalize:
# Scalar used in weight normalization
self.g = tf.get_variable(
"attention_g", dtype=dtype,
initializer=tf.constant_initializer(math.sqrt((1. / num_units))),
shape=())
# Same as above self.g
if isinstance(self.g, resource_variable_ops.ResourceVariable):
self.g = self.g.read_value()
# Bias added prior to the nonlinearity
self.b = tf.get_variable(
"attention_b", [num_units], dtype=dtype,
initializer=tf.zeros_initializer())
# Same as above self.g
if isinstance(self.b, resource_variable_ops.ResourceVariable):
self.b = self.b.read_value()
def __call__(self, query, state):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape
`[batch_size, query_depth]`.
state: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]`
(`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
"""
query_dim = query.shape[-1].value
assert query_dim
def _compute_alignments(query, state):
with tf.variable_scope(None, "bahdanau_attention", [query]):
# TODO(jamesqin): figure out the shape implications of Defun.
query.set_shape((None, query_dim))
processed_query = self.query_layer(query) if self.query_layer else query
score = _bahdanau_score(processed_query, self._keys, self._normalize,
self.v, self.g, self.b)
alignments = self._probability_fn(score, state)
next_state = alignments
return alignments, next_state
return _compute_alignments(query, state)
class AttentionWrapperState(
collections.namedtuple("AttentionWrapperState",
("cell_state", "attention", "time", "alignments",
"alignment_history", "attention_state"))):
"""`namedtuple` storing the state of a `AttentionWrapper`.
Contains:
- `cell_state`: The state of the wrapped `RNNCell` at the previous time
step.
- `attention`: The attention emitted at the previous time step.
- `time`: int32 scalar containing the current time step.
- `alignments`: A single or tuple of `Tensor`(s) containing the alignments
emitted at the previous time step for each attention mechanism.
- `alignment_history`: (if enabled) a single or tuple of `TensorArray`(s)
containing alignment matrices from all time steps for each attention
mechanism. Call `stack()` on each to convert to a `Tensor`.
- `attention_state`: A single or tuple of nested objects
containing attention mechanism state for each attention mechanism.
The objects may contain Tensors or TensorArrays.
"""
def clone(self, **kwargs):
"""Clone this object, overriding components provided by kwargs.
The new state fields' shape must match original state fields' shape. This
will be validated, and original fields' shape will be propagated to new
fields.
Example:
```python
initial_state = attention_wrapper.zero_state(dtype=..., batch_size=...)
initial_state = initial_state.clone(cell_state=encoder_state)
```
Args:
**kwargs: Any properties of the state object to replace in the returned
`AttentionWrapperState`.
Returns:
A new `AttentionWrapperState` whose properties are the same as
this one, except any overridden properties as provided in `kwargs`.
"""
def with_same_shape(old, new):
"""Check and set new tensor's shape."""
xla_compile = (os.environ["xla_compile"] == "true")
if not xla_compile:
if isinstance(old, tf.Tensor) and isinstance(new, tf.Tensor):
return tensor_util.with_same_shape(old, new)
return new
return nest.map_structure(
with_same_shape,
self,
super(AttentionWrapperState, self)._replace(**kwargs))
def _compute_attention(attention_mechanism, cell_output, attention_state,
attention_layer):
"""Computes the attention and alignments for a given attention_mechanism."""
alignments, next_attention_state = attention_mechanism(
cell_output, state=attention_state)
# Context is the inner product of alignments and values along the
# memory time dimension.
# alignments shape is
# [batch_size, 1, memory_time]
# attention_mechanism.values shape is
# [batch_size, memory_time, memory_size]
# the batched matmul is over memory_time, so the output shape is
# [batch_size, 1, memory_size].
# we then squeeze out the singleton dim.
expanded_alignments = tf.expand_dims(alignments, 1)
context = math_utils.BatchMatMul(expanded_alignments,
attention_mechanism.values)
context = tf.squeeze(context, [1])
if attention_layer is not None:
attention = attention_layer(tf.concat([cell_output, context], 1))
else:
attention = context
return attention, alignments, next_attention_state
class AttentionWrapper(tf.nn.rnn_cell.RNNCell):
"""Wraps another `RNNCell` with attention.
"""
def __init__(self,
cell,
attention_mechanism,
attention_layer_size=None,
alignment_history=False,
cell_input_fn=None,
output_attention=True,
initial_cell_state=None,
name=None,
attention_layer=None):
"""Construct the `AttentionWrapper`.
**NOTE** If you are using the `BeamSearchDecoder` with a cell wrapped in
`AttentionWrapper`, then you must ensure that:
- The encoder output has been tiled to `beam_width` via
`tf.contrib.seq2seq.tile_batch` (NOT `tf.tile`).
- The `batch_size` argument passed to the `zero_state` method of this
wrapper is equal to `true_batch_size * beam_width`.
- The initial state created with `zero_state` above contains a
`cell_state` value containing properly tiled final state from the
encoder.
An example:
```
tiled_encoder_outputs = tf.contrib.seq2seq.tile_batch(
encoder_outputs, multiplier=beam_width)
    tiled_encoder_final_state = tf.contrib.seq2seq.tile_batch(
encoder_final_state, multiplier=beam_width)
tiled_sequence_length = tf.contrib.seq2seq.tile_batch(
sequence_length, multiplier=beam_width)
attention_mechanism = MyFavoriteAttentionMechanism(
num_units=attention_depth,
        memory=tiled_encoder_outputs,
memory_sequence_length=tiled_sequence_length)
attention_cell = AttentionWrapper(cell, attention_mechanism, ...)
decoder_initial_state = attention_cell.zero_state(
dtype, batch_size=true_batch_size * beam_width)
decoder_initial_state = decoder_initial_state.clone(
cell_state=tiled_encoder_final_state)
```
Args:
cell: An instance of `RNNCell`.
attention_mechanism: A list of `AttentionMechanism` instances or a single
instance.
attention_layer_size: A list of Python integers or a single Python
integer, the depth of the attention (output) layer(s). If None
(default), use the context as attention at each time step. Otherwise,
feed the context and cell output into the attention layer to generate
attention at each time step. If attention_mechanism is a list,
attention_layer_size must be a list of the same length. If
attention_layer is set, this must be None.
alignment_history: Python boolean, whether to store alignment history
from all time steps in the final output state (currently stored as a
time major `TensorArray` on which you must call `stack()`).
cell_input_fn: (optional) A `callable`. The default is:
`lambda inputs, attention: tf.concat([inputs, attention], -1)`.
output_attention: Python bool. If `True` (default), the output at each
time step is the attention value. This is the behavior of Luong-style
attention mechanisms. If `False`, the output at each time step is
        the output of `cell`. This is the behavior of Bahdanau-style
attention mechanisms. In both cases, the `attention` tensor is
propagated to the next time step via the state and is used there.
This flag only controls whether the attention mechanism is propagated
up to the next cell in an RNN stack or to the top RNN output.
initial_cell_state: The initial state value to use for the cell when
the user calls `zero_state()`. Note that if this value is provided
now, and the user uses a `batch_size` argument of `zero_state` which
does not match the batch size of `initial_cell_state`, proper
behavior is not guaranteed.
name: Name to use when creating ops.
attention_layer: A list of `tf.layers.Layer` instances or a
single `tf.layers.Layer` instance taking the context and cell output as
inputs to generate attention at each time step. If None (default), use
the context as attention at each time step. If attention_mechanism is a
list, attention_layer must be a list of the same length. If
attention_layers_size is set, this must be None.
Raises:
TypeError: `attention_layer_size` is not None and (`attention_mechanism`
is a list but `attention_layer_size` is not; or vice versa).
ValueError: if `attention_layer_size` is not None, `attention_mechanism`
is a list, and its length does not match that of `attention_layer_size`;
if `attention_layer_size` and `attention_layer` are set simultaneously.
"""
super(AttentionWrapper, self).__init__(name=name)
rnn_cell_impl.assert_like_rnncell("cell", cell)
if isinstance(attention_mechanism, (list, tuple)):
self._is_multi = True
attention_mechanisms = attention_mechanism
for attention_mechanism in attention_mechanisms:
if not isinstance(attention_mechanism, AttentionMechanism):
raise TypeError(
"attention_mechanism must contain only instances of "
"AttentionMechanism, saw type: %s"
% type(attention_mechanism).__name__)
else:
self._is_multi = False
if not isinstance(attention_mechanism, AttentionMechanism):
raise TypeError(
"attention_mechanism must be an AttentionMechanism or list of "
"multiple AttentionMechanism instances, saw type: %s"
% type(attention_mechanism).__name__)
attention_mechanisms = (attention_mechanism,)
if cell_input_fn is None:
cell_input_fn = (
lambda inputs, attention: tf.concat([inputs, attention], -1))
else:
if not callable(cell_input_fn):
raise TypeError(
"cell_input_fn must be callable, saw type: %s"
% type(cell_input_fn).__name__)
if attention_layer_size is not None and attention_layer is not None:
raise ValueError("Only one of attention_layer_size and attention_layer "
"should be set")
if attention_layer_size is not None:
attention_layer_sizes = tuple(
attention_layer_size
if isinstance(attention_layer_size, (list, tuple))
else (attention_layer_size,))
if len(attention_layer_sizes) != len(attention_mechanisms):
raise ValueError(
"If provided, attention_layer_size must contain exactly one "
"integer per attention_mechanism, saw: %d vs %d"
% (len(attention_layer_sizes), len(attention_mechanisms)))
self._attention_layers = tuple(
tf.layers.Dense(
attention_layer_size,
name="attention_layer",
use_bias=False,
dtype=attention_mechanisms[i].dtype)
for i, attention_layer_size in enumerate(attention_layer_sizes))
self._attention_layer_size = sum(attention_layer_sizes)
elif attention_layer is not None:
self._attention_layers = tuple(
attention_layer
if isinstance(attention_layer, (list, tuple))
else (attention_layer,))
if len(self._attention_layers) != len(attention_mechanisms):
raise ValueError(
"If provided, attention_layer must contain exactly one "
"layer per attention_mechanism, saw: %d vs %d"
% (len(self._attention_layers), len(attention_mechanisms)))
self._attention_layer_size = sum(
layer.compute_output_shape(
[None,
cell.output_size + mechanism.values.shape[-1].value])[-1].value
for layer, mechanism in zip(
self._attention_layers, attention_mechanisms))
else:
self._attention_layers = None
self._attention_layer_size = sum(
attention_mechanism.values.get_shape()[-1].value
for attention_mechanism in attention_mechanisms)
self._cell = cell
self._attention_mechanisms = attention_mechanisms
self._cell_input_fn = cell_input_fn
self._output_attention = output_attention
self._alignment_history = alignment_history
with tf.name_scope(name, "AttentionWrapperInit"):
if initial_cell_state is None:
self._initial_cell_state = None
else:
final_state_tensor = nest.flatten(initial_cell_state)[-1]
state_batch_size = (
final_state_tensor.shape[0].value
or tf.shape(final_state_tensor)[0])
error_message = (
"When constructing AttentionWrapper %s: " % self._base_name +
"Non-matching batch sizes between the memory "
"(encoder output) and initial_cell_state. Are you using "
"the BeamSearchDecoder? You may need to tile your initial state "
"via the tf.contrib.seq2seq.tile_batch function with argument "
"multiple=beam_width.")
with tf.control_dependencies(
self._batch_size_checks(state_batch_size, error_message)):
self._initial_cell_state = nest.map_structure(
lambda s: tf.identity(s, name="check_initial_cell_state"),
initial_cell_state)
def force_build(self, cell_input_shape, query_shape):
def _build_cell(cell, input_shape):
if isinstance(cell, tf.nn.rnn_cell.DropoutWrapper):
_build_cell(cell._cell, input_shape)
elif isinstance(cell, tf.nn.rnn_cell.ResidualWrapper):
_build_cell(cell._cell, input_shape)
else:
cell.build(input_shape)
_build_cell(self._cell,
(cell_input_shape[-1] + self._attention_layer_size))
for am in self._attention_mechanisms:
am.build(query_shape)
self.built = True
def _batch_size_checks(self, batch_size, error_message):
return []
def _item_or_tuple(self, seq):
"""Returns `seq` as tuple or the singular element.
Which is returned is determined by how the AttentionMechanism(s) were passed
to the constructor.
Args:
seq: A non-empty sequence of items or generator.
Returns:
Either the values in the sequence as a tuple if AttentionMechanism(s)
were passed to the constructor as a sequence or the singular element.
"""
t = tuple(seq)
if self._is_multi:
return t
else:
return t[0]
@property
def output_size(self):
if self._output_attention:
return self._attention_layer_size
else:
return self._cell.output_size
@property
def state_size(self):
"""The `state_size` property of `AttentionWrapper`.
Returns:
An `AttentionWrapperState` tuple containing shapes used by this object.
"""
return AttentionWrapperState(
cell_state=self._cell.state_size,
time=tf.TensorShape([]),
attention=self._attention_layer_size,
alignments=self._item_or_tuple(
a.alignments_size for a in self._attention_mechanisms),
attention_state=self._item_or_tuple(
a.state_size for a in self._attention_mechanisms),
alignment_history=self._item_or_tuple(
a.alignments_size if self._alignment_history else ()
for a in self._attention_mechanisms)) # sometimes a TensorArray
def zero_state(self, batch_size, dtype):
"""Return an initial (zero) state tuple for this `AttentionWrapper`.
**NOTE** Please see the initializer documentation for details of how
to call `zero_state` if using an `AttentionWrapper` with a
`BeamSearchDecoder`.
Args:
batch_size: `0D` integer tensor: the batch size.
dtype: The internal state data type.
Returns:
An `AttentionWrapperState` tuple containing zeroed out tensors and,
possibly, empty `TensorArray` objects.
Raises:
ValueError: (or, possibly at runtime, InvalidArgument), if
`batch_size` does not match the output size of the encoder passed
to the wrapper object at initialization time.
"""
with tf.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
if self._initial_cell_state is not None:
cell_state = self._initial_cell_state
else:
cell_state = self._cell.zero_state(batch_size, dtype)
error_message = (
"When calling zero_state of AttentionWrapper %s: " % self._base_name +
"Non-matching batch sizes between the memory "
"(encoder output) and the requested batch size. Are you using "
"the BeamSearchDecoder? If so, make sure your encoder output has "
"been tiled to beam_width via tf.contrib.seq2seq.tile_batch, and "
"the batch_size= argument passed to zero_state is "
"batch_size * beam_width.")
with tf.control_dependencies(
self._batch_size_checks(batch_size, error_message)):
cell_state = nest.map_structure(
lambda s: tf.identity(s, name="checked_cell_state"),
cell_state)
initial_alignments = [
attention_mechanism.initial_alignments(batch_size, dtype)
for attention_mechanism in self._attention_mechanisms]
return AttentionWrapperState(
cell_state=cell_state,
time=tf.zeros([], dtype=tf.int64),
attention=_zero_state_tensors(self._attention_layer_size, batch_size,
dtype),
alignments=self._item_or_tuple(initial_alignments),
attention_state=self._item_or_tuple(
attention_mechanism.initial_state(batch_size, dtype)
for attention_mechanism in self._attention_mechanisms),
alignment_history=self._item_or_tuple(
tf.TensorArray(
dtype,
size=0,
dynamic_size=True,
element_shape=alignment.shape)
if self._alignment_history else ()
for alignment in initial_alignments))
def call(self, inputs, state):
"""Perform a step of attention-wrapped RNN.
- Step 1: Mix the `inputs` and previous step's `attention` output via
`cell_input_fn`.
- Step 2: Call the wrapped `cell` with this input and its previous state.
- Step 3: Score the cell's output with `attention_mechanism`.
- Step 4: Calculate the alignments by passing the score through the
`normalizer`.
- Step 5: Calculate the context vector as the inner product between the
alignments and the attention_mechanism's values (memory).
- Step 6: Calculate the attention output by concatenating the cell output
and context through the attention layer (a linear layer with
`attention_layer_size` outputs).
Args:
inputs: (Possibly nested tuple of) Tensor, the input at this time step.
state: An instance of `AttentionWrapperState` containing
tensors from the previous time step.
Returns:
A tuple `(attention_or_cell_output, next_state)`, where:
- `attention_or_cell_output` depending on `output_attention`.
- `next_state` is an instance of `AttentionWrapperState`
containing the state calculated at this time step.
Raises:
TypeError: If `state` is not an instance of `AttentionWrapperState`.
"""
if not isinstance(state, AttentionWrapperState):
raise TypeError("Expected state to be instance of AttentionWrapperState. "
"Received type %s instead." % type(state))
# Step 1: Calculate the true inputs to the cell based on the
# previous attention value.
cell_inputs = self._cell_input_fn(inputs, state.attention)
cell_state = state.cell_state
cell_output, next_cell_state = self._cell(cell_inputs, cell_state)
cell_batch_size = (
cell_output.shape[0].value or tf.shape(cell_output)[0])
error_message = (
"When applying AttentionWrapper %s: " % self.name +
"Non-matching batch sizes between the memory "
"(encoder output) and the query (decoder output). Are you using "
"the BeamSearchDecoder? You may need to tile your memory input via "
"the tf.contrib.seq2seq.tile_batch function with argument "
"multiple=beam_width.")
with tf.control_dependencies(
self._batch_size_checks(cell_batch_size, error_message)):
cell_output = tf.identity(
cell_output, name="checked_cell_output")
if self._is_multi:
previous_attention_state = state.attention_state
previous_alignment_history = state.alignment_history
else:
previous_attention_state = [state.attention_state]
previous_alignment_history = [state.alignment_history]
all_alignments = []
all_attentions = []
all_attention_states = []
maybe_all_histories = []
for i, attention_mechanism in enumerate(self._attention_mechanisms):
attention, alignments, next_attention_state = _compute_attention(
attention_mechanism, cell_output, previous_attention_state[i],
self._attention_layers[i] if self._attention_layers else None)
alignment_history = previous_alignment_history[i].write(
state.time, alignments) if self._alignment_history else ()
all_attention_states.append(next_attention_state)
all_alignments.append(alignments)
all_attentions.append(attention)
maybe_all_histories.append(alignment_history)
attention = tf.concat(all_attentions, 1)
next_state = AttentionWrapperState(
time=state.time + 1,
cell_state=next_cell_state,
attention=attention,
attention_state=self._item_or_tuple(all_attention_states),
alignments=self._item_or_tuple(all_alignments),
alignment_history=self._item_or_tuple(maybe_all_histories))
if self._output_attention:
return attention, next_state
else:
return cell_output, next_state
class BahdanauAttentionFusedLayer(object):
"""Fused attention layer using Bahdanau attention.
Only used during training.
"""
def __init__(self,
num_units,
memory,
memory_sequence_length=None,
dtype=None,
name="BahdanauAttention"):
self.v = tf.get_variable("attention_v", [num_units], dtype=dtype)
# TODO(jamesqin): I have no idea why the following is happening.
# If not doing read_value(), later in backprop it would complain about
# AddN op is fed with "resource" dtype input, which shouldn't be happening.
if isinstance(self.v, resource_variable_ops.ResourceVariable):
self.v = self.v.read_value()
# Scalar used in weight normalization
self.g = tf.get_variable(
"attention_g", dtype=dtype,
initializer=tf.constant_initializer(math.sqrt((1. / num_units))),
shape=())
if isinstance(self.g, resource_variable_ops.ResourceVariable):
self.g = self.g.read_value()
# Bias added prior to the nonlinearity
self.b = tf.get_variable(
"attention_b", [num_units], dtype=dtype,
initializer=tf.zeros_initializer())
if isinstance(self.b, resource_variable_ops.ResourceVariable):
self.b = self.b.read_value()
self.query_layer = tf.layers.Dense(
num_units, name="query_layer", use_bias=False, dtype=dtype)
self.memory_layer = tf.layers.Dense(
num_units, name="memory_layer", use_bias=False, dtype=dtype)
def _mask_score(score, memory_sequence_length):
# score is [batch, query_max_t, memory_t]
if memory_sequence_length is None:
return score
# [batch, 1]
memory_sequence_length = tf.expand_dims(memory_sequence_length, -1)
query_max_t = tf.shape(score)[1]
# [batch, query_t]
memory_sequence_length = tf.broadcast_to(
memory_sequence_length,
[tf.shape(memory_sequence_length)[0], query_max_t])
# [batch, query_t, memory_t]
score_mask = tf.sequence_mask(
memory_sequence_length, maxlen=tf.shape(score)[-1])
score_mask_value = tf.as_dtype(score.dtype).as_numpy_dtype(-np.inf)
score_mask_values = score_mask_value * tf.ones_like(score)
return tf.where(score_mask, score, score_mask_values)
# score: [batch, query_max_t, memory_t]
# memory_seq_length: [batch]
self._probability_fn = lambda score, prev: ( # pylint:disable=g-long-lambda
tf.nn.softmax(_mask_score(score, memory_sequence_length)))
with tf.name_scope(name, "BaseAttentionMechanismInit", [memory]):
# Mask padded memory.
# [batch, memory_t, units]
self.values = _prepare_memory(
memory, memory_sequence_length, check_inner_dims_defined=True)
# [batch, memory_t, units]
self.keys = self.memory_layer(self.values)
def __call__(self, queries):
"""Invoke the layer.
Args:
queries: [batch, queryt_t, query_size]
Returns:
attention: [batch, query_t, num_units]
"""
# Compute score for all queries, returns [batch, query_t, memory_t].
# The var scope naming is chosen to build inference graph correctly.
with tf.variable_scope(
"multi_rnn_cell/cell_0_attention/attention/bahdanau_attention",
[queries]):
# [batch, query_t, query_size] -> [batch, query_t, num_units]
processed_queries = self.query_layer(queries)
# [batch, memory_t, num_units] -> [batch, 1, memory_t, num_units]
keys = tf.expand_dims(self.keys, axis=1)
# [batch, query_t, num_units] -> [batch, query_t, 1, num_units]
processed_queries = tf.expand_dims(processed_queries, axis=2)
# [batch, 1, memory_t, units] + [batch, query_t, 1, units] + [units] ->
# [batch, query_t, memory_t, units]
activation = tf.tanh(keys + processed_queries + self.b)
# [units, 1]
normed_v = self.g * self.v * tf.rsqrt(tf.reduce_sum(tf.square(self.v)))
v = tf.expand_dims(normed_v, -1)
# [batch, query_t, memory_t, units] * [units, 1] ->
# [batch, query_t, memory_t, 1]
# [batch, query_t, memory_t, 1] --(squeeze)--> [batch, query_t, memory_t]
score = tf.squeeze(tf.tensordot(activation, v, axes=1), axis=-1)
# Compute alignment
# bahdanau attention doesn't use the attention state in prob func (softmax)
unused_state = None
# [batch, query_t, memory_t]
alignments = self._probability_fn(score, unused_state)
# Note: slow batched matmul in fp16
# [batch, query_t, memory_t] * [ batch, memory_t, units] ->
# [batch, query_t, units]
attention = math_utils.BatchMatMul(alignments, self.values)
return attention
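# Illustrative usage of BahdanauAttentionFusedLayer (a minimal sketch; all
# shapes below are made up): score a whole batch of decoder query steps
# against the encoder memory in one fused call, as done during training.
def _example_fused_attention():
  memory = tf.zeros([4, 20, 512])  # [batch, memory_t, units]
  lengths = tf.fill([4], 20)       # valid memory lengths per batch entry
  layer = BahdanauAttentionFusedLayer(
      num_units=512, memory=memory, memory_sequence_length=lengths,
      dtype=tf.float32)
  queries = tf.zeros([4, 15, 512])  # [batch, query_t, query_size]
  return layer(queries)             # -> [4, 15, 512] attention context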
| DeepLearningExamples-master | TensorFlow/Translation/GNMT/attention_wrapper.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""LSTM Block Cell ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import tensorflow as tf
try:
from tensorflow.python.ops import gen_rnn_ops as gen_ops
except ImportError:
from tensorflow.contrib.rnn.ops import gen_lstm_ops as gen_ops
from tensorflow.python.framework import function
from tensorflow.python.layers import base as base_layer
class LSTMBlockWrapper(base_layer.Layer):
"""This is a helper class that provides housekeeping for LSTM cells.
  This may be useful for alternative LSTM and similar types of cells.
  Subclasses must implement the `_call_cell` method and `num_units` property.
"""
@abc.abstractproperty
def num_units(self):
"""Number of units in this cell (output dimension)."""
pass
@abc.abstractmethod
def _call_cell(self, inputs, initial_cell_state, initial_output, dtype,
sequence_length):
"""Run this LSTM on inputs, starting from the given state.
This method must be implemented by subclasses and does the actual work
of calling the cell.
Args:
inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`
initial_cell_state: initial value for cell state, shape `[batch_size,
self._num_units]`
initial_output: initial value of cell output, shape `[batch_size,
self._num_units]`
dtype: The data type for the initial state and expected output.
sequence_length: Specifies the length of each sequence in inputs. An int32
or int64 vector (tensor) size [batch_size], values in [0, time_len) or
None.
Returns:
A pair containing:
- State: A `3-D` tensor of shape `[time_len, batch_size, output_size]`
- Output: A `3-D` tensor of shape `[time_len, batch_size, output_size]`
"""
pass
def call(self, inputs, initial_state=None, dtype=None, sequence_length=None,
mask_output=False):
"""Run this LSTM on inputs, starting from the given state.
Args:
inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`.
initial_state: a tuple `(initial_cell_state, initial_output)` with tensors
of shape `[batch_size, self._num_units]`. If this is not provided, the
cell is expected to create a zero initial state of type `dtype`.
dtype: The data type for the initial state and expected output. Required
if `initial_state` is not provided or RNN state has a heterogeneous
dtype.
sequence_length: Specifies the length of each sequence in inputs. An
`int32` or `int64` vector (tensor) size `[batch_size]`, values in `[0,
time_len).`
Defaults to `time_len` for each element.
      mask_output: Python boolean. If True, zero out the part of the output
        beyond each entry's sequence_length.
Returns:
A pair containing:
- Output: A `3-D` tensor of shape `[time_len, batch_size, output_size]`
or a list of time_len tensors of shape `[batch_size, output_size]`,
to match the type of the `inputs`.
- Final state: a tuple `(cell_state, output)` matching `initial_state`.
Raises:
ValueError: in case of shape mismatches
"""
is_list = isinstance(inputs, list)
if is_list:
inputs = tf.stack(inputs)
inputs_shape = inputs.get_shape().with_rank(3)
if not inputs_shape[2]:
raise ValueError("Expecting inputs_shape[2] to be set: %s" % inputs_shape)
batch_size = inputs_shape[1].value
if batch_size is None:
batch_size = tf.shape(inputs)[1]
time_len = inputs_shape[0].value
if time_len is None:
time_len = tf.shape(inputs)[0]
# Provide default values for initial_state and dtype
if initial_state is None:
if dtype is None:
raise ValueError("Either initial_state or dtype needs to be specified")
z = tf.zeros(
tf.stack([batch_size, self.num_units]), dtype=dtype)
initial_state = z, z
else:
if len(initial_state) != 2:
raise ValueError(
"Expecting initial_state to be a tuple with length 2 or None")
if dtype is None:
dtype = initial_state[0].dtype
# create the actual cell
if sequence_length is not None:
sequence_length = tf.convert_to_tensor(sequence_length)
initial_cell_state, initial_output = initial_state # pylint: disable=unpacking-non-sequence
cell_states, outputs = self._call_cell(
inputs, initial_cell_state, initial_output, dtype, sequence_length)
if sequence_length is not None:
if mask_output:
# Mask out the part beyond sequence_length.
        # In MLPerf we don't do it because the output is masked when computing
        # the loss, and in inference we don't use this layer.
mask = tf.transpose(
tf.sequence_mask(sequence_length, time_len, dtype=dtype),
[1, 0])
mask = tf.tile(
tf.expand_dims(mask, axis=-1), [1, 1, self.num_units])
outputs *= mask
"""
# Prepend initial states to cell_states and outputs for indexing to work
# correctly,since we want to access the last valid state at
# sequence_length - 1, which can even be -1, corresponding to the
# initial state.
mod_cell_states = tf.concat(
[tf.expand_dims(initial_cell_state, axis=0), cell_states], 0)
mod_outputs = tf.concat(
[tf.expand_dims(initial_output, axis=0), outputs], 0)
final_cell_state = self._gather_states(mod_cell_states,
sequence_length, batch_size)
final_output = self._gather_states(mod_outputs, sequence_length,
batch_size)
"""
      # sequence_length can't be zero in our implementation, so pass
      # sequence_length - 1 as the gather indices.
mod_cell_states = cell_states
mod_outputs = outputs
final_cell_state = self._gather_states(mod_cell_states,
sequence_length - 1, batch_size)
final_output = self._gather_states(mod_outputs, sequence_length - 1,
batch_size)
else:
# No sequence_lengths used: final state is the last state
final_cell_state = cell_states[-1]
final_output = outputs[-1]
if is_list:
# Input was a list, so return a list
outputs = tf.unstack(outputs)
final_state = tf.nn.rnn_cell.LSTMStateTuple(final_cell_state, final_output)
return outputs, final_state
def _gather_states(self, data, indices, batch_size):
"""Produce `out`, s.t. out(i, j) = data(indices(i), i, j)."""
gather_indices = tf.stack([indices, tf.range(batch_size)], axis=1)
# TODO(jamesqin): ScatterNd doesn't support fp16 on GPU.
return tf.gather_nd(data, gather_indices)
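# Illustrative check of the gather performed by _gather_states (a minimal
# sketch with made-up shapes): for a time-major [time_len, batch, dim] tensor,
# pick data[indices[i], i, :] for every batch entry i.
def _example_gather_states():
  data = tf.reshape(tf.range(2 * 3 * 4, dtype=tf.float32), [2, 3, 4])
  indices = tf.constant([1, 0, 1])  # one time index per batch entry
  gather_indices = tf.stack([indices, tf.range(3)], axis=1)
  return tf.gather_nd(data, gather_indices)  # -> [3, 4]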
class LSTMBlockFusedCell(LSTMBlockWrapper):
"""FusedRNNCell implementation of LSTM.
  This is an extremely efficient LSTM implementation that uses a single TF op
  for the entire LSTM. It should be both faster and more memory-efficient than
  LSTMBlockCell.
The implementation is based on: http://arxiv.org/abs/1409.2329.
We add forget_bias (default: 1) to the biases of the forget gate in order to
reduce the scale of forgetting in the beginning of the training.
The variable naming is consistent with `rnn_cell_impl.LSTMCell`.
"""
def __init__(self,
num_units,
forget_bias=1.0,
cell_clip=None,
use_peephole=False,
reuse=None,
dtype=None,
name="lstm_cell"):
"""Initialize the LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (see above).
cell_clip: clip the cell to this value. Default is no cell clipping.
use_peephole: Whether to use peephole connections or not.
reuse: (optional) boolean describing whether to reuse variables in an
existing scope. If not `True`, and the existing scope already has the
given variables, an error is raised.
dtype: the dtype of variables of this layer.
name: String, the name of the layer. Layers with the same name will
share weights, but to avoid mistakes we require reuse=True in such
cases. By default this is "lstm_cell", for variable-name compatibility
with `tf.nn.rnn_cell.LSTMCell`.
"""
super(LSTMBlockFusedCell, self).__init__(
_reuse=reuse, name=name, dtype=dtype)
self._num_units = num_units
self._forget_bias = forget_bias
self._cell_clip = cell_clip if cell_clip is not None else -1
self._use_peephole = use_peephole
# Inputs must be 3-dimensional.
self.input_spec = base_layer.InputSpec(ndim=3)
@property
def num_units(self):
"""Number of units in this cell (output dimension)."""
return self._num_units
def build(self, input_shape):
input_size = input_shape[2].value
self._kernel = self.add_variable(
"kernel", [input_size + self._num_units, self._num_units * 4])
self._bias = self.add_variable(
"bias", [self._num_units * 4],
initializer=tf.constant_initializer(0.0))
if self._use_peephole:
self._w_i_diag = self.add_variable("w_i_diag", [self._num_units])
self._w_f_diag = self.add_variable("w_f_diag", [self._num_units])
self._w_o_diag = self.add_variable("w_o_diag", [self._num_units])
self.built = True
def _call_cell(self,
inputs,
initial_cell_state=None,
initial_output=None,
dtype=None,
sequence_length=None):
"""Run this LSTM on inputs, starting from the given state.
Args:
inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`
initial_cell_state: initial value for cell state, shape `[batch_size,
self._num_units]`
initial_output: initial value of cell output, shape `[batch_size,
self._num_units]`
dtype: The data type for the initial state and expected output.
sequence_length: Specifies the length of each sequence in inputs. An
        `int32` or `int64` vector (tensor) of size `[batch_size]` with values
        in `[0, time_len)`, or None.
Returns:
A pair containing:
- Cell state (cs): A `3-D` tensor of shape `[time_len, batch_size,
output_size]`
- Output (h): A `3-D` tensor of shape `[time_len, batch_size,
output_size]`
"""
inputs_shape = inputs.get_shape().with_rank(3)
time_len = inputs_shape[0].value
if time_len is None:
time_len = tf.shape(inputs)[0]
if self._use_peephole:
wci = self._w_i_diag
wco = self._w_o_diag
wcf = self._w_f_diag
else:
wci = wcf = wco = tf.zeros([self._num_units], dtype=dtype)
if sequence_length is None:
max_seq_len = tf.to_int64(time_len)
else:
max_seq_len = tf.to_int64(tf.reduce_max(sequence_length))
_, cs, _, _, _, _, h = gen_ops.block_lstm(
seq_len_max=max_seq_len,
x=inputs,
cs_prev=initial_cell_state,
h_prev=initial_output,
w=self._kernel,
wci=wci,
wcf=wcf,
wco=wco,
b=self._bias,
forget_bias=self._forget_bias,
cell_clip=self._cell_clip,
use_peephole=self._use_peephole)
return cs, h
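# Hedged usage sketch (added for illustration; assumes TF1 graph mode and the
# shapes documented above): running a fused LSTM over a time-major batch.
def _example_fused_cell():
  import tensorflow as tf
  inputs = tf.zeros([10, 4, 8])  # [time_len, batch_size, input_size]
  cell = LSTMBlockFusedCell(num_units=16)
  # The final state is an LSTMStateTuple(cell_state, output).
  outputs, (final_c, final_h) = cell(inputs, dtype=tf.float32)
  return outputs, final_c, final_h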
| DeepLearningExamples-master | TensorFlow/Translation/GNMT/block_lstm.py |
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Estimator functions supporting running on TPU."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import codecs
import os
import subprocess
import time
import numpy as np
import tensorflow as tf
from tensorflow.contrib.compiler import xla
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.framework import ops as tf_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.util import tf_contextlib
import gnmt_model
import model_helper
from utils import iterator_utils
from utils import misc_utils
from utils import nmt_utils
from utils import vocab_utils
from variable_mgr import variable_mgr
from variable_mgr import variable_mgr_util
from benchmark_hooks import BenchmarkHook
def _get_custom_getter():
"""Returns a custom getter that this class's methods must be called under.
All methods of this class must be called under a variable scope that was
passed this custom getter. Example:
```python
network = ConvNetBuilder(...)
with tf.variable_scope('cg', custom_getter=network.get_custom_getter()):
network.conv(...)
# Call more methods of network here
```
Currently, this custom getter only does anything if self.use_tf_layers is
True. In that case, it causes variables to be stored as dtype
self.variable_type, then casted to the requested dtype, instead of directly
storing the variable as the requested dtype.
"""
def inner_custom_getter(getter, *args, **kwargs):
"""Custom getter that forces variables to have type self.variable_type."""
cast_to_float16 = False
requested_dtype = kwargs["dtype"]
if requested_dtype == tf.float16:
# Only change the variable dtype if doing so does not decrease variable
# precision.
kwargs["dtype"] = tf.float32
cast_to_float16 = True
var = getter(*args, **kwargs)
with tf_ops.init_scope():
# This if statement is needed to guard the cast, because batch norm
# assigns directly to the return value of this custom getter. The cast
# makes the return value not a variable so it cannot be assigned. Batch
# norm variables are always in fp32 so this if statement is never
# triggered for them.
if cast_to_float16:
var = tf.cast(var, tf.float16)
return var
return inner_custom_getter
@tf_contextlib.contextmanager
def mixed_precision_scope():
with tf.variable_scope("", custom_getter=_get_custom_getter()) as varscope:
yield varscope
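# Hedged usage sketch (added for illustration, not part of the original
# file): under mixed_precision_scope(), a variable requested in fp16 is
# backed by an fp32 variable and cast on read, so `v` below is a cast
# tensor, not the variable itself.
def _example_mixed_precision_variable():
  import tensorflow as tf
  with mixed_precision_scope():
    v = tf.get_variable("example_w", shape=[4], dtype=tf.float16)
  assert v.dtype == tf.float16
  return v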
def maybe_xla_compile(hparams, fn, *args):
pure_fn = lambda: fn(*args)
if hparams and hparams.xla_compile:
return xla.compile(pure_fn)
else:
return pure_fn()
class ModelFnFactory(object):
"""docstring."""
def __init__(self, hparams):
self.hparams = hparams
def build_graph_dist_strategy(self, features, labels, mode, params):
"""Model function."""
del labels, params
misc_utils.print_out("Running dist_strategy mode_fn")
hparams = self.hparams
# Create a GNMT model for training.
# assert (hparams.encoder_type == "gnmt" or
# hparams.attention_architecture in ["gnmt", "gnmt_v2"])
with mixed_precision_scope():
model = gnmt_model.GNMTModel(hparams, mode=mode, features=features)
if mode == tf.contrib.learn.ModeKeys.INFER:
sample_ids = model.sample_id
reverse_target_vocab_table = lookup_ops.index_to_string_table_from_file(
hparams.tgt_vocab_file, default_value=vocab_utils.UNK)
sample_words = reverse_target_vocab_table.lookup(
tf.to_int64(sample_ids))
      # Make sure the outputs are of shape [batch_size, time] or, when using
      # beam search, [beam_width, batch_size, time].
if hparams.time_major:
sample_words = tf.transpose(sample_words)
elif sample_words.shape.ndims == 3:
# beam search output in [batch_size, time, beam_width] shape.
sample_words = tf.transpose(sample_words, [2, 0, 1])
predictions = {"predictions": sample_words}
# return loss, vars, grads, predictions, train_op, scaffold
return None, None, None, predictions, None, None
elif mode == tf.contrib.learn.ModeKeys.TRAIN:
loss = model.train_loss
train_op = model.update
return loss, model.params, model.grads, None, train_op, None
else:
raise ValueError("Unknown mode in model_fn: %s" % mode)
def _create_loss_scale_vars(self):
"""docstring."""
# Create loss scale vars if necessary
hparams = self.hparams
loss_scale, loss_scale_normal_steps = None, None
if hparams.use_fp16:
loss_scale = tf.get_variable(
"loss_scale",
initializer=float(hparams.fp16_loss_scale),
dtype=tf.float32,
trainable=False)
if hparams.enable_auto_loss_scale:
loss_scale_normal_steps = tf.get_variable(
"loss_scale_normal_steps", initializer=0, trainable=False)
return loss_scale, loss_scale_normal_steps
def _shard_inputs(self, features, num_towers):
"""docstring."""
if num_towers == 1:
return [features]
source = features["source"]
target_input = features["target_input"]
target_output = features["target_output"]
source_seq_length = features["source_sequence_length"]
target_seq_length = features["target_sequence_length"]
# Compute each split sizes.
global_batch_size = tf.size(source_seq_length)
tower_batch_size = tf.cast(global_batch_size / num_towers, dtype=tf.int32)
split_sizes = [tower_batch_size] * (num_towers - 1)
split_sizes.append(global_batch_size - (num_towers - 1) * tower_batch_size)
sources = tf.split(source, split_sizes, axis=0)
target_inputs = tf.split(target_input, split_sizes, axis=0)
target_outputs = tf.split(target_output, split_sizes, axis=0)
source_sequence_lengths = tf.split(source_seq_length, split_sizes)
target_sequence_lengths = tf.split(target_seq_length, split_sizes)
tower_features = []
for i in range(num_towers):
tower_features.append({
"source": sources[i],
"target_input": target_inputs[i],
"target_output": target_outputs[i],
"source_sequence_length": source_sequence_lengths[i],
"target_sequence_length": target_sequence_lengths[i]
})
return tower_features
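  # Numeric sketch (added for illustration, not part of the original class):
  # the split-size arithmetic above gives the last tower the remainder when
  # the global batch doesn't divide evenly across towers.
  @staticmethod
  def _example_split_sizes(global_batch_size=10, num_towers=4):
    tower_batch_size = global_batch_size // num_towers  # 2
    split_sizes = [tower_batch_size] * (num_towers - 1)
    split_sizes.append(global_batch_size - (num_towers - 1) * tower_batch_size)
    assert sum(split_sizes) == global_batch_size  # [2, 2, 2, 4]
    return split_sizes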
def get_optimizer(self, hparams, learning_rate):
"""docstring."""
if hparams.optimizer == "sgd":
opt = tf.train.GradientDescentOptimizer(learning_rate)
elif hparams.optimizer == "adam":
opt = tf.train.AdamOptimizer(learning_rate)
else:
raise ValueError("Unknown optimizer type %s" % hparams.optimizer)
return opt
def _compute_tower_grads(self, tower_loss, tower_params, learning_rate, use_fp16=False,
loss_scale=None, colocate_gradients_with_ops=True):
"""docstring."""
if use_fp16:
assert loss_scale
scaled_loss = tf.multiply(
tower_loss,
tf.convert_to_tensor(loss_scale, dtype=tower_loss.dtype),
name="scaling_loss")
else:
scaled_loss = tower_loss
opt = self.get_optimizer(self.hparams, learning_rate)
    grads_and_vars = opt.compute_gradients(
        scaled_loss, tower_params,
        colocate_gradients_with_ops=colocate_gradients_with_ops)
grads = [x for (x, _) in grads_and_vars]
assert grads
for g in grads:
assert g.dtype == tf.float32, "grad.dtype isn't fp32: %s" % g.name
    # Warn about parameters with missing gradients.
    for var, grad in zip(tower_params, grads):
      if grad is None:
        misc_utils.print_out("%s gradient is None!" % var.name)
    # Downscale grads back to their true values.
if use_fp16:
grads = [
grad * tf.reciprocal(loss_scale) for grad in grads
]
return tower_params, grads, opt
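  # Minimal numeric sketch (added for illustration, not from the original):
  # the fp16 loss-scaling round trip above multiplies the loss by
  # `loss_scale` before backprop so tiny gradients survive fp16, then
  # multiplies each gradient by 1/loss_scale to recover its true value.
  @staticmethod
  def _example_loss_scaling():
    loss_scale = 1024.0
    true_grad = 1e-6
    scaled_grad = true_grad * loss_scale  # large enough to represent in fp16
    recovered = scaled_grad * (1.0 / loss_scale)
    assert abs(recovered - true_grad) < 1e-12
    return recovered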
def _get_variable_mgr(self, hparams):
"""docstring."""
assert not hparams.use_dist_strategy
# A hack to create a container object that later get passed to VariableMgr
# __init__() as the ill-designed `benchmark_cnn` argument.
class Config(object):
pass
config = Config()
config.params = Config()
params = config.params
    # This is num_gpus per worker, a.k.a. the number of towers.
params.num_gpus = hparams.num_gpus
# TODO(jamesqin): make more robust
params.use_resource_vars = hparams.use_resource_vars
params.use_fp16 = hparams.use_fp16
params.compact_gradient_transfer = hparams.compact_gradient_transfer
    # For NMT, only strong consistency is used.
params.variable_consistency = "strong"
params.all_reduce_spec = hparams.all_reduce_spec
params.gpu_indices = hparams.gpu_indices
params.agg_small_grads_max_bytes = hparams.agg_small_grads_max_bytes
params.agg_small_grads_max_group = hparams.agg_small_grads_max_group
params.hierarchical_copy = hparams.hierarchical_copy
params.network_topology = hparams.network_topology
params.local_parameter_device = hparams.local_parameter_device
params.gradient_repacking = hparams.gradient_repacking
params.allreduce_merge_scope = hparams.allreduce_merge_scope
config.enable_auto_loss_scale = hparams.enable_auto_loss_scale
if hparams.num_gpus > 0:
config.raw_devices = ["gpu:%i" % i for i in range(hparams.num_gpus)]
else:
config.raw_devices = ["cpu:0"]
config.devices = config.raw_devices
return variable_mgr.VariableMgrLocalReplicated(
config, config.params.all_reduce_spec,
config.params.agg_small_grads_max_bytes,
config.params.agg_small_grads_max_group,
config.params.allreduce_merge_scope)
def _print_varinfo(self, var_params, tower_id):
# Print trainable variables
misc_utils.print_out("# Trainable variables for tower: %d" % tower_id)
misc_utils.print_out(
"Format: <name>, <shape>, <dtype>, <(soft) device placement>")
for param in var_params:
misc_utils.print_out(
" %s, %s, %s, %s" % (param.name, str(param.get_shape()),
param.dtype.name, param.op.device))
misc_utils.print_out("Total params size: %.2f GB" % (4. * np.sum([
p.get_shape().num_elements()
for p in var_params
if p.get_shape().is_fully_defined()
]) / 2**30))
def build_graph(self, features, labels, mode, params):
"""docstring."""
del labels, params
misc_utils.print_out("Running fast mode_fn")
hparams = self.hparams
# Create global_step
tf.train.get_or_create_global_step()
if mode == tf.contrib.learn.ModeKeys.INFER:
# Doing inference only on one GPU
inf_hparams = tf.contrib.training.HParams(**hparams.values())
inf_hparams.set_hparam("num_gpus", 1)
# Inference is done in fp32 and in the same way as that of dist_strategy.
inf_hparams.set_hparam("use_fp16", False)
misc_utils.print_out("inference hparmas:")
misc_utils.print_hparams(inf_hparams)
# Create variable_mgr
var_mgr = self._get_variable_mgr(inf_hparams)
with mixed_precision_scope(), tf.device("gpu:0"), tf.name_scope(
"tower_0"), var_mgr.create_outer_variable_scope(0):
model = gnmt_model.GNMTModel(inf_hparams, mode=mode, features=features)
sample_ids = model.sample_id
reverse_target_vocab_table = lookup_ops.index_to_string_table_from_file(
inf_hparams.tgt_vocab_file, default_value=vocab_utils.UNK)
sample_words = reverse_target_vocab_table.lookup(
tf.to_int64(sample_ids))
        # Make sure the outputs are of shape [batch_size, time] or, when
        # using beam search, [beam_width, batch_size, time].
if inf_hparams.time_major:
sample_words = tf.transpose(sample_words)
elif sample_words.shape.ndims == 3:
# beam search output in [batch_size, time, beam_width] shape.
sample_words = tf.transpose(sample_words, [2, 0, 1])
predictions = {"predictions": sample_words}
# return loss, vars, grads, predictions, train_op, scaffold
return None, None, None, predictions, None, None
elif mode == tf.contrib.learn.ModeKeys.TRAIN:
num_towers = hparams.num_gpus
# Shard inputs
tower_features = self._shard_inputs(features, num_towers)
# Create loss scale vars if necessary
loss_scale, loss_scale_normal_steps = self._create_loss_scale_vars()
# Create variable_mgr
var_mgr = self._get_variable_mgr(hparams)
# Build per-tower fprop and bprop
devices = var_mgr.get_devices()
tower_gradvars = []
tower_scopes = []
var_scopes = []
train_losses = []
learning_rates = []
batch_sizes = []
opts = []
def fprop_and_bprop(tid):
"""docstring."""
model = gnmt_model.GNMTModel(
hparams, mode=mode, features=tower_features[tid])
        # Sync training: the model must have built its learning rate.
        assert model.learning_rate is not None
        # The following handles shouldn't be built when doing manual gradient
        # aggregation; this function computes and aggregates grads itself.
        assert model.grad_norm is None
        assert model.update is None
tower_loss = model.train_loss
# Only check loss numerics if in fp16
if hparams.use_fp16 and hparams.check_tower_loss_numerics:
tower_loss = tf.check_numerics(
tower_loss, "tower_%d has Inf/NaN loss" % tid)
        # Cast to fp32; otherwise the loss would easily overflow.
tower_loss = tf.to_float(tower_loss)
var_params, grads, opt = self._compute_tower_grads(
tower_loss,
var_mgr.trainable_variables_on_device(tid, tid),
model.learning_rate,
use_fp16=hparams.use_fp16,
loss_scale=loss_scale,
colocate_gradients_with_ops=hparams.colocate_gradients_with_ops)
self._print_varinfo(var_params, tid)
res = [model.train_loss, model.learning_rate, model.batch_size]
res.extend(grads)
opts.append(opt)
return res
def unpack_fprop_and_bprop_output(output):
train_loss = output[0]
learning_rate = output[1]
batch_size = output[2]
grads = output[3:]
return train_loss, learning_rate, batch_size, grads
with mixed_precision_scope():
for tid in range(num_towers):
with tf.device(devices[tid % len(devices)]), tf.name_scope(
"tower_%s" % tid) as scope:
tower_scopes.append(scope)
with var_mgr.create_outer_variable_scope(tid) as var_scope:
var_scopes.append(var_scope)
outputs = maybe_xla_compile(hparams, fprop_and_bprop, tid)
(train_loss, learning_rate, batch_size,
grads) = unpack_fprop_and_bprop_output(outputs)
train_losses.append(train_loss)
learning_rates.append(learning_rate)
batch_sizes.append(batch_size)
var_params = var_mgr.trainable_variables_on_device(tid, tid)
tower_gradvars.append(list(zip(grads, var_params)))
# Add summaries
if hparams.show_metrics:
tf.summary.scalar("learning_rate", learning_rates[0])
if loss_scale:
tf.summary.scalar("loss_scale", loss_scale)
if hparams.enable_auto_loss_scale:
tf.summary.scalar("loss_scale_normal_steps",
loss_scale_normal_steps)
misc_utils.print_out("Finish building fprop and per-tower bprop.")
# Aggregate gradients
# The following compute the aggregated grads for each tower, stored in
# opaque grad_states structure.
apply_grads_devices, grad_states = var_mgr.preprocess_device_grads(
tower_gradvars)
master_grads = None
master_params = None
update_ops = []
for i, device in enumerate(apply_grads_devices):
with tf.device(device), tf.name_scope(tower_scopes[i]):
# Get per-tower grads.
with tf.name_scope("get_gradients_to_apply"):
avg_gradvars = var_mgr.get_gradients_to_apply(i, grad_states)
avg_grads = [gv[0] for gv in avg_gradvars]
# gradients post-processing
with tf.name_scope("clip_gradients"):
if hparams.clip_grads:
clipped_grads, grad_norm = model_helper.gradient_clip(
avg_grads, max_gradient_norm=hparams.max_gradient_norm)
# summary the grad on the 1st tower
if i == 0 and hparams.show_metrics:
tf.summary.scalar("grad_norm", grad_norm)
tf.summary.scalar("clipped_grad_norm",
tf.global_norm(clipped_grads))
else:
clipped_grads = avg_grads
if i == 0:
master_grads = clipped_grads
# Build apply-gradients ops
clipped_gradvars = list(
zip(clipped_grads, [gv[1] for gv in avg_gradvars]))
if i == 0:
master_params = [gv[1] for gv in avg_gradvars]
with tf.name_scope("append_gradient_ops"):
loss_scale_params = variable_mgr_util.AutoLossScaleParams(
enable_auto_loss_scale=hparams.enable_auto_loss_scale,
loss_scale=loss_scale,
loss_scale_normal_steps=loss_scale_normal_steps,
inc_loss_scale_every_n=hparams.fp16_inc_loss_scale_every_n,
is_chief=True)
opt = opts[i]
var_mgr.append_apply_gradients_ops(grad_states, opt,
clipped_gradvars, update_ops,
loss_scale_params)
misc_utils.print_out("Finish building grad aggregation.")
assert len(update_ops) == num_towers
train_op = tf.group(update_ops)
with tf.control_dependencies([train_op]):
global_step = tf.train.get_global_step()
train_op = global_step.assign_add(1)
# Compute loss on the first gpu
# TODO(jamesqin): optimize it?
with tf.device("gpu:0"):
loss = misc_utils.weighted_avg(train_losses, batch_sizes)
# Create local init_ops
# TODO(jamesqin): handle resource variables!
# At present if not using mirror strategy, not using resource vars.
local_init_ops = []
local_init_op = tf.local_variables_initializer()
with tf.control_dependencies([local_init_op]):
local_init_ops.append(var_mgr.get_post_init_ops())
local_init_ops.extend([local_init_op, tf.tables_initializer()])
saveable_vars = var_mgr.savable_variables()
# Add saveables for cudnn vars in master tower.
saveable_objects = tf.get_collection(tf.GraphKeys.SAVEABLE_OBJECTS)
saveable_objects = [x for x in saveable_objects if "v0" in x.name]
misc_utils.print_out("Saveable vars(%d): " % len(saveable_vars))
for mv in saveable_vars:
misc_utils.print_out(mv.name)
misc_utils.print_out(
"All global trainable vars(%d): " % len(tf.trainable_variables()))
for tv in tf.trainable_variables():
misc_utils.print_out(tv.name)
misc_utils.print_out(
"All global vars(%d): " % len(tf.global_variables()))
for gv in tf.global_variables():
misc_utils.print_out(gv.name)
misc_utils.print_out(
"master backproped params(%d): " % len(master_params))
for mp in master_params:
misc_utils.print_out(mp.name)
      # Note: the cudnn vars skip the init check. :(
scaffold = tf.train.Scaffold(
ready_op=tf.report_uninitialized_variables(saveable_vars),
ready_for_local_init_op=tf.report_uninitialized_variables(
saveable_vars),
local_init_op=tf.group(*local_init_ops),
saver=tf.train.Saver(saveable_vars + saveable_objects, save_relative_paths=True))
misc_utils.print_out("Finish building model_fn")
# return loss, vars, grads, predictions, train_op, scaffold
return loss, master_params, master_grads, None, train_op, scaffold
def make_model_fn(hparams):
"""Construct a GNMT model function for training."""
factory = ModelFnFactory(hparams)
if hparams.use_dist_strategy:
def fn(features, labels, mode, params):
"""docstring."""
(loss, _, _, predictions, train_op,
_) = factory.build_graph_dist_strategy(features, labels, mode, params)
if mode == tf.contrib.learn.ModeKeys.INFER:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
else:
if hparams.use_tpu:
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode, loss=loss, train_op=train_op)
else:
return tf.estimator.EstimatorSpec(mode=mode, loss=loss,
train_op=train_op)
return fn
else:
build_fn = factory.build_graph
def fn(features, labels, mode, params):
"""docstring."""
(loss, _, _, predictions, train_op, scaffold) = build_fn(
features, labels, mode, params)
if mode == tf.contrib.learn.ModeKeys.INFER:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
else:
return tf.estimator.EstimatorSpec(mode=mode, loss=loss,
scaffold=scaffold,
train_op=train_op)
return fn
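# Hedged wiring sketch (added for illustration; assumes an `hparams` object
# carrying the flags referenced above, e.g. `output_dir`): how the model_fn
# plugs into a TF1 Estimator.
def _example_build_estimator(hparams):
  import tensorflow as tf
  model_fn = make_model_fn(hparams)
  return tf.estimator.Estimator(model_fn=model_fn,
                                model_dir=hparams.output_dir)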
def make_input_fn(hparams, mode):
"""Construct a input function for training."""
def _input_fn(params):
"""Input function."""
del params
if mode == tf.contrib.learn.ModeKeys.TRAIN:
src_file = "%s.%s" % (hparams.train_prefix, hparams.src)
tgt_file = "%s.%s" % (hparams.train_prefix, hparams.tgt)
else:
if hparams.mode == "translate":
src_file = hparams.translate_file + ".tok"
tgt_file = hparams.translate_file + ".tok"
else:
src_file = "%s.%s" % (hparams.test_prefix, hparams.src)
tgt_file = "%s.%s" % (hparams.test_prefix, hparams.tgt)
src_vocab_file = hparams.src_vocab_file
tgt_vocab_file = hparams.tgt_vocab_file
src_vocab_table, tgt_vocab_table = vocab_utils.create_vocab_tables(
src_vocab_file, tgt_vocab_file, hparams.share_vocab)
src_dataset = tf.data.TextLineDataset(src_file)
tgt_dataset = tf.data.TextLineDataset(tgt_file)
if mode == tf.contrib.learn.ModeKeys.TRAIN:
# Run one epoch and stop if running train_and_eval.
if hparams.mode == "train_and_eval":
        # In this mode the input pipeline is restarted every epoch, so choose
        # a different random_seed each time.
num_repeat = 1
random_seed = hparams.random_seed + int(time.time()) % 100
else:
num_repeat = 8
random_seed = hparams.random_seed
return iterator_utils.get_iterator(
src_dataset,
tgt_dataset,
src_vocab_table,
tgt_vocab_table,
batch_size=hparams.batch_size,
sos=hparams.sos,
eos=hparams.eos,
random_seed=random_seed,
num_buckets=hparams.num_buckets,
src_max_len=hparams.src_max_len,
tgt_max_len=hparams.tgt_max_len,
output_buffer_size=None,
skip_count=None,
num_shards=1, # flags.num_workers
shard_index=0, # flags.jobid
reshuffle_each_iteration=True,
use_char_encode=hparams.use_char_encode,
num_repeat=num_repeat,
filter_oversized_sequences=True
) # need to update get_effective_train_epoch_size() if this flag flips.
else:
return iterator_utils.get_infer_iterator(
src_dataset,
src_vocab_table,
batch_size=hparams.infer_batch_size,
eos=hparams.eos,
src_max_len=hparams.src_max_len,
use_char_encode=hparams.use_char_encode)
def _synthetic_input_fn(params):
"""Fake inputs for debugging and benchmarking."""
del params
batch_size = hparams.batch_size
src_max_len = hparams.src_max_len
tgt_max_len = hparams.tgt_max_len
features = {
"source":
tf.random_uniform(
dtype=tf.int32,
minval=1,
maxval=10,
seed=1,
shape=(batch_size, src_max_len)),
"target_input":
tf.random_uniform(
dtype=tf.int32,
minval=1,
maxval=10,
seed=2,
shape=(batch_size, tgt_max_len)),
"target_output":
tf.random_uniform(
dtype=tf.int32,
minval=1,
maxval=10,
seed=3,
shape=(batch_size, tgt_max_len)),
"source_sequence_length":
tf.constant([src_max_len] * batch_size),
"target_sequence_length":
tf.constant([tgt_max_len] * batch_size)
}
return features
if hparams.use_synthetic_data:
return _synthetic_input_fn
else:
return _input_fn
def get_distribution_strategy(num_gpus):
if num_gpus == 0:
return tf.contrib.distribute.OneDeviceStrategy("device:CPU:0")
elif num_gpus == 1:
return tf.contrib.distribute.OneDeviceStrategy("device:GPU:0")
else:
return tf.contrib.distribute.MirroredStrategy(num_gpus=num_gpus)
def get_sacrebleu(trans_file, detokenizer_file):
"""Detokenize the trans_file and get the sacrebleu score."""
assert tf.gfile.Exists(detokenizer_file)
local_detokenizer_file = "/tmp/detokenizer.perl"
if tf.gfile.Exists(local_detokenizer_file):
tf.gfile.Remove(local_detokenizer_file)
tf.gfile.Copy(detokenizer_file, local_detokenizer_file, overwrite=True)
assert tf.gfile.Exists(trans_file)
local_trans_file = "/tmp/newstest2014_out.tok.de"
if tf.gfile.Exists(local_trans_file):
tf.gfile.Remove(local_trans_file)
tf.gfile.Copy(trans_file, local_trans_file, overwrite=True)
detok_trans_path = "/tmp/newstest2014_out.detok.de"
if tf.gfile.Exists(detok_trans_path):
tf.gfile.Remove(detok_trans_path)
# Detokenize the trans_file.
cmd = "cat %s | perl %s -l de | cat > %s" % (
local_trans_file, local_detokenizer_file, detok_trans_path)
subprocess.run(cmd, shell=True)
assert tf.gfile.Exists(detok_trans_path)
# run sacrebleu
cmd = ("cat %s | sacrebleu -t wmt14/full -l en-de --score-only -lc --tokenize"
" intl") % (detok_trans_path)
sacrebleu = subprocess.run([cmd], stdout=subprocess.PIPE, shell=True)
score = sacrebleu.stdout.strip()
return float(score)
def get_metrics(hparams, model_fn, ckpt=None, only_translate=False):
"""Run inference and compute metrics."""
pred_estimator = tf.estimator.Estimator(
model_fn=model_fn, model_dir=hparams.output_dir)
benchmark_hook = BenchmarkHook(hparams.infer_batch_size)
predictions = pred_estimator.predict(
make_input_fn(hparams, tf.contrib.learn.ModeKeys.INFER),
checkpoint_path=ckpt, hooks=[benchmark_hook])
translations = []
output_tokens = []
beam_id = 0
for prediction in predictions:
# get the top translation.
if beam_id == 0:
for sent_id in range(hparams.infer_batch_size):
if sent_id >= prediction["predictions"].shape[0]:
break
trans, output_length = nmt_utils.get_translation(
prediction["predictions"],
sent_id=sent_id,
tgt_eos=hparams.eos,
subword_option=hparams.subword_option)
translations.append(trans)
output_tokens.append(output_length)
beam_id += 1
if beam_id == hparams.beam_width:
beam_id = 0
if only_translate:
trans_file = hparams.translate_file + '.trans.tok'
else:
trans_file = os.path.join(
hparams.output_dir, "newstest2014_out_{}.tok.de".format(
pred_estimator.get_variable_value(tf.GraphKeys.GLOBAL_STEP)))
trans_dir = os.path.dirname(trans_file)
if not tf.gfile.Exists(trans_dir):
tf.gfile.MakeDirs(trans_dir)
tf.logging.info("Writing to file %s" % trans_file)
with codecs.getwriter("utf-8")(tf.gfile.GFile(trans_file,
mode="wb")) as trans_f:
trans_f.write("") # Write empty string to ensure file is created.
for translation in translations:
trans_f.write((translation + b"\n").decode("utf-8"))
if only_translate:
return None, benchmark_hook.get_average_speed_and_latencies(), sum(output_tokens)
# Evaluation
output_dir = os.path.join(pred_estimator.model_dir, "eval")
tf.gfile.MakeDirs(output_dir)
summary_writer = tf.summary.FileWriter(output_dir)
ref_file = "%s.%s" % (hparams.test_prefix, hparams.tgt)
# Hardcoded.
metric = "bleu"
score = get_sacrebleu(trans_file, hparams.detokenizer_file)
misc_utils.print_out("bleu is %.5f" % score)
with tf.Graph().as_default():
summaries = []
summaries.append(tf.Summary.Value(tag=metric, simple_value=score))
tf_summary = tf.Summary(value=list(summaries))
summary_writer.add_summary(
tf_summary, pred_estimator.get_variable_value(tf.GraphKeys.GLOBAL_STEP))
summary_writer.close()
return score, benchmark_hook.get_average_speed_and_latencies(), sum(output_tokens)
def train_fn(hparams):
"""Train function."""
model_fn = make_model_fn(hparams)
input_fn = make_input_fn(hparams, tf.contrib.learn.ModeKeys.TRAIN)
log_step_count_steps = hparams.log_step_count_steps
save_checkpoints_steps = hparams.save_checkpoints_steps
if hparams.use_dist_strategy:
distribution_strategy = get_distribution_strategy(hparams.num_gpus)
config = tf.estimator.RunConfig(
train_distribute=distribution_strategy,
log_step_count_steps=log_step_count_steps,
keep_checkpoint_max=None,
save_checkpoints_steps=save_checkpoints_steps)
else:
sess_config = tf.ConfigProto(allow_soft_placement=True)
if hparams.use_autojit_xla:
sess_config.graph_options.optimizer_options.global_jit_level = (
tf.OptimizerOptions.ON_1)
if not hparams.use_pintohost_optimizer:
sess_config.graph_options.rewrite_options.pin_to_host_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
config = tf.estimator.RunConfig(
log_step_count_steps=log_step_count_steps,
session_config=sess_config,
keep_checkpoint_max=None,
save_checkpoints_steps=save_checkpoints_steps)
misc_utils.print_out("sess master is %s" % config.master)
estimator = tf.estimator.Estimator(
model_fn=model_fn, model_dir=hparams.output_dir, config=config)
benchmark_hook = BenchmarkHook(hparams.batch_size, hparams.warmup_steps + 5)
train_hooks = [benchmark_hook]
if hparams.profile:
train_hooks.append(tf.train.ProfilerHook(
output_dir=hparams.output_dir,
save_steps=hparams.profile_save_steps,
show_dataflow=True,
show_memory=True))
max_steps = hparams.debug_num_train_steps
estimator.train(
input_fn=input_fn,
max_steps=max_steps,
hooks=train_hooks,
)
return benchmark_hook.get_average_speed_and_latencies()
def eval_fn(hparams, ckpt=None, only_translate=False):
model_fn = make_model_fn(hparams)
return get_metrics(hparams, model_fn, ckpt, only_translate=only_translate)
| DeepLearningExamples-master | TensorFlow/Translation/GNMT/estimator.py |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GNMT attention sequence-to-sequence model with dynamic RNN support."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.cudnn_rnn.python.layers import cudnn_rnn
import attention_wrapper
import block_lstm
import model
import model_helper
from utils import misc_utils as utils
class GNMTModel(model.BaseModel):
"""Sequence-to-sequence dynamic model with GNMT attention architecture.
"""
def __init__(self,
hparams,
mode,
features,
scope=None,
extra_args=None):
self.is_gnmt_attention = (
hparams.attention_architecture in ["gnmt", "gnmt_v2"])
super(GNMTModel, self).__init__(
hparams=hparams,
mode=mode,
features=features,
scope=scope,
extra_args=extra_args)
def _prepare_beam_search_decoder_inputs(
self, beam_width, memory, source_sequence_length, encoder_state):
memory = tf.contrib.seq2seq.tile_batch(
memory, multiplier=beam_width)
source_sequence_length = tf.contrib.seq2seq.tile_batch(
source_sequence_length, multiplier=beam_width)
encoder_state = tf.contrib.seq2seq.tile_batch(
encoder_state, multiplier=beam_width)
batch_size = self.batch_size * beam_width
return memory, source_sequence_length, encoder_state, batch_size
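  # Sketch (added for illustration; numpy stands in for
  # tf.contrib.seq2seq.tile_batch): each batch entry is repeated `beam_width`
  # times along axis 0 so every beam hypothesis attends over its own source.
  @staticmethod
  def _example_tile_batch():
    import numpy as np
    memory = np.array([[1, 2], [3, 4]])  # [batch=2, dim=2]
    beam_width = 3
    tiled = np.repeat(memory, beam_width, axis=0)  # [batch * beam, dim]
    assert tiled.shape == (6, 2)
    return tiled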
def _build_encoder(self, hparams):
"""Build a GNMT encoder."""
assert hparams.encoder_type == "gnmt"
# Build GNMT encoder.
num_bi_layers = 1
num_uni_layers = self.num_encoder_layers - num_bi_layers
utils.print_out("# Build a GNMT encoder")
utils.print_out(" num_bi_layers = %d" % num_bi_layers)
utils.print_out(" num_uni_layers = %d" % num_uni_layers)
    # source is batch-major
source = self.features["source"]
import sys
print('source.shape: %s' % source.shape, file=sys.stderr)
if self.time_major:
      # The RNN layers below consume time-major inputs.
source = tf.transpose(source)
with tf.variable_scope("encoder"):
dtype = self.dtype
encoder_emb_inp = tf.cast(
self.encoder_emb_lookup_fn(self.embedding_encoder, source), dtype)
# Build 1st bidi layer.
bi_encoder_outputs, bi_encoder_state = self._build_encoder_layers_bidi(
encoder_emb_inp, self.features["source_sequence_length"], hparams,
dtype)
# Build all the rest unidi layers
encoder_state, encoder_outputs = self._build_encoder_layers_unidi(
bi_encoder_outputs, self.features["source_sequence_length"],
num_uni_layers, hparams, dtype)
# Pass all encoder states to the decoder
# except the first bi-directional layer
encoder_state = (bi_encoder_state[1],) + (
(encoder_state,) if num_uni_layers == 1 else encoder_state)
return encoder_outputs, encoder_state
def _build_encoder_layers_bidi(self, inputs, sequence_length, hparams, dtype):
"""docstring."""
if hparams.use_fused_lstm:
fn = self._build_bidi_rnn_fused
elif hparams.use_cudnn_lstm:
fn = self._build_bidi_rnn_cudnn
else:
fn = self._build_bidi_rnn_base
return fn(inputs, sequence_length, hparams, dtype)
def _build_bidi_rnn_fused(self, inputs, sequence_length, hparams, dtype):
if (not np.isclose(hparams.dropout, 0.) and
self.mode == tf.contrib.learn.ModeKeys.TRAIN):
inputs = tf.nn.dropout(inputs, keep_prob=1-hparams.dropout)
fwd_cell = block_lstm.LSTMBlockFusedCell(
hparams.num_units, hparams.forget_bias, dtype=dtype)
fwd_encoder_outputs, (fwd_final_c, fwd_final_h) = fwd_cell(
inputs,
dtype=dtype,
sequence_length=sequence_length)
inputs_r = tf.reverse_sequence(
inputs, sequence_length, batch_axis=1, seq_axis=0)
bak_cell = block_lstm.LSTMBlockFusedCell(
hparams.num_units, hparams.forget_bias, dtype=dtype)
bak_encoder_outputs, (bak_final_c, bak_final_h) = bak_cell(
inputs_r,
dtype=dtype,
sequence_length=sequence_length)
bak_encoder_outputs = tf.reverse_sequence(
bak_encoder_outputs, sequence_length, batch_axis=1, seq_axis=0)
bi_encoder_outputs = tf.concat(
[fwd_encoder_outputs, bak_encoder_outputs], axis=-1)
fwd_state = tf.nn.rnn_cell.LSTMStateTuple(fwd_final_c, fwd_final_h)
bak_state = tf.nn.rnn_cell.LSTMStateTuple(bak_final_c, bak_final_h)
bi_encoder_state = (fwd_state, bak_state)
    # Masks aren't applied to the outputs, but the final states are
    # post-masking.
return bi_encoder_outputs, bi_encoder_state
def _build_unidi_rnn_fused(self, inputs, state,
sequence_length, hparams, dtype):
if (not np.isclose(hparams.dropout, 0.) and
self.mode == tf.contrib.learn.ModeKeys.TRAIN):
inputs = tf.nn.dropout(inputs, keep_prob=1-hparams.dropout)
cell = block_lstm.LSTMBlockFusedCell(
hparams.num_units, hparams.forget_bias, dtype=dtype)
outputs, (final_c, final_h) = cell(
inputs,
state,
dtype=dtype,
sequence_length=sequence_length)
    # Masks aren't applied to the outputs, but the final states are
    # post-masking.
return outputs, tf.nn.rnn_cell.LSTMStateTuple(final_c, final_h)
def _build_unidi_rnn_cudnn(self, inputs, state, sequence_length, dtype,
hparams, num_layers, is_fwd):
    # cudnn only supports time-major inputs.
    if not self.time_major:
      inputs = tf.transpose(inputs, perm=[1, 0, 2])
    if num_layers == 1 and not np.isclose(hparams.dropout, 0.):
      # Special case when dropout is used with only one layer: cudnn applies
      # dropout between layers, so apply it to the inputs manually instead.
      dropout = 0.
      inputs = tf.nn.dropout(inputs, keep_prob=1 - hparams.dropout)
else:
dropout = hparams.dropout
    # The outputs will be time-major.
sequence_length = tf.transpose(sequence_length)
if not is_fwd:
inputs = tf.reverse_sequence(
inputs, sequence_length, batch_axis=1, seq_axis=0)
cell = tf.contrib.cudnn_rnn.CudnnLSTM(
num_layers=num_layers,
num_units=hparams.num_units,
direction=cudnn_rnn.CUDNN_RNN_UNIDIRECTION,
dtype=self.dtype,
dropout=dropout)
outputs, (h, c) = cell(inputs, initial_state=state)
"""
# Mask outputs
# [batch, time]
mask = tf.sequence_mask(sequence_length, dtype=self.dtype)
# [time, batch]
mask = tf.transpose(mask)
outputs *= mask
"""
    if not is_fwd:
      # Reverse the outputs back to the original time order.
      outputs = tf.reverse_sequence(
          outputs, sequence_length, batch_axis=1, seq_axis=0)
# NOTICE! There's no way to get the "correct" masked cell state in cudnn
# rnn.
if num_layers == 1:
h = tf.squeeze(h, axis=0)
c = tf.squeeze(c, axis=0)
return outputs, tf.nn.rnn_cell.LSTMStateTuple(c=c, h=h)
    # Split h and c to form a tuple of per-layer LSTMStateTuples.
h.set_shape((num_layers, None, hparams.num_units))
c.set_shape((num_layers, None, hparams.num_units))
hs = tf.unstack(h)
cs = tf.unstack(c)
    # The cell passed to bidi-dynamic-rnn is a MultiRNNCell consisting of 2
    # regular LSTMs, the state of each being a simple LSTMStateTuple. Thus
    # the state of the MultiRNNCell is a tuple of LSTMStateTuples.
states = tuple(
tf.nn.rnn_cell.LSTMStateTuple(c=c, h=h) for h, c in zip(hs, cs))
# No need to transpose back
return outputs, states
def _build_encoder_cell(self, hparams, num_layers, num_residual_layers,
dtype=None):
"""Build a multi-layer RNN cell that can be used by encoder."""
return model_helper.create_rnn_cell(
unit_type=hparams.unit_type,
num_units=self.num_units,
num_layers=num_layers,
num_residual_layers=num_residual_layers,
forget_bias=hparams.forget_bias,
dropout=hparams.dropout,
mode=self.mode,
dtype=dtype,
single_cell_fn=self.single_cell_fn,
use_block_lstm=hparams.use_block_lstm)
def _build_bidi_rnn_base(self, inputs, sequence_length, hparams, dtype):
"""Create and call biddirectional RNN cells."""
# num_residual_layers: Number of residual layers from top to bottom. For
# example, if `num_bi_layers=4` and `num_residual_layers=2`, the last 2
# RNN layers in each RNN cell will be wrapped with `ResidualWrapper`.
# Construct forward and backward cells
fw_cell = self._build_encoder_cell(hparams,
1, # num_bi_layers,
0, # num_bi_residual_layers,
dtype)
bw_cell = self._build_encoder_cell(hparams,
1, # num_bi_layers,
0, # num_bi_residual_layers,
dtype)
if hparams.use_dynamic_rnn:
bi_outputs, bi_state = tf.nn.bidirectional_dynamic_rnn(
fw_cell,
bw_cell,
inputs,
dtype=dtype,
sequence_length=sequence_length,
time_major=self.time_major,
swap_memory=True)
else:
bi_outputs, bi_state = tf.contrib.recurrent.bidirectional_functional_rnn(
fw_cell,
bw_cell,
inputs,
dtype=dtype,
sequence_length=sequence_length,
time_major=self.time_major,
use_tpu=False)
return tf.concat(bi_outputs, -1), bi_state
def _build_bidi_rnn_cudnn(self, inputs, sequence_length, hparams, dtype):
    # Note: cudnn rnn dropout is applied between layers, so with a single
    # layer the kernel applies no dropout; apply it to the inputs instead.
if not np.isclose(hparams.dropout, 0.):
inputs = tf.nn.dropout(inputs, keep_prob=1-hparams.dropout)
if not hparams.use_loose_bidi_cudnn_lstm:
fwd_outputs, fwd_states = self._build_unidi_rnn_cudnn(
inputs, None, # initial_state
sequence_length, dtype, hparams,
1, # num_layer
is_fwd=True)
bak_outputs, bak_states = self._build_unidi_rnn_cudnn(
inputs, None, # initial_state
sequence_length, dtype, hparams,
1, # num_layer
is_fwd=False)
bi_outputs = tf.concat([fwd_outputs, bak_outputs], axis=-1)
return bi_outputs, (fwd_states, bak_states)
else:
      # Cudnn only accepts time-major inputs.
      if not self.time_major:
        inputs = tf.transpose(inputs, perm=[1, 0, 2])
bi_outputs, (bi_h, bi_c) = tf.contrib.cudnn_rnn.CudnnLSTM(
num_layers=1, # num_bi_layers,
num_units=hparams.num_units,
direction=cudnn_rnn.CUDNN_RNN_BIDIRECTION,
dropout=0., # one layer, dropout isn't applied anyway,
seed=hparams.random_seed,
dtype=self.dtype,
kernel_initializer=tf.get_variable_scope().initializer,
bias_initializer=tf.zeros_initializer())(inputs)
# state shape is [num_layers * num_dir, batch, dim]
bi_h.set_shape((2, None, hparams.num_units))
bi_c.set_shape((2, None, hparams.num_units))
fwd_h, bak_h = tf.unstack(bi_h)
fwd_c, bak_c = tf.unstack(bi_c)
# No need to transpose back
return bi_outputs, (tf.nn.rnn_cell.LSTMStateTuple(c=fwd_c, h=fwd_h),
tf.nn.rnn_cell.LSTMStateTuple(c=bak_c, h=bak_h))
def _build_encoder_layers_unidi(self, inputs, sequence_length,
num_uni_layers, hparams, dtype):
"""Build encoder layers all at once."""
encoder_outputs = None
encoder_state = tuple()
if hparams.use_fused_lstm:
for i in range(num_uni_layers):
if (not np.isclose(hparams.dropout, 0.) and
self.mode == tf.contrib.learn.ModeKeys.TRAIN):
cell_inputs = tf.nn.dropout(inputs, keep_prob=1-hparams.dropout)
else:
cell_inputs = inputs
cell = block_lstm.LSTMBlockFusedCell(
hparams.num_units, hparams.forget_bias, dtype=dtype)
encoder_outputs, (final_c, final_h) = cell(
cell_inputs,
dtype=dtype,
sequence_length=sequence_length)
encoder_state += (tf.nn.rnn_cell.LSTMStateTuple(final_c, final_h),)
if i >= num_uni_layers - self.num_encoder_residual_layers:
# Add the pre-dropout inputs. Residual wrapper is applied after
# dropout wrapper.
encoder_outputs += inputs
inputs = encoder_outputs
elif hparams.use_cudnn_lstm:
      # Single-layer cudnn rnn: dropout isn't applied in the kernel.
for i in range(num_uni_layers):
if (not np.isclose(hparams.dropout, 0.) and
self.mode == tf.contrib.learn.ModeKeys.TRAIN):
inputs = tf.nn.dropout(inputs, keep_prob=1-hparams.dropout)
encoder_outputs, encoder_states = self._build_unidi_rnn_cudnn(
inputs,
None, # initial_state
sequence_length,
dtype,
hparams,
1, # num_layer
is_fwd=True)
encoder_state += (tf.nn.rnn_cell.LSTMStateTuple(encoder_states.c,
encoder_states.h),)
if i >= num_uni_layers - self.num_encoder_residual_layers:
encoder_outputs += inputs
inputs = encoder_outputs
else:
uni_cell = model_helper.create_rnn_cell(
unit_type=hparams.unit_type,
num_units=hparams.num_units,
num_layers=num_uni_layers,
num_residual_layers=self.num_encoder_residual_layers,
forget_bias=hparams.forget_bias,
dropout=hparams.dropout,
dtype=dtype,
mode=self.mode,
single_cell_fn=self.single_cell_fn,
use_block_lstm=hparams.use_block_lstm)
if hparams.use_dynamic_rnn:
encoder_outputs, encoder_state = tf.nn.dynamic_rnn(
uni_cell,
inputs,
dtype=dtype,
sequence_length=sequence_length,
time_major=self.time_major)
else:
encoder_outputs, encoder_state = tf.contrib.recurrent.functional_rnn(
uni_cell,
inputs,
dtype=dtype,
sequence_length=sequence_length,
time_major=self.time_major,
use_tpu=False)
return encoder_state, encoder_outputs
def _build_decoder_cell(self, hparams, encoder_outputs, encoder_state,
source_sequence_length):
"""Build a RNN cell with GNMT attention architecture."""
# GNMT attention
assert self.is_gnmt_attention
attention_option = hparams.attention
attention_architecture = hparams.attention_architecture
assert attention_option == "normed_bahdanau"
assert attention_architecture == "gnmt_v2"
num_units = hparams.num_units
infer_mode = hparams.infer_mode
dtype = tf.float16 if hparams.use_fp16 else tf.float32
if self.time_major:
memory = tf.transpose(encoder_outputs, [1, 0, 2])
else:
memory = encoder_outputs
if (self.mode == tf.contrib.learn.ModeKeys.INFER and
infer_mode == "beam_search"):
memory, source_sequence_length, encoder_state, batch_size = (
self._prepare_beam_search_decoder_inputs(
hparams.beam_width, memory, source_sequence_length,
encoder_state))
else:
batch_size = self.batch_size
attention_mechanism = model.create_attention_mechanism(
num_units, memory, source_sequence_length, dtype=dtype)
cell_list = model_helper._cell_list( # pylint: disable=protected-access
unit_type=hparams.unit_type,
num_units=num_units,
num_layers=self.num_decoder_layers,
num_residual_layers=self.num_decoder_residual_layers,
forget_bias=hparams.forget_bias,
dropout=hparams.dropout,
mode=self.mode,
dtype=dtype,
single_cell_fn=self.single_cell_fn,
residual_fn=gnmt_residual_fn,
use_block_lstm=hparams.use_block_lstm)
# Only wrap the bottom layer with the attention mechanism.
attention_cell = cell_list.pop(0)
# Only generate alignment in greedy INFER mode.
alignment_history = (self.mode == tf.contrib.learn.ModeKeys.INFER and
infer_mode != "beam_search")
attention_cell = attention_wrapper.AttentionWrapper(
attention_cell,
attention_mechanism,
attention_layer_size=None, # don't use attention layer.
output_attention=False,
alignment_history=alignment_history,
name="attention")
cell = GNMTAttentionMultiCell(attention_cell, cell_list)
if hparams.pass_hidden_state:
decoder_initial_state = tuple(
zs.clone(cell_state=es)
if isinstance(zs, attention_wrapper.AttentionWrapperState) else es
for zs, es in zip(
cell.zero_state(batch_size, dtype), encoder_state))
else:
decoder_initial_state = cell.zero_state(batch_size, dtype)
return cell, decoder_initial_state
def _build_decoder_cudnn(self, encoder_outputs, encoder_state, hparams):
pass
"""
# Training
# Use dynamic_rnn to compute the 1st layer outputs and attention
# GNMT attention
with tf.variable_scope("decoder") as decoder_scope:
assert self.is_gnmt_attention
attention_option = hparams.attention
attention_architecture = hparams.attention_architecture
assert attention_option == "normed_bahdanau"
assert attention_architecture == "gnmt_v2"
num_units = hparams.num_units
infer_mode = hparams.infer_mode
dtype = tf.float16 if hparams.use_fp16 else tf.float32
if self.time_major:
memory = tf.transpose(encoder_outputs, [1, 0, 2])
else:
memory = encoder_outputs
source_sequence_length = self.features["source_sequence_length"]
if (self.mode == tf.contrib.learn.ModeKeys.INFER and
infer_mode == "beam_search"):
memory, source_sequence_length, encoder_state, batch_size = (
self._prepare_beam_search_decoder_inputs(
hparams.beam_width, memory, source_sequence_length,
encoder_state))
else:
batch_size = self.batch_size
attention_mechanism = model.create_attention_mechanism(
num_units, memory, source_sequence_length, dtype=dtype)
attention_cell = model_helper._cell_list( # pylint: disable=protected-access
unit_type=hparams.unit_type,
num_units=num_units,
num_layers=1, # just one layer
num_residual_layers=0, # 1st layer has no residual connection.
forget_bias=hparams.forget_bias,
dropout=hparams.dropout,
mode=self.mode,
dtype=dtype,
single_cell_fn=self.single_cell_fn,
residual_fn=gnmt_residual_fn,
use_block_lstm=False)[0]
# Only generate alignment in greedy INFER mode.
alignment_history = (self.mode == tf.contrib.learn.ModeKeys.INFER and
infer_mode != "beam_search")
attention_cell = attention_wrapper.AttentionWrapper(
attention_cell,
attention_mechanism,
attention_layer_size=None, # don't use attention layer.
output_attention=False,
alignment_history=alignment_history,
name="attention")
decoder_attention_cell_initial_state = attention_cell.zero_state(
batch_size, dtype).clone(cell_state=encoder_state[0])
# TODO(jamesqin): support frnn
# [batch, time]
target_input = self.features["target_input"]
if self.time_major:
# If using time_major mode, then target_input should be [time, batch]
# then the decoder_emb_inp would be [time, batch, dim]
target_input = tf.transpose(target_input)
decoder_emb_inp = tf.cast(
tf.nn.embedding_lookup(self.embedding_decoder, target_input),
self.dtype)
attention_cell_outputs, attention_cell_state = tf.nn.dynamic_rnn(
attention_cell,
decoder_emb_inp,
sequence_length=self.features["target_sequence_length"],
initial_state=decoder_attention_cell_initial_state,
dtype=self.dtype,
scope=decoder_scope,
parallel_iterations=hparams.parallel_iterations,
time_major=self.time_major)
attention = None
inputs = tf.concat([target_input, attention_cell_outputs], axis=-1)
initial_state = encoder_state[1:]
num_bi_layers = 1
      num_unidi_decoder_layers = self.num_decoder_layers - num_bi_layers
# 3 layers of uni cudnn
for i in range(num_unidi_decoder_layers):
# Concat input with attention
if (not np.isclose(hparams.dropout, 0.) and
self.mode == tf.contrib.learn.ModeKeys.TRAIN):
inputs = tf.nn.dropout(inputs, keep_prob=1 - hparams.dropout)
outputs, states = self._build_unidi_rnn_cudnn(
inputs,
initial_state[i],
self.features["target_sequence_length"],
self.dtype,
hparams,
1, # num_layer
is_fwd=True)
if i >= num_unidi_decoder_layers - self.num_decoder_residual_layers:
outputs += inputs
inputs = outputs
pass
"""
def _build_decoder_fused_for_training(self, encoder_outputs, initial_state,
decoder_emb_inp, hparams):
assert self.mode == tf.contrib.learn.ModeKeys.TRAIN
num_bi_layers = 1
num_unidi_decoder_layers = self.num_decoder_layers - num_bi_layers
assert num_unidi_decoder_layers == 3
# The 1st LSTM layer
if self.time_major:
batch = tf.shape(encoder_outputs)[1]
tgt_max_len = tf.shape(decoder_emb_inp)[0]
      # Initial attention is all zeros: [tgt_time, batch, units].
initial_attention = tf.zeros(
shape=[tgt_max_len, batch, hparams.num_units], dtype=self.dtype)
else:
batch = tf.shape(encoder_outputs)[0]
tgt_max_len = tf.shape(decoder_emb_inp)[1]
initial_attention = tf.zeros(
shape=[batch, tgt_max_len, hparams.num_units], dtype=self.dtype)
# Concat with initial attention
dec_inp = tf.concat([decoder_emb_inp, initial_attention], axis=-1)
# [tgt_time, batch, units]
# var_scope naming chosen to agree with inference graph.
with tf.variable_scope("multi_rnn_cell/cell_0_attention/attention"):
outputs, _ = self._build_unidi_rnn_fused(
dec_inp,
initial_state[0],
self.features["target_sequence_length"],
hparams,
self.dtype)
# Get attention
# Fused attention layer has memory of shape [batch, src_time, ...]
if self.time_major:
memory = tf.transpose(encoder_outputs, [1, 0, 2])
else:
memory = encoder_outputs
fused_attention_layer = attention_wrapper.BahdanauAttentionFusedLayer(
hparams.num_units, memory,
memory_sequence_length=self.features["source_sequence_length"],
dtype=self.dtype)
# [batch, tgt_time, units]
if self.time_major:
queries = tf.transpose(outputs, [1, 0, 2])
else:
queries = outputs
fused_attention = fused_attention_layer(queries)
if self.time_major:
# [tgt_time, batch, units]
fused_attention = tf.transpose(fused_attention, [1, 0, 2])
# 2-4th layer
inputs = outputs
for i in range(num_unidi_decoder_layers):
# [tgt_time, batch, 2 * units]
concat_inputs = tf.concat([inputs, fused_attention], axis=-1)
# var_scope naming chosen to agree with inference graph.
with tf.variable_scope("multi_rnn_cell/cell_%d" % (i+1)):
outputs, _ = self._build_unidi_rnn_fused(
concat_inputs, initial_state[i + 1],
self.features["target_sequence_length"], hparams, self.dtype)
if i >= num_unidi_decoder_layers - self.num_decoder_residual_layers:
# gnmt_v2 attention adds the original inputs.
outputs += inputs
inputs = outputs
return outputs
class GNMTAttentionMultiCell(tf.nn.rnn_cell.MultiRNNCell):
"""A MultiCell with GNMT attention style."""
def __init__(self, attention_cell, cells):
"""Creates a GNMTAttentionMultiCell.
Args:
attention_cell: An instance of AttentionWrapper.
cells: A list of RNNCell wrapped with AttentionInputWrapper.
"""
cells = [attention_cell] + cells
super(GNMTAttentionMultiCell, self).__init__(cells, state_is_tuple=True)
def __call__(self, inputs, state, scope=None):
"""Run the cell with bottom layer's attention copied to all upper layers."""
if not tf.contrib.framework.nest.is_sequence(state):
raise ValueError(
"Expected state to be a tuple of length %d, but received: %s"
% (len(self.state_size), state))
with tf.variable_scope(scope or "multi_rnn_cell"):
new_states = []
with tf.variable_scope("cell_0_attention"):
attention_cell = self._cells[0]
attention_state = state[0]
cur_inp, new_attention_state = attention_cell(inputs, attention_state)
new_states.append(new_attention_state)
for i in range(1, len(self._cells)):
with tf.variable_scope("cell_%d" % i):
cell = self._cells[i]
cur_state = state[i]
cur_inp = tf.concat([cur_inp, new_attention_state.attention], -1)
cur_inp, new_state = cell(cur_inp, cur_state)
new_states.append(new_state)
return cur_inp, tuple(new_states)
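  # Shape sketch (added for illustration, not from the original): the concat
  # in __call__ feeds every upper layer [prev_output ; bottom attention], so
  # with num_units=U each upper cell sees inputs of width 2*U.
  @staticmethod
  def _example_input_width(num_units=4):
    import numpy as np
    cur_inp = np.zeros([2, num_units])    # previous layer's output
    attention = np.zeros([2, num_units])  # bottom layer's attention vector
    concat = np.concatenate([cur_inp, attention], axis=-1)
    assert concat.shape == (2, 2 * num_units)
    return concat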
def gnmt_residual_fn(inputs, outputs):
"""Residual function that handles different inputs and outputs inner dims.
Args:
inputs: cell inputs, this is actual inputs concatenated with the attention
vector.
outputs: cell outputs
Returns:
outputs + actual inputs
"""
def split_input(inp, out):
inp_dim = inp.get_shape().as_list()[-1]
out_dim = out.get_shape().as_list()[-1]
return tf.split(inp, [out_dim, inp_dim - out_dim], axis=-1)
actual_inputs, _ = tf.contrib.framework.nest.map_structure(
split_input, inputs, outputs)
def assert_shape_match(inp, out):
inp.get_shape().assert_is_compatible_with(out.get_shape())
tf.contrib.framework.nest.assert_same_structure(actual_inputs, outputs)
tf.contrib.framework.nest.map_structure(
assert_shape_match, actual_inputs, outputs)
return tf.contrib.framework.nest.map_structure(
lambda inp, out: inp + out, actual_inputs, outputs)
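# Numeric sketch (added for illustration, not from the original): the cell
# input is [actual_input ; attention], so gnmt_residual_fn adds only the
# leading slice that matches the output width.
def _example_gnmt_residual():
  import numpy as np
  out_dim = 3
  inputs = np.array([[1., 2., 3., 9., 9.]])  # [actual(3) ; attention(2)]
  outputs = np.array([[10., 10., 10.]])
  actual = inputs[:, :out_dim]
  res = actual + outputs  # -> [[11., 12., 13.]]
  assert res.shape == outputs.shape
  return res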
| DeepLearningExamples-master | TensorFlow/Translation/GNMT/gnmt_model.py |