# Copyright 2020 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides a `Controller` class for managing the outer training loop."""

import pprint
import time

from typing import Callable, Optional, Union

from absl import logging

from orbit import runner
from orbit import utils

import tensorflow as tf


def _log(message: str):
  """Logs `message` to the `info` log, and also prints to stdout."""
  logging.info(message)
  print(message)


logging.ABSLLogger.register_frame_to_skip(__file__, _log.__name__)


def _format_output(output, indent=4):
  """Formats `output`, either on one line, or indented across multiple lines."""
  formatted = pprint.pformat(output)
  lines = formatted.splitlines()
  if len(lines) == 1:
    return formatted
  lines = [" " * indent + line for line in lines]
  return "\n" + "\n".join(lines)


class Controller:
  """Class that controls the outer loop of model training and evaluation.

  Orbit divides training and evaluation into "inner" and "outer" loops. Inner
  loops are implemented by users in the form of `AbstractTrainer` and
  `AbstractEvaluator` subclasses, and define how to run a given number of
  training or evaluation steps. The outer loop is provided by this `Controller`,
  and interleaves calls to the user provided inner loops with additional actions
  such as saving checkpoints, running evaluations, and writing summaries
  (depending on the arguments passed to `Controller.__init__` and the method
  being called).

  There are four top-level "outer loops" provided:

    - `train`, which trains until a specified number of global steps is reached;
    - `evaluate`, for one-off model evaluation;
    - `train_and_evaluate`, for interleaved training and evaluation;
    - `evaluate_continuously`, for monitoring a given directory and running
      evaluations on new model checkpoints.

  While this class attempts to provide out-of-the-box solutions for common
  training and evaluation use cases, the internal details and method
  implementations are also intended to be simple enough to make subclassing or
  other custom outer loop implementations easy to achieve.
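
  A minimal usage sketch (`MyTrainer`, `trainer.model`, and the paths below
  are placeholders for user-provided values, not part of this module):

      trainer = MyTrainer()  # A user-defined `orbit.AbstractTrainer`.
      global_step = trainer.optimizer.iterations
      checkpoint_manager = tf.train.CheckpointManager(
          tf.train.Checkpoint(model=trainer.model),
          directory="/tmp/my_model",
          max_to_keep=3,
          step_counter=global_step,
          checkpoint_interval=1000)
      controller = orbit.Controller(
          trainer=trainer,
          global_step=global_step,
          steps_per_loop=100,
          summary_dir="/tmp/my_model",
          checkpoint_manager=checkpoint_manager)
      controller.train(steps=10000)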
  """

  def __init__(
      self,
      strategy: Optional[tf.distribute.Strategy] = None,
      trainer: Optional[runner.AbstractTrainer] = None,
      evaluator: Optional[runner.AbstractEvaluator] = None,
      global_step: Optional[tf.Variable] = None,
      # Train related
      steps_per_loop: Optional[int] = None,
      checkpoint_manager: Optional[tf.train.CheckpointManager] = None,
      # Summary related
      summary_interval: Optional[int] = None,
      summary_dir: Optional[str] = None,
      # Evaluation related
      eval_summary_dir: Optional[str] = None):
    """Initializes a `Controller` instance.

    Note that if `checkpoint_manager` is provided and there are checkpoints in
    the associated model directory, the model will be restored from the most
    recent checkpoint during this `__init__` method.

    Args:
      strategy: An instance of `tf.distribute.Strategy`. If not provided, the
        strategy will be initialized from the current in-scope strategy using
        `tf.distribute.get_strategy()`.
      trainer: An instance of `orbit.AbstractTrainer`, which implements the
        inner training loop.
      evaluator: An instance of `orbit.AbstractEvaluator`, which implements
        evaluation.
      global_step: An integer `tf.Variable` storing the global training step
        number. Usually this can be obtained from the `iterations` property of
        the model's optimizer (e.g. `trainer.optimizer.iterations`). In cases
        where multiple optimizers are used, or if one model "step" corresponds
        to more than one update to model parameters, users can create and
        increment their own global step variable as well. In this case it is
        recommended to create the `tf.Variable` inside the distribution strategy
        scope, with `aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA`, as
        in the sketch at the end of this docstring (see also
        `orbit.utils.create_global_step()`).
      steps_per_loop: The number of steps to run in each inner loop of training
        (passed as the `num_steps` parameter of `trainer.train`).
      checkpoint_manager: An instance of `tf.train.CheckpointManager`. If
        provided and there are checkpoints in the associated model directory,
        the model will be restored from the most recent checkpoint inside this
        `__init__` method. If not provided, the `Controller` will not
        automatically save to or restore from checkpoints.
      summary_interval: Step interval for training summaries. Note that this
        argument only applies to `tf.summary` calls inside the `trainer.train`
        function. Summaries written by the `Controller` (specifically
        "steps_per_second" and output from the `trainer.train` method) will
        always be enabled unless the `summary_dir` parameter is `None`. If set,
        the value must be divisible by `steps_per_loop`.
      summary_dir: The directory to write summaries to. To use the same
        directory as for checkpointing, pass `checkpoint_manager.directory`. If
        `None`, no training summaries will be written.
      eval_summary_dir: The directory to write eval summaries to. If `None`, it
        will be set to `summary_dir`. If both `summary_dir` and
        `eval_summary_dir` are `None`, no eval summaries will be written.

    Raises:
      ValueError: If both `trainer` and `evaluator` are `None`.
      ValueError: If `steps_per_loop` is not a positive integer.
      ValueError: If `summary_interval` is not a positive integer or is not
        divisible by `steps_per_loop`.
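
    Example (a sketch of manually creating a `global_step` inside the strategy
    scope, per the `global_step` notes above;
    `orbit.utils.create_global_step()` provides similar functionality):

        with strategy.scope():
          global_step = tf.Variable(
              0,
              dtype=tf.int64,
              trainable=False,
              aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)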
    """
    if trainer is None and evaluator is None:
      raise ValueError("`trainer` and `evaluator` should not both be `None`.")

    if trainer is not None:
      if steps_per_loop is None:
        raise ValueError(
            "`steps_per_loop` is required when `trainer` is provided.")
      elif not isinstance(steps_per_loop, int) or steps_per_loop < 1:
        raise ValueError(
            f"`steps_per_loop` ({steps_per_loop}) must be a positive integer.")

      if summary_interval is not None:
        if summary_interval <= 0:
          raise ValueError(
              f"`summary_interval` ({summary_interval}) must be larger than 0.")
        elif summary_interval % steps_per_loop != 0:
          raise ValueError(
              f"`summary interval` ({summary_interval}) must be a multiple "
              f"of `steps_per_loop` ({steps_per_loop}).")

    if global_step is None:
      raise ValueError("`global_step` is required.")
    elif not isinstance(global_step, tf.Variable):
      raise ValueError("`global_step` must be a `tf.Variable`.")

    self.trainer = trainer
    self.evaluator = evaluator

    self.strategy = strategy or tf.distribute.get_strategy()

    self.global_step = global_step
    self.checkpoint_manager = checkpoint_manager

    if self.trainer is not None:
      self.step_timer = None
      self.steps_per_loop = steps_per_loop
      self.summary_interval = summary_interval
      self.summary_manager = utils.SummaryManager(
          summary_dir, tf.summary.scalar, global_step=self.global_step)

    if self.evaluator is not None:
      eval_summary_dir = eval_summary_dir or summary_dir
      if eval_summary_dir == summary_dir and self.trainer is not None:
        # Reuse the summary writer if train and evaluation summary directory
        # are the same.
        self.eval_summary_manager = self.summary_manager
      else:
        self.eval_summary_manager = utils.SummaryManager(
            eval_summary_dir, tf.summary.scalar, global_step=self.global_step)

    if self.global_step is not None:
      tf.summary.experimental.set_step(self.global_step)

    # Restores the model if needed.
    # TODO(momernick): We probably only want to do this on certain occasions?
    if self.checkpoint_manager is not None:
      restored_path = self.restore_checkpoint()
      if restored_path:
        _log(f"restored from checkpoint: {restored_path}")

  def train(self, steps: int, checkpoint_at_completion: bool = True):
    """Runs training until the specified global step count has been reached.

    This method makes calls to `self.trainer.train()` until the global step
    count is equal to `steps`. It will additionally save checkpoints (if a
    `CheckpointManager` was passed to `Controller.__init__`) and summarize
    training output (if `summary_dir` is set).

    Args:
      steps: The global step count to train up to.
      checkpoint_at_completion: Whether to save a checkpoint when this method
        returns (regardless of the checkpointing interval). Defaults to `True`.
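
    Example (assuming a `controller` constructed with a `trainer`):

        # Train until `global_step` reaches 1000 (a no-op if already there).
        controller.train(steps=1000)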
    """
    self._require("trainer", for_method="train")

    # TODO(momernick): Support steps=None or -1 (training to exhaustion).
    current_step = self.global_step.numpy()  # Cache, since this is expensive.
    _log(f"train | step: {current_step: 6d} | training until step {steps}...")
    while current_step < steps:
      # Calculates steps to run for the next train loop.
      num_steps = min(steps - current_step, self.steps_per_loop)
      self._train_n_steps(num_steps)
      self._maybe_save_checkpoint()
      current_step = self.global_step.numpy()

    if checkpoint_at_completion:
      self._maybe_save_checkpoint(check_interval=False)

  def evaluate(self, steps: int = -1) -> Optional[runner.Output]:
    """Runs evaluation for the given number of steps.

    This method calls `self.evaluator.evaluate(steps)`, then writes the returned
    summaries (if any).

    Args:
      steps: The number of evaluation steps to run. The value `-1` is reserved
        as a special sentinel to indicate a "complete" evaluation that runs
        until the underlying dataset is exhausted. Support for this is dependent
        on the specific `evaluator` being used.

    Returns:
      The evaluation results as a dictionary mapping names to NumPy values.

    Raises:
      ValueError: If `evaluator` was not provided to `Controller.__init__`.
      ValueError: If `steps` is not a positive value or -1.
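
    Example (assuming a `controller` constructed with an `evaluator`):

        results = controller.evaluate(steps=100)  # A dict of NumPy values.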
    """
    self._require("evaluator", for_method="evaluate")

    if steps > 0:
      steps_msg = f"running {steps} steps of evaluation..."
    elif steps == -1:
      steps_msg = "running complete evaluation..."
    else:
      raise ValueError(f"`steps` ({steps}) should be > 0, or == -1.")

    current_step = self.global_step.numpy()
    _log(f" eval | step: {current_step: 6d} | {steps_msg}")

    start = time.time()
    with self.eval_summary_manager.summary_writer().as_default():
      steps_tensor = tf.convert_to_tensor(steps, dtype=tf.int32)
      eval_output = self.evaluator.evaluate(steps_tensor)
    eval_output = tf.nest.map_structure(utils.get_value, eval_output or {})
    elapsed = time.time() - start

    _log(f" eval | step: {current_step: 6d} | "
         f"eval time: {elapsed: 6.1f} | "
         f"output: {_format_output(eval_output)}")

    self.eval_summary_manager.write_summaries(eval_output)
    self.eval_summary_manager.flush()

    return eval_output

  def train_and_evaluate(self,
                         train_steps: int,
                         eval_steps: Optional[int] = None,
                         eval_interval: Optional[int] = None):
    """Runs interleaved training and evaluation.

    This method interleaves calls to `self.train()` and `self.evaluate()`,
    training the model until the global step count equals `train_steps`, and
    running an evaluation for `eval_steps` every `eval_interval` training steps.
    In addition, this method will run a final evaluation at the end of the
    training sequence.

    Args:
      train_steps: The global step count to train up to.
      eval_steps: The number of steps to run during an evaluation. If `None`,
        this method will evaluate over the entire evaluation dataset.
      eval_interval: The number of training steps to run between evaluations.
        If set, training will always stop every `eval_interval` steps, even if
        this results in a shorter inner loop than specified by the
        `steps_per_loop` setting. If `None`, evaluation will only be performed
        after training is complete.

    Raises:
      ValueError: If `trainer` or `evaluator` was not provided to
        `Controller.__init__`.
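
    Example (assuming a `controller` constructed with both a `trainer` and an
    `evaluator`; the step counts are illustrative):

        controller.train_and_evaluate(
            train_steps=10000, eval_steps=100, eval_interval=1000)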
    """
    self._require("trainer", for_method="train_and_evaluate")
    self._require("evaluator", for_method="train_and_evaluate")

    if eval_steps is None:
      eval_steps = -1  # `evaluate` treats -1 as "evaluate the full dataset".

    current_step = self.global_step.numpy()  # Cache, since this is expensive.
    eval_interval = eval_interval or (train_steps - current_step)
    while current_step < train_steps:
      interval = min(train_steps - current_step, eval_interval)
      num_steps = current_step + interval
      self.train(steps=num_steps, checkpoint_at_completion=False)
      self.evaluate(steps=eval_steps)
      current_step = self.global_step.numpy()
    self._maybe_save_checkpoint(check_interval=False)

  def evaluate_continuously(self,
                            steps: Optional[int] = None,
                            timeout: Optional[Union[int, float]] = None,
                            timeout_fn: Optional[Callable[[], bool]] = None):
    """Continuously monitors a directory and evaluates new checkpoints in it.

    This method continuously monitors the directory associated with the
    `checkpoint_manager` provided to `Controller.__init__`, and runs evaluation
    on each new checkpoint found there.

    Args:
      steps: The number of steps to run when evaluating. If `None` or `-1`,
        evaluates until the underlying dataset is exhausted.
      timeout: The maximum number of seconds to wait between checkpoints. See
        the `tf.train.checkpoints_iterator` documentation.
      timeout_fn: Optional callable invoked after a timeout. If it returns
        `True`, this signals that no new checkpoints will be generated and the
        iterator (and this method) will exit.

    Raises:
      ValueError: If `evaluator` or `checkpoint_manager` was not provided to
        `Controller.__init__`.
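
    Example (a sketch of an evaluation-only job; `my_evaluator` and
    `checkpoint_manager` are placeholders for user-provided values):

        controller = orbit.Controller(
            evaluator=my_evaluator,
            global_step=orbit.utils.create_global_step(),
            checkpoint_manager=checkpoint_manager)
        controller.evaluate_continuously(
            steps=100,
            timeout=3600,  # Wait up to an hour for a new checkpoint...
            timeout_fn=lambda: True)  # ...then exit if none appears.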
    """
    self._require("evaluator", for_method="evaluate_continuously")
    self._require("checkpoint_manager", for_method="evaluate_continuously")

    for checkpoint_path in tf.train.checkpoints_iterator(
        self.checkpoint_manager.directory,
        timeout=timeout,
        timeout_fn=timeout_fn):
      self.restore_checkpoint(checkpoint_path)
      self.evaluate(steps if steps is not None else -1)

  def restore_checkpoint(self, checkpoint_path: Optional[str] = None):
    """Restores the model from a checkpoint.

    Args:
      checkpoint_path: An optional string specifying the checkpoint path to
        restore from. If `None`, will restore from the most recent checkpoint
        (or initialize the model using a custom `init_fn` if no checkpoints can
        be found) using `self.checkpoint_manager.restore_or_initialize()`.

    Returns:
      The path to the restored checkpoint if a restore happened, or `None` if no
      restore occurred.
    """
    self._require("checkpoint_manager", for_method="restore_checkpoint")

    with self.strategy.scope():
      # Checkpoint restoring should be inside scope (b/139450638).
      if checkpoint_path is not None:
        _log(f"restoring model from {checkpoint_path}...")
        self.checkpoint_manager.checkpoint.restore(checkpoint_path)
      else:
        _log("restoring or initializing model...")
        checkpoint_path = self.checkpoint_manager.restore_or_initialize()

    if checkpoint_path is not None:
      _log(f"restored model from {checkpoint_path}.")
    else:
      _log("initialized model.")

    return checkpoint_path

  def save_checkpoint(self):
    """Saves the model to a checkpoint.

    This method will save a checkpoint containing the current state of the
    model.

    Raises:
      ValueError: If no `checkpoint_manager` was provided to
        `Controller.__init__`.
    """
    self._require("checkpoint_manager", for_method="save_checkpoint")
    self._maybe_save_checkpoint(check_interval=False)

  def _train_n_steps(self, num_steps: int):
    """Runs training for `num_steps` steps.

    Also prints/logs updates about training progress, and summarizes training
    output (if output is returned from `self.trainer.train()`, and if a
    `summary_dir` was provided to `Controller.__init__`).

    Args:
      num_steps: An integer specifying how many steps of training to run.

    Raises:
      RuntimeError: If `global_step` is not properly incremented by `num_steps`
        after calling `self.trainer.train(num_steps)`.
    """
    if not self.step_timer:
      self.step_timer = StepTimer(self.global_step)
    current_step = self.global_step.numpy()

    with self.summary_manager.summary_writer().as_default():
      should_record = False  # Allows static optimization in no-summary cases.
      if self.summary_interval:
        # Create a predicate to determine when summaries should be written.
        should_record = lambda: (self.global_step % self.summary_interval == 0)
      with tf.summary.record_if(should_record):
        num_steps_tensor = tf.convert_to_tensor(num_steps, dtype=tf.int32)
        train_output = self.trainer.train(num_steps_tensor)
    train_output = tf.nest.map_structure(utils.get_value, train_output or {})

    # Verify that global_step was updated properly, then update current_step.
    expected_step = current_step + num_steps
    if self.global_step.numpy() != expected_step:
      raise RuntimeError(
          f"`trainer.train({num_steps})` did not update `global_step` by "
          f"{num_steps}. Old value was {current_step}, expected updated value "
          f"to be {expected_step}, but it was {self.global_step.numpy()}.")
    current_step = expected_step

    steps_per_second = self.step_timer.steps_per_second()
    _log(f"train | step: {current_step: 6d} | "
         f"steps/sec: {steps_per_second: 6.1f} | "
         f"output: {_format_output(train_output)}")

    train_output["steps_per_second"] = steps_per_second
    self.summary_manager.write_summaries(train_output)
    self.summary_manager.flush()

  def _maybe_save_checkpoint(self, check_interval: bool = True):
    """Conditionally saves a checkpoint.

    A checkpoint is saved if a `CheckpointManager` is available, and if the
    required number of steps has elapsed since the last checkpoint was saved
    (although this condition can be disabled by setting `check_interval=False`).

    Args:
      check_interval: Whether to check if the checkpoint interval has fully
        elapsed. If `False`, a checkpoint is saved regardless of the elapsed
        steps since the most recent checkpoint, unless no `checkpoint_manager`
        was provided to `Controller.__init__`.

    Returns:
      A boolean indicating whether a checkpoint was saved.
    """
    if self.checkpoint_manager and self.checkpoint_manager.checkpoint_interval:
      ckpt_path = self.checkpoint_manager.save(
          checkpoint_number=self.global_step.numpy(),
          check_interval=check_interval)
      if ckpt_path is not None:
        _log(f"saved checkpoint to {ckpt_path}.")
        return True
    return False

  def _require(self, attribute, for_method):
    """Utility method to raise an error if the given `attribute` is not set."""
    if getattr(self, attribute, None) is None:
      raise ValueError(
          f"`{attribute}` is not set. Pass `{attribute}` to "
          f"`Controller.__init__` before calling `{for_method}()`.")


class StepTimer:
  """Utility class for measuring steps/second."""

  def __init__(self, step):
    self.step = step
    self.start()

  def start(self):
    """Records the current step and time as the measurement start point."""
    self.last_iteration = self.step.numpy()
    self.last_time = time.time()

  def steps_per_second(self, restart=True):
    """Returns the steps/second measured since the last call to `start`."""
    value = ((self.step.numpy() - self.last_iteration) /
             (time.time() - self.last_time))
    if restart:
      self.start()
    return value