Commit 32c11fd9 authored by Dandelion Mané, committed by TensorFlower Gardener

Fix lint issues introduced by my pull from GitHub.

Change: 149985352
Parent: 7b8e31c5
@@ -1709,7 +1709,7 @@ class BatchNormTest(test.TestCase):
with self.test_session():
reg = lambda x: 0.1 * math_ops.reduce_sum(x)
images = np.random.uniform(size=(5, height, width, 3)).astype('f')
- output = _layers.batch_norm(images, param_regularizers={'beta': reg})
+ _layers.batch_norm(images, param_regularizers={'beta': reg})
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)
beta_decay = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)[0]
@@ -1720,7 +1720,7 @@ class BatchNormTest(test.TestCase):
with self.test_session():
reg = lambda x: 0.1 * math_ops.reduce_sum(x)
images = np.random.uniform(size=(5, height, width, 3)).astype('f')
- output = _layers.batch_norm(
+ _layers.batch_norm(
images, param_regularizers={'gamma': reg}, scale=True)
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)
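(Illustrative note, not part of the diff.) The two test hunks above exercise batch_norm's param_regularizers argument: the supplied regularizer is applied to the beta offset (or to gamma when scale=True) and its loss is added to the REGULARIZATION_LOSSES collection. A minimal sketch of the same idea through the public tf.contrib.layers alias; the image shape and regularizer weight are arbitrary:

import numpy as np
import tensorflow as tf

images = np.random.uniform(size=(5, 32, 32, 3)).astype('f')
reg = lambda w: 0.1 * tf.reduce_sum(w)

# Regularize the learned beta offset of batch normalization.
tf.contrib.layers.batch_norm(images, param_regularizers={'beta': reg})

# The regularizer's loss ends up in the standard collection.
reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
print(len(reg_losses))  # expected: 1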
@@ -18,9 +18,9 @@ This script contains various functions for training models. These include
manipulating gradients, creating a `train_op` (an operation that computes the
loss and applies the gradients) and a training loop function. The training loop
allows the user to pass in the `train_op` and runs the optimization according
- to user-specified arguments. Note that the training loop uses the tf.train.Supervisor
- and its managed_session in its implementation to ensure the ability of worker
- processes to recover from failures.
+ to user-specified arguments. Note that the training loop uses the
+ tf.train.Supervisor and its managed_session in its implementation to ensure the
+ ability of worker processes to recover from failures.
************************************
* A simple working training script *
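(Illustrative note, not part of the diff.) The docstring's example script is truncated here; below is a minimal sketch of what such a training script might look like with the slim-style helpers this module describes (create_train_op builds the loss-plus-gradient op, train runs the Supervisor-managed loop). The model, data, and log directory are placeholders:

import tensorflow as tf

slim = tf.contrib.slim

def toy_model(inputs):
  # Placeholder model: a single linear layer.
  return slim.fully_connected(inputs, 1, activation_fn=None)

inputs = tf.random_normal([32, 10])   # stand-in for a real input pipeline
labels = tf.random_normal([32, 1])
predictions = toy_model(inputs)
loss = tf.losses.mean_squared_error(labels, predictions)

optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
# create_train_op returns an op that computes the loss and applies the gradients.
train_op = slim.learning.create_train_op(loss, optimizer)

# The training loop runs inside a Supervisor-managed session so that worker
# processes can recover from failures.
slim.learning.train(train_op, logdir='/tmp/train_logs', number_of_steps=100)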
@@ -16,8 +16,11 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
- from sklearn import model_selection
from sklearn import metrics
+ from sklearn import model_selection
import tensorflow as tf
@@ -34,6 +34,7 @@ from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tensorflow
+ from tensorflow.python.client import device_lib
from tensorflow.python.client import session
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import errors
@@ -44,7 +45,6 @@ from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util.protobuf import compare
- from tensorflow.python.client import device_lib
def gpu_device_name():
@@ -54,6 +54,7 @@ def gpu_device_name():
return x.name
return ""
def assert_ops_in_graph(expected_ops, graph):
"""Assert all expected operations are found.
@@ -18,8 +18,8 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
- import numpy as np
import operator
+ import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
@@ -157,9 +157,10 @@ try:
# @ operator supported since python 3.5.
infix_matmul = operator.matmul
except AttributeError:
# For earlier versions of python, emulate regular behavior.
# Useful to build and test for 3.5+ on earlier versions.
- def infix_matmul(x, y):
+ def infix_matmul(x, y):  # pylint: disable=invalid-name
try:
r = type(x).__matmul__(x, y)
except AttributeError:
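(Illustrative note, not part of the diff.) The hunk above is cut off mid-function; as a sketch, an emulation of the @ operator for pre-3.5 Pythons typically falls back to the right operand's __rmatmul__ when __matmul__ is missing or returns NotImplemented. The exact body in the test file may differ:

def infix_matmul(x, y):  # pylint: disable=invalid-name
  try:
    r = type(x).__matmul__(x, y)
  except AttributeError:
    r = NotImplemented
  if r is NotImplemented and type(x) is not type(y):
    try:
      r = type(y).__rmatmul__(y, x)
    except AttributeError:
      r = NotImplemented
  if r is NotImplemented:
    raise TypeError("Unsupported operand type(s) for @: '{}' and '{}'".format(
        type(x).__name__, type(y).__name__))
  return r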
@@ -723,11 +723,11 @@ class VariableScopeTest(test.TestCase):
def testGetCollection(self):
with self.test_session():
a = variable_scope.get_variable("a", [])
b = variable_scope.get_variable("b", [], trainable=False)
_ = variable_scope.get_variable("a", [])
_ = variable_scope.get_variable("b", [], trainable=False)
with variable_scope.variable_scope("foo_") as scope1:
a = variable_scope.get_variable("a", [])
b = variable_scope.get_variable("b", [], trainable=False)
_ = variable_scope.get_variable("a", [])
_ = variable_scope.get_variable("b", [], trainable=False)
self.assertEqual([
v.name
for v in scope1.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
@@ -737,8 +737,8 @@ class VariableScopeTest(test.TestCase):
for v in scope1.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
], ["foo_/a:0", "foo_/b:0"])
with variable_scope.variable_scope("foo") as scope2:
a = variable_scope.get_variable("a", [])
b = variable_scope.get_variable("b", [], trainable=False)
_ = variable_scope.get_variable("a", [])
_ = variable_scope.get_variable("b", [], trainable=False)
self.assertEqual([
v.name
for v in scope2.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
@@ -758,21 +758,22 @@ class VariableScopeTest(test.TestCase):
def testGetTrainableVariables(self):
with self.test_session():
a = variable_scope.get_variable("a", [])
_ = variable_scope.get_variable("a", [])
with variable_scope.variable_scope("foo") as scope:
b = variable_scope.get_variable("b", [])
c = variable_scope.get_variable("c", [], trainable=False)
_ = variable_scope.get_variable("b", [])
_ = variable_scope.get_variable("c", [], trainable=False)
self.assertEqual([v.name
for v in scope.trainable_variables()], ["foo/b:0"])
def testGetGlobalVariables(self):
with self.test_session():
a = variable_scope.get_variable("a", [])
_ = variable_scope.get_variable("a", [])
with variable_scope.variable_scope("foo") as scope:
b = variable_scope.get_variable("b", [])
_ = variable_scope.get_variable("b", [])
self.assertEqual([v.name
for v in scope.global_variables()], ["foo/b:0"])
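(Illustrative note, not part of the diff.) These tests rely on how get_variable populates the graph collections: every variable is added to GLOBAL_VARIABLES, variables created with trainable=False are left out of TRAINABLE_VARIABLES, and a VariableScope filters either collection by its own name prefix. A minimal sketch through the public API, assuming the scope.trainable_variables()/global_variables() accessors exercised above:

import tensorflow as tf

with tf.variable_scope("foo") as scope:
  b = tf.get_variable("b", [])                   # trainable by default
  c = tf.get_variable("c", [], trainable=False)  # global, but not trainable

print([v.name for v in scope.trainable_variables()])  # ['foo/b:0']
print([v.name for v in scope.global_variables()])     # ['foo/b:0', 'foo/c:0']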
def axis0_into1_partitioner(shape=None, **unused_kwargs):
part = [1] * len(shape)
return part
@@ -67,8 +67,8 @@ class Scaffold(object):
The following pieces are directly accessible as attributes of the `Scaffold`
object:
- * `saver`: A `tf.train.Saver` object taking care of saving the variables. Picked
- from and stored into the `SAVERS` collection in the graph by default.
+ * `saver`: A `tf.train.Saver` object taking care of saving the variables.
+ Picked from and stored into the `SAVERS` collection in the graph by default.
* `init_op`: An op to run to initialize the variables. Picked from and
stored into the `INIT_OP` collection in the graph by default.
* `ready_op`: An op to verify that the variables are initialized. Picked
@@ -124,7 +124,8 @@ class Scaffold(object):
local_init_op: Optional op to initialize local variables.
summary_op: Optional op to gather all summaries. Must return a scalar
string tensor containing a serialized `Summary` proto.
- saver: Optional `tf.train.Saver` object to use to save and restore variables.
+ saver: Optional `tf.train.Saver` object to use to save and restore
+ variables.
"""
# NOTE(touts): modifying the init function to be passed the scaffold is a
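(Illustrative note, not part of the diff.) A minimal sketch of how a Scaffold is typically assembled and handed to a monitored session; pieces left unset are picked from the corresponding graph collections or given defaults when the scaffold is finalized. Assumes the tf.train.Scaffold / ChiefSessionCreator / MonitoredSession API:

import tensorflow as tf

x = tf.get_variable('x', [], initializer=tf.constant_initializer(3.0))
loss = tf.square(x - 1.0)
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

scaffold = tf.train.Scaffold(
    saver=tf.train.Saver(max_to_keep=3),        # stored into the SAVERS collection
    init_op=tf.global_variables_initializer())  # stored into the INIT_OP collection

session_creator = tf.train.ChiefSessionCreator(scaffold=scaffold)
with tf.train.MonitoredSession(session_creator=session_creator) as sess:
  for _ in range(10):
    sess.run(train_op)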