Commit bf16a751 authored by Gaurav Jain, committed by TensorFlower Gardener

Wrap global_variables_initializer with self.evaluate()

In addition, fix a few eval() calls as well as remove some
@test_util.run_v1_only annotations.

PiperOrigin-RevId: 225180248
Parent 1068d773
......@@ -230,7 +230,7 @@ class DistributeCoordinatorTestBase(test.TestCase):
with ops.device("/job:worker/task:0"):
result = math_ops.add_n(xs)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
result_value = sess.run(result)
self.assertEqual(result_value, expected)
if result_value == expected:
......@@ -278,7 +278,7 @@ class DistributeCoordinatorTestBase(test.TestCase):
train_op = control_flow_ops.group([x_add, y_sub])
if context.is_chief:
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
# Synchronize workers after initialization.
if context.has_barrier:
......
......@@ -579,7 +579,7 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
return self.v * 2
o = HasAVar()
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
call = def_function.function(o.call)
op = call()
self.assertAllEqual(self.evaluate(op), 2.0)
......
......@@ -39,7 +39,7 @@ class AutomaticControlDependenciesTest(test.TestCase):
def testBasic(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
with acd.AutomaticControlDependencies() as c:
v.assign(v + 1)
v.assign(2 * v)
......@@ -51,7 +51,7 @@ class AutomaticControlDependenciesTest(test.TestCase):
def testCondMustRun(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
p = array_ops.placeholder(dtype=dtypes.bool)
with acd.AutomaticControlDependencies() as c:
......@@ -73,7 +73,7 @@ class AutomaticControlDependenciesTest(test.TestCase):
def testCondMustRunSeparateRead(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
p = array_ops.placeholder(dtype=dtypes.bool)
with acd.AutomaticControlDependencies() as c:
......@@ -97,7 +97,7 @@ class AutomaticControlDependenciesTest(test.TestCase):
def testCondNested(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
p = array_ops.placeholder(dtype=dtypes.bool)
q = array_ops.placeholder(dtype=dtypes.bool)
with acd.AutomaticControlDependencies() as c:
......@@ -132,7 +132,7 @@ class AutomaticControlDependenciesTest(test.TestCase):
def testCondOneBranch(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
p = array_ops.placeholder(dtype=dtypes.bool)
with acd.AutomaticControlDependencies() as c:
......@@ -153,7 +153,7 @@ class AutomaticControlDependenciesTest(test.TestCase):
def testCondOneBranchUpdateBefore(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
p = array_ops.placeholder(dtype=dtypes.bool)
with acd.AutomaticControlDependencies() as c:
v.assign(v * 2)
......@@ -175,7 +175,7 @@ class AutomaticControlDependenciesTest(test.TestCase):
def testCondOneBranchUpdateAfter(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
p = array_ops.placeholder(dtype=dtypes.bool)
with acd.AutomaticControlDependencies() as c:
......@@ -211,7 +211,7 @@ class AutomaticControlDependenciesTest(test.TestCase):
def testDecorator(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
@acd.automatic_control_dependencies
def f():
......
......@@ -105,7 +105,6 @@ class GenerateVocabRemappingTest(test.TestCase):
self.assertAllEqual(expected_num_present, self.evaluate(num_present))
@test_util.run_v1_only('b/120545219')
class LoadAndRemapMatrixTest(test.TestCase):
"""Tests for the load_and_remap_matrix() op."""
......@@ -126,7 +125,7 @@ class LoadAndRemapMatrixTest(test.TestCase):
save = saver.Saver([matrix])
with self.cached_session() as sess:
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
self.bundle_file = os.path.join(test.get_temp_dir(), 'bundle_checkpoint')
save.save(sess, self.bundle_file)
......@@ -231,6 +230,7 @@ class LoadAndRemapMatrixTest(test.TestCase):
np.reshape(initializing_values, (num_rows, num_cols)),
self.evaluate(remapped_matrix))
@test_util.run_v1_only('b/120545219')
def test_load_and_remap_invalid_remapping(self):
"""Tests that errors are raised when an ID maps to multiple new IDs.
......@@ -262,6 +262,7 @@ class LoadAndRemapMatrixTest(test.TestCase):
with self.cached_session(), self.assertRaises(errors.UnimplementedError):
self.evaluate(remapped_matrix)
@test_util.run_v1_only('b/120545219')
def test_load_and_remap_incorrect_initializing_values(self):
"""Tests that errors are raised with incorrect number of init values."""
remapped_matrix = gen_checkpoint_ops.load_and_remap_matrix(
......@@ -313,7 +314,7 @@ class LoadAndRemapMatrixWithMaxRowsTest(test.TestCase):
with self.cached_session() as sess:
ckpt_path = os.path.join(test.get_temp_dir(), 'temp_ckpt')
save = saver.Saver([matrix])
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
save.save(sess, ckpt_path)
num_rows, num_cols = np_value.shape
......
......@@ -408,7 +408,7 @@ class ConditionalAccumulatorTest(test.TestCase):
set_global_step_op = q.set_global_step(new_global_step)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
for _ in range(3):
set_global_step_op.run()
self.evaluate(inc_global_step)
......
......@@ -20,7 +20,6 @@ from __future__ import print_function
import numpy as np
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
......@@ -33,7 +32,6 @@ class AssignOpTest(test.TestCase):
# NOTE(mrry): We exclude these tests from the TSAN TAP target, because they
# contain benign and deliberate data races when multiple threads update
# the same parameters without a lock.
@test_util.run_v1_only("b/120545219")
def testParallelUpdateWithoutLocking(self):
with self.cached_session() as sess:
ones_t = array_ops.fill([1024, 1024], 1.0)
......@@ -42,7 +40,7 @@ class AssignOpTest(test.TestCase):
state_ops.assign_add(
p, ones_t, use_locking=False) for _ in range(20)
]
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
def run_add(add_op):
self.evaluate(add_op)
......@@ -61,7 +59,6 @@ class AssignOpTest(test.TestCase):
self.assertTrue((vals >= ones).all())
self.assertTrue((vals <= ones * 20).all())
@test_util.run_v1_only("b/120545219")
def testParallelAssignWithoutLocking(self):
with self.cached_session() as sess:
ones_t = array_ops.fill([1024, 1024], float(1))
......@@ -70,7 +67,7 @@ class AssignOpTest(test.TestCase):
state_ops.assign(p, math_ops.multiply(ones_t, float(i)), False)
for i in range(1, 21)
]
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
def run_assign(assign_op):
self.evaluate(assign_op)
......@@ -94,7 +91,6 @@ class AssignOpTest(test.TestCase):
# contain non-benign but known data races between the variable assignment and
# returning the output tensors. This issue will be resolved with the new
# resource variables.
@test_util.run_v1_only("b/120545219")
def testParallelUpdateWithLocking(self):
with self.cached_session() as sess:
zeros_t = array_ops.fill([1024, 1024], 0.0)
......@@ -104,7 +100,7 @@ class AssignOpTest(test.TestCase):
state_ops.assign_add(
p, ones_t, use_locking=True) for _ in range(20)
]
p.initializer.run()
self.evaluate(p.initializer)
def run_add(add_op):
self.evaluate(add_op)
......@@ -122,7 +118,6 @@ class AssignOpTest(test.TestCase):
ones = np.ones((1024, 1024)).astype(np.float32)
self.assertAllEqual(vals, ones * 20)
@test_util.run_v1_only("b/120545219")
def testParallelAssignWithLocking(self):
with self.cached_session() as sess:
zeros_t = array_ops.fill([1024, 1024], 0.0)
......@@ -133,7 +128,7 @@ class AssignOpTest(test.TestCase):
p, math_ops.multiply(ones_t, float(i)), use_locking=True)
for i in range(1, 21)
]
p.initializer.run()
self.evaluate(p.initializer)
def run_assign(assign_op):
self.evaluate(assign_op)
......
......@@ -466,7 +466,7 @@ class FunctionalOpsTest(test.TestCase):
loss = l0 + array_ops.stop_gradient(l1)
grad = gradients_impl.gradients(ys=[loss], xs=[a, b])
with self.test_session(use_gpu=True) as sess:
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
self.evaluate(grad)
@test_util.run_in_graph_and_eager_modes
......
......@@ -323,26 +323,24 @@ class PartitionedVariablesTestCase(test.TestCase):
for i in xrange(len(expected_specs)):
self.assertEquals(expected_specs[i], slices[i]._save_slice_info.spec)
@test_util.run_deprecated_v1
def testVecConstantInit(self):
with self.cached_session():
rnd_par = constant_op.constant([1, 2, 3, 4])
vs = partitioned_variables.create_partitioned_variables([4], [4], rnd_par)
variables.global_variables_initializer().run()
val = array_ops.concat(vs, 0).eval()
self.evaluate(variables.global_variables_initializer())
val = array_ops.concat(vs, 0)
rnd = self.evaluate(rnd_par)
self.assertAllClose(rnd, val)
self.assertEqual([dtypes.int32] * 4, [v.dtype.base_dtype for v in vs])
self._TestSaveSpec(vs, ["4 0,1", "4 1,1", "4 2,1", "4 3,1"])
@test_util.run_deprecated_v1
def testConstantInit(self):
with self.cached_session():
rnd_par = constant_op.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
vs = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
rnd_par)
variables.global_variables_initializer().run()
val = array_ops.concat(vs, 1).eval()
self.evaluate(variables.global_variables_initializer())
val = array_ops.concat(vs, 1)
rnd = self.evaluate(rnd_par)
self.assertAllClose(rnd, val)
self.assertEqual([dtypes.int32] * 2, [v.dtype.base_dtype for v in vs])
......@@ -356,7 +354,7 @@ class PartitionedVariablesTestCase(test.TestCase):
rnd_par)
vs2 = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
rnd_par)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
var1_name = vs1[0]._save_slice_info.full_name
var2_name = vs2[0]._save_slice_info.full_name
self.assertEqual("hi/PartitionedVariable", var1_name)
......@@ -376,7 +374,7 @@ class PartitionedVariablesTestCase(test.TestCase):
vs, reuse=True, use_resource=use_resource):
vs2 = partitioned_variables.create_partitioned_variables(
[2, 4], [1, 2], rnd_par, dtype=dtypes.int32)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
var1_name = vs1[0]._save_slice_info.full_name
var2_name = vs2[0]._save_slice_info.full_name
self.assertEqual("hola/PartitionedVariable", var1_name)
......@@ -393,7 +391,7 @@ class PartitionedVariablesTestCase(test.TestCase):
rnd_par)
vs2 = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
rnd_par)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
var1_name = vs1[0]._save_slice_info.full_name
var2_name = vs2[0]._save_slice_info.full_name
# Currently, the name scope 'ola' has no effect.
......@@ -408,18 +406,16 @@ class PartitionedVariablesTestCase(test.TestCase):
def testName(self):
self._testNameHelper(use_resource=False)
@test_util.run_deprecated_v1
def testResourceName(self):
self._testNameHelper(use_resource=True)
@test_util.run_v1_only("b/120545219")
def testRandomInitValue(self):
with self.cached_session():
rnd = variables.Variable(random_ops.random_uniform([200, 40]))
vs = partitioned_variables.create_partitioned_variables(
rnd.get_shape(), [1, 10], rnd.initialized_value())
variables.global_variables_initializer().run()
val = array_ops.concat(vs, 1).eval()
self.evaluate(variables.global_variables_initializer())
val = array_ops.concat(vs, 1)
rnd = self.evaluate(rnd)
self.assertAllClose(rnd, val)
self.assertEqual([dtypes.float32] * 10, [v.dtype.base_dtype for v in vs])
......@@ -430,7 +426,6 @@ class PartitionedVariablesTestCase(test.TestCase):
"200 40 0,200:36,4"
])
@test_util.run_v1_only("b/120545219")
def testRandomInitUnevenPartitions(self):
with self.cached_session():
rnd = variables.Variable(
......@@ -440,7 +435,7 @@ class PartitionedVariablesTestCase(test.TestCase):
rnd.get_shape(), [1, i], rnd.initialized_value())
for i in xrange(1, 10)
]
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
rnd_val = self.evaluate(rnd)
# Only check the slice save specs for the first 5 tf.
save_specs = [
......@@ -462,33 +457,31 @@ class PartitionedVariablesTestCase(test.TestCase):
]
]
for i, vs in enumerate(var_lists):
var_val = array_ops.concat(vs, 1).eval()
var_val = array_ops.concat(vs, 1)
self.assertAllClose(rnd_val, var_val)
self.assertEqual([dtypes.float64] * len(vs),
[v.dtype.base_dtype for v in vs])
if i < len(save_specs):
self._TestSaveSpec(vs, save_specs[i])
@test_util.run_v1_only("b/120545219")
def testDegenerate(self):
with self.cached_session():
rnd = variables.Variable(random_ops.random_uniform([10, 43]))
vs = partitioned_variables.create_partitioned_variables(
rnd.get_shape(), [1, 1], rnd.initialized_value())
variables.global_variables_initializer().run()
val = array_ops.concat(vs, 0).eval()
self.evaluate(variables.global_variables_initializer())
val = array_ops.concat(vs, 0)
rnd = self.evaluate(rnd)
self.assertAllClose(rnd, val)
self._TestSaveSpec(vs, ["10 43 0,10:0,43"])
@test_util.run_v1_only("b/120545219")
def testSliceSizeOne(self):
with self.cached_session():
rnd = variables.Variable(random_ops.random_uniform([10, 43]))
vs = partitioned_variables.create_partitioned_variables(
rnd.get_shape(), [10, 1], rnd.initialized_value())
variables.global_variables_initializer().run()
val = array_ops.concat(vs, 0).eval()
self.evaluate(variables.global_variables_initializer())
val = array_ops.concat(vs, 0)
rnd = self.evaluate(rnd)
self.assertAllClose(rnd, val)
self._TestSaveSpec(vs, [
......@@ -497,7 +490,6 @@ class PartitionedVariablesTestCase(test.TestCase):
"10 43 6,1:0,43", "10 43 7,1:0,43", "10 43 8,1:0,43", "10 43 9,1:0,43"
])
@test_util.run_deprecated_v1
def testIotaInitializer(self):
self.assertAllClose([0., 1., 2., 3.], _IotaInitializer([4]))
self.assertAllClose([[0., 1.], [0., 10.], [0., 100.], [0., 1000.]],
......@@ -505,11 +497,11 @@ class PartitionedVariablesTestCase(test.TestCase):
with self.cached_session():
vs = partitioned_variables.create_partitioned_variables([13, 5], [3, 1],
_IotaInitializer)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
slice0 = _IotaInitializer([5, 5])
slice1 = _IotaInitializer([4, 5])
slice2 = _IotaInitializer([4, 5])
val = array_ops.concat(vs, 0).eval()
val = array_ops.concat(vs, 0)
self.assertAllClose(slice0 + slice1 + slice2, val)
self._TestSaveSpec(vs, ["13 5 0,5:0,5", "13 5 5,4:0,5", "13 5 9,4:0,5"])
......@@ -520,7 +512,7 @@ class PartitionedVariablesTestCase(test.TestCase):
with self.cached_session():
var0, var1 = partitioned_variables.create_partitioned_variables(
[20, 12], [1, 2], init_ops.random_uniform_initializer())
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
val0, val1 = self.evaluate(var0).flatten(), self.evaluate(var1).flatten()
self.assertTrue(np.linalg.norm(val0 - val1) > 1e-6)
# Negative test that proves that slices have the same values if
......@@ -528,7 +520,7 @@ class PartitionedVariablesTestCase(test.TestCase):
with self.cached_session():
var0, var1 = partitioned_variables.create_partitioned_variables(
[20, 12], [1, 2], init_ops.random_uniform_initializer(seed=201))
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
val0, val1 = self.evaluate(var0).flatten(), self.evaluate(var1).flatten()
self.assertAllClose(val0, val1)
......@@ -607,8 +599,8 @@ class PartitionedVariablesTestCase(test.TestCase):
self.assertTrue(
c.op in concat_control_inputs,
"var_x._concat() should get control dependencies from its scope.")
variables.global_variables_initializer().run()
self.assertAllClose(value.eval(), var_x.as_tensor().eval())
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(value, var_x.as_tensor())
def testMetaGraphSaveLoad(self):
save_prefix = os.path.join(self.get_temp_dir(), "ckpt")
......@@ -623,7 +615,7 @@ class PartitionedVariablesTestCase(test.TestCase):
v0_part = v0._get_partitions()
self.assertEqual(len(v0_list), 5)
self.assertAllEqual(v0_part, (5, 1))
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
save_graph.get_collection_ref("partvar").append(v0)
saver = saver_lib.Saver()
......
......@@ -689,7 +689,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase):
def testToFromProto(self):
with self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
w = resource_variable_ops.ResourceVariable.from_proto(v.to_proto())
self.assertEquals(2, math_ops.add(w, 1).eval())
......@@ -793,11 +793,11 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase):
with self.assertRaises(ValueError):
_ = w.value().op.get_attr("_class")
@test_util.run_v1_only("b/120545219")
@test_util.run_deprecated_v1
def testSharedName(self):
with self.cached_session():
v = resource_variable_ops.ResourceVariable(300.0, name="var4")
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
w = resource_variable_ops.var_handle_op(
dtype=v.dtype.base_dtype, shape=v.get_shape(), shared_name="var4",
......
......@@ -425,7 +425,6 @@ class TensorArrayTest(test.TestCase):
self.assertAllEqual(t_g_ta_0, t_g_ta_1)
self.assertAllEqual([[4.0, 5.0]], d_r1_0)
@test_util.run_v1_only("b/120545219")
def testTensorArrayWriteWrongIndexOrDataTypeFails(self):
with self.session(use_gpu=True):
ta = _make_ta(3, "foo", dtype=dtypes.float32)
......@@ -459,7 +458,6 @@ class TensorArrayTest(test.TestCase):
with self.assertRaisesOpError(error_msg):
self.evaluate(ta.write(3, 3.0).flow)
@test_util.run_v1_only("b/120545219")
def testTensorArrayReadWrongIndexOrDataTypeFails(self):
with self.session(use_gpu=True):
ta = _make_ta(3, "foo", dtype=dtypes.float32)
......@@ -505,7 +503,6 @@ class TensorArrayTest(test.TestCase):
"it has already been written to."):
self.evaluate(ta.write(2, 3.0).write(2, 3.0).flow)
@test_util.run_v1_only("b/120545219")
def testTensorArrayConcatIncompatibleShapesFails(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
......@@ -537,7 +534,6 @@ class TensorArrayTest(test.TestCase):
with self.assertRaisesOpError("shape"):
self.evaluate(w3.concat())
@test_util.run_v1_only("b/120545219")
def testTensorArraySplitIncompatibleShapesFails(self):
with self.session(use_gpu=True):
in_eager_mode = context.executing_eagerly()
......@@ -959,7 +955,7 @@ class TensorArrayTest(test.TestCase):
v0_grad = gradients_impl.gradients([vout], [v0], [grad_val])[0]
state0_grad = gradients_impl.gradients([vout], [state0], [grad_val])[0]
var_grad = gradients_impl.gradients([vout], [var], [grad_val])[0]
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
state0_t, var_t, v0_t, vout_t, v0_grad_t, var_grad_t, state0_grad_t = (
self.evaluate(
......@@ -1578,7 +1574,7 @@ class TensorArrayTest(test.TestCase):
self.assertEqual(tensor_shape.scalar(), read1.get_shape())
if not context.executing_eagerly():
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
read0_v, read1_v, size0_v, size1_v = self.evaluate((read0, read1, size0,
size1))
......
......@@ -66,7 +66,7 @@ class VariablesTestCase(test.TestCase):
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(var1)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(0.0, self.evaluate(var0))
self.assertAllClose(1.1, self.evaluate(var1))
......@@ -96,11 +96,11 @@ class VariablesTestCase(test.TestCase):
self.assertEqual([3, 6], depdep.get_shape())
self.assertEqual([3, 6], depdep.shape)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(rnd.eval(), self.evaluate(dep))
self.assertAllClose(rnd.eval() + self.evaluate(dep) + 2.0,
self.evaluate(depdep))
self.assertAllClose(self.evaluate(rnd), self.evaluate(dep))
self.assertAllClose(
self.evaluate(rnd) + self.evaluate(dep) + 2.0, self.evaluate(depdep))
def testIterable(self):
with self.assertRaisesRegexp(TypeError, "not iterable"):
......@@ -117,7 +117,7 @@ class VariablesTestCase(test.TestCase):
plus_one = var.assign_add(1.0)
minus_one = var.assign_sub(2.0)
four = var.assign(4.0)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(0.0, self.evaluate(var))
self.assertAllClose(1.0, self.evaluate(plus_one))
......@@ -136,7 +136,7 @@ class VariablesTestCase(test.TestCase):
plus_one = var.assign_add(1.0)
minus_one = var.assign_sub(2.0)
four = var.assign(4.0)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(0.0, self.evaluate(var))
self.evaluate(plus_one)
......@@ -166,7 +166,7 @@ class VariablesTestCase(test.TestCase):
var = variables.Variable(zero)
count_up_to = var.count_up_to(3)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
self.assertEqual(0, self.evaluate(var))
self.assertEqual(0, self.evaluate(count_up_to))
......@@ -264,10 +264,10 @@ class VariablesTestCase(test.TestCase):
with self.cached_session():
var_x = variables.Variable(2.0)
var_y = variables.Variable(3.0)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(2.0, self.evaluate(var_x))
self.assertAllClose(3.0, self.evaluate(var_y))
self.assertAllClose(5.0, math_ops.add(var_x, var_y).eval())
self.assertAllClose(5.0, self.evaluate(math_ops.add(var_x, var_y)))
@test_util.run_deprecated_v1
def testZeroSizeVarSameAsConst(self):
......@@ -277,9 +277,9 @@ class VariablesTestCase(test.TestCase):
variable_mul = math_ops.matmul(zero_size_const, zero_size_var)
const_mul = math_ops.matmul(
zero_size_const, zero_size_const, transpose_b=True)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
variable_output = self.evaluate(variable_mul)
self.assertAllClose(const_mul.eval(), variable_output)
self.assertAllClose(self.evaluate(const_mul), variable_output)
self.assertAllClose([[0., 0.], [0., 0.]], variable_output)
@test_util.run_deprecated_v1
......@@ -372,7 +372,7 @@ class VariablesTestCase(test.TestCase):
matmul = var_m.__matmul__([[10.0], [20.0]])
rmatmul = var_m.__rmatmul__([[10.0], [20.0]])
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([2.0], self.evaluate(add))
self.assertAllClose([3.0], self.evaluate(radd))
self.assertAllClose([1.0], self.evaluate(sub))
......@@ -409,7 +409,7 @@ class VariablesTestCase(test.TestCase):
def testSession(self):
with self.cached_session() as sess:
var = variables.Variable([1, 12])
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([1, 12], self.evaluate(var))
@test_util.run_v1_only("b/120545219")
......@@ -431,7 +431,7 @@ class VariablesTestCase(test.TestCase):
v1 = variables.Variable(initializer, dtype=dtypes.float32)
self.assertEqual(shape, v1.get_shape())
self.assertEqual(shape, v1.shape)
self.assertAllClose(value, v1.initial_value.eval())
self.assertAllClose(value, self.evaluate(v1.initial_value))
with self.assertRaises(errors_impl.FailedPreconditionError):
self.evaluate(v1)
......@@ -439,11 +439,11 @@ class VariablesTestCase(test.TestCase):
math_ops.negative(v1.initialized_value()), dtype=dtypes.float32)
self.assertEqual(v1.get_shape(), v2.get_shape())
self.assertEqual(v1.shape, v2.shape)
self.assertAllClose(np.negative(value), v2.initial_value.eval())
self.assertAllClose(np.negative(value), self.evaluate(v2.initial_value))
with self.assertRaises(errors_impl.FailedPreconditionError):
self.evaluate(v2)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(np.negative(value), self.evaluate(v2))
def testConstraintArg(self):
......@@ -465,10 +465,10 @@ class VariablesTestCase(test.TestCase):
a = variables.Variable([1, 2, 3], dtype=dtypes.float32)
b = variables.Variable(a.initialized_value() + 2)
c = variables.Variable(b.initialized_value() + 2)
variables.global_variables_initializer().run()
self.assertAllEqual(a.eval(), [1, 2, 3])
self.assertAllEqual(b.eval(), [3, 4, 5])
self.assertAllEqual(c.eval(), [5, 6, 7])
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(self.evaluate(a), [1, 2, 3])
self.assertAllEqual(self.evaluate(b), [3, 4, 5])
self.assertAllEqual(self.evaluate(c), [5, 6, 7])
@test_util.run_deprecated_v1
def testInitializerFunctionDevicePlacement(self):
......@@ -503,7 +503,7 @@ class VariablesTestCase(test.TestCase):
# initialized_value should not rerun the initializer_op if the variable
# has already been initialized elsewhere.
self.evaluate(v.assign(1.0))
self.assertEqual(1.0, v.initialized_value().eval())
self.assertEqual(1.0, self.evaluate(v.initialized_value()))
v_def.ClearField("initial_value_name")
with ops.Graph().as_default(), self.cached_session() as sess:
......@@ -537,7 +537,7 @@ class VariablesTestCase(test.TestCase):
def testLoad(self):
with self.cached_session():
var = variables.Variable(np.zeros((5, 5), np.float32))
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
var.load(np.ones((5, 5), np.float32))
self.assertAllClose(np.ones((5, 5), np.float32), self.evaluate(var))
......@@ -573,7 +573,7 @@ class IsInitializedTest(test.TestCase):
_ = v, w
uninited = variables.report_uninitialized_variables()
self.assertAllEqual(np.array([b"v", b"w"]), self.evaluate(uninited))
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
self.assertEqual(0, self.evaluate(uninited).size)
@test_util.run_v1_only("b/120545219")
......@@ -601,20 +601,20 @@ class IsInitializedTest(test.TestCase):
b = variables.Variable(array_ops.ones([2, 2]))
objective = math_ops.reduce_sum(b + math_ops.matmul(
a, a, transpose_a=True))
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
do_opt = gradient_descent.GradientDescentOptimizer(0.1).minimize(
objective)
self.evaluate([do_opt])
self.assertAllClose([[0.9, 0.9], [0.9, 0.9]], self.evaluate(b))
@test_util.run_v1_only("b/120545219")
class ObsoleteIsInitializedTest(test.TestCase):
def testNoVars(self):
with ops.Graph().as_default():
self.assertEqual(None, variables.assert_variables_initialized())
@test_util.run_v1_only("b/120545219")
def testVariables(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.VariableV1([1, 2])
......@@ -623,10 +623,9 @@ class ObsoleteIsInitializedTest(test.TestCase):
inited = variables.assert_variables_initialized()
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(inited)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
self.evaluate(inited)
@test_util.run_v1_only("b/120545219")
def testVariableList(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.VariableV1([1, 2])
......@@ -766,36 +765,36 @@ class PartitionedVariableTest(test.TestCase):
assign_list = pv_1.assign([c_0, c_1])
assign_part_value = pv_1.assign_add(assign_ones)
assign_part_var = pv_1.assign_sub(pv_0)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
self.assertEqual([1.0], plus_delta[0].eval())
self.assertEqual([1.0], self.evaluate(plus_delta[0]))
self.assertEqual([1.0], self.evaluate(v0))
self.assertEqual([3.0], plus_delta[1].eval())
self.assertEqual([3.0], self.evaluate(plus_delta[1]))
self.assertEqual([3.0], self.evaluate(v1))
self.assertEqual([-2.0], minus_delta[0].eval())
self.assertEqual([-2.0], self.evaluate(minus_delta[0]))
self.assertEqual([-2.0], self.evaluate(v0))
self.assertEqual([-1.0], minus_delta[1].eval())
self.assertEqual([-1.0], self.evaluate(minus_delta[1]))
self.assertEqual([-1.0], self.evaluate(v1))
self.assertEqual([1.0], assign_ones[0].eval())
self.assertEqual([1.0], self.evaluate(assign_ones[0]))
self.assertEqual([1.0], self.evaluate(v0))
self.assertEqual([1.0], assign_ones[1].eval())
self.assertEqual([1.0], self.evaluate(assign_ones[1]))
self.assertEqual([1.0], self.evaluate(v1))
self.assertEqual([2.0], assign_list[0].eval())
self.assertEqual([2.0], self.evaluate(assign_list[0]))
self.assertEqual([2.0], self.evaluate(v2))
self.assertEqual([3.0], assign_list[1].eval())
self.assertEqual([3.0], self.evaluate(assign_list[1]))
self.assertEqual([3.0], self.evaluate(v3))
self.assertEqual([3.0], assign_part_value[0].eval())
self.assertEqual([3.0], self.evaluate(assign_part_value[0]))
self.assertEqual([3.0], self.evaluate(v2))
self.assertEqual([4.0], assign_part_value[1].eval())
self.assertEqual([4.0], self.evaluate(assign_part_value[1]))
self.assertEqual([4.0], self.evaluate(v3))
self.assertEqual([2.0], assign_part_var[0].eval())
self.assertEqual([2.0], self.evaluate(assign_part_var[0]))
self.assertEqual([2.0], self.evaluate(v2))
self.assertEqual([3.0], assign_part_var[1].eval())
self.assertEqual([3.0], self.evaluate(assign_part_var[1]))
self.assertEqual([3.0], self.evaluate(v3))
......
......@@ -565,7 +565,7 @@ class DataTypesTest(test_util.TensorFlowTestCase):
strict=strict)
with self.cached_session() as sess:
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
true_feed_dict = {condition: True}
true_feed_dict.update(feed_dict)
result_cond, result_case = sess.run([output_cond, output_case],
......
......@@ -1027,7 +1027,7 @@ class CustomGradientTest(test_util.TensorFlowTestCase):
conditional, lambda: alpha * 2, lambda: alpha * 3)
g, = gradients_impl.gradients(output, alpha)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(g.eval(), [2.0])
self.assertAllEqual(g.eval(feed_dict={conditional: False}), [3.0])
......
......@@ -1084,7 +1084,7 @@ class SavedModelTest(SavedModelTestBase):
# CheckpointedOp is a key-value table that can be saved across sessions.
# The table register itself in SAVEABLE_OBJECTS collection.
v1 = saver_test_utils.CheckpointedOp(name="v1")
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
v1.insert("k1", 3.0).run()
# Once the table is restored, we can access it through this reference.
ops.add_to_collection("table_ref", v1.table_ref)
......
......@@ -106,7 +106,7 @@ class AdagradOptimizerTest(test.TestCase):
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
loss = pred * pred
sgd_op = adagrad.AdagradOptimizer(1.0).minimize(loss)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0], [3.0, 4.0]],
self.evaluate(var0))
......@@ -129,7 +129,7 @@ class AdagradOptimizerTest(test.TestCase):
constant_op.constant(3.0), initial_accumulator_value=0.1)
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
......@@ -163,7 +163,7 @@ class AdagradOptimizerTest(test.TestCase):
ada_opt = adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1)
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([[1.0], [2.0]], self.evaluate(var0))
self.assertAllClose([[3.0], [4.0]], self.evaluate(var1))
......@@ -198,7 +198,7 @@ class AdagradOptimizerTest(test.TestCase):
[(grad_repeated_index, repeated_index_update_var)])
aggregated_update = adagrad.AdagradOptimizer(3.0).apply_gradients(
[(grad_aggregated, aggregated_update_var)])
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(aggregated_update_var.eval(),
self.evaluate(repeated_index_update_var))
for _ in range(3):
......@@ -223,7 +223,7 @@ class AdagradOptimizerTest(test.TestCase):
2.0).minimize(loss_repeated)
update_op_aggregated = adagrad.AdagradOptimizer(
2.0).minimize(loss_aggregated)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
self.assertAllCloseAccordingToType(
self.evaluate(var_repeated), self.evaluate(var_aggregated))
for _ in range(3):
......@@ -289,7 +289,7 @@ class AdagradOptimizerTest(test.TestCase):
self.assertEquals(slot0.get_shape(), var0.get_shape())
slot1 = ada_opt.get_slot(var1, "accumulator")
self.assertEquals(slot1.get_shape(), var1.get_shape())
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values.
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
......
......@@ -154,7 +154,7 @@ class LoadAndRemapWrappersTest(test.TestCase):
partitioner=partitioned_variables.fixed_size_partitioner(2))
with self.cached_session():
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(expected_remapped_matrix,
remapped_matrix.as_tensor().eval())
......@@ -188,7 +188,7 @@ class LoadAndRemapWrappersTest(test.TestCase):
partitioner=partitioned_variables.fixed_size_partitioner(2))
with self.cached_session():
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(expected_remapped_matrix,
remapped_matrix.as_tensor().eval())
......@@ -226,7 +226,7 @@ class LoadAndRemapWrappersTest(test.TestCase):
partitioner=partitioned_variables.fixed_size_partitioner(2))
with self.cached_session():
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(expected_remapped_matrix,
remapped_matrix.as_tensor().eval())
......@@ -262,7 +262,7 @@ class LoadAndRemapWrappersTest(test.TestCase):
partitioner=partitioned_variables.fixed_size_partitioner(2))
with self.cached_session():
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(expected_remapped_matrix,
remapped_matrix.as_tensor().eval())
......@@ -296,7 +296,7 @@ class LoadAndRemapWrappersTest(test.TestCase):
partitioner=partitioned_variables.fixed_size_partitioner(2))
with self.cached_session():
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(expected_remapped_embeddings,
remapped_embeddings.as_tensor().eval())
......@@ -342,7 +342,7 @@ class LoadAndRemapWrappersTest(test.TestCase):
partitioner=partitioned_variables.fixed_size_partitioner(2))
with self.cached_session():
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(expected_remapped_embeddings,
remapped_embeddings.as_tensor().eval())
......@@ -380,7 +380,7 @@ class LoadAndRemapWrappersTest(test.TestCase):
partitioner=partitioned_variables.fixed_size_partitioner(2))
with self.cached_session():
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(expected_remapped_embeddings,
remapped_embeddings.as_tensor().eval())
......
......@@ -58,7 +58,7 @@ class MatchFilenamesOnceTest(test_lib.TestCase):
question = inp.match_filenames_once(
os.path.join(self.get_temp_dir(), "match_filenames.?"))
one = inp.match_filenames_once(additional[1])
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
self.assertItemsEqual(
map(compat.as_bytes, filenames), self.evaluate(star))
......@@ -84,7 +84,7 @@ class LimitEpochsTest(test_lib.TestCase):
with self.cached_session():
love_me = constant_op.constant("Love Me")
love_me_two_times = inp.limit_epochs(love_me, num_epochs=2)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
self.assertEqual(b"Love Me", self.evaluate(love_me_two_times))
self.assertEqual(b"Love Me", self.evaluate(love_me_two_times))
......@@ -105,7 +105,7 @@ class InputProducerTest(test_lib.TestCase):
input_tensor, num_epochs=num_epochs, shuffle=False)
dequeue_many = queue.dequeue_many(len(input_tensor) * num_epochs)
dequeue = queue.dequeue()
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
......@@ -132,7 +132,7 @@ class InputProducerTest(test_lib.TestCase):
input_tensor, element_shape=[4], num_epochs=num_epochs, shuffle=False)
dequeue_many = queue.dequeue_many(len(input_value) * num_epochs)
dequeue = queue.dequeue()
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
......@@ -163,7 +163,7 @@ class StringInputProducerTest(test_lib.TestCase):
strings, num_epochs=num_epochs, shuffle=False)
dequeue_many = queue.dequeue_many(len(strings) * num_epochs)
dequeue = queue.dequeue()
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
......@@ -186,7 +186,7 @@ class StringInputProducerTest(test_lib.TestCase):
strings, num_epochs=num_epochs, shuffle=True, seed=271828)
dequeue_many = queue.dequeue_many(len(strings))
dequeue = queue.dequeue()
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
......@@ -234,7 +234,7 @@ class StringInputProducerTest(test_lib.TestCase):
constant_op.constant(
[], dtype=dtypes.string))
dequeue = queue.dequeue()
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners(coord=coord)
with self.assertRaises(errors_impl.OutOfRangeError):
......@@ -284,7 +284,7 @@ class RangeInputProducerTest(test_lib.TestCase):
range_size, num_epochs=num_epochs, shuffle=False)
dequeue_many = queue.dequeue_many(range_size * num_epochs)
dequeue = queue.dequeue()
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
......@@ -307,7 +307,7 @@ class RangeInputProducerTest(test_lib.TestCase):
range_size, num_epochs=num_epochs, shuffle=True, seed=314159)
dequeue_many = queue.dequeue_many(range_size)
dequeue = queue.dequeue()
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
......@@ -358,7 +358,7 @@ class SliceInputProducerTest(test_lib.TestCase):
source_ints = [2, 3, 5, 7]
slices = inp.slice_input_producer(
[source_strings, source_ints], num_epochs=num_epochs, shuffle=False)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
......@@ -386,7 +386,7 @@ class SliceInputProducerTest(test_lib.TestCase):
num_epochs=num_epochs,
shuffle=True,
seed=161803)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
......@@ -487,7 +487,7 @@ class BatchTest(test_lib.TestCase):
batched = inp.batch(
[counter, sparse_counter, "string"], batch_size=batch_size)
batched_fetch = batched
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
......@@ -555,7 +555,7 @@ class BatchTest(test_lib.TestCase):
counter = examples.count_up_to(num_batches * batch_size)
string = array_ops.tile(["string"],
math_ops.to_int32(array_ops.stack([counter])))
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
batched = inp.batch(
[counter, string], batch_size=batch_size, dynamic_pad=True)
......@@ -590,7 +590,7 @@ class BatchTest(test_lib.TestCase):
dense_shape=[1])
pre_batched = inp.batch([counter, sparse_counter, "string"], batch_size=2)
batched = inp.batch(pre_batched, enqueue_many=True, batch_size=batch_size)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
......@@ -629,7 +629,7 @@ class BatchTest(test_lib.TestCase):
[counter, sparse_counter, "string"],
batch_size=batch_size,
num_threads=4)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
......@@ -672,7 +672,7 @@ class BatchTest(test_lib.TestCase):
[counter, sparse_counter, "string"],
batch_size=batch_size,
allow_smaller_final_batch=True)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
......@@ -730,7 +730,7 @@ class BatchTest(test_lib.TestCase):
batch_size=batch_size,
num_threads=4,
allow_smaller_final_batch=True)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
......@@ -1058,7 +1058,7 @@ class BatchJoinTest(test_lib.TestCase):
batched_fetch[1].dense_shape.get_shape().as_list())
self.assertAllEqual((batch_size,), batched_fetch[2].get_shape().as_list())
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
......@@ -1157,7 +1157,7 @@ class BatchJoinTest(test_lib.TestCase):
self.assertAllEqual((batch_size,), batched[0].get_shape().as_list())
self.assertAllEqual((batch_size, None), batched[1].get_shape().as_list())
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
......@@ -1244,7 +1244,7 @@ class BatchJoinTest(test_lib.TestCase):
self.assertAllEqual((2,), batched[1].dense_shape.get_shape().as_list())
self.assertAllEqual((None,), batched[2].get_shape().as_list())
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
......@@ -1339,7 +1339,7 @@ class BatchJoinTest(test_lib.TestCase):
self.assertAllEqual((None,), batched[0].get_shape().as_list())
self.assertAllEqual((None, None), batched[1].get_shape().as_list())
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
......@@ -1644,7 +1644,7 @@ class ShuffleBatchTest(test_lib.TestCase):
min_after_dequeue=16,
seed=141421)
batched_fetch = batched
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
......@@ -1702,7 +1702,7 @@ class ShuffleBatchTest(test_lib.TestCase):
seed=141421,
allow_smaller_final_batch=True)
batched_fetch = batched
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
......@@ -1756,7 +1756,7 @@ class ShuffleBatchTest(test_lib.TestCase):
min_after_dequeue=16,
seed=173205,
num_threads=4)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
......@@ -1807,7 +1807,7 @@ class ShuffleBatchTest(test_lib.TestCase):
seed=173205,
num_threads=4,
allow_smaller_final_batch=True)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
......@@ -2070,7 +2070,7 @@ class ShuffleBatchJoinTest(test_lib.TestCase):
batched_fetch[1].dense_shape.get_shape().as_list())
self.assertAllEqual((batch_size,), batched_fetch[2].get_shape().as_list())
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
......@@ -2165,7 +2165,7 @@ class ShuffleBatchJoinTest(test_lib.TestCase):
self.assertAllEqual((2,), batched[1].dense_shape.get_shape().as_list())
self.assertAllEqual((None,), batched[2].get_shape().as_list())
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
......
......@@ -43,7 +43,7 @@ class MovingAveragesTest(test.TestCase):
decay = 0.25
assign = moving_averages.assign_moving_average(
var, val, decay, zero_debias=False)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([10.0, 11.0], self.evaluate(var))
assign.op.run()
self.assertAllClose(
......@@ -57,7 +57,7 @@ class MovingAveragesTest(test.TestCase):
val = constant_op.constant([1.0, 2.0], dtypes.float32)
decay = 0.25
assign = moving_averages.assign_moving_average(var, val, decay)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([0.0, 0.0], self.evaluate(var))
assign.op.run()
self.assertAllClose(
......@@ -98,7 +98,7 @@ class MovingAveragesTest(test.TestCase):
val = array_ops.placeholder(dtypes.float32, [])
wma = moving_averages.weighted_moving_average(val, decay, weight)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
# Get the first weighted moving average.
val_1 = 3.0
......@@ -125,7 +125,7 @@ class MovingAveragesTest(test.TestCase):
val = array_ops.placeholder(dtypes.bfloat16, [])
wma = moving_averages.weighted_moving_average(val, decay, weight)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
# Get the first weighted moving average.
val_1 = 3.0
......@@ -164,7 +164,7 @@ class ExponentialMovingAverageTest(test.TestCase):
thirties = _Repeat(30.0, dim)
var0 = variables.Variable(tens, name="v0")
var1 = variables.Variable(thirties, name="v1")
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
# Note that tensor2 is not a Variable but just a plain Tensor resulting
# from the sum operation.
tensor2 = var0 + var1
......@@ -178,7 +178,7 @@ class ExponentialMovingAverageTest(test.TestCase):
self.assertFalse(avg0 in variables.trainable_variables())
self.assertFalse(avg1 in variables.trainable_variables())
self.assertFalse(avg2 in variables.trainable_variables())
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
self.assertEqual("v0/ExponentialMovingAverage:0", avg0.name)
self.assertEqual("v1/ExponentialMovingAverage:0", avg1.name)
......
......@@ -49,7 +49,7 @@ class QueueRunnerTest(test.TestCase):
var = variables.VariableV1(zero64)
count_up_to = var.count_up_to(3)
queue = data_flow_ops.FIFOQueue(10, dtypes.float32)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
qr = queue_runner_impl.QueueRunner(queue, [count_up_to])
threads = qr.create_threads(sess)
self.assertEqual(sorted(t.name for t in threads),
......@@ -77,7 +77,7 @@ class QueueRunnerTest(test.TestCase):
self.assertEqual(sorted(t.name for t in threads),
["QueueRunnerThread-fifo_queue-CountUpTo:0",
"QueueRunnerThread-fifo_queue-CountUpTo_1:0"])
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
for t in threads:
t.start()
for t in threads:
......@@ -93,7 +93,7 @@ class QueueRunnerTest(test.TestCase):
qr = queue_runner_impl.QueueRunner(queue, [_MockOp("i fail"),
_MockOp("so fail")])
threads = qr.create_threads(sess)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
for t in threads:
t.start()
for t in threads:
......@@ -140,7 +140,7 @@ class QueueRunnerTest(test.TestCase):
var = variables.VariableV1(zero64)
count_up_to = var.count_up_to(3)
queue = data_flow_ops.FIFOQueue(10, dtypes.float32)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
qr = queue_runner_impl.QueueRunner(queue, [count_up_to])
# As the coordinator to stop. The queue runner should
# finish immediately.
......@@ -196,7 +196,7 @@ class QueueRunnerTest(test.TestCase):
var = variables.VariableV1(zero64)
count_up_to = var.count_up_to(3)
queue = data_flow_ops.FIFOQueue(10, dtypes.float32)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
coord = coordinator.Coordinator()
qr = queue_runner_impl.QueueRunner(queue, [count_up_to])
# NOTE that this test does not actually start the threads.
......@@ -212,7 +212,7 @@ class QueueRunnerTest(test.TestCase):
var = variables.VariableV1(zero64)
count_up_to = var.count_up_to(3)
queue = data_flow_ops.FIFOQueue(10, dtypes.float32)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
coord = coordinator.Coordinator()
qr = queue_runner_impl.QueueRunner(queue, [count_up_to])
threads = []
......@@ -229,7 +229,7 @@ class QueueRunnerTest(test.TestCase):
var = variables.VariableV1(zero64)
count_up_to = var.count_up_to(3)
queue = data_flow_ops.FIFOQueue(10, dtypes.float32)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
qr = queue_runner_impl.QueueRunner(queue, [count_up_to,
_MockOp("bad_op")])
threads = qr.create_threads(sess, start=True)
......
......@@ -38,7 +38,7 @@ class SlotCreatorTest(test.TestCase):
v = variables.Variable([1.0, 2.5], name="var")
slot = slot_creator.create_slot(v, v.initialized_value(), name="slot")
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
self.assertEqual("var/slot", slot.op.name)
self.assertEqual([2], slot.get_shape().as_list())
......@@ -51,7 +51,7 @@ class SlotCreatorTest(test.TestCase):
v = constant_op.constant([1.0, 2.5], name="const")
slot = slot_creator.create_slot(v, v * 2, name="slot")
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
self.assertEqual("const/slot", slot.op.name)
self.assertEqual([2], slot.get_shape().as_list())
......@@ -66,7 +66,7 @@ class SlotCreatorTest(test.TestCase):
slot = slot_creator.create_zeros_slot(
v, name="slot", dtype=dtypes.float64)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
self.assertEqual("var/slot", slot.op.name)
self.assertEqual([2], slot.get_shape().as_list())
......@@ -88,7 +88,7 @@ class SlotCreatorTest(test.TestCase):
slot = slot_creator.create_zeros_slot(
v, name="slot", dtype=dtypes.float64)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
self.assertEqual("var/slot", slot.op.name)
self.assertEqual([2], array_ops.shape(slot).eval())
......@@ -102,7 +102,7 @@ class SlotCreatorTest(test.TestCase):
with ops.control_dependencies(None):
slot = slot_creator.create_zeros_slot(v, name="slot")
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
self.assertEqual("const/slot", slot.op.name)
self.assertEqual([2], slot.get_shape().as_list())
......@@ -118,7 +118,7 @@ class SlotCreatorTest(test.TestCase):
slot = slot_creator.create_zeros_slot(
v, name="slot", dtype=dtypes.float64)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
self.assertEqual("const/slot", slot.op.name)
self.assertEqual([2], array_ops.shape(slot).eval())
......
......@@ -53,7 +53,7 @@ class TrainingOpsTest(TensorFlowTestCase):
self.setUp()
with self.session(use_gpu=use_gpu):
var = variables.VariableV1(x)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
self.assertAllCloseAccordingToType(x, self.evaluate(var))
apply_sgd = training_ops.apply_gradient_descent(var, alpha, delta)
out = self.evaluate(apply_sgd)
......@@ -74,7 +74,7 @@ class TrainingOpsTest(TensorFlowTestCase):
with self.session(use_gpu=use_gpu):
var = variables.VariableV1(x)
accum = variables.VariableV1(y)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
self.assertAllCloseAccordingToType(x, self.evaluate(var))
apply_adagrad = training_ops.apply_adagrad(var, accum, lr, grad)
......@@ -99,7 +99,7 @@ class TrainingOpsTest(TensorFlowTestCase):
var = variables.VariableV1(x)
accum = variables.VariableV1(y)
linear = variables.VariableV1(z)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
self.assertAllCloseAccordingToType(x, self.evaluate(var))
apply_ftrl = training_ops.apply_ftrl(var, accum, linear, grad, lr, l1, l2,
......@@ -156,7 +156,7 @@ class TrainingOpsTest(TensorFlowTestCase):
with self.session(use_gpu=False):
var = variables.VariableV1(x)
accum = variables.VariableV1(y)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
self.assertAllCloseAccordingToType(x, self.evaluate(var))
sparse_apply_adagrad = training_ops.sparse_apply_adagrad(
......@@ -187,7 +187,7 @@ class TrainingOpsTest(TensorFlowTestCase):
var = variables.VariableV1(x)
accum = variables.VariableV1(y)
linear = variables.VariableV1(z)
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
self.assertAllCloseAccordingToType(x, self.evaluate(var))
sparse_apply_ftrl = training_ops.sparse_apply_ftrl(
......@@ -285,7 +285,7 @@ class TrainingOpsTest(TensorFlowTestCase):
beta2_power_t = variables.VariableV1(beta2_power)
lr_t = constant_op.constant(lr, self._toType(var.dtype), [])
epsilon_t = constant_op.constant(epsilon, self._toType(var.dtype), [])
variables.global_variables_initializer().run()
self.evaluate(variables.global_variables_initializer())
self.assertAllCloseAccordingToType(var, self.evaluate(var_t))
new_var, _, _ = self._adamUpdateNumpy(var, grad, t, m, v, lr, beta1,
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册