diff --git a/tensorflow/contrib/layers/python/layers/feature_column_test.py b/tensorflow/contrib/layers/python/layers/feature_column_test.py
index 67c2d3c6525210afc4753910512c9dc9a3e6657c..2e8bf9ffef298bc91025cd4db766b06d2f495d3f 100644
--- a/tensorflow/contrib/layers/python/layers/feature_column_test.py
+++ b/tensorflow/contrib/layers/python/layers/feature_column_test.py
@@ -21,6 +21,7 @@ from __future__ import print_function
 
 import itertools
 import os
+import tempfile
 
 import numpy as np
 import tensorflow as tf
@@ -609,7 +610,10 @@ class FeatureColumnTest(tf.test.TestCase):
             {embedding_col: input_tensor}, [embedding_col])
 
     save = tf.train.Saver()
-    checkpoint_path = os.path.join(self.get_temp_dir(), "model.ckpt")
+    ckpt_dir_prefix = os.path.join(
+        self.get_temp_dir(), "init_embedding_col_w_from_ckpt")
+    ckpt_dir = tempfile.mkdtemp(prefix=ckpt_dir_prefix)
+    checkpoint_path = os.path.join(ckpt_dir, "model.ckpt")
 
     with self.test_session() as sess:
       sess.run(tf.initialize_all_variables())
@@ -670,7 +674,10 @@ class FeatureColumnTest(tf.test.TestCase):
       assign_op = tf.assign(weight[0], weight[0] + 0.5)
 
     save = tf.train.Saver()
-    checkpoint_path = os.path.join(self.get_temp_dir(), "model.ckpt")
+    ckpt_dir_prefix = os.path.join(
+        self.get_temp_dir(), "init_crossed_col_w_from_ckpt")
+    ckpt_dir = tempfile.mkdtemp(prefix=ckpt_dir_prefix)
+    checkpoint_path = os.path.join(ckpt_dir, "model.ckpt")
 
     with self.test_session() as sess:
       sess.run(tf.initialize_all_variables())
diff --git a/tensorflow/contrib/lookup/lookup_ops_test.py b/tensorflow/contrib/lookup/lookup_ops_test.py
index 803cc3eb1ef5f0b11320a538bfdbdaf0e6be936a..d2d722214064d45165e0a28ce9832a102aa9c83c 100644
--- a/tensorflow/contrib/lookup/lookup_ops_test.py
+++ b/tensorflow/contrib/lookup/lookup_ops_test.py
@@ -18,6 +18,7 @@ from __future__ import division
 from __future__ import print_function
 
 import os
+import tempfile
 import numpy as np
 import six
 import tensorflow as tf
@@ -296,7 +297,8 @@ class MutableHashTableOpTest(tf.test.TestCase):
       self.assertAllEqual([0, 1, 2], sorted_values)
 
   def testSaveRestore(self):
-    save_path = os.path.join(self.get_temp_dir(), "hash")
+    save_dir = os.path.join(self.get_temp_dir(), "save_restore")
+    save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")
 
     with self.test_session(graph=tf.Graph()) as sess:
       v0 = tf.Variable(10.0, name="v0")
@@ -867,7 +869,8 @@ class MutableDenseHashTableOpTest(tf.test.TestCase):
                           [100, 0], [100, 0], [100, 0]], pairs)
 
   def testSaveRestore(self):
-    save_path = os.path.join(self.get_temp_dir(), "hash")
+    save_dir = os.path.join(self.get_temp_dir(), "save_restore")
+    save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")
 
     with self.test_session(graph=tf.Graph()) as sess:
       default_value = -1
@@ -922,7 +925,8 @@ class MutableDenseHashTableOpTest(tf.test.TestCase):
       self.assertAllEqual([-1, 0, 1, 2, -1], output.eval())
 
   def testVectorSaveRestore(self):
-    save_path = os.path.join(self.get_temp_dir(), "hash")
+    save_dir = os.path.join(self.get_temp_dir(), "vector_save_restore")
+    save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")
 
     with self.test_session(graph=tf.Graph()) as sess:
       empty_key = tf.constant([11, 13], tf.int64)
diff --git a/tensorflow/contrib/opt/python/training/moving_average_optimizer_test.py b/tensorflow/contrib/opt/python/training/moving_average_optimizer_test.py
index 4c24276cdd5e09ba830e9b49bf7479affc90ae0a..02ce9ff24fb1530ff726f045ed35f2a8450efc85 100644
--- a/tensorflow/contrib/opt/python/training/moving_average_optimizer_test.py
+++ b/tensorflow/contrib/opt/python/training/moving_average_optimizer_test.py
@@ -18,6 +18,7 @@ from __future__ import division
 from __future__ import print_function
 
 import os.path
+import tempfile
 
 import six
 import tensorflow as tf
@@ -40,7 +41,9 @@ class MovingAverageOptimizerTest(tf.test.TestCase):
             tf.train.GradientDescentOptimizer(learning_rate=2.0),
             average_decay=0.5,
             sequential_update=sequential_update)
-        save_path = os.path.join(self.get_temp_dir(), 'model')
+        save_dir = tempfile.mkdtemp(
+            prefix=os.path.join(self.get_temp_dir(), 'run_1'))
+        save_path = os.path.join(save_dir, 'model')
         update = opt.apply_gradients(
             list(six.moves.zip([grads0, grads1], [var0, var1])))
         train_saver = opt.swapping_saver()
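For context, a minimal standalone sketch of the pattern each hunk above applies: build the checkpoint path inside a directory created by tempfile.mkdtemp(prefix=...) under self.get_temp_dir(), so every test (and every rerun of the same test) writes its checkpoint into a fresh directory instead of colliding with leftover files. The test class, method, and path suffix below are hypothetical, not taken from the diff; the sketch assumes the same contrib-era TensorFlow API used in the changed tests (tf.test.TestCase, tf.train.Saver, tf.initialize_all_variables).

import os
import tempfile

import tensorflow as tf


class UniqueCheckpointDirTest(tf.test.TestCase):

  def testSaveToFreshDir(self):
    # mkdtemp() creates a new directory with a random suffix appended to the
    # prefix, so repeated runs of this test never see a stale checkpoint.
    ckpt_dir = tempfile.mkdtemp(
        prefix=os.path.join(self.get_temp_dir(), "save_to_fresh_dir"))
    checkpoint_path = os.path.join(ckpt_dir, "model.ckpt")

    with self.test_session() as sess:
      v = tf.Variable(42.0, name="v")
      sess.run(tf.initialize_all_variables())
      tf.train.Saver().save(sess, checkpoint_path)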