From 9a4878cc17d4039939e8df5a2a984cca8028baeb Mon Sep 17 00:00:00 2001
From: Vijay Vasudevan
Date: Tue, 15 Mar 2016 18:45:27 -0800
Subject: [PATCH] Rollback of: "Merge changes from github."

Change: 117304114
---
 configure | 2 +-
 tensorflow/cc/ops/const_op.cc | 3 -
 tensorflow/cc/ops/const_op.h | 1 -
 .../linear_optimizer/python/ops/sdca_ops.py | 4 +-
 tensorflow/core/framework/allocator.h | 7 +-
 .../core/framework/node_def_util_test.cc | 2 +-
 tensorflow/core/framework/numeric_types.h | 2 -
 .../core/framework/op_def_builder_test.cc | 2 +-
 tensorflow/core/framework/op_def_util_test.cc | 4 -
 tensorflow/core/framework/register_types.h | 8 +-
 tensorflow/core/framework/tensor.cc | 17 -
 tensorflow/core/framework/tensor.proto | 4 -
 tensorflow/core/framework/tensor_test.cc | 72 +-
 tensorflow/core/framework/tensor_testutil.h | 6 -
 tensorflow/core/framework/types.cc | 18 +-
 tensorflow/core/framework/types.h | 1 -
 tensorflow/core/framework/types.proto | 3 +-
 .../core/ops/compat/ops_history.v0.pbtxt | 1862 ++---------------
 tensorflow/core/ops/ops.pbtxt | 29 -
 tensorflow/core/public/tensor_c_api.h | 4 +-
 .../core/util/saved_tensor_slice_util.h | 1 -
 tensorflow/examples/android/README.md | 8 +-
 tensorflow/examples/label_image/README.md | 12 +-
 tensorflow/examples/udacity/1_notmnist.ipynb | 2 +-
 tensorflow/models/image/cifar10/cifar10.py | 4 +-
 .../models/image/cifar10/cifar10_input.py | 2 +-
 tensorflow/python/__init__.py | 1 -
 tensorflow/python/client/session_test.py | 5 +-
 tensorflow/python/client/tf_session_helper.cc | 12 +-
 tensorflow/python/framework/dtypes.py | 22 +-
 tensorflow/python/framework/dtypes_test.py | 11 +-
 tensorflow/python/framework/ops_test.py | 2 +-
 tensorflow/python/framework/tensor_util.py | 21 +-
 .../python/framework/tensor_util_test.py | 71 +-
 tensorflow/python/lib/core/py_func.cc | 6 -
 tensorflow/python/training/session_manager.py | 2 +-
 tensorflow/tools/ci_build/Dockerfile.cpu | 1 -
 tensorflow/tools/ci_build/Dockerfile.gpu | 1 -
 tensorflow/tools/ci_build/builds/pip.sh | 62 +-
 .../ci_build/builds/test_installation.sh | 3 +-
 .../tools/ci_build/ci_parameterized_build.sh | 2 +-
 .../ci_build/install/install_deb_packages.sh | 2 -
 .../ci_build/install/install_pip_packages.sh | 20 -
 tensorflow/tools/docker/docker_run_gpu.sh | 2 +-
 tensorflow/tools/docker/run_jupyter.sh | 2 +-
 tensorflow/tools/docs/gen_docs.sh | 2 +-
 tensorflow/tools/docs/gen_docs_test.sh | 4 +-
 .../tools/pip_package/build_pip_package.sh | 2 +-
 tensorflow/tools/swig/swig.sh | 2 +-
 tensorflow/tools/test/BUILD | 32 +-
 third_party/gpus/cuda/cuda_config.sh | 2 +-
 util/python/python_config.sh | 2 +-
 52 files changed, 294 insertions(+), 2080 deletions(-)
 delete mode 100755 tensorflow/tools/ci_build/install/install_pip_packages.sh

diff --git a/configure b/configure
index 0faf61c67b1..2d7ec77aec2 100755
--- a/configure
+++ b/configure
@@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/bin/bash
 
 ## Set up python-related environment settings
 while true; do
diff --git a/tensorflow/cc/ops/const_op.cc b/tensorflow/cc/ops/const_op.cc
index ddfa2a5b0e0..cdf653a880e 100644
--- a/tensorflow/cc/ops/const_op.cc
+++ b/tensorflow/cc/ops/const_op.cc
@@ -87,9 +87,6 @@ DEFINE_CONST(bool, bool_val);
 DEFINE_CONST_IMPL(complex64, proto.add_scomplex_val(t.begin()->real());
                   proto.add_scomplex_val(t.begin()->imag()););
 
-DEFINE_CONST_IMPL(complex128, proto.add_dcomplex_val(t.begin()->real());
-                  proto.add_dcomplex_val(t.begin()->imag()););
-
 Node* Const(StringPiece s, const GraphDefBuilder::Options& options) {
   if (options.HaveError()) return nullptr;
   NodeBuilder node_builder(options.GetNameForOp(OpName()), OpName(),
diff --git a/tensorflow/cc/ops/const_op.h b/tensorflow/cc/ops/const_op.h
index 0a1ee3f1e05..36a97f8e4ca 100644
--- a/tensorflow/cc/ops/const_op.h
+++ b/tensorflow/cc/ops/const_op.h
@@ -49,7 +49,6 @@ DECLARE_CONST(uint8);
 DECLARE_CONST(int16);
 DECLARE_CONST(int8);
 DECLARE_CONST(complex64);
-DECLARE_CONST(complex128);
 DECLARE_CONST(int64);
 DECLARE_CONST(bool);
 
diff --git a/tensorflow/contrib/linear_optimizer/python/ops/sdca_ops.py b/tensorflow/contrib/linear_optimizer/python/ops/sdca_ops.py
index 5e98e149b9d..525a984ee98 100644
--- a/tensorflow/contrib/linear_optimizer/python/ops/sdca_ops.py
+++ b/tensorflow/contrib/linear_optimizer/python/ops/sdca_ops.py
@@ -21,8 +21,6 @@ import os.path
 import threading
 import uuid
 
-from six.moves import range
-
 from tensorflow.python.framework import dtypes
 from tensorflow.python.framework import ops
 from tensorflow.python.framework.load_library import load_op_library
@@ -225,7 +223,7 @@ class SdcaModel(object):
       dense_features = self._convert_n_to_tensor(examples['dense_features'])
       dense_variables = self._convert_n_to_tensor(self._variables[
           'dense_features_weights'])
-      for i in range(len(dense_variables)):
+      for i in xrange(len(dense_variables)):
         predictions += dense_features[i] * dense_variables[i]
     return predictions
diff --git a/tensorflow/core/framework/allocator.h b/tensorflow/core/framework/allocator.h
index 30c7c191023..97a3f616930 100644
--- a/tensorflow/core/framework/allocator.h
+++ b/tensorflow/core/framework/allocator.h
@@ -187,14 +187,13 @@ class Allocator {
 
 // is_simple<T>::value if T[] can be safely constructed and destructed
 // without running T() and ~T(). We do not use std::is_trivial<T>
-// directly because std::complex<float> and std::complex<double> are
-// not trival, but their arrays can be constructed and destructed
-// without running their default ctors and dtors.
+// directly because std::complex<float> is not trival but its array
+// can be constructed and destructed without running its default ctor
+// and dtor.
 template <typename T>
 struct is_simple {
   static const bool value = std::is_trivial<T>::value ||
                             std::is_same<T, complex64>::value ||
-                            std::is_same<T, complex128>::value ||
                             is_quantized<T>::value;
 };
 
diff --git a/tensorflow/core/framework/node_def_util_test.cc b/tensorflow/core/framework/node_def_util_test.cc
index e7dd1e58271..07bd60f3b7b 100644
--- a/tensorflow/core/framework/node_def_util_test.cc
+++ b/tensorflow/core/framework/node_def_util_test.cc
@@ -151,7 +151,7 @@ TEST(NodeDefUtilTest, Out) {
   ExpectFailure(bad, op,
                 "Value for attr 'T' of string is not in the list of allowed "
                 "values: float, double, int64, int32, uint8, uint16, int16, "
-                "int8, complex64, complex128, qint8, quint8, qint32");
+                "int8, complex64, qint8, quint8, qint32");
 }
 
 TEST(NodeDefUtilTest, Enum) {
diff --git a/tensorflow/core/framework/numeric_types.h b/tensorflow/core/framework/numeric_types.h
index 9523e35b4ea..c6230dab24e 100644
--- a/tensorflow/core/framework/numeric_types.h
+++ b/tensorflow/core/framework/numeric_types.h
@@ -24,8 +24,6 @@ namespace tensorflow {
 
 // Single precision complex.
 typedef std::complex<float> complex64;
 
-// Double precision complex.
-typedef std::complex complex128; } // end namespace tensorflow diff --git a/tensorflow/core/framework/op_def_builder_test.cc b/tensorflow/core/framework/op_def_builder_test.cc index fbef9ebf624..2d6a7f01aea 100644 --- a/tensorflow/core/framework/op_def_builder_test.cc +++ b/tensorflow/core/framework/op_def_builder_test.cc @@ -113,7 +113,7 @@ TEST_F(OpDefBuilderTest, AttrWithRestrictions) { ExpectSuccess(b().Attr("a:numbertype"), "attr: { name: 'a' type: 'type' allowed_values { list { type: " "[DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_UINT8, DT_INT16, " - "DT_UINT16, DT_INT8, DT_COMPLEX64, DT_COMPLEX128, DT_QINT8, DT_QUINT8, " + "DT_UINT16, DT_INT8, DT_COMPLEX64, DT_QINT8, DT_QUINT8, " "DT_QINT32] } } }"); ExpectSuccess(b().Attr("a:realnumbertype"), "attr: { name: 'a' type: 'type' allowed_values { list { type: " diff --git a/tensorflow/core/framework/op_def_util_test.cc b/tensorflow/core/framework/op_def_util_test.cc index 813576c2e1b..854016f3569 100644 --- a/tensorflow/core/framework/op_def_util_test.cc +++ b/tensorflow/core/framework/op_def_util_test.cc @@ -246,10 +246,6 @@ TEST_F(ValidateOpDefTest, BadAttrAllowed) { TestBuilder(OpDefBuilder("BadAttrtude") .Attr("x: list(realnumbertype) = [DT_COMPLEX64]")), "attr 'x' of complex64 is not in the list of allowed values"); - ExpectFailure( - TestBuilder(OpDefBuilder("BadAttrtude") - .Attr("x: list(realnumbertype) = [DT_COMPLEX128]")), - "attr 'x' of complex128 is not in the list of allowed values"); // Is in list of allowed strings. TF_EXPECT_OK(TestBuilder( OpDefBuilder("GoodAttrtude").Attr("x: {'foo', 'bar'} = 'bar'"))); diff --git a/tensorflow/core/framework/register_types.h b/tensorflow/core/framework/register_types.h index 1474dc62431..d08388a83bb 100644 --- a/tensorflow/core/framework/register_types.h +++ b/tensorflow/core/framework/register_types.h @@ -63,16 +63,14 @@ limitations under the License. m(int16); \ m(int8) -// Call "m" for all number types, including complex64 and complex128. +// Call "m" for all number types, including complex64. 
#define TF_CALL_NUMBER_TYPES(m) \ TF_CALL_REAL_NUMBER_TYPES(m); \ - m(complex64); \ - m(complex128) + m(complex64) #define TF_CALL_NUMBER_TYPES_NO_INT32(m) \ TF_CALL_REAL_NUMBER_TYPES_NO_INT32(m); \ - m(complex64); \ - m(complex128) + m(complex64) #define TF_CALL_POD_TYPES(m) \ TF_CALL_NUMBER_TYPES(m); \ diff --git a/tensorflow/core/framework/tensor.cc b/tensorflow/core/framework/tensor.cc index e56db2af8c2..e701b663194 100644 --- a/tensorflow/core/framework/tensor.cc +++ b/tensorflow/core/framework/tensor.cc @@ -215,22 +215,6 @@ struct ProtoHelper { } }; -template <> -struct ProtoHelper { - typedef Helper::RepeatedFieldType FieldType; - static const complex128* Begin(const TensorProto& proto) { - return reinterpret_cast(proto.dcomplex_val().data()); - } - static size_t NumElements(const TensorProto& proto) { - return proto.dcomplex_val().size() / 2; - } - static void Fill(const complex128* data, size_t n, TensorProto* proto) { - const double* p = reinterpret_cast(data); - FieldType copy(p, p + n * 2); - proto->mutable_dcomplex_val()->Swap(©); - } -}; - template <> struct ProtoHelper { typedef Helper::RepeatedFieldType FieldType; @@ -401,7 +385,6 @@ void Tensor::UnsafeCopyFromInternal(const Tensor& other, CASE(int8, SINGLE_ARG(STMTS)) \ CASE(string, SINGLE_ARG(STMTS)) \ CASE(complex64, SINGLE_ARG(STMTS)) \ - CASE(complex128, SINGLE_ARG(STMTS)) \ CASE(int64, SINGLE_ARG(STMTS)) \ CASE(bool, SINGLE_ARG(STMTS)) \ CASE(qint32, SINGLE_ARG(STMTS)) \ diff --git a/tensorflow/core/framework/tensor.proto b/tensorflow/core/framework/tensor.proto index 59fc96420bb..013a2d0607a 100644 --- a/tensorflow/core/framework/tensor.proto +++ b/tensorflow/core/framework/tensor.proto @@ -57,8 +57,4 @@ message TensorProto { // DT_BOOL repeated bool bool_val = 11 [packed = true]; - - // DT_COMPLEX128. dcomplex_val(2*i) and dcomplex_val(2*i+1) are real - // and imaginary parts of i-th double precision complex. - repeated double dcomplex_val = 12 [packed = true]; }; diff --git a/tensorflow/core/framework/tensor_test.cc b/tensorflow/core/framework/tensor_test.cc index 13896f9177d..ec0fb57aad7 100644 --- a/tensorflow/core/framework/tensor_test.cc +++ b/tensorflow/core/framework/tensor_test.cc @@ -47,17 +47,12 @@ TEST(TensorTest, DataType_Traits) { // Unfortunately. std::complex::complex() initializes (0, 0). EXPECT_FALSE(std::is_trivial::value); - EXPECT_FALSE(std::is_trivial::value); + EXPECT_FALSE(std::is_trivial>::value); EXPECT_TRUE(std::is_trivial::value); - EXPECT_TRUE(std::is_trivial::value); - struct MyComplex64 { + struct MyComplex { float re, im; }; - EXPECT_TRUE(std::is_trivial::value); - struct MyComplex128 { - double re, im; - }; - EXPECT_TRUE(std::is_trivial::value); + EXPECT_TRUE(std::is_trivial::value); } template @@ -425,19 +420,13 @@ TEST(Tensor_Bool, SimpleWithHelper) { test::ExpectTensorEqual(t1, t2); } -TEST(Tensor_Complex, Simple64) { +TEST(Tensor_Complex, Simple) { Tensor t(DT_COMPLEX64, {4, 5, 3, 7}); t.flat().setRandom(); TestCopies(t); } -TEST(Tensor_Complex, Simple128) { - Tensor t(DT_COMPLEX128, {4, 5, 3, 7}); - t.flat().setRandom(); - TestCopies(t); -} - -TEST(Tensor_Complex, SimpleWithHelper64) { +TEST(Tensor_Complex, SimpleWithHelper) { { Tensor t1 = test::AsTensor({0, {1, 1}, @@ -455,7 +444,7 @@ TEST(Tensor_Complex, SimpleWithHelper64) { test::ExpectTensorEqual(t2, t3); } - // Does some numeric operations for complex64 numbers. + // Does some numeric operations for complex numbers. 
{ const float PI = std::acos(-1); const complex64 rotate_45 = std::polar(1.0f, PI / 4); @@ -486,55 +475,6 @@ TEST(Tensor_Complex, SimpleWithHelper64) { } } -TEST(Tensor_Complex, SimpleWithHelper128) { - { - Tensor t1 = test::AsTensor({0, - {1, 1}, - complex128(2), - complex128(3, 3), - complex128(0, 4), - complex128(2, 5)}, - {2, 3}); - Tensor t2(t1.dtype(), t1.shape()); - t2.flat() = t1.flat() * complex128(0, 2); - Tensor t3 = test::AsTensor( - {0, {-2, 2}, {0, 4}, {-6, 6}, {-8, 0}, {-10, 4}}, - // shape - {2, 3}); - test::ExpectTensorEqual(t2, t3); - } - - // Does some numeric operations for complex128 numbers. - { - const double PI = std::acos(-1); - const complex128 rotate_45 = std::polar(1.0, PI / 4); - - // x contains all the 8-th root of unity. - Tensor x(DT_COMPLEX128, TensorShape({8})); - for (int i = 0; i < 8; ++i) { - x.vec()(i) = std::pow(rotate_45, i); - } - - // Shift the roots by 45 degree. - Tensor y(DT_COMPLEX128, TensorShape({8})); - y.vec() = x.vec() * rotate_45; - Tensor y_expected(DT_COMPLEX128, TensorShape({8})); - for (int i = 0; i < 8; ++i) { - y_expected.vec()(i) = std::pow(rotate_45, i + 1); - } - test::ExpectTensorNear(y, y_expected, 1e-5); - - // Raise roots to the power of 8. - Tensor z(DT_COMPLEX128, TensorShape({8})); - z.vec() = x.vec().pow(8); - Tensor z_expected(DT_COMPLEX128, TensorShape({8})); - for (int i = 0; i < 8; ++i) { - z_expected.vec()(i) = 1; - } - test::ExpectTensorNear(z, z_expected, 1e-5); - } -} - // On the alignment. // // As of 2015/8, tensorflow::Tensor allocates its buffer with 32-byte diff --git a/tensorflow/core/framework/tensor_testutil.h b/tensorflow/core/framework/tensor_testutil.h index 0d88eda17d7..71e1767924e 100644 --- a/tensorflow/core/framework/tensor_testutil.h +++ b/tensorflow/core/framework/tensor_testutil.h @@ -127,12 +127,6 @@ inline void ExpectEqual(const complex64& a, const complex64& b) { EXPECT_FLOAT_EQ(a.imag(), b.imag()) << a << " vs. " << b; } -template <> -void ExpectEqual(const complex128& a, const complex128& b) { - EXPECT_DOUBLE_EQ(a.real(), b.real()) << a << " vs. " << b; - EXPECT_DOUBLE_EQ(a.imag(), b.imag()) << a << " vs. 
" << b; -} - inline void AssertSameTypeDims(const Tensor& x, const Tensor& y) { ASSERT_EQ(x.dtype(), y.dtype()); ASSERT_TRUE(x.IsSameSize(y)) diff --git a/tensorflow/core/framework/types.cc b/tensorflow/core/framework/types.cc index c87a0445cdf..54b55e49c01 100644 --- a/tensorflow/core/framework/types.cc +++ b/tensorflow/core/framework/types.cc @@ -64,8 +64,6 @@ string DataTypeString(DataType dtype) { return "string"; case DT_COMPLEX64: return "complex64"; - case DT_COMPLEX128: - return "complex128"; case DT_INT64: return "int64"; case DT_BOOL: @@ -127,9 +125,6 @@ bool DataTypeFromString(StringPiece sp, DataType* dt) { } else if (sp == "complex64") { *dt = DT_COMPLEX64; return true; - } else if (sp == "complex128") { - *dt = DT_COMPLEX128; - return true; } else if (sp == "int64") { *dt = DT_INT64; return true; @@ -170,10 +165,9 @@ string DataTypeSliceString(const DataTypeSlice types) { } DataTypeVector AllTypes() { - return {DT_FLOAT, DT_DOUBLE, DT_INT32, DT_UINT8, DT_INT16, - DT_UINT16, DT_INT8, DT_STRING, DT_COMPLEX64, DT_COMPLEX128, - DT_INT64, DT_BOOL, DT_QINT8, DT_QUINT8, DT_QINT16, - DT_QUINT16, DT_QINT32}; + return {DT_FLOAT, DT_DOUBLE, DT_INT32, DT_UINT8, DT_INT16, DT_UINT16, + DT_INT8, DT_STRING, DT_COMPLEX64, DT_INT64, DT_BOOL, DT_QINT8, + DT_QUINT8, DT_QINT16, DT_QUINT16, DT_QINT32}; } #if !defined(__ANDROID__) @@ -194,9 +188,8 @@ DataTypeVector RealAndQuantizedTypes() { } DataTypeVector NumberTypes() { - return {DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_UINT8, - DT_UINT16, DT_INT16, DT_INT8, DT_COMPLEX64, DT_COMPLEX128, - DT_QINT8, DT_QUINT8, DT_QINT32 }; + return {DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_UINT8, DT_UINT16, + DT_INT16, DT_INT8, DT_COMPLEX64, DT_QINT8, DT_QUINT8, DT_QINT32}; } #else // defined(__ANDROID__) @@ -230,7 +223,6 @@ bool DataTypeCanUseMemcpy(DataType dt) { case DT_INT16: case DT_INT8: case DT_COMPLEX64: - case DT_COMPLEX128: case DT_INT64: case DT_BOOL: case DT_QINT8: diff --git a/tensorflow/core/framework/types.h b/tensorflow/core/framework/types.h index 6de9917d710..9651d2b64cd 100644 --- a/tensorflow/core/framework/types.h +++ b/tensorflow/core/framework/types.h @@ -174,7 +174,6 @@ MATCH_TYPE_AND_ENUM(int16, DT_INT16); MATCH_TYPE_AND_ENUM(int8, DT_INT8); MATCH_TYPE_AND_ENUM(string, DT_STRING); MATCH_TYPE_AND_ENUM(complex64, DT_COMPLEX64); -MATCH_TYPE_AND_ENUM(complex128, DT_COMPLEX128); MATCH_TYPE_AND_ENUM(int64, DT_INT64); MATCH_TYPE_AND_ENUM(bool, DT_BOOL); MATCH_TYPE_AND_ENUM(qint8, DT_QINT8); diff --git a/tensorflow/core/framework/types.proto b/tensorflow/core/framework/types.proto index 27e0b7e9cf4..e6f0b13d97e 100644 --- a/tensorflow/core/framework/types.proto +++ b/tensorflow/core/framework/types.proto @@ -30,10 +30,10 @@ enum DataType { DT_QINT16 = 15; // Quantized int16 DT_QUINT16 = 16; // Quantized uint16 DT_UINT16 = 17; - DT_COMPLEX128 = 18; // Double-precision complex // TODO(josh11b): DT_GENERIC_PROTO = ??; // TODO(jeff,josh11b): DT_UINT64? DT_UINT32? + // TODO(zhifengc): DT_COMPLEX128 (double-precision complex)? // Do not use! These are only for parameters. Every enum above // should have a corresponding value below (verified by types_test). 
@@ -54,5 +54,4 @@ enum DataType { DT_QINT16_REF = 115; DT_QUINT16_REF = 116; DT_UINT16_REF = 117; - DT_COMPLEX128_REF = 118; } diff --git a/tensorflow/core/ops/compat/ops_history.v0.pbtxt b/tensorflow/core/ops/compat/ops_history.v0.pbtxt index 0d9360a931c..7d4b14e9658 100644 --- a/tensorflow/core/ops/compat/ops_history.v0.pbtxt +++ b/tensorflow/core/ops/compat/ops_history.v0.pbtxt @@ -205,47 +205,6 @@ op { is_aggregate: true is_commutative: true } -op { - name: "AddN" - input_arg { - name: "inputs" - type_attr: "T" - number_attr: "N" - } - output_arg { - name: "sum" - type_attr: "T" - } - attr { - name: "N" - type: "int" - has_minimum: true - minimum: 1 - } - attr { - name: "T" - type: "type" - allowed_values { - list { - type: DT_FLOAT - type: DT_DOUBLE - type: DT_INT64 - type: DT_INT32 - type: DT_UINT8 - type: DT_UINT16 - type: DT_INT16 - type: DT_INT8 - type: DT_COMPLEX64 - type: DT_COMPLEX128 - type: DT_QINT8 - type: DT_QUINT8 - type: DT_QINT32 - } - } - } - is_aggregate: true - is_commutative: true -} op { name: "AdjustContrast" input_arg { @@ -497,137 +456,6 @@ op { } } } -op { - name: "ApplyAdagrad" - input_arg { - name: "var" - type_attr: "T" - is_ref: true - } - input_arg { - name: "accum" - type_attr: "T" - is_ref: true - } - input_arg { - name: "lr" - type_attr: "T" - } - input_arg { - name: "grad" - type_attr: "T" - } - output_arg { - name: "out" - type_attr: "T" - is_ref: true - } - attr { - name: "T" - type: "type" - allowed_values { - list { - type: DT_FLOAT - type: DT_DOUBLE - type: DT_INT64 - type: DT_INT32 - type: DT_UINT8 - type: DT_UINT16 - type: DT_INT16 - type: DT_INT8 - type: DT_COMPLEX64 - type: DT_COMPLEX128 - type: DT_QINT8 - type: DT_QUINT8 - type: DT_QINT32 - } - } - } - attr { - name: "use_locking" - type: "bool" - default_value { - b: false - } - } -} -op { - name: "ApplyAdam" - input_arg { - name: "var" - type_attr: "T" - is_ref: true - } - input_arg { - name: "m" - type_attr: "T" - is_ref: true - } - input_arg { - name: "v" - type_attr: "T" - is_ref: true - } - input_arg { - name: "beta1_power" - type_attr: "T" - } - input_arg { - name: "beta2_power" - type_attr: "T" - } - input_arg { - name: "lr" - type_attr: "T" - } - input_arg { - name: "beta1" - type_attr: "T" - } - input_arg { - name: "beta2" - type_attr: "T" - } - input_arg { - name: "epsilon" - type_attr: "T" - } - input_arg { - name: "grad" - type_attr: "T" - } - output_arg { - name: "out" - type_attr: "T" - is_ref: true - } - attr { - name: "T" - type: "type" - allowed_values { - list { - type: DT_FLOAT - type: DT_DOUBLE - type: DT_INT64 - type: DT_INT32 - type: DT_UINT8 - type: DT_INT16 - type: DT_INT8 - type: DT_COMPLEX64 - type: DT_QINT8 - type: DT_QUINT8 - type: DT_QINT32 - } - } - } - attr { - name: "use_locking" - type: "bool" - default_value { - b: false - } - } -} op { name: "ApplyAdam" input_arg { @@ -688,7 +516,6 @@ op { type: DT_INT64 type: DT_INT32 type: DT_UINT8 - type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 @@ -770,7 +597,6 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -856,40 +682,18 @@ op { } } op { - name: "ApplyFtrl" + name: "ApplyGradientDescent" input_arg { name: "var" type_attr: "T" is_ref: true } input_arg { - name: "accum" - type_attr: "T" - is_ref: true - } - input_arg { - name: "linear" - type_attr: "T" - is_ref: true - } - input_arg { - name: "grad" - type_attr: "T" - } - input_arg { - name: "lr" - type_attr: "T" - } - input_arg { - name: "l1" - type_attr: "T" - } - 
input_arg { - name: "l2" + name: "alpha" type_attr: "T" } input_arg { - name: "lr_power" + name: "delta" type_attr: "T" } output_arg { @@ -907,11 +711,9 @@ op { type: DT_INT64 type: DT_INT32 type: DT_UINT8 - type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -956,6 +758,7 @@ op { type: DT_INT64 type: DT_INT32 type: DT_UINT8 + type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 @@ -974,66 +777,27 @@ op { } } op { - name: "ApplyGradientDescent" + name: "ApplyMomentum" input_arg { name: "var" type_attr: "T" is_ref: true } input_arg { - name: "alpha" - type_attr: "T" - } - input_arg { - name: "delta" - type_attr: "T" - } - output_arg { - name: "out" + name: "accum" type_attr: "T" is_ref: true } - attr { - name: "T" - type: "type" - allowed_values { - list { - type: DT_FLOAT - type: DT_DOUBLE - type: DT_INT64 - type: DT_INT32 - type: DT_UINT8 - type: DT_UINT16 - type: DT_INT16 - type: DT_INT8 - type: DT_COMPLEX64 - type: DT_QINT8 - type: DT_QUINT8 - type: DT_QINT32 - } - } - } - attr { - name: "use_locking" - type: "bool" - default_value { - b: false - } - } -} -op { - name: "ApplyGradientDescent" input_arg { - name: "var" + name: "lr" type_attr: "T" - is_ref: true } input_arg { - name: "alpha" + name: "grad" type_attr: "T" } input_arg { - name: "delta" + name: "momentum" type_attr: "T" } output_arg { @@ -1051,11 +815,9 @@ op { type: DT_INT64 type: DT_INT32 type: DT_UINT8 - type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -1109,6 +871,7 @@ op { type: DT_INT64 type: DT_INT32 type: DT_UINT8 + type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 @@ -1127,129 +890,14 @@ op { } } op { - name: "ApplyMomentum" + name: "ApplyRMSProp" input_arg { name: "var" type_attr: "T" is_ref: true } input_arg { - name: "accum" - type_attr: "T" - is_ref: true - } - input_arg { - name: "lr" - type_attr: "T" - } - input_arg { - name: "grad" - type_attr: "T" - } - input_arg { - name: "momentum" - type_attr: "T" - } - output_arg { - name: "out" - type_attr: "T" - is_ref: true - } - attr { - name: "T" - type: "type" - allowed_values { - list { - type: DT_FLOAT - type: DT_DOUBLE - type: DT_INT64 - type: DT_INT32 - type: DT_UINT8 - type: DT_UINT16 - type: DT_INT16 - type: DT_INT8 - type: DT_COMPLEX64 - type: DT_QINT8 - type: DT_QUINT8 - type: DT_QINT32 - } - } - } - attr { - name: "use_locking" - type: "bool" - default_value { - b: false - } - } -} -op { - name: "ApplyMomentum" - input_arg { - name: "var" - type_attr: "T" - is_ref: true - } - input_arg { - name: "accum" - type_attr: "T" - is_ref: true - } - input_arg { - name: "lr" - type_attr: "T" - } - input_arg { - name: "grad" - type_attr: "T" - } - input_arg { - name: "momentum" - type_attr: "T" - } - output_arg { - name: "out" - type_attr: "T" - is_ref: true - } - attr { - name: "T" - type: "type" - allowed_values { - list { - type: DT_FLOAT - type: DT_DOUBLE - type: DT_INT64 - type: DT_INT32 - type: DT_UINT8 - type: DT_UINT16 - type: DT_INT16 - type: DT_INT8 - type: DT_COMPLEX64 - type: DT_COMPLEX128 - type: DT_QINT8 - type: DT_QUINT8 - type: DT_QINT32 - } - } - } - attr { - name: "use_locking" - type: "bool" - default_value { - b: false - } - } -} -op { - name: "ApplyRMSProp" - input_arg { - name: "var" - type_attr: "T" - is_ref: true - } - input_arg { - name: "ms" + name: "ms" type_attr: "T" is_ref: true } @@ -1271,199 +919,17 @@ op { type_attr: "T" } input_arg { - name: 
"epsilon" - type_attr: "T" - } - input_arg { - name: "grad" - type_attr: "T" - } - output_arg { - name: "out" - type_attr: "T" - is_ref: true - } - attr { - name: "T" - type: "type" - allowed_values { - list { - type: DT_FLOAT - type: DT_DOUBLE - type: DT_INT64 - type: DT_INT32 - type: DT_UINT8 - type: DT_INT16 - type: DT_INT8 - type: DT_COMPLEX64 - type: DT_QINT8 - type: DT_QUINT8 - type: DT_QINT32 - } - } - } - attr { - name: "use_locking" - type: "bool" - default_value { - b: false - } - } -} -op { - name: "ApplyRMSProp" - input_arg { - name: "var" - type_attr: "T" - is_ref: true - } - input_arg { - name: "ms" - type_attr: "T" - is_ref: true - } - input_arg { - name: "mom" - type_attr: "T" - is_ref: true - } - input_arg { - name: "lr" - type_attr: "T" - } - input_arg { - name: "rho" - type_attr: "T" - } - input_arg { - name: "momentum" - type_attr: "T" - } - input_arg { - name: "epsilon" - type_attr: "T" - } - input_arg { - name: "grad" - type_attr: "T" - } - output_arg { - name: "out" - type_attr: "T" - is_ref: true - } - attr { - name: "T" - type: "type" - allowed_values { - list { - type: DT_FLOAT - type: DT_DOUBLE - type: DT_INT64 - type: DT_INT32 - type: DT_UINT8 - type: DT_UINT16 - type: DT_INT16 - type: DT_INT8 - type: DT_COMPLEX64 - type: DT_QINT8 - type: DT_QUINT8 - type: DT_QINT32 - } - } - } - attr { - name: "use_locking" - type: "bool" - default_value { - b: false - } - } -} -op { - name: "ApplyRMSProp" - input_arg { - name: "var" - type_attr: "T" - is_ref: true - } - input_arg { - name: "ms" - type_attr: "T" - is_ref: true - } - input_arg { - name: "mom" - type_attr: "T" - is_ref: true - } - input_arg { - name: "lr" - type_attr: "T" - } - input_arg { - name: "rho" - type_attr: "T" - } - input_arg { - name: "momentum" - type_attr: "T" - } - input_arg { - name: "epsilon" - type_attr: "T" - } - input_arg { - name: "grad" - type_attr: "T" - } - output_arg { - name: "out" - type_attr: "T" - is_ref: true - } - attr { - name: "T" - type: "type" - allowed_values { - list { - type: DT_FLOAT - type: DT_DOUBLE - type: DT_INT64 - type: DT_INT32 - type: DT_UINT8 - type: DT_UINT16 - type: DT_INT16 - type: DT_INT8 - type: DT_COMPLEX64 - type: DT_COMPLEX128 - type: DT_QINT8 - type: DT_QUINT8 - type: DT_QINT32 - } - } - } - attr { - name: "use_locking" - type: "bool" - default_value { - b: false - } - } -} -op { - name: "ArgMax" - input_arg { - name: "input" + name: "epsilon" type_attr: "T" } input_arg { - name: "dimension" - type: DT_INT32 + name: "grad" + type_attr: "T" } output_arg { - name: "output" - type: DT_INT64 + name: "out" + type_attr: "T" + is_ref: true } attr { name: "T" @@ -1484,20 +950,55 @@ op { } } } + attr { + name: "use_locking" + type: "bool" + default_value { + b: false + } + } } op { - name: "ArgMax" + name: "ApplyRMSProp" input_arg { - name: "input" + name: "var" type_attr: "T" + is_ref: true } input_arg { - name: "dimension" - type: DT_INT32 + name: "ms" + type_attr: "T" + is_ref: true + } + input_arg { + name: "mom" + type_attr: "T" + is_ref: true + } + input_arg { + name: "lr" + type_attr: "T" + } + input_arg { + name: "rho" + type_attr: "T" + } + input_arg { + name: "momentum" + type_attr: "T" + } + input_arg { + name: "epsilon" + type_attr: "T" + } + input_arg { + name: "grad" + type_attr: "T" } output_arg { - name: "output" - type: DT_INT64 + name: "out" + type_attr: "T" + is_ref: true } attr { name: "T" @@ -1519,6 +1020,13 @@ op { } } } + attr { + name: "use_locking" + type: "bool" + default_value { + b: false + } + } } op { name: "ArgMax" @@ -1544,11 +1052,9 
@@ op { type: DT_INT64 type: DT_INT32 type: DT_UINT8 - type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -1557,7 +1063,7 @@ op { } } op { - name: "ArgMin" + name: "ArgMax" input_arg { name: "input" type_attr: "T" @@ -1580,6 +1086,7 @@ op { type: DT_INT64 type: DT_INT32 type: DT_UINT8 + type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 @@ -1614,7 +1121,6 @@ op { type: DT_INT64 type: DT_INT32 type: DT_UINT8 - type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 @@ -1653,7 +1159,6 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -1808,94 +1313,6 @@ op { } } } -op { - name: "AssignAdd" - input_arg { - name: "ref" - type_attr: "T" - is_ref: true - } - input_arg { - name: "value" - type_attr: "T" - } - output_arg { - name: "output_ref" - type_attr: "T" - is_ref: true - } - attr { - name: "T" - type: "type" - allowed_values { - list { - type: DT_FLOAT - type: DT_DOUBLE - type: DT_INT64 - type: DT_INT32 - type: DT_UINT8 - type: DT_UINT16 - type: DT_INT16 - type: DT_INT8 - type: DT_COMPLEX64 - type: DT_COMPLEX128 - type: DT_QINT8 - type: DT_QUINT8 - type: DT_QINT32 - } - } - } - attr { - name: "use_locking" - type: "bool" - default_value { - b: false - } - } -} -op { - name: "AssignSub" - input_arg { - name: "ref" - type_attr: "T" - is_ref: true - } - input_arg { - name: "value" - type_attr: "T" - } - output_arg { - name: "output_ref" - type_attr: "T" - is_ref: true - } - attr { - name: "T" - type: "type" - allowed_values { - list { - type: DT_FLOAT - type: DT_DOUBLE - type: DT_INT64 - type: DT_INT32 - type: DT_UINT8 - type: DT_INT16 - type: DT_INT8 - type: DT_COMPLEX64 - type: DT_QINT8 - type: DT_QUINT8 - type: DT_QINT32 - } - } - } - attr { - name: "use_locking" - type: "bool" - default_value { - b: false - } - } -} op { name: "AssignSub" input_arg { @@ -1922,7 +1339,6 @@ op { type: DT_INT64 type: DT_INT32 type: DT_UINT8 - type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 @@ -1970,7 +1386,6 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -2367,107 +1782,16 @@ op { type_attr: "T" } output_arg { - name: "output" - type_attr: "T" - } - attr { - name: "lower" - type: "bool" - default_value { - b: true - } - } - attr { - name: "T" - type: "type" - allowed_values { - list { - type: DT_FLOAT - type: DT_DOUBLE - } - } - } -} -op { - name: "BatchNormWithGlobalNormalization" - input_arg { - name: "t" - type_attr: "T" - } - input_arg { - name: "m" - type_attr: "T" - } - input_arg { - name: "v" - type_attr: "T" - } - input_arg { - name: "beta" - type_attr: "T" - } - input_arg { - name: "gamma" - type_attr: "T" - } - output_arg { - name: "result" - type_attr: "T" - } - attr { - name: "T" - type: "type" - allowed_values { - list { - type: DT_FLOAT - type: DT_DOUBLE - type: DT_INT64 - type: DT_INT32 - type: DT_UINT8 - type: DT_INT16 - type: DT_INT8 - type: DT_COMPLEX64 - type: DT_QINT8 - type: DT_QUINT8 - type: DT_QINT32 - } - } - } - attr { - name: "variance_epsilon" - type: "float" - } - attr { - name: "scale_after_normalization" - type: "bool" - } -} -op { - name: "BatchNormWithGlobalNormalization" - input_arg { - name: "t" - type_attr: "T" - } - input_arg { - name: "m" - type_attr: "T" - } - input_arg { - name: "v" - type_attr: "T" - } - input_arg { - name: "beta" - type_attr: "T" - } - input_arg { - name: "gamma" - type_attr: 
"T" - } - output_arg { - name: "result" + name: "output" type_attr: "T" } + attr { + name: "lower" + type: "bool" + default_value { + b: true + } + } attr { name: "T" type: "type" @@ -2475,27 +1799,9 @@ op { list { type: DT_FLOAT type: DT_DOUBLE - type: DT_INT64 - type: DT_INT32 - type: DT_UINT8 - type: DT_UINT16 - type: DT_INT16 - type: DT_INT8 - type: DT_COMPLEX64 - type: DT_QINT8 - type: DT_QUINT8 - type: DT_QINT32 } } } - attr { - name: "variance_epsilon" - type: "float" - } - attr { - name: "scale_after_normalization" - type: "bool" - } } op { name: "BatchNormWithGlobalNormalization" @@ -2533,11 +1839,9 @@ op { type: DT_INT64 type: DT_INT32 type: DT_UINT8 - type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -2554,7 +1858,7 @@ op { } } op { - name: "BatchNormWithGlobalNormalizationGrad" + name: "BatchNormWithGlobalNormalization" input_arg { name: "t" type_attr: "T" @@ -2568,31 +1872,15 @@ op { type_attr: "T" } input_arg { - name: "gamma" + name: "beta" type_attr: "T" } input_arg { - name: "backprop" - type_attr: "T" - } - output_arg { - name: "dx" - type_attr: "T" - } - output_arg { - name: "dm" - type_attr: "T" - } - output_arg { - name: "dv" - type_attr: "T" - } - output_arg { - name: "db" + name: "gamma" type_attr: "T" } output_arg { - name: "dg" + name: "result" type_attr: "T" } attr { @@ -2605,6 +1893,7 @@ op { type: DT_INT64 type: DT_INT32 type: DT_UINT8 + type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 @@ -2675,7 +1964,6 @@ op { type: DT_INT64 type: DT_INT32 type: DT_UINT8 - type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 @@ -2750,7 +2038,6 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -2904,99 +2191,6 @@ op { } } } -op { - name: "BiasAdd" - input_arg { - name: "value" - type_attr: "T" - } - input_arg { - name: "bias" - type_attr: "T" - } - output_arg { - name: "output" - type_attr: "T" - } - attr { - name: "T" - type: "type" - allowed_values { - list { - type: DT_FLOAT - type: DT_DOUBLE - type: DT_INT64 - type: DT_INT32 - type: DT_UINT8 - type: DT_UINT16 - type: DT_INT16 - type: DT_INT8 - type: DT_COMPLEX64 - type: DT_COMPLEX128 - type: DT_QINT8 - type: DT_QUINT8 - type: DT_QINT32 - } - } - } - attr { - name: "data_format" - type: "string" - default_value { - s: "NHWC" - } - allowed_values { - list { - s: "NHWC" - s: "NCHW" - } - } - } -} -op { - name: "BiasAddGrad" - input_arg { - name: "out_backprop" - type_attr: "T" - } - output_arg { - name: "output" - type_attr: "T" - } - attr { - name: "T" - type: "type" - allowed_values { - list { - type: DT_FLOAT - type: DT_DOUBLE - type: DT_INT64 - type: DT_INT32 - type: DT_UINT8 - type: DT_UINT16 - type: DT_INT16 - type: DT_INT8 - type: DT_COMPLEX64 - type: DT_QINT8 - type: DT_QUINT8 - type: DT_QINT32 - } - } - } - attr { - name: "data_format" - type: "string" - default_value { - s: "NHWC" - } - allowed_values { - list { - s: "NHWC" - s: "NCHW" - } - } - } -} op { name: "BiasAddGrad" input_arg { @@ -3021,7 +2215,6 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -3077,93 +2270,6 @@ op { } } } -op { - name: "BiasAddV1" - input_arg { - name: "value" - type_attr: "T" - } - input_arg { - name: "bias" - type_attr: "T" - } - output_arg { - name: "output" - type_attr: "T" - } - attr { - name: "T" - type: "type" - allowed_values { - list { - type: DT_FLOAT - type: 
DT_DOUBLE - type: DT_INT64 - type: DT_INT32 - type: DT_UINT8 - type: DT_UINT16 - type: DT_INT16 - type: DT_INT8 - type: DT_COMPLEX64 - type: DT_COMPLEX128 - type: DT_QINT8 - type: DT_QUINT8 - type: DT_QINT32 - } - } - } -} -op { - name: "Bitcast" - input_arg { - name: "input" - type_attr: "T" - } - output_arg { - name: "output" - type_attr: "type" - } - attr { - name: "T" - type: "type" - allowed_values { - list { - type: DT_FLOAT - type: DT_DOUBLE - type: DT_INT64 - type: DT_INT32 - type: DT_UINT8 - type: DT_UINT16 - type: DT_INT16 - type: DT_INT8 - type: DT_COMPLEX64 - type: DT_QINT8 - type: DT_QUINT8 - type: DT_QINT32 - } - } - } - attr { - name: "type" - type: "type" - allowed_values { - list { - type: DT_FLOAT - type: DT_DOUBLE - type: DT_INT64 - type: DT_INT32 - type: DT_UINT8 - type: DT_UINT16 - type: DT_INT16 - type: DT_INT8 - type: DT_COMPLEX64 - type: DT_QINT8 - type: DT_QUINT8 - type: DT_QINT32 - } - } - } -} op { name: "Bitcast" input_arg { @@ -3188,7 +2294,6 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -3209,7 +2314,6 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -5998,34 +5102,13 @@ op { name: "x" type: DT_INT32 } - output_arg { - name: "y" - type: DT_INT32 - } -} -op { - name: "IsFinite" - input_arg { - name: "x" - type_attr: "T" - } - output_arg { - name: "y" - type: DT_BOOL - } - attr { - name: "T" - type: "type" - allowed_values { - list { - type: DT_FLOAT - type: DT_DOUBLE - } - } - } + output_arg { + name: "y" + type: DT_INT32 + } } op { - name: "IsInf" + name: "IsFinite" input_arg { name: "x" type_attr: "T" @@ -6046,7 +5129,7 @@ op { } } op { - name: "IsNan" + name: "IsInf" input_arg { name: "x" type_attr: "T" @@ -6067,14 +5150,14 @@ op { } } op { - name: "L2Loss" + name: "IsNan" input_arg { - name: "t" + name: "x" type_attr: "T" } output_arg { - name: "output" - type_attr: "T" + name: "y" + type: DT_BOOL } attr { name: "T" @@ -6083,15 +5166,6 @@ op { list { type: DT_FLOAT type: DT_DOUBLE - type: DT_INT64 - type: DT_INT32 - type: DT_UINT8 - type: DT_INT16 - type: DT_INT8 - type: DT_COMPLEX64 - type: DT_QINT8 - type: DT_QUINT8 - type: DT_QINT32 } } } @@ -6116,7 +5190,6 @@ op { type: DT_INT64 type: DT_INT32 type: DT_UINT8 - type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 @@ -6151,7 +5224,6 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -6995,49 +6067,6 @@ op { } } } -op { - name: "Max" - input_arg { - name: "input" - type_attr: "T" - } - input_arg { - name: "reduction_indices" - type: DT_INT32 - } - output_arg { - name: "output" - type_attr: "T" - } - attr { - name: "keep_dims" - type: "bool" - default_value { - b: false - } - } - attr { - name: "T" - type: "type" - allowed_values { - list { - type: DT_FLOAT - type: DT_DOUBLE - type: DT_INT64 - type: DT_INT32 - type: DT_UINT8 - type: DT_UINT16 - type: DT_INT16 - type: DT_INT8 - type: DT_COMPLEX64 - type: DT_COMPLEX128 - type: DT_QINT8 - type: DT_QUINT8 - type: DT_QINT32 - } - } - } -} op { name: "MaxPool" input_arg { @@ -7424,49 +6453,6 @@ op { } } } -op { - name: "Mean" - input_arg { - name: "input" - type_attr: "T" - } - input_arg { - name: "reduction_indices" - type: DT_INT32 - } - output_arg { - name: "output" - type_attr: "T" - } - attr { - name: "keep_dims" - type: "bool" - default_value { - b: false - } - } - attr { - name: "T" - type: "type" - 
allowed_values { - list { - type: DT_FLOAT - type: DT_DOUBLE - type: DT_INT64 - type: DT_INT32 - type: DT_UINT8 - type: DT_UINT16 - type: DT_INT16 - type: DT_INT8 - type: DT_COMPLEX64 - type: DT_COMPLEX128 - type: DT_QINT8 - type: DT_QUINT8 - type: DT_QINT32 - } - } - } -} op { name: "Merge" input_arg { @@ -7594,49 +6580,6 @@ op { } } } -op { - name: "Min" - input_arg { - name: "input" - type_attr: "T" - } - input_arg { - name: "reduction_indices" - type: DT_INT32 - } - output_arg { - name: "output" - type_attr: "T" - } - attr { - name: "keep_dims" - type: "bool" - default_value { - b: false - } - } - attr { - name: "T" - type: "type" - allowed_values { - list { - type: DT_FLOAT - type: DT_DOUBLE - type: DT_INT64 - type: DT_INT32 - type: DT_UINT8 - type: DT_UINT16 - type: DT_INT16 - type: DT_INT8 - type: DT_COMPLEX64 - type: DT_COMPLEX128 - type: DT_QINT8 - type: DT_QUINT8 - type: DT_QINT32 - } - } - } -} op { name: "Minimum" input_arg { @@ -8743,49 +7686,6 @@ op { } } } -op { - name: "Prod" - input_arg { - name: "input" - type_attr: "T" - } - input_arg { - name: "reduction_indices" - type: DT_INT32 - } - output_arg { - name: "output" - type_attr: "T" - } - attr { - name: "keep_dims" - type: "bool" - default_value { - b: false - } - } - attr { - name: "T" - type: "type" - allowed_values { - list { - type: DT_FLOAT - type: DT_DOUBLE - type: DT_INT64 - type: DT_INT32 - type: DT_UINT8 - type: DT_UINT16 - type: DT_INT16 - type: DT_INT8 - type: DT_COMPLEX64 - type: DT_COMPLEX128 - type: DT_QINT8 - type: DT_QUINT8 - type: DT_QINT32 - } - } - } -} op { name: "PyFunc" input_arg { @@ -10745,68 +9645,13 @@ op { } input_arg { name: "data" - type_list_attr: "T" - } - attr { - name: "T" - type: "list(type)" - has_minimum: true - minimum: 1 - } -} -op { - name: "ScalarSummary" - input_arg { - name: "tags" - type: DT_STRING - } - input_arg { - name: "values" - type_attr: "T" - } - output_arg { - name: "summary" - type: DT_STRING - } - attr { - name: "T" - type: "type" - allowed_values { - list { - type: DT_FLOAT - type: DT_DOUBLE - } - } - } -} -op { - name: "ScalarSummary" - input_arg { - name: "tags" - type: DT_STRING - } - input_arg { - name: "values" - type_attr: "T" - } - output_arg { - name: "summary" - type: DT_STRING - } - attr { - name: "T" - type: "type" - allowed_values { - list { - type: DT_FLOAT - type: DT_DOUBLE - type: DT_INT32 - type: DT_INT64 - type: DT_UINT8 - type: DT_INT16 - type: DT_INT8 - } - } + type_list_attr: "T" + } + attr { + name: "T" + type: "list(type)" + has_minimum: true + minimum: 1 } } op { @@ -10830,35 +9675,23 @@ op { list { type: DT_FLOAT type: DT_DOUBLE - type: DT_INT32 - type: DT_INT64 - type: DT_UINT8 - type: DT_INT16 - type: DT_INT8 - type: DT_UINT16 } } } } op { - name: "ScatterAdd" - input_arg { - name: "ref" - type_attr: "T" - is_ref: true - } + name: "ScalarSummary" input_arg { - name: "indices" - type_attr: "Tindices" + name: "tags" + type: DT_STRING } input_arg { - name: "updates" + name: "values" type_attr: "T" } output_arg { - name: "output_ref" - type_attr: "T" - is_ref: true + name: "summary" + type: DT_STRING } attr { name: "T" @@ -10867,55 +9700,28 @@ op { list { type: DT_FLOAT type: DT_DOUBLE - type: DT_INT64 type: DT_INT32 + type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 - type: DT_COMPLEX64 - type: DT_QINT8 - type: DT_QUINT8 - type: DT_QINT32 - } - } - } - attr { - name: "Tindices" - type: "type" - allowed_values { - list { - type: DT_INT32 - type: DT_INT64 } } } - attr { - name: "use_locking" - type: "bool" - default_value { - b: false 
- } - } } op { - name: "ScatterAdd" - input_arg { - name: "ref" - type_attr: "T" - is_ref: true - } + name: "ScalarSummary" input_arg { - name: "indices" - type_attr: "Tindices" + name: "tags" + type: DT_STRING } input_arg { - name: "updates" + name: "values" type_attr: "T" } output_arg { - name: "output_ref" - type_attr: "T" - is_ref: true + name: "summary" + type: DT_STRING } attr { name: "T" @@ -10924,36 +9730,15 @@ op { list { type: DT_FLOAT type: DT_DOUBLE - type: DT_INT64 type: DT_INT32 + type: DT_INT64 type: DT_UINT8 - type: DT_UINT16 type: DT_INT16 type: DT_INT8 - type: DT_COMPLEX64 - type: DT_QINT8 - type: DT_QUINT8 - type: DT_QINT32 - } - } - } - attr { - name: "Tindices" - type: "type" - allowed_values { - list { - type: DT_INT32 - type: DT_INT64 + type: DT_UINT16 } } } - attr { - name: "use_locking" - type: "bool" - default_value { - b: false - } - } } op { name: "ScatterAdd" @@ -10985,11 +9770,9 @@ op { type: DT_INT64 type: DT_INT32 type: DT_UINT8 - type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -11015,7 +9798,7 @@ op { } } op { - name: "ScatterSub" + name: "ScatterAdd" input_arg { name: "ref" type_attr: "T" @@ -11044,6 +9827,7 @@ op { type: DT_INT64 type: DT_INT32 type: DT_UINT8 + type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 @@ -11101,7 +9885,6 @@ op { type: DT_INT64 type: DT_INT32 type: DT_UINT8 - type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 @@ -11163,7 +9946,6 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -12195,173 +10977,21 @@ op { type: DT_INT64 type: DT_UINT8 type: DT_INT16 - type: DT_INT8 - type: DT_UINT16 - } - } - } -} -op { - name: "Softsign" - input_arg { - name: "features" - type_attr: "T" - } - output_arg { - name: "activations" - type_attr: "T" - } - attr { - name: "T" - type: "type" - allowed_values { - list { - type: DT_FLOAT - type: DT_DOUBLE - type: DT_INT32 - type: DT_INT64 - type: DT_UINT8 - type: DT_INT16 - type: DT_INT8 - } - } - } -} -op { - name: "Softsign" - input_arg { - name: "features" - type_attr: "T" - } - output_arg { - name: "activations" - type_attr: "T" - } - attr { - name: "T" - type: "type" - allowed_values { - list { - type: DT_FLOAT - type: DT_DOUBLE - type: DT_INT32 - type: DT_INT64 - type: DT_UINT8 - type: DT_INT16 - type: DT_INT8 - type: DT_UINT16 - } - } - } -} -op { - name: "SoftsignGrad" - input_arg { - name: "gradients" - type_attr: "T" - } - input_arg { - name: "features" - type_attr: "T" - } - output_arg { - name: "backprops" - type_attr: "T" - } - attr { - name: "T" - type: "type" - allowed_values { - list { - type: DT_FLOAT - type: DT_DOUBLE - type: DT_INT32 - type: DT_INT64 - type: DT_UINT8 - type: DT_INT16 - type: DT_INT8 - } - } - } -} -op { - name: "SoftsignGrad" - input_arg { - name: "gradients" - type_attr: "T" - } - input_arg { - name: "features" - type_attr: "T" - } - output_arg { - name: "backprops" - type_attr: "T" - } - attr { - name: "T" - type: "type" - allowed_values { - list { - type: DT_FLOAT - type: DT_DOUBLE - type: DT_INT32 - type: DT_INT64 - type: DT_UINT8 - type: DT_INT16 - type: DT_INT8 - type: DT_UINT16 - } - } - } -} -op { - name: "SpaceToDepth" - input_arg { - name: "input" - type_attr: "T" - } - output_arg { - name: "output" - type_attr: "T" - } - attr { - name: "T" - type: "type" - } - attr { - name: "block_size" - type: "int" - } -} -op { - name: "SparseApplyAdagrad" - input_arg { - name: "var" 
- type_attr: "T" - is_ref: true - } - input_arg { - name: "accum" - type_attr: "T" - is_ref: true - } - input_arg { - name: "lr" - type_attr: "T" + type: DT_INT8 + type: DT_UINT16 + } + } } +} +op { + name: "Softsign" input_arg { - name: "grad" + name: "features" type_attr: "T" } - input_arg { - name: "indices" - type_attr: "Tindices" - } output_arg { - name: "out" + name: "activations" type_attr: "T" - is_ref: true } attr { name: "T" @@ -12370,64 +11000,85 @@ op { list { type: DT_FLOAT type: DT_DOUBLE - type: DT_INT64 type: DT_INT32 + type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 - type: DT_COMPLEX64 - type: DT_QINT8 - type: DT_QUINT8 - type: DT_QINT32 } } } +} +op { + name: "Softsign" + input_arg { + name: "features" + type_attr: "T" + } + output_arg { + name: "activations" + type_attr: "T" + } attr { - name: "Tindices" + name: "T" type: "type" allowed_values { list { + type: DT_FLOAT + type: DT_DOUBLE type: DT_INT32 type: DT_INT64 + type: DT_UINT8 + type: DT_INT16 + type: DT_INT8 + type: DT_UINT16 } } } - attr { - name: "use_locking" - type: "bool" - default_value { - b: false - } - } } op { - name: "SparseApplyAdagrad" + name: "SoftsignGrad" input_arg { - name: "var" + name: "gradients" type_attr: "T" - is_ref: true } input_arg { - name: "accum" + name: "features" type_attr: "T" - is_ref: true } - input_arg { - name: "lr" + output_arg { + name: "backprops" type_attr: "T" } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_FLOAT + type: DT_DOUBLE + type: DT_INT32 + type: DT_INT64 + type: DT_UINT8 + type: DT_INT16 + type: DT_INT8 + } + } + } +} +op { + name: "SoftsignGrad" input_arg { - name: "grad" + name: "gradients" type_attr: "T" } input_arg { - name: "indices" - type_attr: "Tindices" + name: "features" + type_attr: "T" } output_arg { - name: "out" + name: "backprops" type_attr: "T" - is_ref: true } attr { name: "T" @@ -12436,35 +11087,33 @@ op { list { type: DT_FLOAT type: DT_DOUBLE - type: DT_INT64 type: DT_INT32 + type: DT_INT64 type: DT_UINT8 - type: DT_UINT16 type: DT_INT16 type: DT_INT8 - type: DT_COMPLEX64 - type: DT_QINT8 - type: DT_QUINT8 - type: DT_QINT32 + type: DT_UINT16 } } } +} +op { + name: "SpaceToDepth" + input_arg { + name: "input" + type_attr: "T" + } + output_arg { + name: "output" + type_attr: "T" + } attr { - name: "Tindices" + name: "T" type: "type" - allowed_values { - list { - type: DT_INT32 - type: DT_INT64 - } - } } attr { - name: "use_locking" - type: "bool" - default_value { - b: false - } + name: "block_size" + type: "int" } } op { @@ -12506,11 +11155,9 @@ op { type: DT_INT64 type: DT_INT32 type: DT_UINT8 - type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -12536,7 +11183,7 @@ op { } } op { - name: "SparseApplyFtrl" + name: "SparseApplyAdagrad" input_arg { name: "var" type_attr: "T" @@ -12548,9 +11195,8 @@ op { is_ref: true } input_arg { - name: "linear" + name: "lr" type_attr: "T" - is_ref: true } input_arg { name: "grad" @@ -12560,22 +11206,6 @@ op { name: "indices" type_attr: "Tindices" } - input_arg { - name: "lr" - type_attr: "T" - } - input_arg { - name: "l1" - type_attr: "T" - } - input_arg { - name: "l2" - type_attr: "T" - } - input_arg { - name: "lr_power" - type_attr: "T" - } output_arg { name: "out" type_attr: "T" @@ -12679,77 +11309,6 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 - type: DT_QINT8 - type: DT_QUINT8 - type: DT_QINT32 - } - } - } - attr { - name: "Tindices" - type: "type" - 
allowed_values { - list { - type: DT_INT32 - type: DT_INT64 - } - } - } - attr { - name: "use_locking" - type: "bool" - default_value { - b: false - } - } -} -op { - name: "SparseApplyMomentum" - input_arg { - name: "var" - type_attr: "T" - is_ref: true - } - input_arg { - name: "accum" - type_attr: "T" - is_ref: true - } - input_arg { - name: "lr" - type_attr: "T" - } - input_arg { - name: "grad" - type_attr: "T" - } - input_arg { - name: "indices" - type_attr: "Tindices" - } - input_arg { - name: "momentum" - type_attr: "T" - } - output_arg { - name: "out" - type_attr: "T" - is_ref: true - } - attr { - name: "T" - type: "type" - allowed_values { - list { - type: DT_FLOAT - type: DT_DOUBLE - type: DT_INT64 - type: DT_INT32 - type: DT_UINT8 - type: DT_INT16 - type: DT_INT8 - type: DT_COMPLEX64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -12817,7 +11376,6 @@ op { type: DT_INT64 type: DT_INT32 type: DT_UINT8 - type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 @@ -12892,7 +11450,6 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -13847,49 +12404,6 @@ op { } } } -op { - name: "Sum" - input_arg { - name: "input" - type_attr: "T" - } - input_arg { - name: "reduction_indices" - type: DT_INT32 - } - output_arg { - name: "output" - type_attr: "T" - } - attr { - name: "keep_dims" - type: "bool" - default_value { - b: false - } - } - attr { - name: "T" - type: "type" - allowed_values { - list { - type: DT_FLOAT - type: DT_DOUBLE - type: DT_INT64 - type: DT_INT32 - type: DT_UINT8 - type: DT_UINT16 - type: DT_INT16 - type: DT_INT8 - type: DT_COMPLEX64 - type: DT_COMPLEX128 - type: DT_QINT8 - type: DT_QUINT8 - type: DT_QINT32 - } - } - } -} op { name: "Switch" input_arg { diff --git a/tensorflow/core/ops/ops.pbtxt b/tensorflow/core/ops/ops.pbtxt index 703a92c0ce4..130c2e41567 100644 --- a/tensorflow/core/ops/ops.pbtxt +++ b/tensorflow/core/ops/ops.pbtxt @@ -102,7 +102,6 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -332,7 +331,6 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -425,7 +423,6 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -508,7 +505,6 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -564,7 +560,6 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -630,7 +625,6 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -712,7 +706,6 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -759,7 +752,6 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -797,7 +789,6 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -911,7 +902,6 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -962,7 +952,6 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -1389,7 +1378,6 @@ 
op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -1475,7 +1463,6 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -1551,7 +1538,6 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -1601,7 +1587,6 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -1656,7 +1641,6 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -1690,7 +1674,6 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -1711,7 +1694,6 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -4338,7 +4320,6 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -5128,7 +5109,6 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -5439,7 +5419,6 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -5541,7 +5520,6 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -6442,7 +6420,6 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -8155,7 +8132,6 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -8221,7 +8197,6 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -9171,7 +9146,6 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -9269,7 +9243,6 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -9351,7 +9324,6 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -10262,7 +10234,6 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 - type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 diff --git a/tensorflow/core/public/tensor_c_api.h b/tensorflow/core/public/tensor_c_api.h index b7ac96b6b9b..14f4dfa8125 100644 --- a/tensorflow/core/public/tensor_c_api.h +++ b/tensorflow/core/public/tensor_c_api.h @@ -78,8 +78,7 @@ typedef enum { TF_INT16 = 5, TF_INT8 = 6, TF_STRING = 7, - TF_COMPLEX64 = 8, // Single-precision complex - TF_COMPLEX = 8, // Old identifier kept for API backwards compatibility + TF_COMPLEX = 8, // Single-precision complex TF_INT64 = 9, TF_BOOL = 10, TF_QINT8 = 11, // Quantized int8 @@ -89,7 +88,6 @@ typedef enum { TF_QINT16 = 15, // Quantized int16 TF_QUINT16 = 16, // Quantized uint16 TF_UINT16 = 17, - TF_COMPLEX128 = 18, // Double-precision complex } TF_DataType; // -------------------------------------------------------------------------- diff --git a/tensorflow/core/util/saved_tensor_slice_util.h b/tensorflow/core/util/saved_tensor_slice_util.h index ce2dc5552e2..6c3759ffac8 100644 --- 
a/tensorflow/core/util/saved_tensor_slice_util.h +++ b/tensorflow/core/util/saved_tensor_slice_util.h @@ -108,7 +108,6 @@ TENSOR_PROTO_EXTRACT_TYPE(bool, bool, bool); TENSOR_PROTO_EXTRACT_TYPE(float, float, float); TENSOR_PROTO_EXTRACT_TYPE(double, double, double); TENSOR_PROTO_EXTRACT_TYPE_COMPLEX(complex64, scomplex, float); -TENSOR_PROTO_EXTRACT_TYPE_COMPLEX(complex128, dcomplex, double); TENSOR_PROTO_EXTRACT_TYPE(int32, int, int32); TENSOR_PROTO_EXTRACT_TYPE(int64, int64, int64); TENSOR_PROTO_EXTRACT_TYPE(uint8, int, int32); diff --git a/tensorflow/examples/android/README.md b/tensorflow/examples/android/README.md index fb737f7004e..11c8252f85e 100644 --- a/tensorflow/examples/android/README.md +++ b/tensorflow/examples/android/README.md @@ -19,7 +19,7 @@ installed on your system. 3. The Android SDK and build tools may be obtained from: https://developer.android.com/tools/revisions/build-tools.html -The Android entries in [`/WORKSPACE`](../../../WORKSPACE#L2-L13) must be +The Android entries in [`/WORKSPACE`](../../WORKSPACE) must be uncommented with the paths filled in appropriately depending on where you installed the NDK and SDK. Otherwise an error such as: "The external label '//external:android/sdk' is not bound to anything" will @@ -45,8 +45,10 @@ your workspace root: $ bazel build //tensorflow/examples/android:tensorflow_demo ``` -If you get build errors about protocol buffers, run -`git submodule update --init` and build again. +If you get build errors about protocol buffers then you may have left out the +`--recurse-submodules` argument to `git clone`. Review the instructions +here and then build again: +https://www.tensorflow.org/versions/master/get_started/os_setup.html#clone-the-tensorflow-repository If adb debugging is enabled on your Android 5.0 or later device, you may then use the following command from your workspace root to install the APK once diff --git a/tensorflow/examples/label_image/README.md b/tensorflow/examples/label_image/README.md index 1f40e8bef0d..c24ce19f7f3 100644 --- a/tensorflow/examples/label_image/README.md +++ b/tensorflow/examples/label_image/README.md @@ -43,15 +43,15 @@ This uses the default example image that ships with the framework, and should output something similar to this: ``` -I tensorflow/examples/label_image/main.cc:207] military uniform (866): 0.647299 -I tensorflow/examples/label_image/main.cc:207] suit (794): 0.0477195 -I tensorflow/examples/label_image/main.cc:207] academic gown (896): 0.0232407 -I tensorflow/examples/label_image/main.cc:207] bow tie (817): 0.0157355 -I tensorflow/examples/label_image/main.cc:207] bolo tie (940): 0.0145023 +I tensorflow/examples/label_image/main.cc:200] military uniform (866): 0.902268 +I tensorflow/examples/label_image/main.cc:200] bow tie (817): 0.05407 +I tensorflow/examples/label_image/main.cc:200] suit (794): 0.0113195 +I tensorflow/examples/label_image/main.cc:200] bulletproof vest (833): 0.0100269 +I tensorflow/examples/label_image/main.cc:200] bearskin (849): 0.00649746 ``` In this case, we're using the default image of Admiral Grace Hopper, and you can see the network correctly spots she's wearing a military uniform, with a high -score of 0.6. +score of 0.9. Next, try it out on your own images by supplying the --image= argument, e.g. 
diff --git a/tensorflow/examples/udacity/1_notmnist.ipynb b/tensorflow/examples/udacity/1_notmnist.ipynb index 9d864ccd374..b4704a3985d 100644 --- a/tensorflow/examples/udacity/1_notmnist.ipynb +++ b/tensorflow/examples/udacity/1_notmnist.ipynb @@ -117,7 +117,7 @@ " print('Found and verified', filename)\n", " else:\n", " raise Exception(\n", - " 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n", + " 'Failed to verify' + filename + '. Can you get to it with a browser?')\n", " return filename\n", "\n", "train_filename = maybe_download('notMNIST_large.tar.gz', 247336696)\n", diff --git a/tensorflow/models/image/cifar10/cifar10.py b/tensorflow/models/image/cifar10/cifar10.py index 05c8e70f5fc..8f5fd4f50d1 100644 --- a/tensorflow/models/image/cifar10/cifar10.py +++ b/tensorflow/models/image/cifar10/cifar10.py @@ -67,7 +67,7 @@ NUM_EPOCHS_PER_DECAY = 350.0 # Epochs after which learning rate decays. LEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor. INITIAL_LEARNING_RATE = 0.1 # Initial learning rate. -# If a model is trained with multiple GPUs, prefix all Op names with tower_name +# If a model is trained with multiple GPU's prefix all Op names with tower_name # to differentiate the operations. Note that this prefix is removed from the # names of the summaries when visualizing a model. TOWER_NAME = 'tower' @@ -255,7 +255,7 @@ def inference(images): def loss(logits, labels): """Add L2Loss to all the trainable variables. - Add summary for "Loss" and "Loss/avg". + Add summary for for "Loss" and "Loss/avg". Args: logits: Logits from inference(). labels: Labels from distorted_inputs or inputs(). 1-D tensor diff --git a/tensorflow/models/image/cifar10/cifar10_input.py b/tensorflow/models/image/cifar10/cifar10_input.py index 0d48a3549ce..a9a086992dc 100644 --- a/tensorflow/models/image/cifar10/cifar10_input.py +++ b/tensorflow/models/image/cifar10/cifar10_input.py @@ -172,7 +172,7 @@ def distorted_inputs(data_dir, batch_size): distorted_image = tf.image.random_flip_left_right(distorted_image) # Because these operations are not commutative, consider randomizing - # the order their operation. + # randomize the order their operation. 
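The comment in the cifar10_input.py hunk above notes that the per-image distortions are not commutative. The reason is saturation: each adjustment clips back into the valid pixel range, so brightness-then-contrast and contrast-then-brightness yield different images. A toy NumPy model of this (clipped arithmetic standing in for the actual tf.image kernels, which this sketch does not reproduce exactly):

```
import numpy as np

def brightness(img, delta):
    # Uniform shift, saturated to the valid range (cf. tf.image.random_brightness).
    return np.clip(img + delta, 0.0, 255.0)

def contrast(img, factor):
    # Scale deviations from the mean, saturated (cf. tf.image.random_contrast).
    return np.clip((img - img.mean()) * factor + img.mean(), 0.0, 255.0)

x = np.array([100.0, 200.0, 250.0])
print(contrast(brightness(x, 60.0), 2.0))  # approx [ 96.67 255. 255. ]
print(brightness(contrast(x, 2.0), 60.0))  # approx [ 76.67 255. 255. ]
```

Without the clipping the two orders would agree exactly, which is why the ordering only matters once values saturate.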
distorted_image = tf.image.random_brightness(distorted_image, max_delta=63) distorted_image = tf.image.random_contrast(distorted_image, diff --git a/tensorflow/python/__init__.py b/tensorflow/python/__init__.py index c36cdfe30fc..11bee08246c 100644 --- a/tensorflow/python/__init__.py +++ b/tensorflow/python/__init__.py @@ -181,7 +181,6 @@ __all__.extend([ 'bfloat16', 'bfloat16_ref', 'bool', 'bool_ref', 'complex64', 'complex64_ref', - 'complex128', 'complex128_ref', 'double', 'double_ref', 'float32', 'float32_ref', 'float64', 'float64_ref', diff --git a/tensorflow/python/client/session_test.py b/tensorflow/python/client/session_test.py index 491b293125d..55868328ffa 100644 --- a/tensorflow/python/client/session_test.py +++ b/tensorflow/python/client/session_test.py @@ -687,8 +687,7 @@ class SessionTest(test_util.TensorFlowTestCase): dtypes.int8, dtypes.int64, dtypes.bool, - dtypes.complex64, - dtypes.complex128]: + dtypes.complex64]: for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]: np_dtype = dtype.as_numpy_dtype @@ -701,8 +700,6 @@ class SessionTest(test_util.TensorFlowTestCase): np_array = np_array > 0 elif dtype == dtypes.complex64: np_array = np.sqrt(np_array.astype(np_dtype)) - elif dtype == dtypes.complex64: - np_array = np.sqrt(np_array.astype(np_dtype)) else: np_array = np_array.astype(np_dtype) diff --git a/tensorflow/python/client/tf_session_helper.cc b/tensorflow/python/client/tf_session_helper.cc index e5cdcddd5d5..02014cf3b36 100644 --- a/tensorflow/python/client/tf_session_helper.cc +++ b/tensorflow/python/client/tf_session_helper.cc @@ -1,4 +1,4 @@ -/* Copyright 2016 Google Inc. All Rights Reserved. +/* Copyright 2015 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -121,10 +121,7 @@ Status PyArray_TYPE_to_TF_DataType(PyArrayObject* array, *out_tf_datatype = TF_BOOL; break; case NPY_COMPLEX64: - *out_tf_datatype = TF_COMPLEX64; - break; - case NPY_COMPLEX128: - *out_tf_datatype = TF_COMPLEX128; + *out_tf_datatype = TF_COMPLEX; break; case NPY_OBJECT: *out_tf_datatype = TF_STRING; @@ -171,12 +168,9 @@ Status TF_DataType_to_PyArray_TYPE(TF_DataType tf_datatype, case TF_BOOL: *out_pyarray_type = NPY_BOOL; break; - case TF_COMPLEX64: + case TF_COMPLEX: *out_pyarray_type = NPY_COMPLEX64; break; - case TF_COMPLEX128: - *out_pyarray_type = NPY_COMPLEX128; - break; case TF_STRING: *out_pyarray_type = NPY_OBJECT; break; diff --git a/tensorflow/python/framework/dtypes.py b/tensorflow/python/framework/dtypes.py index d964a7f29b9..9c1e05f8bcd 100644 --- a/tensorflow/python/framework/dtypes.py +++ b/tensorflow/python/framework/dtypes.py @@ -32,7 +32,6 @@ class DType(object): * `tf.float64`: 64-bit double-precision floating-point. * `tf.bfloat16`: 16-bit truncated floating-point. * `tf.complex64`: 64-bit single-precision complex. - * `tf.complex128`: 128-bit double-precision complex. * `tf.int8`: 8-bit signed integer. * `tf.uint8`: 8-bit unsigned integer. 
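After the tf_session_helper.cc change above, NPY_COMPLEX64 maps to TF_COMPLEX and there is no case for NPY_COMPLEX128, so only single-precision complex arrays round-trip through a session. A hedged sketch of the visible behavior, assuming a TensorFlow build with this patch applied (the 0.7-era graph/session API):

```
import numpy as np
import tensorflow as tf

c64 = np.array([1 + 2j, 3 + 4j], dtype=np.complex64)
with tf.Session() as sess:
    out = sess.run(tf.identity(tf.constant(c64)))
    assert out.dtype == np.complex64   # supported: NPY_COMPLEX64 <-> TF_COMPLEX

# complex128 is no longer a TensorFlow dtype after this change, so converting
# a double-precision complex array should fail (likely a TypeError from the
# numpy-to-DType lookup).
try:
    tf.constant(np.array([1 + 2j], dtype=np.complex128))
except TypeError as e:
    print("complex128 rejected:", e)
```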
@@ -123,8 +122,6 @@ class DType(object): base = self.base_dtype if base == complex64: return float32 - elif base == complex128: - return float64 else: return self @@ -152,7 +149,7 @@ class DType(object): @property def is_complex(self): """Returns whether this is a complex floating point type.""" - return self.base_dtype in (complex64, complex128) + return self.base_dtype == complex64 @property def is_quantized(self): @@ -182,8 +179,8 @@ class DType(object): TypeError: if this is a non-numeric, unordered, or quantized type. """ - if (self.is_quantized or self.base_dtype in - (bool, string, complex64, complex128)): + if (self.is_quantized or self.base_dtype == bool or + self.base_dtype == string or self.base_dtype == complex64): raise TypeError("Cannot find minimum value of %s." % self) # there is no simple way to get the min value of a dtype, we have to check @@ -204,8 +201,8 @@ class DType(object): TypeError: if this is a non-numeric, unordered, or quantized type. """ - if (self.is_quantized or self.base_dtype in - (bool, string, complex64, complex128)): + if (self.is_quantized or self.base_dtype == bool or + self.base_dtype == string or self.base_dtype == complex64): raise TypeError("Cannot find maximum value of %s." % self) # there is no simple way to get the min value of a dtype, we have to check @@ -280,7 +277,6 @@ int16 = DType(types_pb2.DT_INT16) int8 = DType(types_pb2.DT_INT8) string = DType(types_pb2.DT_STRING) complex64 = DType(types_pb2.DT_COMPLEX64) -complex128 = DType(types_pb2.DT_COMPLEX128) int64 = DType(types_pb2.DT_INT64) bool = DType(types_pb2.DT_BOOL) qint8 = DType(types_pb2.DT_QINT8) @@ -299,7 +295,6 @@ int16_ref = DType(types_pb2.DT_INT16_REF) int8_ref = DType(types_pb2.DT_INT8_REF) string_ref = DType(types_pb2.DT_STRING_REF) complex64_ref = DType(types_pb2.DT_COMPLEX64_REF) -complex128_ref = DType(types_pb2.DT_COMPLEX128_REF) int64_ref = DType(types_pb2.DT_INT64_REF) bool_ref = DType(types_pb2.DT_BOOL_REF) qint8_ref = DType(types_pb2.DT_QINT8_REF) @@ -322,7 +317,6 @@ _INTERN_TABLE = { types_pb2.DT_INT8: int8, types_pb2.DT_STRING: string, types_pb2.DT_COMPLEX64: complex64, - types_pb2.DT_COMPLEX128: complex128, types_pb2.DT_INT64: int64, types_pb2.DT_BOOL: bool, types_pb2.DT_QINT8: qint8, @@ -340,7 +334,6 @@ _INTERN_TABLE = { types_pb2.DT_INT8_REF: int8_ref, types_pb2.DT_STRING_REF: string_ref, types_pb2.DT_COMPLEX64_REF: complex64_ref, - types_pb2.DT_COMPLEX128_REF: complex128_ref, types_pb2.DT_INT64_REF: int64_ref, types_pb2.DT_BOOL_REF: bool_ref, types_pb2.DT_QINT8_REF: qint8_ref, @@ -363,7 +356,6 @@ _TYPE_TO_STRING = { types_pb2.DT_INT8: "int8", types_pb2.DT_STRING: "string", types_pb2.DT_COMPLEX64: "complex64", - types_pb2.DT_COMPLEX128: "complex128", types_pb2.DT_INT64: "int64", types_pb2.DT_BOOL: "bool", types_pb2.DT_QINT8: "qint8", @@ -381,7 +373,6 @@ _TYPE_TO_STRING = { types_pb2.DT_INT8_REF: "int8_ref", types_pb2.DT_STRING_REF: "string_ref", types_pb2.DT_COMPLEX64_REF: "complex64_ref", - types_pb2.DT_COMPLEX128_REF: "complex128_ref", types_pb2.DT_INT64_REF: "int64_ref", types_pb2.DT_BOOL_REF: "bool_ref", types_pb2.DT_QINT8_REF: "qint8_ref", @@ -423,7 +414,6 @@ _NP_TO_TF = frozenset([ (np.int16, int16), (np.int8, int8), (np.complex64, complex64), - (np.complex128, complex128), (np.object, string), (np.bool, bool), (_np_qint8, qint8), @@ -445,7 +435,6 @@ _TF_TO_NP = { # strings. 
types_pb2.DT_STRING: np.object, types_pb2.DT_COMPLEX64: np.complex64, - types_pb2.DT_COMPLEX128: np.complex128, types_pb2.DT_INT64: np.int64, types_pb2.DT_BOOL: np.bool, types_pb2.DT_QINT8: _np_qint8, @@ -465,7 +454,6 @@ _TF_TO_NP = { types_pb2.DT_INT8_REF: np.int8, types_pb2.DT_STRING_REF: np.object, types_pb2.DT_COMPLEX64_REF: np.complex64, - types_pb2.DT_COMPLEX128_REF: np.complex128, types_pb2.DT_INT64_REF: np.int64, types_pb2.DT_BOOL_REF: np.bool, types_pb2.DT_QINT8_REF: _np_qint8, diff --git a/tensorflow/python/framework/dtypes_test.py b/tensorflow/python/framework/dtypes_test.py index d303918987c..91fada9f0f2 100644 --- a/tensorflow/python/framework/dtypes_test.py +++ b/tensorflow/python/framework/dtypes_test.py @@ -71,7 +71,6 @@ class TypesTest(test_util.TensorFlowTestCase): self.assertIs(tf.int16, tf.as_dtype(np.int16)) self.assertIs(tf.int8, tf.as_dtype(np.int8)) self.assertIs(tf.complex64, tf.as_dtype(np.complex64)) - self.assertIs(tf.complex128, tf.as_dtype(np.complex128)) self.assertIs(tf.string, tf.as_dtype(np.object)) self.assertIs(tf.string, tf.as_dtype(np.array(["foo", "bar"]).dtype)) self.assertIs(tf.bool, tf.as_dtype(np.bool)) @@ -83,7 +82,6 @@ class TypesTest(test_util.TensorFlowTestCase): tf.int32, tf.int64]: self.assertIs(dtype.real_dtype, dtype) self.assertIs(tf.complex64.real_dtype, tf.float32) - self.assertIs(tf.complex128.real_dtype, tf.float64) def testStringConversion(self): self.assertIs(tf.float32, tf.as_dtype("float32")) @@ -95,7 +93,6 @@ class TypesTest(test_util.TensorFlowTestCase): self.assertIs(tf.int8, tf.as_dtype("int8")) self.assertIs(tf.string, tf.as_dtype("string")) self.assertIs(tf.complex64, tf.as_dtype("complex64")) - self.assertIs(tf.complex128, tf.as_dtype("complex128")) self.assertIs(tf.int64, tf.as_dtype("int64")) self.assertIs(tf.bool, tf.as_dtype("bool")) self.assertIs(tf.qint8, tf.as_dtype("qint8")) @@ -110,7 +107,6 @@ class TypesTest(test_util.TensorFlowTestCase): self.assertIs(tf.int8_ref, tf.as_dtype("int8_ref")) self.assertIs(tf.string_ref, tf.as_dtype("string_ref")) self.assertIs(tf.complex64_ref, tf.as_dtype("complex64_ref")) - self.assertIs(tf.complex128_ref, tf.as_dtype("complex128_ref")) self.assertIs(tf.int64_ref, tf.as_dtype("int64_ref")) self.assertIs(tf.bool_ref, tf.as_dtype("bool_ref")) self.assertIs(tf.qint8_ref, tf.as_dtype("qint8_ref")) @@ -139,7 +135,6 @@ class TypesTest(test_util.TensorFlowTestCase): self.assertEqual(tf.as_dtype("uint8").is_integer, True) self.assertEqual(tf.as_dtype("uint16").is_integer, True) self.assertEqual(tf.as_dtype("complex64").is_integer, False) - self.assertEqual(tf.as_dtype("complex128").is_integer, False) self.assertEqual(tf.as_dtype("float").is_integer, False) self.assertEqual(tf.as_dtype("double").is_integer, False) self.assertEqual(tf.as_dtype("string").is_integer, False) @@ -153,7 +148,6 @@ class TypesTest(test_util.TensorFlowTestCase): self.assertEqual(tf.as_dtype("uint8").is_floating, False) self.assertEqual(tf.as_dtype("uint16").is_floating, False) self.assertEqual(tf.as_dtype("complex64").is_floating, False) - self.assertEqual(tf.as_dtype("complex128").is_floating, False) self.assertEqual(tf.as_dtype("float32").is_floating, True) self.assertEqual(tf.as_dtype("float64").is_floating, True) self.assertEqual(tf.as_dtype("string").is_floating, False) @@ -167,7 +161,6 @@ class TypesTest(test_util.TensorFlowTestCase): self.assertEqual(tf.as_dtype("uint8").is_complex, False) self.assertEqual(tf.as_dtype("uint16").is_complex, False) self.assertEqual(tf.as_dtype("complex64").is_complex, True) - 
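Taken together, the dtypes.py and dtypes_test.py hunks above shrink the complex surface to a single type. A short sketch of the resulting DType behavior, under the same patched-build assumption as before:

```
import numpy as np
import tensorflow as tf

assert tf.as_dtype(np.complex64) is tf.complex64
assert tf.as_dtype("complex64") is tf.complex64
assert tf.complex64.is_complex
assert tf.complex64.real_dtype is tf.float32  # the complex128/float64 pairing is gone

# min/max are undefined for complex types, so DType raises.
try:
    tf.complex64.min
except TypeError as e:
    print(e)  # e.g. "Cannot find minimum value of <dtype: 'complex64'>."
```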
self.assertEqual(tf.as_dtype("complex128").is_complex, True) self.assertEqual(tf.as_dtype("float32").is_complex, False) self.assertEqual(tf.as_dtype("float64").is_complex, False) self.assertEqual(tf.as_dtype("string").is_complex, False) @@ -185,7 +178,6 @@ class TypesTest(test_util.TensorFlowTestCase): self.assertEqual(tf.as_dtype("bool").is_unsigned, False) self.assertEqual(tf.as_dtype("string").is_unsigned, False) self.assertEqual(tf.as_dtype("complex64").is_unsigned, False) - self.assertEqual(tf.as_dtype("complex128").is_unsigned, False) def testMinMax(self): # make sure min/max evaluates for all data types that have min/max @@ -200,8 +192,7 @@ class TypesTest(test_util.TensorFlowTestCase): if (dtype.is_quantized or dtype.base_dtype == tf.bool or dtype.base_dtype == tf.string or - dtype.base_dtype == tf.complex64 or - dtype.base_dtype == tf.complex128): + dtype.base_dtype == tf.complex64): continue print("%s: %s - %s" % (dtype, dtype.min, dtype.max)) diff --git a/tensorflow/python/framework/ops_test.py b/tensorflow/python/framework/ops_test.py index afa5c4812df..cfc96a0cc80 100644 --- a/tensorflow/python/framework/ops_test.py +++ b/tensorflow/python/framework/ops_test.py @@ -1289,7 +1289,7 @@ class ColocationGroupTest(test_util.TensorFlowTestCase): with ops.colocate_with(a.op): with ops.colocate_with(b.op, ignore_existing=True): c = constant_op.constant(4.0) - self.assertEqual(set([b"loc:@b"]), set(c.op.colocation_groups())) + self.assertEqual(set(["loc:@b"]), set(c.op.colocation_groups())) def testColocateVariables(self): a = variables.Variable([2.0], name="a") diff --git a/tensorflow/python/framework/tensor_util.py b/tensorflow/python/framework/tensor_util.py index 7a9add319a6..b1b39f0651d 100644 --- a/tensorflow/python/framework/tensor_util.py +++ b/tensorflow/python/framework/tensor_util.py @@ -76,16 +76,11 @@ else: def SlowAppendInt64ArrayToTensorProto(tensor_proto, proto_values): tensor_proto.int64_val.extend([np.asscalar(x) for x in proto_values]) - def SlowAppendComplex64ArrayToTensorProto(tensor_proto, proto_values): + def SlowAppendComplexArrayToTensorProto(tensor_proto, proto_values): tensor_proto.scomplex_val.extend([np.asscalar(v) for x in proto_values for v in [x.real, x.imag]]) - def SlowAppendComplex128ArrayToTensorProto(tensor_proto, proto_values): - tensor_proto.dcomplex_val.extend([np.asscalar(v) - for x in proto_values - for v in [x.real, x.imag]]) - def SlowAppendObjectArrayToTensorProto(tensor_proto, proto_values): tensor_proto.string_val.extend([compat.as_bytes(x) for x in proto_values]) @@ -101,8 +96,8 @@ else: np.uint16: SlowAppendIntArrayToTensorProto, np.int16: SlowAppendIntArrayToTensorProto, np.int8: SlowAppendIntArrayToTensorProto, - np.complex64: SlowAppendComplex64ArrayToTensorProto, - np.complex128: SlowAppendComplex128ArrayToTensorProto, + np.complex64: SlowAppendComplexArrayToTensorProto, + np.complex128: SlowAppendComplexArrayToTensorProto, np.object: SlowAppendObjectArrayToTensorProto, np.bool: SlowAppendBoolArrayToTensorProto, dtypes.qint8.as_numpy_dtype: SlowAppendIntArrayToTensorProto, @@ -245,7 +240,6 @@ _TF_TO_IS_OK = { dtypes.int8: _FilterInt, dtypes.string: _FilterStr, dtypes.complex64: _FilterComplex, - dtypes.complex128: _FilterComplex, dtypes.int64: _FilterInt, dtypes.bool: _FilterBool, dtypes.qint32: _FilterInt, @@ -459,15 +453,6 @@ def MakeNdarray(tensor): else: return np.array([complex(x[0], x[1]) for x in zip(it, it)], dtype=dtype).reshape(shape) - elif tensor_dtype == dtypes.complex128: - it = iter(tensor.dcomplex_val) - if 
len(tensor.dcomplex_val) == 2: - return np.repeat(np.array(complex(tensor.dcomplex_val[0], - tensor.dcomplex_val[1]), dtype=dtype), - num_elements).reshape(shape) - else: - return np.array([complex(x[0], x[1]) for x in zip(it, it)], - dtype=dtype).reshape(shape) elif tensor_dtype == dtypes.bool: if len(tensor.bool_val) == 1: return np.repeat(np.array(tensor.bool_val[0], dtype=dtype), diff --git a/tensorflow/python/framework/tensor_util_test.py b/tensorflow/python/framework/tensor_util_test.py index d1cec3e0613..a2c28f0078f 100644 --- a/tensorflow/python/framework/tensor_util_test.py +++ b/tensorflow/python/framework/tensor_util_test.py @@ -274,7 +274,7 @@ class TensorUtilTest(tf.test.TestCase): self.assertEquals(np.object, a.dtype) self.assertAllEqual(np.array([[b"a", b"ab"], [b"abc", b"abcd"]]), a) - def testComplex64(self): + def testComplex(self): t = tensor_util.make_tensor_proto((1+2j), dtype=tf.complex64) self.assertProtoEquals(""" dtype: DT_COMPLEX64 @@ -286,30 +286,16 @@ class TensorUtilTest(tf.test.TestCase): self.assertEquals(np.complex64, a.dtype) self.assertAllEqual(np.array(1 + 2j), a) - def testComplex128(self): - t = tensor_util.make_tensor_proto((1+2j), dtype=tf.complex128) - self.assertProtoEquals(""" - dtype: DT_COMPLEX128 - tensor_shape {} - dcomplex_val: 1 - dcomplex_val: 2 - """, t) - a = tensor_util.MakeNdarray(t) - self.assertEquals(np.complex128, a.dtype) - self.assertAllEqual(np.array(1 + 2j), a) - def testComplexWithImplicitRepeat(self): - for dtype, np_dtype in [(tf.complex64, np.complex64), - (tf.complex128, np.complex128)]: - t = tensor_util.make_tensor_proto((1+1j), shape=[3, 4], - dtype=dtype) - a = tensor_util.MakeNdarray(t) - self.assertAllClose(np.array([[(1+1j), (1+1j), (1+1j), (1+1j)], - [(1+1j), (1+1j), (1+1j), (1+1j)], - [(1+1j), (1+1j), (1+1j), (1+1j)]], - dtype=np_dtype), a) + t = tensor_util.make_tensor_proto((1+1j), shape=[3, 4], + dtype=tf.complex64) + a = tensor_util.MakeNdarray(t) + self.assertAllClose(np.array([[(1+1j), (1+1j), (1+1j), (1+1j)], + [(1+1j), (1+1j), (1+1j), (1+1j)], + [(1+1j), (1+1j), (1+1j), (1+1j)]], + dtype=np.complex64), a) - def testComplex64N(self): + def testComplexN(self): t = tensor_util.make_tensor_proto([(1+2j), (3+4j), (5+6j)], shape=[1, 3], dtype=tf.complex64) self.assertProtoEquals(""" @@ -326,24 +312,7 @@ class TensorUtilTest(tf.test.TestCase): self.assertEquals(np.complex64, a.dtype) self.assertAllEqual(np.array([[(1+2j), (3+4j), (5+6j)]]), a) - def testComplex128N(self): - t = tensor_util.make_tensor_proto([(1+2j), (3+4j), (5+6j)], shape=[1, 3], - dtype=tf.complex128) - self.assertProtoEquals(""" - dtype: DT_COMPLEX128 - tensor_shape { dim { size: 1 } dim { size: 3 } } - dcomplex_val: 1 - dcomplex_val: 2 - dcomplex_val: 3 - dcomplex_val: 4 - dcomplex_val: 5 - dcomplex_val: 6 - """, t) - a = tensor_util.MakeNdarray(t) - self.assertEquals(np.complex128, a.dtype) - self.assertAllEqual(np.array([[(1+2j), (3+4j), (5+6j)]]), a) - - def testComplex64NpArray(self): + def testComplexNpArray(self): t = tensor_util.make_tensor_proto( np.array([[(1+2j), (3+4j)], [(5+6j), (7+8j)]]), dtype=tf.complex64) # scomplex_val are real_0, imag_0, real_1, imag_1, ... @@ -363,26 +332,6 @@ class TensorUtilTest(tf.test.TestCase): self.assertEquals(np.complex64, a.dtype) self.assertAllEqual(np.array([[(1+2j), (3+4j)], [(5+6j), (7+8j)]]), a) - def testComplex128NpArray(self): - t = tensor_util.make_tensor_proto( - np.array([[(1+2j), (3+4j)], [(5+6j), (7+8j)]]), dtype=tf.complex128) - # scomplex_val are real_0, imag_0, real_1, imag_1, ... 
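As that comment says, complex64 tensors serialize into scomplex_val as interleaved real/imag pairs, and MakeNdarray reassembles them (repeating a single stored pair when the shape is larger). A sketch of the round trip, using the same tensor_util calls these tests exercise:

```
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import tensor_util

t = tensor_util.make_tensor_proto([(1 + 2j), (3 + 4j)], dtype=tf.complex64)
print(list(t.scomplex_val))   # [1.0, 2.0, 3.0, 4.0] -- real_0, imag_0, real_1, imag_1

a = tensor_util.MakeNdarray(t)
assert a.dtype == np.complex64
np.testing.assert_allclose(a, [1 + 2j, 3 + 4j])

# A single stored value with a larger shape is repeated implicitly.
t = tensor_util.make_tensor_proto((1 + 1j), shape=[3, 4], dtype=tf.complex64)
assert tensor_util.MakeNdarray(t).shape == (3, 4)
```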
- self.assertProtoEquals(""" - dtype: DT_COMPLEX128 - tensor_shape { dim { size: 2 } dim { size: 2 } } - dcomplex_val: 1 - dcomplex_val: 2 - dcomplex_val: 3 - dcomplex_val: 4 - dcomplex_val: 5 - dcomplex_val: 6 - dcomplex_val: 7 - dcomplex_val: 8 - """, t) - a = tensor_util.MakeNdarray(t) - self.assertEquals(np.complex128, a.dtype) - self.assertAllEqual(np.array([[(1+2j), (3+4j)], [(5+6j), (7+8j)]]), a) - def testUnsupportedDType(self): with self.assertRaises(TypeError): tensor_util.make_tensor_proto(np.array([1]), 0) diff --git a/tensorflow/python/lib/core/py_func.cc b/tensorflow/python/lib/core/py_func.cc index 1949913ec81..5701e8fe70b 100644 --- a/tensorflow/python/lib/core/py_func.cc +++ b/tensorflow/python/lib/core/py_func.cc @@ -99,9 +99,6 @@ Status TfDTypeToNpDType(const DataType& tf, int* np) { case DT_COMPLEX64: *np = NPY_COMPLEX64; break; - case DT_COMPLEX128: - *np = NPY_COMPLEX128; - break; case DT_STRING: *np = NPY_OBJECT; break; @@ -213,9 +210,6 @@ Status NumericNpDTypeToTfDType(const int np, DataType* tf) { case NPY_COMPLEX64: *tf = DT_COMPLEX64; break; - case NPY_COMPLEX128: - *tf = DT_COMPLEX128; - break; default: return errors::Unimplemented("Unsupported numpy type ", np); } diff --git a/tensorflow/python/training/session_manager.py b/tensorflow/python/training/session_manager.py index 9e5b9a17209..08fb65db6d3 100644 --- a/tensorflow/python/training/session_manager.py +++ b/tensorflow/python/training/session_manager.py @@ -326,7 +326,7 @@ class SessionManager(object): try: sess.run(self._ready_op) return None - except errors.FailedPreconditionError as e: + except errors.FailedPreconditionError, e: if "uninitialized" not in str(e): logging.warning("Model not ready raised: %s", str(e)) raise e diff --git a/tensorflow/tools/ci_build/Dockerfile.cpu b/tensorflow/tools/ci_build/Dockerfile.cpu index 369daa9dcfb..acc84f136a8 100644 --- a/tensorflow/tools/ci_build/Dockerfile.cpu +++ b/tensorflow/tools/ci_build/Dockerfile.cpu @@ -7,7 +7,6 @@ COPY install/*.sh /install/ RUN /install/install_bootstrap_deb_packages.sh RUN add-apt-repository -y ppa:openjdk-r/ppa RUN /install/install_deb_packages.sh -RUN /install/install_pip_packages.sh RUN /install/install_bazel.sh # Set up bazelrc. diff --git a/tensorflow/tools/ci_build/Dockerfile.gpu b/tensorflow/tools/ci_build/Dockerfile.gpu index 81cc4c9f416..b4b0ccccf75 100644 --- a/tensorflow/tools/ci_build/Dockerfile.gpu +++ b/tensorflow/tools/ci_build/Dockerfile.gpu @@ -7,7 +7,6 @@ COPY install/*.sh /install/ RUN /install/install_bootstrap_deb_packages.sh RUN add-apt-repository -y ppa:openjdk-r/ppa RUN /install/install_deb_packages.sh -RUN /install/install_pip_packages.sh RUN /install/install_bazel.sh # Set up bazelrc. diff --git a/tensorflow/tools/ci_build/builds/pip.sh b/tensorflow/tools/ci_build/builds/pip.sh index 7255de0bccf..16364fbf9ee 100755 --- a/tensorflow/tools/ci_build/builds/pip.sh +++ b/tensorflow/tools/ci_build/builds/pip.sh @@ -22,20 +22,11 @@ # pip.sh CONTAINER_TYPE [--test_tutorials] # # When executing the Python unit tests, the script obeys the shell -# variables: TF_BUILD_BAZEL_CLEAN, TF_BUILD_INSTALL_EXTRA_PIP_PACKAGES, -# TF_BUILD_NO_CACHING_VIRTUALENV, NO_TEST_ON_INSTALL +# variables: TF_BUILD_BAZEL_CLEAN, NO_TEST_ON_INSTALL # # TF_BUILD_BAZEL_CLEAN, if set to any non-empty and non-0 value, directs the # script to perform bazel clean prior to main build and test steps. # -# TF_BUILD_INSTALL_EXTRA_PIP_PACKAGES overrides the default extra pip packages -# to be installed in virtualenv before test_installation.sh is called. 
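The session_manager.py hunk above, aside from reverting to the Python 2-only `except E, e:` spelling, shows the readiness-check idiom: run the ready op and treat a FailedPreconditionError that mentions "uninitialized" as "model not ready", while surfacing anything else. A hedged sketch of that pattern, written with the `except ... as e` form and an illustrative function name:

```
from tensorflow.python.framework import errors

def model_ready(sess, ready_op):
    """Returns None if the model is ready, else a reason string (sketch)."""
    try:
        sess.run(ready_op)
        return None
    except errors.FailedPreconditionError as e:
        if "uninitialized" not in str(e):
            raise  # some other precondition failure; let it propagate
        return str(e)  # variables not yet initialized
```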
Multiple -# pakcage names are separated with spaces. -# -# TF_BUILD_NO_CACHING_VIRTUALENV: If set to any non-empty and non-0 value, -# will cause the script to force remove any existing (cached) virtualenv -# directory. -# # If NO_TEST_ON_INSTALL has any non-empty and non-0 value, the test-on-install # part will be skipped. # @@ -44,8 +35,6 @@ # installation and the Python unit tests-on-install step. # -INSTALL_EXTRA_PIP_PACKAGES=${TF_BUILD_INSTALL_EXTRA_PIP_PACKAGES} - # Helper functions # Get the absolute path from a path abs_path() { @@ -122,7 +111,7 @@ PIP_WHL_DIR="${PIP_TEST_ROOT}/whl" PIP_WHL_DIR=$(abs_path ${PIP_WHL_DIR}) # Get absolute path rm -rf ${PIP_WHL_DIR} && mkdir -p ${PIP_WHL_DIR} bazel-bin/tensorflow/tools/pip_package/build_pip_package ${PIP_WHL_DIR} || \ - die "build_pip_package FAILED" +die "build_pip_package FAILED" # Perform installation WHL_PATH=$(ls ${PIP_WHL_DIR}/tensorflow*.whl) @@ -136,46 +125,27 @@ echo "whl file path = ${WHL_PATH}" # Install, in user's local home folder echo "Installing pip whl file: ${WHL_PATH}" -# Create virtualenv directory for install test +# Create temporary directory for install test VENV_DIR="${PIP_TEST_ROOT}/venv" -if [[ -d "${VENV_DIR}" ]] && - [[ ! -z "${TF_BUILD_NO_CACHING_VIRTUALENV}" ]] && - [[ "${TF_BUILD_NO_CACHING_VIRTUALENV}" != "0" ]]; then - echo "TF_BUILD_NO_CACHING_VIRTUALENV=${TF_BUILD_NO_CACHING_VIRTUALENV}:" - echo "Removing existing virtualenv directory: ${VENV_DIR}" - - rm -rf "${VENV_DIR}" || \ - die "Failed to remove existing virtualenv directory: ${VENV_DIR}" -fi - -mkdir -p ${VENV_DIR} || \ - die "FAILED to create virtualenv directory: ${VENV_DIR}" +rm -rf "${VENV_DIR}" && mkdir -p "${VENV_DIR}" +echo "Create directory for virtualenv: ${VENV_DIR}" # Verify that virtualenv exists if [[ -z $(which virtualenv) ]]; then die "FAILED: virtualenv not available on path" fi -virtualenv --system-site-packages -p "${PYTHON_BIN_PATH}" "${VENV_DIR}" || \ - die "FAILED: Unable to create virtualenv" - -source "${VENV_DIR}/bin/activate" || \ - die "FAILED: Unable to activate virtualenv" +virtualenv -p "${PYTHON_BIN_PATH}" "${VENV_DIR}" || +die "FAILED: Unable to create virtualenv" +source "${VENV_DIR}/bin/activate" || +die "FAILED: Unable to activate virtualenv" # Install the pip file in virtual env -pip install -v --force-reinstall ${WHL_PATH} \ +pip install -v ${WHL_PATH} \ && echo "Successfully installed pip package ${WHL_PATH}" \ || die "pip install (without --upgrade) FAILED" -# Install extra pip packages required by the test-on-install -for PACKAGE in ${INSTALL_EXTRA_PIP_PACKAGES}; do - echo "Installing extra pip package required by test-on-install: ${PACKAGE}" - - pip install ${PACKAGE} || \ - die "pip install ${PACKAGE} FAILED" -done - # If NO_TEST_ON_INSTALL is set to any non-empty value, skip all Python # tests-on-install and exit right away if [[ ! 
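The pip.sh hunks above collapse the cached-virtualenv and extra-pip-packages logic back to a plain create, install, test flow. A rough Python transliteration of that flow; the real script is Bash, the paths and wheel glob are placeholders mirroring ${PIP_TEST_ROOT} and ${PIP_WHL_DIR}, and it assumes a POSIX layout with `virtualenv` on the PATH:

```
import glob
import subprocess
import sys

def die(msg):
    sys.exit(msg)

venv_dir = "pip_test/venv"                 # placeholder for ${PIP_TEST_ROOT}/venv
subprocess.call(["rm", "-rf", venv_dir])
if subprocess.call(["virtualenv", "-p", sys.executable, venv_dir]):
    die("FAILED: Unable to create virtualenv")

pip = venv_dir + "/bin/pip"
whl = glob.glob("pip_test/whl/tensorflow*.whl")[0]   # placeholder for ${PIP_WHL_DIR}
if subprocess.call([pip, "install", "-v", whl]):
    die("pip install (without --upgrade) FAILED")
```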
-z "${NO_TEST_ON_INSTALL}" ]] && @@ -188,14 +158,14 @@ fi # Call test_installation.sh to perform test-on-install DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -"${DIR}/test_installation.sh" --virtualenv || \ - die "PIP tests-on-install FAILED" +"${DIR}/test_installation.sh" --virtualenv || +die "PIP tests-on-install FAILED" # Optional: Run the tutorial tests if [[ "${DO_TEST_TUTORIALS}" == "1" ]]; then - "${DIR}/test_tutorials.sh" --virtualenv || \ - die "PIP tutorial tests-on-install FAILED" + "${DIR}/test_tutorials.sh" --virtualenv || +die "PIP tutorial tests-on-install FAILED" fi -deactivate || \ - die "FAILED: Unable to deactivate virtualenv" +deactivate || +die "FAILED: Unable to deactivate virtualenv" diff --git a/tensorflow/tools/ci_build/builds/test_installation.sh b/tensorflow/tools/ci_build/builds/test_installation.sh index 8fa9b481f64..d2c8d21c5bd 100755 --- a/tensorflow/tools/ci_build/builds/test_installation.sh +++ b/tensorflow/tools/ci_build/builds/test_installation.sh @@ -166,8 +166,7 @@ cp -r tensorflow/core/lib/png ${PY_TEST_DIR}/tensorflow/core/lib # Run tests DIR0=$(pwd) -ALL_PY_TESTS=$(find tensorflow/{contrib,examples,models,python,tensorboard} \ - -type f \( -name "*_test.py" -o -name "test_*.py" \) | sort) +ALL_PY_TESTS=$(find tensorflow/{contrib,examples,models,python,tensorboard} -name "*_test.py" | sort) # TODO(cais): Add tests in tensorflow/contrib PY_TEST_COUNT=$(echo ${ALL_PY_TESTS} | wc -w) diff --git a/tensorflow/tools/ci_build/ci_parameterized_build.sh b/tensorflow/tools/ci_build/ci_parameterized_build.sh index 9b7e5abd621..46c1740af61 100755 --- a/tensorflow/tools/ci_build/ci_parameterized_build.sh +++ b/tensorflow/tools/ci_build/ci_parameterized_build.sh @@ -306,7 +306,7 @@ if [[ "${DO_DOCKER}" == "1" ]]; then fi # Write to the tmp script -echo "#!/usr/bin/env bash" > ${TMP_SCRIPT} +echo "#!/bin/bash" > ${TMP_SCRIPT} if [[ ! -z "${TF_BUILD_BAZEL_CLEAN}" ]] && [[ "${TF_BUILD_BAZEL_CLEAN}" != "0" ]]; then echo ${BAZEL_CLEAN_CMD} >> ${TMP_SCRIPT} diff --git a/tensorflow/tools/ci_build/install/install_deb_packages.sh b/tensorflow/tools/ci_build/install/install_deb_packages.sh index 1bf77b236c2..b752e86d690 100755 --- a/tensorflow/tools/ci_build/install/install_deb_packages.sh +++ b/tensorflow/tools/ci_build/install/install_deb_packages.sh @@ -29,12 +29,10 @@ apt-get install -y \ python-dev \ python-numpy \ python-pip \ - python-scipy \ python-virtualenv \ python3-dev \ python3-numpy \ python3-pip \ - python3-scipy \ sudo \ swig \ unzip \ diff --git a/tensorflow/tools/ci_build/install/install_pip_packages.sh b/tensorflow/tools/ci_build/install/install_pip_packages.sh deleted file mode 100755 index 39583869e20..00000000000 --- a/tensorflow/tools/ci_build/install/install_pip_packages.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2015 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -set -e - -pip install sklearn -pip3 install scikit-learn diff --git a/tensorflow/tools/docker/docker_run_gpu.sh b/tensorflow/tools/docker/docker_run_gpu.sh index ead05f9150f..9ebfa701e4c 100755 --- a/tensorflow/tools/docker/docker_run_gpu.sh +++ b/tensorflow/tools/docker/docker_run_gpu.sh @@ -1,4 +1,4 @@ -#!/usr/bin/env bash +#!/bin/bash # Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tensorflow/tools/docker/run_jupyter.sh b/tensorflow/tools/docker/run_jupyter.sh index eb69d62c073..ba2f3a33262 100755 --- a/tensorflow/tools/docker/run_jupyter.sh +++ b/tensorflow/tools/docker/run_jupyter.sh @@ -1,4 +1,4 @@ -#!/usr/bin/env bash +#!/bin/bash # Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tensorflow/tools/docs/gen_docs.sh b/tensorflow/tools/docs/gen_docs.sh index de507fcd000..95c0092d4ac 100755 --- a/tensorflow/tools/docs/gen_docs.sh +++ b/tensorflow/tools/docs/gen_docs.sh @@ -1,4 +1,4 @@ -#!/usr/bin/env bash +#!/bin/bash # Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tensorflow/tools/docs/gen_docs_test.sh b/tensorflow/tools/docs/gen_docs_test.sh index 9375784dc23..2f905c8e47f 100755 --- a/tensorflow/tools/docs/gen_docs_test.sh +++ b/tensorflow/tools/docs/gen_docs_test.sh @@ -1,4 +1,4 @@ -#!/usr/bin/env bash +#!/bin/bash -eux # Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,8 +14,6 @@ # limitations under the License. # ============================================================================== -set -eux - TFDIR=$TEST_SRCDIR/tensorflow DOXYGEN=doxygen DOXYGEN_CONFIG="tf-doxy_for_md-config" diff --git a/tensorflow/tools/pip_package/build_pip_package.sh b/tensorflow/tools/pip_package/build_pip_package.sh index 1ae6926b676..6b4e50490bb 100755 --- a/tensorflow/tools/pip_package/build_pip_package.sh +++ b/tensorflow/tools/pip_package/build_pip_package.sh @@ -1,4 +1,4 @@ -#!/usr/bin/env bash +#!/bin/bash # Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tensorflow/tools/swig/swig.sh b/tensorflow/tools/swig/swig.sh index c35b2ee3634..0601703b011 100755 --- a/tensorflow/tools/swig/swig.sh +++ b/tensorflow/tools/swig/swig.sh @@ -1,4 +1,4 @@ -#!/usr/bin/env bash +#!/bin/bash # Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tensorflow/tools/test/BUILD b/tensorflow/tools/test/BUILD index df2a0b45554..a686bbee4ca 100644 --- a/tensorflow/tools/test/BUILD +++ b/tensorflow/tools/test/BUILD @@ -53,21 +53,23 @@ py_binary( # Unit test that calls run_and_gather_logs on a benchmark, and # prints the result. 
-#cuda_py_test( -# name = "run_and_gather_logs_test", -# srcs = ["run_and_gather_logs.py"], -# additional_deps = [ -# ":run_and_gather_logs", -# ], -# args = [ -# "--test_name=" + "//tensorflow/core/kernels:cast_op_test", -# "--test_args=" + "'--benchmarks=BM_cpu_float'", -# ], -# data = [ -# "//tensorflow/core/kernels:cast_op_test", -# ], -# main = "run_and_gather_logs.py", -#) +cuda_py_test( + name = "run_and_gather_logs_test", + srcs = ["run_and_gather_logs.py"], + additional_deps = [ + ":run_and_gather_logs", + ], + args = [ + "--test_name=" + "//tensorflow/core/kernels:cast_op_test", + "--test_args=" + "'--benchmarks=BM_cpu_float_bfloat16'", + "--compilation_mode='$(COMPILATION_MODE)'", + "--cc_flags='$(CC_FLAGS)'", + ], + data = [ + "//tensorflow/core/kernels:cast_op_test", + ], + main = "run_and_gather_logs.py", +) filegroup( name = "all_files", diff --git a/third_party/gpus/cuda/cuda_config.sh b/third_party/gpus/cuda/cuda_config.sh index 651e5ae0317..42cd254644b 100755 --- a/third_party/gpus/cuda/cuda_config.sh +++ b/third_party/gpus/cuda/cuda_config.sh @@ -1,4 +1,4 @@ -#!/usr/bin/env bash +#!/bin/bash # Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/util/python/python_config.sh b/util/python/python_config.sh index 83e38566906..a5666c2f7ee 100755 --- a/util/python/python_config.sh +++ b/util/python/python_config.sh @@ -1,4 +1,4 @@ -#!/usr/bin/env bash +#!/bin/bash # Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); -- GitLab
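The BUILD hunk above re-enables run_and_gather_logs_test, passing the benchmark target plus Bazel "Make" variables ($(COMPILATION_MODE), $(CC_FLAGS)) through the --compilation_mode and --cc_flags args. A sketch of how the harness might be driven by hand; the flag names come from the BUILD args above, while the values here are placeholders standing in for what Bazel would substitute:

```
import subprocess

cmd = [
    "python", "tensorflow/tools/test/run_and_gather_logs.py",
    "--test_name=//tensorflow/core/kernels:cast_op_test",
    "--test_args=--benchmarks=BM_cpu_float_bfloat16",
    "--compilation_mode=opt",   # placeholder for $(COMPILATION_MODE)
    "--cc_flags=-c opt",        # placeholder for $(CC_FLAGS)
]
subprocess.check_call(cmd)
```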