Commit 9a4878cc authored by Vijay Vasudevan, committed by TensorFlower Gardener

Rollback of: "Merge changes from github."

Change: 117304114
Parent fdc6752c
#!/usr/bin/env bash
#!/bin/bash
## Set up python-related environment settings
while true; do
......
......@@ -87,9 +87,6 @@ DEFINE_CONST(bool, bool_val);
DEFINE_CONST_IMPL(complex64, proto.add_scomplex_val(t.begin()->real());
proto.add_scomplex_val(t.begin()->imag()););
DEFINE_CONST_IMPL(complex128, proto.add_dcomplex_val(t.begin()->real());
proto.add_dcomplex_val(t.begin()->imag()););
Node* Const(StringPiece s, const GraphDefBuilder::Options& options) {
if (options.HaveError()) return nullptr;
NodeBuilder node_builder(options.GetNameForOp(OpName()), OpName(),
......
......@@ -49,7 +49,6 @@ DECLARE_CONST(uint8);
DECLARE_CONST(int16);
DECLARE_CONST(int8);
DECLARE_CONST(complex64);
DECLARE_CONST(complex128);
DECLARE_CONST(int64);
DECLARE_CONST(bool);
......
......@@ -21,8 +21,6 @@ import os.path
import threading
import uuid
from six.moves import range
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework.load_library import load_op_library
......@@ -225,7 +223,7 @@ class SdcaModel(object):
dense_features = self._convert_n_to_tensor(examples['dense_features'])
dense_variables = self._convert_n_to_tensor(self._variables[
'dense_features_weights'])
for i in range(len(dense_variables)):
for i in xrange(len(dense_variables)):
predictions += dense_features[i] * dense_variables[i]
return predictions
......
......@@ -187,14 +187,13 @@ class Allocator {
// is_simple<T>::value if T[] can be safely constructed and destructed
// without running T() and ~T(). We do not use std::is_trivial<T>
// directly because std::complex<float> and std::complex<double> are
// not trivial, but their arrays can be constructed and destructed
// without running their default ctors and dtors.
// directly because std::complex<float> is not trivial but its array
// can be constructed and destructed without running its default ctor
// and dtor.
template <typename T>
struct is_simple {
static const bool value = std::is_trivial<T>::value ||
std::is_same<T, complex64>::value ||
std::is_same<T, complex128>::value ||
is_quantized<T>::value;
};
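A minimal, standalone sketch of the property this comment relies on (illustrative only, not part of this change):
```
#include <complex>
#include <type_traits>

// std::complex<float> is not trivial because its default constructor
// zero-initializes, yet a bare array of two floats is trivial; the
// is_simple<T> trait above exists to treat complex types like the latter.
static_assert(!std::is_trivial<std::complex<float>>::value,
              "std::complex has a non-trivial default ctor");
static_assert(std::is_trivial<float[2]>::value,
              "an array of two floats is trivial");
```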
......
......@@ -151,7 +151,7 @@ TEST(NodeDefUtilTest, Out) {
ExpectFailure(bad, op,
"Value for attr 'T' of string is not in the list of allowed "
"values: float, double, int64, int32, uint8, uint16, int16, "
"int8, complex64, complex128, qint8, quint8, qint32");
"int8, complex64, qint8, quint8, qint32");
}
TEST(NodeDefUtilTest, Enum) {
......
......@@ -24,8 +24,6 @@ namespace tensorflow {
// Single precision complex.
typedef std::complex<float> complex64;
// Double precision complex.
typedef std::complex<double> complex128;
} // end namespace tensorflow
......
......@@ -113,7 +113,7 @@ TEST_F(OpDefBuilderTest, AttrWithRestrictions) {
ExpectSuccess(b().Attr("a:numbertype"),
"attr: { name: 'a' type: 'type' allowed_values { list { type: "
"[DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_UINT8, DT_INT16, "
"DT_UINT16, DT_INT8, DT_COMPLEX64, DT_COMPLEX128, DT_QINT8, DT_QUINT8, "
"DT_UINT16, DT_INT8, DT_COMPLEX64, DT_QINT8, DT_QUINT8, "
"DT_QINT32] } } }");
ExpectSuccess(b().Attr("a:realnumbertype"),
"attr: { name: 'a' type: 'type' allowed_values { list { type: "
......
......@@ -246,10 +246,6 @@ TEST_F(ValidateOpDefTest, BadAttrAllowed) {
TestBuilder(OpDefBuilder("BadAttrtude")
.Attr("x: list(realnumbertype) = [DT_COMPLEX64]")),
"attr 'x' of complex64 is not in the list of allowed values");
ExpectFailure(
TestBuilder(OpDefBuilder("BadAttrtude")
.Attr("x: list(realnumbertype) = [DT_COMPLEX128]")),
"attr 'x' of complex128 is not in the list of allowed values");
// Is in list of allowed strings.
TF_EXPECT_OK(TestBuilder(
OpDefBuilder("GoodAttrtude").Attr("x: {'foo', 'bar'} = 'bar'")));
......
......@@ -63,16 +63,14 @@ limitations under the License.
m(int16); \
m(int8)
// Call "m" for all number types, including complex64 and complex128.
// Call "m" for all number types, including complex64.
#define TF_CALL_NUMBER_TYPES(m) \
TF_CALL_REAL_NUMBER_TYPES(m); \
m(complex64); \
m(complex128)
m(complex64)
#define TF_CALL_NUMBER_TYPES_NO_INT32(m) \
TF_CALL_REAL_NUMBER_TYPES_NO_INT32(m); \
m(complex64); \
m(complex128)
m(complex64)
#define TF_CALL_POD_TYPES(m) \
TF_CALL_NUMBER_TYPES(m); \
......
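The TF_CALL_* macros above follow the X-macro pattern; a minimal sketch of that pattern, assuming hypothetical names CALL_DEMO_TYPES and PRINT_TYPE:
```
#include <cstdio>

// Each TF_CALL_* macro applies its argument macro "m" to every type in
// its list; kernel files use this to stamp out one registration per dtype.
#define CALL_DEMO_TYPES(m) \
  m(float);                \
  m(complex64)

#define PRINT_TYPE(T) printf("would register a kernel for %s\n", #T)

int main() {
  CALL_DEMO_TYPES(PRINT_TYPE);  // expands to one PRINT_TYPE(...) per type
  return 0;
}
```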
......@@ -215,22 +215,6 @@ struct ProtoHelper<complex64> {
}
};
template <>
struct ProtoHelper<complex128> {
typedef Helper<double>::RepeatedFieldType FieldType;
static const complex128* Begin(const TensorProto& proto) {
return reinterpret_cast<const complex128*>(proto.dcomplex_val().data());
}
static size_t NumElements(const TensorProto& proto) {
return proto.dcomplex_val().size() / 2;
}
static void Fill(const complex128* data, size_t n, TensorProto* proto) {
const double* p = reinterpret_cast<const double*>(data);
FieldType copy(p, p + n * 2);
proto->mutable_dcomplex_val()->Swap(&copy);
}
};
template <>
struct ProtoHelper<qint32> {
typedef Helper<int32>::RepeatedFieldType FieldType;
......@@ -401,7 +385,6 @@ void Tensor::UnsafeCopyFromInternal(const Tensor& other,
CASE(int8, SINGLE_ARG(STMTS)) \
CASE(string, SINGLE_ARG(STMTS)) \
CASE(complex64, SINGLE_ARG(STMTS)) \
CASE(complex128, SINGLE_ARG(STMTS)) \
CASE(int64, SINGLE_ARG(STMTS)) \
CASE(bool, SINGLE_ARG(STMTS)) \
CASE(qint32, SINGLE_ARG(STMTS)) \
......
......@@ -57,8 +57,4 @@ message TensorProto {
// DT_BOOL
repeated bool bool_val = 11 [packed = true];
// DT_COMPLEX128. dcomplex_val(2*i) and dcomplex_val(2*i+1) are real
// and imaginary parts of i-th double precision complex.
repeated double dcomplex_val = 12 [packed = true];
};
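The interleaved dcomplex_val layout described above can be sketched as follows; FillDComplex is a hypothetical helper mirroring ProtoHelper<complex128>::Fill, templated so the sketch stays self-contained:
```
#include <complex>
#include <cstddef>

// Serialize n double-precision complex numbers so that dcomplex_val(2*i)
// holds the real part and dcomplex_val(2*i+1) the imaginary part of the
// i-th element.
template <typename Proto>
void FillDComplex(const std::complex<double>* data, size_t n, Proto* proto) {
  for (size_t i = 0; i < n; ++i) {
    proto->add_dcomplex_val(data[i].real());
    proto->add_dcomplex_val(data[i].imag());
  }
}
```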
......@@ -47,17 +47,12 @@ TEST(TensorTest, DataType_Traits) {
// Unfortunately, std::complex::complex() initializes to (0, 0).
EXPECT_FALSE(std::is_trivial<complex64>::value);
EXPECT_FALSE(std::is_trivial<complex128>::value);
EXPECT_FALSE(std::is_trivial<std::complex<double>>::value);
EXPECT_TRUE(std::is_trivial<float[2]>::value);
EXPECT_TRUE(std::is_trivial<double[2]>::value);
struct MyComplex64 {
struct MyComplex {
float re, im;
};
EXPECT_TRUE(std::is_trivial<MyComplex64>::value);
struct MyComplex128 {
double re, im;
};
EXPECT_TRUE(std::is_trivial<MyComplex128>::value);
EXPECT_TRUE(std::is_trivial<MyComplex>::value);
}
template <typename T>
......@@ -425,19 +420,13 @@ TEST(Tensor_Bool, SimpleWithHelper) {
test::ExpectTensorEqual<bool>(t1, t2);
}
TEST(Tensor_Complex, Simple64) {
TEST(Tensor_Complex, Simple) {
Tensor t(DT_COMPLEX64, {4, 5, 3, 7});
t.flat<complex64>().setRandom();
TestCopies<complex64>(t);
}
TEST(Tensor_Complex, Simple128) {
Tensor t(DT_COMPLEX128, {4, 5, 3, 7});
t.flat<complex128>().setRandom();
TestCopies<complex128>(t);
}
TEST(Tensor_Complex, SimpleWithHelper64) {
TEST(Tensor_Complex, SimpleWithHelper) {
{
Tensor t1 = test::AsTensor<complex64>({0,
{1, 1},
......@@ -455,7 +444,7 @@ TEST(Tensor_Complex, SimpleWithHelper64) {
test::ExpectTensorEqual<complex64>(t2, t3);
}
// Does some numeric operations for complex64 numbers.
// Does some numeric operations for complex numbers.
{
const float PI = std::acos(-1);
const complex64 rotate_45 = std::polar(1.0f, PI / 4);
......@@ -486,55 +475,6 @@ TEST(Tensor_Complex, SimpleWithHelper64) {
}
}
TEST(Tensor_Complex, SimpleWithHelper128) {
{
Tensor t1 = test::AsTensor<complex128>({0,
{1, 1},
complex128(2),
complex128(3, 3),
complex128(0, 4),
complex128(2, 5)},
{2, 3});
Tensor t2(t1.dtype(), t1.shape());
t2.flat<complex128>() = t1.flat<complex128>() * complex128(0, 2);
Tensor t3 = test::AsTensor<complex128>(
{0, {-2, 2}, {0, 4}, {-6, 6}, {-8, 0}, {-10, 4}},
// shape
{2, 3});
test::ExpectTensorEqual<complex128>(t2, t3);
}
// Does some numeric operations for complex128 numbers.
{
const double PI = std::acos(-1);
const complex128 rotate_45 = std::polar(1.0, PI / 4);
// x contains all the 8-th root of unity.
Tensor x(DT_COMPLEX128, TensorShape({8}));
for (int i = 0; i < 8; ++i) {
x.vec<complex128>()(i) = std::pow(rotate_45, i);
}
// Shift the roots by 45 degree.
Tensor y(DT_COMPLEX128, TensorShape({8}));
y.vec<complex128>() = x.vec<complex128>() * rotate_45;
Tensor y_expected(DT_COMPLEX128, TensorShape({8}));
for (int i = 0; i < 8; ++i) {
y_expected.vec<complex128>()(i) = std::pow(rotate_45, i + 1);
}
test::ExpectTensorNear<complex128>(y, y_expected, 1e-5);
// Raise roots to the power of 8.
Tensor z(DT_COMPLEX128, TensorShape({8}));
z.vec<complex128>() = x.vec<complex128>().pow(8);
Tensor z_expected(DT_COMPLEX128, TensorShape({8}));
for (int i = 0; i < 8; ++i) {
z_expected.vec<complex128>()(i) = 1;
}
test::ExpectTensorNear<complex128>(z, z_expected, 1e-5);
}
}
// On the alignment.
//
// As of 2015/8, tensorflow::Tensor allocates its buffer with 32-byte
......
......@@ -127,12 +127,6 @@ inline void ExpectEqual<complex64>(const complex64& a, const complex64& b) {
EXPECT_FLOAT_EQ(a.imag(), b.imag()) << a << " vs. " << b;
}
template <>
void ExpectEqual<complex128>(const complex128& a, const complex128& b) {
EXPECT_DOUBLE_EQ(a.real(), b.real()) << a << " vs. " << b;
EXPECT_DOUBLE_EQ(a.imag(), b.imag()) << a << " vs. " << b;
}
inline void AssertSameTypeDims(const Tensor& x, const Tensor& y) {
ASSERT_EQ(x.dtype(), y.dtype());
ASSERT_TRUE(x.IsSameSize(y))
......
......@@ -64,8 +64,6 @@ string DataTypeString(DataType dtype) {
return "string";
case DT_COMPLEX64:
return "complex64";
case DT_COMPLEX128:
return "complex128";
case DT_INT64:
return "int64";
case DT_BOOL:
......@@ -127,9 +125,6 @@ bool DataTypeFromString(StringPiece sp, DataType* dt) {
} else if (sp == "complex64") {
*dt = DT_COMPLEX64;
return true;
} else if (sp == "complex128") {
*dt = DT_COMPLEX128;
return true;
} else if (sp == "int64") {
*dt = DT_INT64;
return true;
......@@ -170,10 +165,9 @@ string DataTypeSliceString(const DataTypeSlice types) {
}
DataTypeVector AllTypes() {
return {DT_FLOAT, DT_DOUBLE, DT_INT32, DT_UINT8, DT_INT16,
DT_UINT16, DT_INT8, DT_STRING, DT_COMPLEX64, DT_COMPLEX128,
DT_INT64, DT_BOOL, DT_QINT8, DT_QUINT8, DT_QINT16,
DT_QUINT16, DT_QINT32};
return {DT_FLOAT, DT_DOUBLE, DT_INT32, DT_UINT8, DT_INT16, DT_UINT16,
DT_INT8, DT_STRING, DT_COMPLEX64, DT_INT64, DT_BOOL, DT_QINT8,
DT_QUINT8, DT_QINT16, DT_QUINT16, DT_QINT32};
}
#if !defined(__ANDROID__)
......@@ -194,9 +188,8 @@ DataTypeVector RealAndQuantizedTypes() {
}
DataTypeVector NumberTypes() {
return {DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_UINT8,
DT_UINT16, DT_INT16, DT_INT8, DT_COMPLEX64, DT_COMPLEX128,
DT_QINT8, DT_QUINT8, DT_QINT32 };
return {DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_UINT8, DT_UINT16,
DT_INT16, DT_INT8, DT_COMPLEX64, DT_QINT8, DT_QUINT8, DT_QINT32};
}
#else // defined(__ANDROID__)
......@@ -230,7 +223,6 @@ bool DataTypeCanUseMemcpy(DataType dt) {
case DT_INT16:
case DT_INT8:
case DT_COMPLEX64:
case DT_COMPLEX128:
case DT_INT64:
case DT_BOOL:
case DT_QINT8:
......
......@@ -174,7 +174,6 @@ MATCH_TYPE_AND_ENUM(int16, DT_INT16);
MATCH_TYPE_AND_ENUM(int8, DT_INT8);
MATCH_TYPE_AND_ENUM(string, DT_STRING);
MATCH_TYPE_AND_ENUM(complex64, DT_COMPLEX64);
MATCH_TYPE_AND_ENUM(complex128, DT_COMPLEX128);
MATCH_TYPE_AND_ENUM(int64, DT_INT64);
MATCH_TYPE_AND_ENUM(bool, DT_BOOL);
MATCH_TYPE_AND_ENUM(qint8, DT_QINT8);
......
......@@ -30,10 +30,10 @@ enum DataType {
DT_QINT16 = 15; // Quantized int16
DT_QUINT16 = 16; // Quantized uint16
DT_UINT16 = 17;
DT_COMPLEX128 = 18; // Double-precision complex
// TODO(josh11b): DT_GENERIC_PROTO = ??;
// TODO(jeff,josh11b): DT_UINT64? DT_UINT32?
// TODO(zhifengc): DT_COMPLEX128 (double-precision complex)?
// Do not use! These are only for parameters. Every enum above
// should have a corresponding value below (verified by types_test).
......@@ -54,5 +54,4 @@ enum DataType {
DT_QINT16_REF = 115;
DT_QUINT16_REF = 116;
DT_UINT16_REF = 117;
DT_COMPLEX128_REF = 118;
}
......@@ -102,7 +102,6 @@ op {
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
......@@ -332,7 +331,6 @@ op {
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
......@@ -425,7 +423,6 @@ op {
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
......@@ -508,7 +505,6 @@ op {
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
......@@ -564,7 +560,6 @@ op {
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
......@@ -630,7 +625,6 @@ op {
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
......@@ -712,7 +706,6 @@ op {
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
......@@ -759,7 +752,6 @@ op {
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
......@@ -797,7 +789,6 @@ op {
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
......@@ -911,7 +902,6 @@ op {
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
......@@ -962,7 +952,6 @@ op {
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
......@@ -1389,7 +1378,6 @@ op {
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
......@@ -1475,7 +1463,6 @@ op {
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
......@@ -1551,7 +1538,6 @@ op {
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
......@@ -1601,7 +1587,6 @@ op {
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
......@@ -1656,7 +1641,6 @@ op {
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
......@@ -1690,7 +1674,6 @@ op {
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
......@@ -1711,7 +1694,6 @@ op {
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
......@@ -4338,7 +4320,6 @@ op {
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
......@@ -5128,7 +5109,6 @@ op {
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
......@@ -5439,7 +5419,6 @@ op {
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
......@@ -5541,7 +5520,6 @@ op {
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
......@@ -6442,7 +6420,6 @@ op {
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
......@@ -8155,7 +8132,6 @@ op {
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
......@@ -8221,7 +8197,6 @@ op {
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
......@@ -9171,7 +9146,6 @@ op {
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
......@@ -9269,7 +9243,6 @@ op {
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
......@@ -9351,7 +9324,6 @@ op {
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
......@@ -10262,7 +10234,6 @@ op {
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
......
......@@ -78,8 +78,7 @@ typedef enum {
TF_INT16 = 5,
TF_INT8 = 6,
TF_STRING = 7,
TF_COMPLEX64 = 8, // Single-precision complex
TF_COMPLEX = 8, // Old identifier kept for API backwards compatibility
TF_COMPLEX = 8, // Single-precision complex
TF_INT64 = 9,
TF_BOOL = 10,
TF_QINT8 = 11, // Quantized int8
......@@ -89,7 +88,6 @@ typedef enum {
TF_QINT16 = 15, // Quantized int16
TF_QUINT16 = 16, // Quantized uint16
TF_UINT16 = 17,
TF_COMPLEX128 = 18, // Double-precision complex
} TF_DataType;
// --------------------------------------------------------------------------
......
......@@ -108,7 +108,6 @@ TENSOR_PROTO_EXTRACT_TYPE(bool, bool, bool);
TENSOR_PROTO_EXTRACT_TYPE(float, float, float);
TENSOR_PROTO_EXTRACT_TYPE(double, double, double);
TENSOR_PROTO_EXTRACT_TYPE_COMPLEX(complex64, scomplex, float);
TENSOR_PROTO_EXTRACT_TYPE_COMPLEX(complex128, dcomplex, double);
TENSOR_PROTO_EXTRACT_TYPE(int32, int, int32);
TENSOR_PROTO_EXTRACT_TYPE(int64, int64, int64);
TENSOR_PROTO_EXTRACT_TYPE(uint8, int, int32);
......
......@@ -19,7 +19,7 @@ installed on your system.
3. The Android SDK and build tools may be obtained from:
https://developer.android.com/tools/revisions/build-tools.html
The Android entries in [`<workspace_root>/WORKSPACE`](../../../WORKSPACE#L2-L13) must be
The Android entries in [`<workspace_root>/WORKSPACE`](../../WORKSPACE) must be
uncommented with the paths filled in appropriately depending on where you
installed the NDK and SDK. Otherwise an error such as:
"The external label '//external:android/sdk' is not bound to anything" will
......@@ -45,8 +45,10 @@ your workspace root:
$ bazel build //tensorflow/examples/android:tensorflow_demo
```
If you get build errors about protocol buffers, run
`git submodule update --init` and build again.
If you get build errors about protocol buffers then you may have left out the
`--recurse-submodules` argument to `git clone`. Review the instructions
here and then build again:
https://www.tensorflow.org/versions/master/get_started/os_setup.html#clone-the-tensorflow-repository
If adb debugging is enabled on your Android 5.0 or later device, you may then
use the following command from your workspace root to install the APK once
......
......@@ -43,15 +43,15 @@ This uses the default example image that ships with the framework, and should
output something similar to this:
```
I tensorflow/examples/label_image/main.cc:207] military uniform (866): 0.647299
I tensorflow/examples/label_image/main.cc:207] suit (794): 0.0477195
I tensorflow/examples/label_image/main.cc:207] academic gown (896): 0.0232407
I tensorflow/examples/label_image/main.cc:207] bow tie (817): 0.0157355
I tensorflow/examples/label_image/main.cc:207] bolo tie (940): 0.0145023
I tensorflow/examples/label_image/main.cc:200] military uniform (866): 0.902268
I tensorflow/examples/label_image/main.cc:200] bow tie (817): 0.05407
I tensorflow/examples/label_image/main.cc:200] suit (794): 0.0113195
I tensorflow/examples/label_image/main.cc:200] bulletproof vest (833): 0.0100269
I tensorflow/examples/label_image/main.cc:200] bearskin (849): 0.00649746
```
In this case, we're using the default image of Admiral Grace Hopper, and you can
see the network correctly spots she's wearing a military uniform, with a high
score of 0.6.
score of 0.9.
Next, try it out on your own images by supplying the --image= argument, e.g.
......
......@@ -117,7 +117,7 @@
" print('Found and verified', filename)\n",
" else:\n",
" raise Exception(\n",
" 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n",
" 'Failed to verify' + filename + '. Can you get to it with a browser?')\n",
" return filename\n",
"\n",
"train_filename = maybe_download('notMNIST_large.tar.gz', 247336696)\n",
......
......@@ -67,7 +67,7 @@ NUM_EPOCHS_PER_DECAY = 350.0 # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1 # Initial learning rate.
# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# If a model is trained with multiple GPU's prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
......@@ -255,7 +255,7 @@ def inference(images):
def loss(logits, labels):
"""Add L2Loss to all the trainable variables.
Add summary for "Loss" and "Loss/avg".
Add summary for for "Loss" and "Loss/avg".
Args:
logits: Logits from inference().
labels: Labels from distorted_inputs or inputs(). 1-D tensor
......
......@@ -172,7 +172,7 @@ def distorted_inputs(data_dir, batch_size):
distorted_image = tf.image.random_flip_left_right(distorted_image)
# Because these operations are not commutative, consider randomizing
# the order their operation.
# randomize the order their operation.
distorted_image = tf.image.random_brightness(distorted_image,
max_delta=63)
distorted_image = tf.image.random_contrast(distorted_image,
......
......@@ -181,7 +181,6 @@ __all__.extend([
'bfloat16', 'bfloat16_ref',
'bool', 'bool_ref',
'complex64', 'complex64_ref',
'complex128', 'complex128_ref',
'double', 'double_ref',
'float32', 'float32_ref',
'float64', 'float64_ref',
......
......@@ -687,8 +687,7 @@ class SessionTest(test_util.TensorFlowTestCase):
dtypes.int8,
dtypes.int64,
dtypes.bool,
dtypes.complex64,
dtypes.complex128]:
dtypes.complex64]:
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
np_dtype = dtype.as_numpy_dtype
......@@ -701,8 +700,6 @@ class SessionTest(test_util.TensorFlowTestCase):
np_array = np_array > 0
elif dtype == dtypes.complex64:
np_array = np.sqrt(np_array.astype(np_dtype))
elif dtype == dtypes.complex64:
np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
......
/* Copyright 2016 Google Inc. All Rights Reserved.
/* Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
......@@ -121,10 +121,7 @@ Status PyArray_TYPE_to_TF_DataType(PyArrayObject* array,
*out_tf_datatype = TF_BOOL;
break;
case NPY_COMPLEX64:
*out_tf_datatype = TF_COMPLEX64;
break;
case NPY_COMPLEX128:
*out_tf_datatype = TF_COMPLEX128;
*out_tf_datatype = TF_COMPLEX;
break;
case NPY_OBJECT:
*out_tf_datatype = TF_STRING;
......@@ -171,12 +168,9 @@ Status TF_DataType_to_PyArray_TYPE(TF_DataType tf_datatype,
case TF_BOOL:
*out_pyarray_type = NPY_BOOL;
break;
case TF_COMPLEX64:
case TF_COMPLEX:
*out_pyarray_type = NPY_COMPLEX64;
break;
case TF_COMPLEX128:
*out_pyarray_type = NPY_COMPLEX128;
break;
case TF_STRING:
*out_pyarray_type = NPY_OBJECT;
break;
......
......@@ -32,7 +32,6 @@ class DType(object):
* `tf.float64`: 64-bit double-precision floating-point.
* `tf.bfloat16`: 16-bit truncated floating-point.
* `tf.complex64`: 64-bit single-precision complex.
* `tf.complex128`: 128-bit double-precision complex.
* `tf.int8`: 8-bit signed integer.
* `tf.uint8`: 8-bit unsigned integer.
......@@ -123,8 +122,6 @@ class DType(object):
base = self.base_dtype
if base == complex64:
return float32
elif base == complex128:
return float64
else:
return self
......@@ -152,7 +149,7 @@ class DType(object):
@property
def is_complex(self):
"""Returns whether this is a complex floating point type."""
return self.base_dtype in (complex64, complex128)
return self.base_dtype == complex64
@property
def is_quantized(self):
......@@ -182,8 +179,8 @@ class DType(object):
TypeError: if this is a non-numeric, unordered, or quantized type.
"""
if (self.is_quantized or self.base_dtype in
(bool, string, complex64, complex128)):
if (self.is_quantized or self.base_dtype == bool or
self.base_dtype == string or self.base_dtype == complex64):
raise TypeError("Cannot find minimum value of %s." % self)
# there is no simple way to get the min value of a dtype, we have to check
......@@ -204,8 +201,8 @@ class DType(object):
TypeError: if this is a non-numeric, unordered, or quantized type.
"""
if (self.is_quantized or self.base_dtype in
(bool, string, complex64, complex128)):
if (self.is_quantized or self.base_dtype == bool or
self.base_dtype == string or self.base_dtype == complex64):
raise TypeError("Cannot find maximum value of %s." % self)
# there is no simple way to get the min value of a dtype, we have to check
......@@ -280,7 +277,6 @@ int16 = DType(types_pb2.DT_INT16)
int8 = DType(types_pb2.DT_INT8)
string = DType(types_pb2.DT_STRING)
complex64 = DType(types_pb2.DT_COMPLEX64)
complex128 = DType(types_pb2.DT_COMPLEX128)
int64 = DType(types_pb2.DT_INT64)
bool = DType(types_pb2.DT_BOOL)
qint8 = DType(types_pb2.DT_QINT8)
......@@ -299,7 +295,6 @@ int16_ref = DType(types_pb2.DT_INT16_REF)
int8_ref = DType(types_pb2.DT_INT8_REF)
string_ref = DType(types_pb2.DT_STRING_REF)
complex64_ref = DType(types_pb2.DT_COMPLEX64_REF)
complex128_ref = DType(types_pb2.DT_COMPLEX128_REF)
int64_ref = DType(types_pb2.DT_INT64_REF)
bool_ref = DType(types_pb2.DT_BOOL_REF)
qint8_ref = DType(types_pb2.DT_QINT8_REF)
......@@ -322,7 +317,6 @@ _INTERN_TABLE = {
types_pb2.DT_INT8: int8,
types_pb2.DT_STRING: string,
types_pb2.DT_COMPLEX64: complex64,
types_pb2.DT_COMPLEX128: complex128,
types_pb2.DT_INT64: int64,
types_pb2.DT_BOOL: bool,
types_pb2.DT_QINT8: qint8,
......@@ -340,7 +334,6 @@ _INTERN_TABLE = {
types_pb2.DT_INT8_REF: int8_ref,
types_pb2.DT_STRING_REF: string_ref,
types_pb2.DT_COMPLEX64_REF: complex64_ref,
types_pb2.DT_COMPLEX128_REF: complex128_ref,
types_pb2.DT_INT64_REF: int64_ref,
types_pb2.DT_BOOL_REF: bool_ref,
types_pb2.DT_QINT8_REF: qint8_ref,
......@@ -363,7 +356,6 @@ _TYPE_TO_STRING = {
types_pb2.DT_INT8: "int8",
types_pb2.DT_STRING: "string",
types_pb2.DT_COMPLEX64: "complex64",
types_pb2.DT_COMPLEX128: "complex128",
types_pb2.DT_INT64: "int64",
types_pb2.DT_BOOL: "bool",
types_pb2.DT_QINT8: "qint8",
......@@ -381,7 +373,6 @@ _TYPE_TO_STRING = {
types_pb2.DT_INT8_REF: "int8_ref",
types_pb2.DT_STRING_REF: "string_ref",
types_pb2.DT_COMPLEX64_REF: "complex64_ref",
types_pb2.DT_COMPLEX128_REF: "complex128_ref",
types_pb2.DT_INT64_REF: "int64_ref",
types_pb2.DT_BOOL_REF: "bool_ref",
types_pb2.DT_QINT8_REF: "qint8_ref",
......@@ -423,7 +414,6 @@ _NP_TO_TF = frozenset([
(np.int16, int16),
(np.int8, int8),
(np.complex64, complex64),
(np.complex128, complex128),
(np.object, string),
(np.bool, bool),
(_np_qint8, qint8),
......@@ -445,7 +435,6 @@ _TF_TO_NP = {
# strings.
types_pb2.DT_STRING: np.object,
types_pb2.DT_COMPLEX64: np.complex64,
types_pb2.DT_COMPLEX128: np.complex128,
types_pb2.DT_INT64: np.int64,
types_pb2.DT_BOOL: np.bool,
types_pb2.DT_QINT8: _np_qint8,
......@@ -465,7 +454,6 @@ _TF_TO_NP = {
types_pb2.DT_INT8_REF: np.int8,
types_pb2.DT_STRING_REF: np.object,
types_pb2.DT_COMPLEX64_REF: np.complex64,
types_pb2.DT_COMPLEX128_REF: np.complex128,
types_pb2.DT_INT64_REF: np.int64,
types_pb2.DT_BOOL_REF: np.bool,
types_pb2.DT_QINT8_REF: _np_qint8,
......
......@@ -71,7 +71,6 @@ class TypesTest(test_util.TensorFlowTestCase):
self.assertIs(tf.int16, tf.as_dtype(np.int16))
self.assertIs(tf.int8, tf.as_dtype(np.int8))
self.assertIs(tf.complex64, tf.as_dtype(np.complex64))
self.assertIs(tf.complex128, tf.as_dtype(np.complex128))
self.assertIs(tf.string, tf.as_dtype(np.object))
self.assertIs(tf.string, tf.as_dtype(np.array(["foo", "bar"]).dtype))
self.assertIs(tf.bool, tf.as_dtype(np.bool))
......@@ -83,7 +82,6 @@ class TypesTest(test_util.TensorFlowTestCase):
tf.int32, tf.int64]:
self.assertIs(dtype.real_dtype, dtype)
self.assertIs(tf.complex64.real_dtype, tf.float32)
self.assertIs(tf.complex128.real_dtype, tf.float64)
def testStringConversion(self):
self.assertIs(tf.float32, tf.as_dtype("float32"))
......@@ -95,7 +93,6 @@ class TypesTest(test_util.TensorFlowTestCase):
self.assertIs(tf.int8, tf.as_dtype("int8"))
self.assertIs(tf.string, tf.as_dtype("string"))
self.assertIs(tf.complex64, tf.as_dtype("complex64"))
self.assertIs(tf.complex128, tf.as_dtype("complex128"))
self.assertIs(tf.int64, tf.as_dtype("int64"))
self.assertIs(tf.bool, tf.as_dtype("bool"))
self.assertIs(tf.qint8, tf.as_dtype("qint8"))
......@@ -110,7 +107,6 @@ class TypesTest(test_util.TensorFlowTestCase):
self.assertIs(tf.int8_ref, tf.as_dtype("int8_ref"))
self.assertIs(tf.string_ref, tf.as_dtype("string_ref"))
self.assertIs(tf.complex64_ref, tf.as_dtype("complex64_ref"))
self.assertIs(tf.complex128_ref, tf.as_dtype("complex128_ref"))
self.assertIs(tf.int64_ref, tf.as_dtype("int64_ref"))
self.assertIs(tf.bool_ref, tf.as_dtype("bool_ref"))
self.assertIs(tf.qint8_ref, tf.as_dtype("qint8_ref"))
......@@ -139,7 +135,6 @@ class TypesTest(test_util.TensorFlowTestCase):
self.assertEqual(tf.as_dtype("uint8").is_integer, True)
self.assertEqual(tf.as_dtype("uint16").is_integer, True)
self.assertEqual(tf.as_dtype("complex64").is_integer, False)
self.assertEqual(tf.as_dtype("complex128").is_integer, False)
self.assertEqual(tf.as_dtype("float").is_integer, False)
self.assertEqual(tf.as_dtype("double").is_integer, False)
self.assertEqual(tf.as_dtype("string").is_integer, False)
......@@ -153,7 +148,6 @@ class TypesTest(test_util.TensorFlowTestCase):
self.assertEqual(tf.as_dtype("uint8").is_floating, False)
self.assertEqual(tf.as_dtype("uint16").is_floating, False)
self.assertEqual(tf.as_dtype("complex64").is_floating, False)
self.assertEqual(tf.as_dtype("complex128").is_floating, False)
self.assertEqual(tf.as_dtype("float32").is_floating, True)
self.assertEqual(tf.as_dtype("float64").is_floating, True)
self.assertEqual(tf.as_dtype("string").is_floating, False)
......@@ -167,7 +161,6 @@ class TypesTest(test_util.TensorFlowTestCase):
self.assertEqual(tf.as_dtype("uint8").is_complex, False)
self.assertEqual(tf.as_dtype("uint16").is_complex, False)
self.assertEqual(tf.as_dtype("complex64").is_complex, True)
self.assertEqual(tf.as_dtype("complex128").is_complex, True)
self.assertEqual(tf.as_dtype("float32").is_complex, False)
self.assertEqual(tf.as_dtype("float64").is_complex, False)
self.assertEqual(tf.as_dtype("string").is_complex, False)
......@@ -185,7 +178,6 @@ class TypesTest(test_util.TensorFlowTestCase):
self.assertEqual(tf.as_dtype("bool").is_unsigned, False)
self.assertEqual(tf.as_dtype("string").is_unsigned, False)
self.assertEqual(tf.as_dtype("complex64").is_unsigned, False)
self.assertEqual(tf.as_dtype("complex128").is_unsigned, False)
def testMinMax(self):
# make sure min/max evaluates for all data types that have min/max
......@@ -200,8 +192,7 @@ class TypesTest(test_util.TensorFlowTestCase):
if (dtype.is_quantized or
dtype.base_dtype == tf.bool or
dtype.base_dtype == tf.string or
dtype.base_dtype == tf.complex64 or
dtype.base_dtype == tf.complex128):
dtype.base_dtype == tf.complex64):
continue
print("%s: %s - %s" % (dtype, dtype.min, dtype.max))
......
......@@ -1289,7 +1289,7 @@ class ColocationGroupTest(test_util.TensorFlowTestCase):
with ops.colocate_with(a.op):
with ops.colocate_with(b.op, ignore_existing=True):
c = constant_op.constant(4.0)
self.assertEqual(set([b"loc:@b"]), set(c.op.colocation_groups()))
self.assertEqual(set(["loc:@b"]), set(c.op.colocation_groups()))
def testColocateVariables(self):
a = variables.Variable([2.0], name="a")
......
......@@ -76,16 +76,11 @@ else:
def SlowAppendInt64ArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.int64_val.extend([np.asscalar(x) for x in proto_values])
def SlowAppendComplex64ArrayToTensorProto(tensor_proto, proto_values):
def SlowAppendComplexArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.scomplex_val.extend([np.asscalar(v)
for x in proto_values
for v in [x.real, x.imag]])
def SlowAppendComplex128ArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.dcomplex_val.extend([np.asscalar(v)
for x in proto_values
for v in [x.real, x.imag]])
def SlowAppendObjectArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.string_val.extend([compat.as_bytes(x) for x in proto_values])
......@@ -101,8 +96,8 @@ else:
np.uint16: SlowAppendIntArrayToTensorProto,
np.int16: SlowAppendIntArrayToTensorProto,
np.int8: SlowAppendIntArrayToTensorProto,
np.complex64: SlowAppendComplex64ArrayToTensorProto,
np.complex128: SlowAppendComplex128ArrayToTensorProto,
np.complex64: SlowAppendComplexArrayToTensorProto,
np.complex128: SlowAppendComplexArrayToTensorProto,
np.object: SlowAppendObjectArrayToTensorProto,
np.bool: SlowAppendBoolArrayToTensorProto,
dtypes.qint8.as_numpy_dtype: SlowAppendIntArrayToTensorProto,
......@@ -245,7 +240,6 @@ _TF_TO_IS_OK = {
dtypes.int8: _FilterInt,
dtypes.string: _FilterStr,
dtypes.complex64: _FilterComplex,
dtypes.complex128: _FilterComplex,
dtypes.int64: _FilterInt,
dtypes.bool: _FilterBool,
dtypes.qint32: _FilterInt,
......@@ -459,15 +453,6 @@ def MakeNdarray(tensor):
else:
return np.array([complex(x[0], x[1]) for x in zip(it, it)],
dtype=dtype).reshape(shape)
elif tensor_dtype == dtypes.complex128:
it = iter(tensor.dcomplex_val)
if len(tensor.dcomplex_val) == 2:
return np.repeat(np.array(complex(tensor.dcomplex_val[0],
tensor.dcomplex_val[1]), dtype=dtype),
num_elements).reshape(shape)
else:
return np.array([complex(x[0], x[1]) for x in zip(it, it)],
dtype=dtype).reshape(shape)
elif tensor_dtype == dtypes.bool:
if len(tensor.bool_val) == 1:
return np.repeat(np.array(tensor.bool_val[0], dtype=dtype),
......
......@@ -274,7 +274,7 @@ class TensorUtilTest(tf.test.TestCase):
self.assertEquals(np.object, a.dtype)
self.assertAllEqual(np.array([[b"a", b"ab"], [b"abc", b"abcd"]]), a)
def testComplex64(self):
def testComplex(self):
t = tensor_util.make_tensor_proto((1+2j), dtype=tf.complex64)
self.assertProtoEquals("""
dtype: DT_COMPLEX64
......@@ -286,30 +286,16 @@ class TensorUtilTest(tf.test.TestCase):
self.assertEquals(np.complex64, a.dtype)
self.assertAllEqual(np.array(1 + 2j), a)
def testComplex128(self):
t = tensor_util.make_tensor_proto((1+2j), dtype=tf.complex128)
self.assertProtoEquals("""
dtype: DT_COMPLEX128
tensor_shape {}
dcomplex_val: 1
dcomplex_val: 2
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.complex128, a.dtype)
self.assertAllEqual(np.array(1 + 2j), a)
def testComplexWithImplicitRepeat(self):
for dtype, np_dtype in [(tf.complex64, np.complex64),
(tf.complex128, np.complex128)]:
t = tensor_util.make_tensor_proto((1+1j), shape=[3, 4],
dtype=dtype)
a = tensor_util.MakeNdarray(t)
self.assertAllClose(np.array([[(1+1j), (1+1j), (1+1j), (1+1j)],
[(1+1j), (1+1j), (1+1j), (1+1j)],
[(1+1j), (1+1j), (1+1j), (1+1j)]],
dtype=np_dtype), a)
t = tensor_util.make_tensor_proto((1+1j), shape=[3, 4],
dtype=tf.complex64)
a = tensor_util.MakeNdarray(t)
self.assertAllClose(np.array([[(1+1j), (1+1j), (1+1j), (1+1j)],
[(1+1j), (1+1j), (1+1j), (1+1j)],
[(1+1j), (1+1j), (1+1j), (1+1j)]],
dtype=np.complex64), a)
def testComplex64N(self):
def testComplexN(self):
t = tensor_util.make_tensor_proto([(1+2j), (3+4j), (5+6j)], shape=[1, 3],
dtype=tf.complex64)
self.assertProtoEquals("""
......@@ -326,24 +312,7 @@ class TensorUtilTest(tf.test.TestCase):
self.assertEquals(np.complex64, a.dtype)
self.assertAllEqual(np.array([[(1+2j), (3+4j), (5+6j)]]), a)
def testComplex128N(self):
t = tensor_util.make_tensor_proto([(1+2j), (3+4j), (5+6j)], shape=[1, 3],
dtype=tf.complex128)
self.assertProtoEquals("""
dtype: DT_COMPLEX128
tensor_shape { dim { size: 1 } dim { size: 3 } }
dcomplex_val: 1
dcomplex_val: 2
dcomplex_val: 3
dcomplex_val: 4
dcomplex_val: 5
dcomplex_val: 6
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.complex128, a.dtype)
self.assertAllEqual(np.array([[(1+2j), (3+4j), (5+6j)]]), a)
def testComplex64NpArray(self):
def testComplexNpArray(self):
t = tensor_util.make_tensor_proto(
np.array([[(1+2j), (3+4j)], [(5+6j), (7+8j)]]), dtype=tf.complex64)
# scomplex_val are real_0, imag_0, real_1, imag_1, ...
......@@ -363,26 +332,6 @@ class TensorUtilTest(tf.test.TestCase):
self.assertEquals(np.complex64, a.dtype)
self.assertAllEqual(np.array([[(1+2j), (3+4j)], [(5+6j), (7+8j)]]), a)
def testComplex128NpArray(self):
t = tensor_util.make_tensor_proto(
np.array([[(1+2j), (3+4j)], [(5+6j), (7+8j)]]), dtype=tf.complex128)
# scomplex_val are real_0, imag_0, real_1, imag_1, ...
self.assertProtoEquals("""
dtype: DT_COMPLEX128
tensor_shape { dim { size: 2 } dim { size: 2 } }
dcomplex_val: 1
dcomplex_val: 2
dcomplex_val: 3
dcomplex_val: 4
dcomplex_val: 5
dcomplex_val: 6
dcomplex_val: 7
dcomplex_val: 8
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.complex128, a.dtype)
self.assertAllEqual(np.array([[(1+2j), (3+4j)], [(5+6j), (7+8j)]]), a)
def testUnsupportedDType(self):
with self.assertRaises(TypeError):
tensor_util.make_tensor_proto(np.array([1]), 0)
......
......@@ -99,9 +99,6 @@ Status TfDTypeToNpDType(const DataType& tf, int* np) {
case DT_COMPLEX64:
*np = NPY_COMPLEX64;
break;
case DT_COMPLEX128:
*np = NPY_COMPLEX128;
break;
case DT_STRING:
*np = NPY_OBJECT;
break;
......@@ -213,9 +210,6 @@ Status NumericNpDTypeToTfDType(const int np, DataType* tf) {
case NPY_COMPLEX64:
*tf = DT_COMPLEX64;
break;
case NPY_COMPLEX128:
*tf = DT_COMPLEX128;
break;
default:
return errors::Unimplemented("Unsupported numpy type ", np);
}
......
......@@ -326,7 +326,7 @@ class SessionManager(object):
try:
sess.run(self._ready_op)
return None
except errors.FailedPreconditionError as e:
except errors.FailedPreconditionError, e:
if "uninitialized" not in str(e):
logging.warning("Model not ready raised: %s", str(e))
raise e
......
......@@ -7,7 +7,6 @@ COPY install/*.sh /install/
RUN /install/install_bootstrap_deb_packages.sh
RUN add-apt-repository -y ppa:openjdk-r/ppa
RUN /install/install_deb_packages.sh
RUN /install/install_pip_packages.sh
RUN /install/install_bazel.sh
# Set up bazelrc.
......
......@@ -7,7 +7,6 @@ COPY install/*.sh /install/
RUN /install/install_bootstrap_deb_packages.sh
RUN add-apt-repository -y ppa:openjdk-r/ppa
RUN /install/install_deb_packages.sh
RUN /install/install_pip_packages.sh
RUN /install/install_bazel.sh
# Set up bazelrc.
......
......@@ -22,20 +22,11 @@
# pip.sh CONTAINER_TYPE [--test_tutorials]
#
# When executing the Python unit tests, the script obeys the shell
# variables: TF_BUILD_BAZEL_CLEAN, TF_BUILD_INSTALL_EXTRA_PIP_PACKAGES,
# TF_BUILD_NO_CACHING_VIRTUALENV, NO_TEST_ON_INSTALL
# variables: TF_BUILD_BAZEL_CLEAN, NO_TEST_ON_INSTALL
#
# TF_BUILD_BAZEL_CLEAN, if set to any non-empty and non-0 value, directs the
# script to perform bazel clean prior to main build and test steps.
#
# TF_BUILD_INSTALL_EXTRA_PIP_PACKAGES overrides the default extra pip packages
# to be installed in virtualenv before test_installation.sh is called. Multiple
# package names are separated with spaces.
#
# TF_BUILD_NO_CACHING_VIRTUALENV: If set to any non-empty and non-0 value,
# will cause the script to force remove any existing (cached) virtualenv
# directory.
#
# If NO_TEST_ON_INSTALL has any non-empty and non-0 value, the test-on-install
# part will be skipped.
#
......@@ -44,8 +35,6 @@
# installation and the Python unit tests-on-install step.
#
INSTALL_EXTRA_PIP_PACKAGES=${TF_BUILD_INSTALL_EXTRA_PIP_PACKAGES}
# Helper functions
# Get the absolute path from a path
abs_path() {
......@@ -122,7 +111,7 @@ PIP_WHL_DIR="${PIP_TEST_ROOT}/whl"
PIP_WHL_DIR=$(abs_path ${PIP_WHL_DIR}) # Get absolute path
rm -rf ${PIP_WHL_DIR} && mkdir -p ${PIP_WHL_DIR}
bazel-bin/tensorflow/tools/pip_package/build_pip_package ${PIP_WHL_DIR} || \
die "build_pip_package FAILED"
die "build_pip_package FAILED"
# Perform installation
WHL_PATH=$(ls ${PIP_WHL_DIR}/tensorflow*.whl)
......@@ -136,46 +125,27 @@ echo "whl file path = ${WHL_PATH}"
# Install, in user's local home folder
echo "Installing pip whl file: ${WHL_PATH}"
# Create virtualenv directory for install test
# Create temporary directory for install test
VENV_DIR="${PIP_TEST_ROOT}/venv"
if [[ -d "${VENV_DIR}" ]] &&
[[ ! -z "${TF_BUILD_NO_CACHING_VIRTUALENV}" ]] &&
[[ "${TF_BUILD_NO_CACHING_VIRTUALENV}" != "0" ]]; then
echo "TF_BUILD_NO_CACHING_VIRTUALENV=${TF_BUILD_NO_CACHING_VIRTUALENV}:"
echo "Removing existing virtualenv directory: ${VENV_DIR}"
rm -rf "${VENV_DIR}" || \
die "Failed to remove existing virtualenv directory: ${VENV_DIR}"
fi
mkdir -p ${VENV_DIR} || \
die "FAILED to create virtualenv directory: ${VENV_DIR}"
rm -rf "${VENV_DIR}" && mkdir -p "${VENV_DIR}"
echo "Create directory for virtualenv: ${VENV_DIR}"
# Verify that virtualenv exists
if [[ -z $(which virtualenv) ]]; then
die "FAILED: virtualenv not available on path"
fi
virtualenv --system-site-packages -p "${PYTHON_BIN_PATH}" "${VENV_DIR}" || \
die "FAILED: Unable to create virtualenv"
source "${VENV_DIR}/bin/activate" || \
die "FAILED: Unable to activate virtualenv"
virtualenv -p "${PYTHON_BIN_PATH}" "${VENV_DIR}" ||
die "FAILED: Unable to create virtualenv"
source "${VENV_DIR}/bin/activate" ||
die "FAILED: Unable to activate virtualenv"
# Install the pip file in virtual env
pip install -v --force-reinstall ${WHL_PATH} \
pip install -v ${WHL_PATH} \
&& echo "Successfully installed pip package ${WHL_PATH}" \
|| die "pip install (without --upgrade) FAILED"
# Install extra pip packages required by the test-on-install
for PACKAGE in ${INSTALL_EXTRA_PIP_PACKAGES}; do
echo "Installing extra pip package required by test-on-install: ${PACKAGE}"
pip install ${PACKAGE} || \
die "pip install ${PACKAGE} FAILED"
done
# If NO_TEST_ON_INSTALL is set to any non-empty value, skip all Python
# tests-on-install and exit right away
if [[ ! -z "${NO_TEST_ON_INSTALL}" ]] &&
......@@ -188,14 +158,14 @@ fi
# Call test_installation.sh to perform test-on-install
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
"${DIR}/test_installation.sh" --virtualenv || \
die "PIP tests-on-install FAILED"
"${DIR}/test_installation.sh" --virtualenv ||
die "PIP tests-on-install FAILED"
# Optional: Run the tutorial tests
if [[ "${DO_TEST_TUTORIALS}" == "1" ]]; then
"${DIR}/test_tutorials.sh" --virtualenv || \
die "PIP tutorial tests-on-install FAILED"
"${DIR}/test_tutorials.sh" --virtualenv ||
die "PIP tutorial tests-on-install FAILED"
fi
deactivate || \
die "FAILED: Unable to deactivate virtualenv"
deactivate ||
die "FAILED: Unable to deactivate virtualenv"
......@@ -166,8 +166,7 @@ cp -r tensorflow/core/lib/png ${PY_TEST_DIR}/tensorflow/core/lib
# Run tests
DIR0=$(pwd)
ALL_PY_TESTS=$(find tensorflow/{contrib,examples,models,python,tensorboard} \
-type f \( -name "*_test.py" -o -name "test_*.py" \) | sort)
ALL_PY_TESTS=$(find tensorflow/{contrib,examples,models,python,tensorboard} -name "*_test.py" | sort)
# TODO(cais): Add tests in tensorflow/contrib
PY_TEST_COUNT=$(echo ${ALL_PY_TESTS} | wc -w)
......
......@@ -306,7 +306,7 @@ if [[ "${DO_DOCKER}" == "1" ]]; then
fi
# Write to the tmp script
echo "#!/usr/bin/env bash" > ${TMP_SCRIPT}
echo "#!/bin/bash" > ${TMP_SCRIPT}
if [[ ! -z "${TF_BUILD_BAZEL_CLEAN}" ]] &&
[[ "${TF_BUILD_BAZEL_CLEAN}" != "0" ]]; then
echo ${BAZEL_CLEAN_CMD} >> ${TMP_SCRIPT}
......
......@@ -29,12 +29,10 @@ apt-get install -y \
python-dev \
python-numpy \
python-pip \
python-scipy \
python-virtualenv \
python3-dev \
python3-numpy \
python3-pip \
python3-scipy \
sudo \
swig \
unzip \
......
#!/usr/bin/env bash
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
set -e
pip install sklearn
pip3 install scikit-learn
#!/usr/bin/env bash
#!/bin/bash
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
......
#!/usr/bin/env bash
#!/bin/bash
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
......
#!/usr/bin/env bash
#!/bin/bash
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
......
#!/usr/bin/env bash
#!/bin/bash -eux
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
......@@ -14,8 +14,6 @@
# limitations under the License.
# ==============================================================================
set -eux
TFDIR=$TEST_SRCDIR/tensorflow
DOXYGEN=doxygen
DOXYGEN_CONFIG="tf-doxy_for_md-config"
......
#!/usr/bin/env bash
#!/bin/bash
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
......
#!/usr/bin/env bash
#!/bin/bash
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
......
......@@ -53,21 +53,23 @@ py_binary(
# Unit test that calls run_and_gather_logs on a benchmark, and
# prints the result.
#cuda_py_test(
# name = "run_and_gather_logs_test",
# srcs = ["run_and_gather_logs.py"],
# additional_deps = [
# ":run_and_gather_logs",
# ],
# args = [
# "--test_name=" + "//tensorflow/core/kernels:cast_op_test",
# "--test_args=" + "'--benchmarks=BM_cpu_float'",
# ],
# data = [
# "//tensorflow/core/kernels:cast_op_test",
# ],
# main = "run_and_gather_logs.py",
#)
cuda_py_test(
name = "run_and_gather_logs_test",
srcs = ["run_and_gather_logs.py"],
additional_deps = [
":run_and_gather_logs",
],
args = [
"--test_name=" + "//tensorflow/core/kernels:cast_op_test",
"--test_args=" + "'--benchmarks=BM_cpu_float_bfloat16'",
"--compilation_mode='$(COMPILATION_MODE)'",
"--cc_flags='$(CC_FLAGS)'",
],
data = [
"//tensorflow/core/kernels:cast_op_test",
],
main = "run_and_gather_logs.py",
)
filegroup(
name = "all_files",
......
#!/usr/bin/env bash
#!/bin/bash
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
......
#!/usr/bin/env bash
#!/bin/bash
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
......