Commit ffb89899 authored by Dero Gharibian, committed by TensorFlower Gardener

Migrated a subset of kernels to use tstring.

This is part of a larger migration effort toward tensorflow::tstring.
See: https://github.com/tensorflow/community/pull/91
PiperOrigin-RevId: 262958277
Parent ad5a6f6f
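The change below is a mechanical, one-for-one substitution: kernel registrations, TF_CALL_* dispatch macros, and test helpers that previously named TensorFlow's string alias now name tensorflow::tstring. At this stage of the RFC the swap is intended to be behavior-preserving, since tstring starts life as an alias of std::string and only later becomes the new string class. As a reading aid, here is a minimal sketch of what a migrated CPU kernel looks like; the ToUpper op and its body are hypothetical illustrations, not code from this commit:

#include <cctype>

#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"

namespace tensorflow {

// Hypothetical kernel, for illustration only: element access and kernel
// registration name tstring where they previously named string.
class ToUpperOp : public OpKernel {
 public:
  explicit ToUpperOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}

  void Compute(OpKernelContext* ctx) override {
    const Tensor& input = ctx->input(0);
    Tensor* output = nullptr;
    OP_REQUIRES_OK(ctx, ctx->allocate_output(0, input.shape(), &output));
    auto in = input.flat<tstring>();    // was: input.flat<string>()
    auto out = output->flat<tstring>();
    for (int64 i = 0; i < in.size(); ++i) {
      tstring s = in(i);
      for (auto& c : s) c = std::toupper(static_cast<unsigned char>(c));
      out(i) = s;
    }
  }
};

// The type constraint now names tstring; DT_STRING stays the dtype.
REGISTER_KERNEL_BUILDER(
    Name("ToUpper").Device(DEVICE_CPU).TypeConstraint<tstring>("T"),
    ToUpperOp);

}  // namespace tensorflow

Because only the C++-level type name changes, the op's DT_STRING dtype and existing serialized graphs are untouched.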
@@ -78,10 +78,10 @@ REGISTER(uint64)
 #if defined(IS_MOBILE_PLATFORM) && !defined(SUPPORT_SELECTIVE_REGISTRATION) && \
     !defined(__ANDROID_TYPES_FULL__)
-// Primarily used for SavedModel support on mobile. Registering it here only
-// if __ANDROID_TYPES_FULL__ is not defined (which already registers string)
-// to avoid duplicate registration.
-REGISTER(string);
+// Primarily used for SavedModel support on mobile. Registering it here only
+// if __ANDROID_TYPES_FULL__ is not defined (which already registers string)
+// to avoid duplicate registration.
+REGISTER(tstring);
 #endif  // defined(IS_MOBILE_PLATFORM) &&
         // !defined(SUPPORT_SELECTIVE_REGISTRATION) &&
         // !defined(__ANDROID_TYPES_FULL__)
@@ -145,8 +145,8 @@ REGISTER_GPU_HOST_KERNEL(int32);
 REGISTER_GPU_HOST_REF_KERNEL(int32);
 REGISTER_GPU_HOST_KERNEL(bool);
 REGISTER_GPU_HOST_REF_KERNEL(bool);
-REGISTER_GPU_HOST_KERNEL(string);
-REGISTER_GPU_HOST_REF_KERNEL(string);
+REGISTER_GPU_HOST_KERNEL(tstring);
+REGISTER_GPU_HOST_REF_KERNEL(tstring);
 REGISTER_GPU_HOST_KERNEL(ResourceHandle);
 #undef REGISTER_GPU_HOST_KERNEL
@@ -183,7 +183,7 @@ TF_CALL_REAL_NUMBER_TYPES_NO_INT32(REGISTER_SYCL_REF_SWITCH);
                           SwitchOp)
 REGISTER_SYCL_HOST_KERNEL(bool);
-REGISTER_SYCL_HOST_KERNEL(string);
+REGISTER_SYCL_HOST_KERNEL(tstring);
 REGISTER_SYCL_HOST_KERNEL(int32);
 #define REGISTER_SYCL_HOST_REF_KERNEL(type) \
@@ -198,7 +198,7 @@ REGISTER_SYCL_HOST_KERNEL(int32);
 REGISTER_SYCL_HOST_REF_KERNEL(int32);
 REGISTER_SYCL_HOST_REF_KERNEL(bool);
-REGISTER_SYCL_HOST_REF_KERNEL(string);
+REGISTER_SYCL_HOST_REF_KERNEL(tstring);
 #undef REGISTER_SYCL_HOST_KERNEL
 #undef REGISTER_SYCL_HOST_REF_KERNEL
@@ -350,7 +350,7 @@ TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_SYCL_REF_KERNEL);
                           MergeOp)
 REGISTER_GPU_HOST_KERNEL(int32);
-REGISTER_GPU_HOST_KERNEL(string);
+REGISTER_GPU_HOST_KERNEL(tstring);
 REGISTER_GPU_HOST_KERNEL(ResourceHandle);
 #undef REGISTER_GPU_HOST_KERNEL
@@ -373,7 +373,7 @@ REGISTER_GPU_HOST_KERNEL(ResourceHandle);
                           MergeOp)
 REGISTER_SYCL_HOST_KERNEL(int32);
-REGISTER_SYCL_HOST_KERNEL(string);
+REGISTER_SYCL_HOST_KERNEL(tstring);
 REGISTER_SYCL_HOST_KERNEL(ResourceHandle);
 #undef REGISTER_SYCL_HOST_KERNEL
@@ -439,8 +439,8 @@ TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_SYCL_REF_KERNEL);
 REGISTER_SYCL_HOST_KERNEL(int32);
 REGISTER_SYCL_HOST_REF_KERNEL(int32);
-REGISTER_SYCL_HOST_KERNEL(string);
-REGISTER_SYCL_HOST_REF_KERNEL(string);
+REGISTER_SYCL_HOST_KERNEL(tstring);
+REGISTER_SYCL_HOST_REF_KERNEL(tstring);
 REGISTER_SYCL_HOST_KERNEL(ResourceHandle);
 #undef REGISTER_SYCL_HOST_KERNEL
@@ -468,8 +468,8 @@ REGISTER_SYCL_HOST_KERNEL(ResourceHandle);
 REGISTER_GPU_HOST_KERNEL(int32);
 REGISTER_GPU_HOST_REF_KERNEL(int32);
-REGISTER_GPU_HOST_KERNEL(string);
-REGISTER_GPU_HOST_REF_KERNEL(string);
+REGISTER_GPU_HOST_KERNEL(tstring);
+REGISTER_GPU_HOST_REF_KERNEL(tstring);
 REGISTER_GPU_HOST_KERNEL(ResourceHandle);
 #undef REGISTER_GPU_HOST_KERNEL
@@ -529,7 +529,7 @@ TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_SYCL_KERNEL);
                           ExitOp)
 REGISTER_SYCL_HOST_KERNEL(int32);
-REGISTER_SYCL_HOST_KERNEL(string);
+REGISTER_SYCL_HOST_KERNEL(tstring);
 #undef REGISTER_SYCL_HOST_KERNEL
 #endif  // TENSORFLOW_USE_SYCL
@@ -551,7 +551,7 @@ REGISTER_SYCL_HOST_KERNEL(string);
                           ExitOp)
 REGISTER_GPU_HOST_KERNEL(int32);
-REGISTER_GPU_HOST_KERNEL(string);
+REGISTER_GPU_HOST_KERNEL(tstring);
 REGISTER_GPU_HOST_KERNEL(ResourceHandle);
 #undef REGISTER_GPU_HOST_KERNEL
@@ -601,7 +601,7 @@ TF_CALL_variant(REGISTER_GPU_KERNEL);
                           NextIterationOp)
 REGISTER_GPU_HOST_KERNEL(int32);
-REGISTER_GPU_HOST_KERNEL(string);
+REGISTER_GPU_HOST_KERNEL(tstring);
 REGISTER_GPU_HOST_KERNEL(ResourceHandle);
 #undef REGISTER_GPU_HOST_KERNEL
@@ -634,7 +634,7 @@ TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_SYCL_KERNEL);
                           NextIterationOp)
 REGISTER_SYCL_HOST_KERNEL(int32);
-REGISTER_SYCL_HOST_KERNEL(string);
+REGISTER_SYCL_HOST_KERNEL(tstring);
 #undef REGISTER_SYCL_HOST_KERNEL
 #endif  // TENSORFLOW_USE_SYCL
@@ -71,12 +71,12 @@ TEST_F(SwitchOpTest, Int32Success_2_3_s0) {
 TEST_F(SwitchOpTest, StringSuccess_s1) {
   Initialize(DT_STRING);
-  AddInputFromArray<string>(TensorShape({6}), {"A", "b", "C", "d", "E", "f"});
+  AddInputFromArray<tstring>(TensorShape({6}), {"A", "b", "C", "d", "E", "f"});
   AddInputFromArray<bool>(TensorShape({}), {true});
   TF_ASSERT_OK(RunOpKernel());
   Tensor expected(allocator(), DT_STRING, TensorShape({6}));
-  test::FillValues<string>(&expected, {"A", "b", "C", "d", "E", "f"});
-  test::ExpectTensorEqual<string>(expected, *GetOutput(1));
+  test::FillValues<tstring>(&expected, {"A", "b", "C", "d", "E", "f"});
+  test::ExpectTensorEqual<tstring>(expected, *GetOutput(1));
   EXPECT_EQ(nullptr, GetOutput(0));
 }
@@ -23,7 +23,7 @@ namespace tensorflow {
 #if !defined(__ANDROID_TYPES_SLIM__)
 REGISTER6(BinaryOp, CPU, "Add", functor::add, int8, int16, complex64, uint8,
-          complex128, string);
+          complex128, tstring);
 // Notice: String is excluded to allow marking AddV2 is_commutative and
 // is_aggregate.
 REGISTER5(BinaryOp, CPU, "AddV2", functor::add, int8, int16, complex64, uint8,
@@ -23,7 +23,7 @@ namespace tensorflow {
 #if !defined(__ANDROID_TYPES_SLIM__)
 REGISTER6(BinaryOp, CPU, "Equal", functor::equal_to, int32, int64, complex64,
-          complex128, string, bool);
+          complex128, tstring, bool);
 #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
 REGISTER6(BinaryOp, GPU, "Equal", functor::equal_to, int8, int16, int64,
           complex64, complex128, bool);
@@ -23,7 +23,7 @@ namespace tensorflow {
 #if !defined(__ANDROID_TYPES_SLIM__)
 REGISTER6(BinaryOp, CPU, "NotEqual", functor::not_equal_to, int32, int64,
-          complex64, complex128, string, bool);
+          complex64, complex128, tstring, bool);
 #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
 REGISTER6(BinaryOp, GPU, "NotEqual", functor::not_equal_to, int8, int16, int64,
           complex64, complex128, bool);
@@ -32,8 +32,8 @@ namespace functor {
 template <>
 struct DenseUpdate<CPUDevice, string, ASSIGN> {
-  void operator()(const CPUDevice& d, typename TTypes<string>::Flat params,
-                  typename TTypes<string>::ConstFlat update) {
+  void operator()(const CPUDevice& d, typename TTypes<tstring>::Flat params,
+                  typename TTypes<tstring>::ConstFlat update) {
     if (params.dimension(0) == 1) {
       params.data()->resize(update.data()->size());
       auto work = [&params, &update](int64 start, int64 end) {
@@ -57,9 +57,9 @@ struct DenseUpdate<CPUDevice, string, ASSIGN> {
       // first element of the tensor seems as good a guess as any of the sizes
      // of the strings contained within...
       estimated_string_size =
-          std::max(update.data()[0].size(), sizeof(string));
+          std::max(update.data()[0].size(), sizeof(tstring));
     } else {
-      estimated_string_size = sizeof(string);
+      estimated_string_size = sizeof(tstring);
     }
     d.parallelFor(
         params.dimension(0),
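The estimated_string_size computed in this hunk feeds Eigen's cost model so the thread pool can choose shard sizes for the copy loop. A self-contained sketch of that idiom, assuming the call the hunk cuts off passes an Eigen::TensorOpCost built from estimated_string_size (the wrapper below is illustrative, not code from this commit):

#define EIGEN_USE_THREADS
#include <cstdint>
#include <functional>
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"

// Shard [0, num_rows) across the device's thread pool, letting Eigen pick
// the block size from a per-element cost estimate (bytes read, bytes
// written, compute cycles).
void RunSharded(const Eigen::ThreadPoolDevice& d, std::int64_t num_rows,
                std::int64_t estimated_string_size,
                const std::function<void(std::int64_t, std::int64_t)>& work) {
  const Eigen::TensorOpCost cost(/*bytes_loaded=*/estimated_string_size,
                                 /*bytes_stored=*/estimated_string_size,
                                 /*compute_cycles=*/0);
  d.parallelFor(num_rows, cost, work);
}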
@@ -283,7 +283,7 @@ class DeserializeSparseOp : public OpKernel {
 REGISTER_KERNEL_BUILDER(Name("DeserializeSparse")
                             .Device(DEVICE_CPU)
-                            .TypeConstraint<string>("Tserialized"),
+                            .TypeConstraint<tstring>("Tserialized"),
                         DeserializeSparseOp)
 REGISTER_KERNEL_BUILDER(Name("DeserializeManySparse").Device(DEVICE_CPU),
@@ -32,9 +32,9 @@ void SetZeroFunctor<Eigen::ThreadPoolDevice, T>::operator()(
   out.device(d) = out.constant(T(0));
 }
-void SetZeroFunctor<Eigen::ThreadPoolDevice, string>::operator()(
-    const Eigen::ThreadPoolDevice& d, typename TTypes<string>::Flat out) {
-  out.device(d) = out.constant(string());
+void SetZeroFunctor<Eigen::ThreadPoolDevice, tstring>::operator()(
+    const Eigen::ThreadPoolDevice& d, typename TTypes<tstring>::Flat out) {
+  out.device(d) = out.constant(tstring());
 }
 // Explicit instantiations.
@@ -54,9 +54,9 @@ struct SetZeroFunctor<Eigen::SyclDevice, T> {
 #endif  // TENSORFLOW_USE_SYCL
 template <>
-struct SetZeroFunctor<Eigen::ThreadPoolDevice, string> {
+struct SetZeroFunctor<Eigen::ThreadPoolDevice, tstring> {
   void operator()(const Eigen::ThreadPoolDevice& d,
-                  typename TTypes<string>::Flat out);
+                  typename TTypes<tstring>::Flat out);
 };
 template <typename Device, typename T>
@@ -81,9 +81,9 @@ struct SetOneFunctor<Eigen::SyclDevice, T> {
 #endif  // TENSORFLOW_USE_SYCL
 template <>
-struct SetOneFunctor<Eigen::ThreadPoolDevice, string> {
+struct SetOneFunctor<Eigen::ThreadPoolDevice, tstring> {
   void operator()(const Eigen::ThreadPoolDevice& d,
-                  typename TTypes<string>::Flat out);
+                  typename TTypes<tstring>::Flat out);
 };
 }  // namespace functor
@@ -120,7 +120,7 @@ REGISTER_KERNEL_BUILDER(Name(kArgOp)
 REGISTER_KERNEL_BUILDER(Name(kArgOp)
                             .Device(DEVICE_GPU)
                             .HostMemory("output")
-                            .TypeConstraint<string>("T"),
+                            .TypeConstraint<tstring>("T"),
                         ArgOp);
 REGISTER_KERNEL_BUILDER(
@@ -148,7 +148,7 @@ REGISTER_KERNEL_BUILDER(Name(kRetOp)
 REGISTER_KERNEL_BUILDER(Name(kRetOp)
                             .Device(DEVICE_GPU)
-                            .TypeConstraint<string>("T")
+                            .TypeConstraint<tstring>("T")
                             .HostMemory("input"),
                         RetvalOp);
 #undef REGISTER
@@ -64,12 +64,12 @@ TEST_F(IdentityNOpTest, Int32Success_2_3) {
 TEST_F(IdentityNOpTest, StringInt32Success) {
   TF_ASSERT_OK(Init(DT_STRING, DT_INT32));
-  AddInputFromArray<string>(TensorShape({6}), {"A", "b", "C", "d", "E", "f"});
+  AddInputFromArray<tstring>(TensorShape({6}), {"A", "b", "C", "d", "E", "f"});
   AddInputFromArray<int32>(TensorShape({8}), {1, 3, 5, 7, 9, 11, 13, 15});
   TF_ASSERT_OK(RunOpKernel());
   Tensor expected0(allocator(), DT_STRING, TensorShape({6}));
-  test::FillValues<string>(&expected0, {"A", "b", "C", "d", "E", "f"});
-  test::ExpectTensorEqual<string>(expected0, *GetOutput(0));
+  test::FillValues<tstring>(&expected0, {"A", "b", "C", "d", "E", "f"});
+  test::ExpectTensorEqual<tstring>(expected0, *GetOutput(0));
   Tensor expected1(allocator(), DT_INT32, TensorShape({8}));
   test::FillValues<int32>(&expected1, {1, 3, 5, 7, 9, 11, 13, 15});
   test::ExpectTensorEqual<int32>(expected1, *GetOutput(1));
@@ -158,7 +158,7 @@ REGISTER_GPU_KERNEL(Variant);
 REGISTER_GPU_HOST_KERNEL(int32);
 REGISTER_GPU_HOST_KERNEL(bool);
-REGISTER_GPU_HOST_KERNEL(string);
+REGISTER_GPU_HOST_KERNEL(tstring);
 REGISTER_GPU_HOST_KERNEL(ResourceHandle);
 #undef REGISTER_GPU_HOST_KERNEL
@@ -56,11 +56,11 @@ TEST_F(IdentityOpTest, Int32Success_2_3) {
 TEST_F(IdentityOpTest, StringSuccess) {
   TF_ASSERT_OK(Init(DT_STRING));
-  AddInputFromArray<string>(TensorShape({6}), {"A", "b", "C", "d", "E", "f"});
+  AddInputFromArray<tstring>(TensorShape({6}), {"A", "b", "C", "d", "E", "f"});
   TF_ASSERT_OK(RunOpKernel());
   Tensor expected(allocator(), DT_STRING, TensorShape({6}));
-  test::FillValues<string>(&expected, {"A", "b", "C", "d", "E", "f"});
-  test::ExpectTensorEqual<string>(expected, *GetOutput(0));
+  test::FillValues<tstring>(&expected, {"A", "b", "C", "d", "E", "f"});
+  test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
 }
 TEST_F(IdentityOpTest, RefInputError) { TF_ASSERT_OK(Init(DT_INT32_REF)); }
@@ -51,7 +51,7 @@ Status DoParallelConcat(const CPUDevice& d, const Tensor& value, int32 loc,
   case DataTypeToEnum<type>::value: \
     return DoParallelConcatUpdate<CPUDevice, type>(d, value, loc, output);
     TF_CALL_POD_TYPES(CASE);
-    TF_CALL_string(CASE);
+    TF_CALL_tstring(CASE);
     TF_CALL_variant(CASE);
 #undef CASE
     default:
@@ -416,7 +416,7 @@ Status DoCopy(const CPUDevice& device, const Tensor& x, Tensor* y) {
     TF_CALL_NUMBER_TYPES(CASE);
     TF_CALL_bool(CASE);
-    TF_CALL_string(CASE);
+    TF_CALL_tstring(CASE);
 #undef CASE
     default:
       return errors::InvalidArgument("Unsupported data type: ",
@@ -477,7 +477,7 @@ REGISTER_KERNEL_BUILDER(Name("DeepCopy").Device(DEVICE_CPU), CopyOp<CPUDevice>);
 REGISTER_EMPTY(float, CPU)
 REGISTER_EMPTY(double, CPU)
 REGISTER_EMPTY(Eigen::half, CPU)
-REGISTER_EMPTY(string, CPU)
+REGISTER_EMPTY(tstring, CPU)
 REGISTER_EMPTY(int32, CPU)
 REGISTER_EMPTY(int64, CPU)
 REGISTER_EMPTY(bool, CPU)
@@ -104,7 +104,7 @@ class ListDiffOp : public OpKernel {
                           ListDiffOp<type, int64>)
 TF_CALL_REAL_NUMBER_TYPES(REGISTER_LISTDIFF);
-REGISTER_LISTDIFF(string);
+REGISTER_LISTDIFF(tstring);
 #undef REGISTER_LISTDIFF
 }  // namespace tensorflow
@@ -173,7 +173,7 @@ namespace functor {
   DECLARE_CPU_SPEC(T, int64, 5);
 TF_CALL_POD_TYPES(DECLARE_CPU_SPECS);
-TF_CALL_string(DECLARE_CPU_SPECS);
+TF_CALL_tstring(DECLARE_CPU_SPECS);
 #undef DECLARE_CPU_SPEC
 #undef DECLARE_CPU_SPECS
@@ -195,7 +195,7 @@ TF_CALL_string(DECLARE_CPU_SPECS);
 // Note that we do register for bool type, but not in the gradient op.
 TF_CALL_POD_TYPES(REGISTER_KERNEL);
-TF_CALL_string(REGISTER_KERNEL);
+TF_CALL_tstring(REGISTER_KERNEL);
 #undef REGISTER_KERNEL
 #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
@@ -29,7 +29,7 @@ using CpuDevice = Eigen::ThreadPoolDevice;
   template struct functor::MirrorPad<CpuDevice, T, int32, CPU_PROVIDED_IXDIM>; \
   template struct functor::MirrorPad<CpuDevice, T, int64, CPU_PROVIDED_IXDIM>;
 TF_CALL_POD_TYPES(DEFINE_CPU_SPECS);
-TF_CALL_string(DEFINE_CPU_SPECS);
+TF_CALL_tstring(DEFINE_CPU_SPECS);
 #undef DEFINE_CPU_SPECS
 #define DEFINE_CPU_SPECS(T) \
@@ -142,7 +142,7 @@ TF_CALL_QUANTIZED_TYPES(REGISTER_PACK);
 #if defined(IS_MOBILE_PLATFORM) && !defined(SUPPORT_SELECTIVE_REGISTRATION)
 // Primarily used for SavedModel support on mobile.
-REGISTER_PACK(string);
+REGISTER_PACK(tstring);
 #endif  // defined(IS_MOBILE_PLATFORM) &&
         // !defined(SUPPORT_SELECTIVE_REGISTRATION)
@@ -291,7 +291,7 @@ class PadOp : public OpKernel {
                           PadOp<CPUDevice, type, int64>);
 TF_CALL_POD_TYPES(REGISTER_KERNEL);
-TF_CALL_string(REGISTER_KERNEL);
+TF_CALL_tstring(REGISTER_KERNEL);
 #undef REGISTER_KERNEL
 #if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \
@@ -292,7 +292,7 @@ class RaggedGatherOp : public RaggedGatherOpBase<INDEX_TYPE, SPLITS_TYPE> {
   REGISTER_CPU_KERNEL_WITH_INDEX_TYPE(int32, value_type, int64) \
   REGISTER_CPU_KERNEL_WITH_INDEX_TYPE(int64, value_type, int64)
 TF_CALL_POD_TYPES(REGISTER_CPU_KERNEL);
-TF_CALL_string(REGISTER_CPU_KERNEL);
+TF_CALL_tstring(REGISTER_CPU_KERNEL);
 TF_CALL_QUANTIZED_TYPES(REGISTER_CPU_KERNEL);
 TF_CALL_quint16(REGISTER_CPU_KERNEL);
 TF_CALL_qint16(REGISTER_CPU_KERNEL);
@@ -303,7 +303,7 @@ class RaggedTensorFromVariantOp : public OpKernel {
   REGISTER_KERNELS_WITH_SPLIT_TYPE(value_type, int32) \
   REGISTER_KERNELS_WITH_SPLIT_TYPE(value_type, int64)
 TF_CALL_POD_TYPES(REGISTER_KERNELS);
-TF_CALL_string(REGISTER_KERNELS);
+TF_CALL_tstring(REGISTER_KERNELS);
 TF_CALL_QUANTIZED_TYPES(REGISTER_KERNELS);
 TF_CALL_quint16(REGISTER_KERNELS);
 TF_CALL_qint16(REGISTER_KERNELS);
@@ -601,7 +601,7 @@ TEST_F(RaggedTensorFromVariantKernelTest, RaggedValuesTypeMismatch) {
       {component_split_1_1}, TensorShape({1}), component_values_1);
   int input_ragged_rank = 1;
   int output_ragged_rank = 2;
-  BuildDecodeRaggedTensorGraph<string, int64>(
+  BuildDecodeRaggedTensorGraph<tstring, int64>(
       input_ragged_rank, output_ragged_rank, TensorShape({1}),
       {variant_component_1});
   EXPECT_TRUE(absl::StartsWith(RunOpKernel().error_message(),
@@ -210,7 +210,7 @@ class RaggedTensorToVariantOp : public OpKernel {
   REGISTER_KERNELS_WITH_SPLIT_TYPE(value_type, int32) \
   REGISTER_KERNELS_WITH_SPLIT_TYPE(value_type, int64)
 TF_CALL_POD_TYPES(REGISTER_KERNELS);
-TF_CALL_string(REGISTER_KERNELS);
+TF_CALL_tstring(REGISTER_KERNELS);
 TF_CALL_QUANTIZED_TYPES(REGISTER_KERNELS);
 TF_CALL_quint16(REGISTER_KERNELS);
 TF_CALL_qint16(REGISTER_KERNELS);
@@ -950,7 +950,7 @@ class ResourceScatterUpdateOp : public OpKernel {
 TF_CALL_NUMBER_TYPES(REGISTER_SCATTER_ARITHMETIC_CPU);
 TF_CALL_REAL_NUMBER_TYPES(REGISTER_SCATTER_MINMAX_CPU);
-REGISTER_SCATTER_KERNEL(string, CPU, "ResourceScatterUpdate",
+REGISTER_SCATTER_KERNEL(tstring, CPU, "ResourceScatterUpdate",
                         scatter_op::UpdateOp::ASSIGN);
 REGISTER_SCATTER_KERNEL(bool, CPU, "ResourceScatterUpdate",
                         scatter_op::UpdateOp::ASSIGN);
@@ -314,7 +314,7 @@ class ReverseV2Op : public OpKernel {
                               .HostMemory("axis"), \
                           ReverseV2Op<CPUDevice, T, int64>)
 TF_CALL_POD_TYPES(REGISTER_KERNELS);
-TF_CALL_string(REGISTER_KERNELS);
+TF_CALL_tstring(REGISTER_KERNELS);
 #undef REGISTER_KERNELS
 #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
@@ -378,7 +378,7 @@ class ScatterNdUpdateOp : public OpKernel {
 TF_CALL_NUMBER_TYPES(REGISTER_SCATTER_ND_ADD_SUB_CPU);
 TF_CALL_NUMBER_TYPES(REGISTER_SCATTER_ND_UPDATE_CPU);
 TF_CALL_NUMBER_TYPES(REGISTER_SCATTER_ND_CPU);
-TF_CALL_string(REGISTER_SCATTER_ND_CPU);
+TF_CALL_tstring(REGISTER_SCATTER_ND_CPU);
 TF_CALL_bool(REGISTER_SCATTER_ND_ADD_SUB_CPU);
 TF_CALL_bool(REGISTER_SCATTER_ND_UPDATE_CPU);
 TF_CALL_bool(REGISTER_SCATTER_ND_CPU);
@@ -428,7 +428,7 @@ TF_CALL_bool(REGISTER_SCATTER_ND_CPU);
 // Register TensorScatterUpdate/Add/Sub for all number types.
 TF_CALL_NUMBER_TYPES(REGISTER_SCATTER_ND_TENSOR_CPU);
 // Register only TensorScatterUpdate for string/bool types as well.
-TF_CALL_string(REGISTER_SCATTER_ND_TENSOR_UPDATE_CPU);
+TF_CALL_tstring(REGISTER_SCATTER_ND_TENSOR_UPDATE_CPU);
 TF_CALL_bool(REGISTER_SCATTER_ND_TENSOR_UPDATE_CPU);
 #undef REGISTER_SCATTER_ND_TENSOR_CPU
@@ -160,7 +160,7 @@ struct ScatterNdFunctor<CPUDevice, T, Index, OP, IXDIM> {
   REGISTER_SCATTER_ND_INDEX(type, scatter_nd_op::UpdateOp::SUB);
 TF_CALL_ALL_TYPES(REGISTER_SCATTER_ND_UPDATE);
-REGISTER_SCATTER_ND_INDEX(string, scatter_nd_op::UpdateOp::ADD);
+REGISTER_SCATTER_ND_INDEX(tstring, scatter_nd_op::UpdateOp::ADD);
 TF_CALL_NUMBER_TYPES(REGISTER_SCATTER_ND_MATH);
 TF_CALL_bool(REGISTER_SCATTER_ND_MATH);
 #undef REGISTER_SCATTER_ND_MATH
@@ -51,15 +51,15 @@ class ScatterNdUpdateOpTest : public OpsTestBase {
 // TODO(simister): Re-enable this once binary size is under control.
 // TEST_F(ScatterNdUpdateOpTest, Simple_StringType) {
 //   MakeOp(DT_STRING_REF, DT_INT32);
-//   AddInputFromArray<string>(TensorShape({1}), {"Brain"});
+//   AddInputFromArray<tstring>(TensorShape({1}), {"Brain"});
 //   AddInputFromArray<int32>(TensorShape({1}), {0});
-//   AddInputFromArray<string>(TensorShape({1}), {"TensorFlow"});
+//   AddInputFromArray<tstring>(TensorShape({1}), {"TensorFlow"});
 //   TF_ASSERT_OK(RunOpKernel());
 //   // Check the new state of the input
 //   Tensor params_tensor = *mutable_input(0).tensor;
 //   Tensor expected(allocator(), DT_STRING, TensorShape({1}));
-//   test::FillValues<string>(&expected, {"TensorFlow"});
-//   test::ExpectTensorEqual<string>(expected, params_tensor);
+//   test::FillValues<tstring>(&expected, {"TensorFlow"});
+//   test::ExpectTensorEqual<tstring>(expected, params_tensor);
 // }
 // TEST_F(ScatterNdUpdateOpTest, Simple_BoolType) {
@@ -50,15 +50,15 @@ class ScatterUpdateOpTest : public OpsTestBase {
 TEST_F(ScatterUpdateOpTest, Simple_StringType) {
   MakeOp(DT_STRING_REF, DT_INT32);
-  AddInputFromArray<string>(TensorShape({1}), {"Brain"});
+  AddInputFromArray<tstring>(TensorShape({1}), {"Brain"});
   AddInputFromArray<int32>(TensorShape({1}), {0});
-  AddInputFromArray<string>(TensorShape({1}), {"TensorFlow"});
+  AddInputFromArray<tstring>(TensorShape({1}), {"TensorFlow"});
   TF_ASSERT_OK(RunOpKernel());
   // Check the new state of the input
   Tensor params_tensor = *mutable_input(0).tensor;
   Tensor expected(allocator(), DT_STRING, TensorShape({1}));
-  test::FillValues<string>(&expected, {"TensorFlow"});
-  test::ExpectTensorEqual<string>(expected, params_tensor);
+  test::FillValues<tstring>(&expected, {"TensorFlow"});
+  test::ExpectTensorEqual<tstring>(expected, params_tensor);
 }
 TEST_F(ScatterUpdateOpTest, Simple_BoolType) {
@@ -93,7 +93,7 @@ class SerializeSparseOp : public OpKernel {
 // performs O(1) shallow copies (and hence is much cheaper than
 // dispatching to another thread would be).
 template <>
-bool SerializeSparseOp<string>::IsExpensive() {
+bool SerializeSparseOp<tstring>::IsExpensive() {
   return true;
 }
 template <>
@@ -102,14 +102,14 @@ bool SerializeSparseOp<Variant>::IsExpensive() {
 }
 template <>
-Status SerializeSparseOp<string>::Initialize(Tensor* result) {
+Status SerializeSparseOp<tstring>::Initialize(Tensor* result) {
   *result = Tensor(DT_STRING, TensorShape({3}));
   return Status::OK();
 }
 template <>
-Status SerializeSparseOp<string>::Serialize(const Tensor& input,
-                                            string* result) {
+Status SerializeSparseOp<tstring>::Serialize(const Tensor& input,
+                                             tstring* result) {
   TensorProto proto;
   input.AsProtoTensorContent(&proto);
   *result = proto.SerializeAsString();
@@ -118,8 +118,8 @@ Status SerializeSparseOp<string>::Serialize(const Tensor& input,
 REGISTER_KERNEL_BUILDER(Name("SerializeSparse")
                             .Device(DEVICE_CPU)
-                            .TypeConstraint<string>("out_type"),
-                        SerializeSparseOp<string>);
+                            .TypeConstraint<tstring>("out_type"),
+                        SerializeSparseOp<tstring>);
 template <>
 Status SerializeSparseOp<Variant>::Initialize(Tensor* result) {
@@ -261,27 +261,27 @@ class SerializeManySparseOp : public SerializeManySparseOpBase<U> {
 };
 template <>
-Status SerializeManySparseOpBase<string>::Initialize(const int64 n,
-                                                     Tensor* result) {
+Status SerializeManySparseOpBase<tstring>::Initialize(const int64 n,
+                                                      Tensor* result) {
   *result = Tensor(DT_STRING, TensorShape({n, 3}));
   return Status::OK();
 }
 template <>
-Status SerializeManySparseOpBase<string>::Serialize(const Tensor& input,
-                                                    string* result) {
+Status SerializeManySparseOpBase<tstring>::Serialize(const Tensor& input,
+                                                     tstring* result) {
   TensorProto proto;
   input.AsProtoTensorContent(&proto);
   *result = proto.SerializeAsString();
   return Status::OK();
 }
-#define REGISTER_KERNELS(type)                                     \
-  REGISTER_KERNEL_BUILDER(Name("SerializeManySparse")              \
-                              .Device(DEVICE_CPU)                  \
-                              .TypeConstraint<type>("T")           \
-                              .TypeConstraint<string>("out_type"), \
-                          SerializeManySparseOp<type, string>)
+#define REGISTER_KERNELS(type)                                      \
+  REGISTER_KERNEL_BUILDER(Name("SerializeManySparse")               \
+                              .Device(DEVICE_CPU)                   \
+                              .TypeConstraint<type>("T")            \
+                              .TypeConstraint<tstring>("out_type"), \
+                          SerializeManySparseOp<type, tstring>)
 TF_CALL_ALL_TYPES(REGISTER_KERNELS);
 #undef REGISTER_KERNELS
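A side note on the Serialize specializations above: proto.SerializeAsString() returns a std::string, and assigning it through the tstring* out-parameter relies on tstring keeping std::string interop (trivially true while tstring is still an alias). A standalone sketch; the helper name is made up and the header paths assume this era's layout:

#include <string>
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/platform/types.h"

namespace tensorflow {

// Hypothetical helper, illustration only: protobuf hands back a
// std::string; assigning it into a tstring compiles both while tstring is
// a std::string alias and, later, through tstring's std::string
// assignment operator.
tstring SerializeTensorProto(const TensorProto& proto) {
  tstring result;
  result = proto.SerializeAsString();
  return result;
}

}  // namespace tensorflow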
@@ -291,7 +291,7 @@ _SET_SIZE_REGISTER_KERNEL_BUILDER(int32);
 _SET_SIZE_REGISTER_KERNEL_BUILDER(int64);
 _SET_SIZE_REGISTER_KERNEL_BUILDER(uint8);
 _SET_SIZE_REGISTER_KERNEL_BUILDER(uint16);
-_SET_SIZE_REGISTER_KERNEL_BUILDER(string);
+_SET_SIZE_REGISTER_KERNEL_BUILDER(tstring);
 #undef _SET_SIZE_REGISTER_KERNEL_BUILDER
 enum InputTypes {
@@ -716,7 +716,7 @@ _DENSE_TO_DENSE_SET_OPERATION_REGISTER_KERNEL_BUILDER(int32);
 _DENSE_TO_DENSE_SET_OPERATION_REGISTER_KERNEL_BUILDER(int64);
 _DENSE_TO_DENSE_SET_OPERATION_REGISTER_KERNEL_BUILDER(uint8);
 _DENSE_TO_DENSE_SET_OPERATION_REGISTER_KERNEL_BUILDER(uint16);
-_DENSE_TO_DENSE_SET_OPERATION_REGISTER_KERNEL_BUILDER(string);
+_DENSE_TO_DENSE_SET_OPERATION_REGISTER_KERNEL_BUILDER(tstring);
 #undef _DENSE_TO_DENSE_SET_OPERATION_REGISTER_KERNEL_BUILDER
 template <typename T>
@@ -737,7 +737,7 @@ _DENSE_TO_SPARSE_SET_OPERATION_REGISTER_KERNEL_BUILDER(int32);
 _DENSE_TO_SPARSE_SET_OPERATION_REGISTER_KERNEL_BUILDER(int64);
 _DENSE_TO_SPARSE_SET_OPERATION_REGISTER_KERNEL_BUILDER(uint8);
 _DENSE_TO_SPARSE_SET_OPERATION_REGISTER_KERNEL_BUILDER(uint16);
-_DENSE_TO_SPARSE_SET_OPERATION_REGISTER_KERNEL_BUILDER(string);
+_DENSE_TO_SPARSE_SET_OPERATION_REGISTER_KERNEL_BUILDER(tstring);
 #undef _DENSE_TO_SPARSE_SET_OPERATION_REGISTER_KERNEL_BUILDER
 template <typename T>
@@ -758,7 +758,7 @@ _SPARSE_TO_SPARSE_SET_OPERATION_REGISTER_KERNEL_BUILDER(int32);
 _SPARSE_TO_SPARSE_SET_OPERATION_REGISTER_KERNEL_BUILDER(int64);
 _SPARSE_TO_SPARSE_SET_OPERATION_REGISTER_KERNEL_BUILDER(uint8);
 _SPARSE_TO_SPARSE_SET_OPERATION_REGISTER_KERNEL_BUILDER(uint16);
-_SPARSE_TO_SPARSE_SET_OPERATION_REGISTER_KERNEL_BUILDER(string);
+_SPARSE_TO_SPARSE_SET_OPERATION_REGISTER_KERNEL_BUILDER(tstring);
 #undef _SPARSE_TO_SPARSE_SET_OPERATION_REGISTER_KERNEL_BUILDER
 }  // namespace tensorflow
@@ -546,7 +546,7 @@ REGISTER_GPU_KERNEL(Variant);
 REGISTER_GPU_HOST_KERNEL(int32);
 REGISTER_GPU_HOST_KERNEL(bool);
-REGISTER_GPU_HOST_KERNEL(string);
+REGISTER_GPU_HOST_KERNEL(tstring);
 REGISTER_GPU_HOST_KERNEL(ResourceHandle);
 #undef REGISTER_GPU_HOST_KERNEL
@@ -81,7 +81,7 @@ TF_CALL_int64(DEFINE_TYPE);
 TF_CALL_half(DEFINE_TYPE);
 TF_CALL_complex64(DEFINE_TYPE);
 TF_CALL_complex128(DEFINE_TYPE);
-TF_CALL_string(DEFINE_TYPE);
+TF_CALL_tstring(DEFINE_TYPE);
 #undef DEFINE_TYPE
@@ -142,7 +142,7 @@ TF_CALL_int64(DECLARE_TYPE);
 TF_CALL_half(DECLARE_TYPE);
 TF_CALL_complex64(DECLARE_TYPE);
 TF_CALL_complex128(DECLARE_TYPE);
-TF_CALL_string(DECLARE_TYPE);
+TF_CALL_tstring(DECLARE_TYPE);
 #undef DECLARE_TYPE
 #define DECLARE_DIM(T, NDIM) \
@@ -241,7 +241,7 @@ class TileOp : public OpKernel {
 TF_CALL_int16(HANDLE_TYPE_NAME);
 TF_CALL_int64(HANDLE_TYPE_NAME);
 TF_CALL_half(HANDLE_TYPE_NAME);
-TF_CALL_string(HANDLE_TYPE_NAME);   // when DEVICE=CPUDevice.
+TF_CALL_tstring(HANDLE_TYPE_NAME);  // when DEVICE=CPUDevice.
 TF_CALL_complex64(HANDLE_TYPE_NAME);
 TF_CALL_complex128(HANDLE_TYPE_NAME);
@@ -322,7 +322,7 @@ TF_CALL_int64(HANDLE_TYPE_NAME_CPU);
 TF_CALL_half(HANDLE_TYPE_NAME_CPU);
 TF_CALL_complex64(HANDLE_TYPE_NAME_CPU);
 TF_CALL_complex128(HANDLE_TYPE_NAME_CPU);
-TF_CALL_string(HANDLE_TYPE_NAME_CPU);
+TF_CALL_tstring(HANDLE_TYPE_NAME_CPU);
 #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
 TF_CALL_bool(HANDLE_TYPE_NAME_GPU);