Commit 844aa275 authored by Penporn Koanantakool, committed by TensorFlower Gardener

Fix clang compilation errors in TF-MKL variant.

PiperOrigin-RevId: 257656880
Parent: 765f71b8
......@@ -253,6 +253,7 @@ class MklCPUAllocator : public Allocator {
auto l_stats = large_size_allocator_->GetStats();
// Combine statistics from small-size and large-size allocator.
mutex_lock l(mutex_);
stats_.num_allocs = l_stats->num_allocs + s_stats->num_allocs;
stats_.bytes_in_use = l_stats->bytes_in_use + s_stats->bytes_in_use;
stats_.peak_bytes_in_use =
......
......@@ -34,7 +34,6 @@ load(
)
load(
"//third_party/mkl:build_defs.bzl",
"if_mkl",
"if_mkl_ml",
"mkl_deps",
)
......@@ -7465,8 +7464,10 @@ tf_mkl_kernel_library(
],
deps = [
":bounds_check",
":fill_functor",
":matmul_op",
":ops_util",
"//third_party/eigen3",
"//tensorflow/core:core_cpu",
"//tensorflow/core:framework",
"//tensorflow/core:lib",
......
......@@ -107,13 +107,16 @@ struct GatherNdSlice<CPUDevice, T, Index, IXDIM> {
Eigen::Tensor<Eigen::DenseIndex, 1>::Dimensions reshape_dims{{ 1 }};
Eigen::array<Eigen::DenseIndex, 1> broadcast_dims{{ batch_size }};
#else
#if !defined(INTEL_MKL) || !defined(ENABLE_MKL)
Eigen::IndexList<Eigen::type2index<1> > reshape_dims;
#endif // defined(INTEL_MKL) && defined(ENABLE_MKL)
Eigen::IndexList<Eigen::DenseIndex> broadcast_dims;
broadcast_dims.set(0, batch_size);
#endif
#endif // !defined(EIGEN_HAS_INDEX_LIST)
generator::GatherNdSliceGenerator<T, Index, IXDIM> gather_nd_generator(
slice_size, Tindices, Tparams, Tout, &error_loc);
// TODO(b/137289929): Parallelize this with ParallelFor and remove OpenMP call.
#if defined(INTEL_MKL) && defined(ENABLE_MKL)
// Eigen implementation below is not highly performant. gather_nd_generator
// does not seem to be called in parallel, leading to very poor performance.
......
......@@ -562,8 +562,6 @@ class MklConvBackpropCommonOp : public OpKernel {
OP_REQUIRES_OK(context, context->GetAttr("strides", &strides_));
int stride_n = GetTensorDim(strides_, data_format_, 'N');
int stride_c = GetTensorDim(strides_, data_format_, 'C');
const int64 stride_h = GetTensorDim(strides_, data_format_, 'H');
const int64 stride_w = GetTensorDim(strides_, data_format_, 'W');
OP_REQUIRES(
context, (stride_n == 1 && stride_c == 1),
errors::InvalidArgument("Current implementation does not yet support "
......
......@@ -464,7 +464,8 @@ class MklPoolingOpBase : public OpKernel {
// We may not get this attribute for this node if it does not go through
// graph rewrite pass. So we do not check for error while retrieving this
// attribute value.
context->GetAttr("workspace_enabled", &this->workspace_enabled_);
TF_CHECK_OK(
context->GetAttr("workspace_enabled", &this->workspace_enabled_));
}
void Compute(OpKernelContext* context) override = 0;
......
......@@ -222,10 +222,10 @@ class MklDnnMatMulFwdPrimitive : public MklPrimitive {
bias_mem(nullptr),
dst_mem(nullptr),
fwd_desc(nullptr),
fwd_pd(nullptr),
src_md(nullptr),
weight_md(nullptr),
bias_md(nullptr),
fwd_pd(nullptr),
matmul_fwd(nullptr),
fwd_stream(nullptr) {}
};
......@@ -719,6 +719,7 @@ class MklDnnQuantizedMatMulOp : public OpKernel {
context->CtxFailure(
errors::InvalidArgument("Quantization mode must be"
"either MIN_FIRST or SCALED."));
return nullptr;
}
}
}
......
......@@ -525,8 +525,8 @@ inline Tensor ConvertMklToTF(OpKernelContext* context, const Tensor& mkl_tensor,
TensorShape output_shape = mkl_shape.GetTfShape();
// Allocate output tensor.
context->allocate_temp(DataTypeToEnum<T>::v(), output_shape,
&output_tensor);
TF_CHECK_OK(context->allocate_temp(DataTypeToEnum<T>::v(), output_shape,
&output_tensor));
auto cpu_engine = engine(engine::cpu, 0);
MklDnnData<T> input(&cpu_engine);
......@@ -576,7 +576,7 @@ inline const Tensor& MklGetInput(OpKernelContext* ctext, int n) {
inline void GetMklInputList(OpKernelContext* ctext, StringPiece name,
OpInputList* input_tensors) {
CHECK_NOTNULL(input_tensors);
ctext->input_list(name, input_tensors);
TF_CHECK_OK(ctext->input_list(name, input_tensors));
}
inline void GetMklShapeList(OpKernelContext* ctext, StringPiece name,
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register.