Commit ba368bf6 authored by lidanqing, committed by Tao Luo

clean up intel labeled TODOs (#19476)

test=develop
Parent 5eefd6e3
@@ -159,8 +159,8 @@ class ConcatPrimitiveFactory {
   std::vector<memory::primitive_desc> srcs_pd;
   std::vector<memory> srcs;
   std::vector<primitive::at> inputs;
-  boost::optional<memory> dst_mem;  // TODO(mgallus): change to std::optional
-};                                  // upon introduction of C++17 to paddle
+  boost::optional<memory> dst_mem;
+};

 template <typename T>
 class ConcatMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
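
The TODO dropped in this hunk pointed at C++17's std::optional. A minimal sketch of what that migration could look like, assuming a C++17 toolchain and the MKL-DNN 0.x header; the trimmed-down class below is hypothetical, not part of this commit:

```cpp
#include <optional>

#include "mkldnn.hpp"  // assumed MKL-DNN header providing mkldnn::memory

// Hypothetical, simplified version of ConcatPrimitiveFactory showing only
// the member the removed TODO wanted migrated once paddle adopts C++17.
class ConcatPrimitiveFactory {
 private:
  std::optional<mkldnn::memory> dst_mem;  // was boost::optional<memory>
};
```
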
@@ -157,7 +157,6 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     int groups = ctx.Attr<int>("groups");
     bool is_conv3d = strides.size() == 3U;
-    // TODO(tpatejko): add support for dilation
     PADDLE_ENFORCE(
         is_conv3d
             ? dilations.size() == 3 && dilations[0] == 1 && dilations[1] == 1 &&
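
The PADDLE_ENFORCE kept by this hunk (the same check recurs in the second conv hunk and the conv-transpose hunk below) rejects every dilation other than 1 until dilated convolution is implemented. A hypothetical standalone helper expressing that invariant:

```cpp
#include <algorithm>
#include <vector>

// Returns true when the dilation vector satisfies the kernel's current
// constraint: rank 3 for conv3d (2 otherwise) and all entries equal to 1.
bool DilationsSupported(const std::vector<int>& dilations, bool is_conv3d) {
  const size_t expected_rank = is_conv3d ? 3 : 2;
  return dilations.size() == expected_rank &&
         std::all_of(dilations.begin(), dilations.end(),
                     [](int d) { return d == 1; });
}
```
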
@@ -212,8 +211,7 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
         src_tz, platform::MKLDNNGetDataType<T>(), chosen_memory_format);
     auto weights_md = platform::MKLDNNMemDesc(
         weights_tz, platform::MKLDNNGetDataType<T>(), weights_format);
-    std::vector<int> bias_tz;  // TODO(mgallus): avoid empty vector creation.
-                               // Currently used whenever bias is != nullptr.
+    std::vector<int> bias_tz;
     auto dst_md = platform::MKLDNNMemDesc(
         dst_tz, platform::MKLDNNGetDataType<T>(), chosen_memory_format);
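
The comment removed from bias_tz noted that the vector is only meaningful when bias != nullptr. A hedged sketch of the guarded construction the TODO hinted at; the Tensor stand-in and MakeBiasTz helper are hypothetical, not paddle's types:

```cpp
#include <vector>

// Simplified stand-in for paddle's Tensor, for illustration only.
struct Tensor {
  std::vector<int> dims;
};

// Build the bias dimensions only when a bias tensor is actually attached,
// instead of always default-constructing an empty vector.
std::vector<int> MakeBiasTz(const Tensor* bias) {
  return bias != nullptr ? bias->dims : std::vector<int>{};
}
```
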
@@ -364,7 +362,6 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
         "residual fusion does not support force output with fp32");
     bool is_conv3d = strides.size() == 3U;
-    // TODO(tpatejko): add support for dilation
     PADDLE_ENFORCE(
         is_conv3d
             ? dilations.size() == 3 && dilations[0] == 1 && dilations[1] == 1 &&
@@ -69,7 +69,6 @@ class ConvTransposeMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
     int groups = ctx.Attr<int>("groups");
-    // TODO(tpatejko): add support for dilation
     PADDLE_ENFORCE(
         dilations.size() == 2 && dilations[0] == 1 && dilations[1] == 1,
         "dilation in convolution is not implemented yet");
@@ -150,8 +149,7 @@ class ConvTransposeMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
         src_tz, platform::MKLDNNGetDataType<T>(), chosen_memory_format);
     auto weights_md = platform::MKLDNNMemDesc(
         weights_tz, platform::MKLDNNGetDataType<T>(), chosen_memory_format);
-    std::vector<int> bias_tz;  // TODO(mgallus): avoid empty vector creation.
-                               // Currently used whenever bias is != nullptr.
+    std::vector<int> bias_tz;
     auto dst_md = platform::MKLDNNMemDesc(
         dst_tz, platform::MKLDNNGetDataType<T>(), chosen_memory_format);
@@ -40,8 +40,6 @@ class GaussianMKLDNNKernel : public paddle::framework::OpKernel<T> {
       data[i] = dist(engine);
     }
-    // The format of output is set as the mkldnn's format
-    // TODO(@mozga-intel) The format of matrix sets inside the another layers.
     tensor->set_layout(DataLayout::kMKLDNN);
     tensor->set_format(mkldnn::memory::format::oihw);
   }
@@ -124,7 +124,5 @@ class QuantOpKernel : public framework::OpKernel<T> {
 }  // namespace paddle

 namespace ops = paddle::operators;
-// TODO(Xiaoli) Support FP32->S8 quantization.
 REGISTER_OP_KERNEL(quantize, MKLDNN, ::paddle::platform::CPUPlace,
                    ops::QuantOpKernel<float>);
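
The removed TODO asked for FP32->S8 quantization support. The arithmetic such a path would need is scale, round, saturate; a hypothetical helper for illustration, not the kernel this commit registers:

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>

// Quantize one FP32 value to signed int8: apply the quantization scale,
// round to nearest, and saturate to [-128, 127].
int8_t QuantizeFp32ToS8(float x, float scale) {
  const float q = std::round(x * scale);
  return static_cast<int8_t>(std::min(std::max(q, -128.0f), 127.0f));
}
```
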
@@ -47,9 +47,7 @@ class ReQuantOpKernel : public framework::OpKernel<T> {
     std::vector<int> dst_tz = paddle::framework::vectorize2int(output->dims());
     mkldnn::memory::data_type src_dt =
         paddle::framework::ToMKLDNNDataType(input->type());
-    mkldnn::memory::data_type dst_dt = src_dt;  // TODO(Xiaoli) support
-                                                // requantize from different
-                                                // data type (e.g., s8 to u8)
+    mkldnn::memory::data_type dst_dt = src_dt;
     mkldnn::memory::format src_fmt = memory::format::nhwc;
     mkldnn::memory::format dst_fmt = memory::format::nhwc;
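
The TODO deleted here described requantizing between data types (e.g. s8 to u8). A hypothetical sketch of that arithmetic: undo the input scale, apply the output scale, and saturate to the unsigned range. Names and signature are illustrative only:

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>

// Requantize a signed int8 value to unsigned int8 under a new scale.
uint8_t RequantizeS8ToU8(int8_t x, float scale_in, float scale_out) {
  const float real_value = static_cast<float>(x) / scale_in;  // dequantize
  const float q = std::round(real_value * scale_out);         // requantize
  return static_cast<uint8_t>(std::min(std::max(q, 0.0f), 255.0f));
}
```
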
@@ -139,7 +139,6 @@ class SumMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
       output->set_layout(DataLayout::kMKLDNN);
       output->set_format(output_format);
     } else {  // Fallback to naive version
-      // TODO(@mozga-intel) Add MKLDNN SelectedRows & LoDTensorArray support
       SumKernel<CPUDeviceContext, T> reference_kernel;
       reference_kernel.Compute(ctx);
     }
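
The hunk above keeps the fallback branch: when the inputs are not plain MKL-DNN tensors (per the removed TODO, SelectedRows and LoDTensorArray), the kernel delegates to the reference CPU implementation. A minimal skeleton of that dispatch pattern; every name below is a stand-in, not paddle's API:

```cpp
#include <iostream>

struct Context {};  // stand-in for the framework's execution context

void MKLDNNSum(const Context&) { std::cout << "mkldnn sum\n"; }
void ReferenceSum(const Context&) { std::cout << "reference sum\n"; }

// Dispatch to the fast path when possible, otherwise fall back.
void SumCompute(const Context& ctx, bool inputs_supported_by_mkldnn) {
  if (inputs_supported_by_mkldnn) {
    MKLDNNSum(ctx);
  } else {
    ReferenceSum(ctx);  // e.g. SelectedRows / LoDTensorArray inputs
  }
}
```
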
@@ -57,8 +57,7 @@ static void BuildReshapeNode(
   std::shared_ptr<ngraph::Node> input =
       platform::GetInputNode(op, "X", ngb_node_map);
   auto input_shape = input->get_shape();
-  // TODO(mozga-intel) The vector of shape is not supported yet, that's
-  // asDispensable() operator"
   std::shared_ptr<ngraph::Node> shape =
       platform::GetInputNode(op, "Shape", ngb_node_map);
@@ -83,8 +83,6 @@ class MKLDNNPostTrainingQuantStrategy(Strategy):
             if six.PY3:
                 data = warmup_reader.__next__()
-            # TODO (Intel) Remove limits that MKLDNNPostTrainingQuantStrategy
-            # only support image classification
             num_images = len(data)
             image_data = [img.tolist() for (img, _) in data]
             image_data = np.array(image_data).astype("float32").reshape(
@@ -152,73 +152,6 @@ class TestElementwiseMulMKLDNNOp_Integrated_With_Convs(ElementwiseMulOp):
         pass

-# TODO(LeoZhao-Intel): re-enable this case
-# https://github.com/PaddlePaddle/Paddle/issues/16764
-@unittest.skip("Not supported well on avx2.")
-class TestElementwiseMulMKLDNNOp_BroadcastNCHW16c(ElementwiseMulOp):
-    def init_input_output(self):
-        x = np.random.rand(1, 16, 2, 2).astype(self.dtype)
-        self.x = x.transpose(0, 2, 3, 1).reshape(1, 16, 2, 2)
-        self.y = np.random.rand(1, 16).astype(self.dtype)
-        self.out = x * self.y.reshape(1, 16, 1, 1)
-        self.out = self.out.transpose(0, 2, 3, 1).reshape(1, 16, 2, 2)
-
-    def setUp(self):
-        super(TestElementwiseMulMKLDNNOp_BroadcastNCHW16c, self).setUp()
-        self.attrs["x_data_format"] = "nchw16c"
-        self.attrs["y_data_format"] = "nc"
-        self._cpu_only = True
-
-    def init_kernel_type(self):
-        self.use_mkldnn = True
-
-    def init_axis(self):
-        self.axis = 0
-
-    def test_check_grad_normal(self):
-        pass
-
-    def test_check_grad_ingore_x(self):
-        pass
-
-    def test_check_grad_ingore_y(self):
-        pass
-
-
-@unittest.skip(
-    "Not implemented yet.")  # TODO(mgallus): enable when implemented.
-class TestElementwiseMulMKLDNNOp_BroadcastNCHW8c(ElementwiseMulOp):
-    def init_input_output(self):
-        x = np.random.rand(1, 8, 2, 2).astype(self.dtype)
-        self.x = x.transpose(0, 2, 3, 1).reshape(1, 8, 2, 2)
-        self.y = np.random.rand(1, 8).astype(self.dtype)
-        self.out = x * self.y.reshape(1, 8, 1, 1)
-        self.out = self.out.transpose(0, 2, 3, 1).reshape(1, 8, 2, 2)
-
-    def setUp(self):
-        super(TestElementwiseMulMKLDNNOp_BroadcastNCHW8c, self).setUp()
-        self.attrs["x_data_format"] = "nchw8c"
-        self.attrs["y_data_format"] = "nc"
-        self._cpu_only = True
-
-    def init_kernel_type(self):
-        self.use_mkldnn = True
-
-    def init_axis(self):
-        self.axis = 0
-
-    def test_check_grad_normal(self):
-        pass
-
-    def test_check_grad_ingore_x(self):
-        pass
-
-    def test_check_grad_ingore_y(self):
-        pass
-
-
 class TestElementwiseMulMKLDNNOp_FallbackNCHW(ElementwiseMulOp):
     def init_input_output(self):
         self.x = np.random.rand(1, 16, 2, 2).astype(self.dtype)