未验证 提交 69742bd9 编写于 作者: S Sylwester Fraczek 提交者: GitHub

Enable mkldnn layout conversion (#25778)

* enable mkldnn layout conversion

* review fix: remove tmp_place

* fix test mkldnn swish

* add UT for PrepareData CPU->MKLDNN

* add #ifdef PADDLE_WITH_MKLDNN

* Force-push commit
Co-authored-by: grygielski <adam.grygielski@gmail.com>
Parent 71b19c7f
@@ -42,23 +42,17 @@ static void PrepareData(const platform::Place& place,
     for (const auto& var_base : name_pair.second) {
       const auto* tensor = GetTensorFromVar(var_base->Var());
       if (tensor && tensor->IsInitialized()) {
-        auto tmp_place = tensor->place();
-
-        // TODO(jiabin): Support transform data layout when we Verify it on more
-        // tests
-        if (!(tmp_place == place)) {
-          auto kernel_type_for_var = op.GetKernelTypeForVar(
-              name_pair.first, *tensor, expected_kernel_key);
-          if (!NeedTransform(kernel_type_for_var, expected_kernel_key)) {
-            continue;
-          } else {
-            VLOG(3) << "Transform Variable " << var_base->Name() << " from "
-                    << kernel_type_for_var << " to " << expected_kernel_key;
-            framework::Tensor out;
-            TransformData(expected_kernel_key, kernel_type_for_var, *tensor,
-                          &out);
-            SetTensorToVariable(var_base->Var(), out, var_base->MutableVar());
-          }
-        }
+        auto kernel_type_for_var = op.GetKernelTypeForVar(
+            name_pair.first, *tensor, expected_kernel_key);
+        if (!NeedTransform(kernel_type_for_var, expected_kernel_key)) {
+          continue;
+        } else {
+          VLOG(3) << "Transform Variable " << var_base->Name() << " from "
+                  << kernel_type_for_var << " to " << expected_kernel_key;
+          framework::Tensor out;
+          TransformData(expected_kernel_key, kernel_type_for_var, *tensor,
+                        &out);
+          SetTensorToVariable(var_base->Var(), out, var_base->MutableVar());
+        }
       }
     }
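Before this change, the transform branch was guarded by a place comparison: a tensor already on the destination place was never transformed, which also blocked pure layout conversion (for example, a CPU tensor in kNCHW feeding an MKLDNN kernel on the same CPUPlace). With the guard removed, NeedTransform alone decides. For reference, a minimal sketch of the comparison that predicate performs, paraphrased from the framework's OpKernelType helpers; treat the exact names, members, and the MKLDNN clause as assumptions rather than verbatim source:

// Hedged sketch, paraphrasing paddle::framework::NeedTransform /
// NeedTransformLayout (framework/op_kernel_type.h); not a verbatim copy.
inline bool NeedTransformLayoutSketch(framework::DataLayout l,
                                      framework::DataLayout r) {
  // Two concrete, unequal layouts always need a transform.
  bool ret = (l != framework::DataLayout::kAnyLayout &&
              r != framework::DataLayout::kAnyLayout && l != r);
#ifdef PADDLE_WITH_MKLDNN
  // With MKLDNN enabled, crossing the MKLDNN/non-MKLDNN boundary also
  // needs a transform, even when the place is identical. This is exactly
  // the case the removed tmp_place guard used to suppress.
  ret |= (l == framework::DataLayout::kMKLDNN) !=
         (r == framework::DataLayout::kMKLDNN);
#endif
  return ret;
}

inline bool NeedTransformSketch(const framework::OpKernelType& var_type,
                                const framework::OpKernelType& expected) {
  // Place class, data type, or layout disagreement each force a transform.
  return !platform::places_are_same_class(var_type.place_, expected.place_) ||
         var_type.data_type_ != expected.data_type_ ||
         NeedTransformLayoutSketch(var_type.data_layout_,
                                   expected.data_layout_);
}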
@@ -93,6 +87,13 @@ PreparedOp PrepareOpImpl(const NameVarMap<VarType>& ins,
   auto& kernels = kernels_iter->second;
   framework::RuntimeContext ctx({}, {});

+#ifdef PADDLE_WITH_MKLDNN
+  // MKLDNN variant of code reads attributes in some of GetKernelTypeForVar and
+  // GetKernelType functions, so we need to copy the attributes there.
+  // Const qualifier of Attrs had to be discarded to overwrite it.
+  auto& mutable_op_attrs = const_cast<framework::AttributeMap&>(op.Attrs());
+  mutable_op_attrs = attrs;
+#endif
   auto expected_kernel_key =
       op.GetExpectedKernelType(DygraphExecutionContext<VarType>(
           op, framework::Scope(), *dev_ctx, ctx, ins, outs, attrs));
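The const_cast here reads as an acknowledged workaround rather than an API change: Attrs() returns a const reference, and plumbing a mutable attribute map through every GetKernelType and GetKernelTypeForVar call site would be a far larger refactor. The copy matters because the default per-variable kernel type is derived from the tensor itself. A hedged sketch of that default, paraphrased from framework::OperatorWithKernel (treat the exact signature as an assumption):

// Hedged sketch of the default OperatorWithKernel::GetKernelTypeForVar;
// paraphrased, not quoted. The variable's kernel type takes the tensor's
// own place and layout, so MKLDNN-aware overrides (like ConvOp's below)
// must instead read attributes such as "data_format" to pick the layout,
// and those attributes are only visible to them after the copy above.
framework::OpKernelType GetKernelTypeForVarSketch(
    const std::string& var_name, const framework::Tensor& tensor,
    const framework::OpKernelType& expected_kernel_type) {
  return framework::OpKernelType(expected_kernel_type.data_type_,
                                 tensor.place(), tensor.layout());
}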
@@ -176,7 +176,7 @@ TEST(test_prepare_op, test_prepare_data) {
 }
 #endif

-TEST(test_prepare_op, test_prepare_data_same_place) {
+void TestPrepareDataSamePlace(framework::AttributeMap attr_map) {
   std::shared_ptr<imperative::VarBase> vin(
       new imperative::VarBase(false, "vin"));
   std::shared_ptr<imperative::VarBase> vout(
@@ -198,7 +198,6 @@ TEST(test_prepare_op, test_prepare_data_same_place) {
   var_pair out_pair = var_pair("Out", vb_vector(1, vout));
   imperative::NameVarBaseMap ins = {x_pair};
   imperative::NameVarBaseMap outs = {out_pair};
-  framework::AttributeMap attr_map;
   const std::string op_type = "relu";
   const auto& info = framework::OpInfoMap::Instance().Get(op_type);
   if (info.Checker()) info.Checker()->Check(&attr_map);
@@ -222,8 +221,21 @@ TEST(test_prepare_op, test_prepare_data_same_place) {
     }
   }
 }

+TEST(test_prepare_op, test_prepare_data_same_place) {
+  TestPrepareDataSamePlace({});
+}
+
+#ifdef PADDLE_WITH_MKLDNN
+TEST(test_prepare_op, test_prepare_data_cpu_mkldnn) {
+  TestPrepareDataSamePlace({{"use_mkldnn", true}});
+}
+#endif
+
 }  // namespace imperative
 }  // namespace paddle

 USE_OP(split);
 USE_OP(relu);
+#ifdef PADDLE_WITH_MKLDNN
+USE_OP_DEVICE_KERNEL(relu, MKLDNN);
+#endif
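The test refactor turns the old TEST body into a helper parameterized by the attribute map, so the same PrepareData path runs twice: once with empty attributes (plain CPU, no transform expected) and once with use_mkldnn set, covering the new CPU to MKLDNN conversion. USE_OP_DEVICE_KERNEL(relu, MKLDNN) pulls in the MKLDNN relu kernel registration so the second variant actually has a kernel to select.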
@@ -196,7 +196,7 @@ framework::OpKernelType ConvOp::GetKernelTypeForVar(
   auto ar = paddle::framework::AttrReader(attrs);
   const std::string data_format = ar.Get<std::string>("data_format");
   auto dl = framework::StringToDataLayout(data_format);
-  // Some models may have intentionally set "AnyLayout" for pool
+  // Some models may have intentionally set "AnyLayout" for conv
   // op. Treat this as NCHW (default data_format value)
   if (dl != framework::DataLayout::kAnyLayout) {
     return framework::OpKernelType(expected_kernel_type.data_type_,
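The functional change in this hunk is only the comment (this is ConvOp::GetKernelTypeForVar, not pool), but the surrounding lines show the pattern the attribute copy above enables: data_format is read through AttrReader, and unless the model explicitly chose AnyLayout, the variable's kernel type is pinned to that layout rather than to whatever layout the tensor currently carries.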
@@ -112,13 +112,10 @@ class TestMKLDNNSwishDim2(TestSwish):
     def setUp(self):
         super(TestMKLDNNSwishDim2, self).setUp()

         x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
-        beta = 2.3
-        out = x * expit(beta * x)
+        self.attrs["use_mkldnn"] = True

         self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
-        self.outputs = {'Out': out}
-        self.attrs = {"use_mkldnn": True, "beta": beta}

     def init_dtype(self):
         self.dtype = np.float32
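The swish test previously built its own reference output, out = x * expit(beta * x), i.e. x times sigmoid(beta * x), with a hard-coded beta = 2.3, and replaced the inherited attrs dict wholesale. Since attributes are now copied into the op and reach the MKLDNN kernel, the subclass only flips use_mkldnn on the attrs it inherits from TestSwish, so the beta the kernel reads is, by construction, the same one used to compute the reference output.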