Commit 32f8ac7d authored by mozga-intel

Remove additional message

Parent 34a80843
@@ -23,67 +23,56 @@ namespace operators {
 using paddle::framework::Tensor;
 using paddle::platform::MKLDNNDeviceContext;
 
-struct MKLDNNMatrixSize final {
-  explicit MKLDNNMatrixSize(const std::vector<int>& in,
-                            const std::vector<int>& w)
-      : mb{in[0]}, ic{in[1]}, oc{w[1]}, h{in[2]}, w{in[3]} {}
-
-  bool is_spatial() const { return h > 2 && w > 2; }
-
-  const int mb;
-  const int ic;
-  const int oc;
-  const int h, w;
-};
-
 template <typename T>
 class MKLDNNMD {
  public:
   explicit MKLDNNMD(const T* in, const T* w, bool bias)
-      : sz_(std::unique_ptr<MKLDNNMatrixSize>(new MKLDNNMatrixSize(
-            paddle::framework::vectorize2int(in->dims()),
-            paddle::framework::vectorize2int(w->dims())))) {
+      : in{paddle::framework::vectorize2int(in->dims())},
+        w{paddle::framework::vectorize2int(w->dims())} {
     with_bias_ = bias;
   }
 
   mkldnn::memory::desc dst() const {
-    return platform::MKLDNNMemDesc({sz_->mb, sz_->oc},
+    return platform::MKLDNNMemDesc({in[0], w[1]},
                                    mkldnn::memory::data_type::f32,
                                    mkldnn::memory::format::nc);
   }
 
   mkldnn::memory::desc src() const {
-    return sz_->is_spatial()
-               ? platform::MKLDNNMemDesc({sz_->mb, sz_->ic, sz_->h, sz_->w},
+    return is_spatial()
+               ? platform::MKLDNNMemDesc({in[0], in[1], in[2], in[3]},
                                          mkldnn::memory::data_type::f32,
                                          mkldnn::memory::format::nchw)
-               : platform::MKLDNNMemDesc({sz_->mb, sz_->ic},
+               : platform::MKLDNNMemDesc({in[0], in[1]},
                                          mkldnn::memory::data_type::f32,
                                          mkldnn::memory::format::nc);
   }
 
   mkldnn::memory::desc weights() const {
-    return sz_->is_spatial()
-               ? platform::MKLDNNMemDesc({sz_->oc, sz_->ic, sz_->h, sz_->w},
+    return is_spatial()
+               ? platform::MKLDNNMemDesc({w[1], in[1], in[2], in[3]},
                                          mkldnn::memory::data_type::f32,
                                          mkldnn::memory::format::oihw)
-               : platform::MKLDNNMemDesc({sz_->oc, sz_->ic},
+               : platform::MKLDNNMemDesc({w[1], in[1]},
                                          mkldnn::memory::data_type::f32,
                                          mkldnn::memory::format::oi);
   }
 
   mkldnn::memory::desc bias() const {
     return with_bias_
-               ? platform::MKLDNNMemDesc({sz_->oc},
-                                         mkldnn::memory::data_type::f32,
+               ? platform::MKLDNNMemDesc({w[1]}, mkldnn::memory::data_type::f32,
                                          mkldnn::memory::format::format_undef)
                : platform::MKLDNNMemDesc({}, mkldnn::memory::data_type::f32,
                                          mkldnn::memory::format::format_undef);
   }
 
  private:
-  std::unique_ptr<MKLDNNMatrixSize> sz_;
+  bool is_spatial() const { return in.size() > 1 && w.size() > 1; }
+
+  std::vector<int> in;
+  std::vector<int> w;
   bool with_bias_;
-  bool is_spatial_;
 };
 
 class MKLDNNMemory {
...
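In short, the refactor drops the MKLDNNMatrixSize helper struct and lets MKLDNNMD read batch, channel, and spatial sizes straight out of the raw dim vectors. A loose Python sketch (not the committed C++) of the resulting shape selection, treating a rank-4 input as the spatial NCHW/OIHW case as the op documentation describes; the committed code tests vector sizes instead:

# Loose sketch of MKLDNNMD's shape logic: rank 4 -> spatial (nchw/oihw),
# rank 2 -> flat (nc/oi); the destination is always {batch, w[1]}.
def src_desc(in_dims):
    return (in_dims, "nchw") if len(in_dims) == 4 else (in_dims[:2], "nc")

def weights_desc(in_dims, w_dims):
    oc, ic = w_dims[1], in_dims[1]
    if len(in_dims) == 4:
        return ([oc, ic, in_dims[2], in_dims[3]], "oihw")
    return ([oc, ic], "oi")

def dst_desc(in_dims, w_dims):
    return ([in_dims[0], w_dims[1]], "nc")

print(src_desc([8, 3, 32, 32]))                          # ([8, 3, 32, 32], 'nchw')
print(weights_desc([8, 3, 32, 32], [3 * 32 * 32, 10]))   # ([10, 3, 32, 32], 'oihw')
print(dst_desc([8, 3, 32, 32], [3 * 32 * 32, 10]))       # ([8, 10], 'nc')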
@@ -29,8 +29,8 @@ void FCOp::InferShape(framework::InferShapeContext* ctx) const {
   auto w_dims = ctx->GetInputDim("W");
   std::vector<int64_t> output_shape({in_dims[0], w_dims[1]});
 
-  PADDLE_ENFORCE(in_dims.size() == 4,
-                 "Fully Connected input should be 4-D tensor.");
+  PADDLE_ENFORCE(in_dims.size() == 4 || in_dims.size() == 2,
+                 "Fully Connected input should be 2-D or 4-D tensor.");
 
   PADDLE_ENFORCE(w_dims.size() == 2,
                  "Fully Connected input should be 2-D tensor.");
@@ -96,22 +96,11 @@ FCOpMaker::FCOpMaker(OpProto* proto, OpAttrChecker* op_checker)
 The fully connected operation calculates the output based on the input, weights and bias attribute.
 The size of each dimension of the parameters is checked in the infer-shape.
-Input(Input) is in NCHW or NC format, where N is the batch size, C is the number of channels,
-H is the height of the feature, and W is the width of the feature.
-Weights(W) is in OIHW or OI format, where H is the height of the feature, W is the width of the feature,
-O is the size of the output, and I is the number of channels.
-Output(Out) is in NC format, where N is the batch size, and C is the number of channels.
 The matrix of bias is generated by the mkldnn framework, when the bias_attr is True.
 Additional parameters are use_mkldnn and bias_attr.
 The input(X) size and output(Out) size may be different.
-
-Example:
-  Input:
-    Input shape: $(N, C_{in}, H_{in}, W_{in})$
-    Weight shape: $(O_{out}, I_{in}, H_{in}, W_{in})$
-    Bias shape: $(O_{out})$
-  Output:
-    Output shape: $(N, C_{out})$
+
+The fully connected layer only supports the MKLDNN version.
 )DOC");
 }
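The trimmed DOC string still names the two extra attributes, use_mkldnn and bias_attr. A hedged usage sketch from the Python side, assuming the fluid API of this era (module paths and exact signatures may differ in other Paddle versions):

import paddle.fluid as fluid

# Hedged sketch: a 4-D (NCHW) input fed to the MKLDNN fc layer described
# above; bias_attr and use_mkldnn are the attributes the DOC mentions.
data = fluid.layers.data(name="data", shape=[3, 32, 32], dtype="float32")
out = fluid.layers.fc(input=data, size=10, bias_attr=True, use_mkldnn=True)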
...
@@ -167,8 +167,9 @@ def fc(input,
         shape=param_shape,
         dtype=dtype,
         is_bias=False)
-    bias_attr = False
-    if bias_attr is not None:
+    if bias_attr is None or bias_attr is False:
+        bias_attr = False
+    else:
         bias_attr = True
     helper.append_op(
         type="fc",
...
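The Python change normalizes bias_attr to a plain boolean before it reaches the op attributes; the previous code set the flag to True unconditionally, since False is never None. A sketch of the new behavior:

# Sketch of the normalization the patched Python code performs: None and
# False map to False; anything else (True, a param attribute, ...) to True.
def normalize_bias_attr(bias_attr):
    if bias_attr is None or bias_attr is False:
        return False
    return True

assert normalize_bias_attr(None) is False
assert normalize_bias_attr(False) is False
assert normalize_bias_attr(True) is True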