diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec
index b1e8a986fb65fd209fdc118fcda9367757ab5f6a..a480a14ecc8f60cc58bf1c8293d14756e0f97f25 100644
--- a/paddle/fluid/API.spec
+++ b/paddle/fluid/API.spec
@@ -208,7 +208,7 @@ paddle.fluid.layers.bilinear_tensor_product ArgSpec(args=['x', 'y', 'size', 'act
 paddle.fluid.layers.merge_selected_rows ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.get_tensor_from_selected_rows ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.lstm ArgSpec(args=['input', 'init_h', 'init_c', 'max_len', 'hidden_size', 'num_layers', 'dropout_prob', 'is_bidirec', 'is_test', 'name', 'default_initializer', 'seed'], varargs=None, keywords=None, defaults=(0.0, False, False, None, None, -1))
-paddle.fluid.layers.shuffle_channel ArgSpec(args=['x', 'group', 'name'], varargs=None, keywords=None, defaults=(1, None))
+paddle.fluid.layers.shuffle_channel ArgSpec(args=['x', 'group', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.py_func ArgSpec(args=['func', 'x', 'out', 'backward_func', 'skip_vars_in_backward_input'], varargs=None, keywords=None, defaults=(None, None))
 paddle.fluid.layers.psroi_pool ArgSpec(args=['input', 'rois', 'output_channels', 'spatial_scale', 'pooled_height', 'pooled_width', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.huber_loss ArgSpec(args=['input', 'label', 'delta'], varargs=None, keywords=None, defaults=None)
diff --git a/paddle/fluid/operators/shuffle_channel_op.cc b/paddle/fluid/operators/shuffle_channel_op.cc
index 8449efe4a90a9c54ce11c10da0b70dc1dce55605..9b0631d5fff1faed22b074b39633f639776b3786 100644
--- a/paddle/fluid/operators/shuffle_channel_op.cc
+++ b/paddle/fluid/operators/shuffle_channel_op.cc
@@ -29,15 +29,13 @@ class ShuffleChannelOp : public framework::OperatorWithKernel {
     ctx->SetOutputDim("Out", input_dims);
   }
 
-  /*
-   protected:
-    framework::OpKernelType GetExpectedKernelType(
-        const framework::ExecutionContext& ctx) const override {
-      return framework::OpKernelType(
-          framework::ToDataType(ctx.Input<framework::Tensor>("X")->type()),
-          ctx.device_context());
-    }
-  */
+
+ protected:
+  framework::OpKernelType GetExpectedKernelType(
+      const framework::ExecutionContext& ctx) const override {
+    return framework::OpKernelType(ctx.Input<framework::Tensor>("X")->type(),
+                                   ctx.device_context());
+  }
 };
 
 class ShuffleChannelOpMaker : public framework::OpProtoAndCheckerMaker {
@@ -89,16 +87,13 @@ class ShuffleChannelGradOp : public framework::OperatorWithKernel {
     ctx->SetOutputDim(framework::GradVarName("X"), input_dims);
   }
 
-  /*
-   protected:
-    framework::OpKernelType GetExpectedKernelType(
-        const framework::ExecutionContext& ctx) const override {
-      return framework::OpKernelType(
-          framework::ToDataType(
-              framework::ToDataType(ctx.Input<framework::Tensor>("X")->type()),
-          ctx.device_context());
-    }
-  */
+
+ protected:
+  framework::OpKernelType GetExpectedKernelType(
+      const framework::ExecutionContext& ctx) const override {
+    return framework::OpKernelType(ctx.Input<framework::Tensor>("X")->type(),
+                                   ctx.device_context());
+  }
 };
 
 }  // namespace operators
diff --git a/paddle/fluid/operators/shuffle_channel_op.h b/paddle/fluid/operators/shuffle_channel_op.h
index dbb4495e33885f97aa55117e45a99d2f0095ad1b..f6af1bc88598870ebccef81bd37f93f376940851 100644
--- a/paddle/fluid/operators/shuffle_channel_op.h
+++ b/paddle/fluid/operators/shuffle_channel_op.h
@@ -50,7 +50,6 @@ class ShuffleChannelOpKernel : public framework::OpKernel<T> {
         }
       }
     }
-    return;
   }
 };
 
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 4d0ed7c55cd06f97bb91192ed41eca405663e520..9ebbb35c0713f025be9d4d2eb9952e756603c4b9 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -9335,13 +9335,13 @@ def get_tensor_from_selected_rows(x, name=None):
     return out
 
 
-def shuffle_channel(x, group=1, name=None):
+def shuffle_channel(x, group, name=None):
     """
     **Shuffle Channel Operator**
     This operator obtains the group convolutional layer with channels shuffled.
     First, divide the input channels in each group into several subgroups,
     then, feed each group in the next layer with different subgroups.
-    Shuffle channel operation makes it possible to build more powerful structures
+    Channel shuffling operation makes it possible to build more powerful structures
     with multiple group convolutional layers.
 
     Args:
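
With this change, group no longer defaults to 1 and must be passed explicitly when calling paddle.fluid.layers.shuffle_channel. Below is a minimal usage sketch against the 1.x fluid program/executor workflow; the variable names, the NCHW shape (1, 16, 8, 8), and group=4 are illustrative assumptions rather than values taken from the patch, with the channel count chosen so it divides evenly by group.

    import numpy as np
    import paddle.fluid as fluid

    # Build a small program that shuffles the channels of an NCHW input.
    # group=4 splits the 16 input channels into 4 groups of 4 before shuffling.
    x = fluid.layers.data(name='x', shape=[16, 8, 8], dtype='float32')
    out = fluid.layers.shuffle_channel(x=x, group=4)  # group is now required

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())

    x_np = np.random.rand(1, 16, 8, 8).astype('float32')
    res, = exe.run(fluid.default_main_program(),
                   feed={'x': x_np},
                   fetch_list=[out])
    print(res.shape)  # (1, 16, 8, 8): same shape, channels reordered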