Unverified commit d01f6269, authored by furnace, committed by GitHub

update mv op according to PR #27024 (#27474)

Parent 9d783aed
@@ -42,21 +42,21 @@ class MVOp : public framework::OperatorWithKernel {
     OP_INOUT_CHECK(context->HasOutput("Out"), "Output", "Out", "mv");
 
     auto dim_x = context->GetInputDim("X");
-    auto dim_y = context->GetInputDim("Vec");
+    auto dim_vec = context->GetInputDim("Vec");
     PADDLE_ENFORCE_EQ(
         dim_x.size(), 2,
         platform::errors::InvalidArgument(
             "The rank of input X should be 2, but is %d", dim_x.size()));
     PADDLE_ENFORCE_EQ(
-        dim_y.size(), 1,
+        dim_vec.size(), 1,
         platform::errors::InvalidArgument(
-            "The rank of input Vec should be 1, but is %d", dim_y.size()));
-    PADDLE_ENFORCE_EQ(dim_x[1] == dim_y[0], true,
+            "The rank of input Vec should be 1, but is %d", dim_vec.size()));
+    PADDLE_ENFORCE_EQ(dim_x[1], dim_vec[0],
                       platform::errors::InvalidArgument(
-                          "The length of input X' second dim should equal the "
-                          "length of input Vec,"
-                          " but X[%d, %d], Vec[%d]",
-                          dim_x[0], dim_x[1], dim_y[0]));
+                          "X's second dimension is expected to be equal to "
+                          "Vec's first dimension, "
+                          "but received X's shape = [%s], Vec's shape = [%s]",
+                          dim_x, dim_vec));
 
     framework::DDim dim_out = framework::make_ddim({dim_x[0]});
......
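For reference, here is a minimal Python sketch of the shape contract the revised checks enforce: X must be rank 2, Vec rank 1, and X's second dimension must match Vec's length, with the output taking shape [M]. The helper mv_infer_shape is hypothetical, written only for illustration and not part of Paddle.

def mv_infer_shape(x_shape, vec_shape):
    # Mirrors MVOp::InferShape after this change.
    if len(x_shape) != 2:
        raise ValueError("The rank of input X should be 2, but is %d" % len(x_shape))
    if len(vec_shape) != 1:
        raise ValueError("The rank of input Vec should be 1, but is %d" % len(vec_shape))
    if x_shape[1] != vec_shape[0]:
        raise ValueError(
            "X's second dimension is expected to be equal to Vec's first "
            "dimension, but received X's shape = %s, Vec's shape = %s"
            % (list(x_shape), list(vec_shape)))
    return [x_shape[0]]  # Out = X @ Vec has shape [M]

print(mv_infer_shape((5, 100), (100,)))  # [5]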
@@ -19,7 +19,7 @@ namespace paddle {
 namespace operators {
 
 template <typename T>
-__global__ void MVGradCUDAKernel(const int m, const int n, const T *dout,
-                                 const T *vec, T *dx) {
+__global__ void MVGradDxCUDAKernel(const int m, const int n, const T *dout,
+                                   const T *vec, T *dx) {
   int idx = blockDim.x * blockIdx.x + threadIdx.x;
   for (; idx < m * n; idx += blockDim.x * gridDim.x) {
@@ -52,33 +52,32 @@ class MVGradKernel<platform::CUDADeviceContext, T>
     int m = dim_x[0];
     int n = dim_x[1];
 
-    dx->Resize(framework::make_ddim({m * n}));
-
     // get data ptr
     const T *x_data = x->data<T>();
     const T *vec_data = vec->data<T>();
     const T *dout_data = dout->data<T>();
 
-    T *dx_data = dx->mutable_data<T>(context.GetPlace());
-    T *dvec_data = dvec->mutable_data<T>(context.GetPlace());
-
     auto &dev_ctx =
         context.template device_context<platform::CUDADeviceContext>();
     auto blas = math::GetBlas<platform::CUDADeviceContext, T>(dev_ctx);
 
-    // calculate dx
     auto stream = context.cuda_device_context().stream();
     auto config = GetGpuLaunchConfig1D(dev_ctx, m * n);
-    MVGradCUDAKernel<
-        T><<<config.block_per_grid.x, config.thread_per_block.x, 0, stream>>>(
-        m, n, dout_data, vec_data, dx_data);
 
-    dx->Resize(framework::make_ddim({m, n}));
-
-    // calculate dvec
-    blas.GEMV(true, dim_x[0], dim_x[1], static_cast<T>(1), x_data, dout_data,
-              static_cast<T>(0), dvec_data);
+    if (dx) {
+      T *dx_data = dx->mutable_data<T>(context.GetPlace());
+      MVGradDxCUDAKernel<
+          T><<<config.block_per_grid.x, config.thread_per_block.x, 0, stream>>>(
+          m, n, dout_data, vec_data, dx_data);
+    }
+
+    if (dvec) {
+      T *dvec_data = dvec->mutable_data<T>(context.GetPlace());
+      blas.GEMV(true, dim_x[0], dim_x[1], static_cast<T>(1), x_data, dout_data,
+                static_cast<T>(0), dvec_data);
+    }
   }
 };
 
 }  // namespace operators
......
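As a sanity check on the gradient math the two guarded branches now compute separately, the following NumPy reference (written for this note, not taken from the repository) shows what they produce: the dx kernel fills dX[i][j] = dout[i] * vec[j], i.e. an outer product, and the GEMV call computes dVec = X^T * dout.

import numpy as np

m, n = 5, 100
x = np.random.rand(m, n)      # forward input X, shape (m, n)
vec = np.random.rand(n)       # forward input Vec, shape (n,)
dout = np.random.rand(m)      # gradient of Out = X @ vec, shape (m,)

# What MVGradDxCUDAKernel (and the CPU double loop) computes:
# dx[i, j] = dout[i] * vec[j], an outer product of shape (m, n).
dx = np.outer(dout, vec)

# What blas.GEMV(true, ...) computes: dvec = X^T @ dout, shape (n,).
dvec = x.T @ dout

assert dx.shape == (m, n) and dvec.shape == (n,)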
@@ -74,31 +74,31 @@ class MVGradKernel : public framework::OpKernel<T> {
     int m = dim_x[0];
     int n = dim_x[1];
 
-    dx->Resize(framework::make_ddim({m * n}));
-
     // get data ptr
     const T *x_data = x->data<T>();
     const T *vec_data = vec->data<T>();
     const T *dout_data = dout->data<T>();
 
-    T *dx_data = dx->mutable_data<T>(context.GetPlace());
-    T *dvec_data = dvec->mutable_data<T>(context.GetPlace());
-
-    auto &dev_ctx = context.template device_context<DeviceContext>();
-    auto blas = math::GetBlas<DeviceContext, T>(dev_ctx);
-
-    // calculate dx
-    for (int i = 0; i < m; ++i) {
-      for (int j = 0; j < n; ++j)
-        dx_data[i * n + j] = dout_data[i] * vec_data[j];
-    }
+    if (dx) {
+      T *dx_data = dx->mutable_data<T>(context.GetPlace());
+
+      for (int i = 0; i < m; ++i) {
+        for (int j = 0; j < n; ++j) {
+          dx_data[i * n + j] = dout_data[i] * vec_data[j];
+        }
+      }
+    }
 
-    dx->Resize(framework::make_ddim({m, n}));
-
-    // calculate dvec
-    blas.GEMV(true, dim_x[0], dim_x[1], static_cast<T>(1), x_data, dout_data,
-              static_cast<T>(0), dvec_data);
+    if (dvec) {
+      T *dvec_data = dvec->mutable_data<T>(context.GetPlace());
+
+      auto &dev_ctx = context.template device_context<DeviceContext>();
+      auto blas = math::GetBlas<DeviceContext, T>(dev_ctx);
+
+      blas.GEMV(true, dim_x[0], dim_x[1], static_cast<T>(1), x_data, dout_data,
+                static_cast<T>(0), dvec_data);
+    }
   }
 };
 
 }  // namespace operators
......
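The point of the new if (dx) / if (dvec) guards in both the CUDA and CPU kernels is that a gradient output can be absent, for example when the corresponding input has stop_gradient set, and the old code would have dereferenced it unconditionally. A rough dygraph sketch of the case this enables follows; it assumes a Paddle 2.0-style API and is written for illustration only, exact method names may differ by version.

import numpy as np
import paddle

paddle.disable_static()

# X does not need a gradient, so the mv grad kernel skips the dX branch.
x = paddle.to_tensor(np.random.rand(5, 100), stop_gradient=True)
vec = paddle.to_tensor(np.random.rand(100), stop_gradient=False)

out = paddle.mv(x, vec)
paddle.sum(out).backward()

# Only Vec receives a gradient, computed by the GEMV branch.
print(vec.gradient().shape)  # (100,)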
@@ -20,6 +20,7 @@ import paddle
 import paddle.fluid as fluid
 import paddle.fluid.layers as layers
 import paddle.fluid.core as core
+from paddle.static import program_guard, Program
 from op_test import OpTest
@@ -37,7 +38,7 @@ class TestMVOp(OpTest):
         self.check_grad(['X', 'Vec'], 'Out')
 
     def init_config(self):
-        self.x = np.random.random((5, 100)).astype("float64")
+        self.x = np.random.random((2, 100)).astype("float64")
         self.vec = np.random.random((100)).astype("float64")
@@ -57,17 +58,32 @@ class TestMVAPI(unittest.TestCase):
         paddle.enable_static()
 
     def test_static_graph(self):
-        paddle.enable_static()
+        for x_stop_gradient in [False, True]:
+            for vec_stop_gradient in [False, True]:
+                paddle.enable_static()
+
+                train_program = Program()
+                startup_program = Program()
 
-        self.input_x = np.random.rand(5, 100).astype("float64")
-        self.input_vec = np.random.rand(100).astype("float64")
+                self.input_x = np.random.rand(5, 100).astype("float64")
+                self.input_vec = np.random.rand(100).astype("float64")
 
-        data_x = paddle.static.data("x", shape=[5, 100], dtype="float64")
-        data_vec = paddle.static.data("vec", shape=[100], dtype="float64")
-        result_vec = paddle.mv(data_x, data_vec)
+                with program_guard(train_program, startup_program):
+                    data_x = paddle.static.data(
+                        "x", shape=[5, 100], dtype="float64")
+                    data_vec = paddle.static.data(
+                        "vec", shape=[100], dtype="float64")
+
+                    data_x.stop_gradient = x_stop_gradient
+                    data_vec.stop_gradient = vec_stop_gradient
+
+                    result_vec = paddle.mv(data_x, data_vec)
 
-        self.place = paddle.CPUPlace()
-        exe = paddle.static.Executor(self.place)
-        res, = exe.run(feed={"x": self.input_x,
-                             "vec": self.input_vec},
-                       fetch_list=[result_vec])
+                    self.place = paddle.CPUPlace()
+                    exe = paddle.static.Executor(self.place)
+                    res, = exe.run(
+                        feed={"x": self.input_x,
+                              "vec": self.input_vec},
+                        fetch_list=[result_vec])
 
-        z_expected = np.array(np.dot(self.input_x, self.input_vec))
+                    z_expected = np.array(np.dot(self.input_x, self.input_vec))
......
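For completeness, here is a condensed, standalone version of the static-graph usage the updated test exercises, without the stop_gradient loop. It is a sketch built only from the APIs that appear in the diff above (Program, program_guard, paddle.static.data, paddle.mv, Executor) and checks the result against np.dot the same way the test does.

import numpy as np
import paddle
from paddle.static import Program, program_guard

paddle.enable_static()

np_x = np.random.rand(5, 100).astype("float64")
np_vec = np.random.rand(100).astype("float64")

main_program, startup_program = Program(), Program()
with program_guard(main_program, startup_program):
    data_x = paddle.static.data("x", shape=[5, 100], dtype="float64")
    data_vec = paddle.static.data("vec", shape=[100], dtype="float64")
    result_vec = paddle.mv(data_x, data_vec)

exe = paddle.static.Executor(paddle.CPUPlace())
res, = exe.run(main_program,
               feed={"x": np_x, "vec": np_vec},
               fetch_list=[result_vec])

# Out = X @ Vec, shape (5,)
np.testing.assert_allclose(res, np.dot(np_x, np_vec))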