Unverified commit deb510d4, authored by tangwei12, committed by GitHub

cvm op feature (#17081)

cvm without LoD.
Parent 554d3a71
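For context, this change lets the cvm op accept a plain Tensor input (no LoD) in addition to the original LoDTensor path; the per-row transform itself is unchanged. A minimal numpy sketch of that transform, assuming the first two columns of X hold the CVM counters (e.g. show/click) as in the op's usual usage — the concrete numbers below are made up for illustration:

import numpy as np

# Hypothetical row: two leading CVM columns followed by ordinary features.
x = np.array([3.0, 1.0, 0.5, 0.2], dtype=np.float32)

# use_cvm=True: keep every column, log-transform the two CVM columns.
y_cvm = x.copy()
y_cvm[0] = np.log(y_cvm[0] + 1)
y_cvm[1] = np.log(y_cvm[1] + 1) - y_cvm[0]

# use_cvm=False: drop the two CVM columns entirely.
y_no_cvm = x[2:]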
@@ -22,36 +22,60 @@ namespace operators {
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
template <typename T>
void CvmComputeKernel(const bool use_cvm, const int64_t item_width, const T** X,
                      T** Y) {
  const auto cvm_offset = use_cvm ? 0 : 2;

  std::memcpy(*Y, *X + cvm_offset, (item_width - cvm_offset) * sizeof(T));

  if (use_cvm) {
    (*Y)[0] = log((*Y)[0] + 1);
    (*Y)[1] = log((*Y)[1] + 1) - (*Y)[0];
  }

  (*X) += item_width;
  (*Y) += item_width - cvm_offset;
}

template <typename T>
void CvmGradComputeKernel(const bool use_cvm, const int64_t item_width,
                          const T& CVM, const T** DY, T** DX) {
  const auto cvm_offset = use_cvm ? 0 : 2;

  std::memcpy(*DX + cvm_offset, *DY, (item_width - cvm_offset) * sizeof(T));

  (*DX)[0] = (&CVM)[0];
  (*DX)[1] = (&CVM)[1];

  (*DX) += item_width;
  (*DY) += item_width - cvm_offset;
}
template <typename T>
class CVMOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    const auto* x = context.Input<LoDTensor>("X");
    const T* x_data = x->data<T>();

    auto batch_size = x->dims()[0];
    auto item_size = x->numel() / batch_size;
    auto use_cvm = context.Attr<bool>("use_cvm");

    auto* y = context.Output<LoDTensor>("Y");
    T* y_data = y->mutable_data<T>(context.GetPlace());

    // for Input X do not have Lod Information.
    if (x->NumLevels() == 0) {
      for (int i = 0; i < batch_size; i++) {
        CvmComputeKernel(use_cvm, item_size, &x_data, &y_data);
      }
    } else {
      auto lod = x->lod()[0];
      for (int i = 0; i < lod.size() - 1; ++i) {
        for (int j = 0; j < lod[i + 1] - lod[i]; ++j) {
          CvmComputeKernel(use_cvm, item_size, &x_data, &y_data);
        }
      }
    }
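A note on the two branches above: with level-0 LoD offsets the nested loops visit every row of X exactly once, so both paths apply the same per-row kernel; the new branch simply iterates the batch dimension directly when no LoD is attached. A small sketch with made-up offsets illustrates the equivalence:

# Hypothetical LoD offsets: two sequences of length 2 and 3.
lod = [0, 2, 5]
rows = [r for i in range(len(lod) - 1) for r in range(lod[i], lod[i + 1])]
assert rows == list(range(lod[-1]))  # every row is visited exactly once, in order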
@@ -62,42 +86,39 @@ template <typename T>
class CVMGradOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* dx = context.Output<LoDTensor>(framework::GradVarName("X"));
    T* dx_data = dx->mutable_data<T>(context.GetPlace());

    const Tensor* cvm = context.Input<Tensor>("CVM");
    const T* cvm_data = cvm->data<T>();

    const auto* dOut =
        context.Input<framework::LoDTensor>(framework::GradVarName("Y"));
    const T* dout_data = dOut->data<T>();

    auto use_cvm = context.Attr<bool>("use_cvm");

    auto offset = 2;
    auto batch_size = dx->dims()[0];
    auto item_size = dx->numel() / batch_size;

    // for Input X do not have Lod Information.
    if (dx->NumLevels() == 0) {
      for (int x = 0; x < batch_size; ++x) {
        CvmGradComputeKernel(use_cvm, item_size, *cvm_data, &dout_data,
                             &dx_data);
        cvm_data += offset;
      }
    } else {
      auto lod = dx->lod()[0];
      int seq_num = static_cast<int>(lod.size()) - 1;
      for (int i = 0; i < seq_num; ++i) {
        for (int j = 0; j < lod[i + 1] - lod[i]; ++j) {
          CvmGradComputeKernel(use_cvm, item_size, *cvm_data, &dout_data,
                               &dx_data);
        }
        cvm_data += offset;
      }
    }
  }
};
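The backward pass mirrors the forward: the first two entries of each dX row are filled from the CVM tensor (so no real gradient flows into the CVM columns), and the remaining entries copy dY, shifted by two when the forward pass dropped the CVM columns. A vectorized numpy sketch of that rule, assuming one CVM row per batch row as in the new no-LoD tests below:

import numpy as np

def cvm_grad_sketch(dy, cvm, item_width, use_cvm):
    # First two columns of dX come from CVM, not from the upstream gradient.
    batch_size = dy.shape[0]
    dx = np.empty((batch_size, item_width), dtype=np.float32)
    dx[:, 0:2] = cvm[:, 0:2]
    # Remaining columns copy dY; when use_cvm is False the forward output
    # had no CVM columns, so dY is two columns narrower than dX.
    dx[:, 2:] = dy[:, 2:] if use_cvm else dy
    return dx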
@@ -19,15 +19,50 @@ from op_test import OpTest
import unittest
def cvm_compute(X, item_width, use_cvm):
    cvm_offset = 0 if use_cvm else 2
    batch_size = X.shape[0]

    Y = np.ones([batch_size, item_width - cvm_offset], np.float32)

    for idx in range(batch_size):
        if use_cvm:
            Y[idx] = X[idx]
            Y[idx][0] = log(Y[idx][0] + 1)
            Y[idx][1] = log(Y[idx][1] + 1) - Y[idx][0]
        else:
            Y[idx] = X[idx][2:]

    return Y
def cvm_grad_compute(DY, CVM, item_width, use_cvm):
    batch_size = DY.shape[0]
    DX = np.ones([batch_size, item_width], np.float32)

    for idx in range(batch_size):
        DX[idx][0] = CVM[idx][0]
        DX[idx][1] = CVM[idx][1]

        if use_cvm:
            DX[idx][2:] = DY[idx][2:]
        else:
            DX[idx][2:] = DY[idx]

    return DX
class TestCVMOpWithLodTensor(OpTest):
    """
    Test cvm op with discrete one-hot labels.
    """

    def setUp(self):
        self.op_type = "cvm"
        self.use_cvm = True

        batch_size = 8
        dims = 11

        lod = [[1]]
        self.inputs = {
            'X': (np.random.uniform(0, 1, [1, dims]).astype("float32"), lod),
@@ -43,5 +78,55 @@ class TestCVMOp(OpTest):
        self.check_output()
class TestCVMOpWithOutLodTensor1(OpTest):
    """
    Test cvm op with discrete one-hot labels.
    """

    def setUp(self):
        self.op_type = "cvm"
        self.use_cvm = True

        batch_size = 2
        item_width = 11

        input = np.random.uniform(0, 1,
                                  (batch_size, item_width)).astype('float32')
        output = cvm_compute(input, item_width, self.use_cvm)
        cvm = np.array([[0.6, 0.4]]).astype("float32")

        self.inputs = {'X': input, 'CVM': cvm}
        self.attrs = {'use_cvm': self.use_cvm}
        self.outputs = {'Y': output}

    def test_check_output(self):
        self.check_output()
class TestCVMOpWithOutLodTensor2(OpTest):
    """
    Test cvm op with discrete one-hot labels.
    """

    def setUp(self):
        self.op_type = "cvm"
        self.use_cvm = False

        batch_size = 2
        item_width = 11

        input = np.random.uniform(0, 1,
                                  (batch_size, item_width)).astype('float32')
        output = cvm_compute(input, item_width, self.use_cvm)
        cvm = np.array([[0.6, 0.4]]).astype("float32")

        self.inputs = {'X': input, 'CVM': cvm}
        self.attrs = {'use_cvm': self.use_cvm}
        self.outputs = {'Y': output}

    def test_check_output(self):
        self.check_output()
if __name__ == '__main__':
    unittest.main()