Commit d697b6a3 authored by wanghaoshuang

Modified code using LoDTensor

Parent 00ad7512
@@ -103,25 +103,19 @@ void LoDTensor::ShrinkInLevel(size_t level, size_t elem_begin,
   lod_ = new_lod;
 }
 
-Vector<size_t> expand_lod(Vector<size_t> level, Vector<size_t> starts,
+Vector<size_t> expand_lod(Vector<size_t> level, Vector<size_t> indexes,
                           Vector<size_t> scales, bool repeat) {
   Vector<size_t> result;
   result.push_back(level[0]);
-  size_t p = 0, start = 0, end = 0;
+  size_t start = 0, end = 0;
   if (!repeat) {
     for (size_t i = 0; i < scales.size(); ++i) {
       result.push_back(result.back() + scales[i] * (level[i + 1] - level[i]));
     }
   } else {
     for (size_t i = 0; i < scales.size(); ++i) {
-      while (starts[i] != level[p] && p < level.size()) {
-        ++p;
-      }
-      start = p;
-      while (starts[i + 1] != level[p] && p < level.size()) {
-        ++p;
-      }
-      end = p + 1;
+      start = indexes[i];
+      end = indexes[i + 1];
       for (size_t j = 0; j < scales[i]; ++j) {
         for (size_t index = start; index < end - 1; ++index) {
           result.push_back(result.back() + level[index + 1] - level[index]);
......
@@ -123,7 +123,7 @@ class LoDTensor : public Tensor {
   LoD lod_;
 };
 
-Vector<size_t> expand_lod(Vector<size_t> level, Vector<size_t> starts,
+Vector<size_t> expand_lod(Vector<size_t> level, Vector<size_t> indexes,
                           Vector<size_t> scales, bool repeat);
 }  // namespace framework
......
@@ -77,15 +77,15 @@ by lod of input(Y) or 'repeat' attribute.
 Case 1:
 
 Given a 2-level LoDTensor X:
-    X.data = [1, 2 , 3, 4]
+    X.data = [a, b , c, d]
     X.lod = [[0, 3, 4], [0, 1, 3, 4]]
 and
     repeat = 2
 then we get 3-level LoDTensor
-    Out.data = [1, 2, 3, 1, 2, 3, 4, 4]
     Out.lod = [[0, 6, 8],
                [0, 3, 6, 7, 8],
                [0, 1, 3, 4, 6, 7, 8]]
+    Out.data = [a, b, c, a, b, c, d, d]
 
 Case 2:
......
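For readers following the Case 1 example above: a minimal Python sketch (not the operator's actual kernel; the function name `seq_expand_repeat` and its looping are illustrative) that reproduces the documented output from X.data, X.lod (absolute offsets) and repeat = 2, for exactly this 2-level-input / 3-level-output case:

def seq_expand_repeat(x_data, x_lod, repeat):
    # x_lod holds absolute offsets, as in Case 1 above.
    out_data, lod0, lod1, lod2 = [], [0], [0], [0]
    for i in range(len(x_lod[0]) - 1):            # each top-level sequence of X
        begin, end = x_lod[0][i], x_lod[0][i + 1]
        for _ in range(repeat):                   # repeat the whole sequence
            out_data += x_data[begin:end]
            lod1.append(lod1[-1] + (end - begin))
            inner = [j for j in x_lod[1] if begin <= j <= end]
            for a, b in zip(inner, inner[1:]):    # replicate the inner boundaries
                lod2.append(lod2[-1] + (b - a))
        lod0.append(lod0[-1] + repeat * (end - begin))
    return out_data, [lod0, lod1, lod2]

out, lod = seq_expand_repeat(['a', 'b', 'c', 'd'], [[0, 3, 4], [0, 1, 3, 4]], 2)
# out == ['a', 'b', 'c', 'a', 'b', 'c', 'd', 'd']
# lod == [[0, 6, 8], [0, 3, 6, 7, 8], [0, 1, 3, 4, 6, 7, 8]]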
@@ -33,15 +33,12 @@ class SeqExpandKernel : public framework::OpKernel<T> {
     auto x_dims = x->dims();
     auto x_lod = x->lod();
 
-    if (x_lod.size() == 0) {
-      framework::Vector<size_t> level;
-      for (int i = 0; i < x->dims()[0] + 1; ++i) {
-        level.push_back(i);
-      }
-      x_lod.push_back(level);
-    } else {
-      x_lod.insert(x_lod.begin(), x_lod[0]);
-    }
+    framework::Vector<size_t> level;
+    size_t num = (x_lod.size() == 0) ? (x->dims()[0] + 1) : x_lod[0].size();
+    for (int i = 0; i < num; ++i) {
+      level.push_back(i);
+    }
+    x_lod.push_back(level);
 
     size_t repeat = static_cast<size_t>(context.Attr<int>("repeat"));
     framework::Vector<size_t> scales;
@@ -56,19 +53,27 @@ class SeqExpandKernel : public framework::OpKernel<T> {
     } else {
       auto* y = context.Input<LoDTensor>("Y");
       auto y_lod = y->lod();
-      for (int i = 0; i < y_lod[0].size() - 1; ++i) {
-        scales.push_back((y_lod[0][i + 1] - y_lod[0][i]) /
-                         (x_lod[0][i + 1] - x_lod[0][i]));
+      auto y_abs_lod = y_lod.ToAbsOffset();
+      auto x_abs_lod = x_lod.ToAbsOffset();
+      for (int i = 0; i < y_abs_lod[0].size() - 1; ++i) {
+        scales.push_back((y_abs_lod[0][i + 1] - y_abs_lod[0][i]) /
+                         (x_abs_lod[0][i + 1] - x_abs_lod[0][i]));
       }
       out->Resize(y->dims());
     }
+    framework::Vector<size_t> indexes;
+    for (size_t i = 0; i < x_lod[0].size(); ++i) {
+      indexes.push_back(x_lod[0][i]);
+    }
     framework::LoD out_lod;
-    auto level0 = framework::expand_lod(x_lod[0], x_lod[0], scales, false);
+    auto level0 = framework::expand_lod(indexes, x_lod[0], scales, false);
     out_lod.push_back(level0);
     for (int i = 1; i < x_lod.size(); ++i) {
-      out_lod.push_back(
-          framework::expand_lod(x_lod[i], x_lod[0], scales, true));
+      for (int j = 0; j < indexes.size(); ++j) {
+        indexes[j] = x_lod[i - 1][indexes[j]];
+      }
+      out_lod.push_back(framework::expand_lod(x_lod[i], indexes, scales, true));
     }
 
     size_t element_len = framework::product(x_dims) / x_dims[0];
@@ -80,7 +85,7 @@ class SeqExpandKernel : public framework::OpKernel<T> {
     if (platform::is_cpu_place(place)) {
       auto& cpu_place = boost::get<platform::CPUPlace>(place);
       for (size_t i = 0; i < scales.size(); ++i) {
-        count = element_len * (x_lod[0][i + 1] - x_lod[0][i]);
+        count = element_len * (x_abs_lod[0][i + 1] - x_abs_lod[0][i]);
         for (size_t j = 0; j < scales[i]; ++j) {
           memory::Copy(cpu_place, out_data, cpu_place, x_data,
                        sizeof(T) * count);
@@ -95,7 +100,7 @@ class SeqExpandKernel : public framework::OpKernel<T> {
                          context.device_context())
                          .stream();
       for (size_t i = 0; i < scales.size(); ++i) {
-        count = element_len * (x_lod[0][i + 1] - x_lod[0][i]);
+        count = element_len * (x_abs_lod[0][i + 1] - x_abs_lod[0][i]);
         for (size_t j = 0; j < scales[i]; ++j) {
           memory::Copy(gpu_place, out_data, gpu_place, x_data,
                        sizeof(T) * count, stream);
@@ -109,6 +114,11 @@ class SeqExpandKernel : public framework::OpKernel<T> {
     }
     out->set_lod(out_lod);
+    for (size_t i = 0; i < out_lod.size(); i++) {
+      for (size_t j = 0; j < out_lod[i].size(); j++) {
+        LOG(INFO) << "lod[" << i << "][" << j << "] = " << out_lod[i][j];
+      }
+    }
   }
 };
@@ -121,13 +131,14 @@ class SeqExpandGradKernel : public framework::OpKernel<T> {
     auto* out = context.Input<LoDTensor>("Out");
     auto* d_x = context.Output<LoDTensor>(framework::GradVarName("X"));
     auto out_lod = out->lod();
+    auto out_abs_lod = out_lod.ToAbsOffset();
     d_x->set_lod(x->lod());
     const T* d_out_data = d_out->data<T>();
     auto d_out_dims = d_out->dims();
     T* d_x_data = d_x->mutable_data<T>(context.GetPlace());
     size_t element_len = framework::product(d_out_dims) / d_out_dims[0];
     for (size_t i = 0; i < out->NumElements(); ++i) {
-      size_t ele_count = out_lod[0][i + 1] - out_lod[0][i];
+      size_t ele_count = out_abs_lod[0][i + 1] - out_abs_lod[0][i];
       size_t repeat = out->NumElements(0, i);
       Eigen::TensorMap<Eigen::Tensor<const T, 2>> d_out_t(
           d_out_data, static_cast<int>(repeat),
......
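The kernel changes above replace direct reads of x_lod[0] / out_lod[0] with the results of ToAbsOffset(), since the stored LoD is now relative: each upper level indexes into the level below it. A rough Python equivalent of that conversion, mirroring the toAbsOffset helper added to the test file below (not the framework's C++ implementation; the sample values are the relative x_lod from TestSeqExpandCase1):

def to_abs_offset(lod):
    # Bottom-up: rewrite each upper level so its entries become offsets into
    # the data, by looking them up in the (already absolute) level below.
    lod = [level[:] for level in lod]   # work on a copy
    for i in range(len(lod) - 2, -1, -1):
        for j in range(len(lod[i])):
            lod[i][j] = lod[i + 1][lod[i][j]]
    return lod

print(to_abs_offset([[0, 2, 3], [0, 2, 5, 7]]))  # -> [[0, 5, 7], [0, 2, 5, 7]]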
@@ -246,6 +246,8 @@ class OpTest(unittest.TestCase):
             else:
                 actual = np.array(self.scope.find_var(out_name).get_tensor())
                 expect = self.outputs[out_name]
+                print "actual= %s" % actual
+                print "expect = %s" % expect
                 self.assertTrue(
                     np.allclose(
                         actual, expect, atol=atol),
......
@@ -27,7 +27,15 @@ def repeat_array(array, starts, times):
     return newlist
 
 
+def toAbsOffset(lod):
+    for i in range(len(lod) - 2, -1, -1):
+        for j in range(len(lod[i])):
+            lod[i][j] = lod[i + 1][lod[i][j]]
+    return lod
+
+
 class TestSeqExpand(OpTest):
+    #class TestSeqExpand():
     def set_data(self):
         x_data = np.random.uniform(0.1, 1, [4, 1]).astype('float32')
         self.inputs = {'X': x_data}
@@ -35,23 +43,26 @@ class TestSeqExpand(OpTest):
     def compute(self):
         x = self.inputs['X']
+        print "x= %s" % x
         x_data, x_lod = x if type(x) == tuple else (x, None)
-        if not x_lod:
-            x_lod = [[i for i in range(1 + x_data.shape[0])]]
-        else:
-            x_lod = [x_lod[0]] + x_lod
+        n = 1 + x_data.shape[0] if not x_lod else len(x_lod[0])
+        x_lod = [[i for i in range(n)]] + (x_lod or [])
+        x_abs_lod = toAbsOffset(x_lod)
         if self.repeat:
+            print "repeat= %s" % self.repeat
             self.attrs = {'repeat': self.repeat}
             repeats = (len(x_lod[0]) - 1) * [self.repeat]
         else:
             y_data, y_lod = self.inputs['Y']
-            repeats = [((y_lod[0][i + 1] - y_lod[0][i]) /
-                        (x_lod[0][i + 1] - x_lod[0][i]))
-                       for i in range(len(y_lod[0]) - 1)]
-        out_lod = [repeat(x_lod[0], x_lod[0], repeats, True)] + [
-            repeat(lod, x_lod[0], repeats, False) for lod in x_lod[1:]
-        ]
-        out = repeat_array(x_data.tolist(), x_lod[0], repeats)
+            print "y_lod: %s" % y_lod
+            y_abs_lod = toAbsOffset(y_lod)
+            repeats = [((y_abs_lod[0][i + 1] - y_abs_lod[0][i]) /
+                        (x_abs_lod[0][i + 1] - x_abs_lod[0][i]))
+                       for i in range(len(y_abs_lod[0]) - 1)]
+        #out_lod = [repeat(x_lod[0], x_lod[0], repeats, True)] + [
+        #    repeat(lod, x_lod[0], repeats, False) for lod in x_lod[1:]
+        #]
+        out = repeat_array(x_data.tolist(), x_abs_lod[0], repeats)
         self.outputs = {'Out': out}
 
     def setUp(self):
@@ -69,7 +80,7 @@ class TestSeqExpand(OpTest):
 class TestSeqExpandCase1(TestSeqExpand):
     def set_data(self):
         x_data = np.random.uniform(0.1, 1, [7, 1]).astype('float32')
-        x_lod = [[0, 5, 7], [0, 2, 5, 7]]
+        x_lod = [[0, 2, 3], [0, 2, 5, 7]]
         self.inputs = {'X': (x_data, x_lod)}
         self.repeat = 2
@@ -95,10 +106,11 @@ class TestSeqExpandCase4(TestSeqExpand):
         x_data = np.random.uniform(0.1, 1, [5, 1]).astype('float32')
         x_lod = [[0, 2, 5]]
         y_data = np.random.uniform(0.1, 1, [13, 1]).astype('float32')
-        y_lod = [[0, 4, 13], [0, 2, 4, 7, 10, 13]]
+        y_lod = [[0, 2, 5], [0, 2, 4, 7, 10, 13]]
         self.inputs = {'X': (x_data, x_lod), 'Y': (y_data, y_lod)}
         self.repeat = None
 
 
 if __name__ == '__main__':
     unittest.main()
+    # TestSeqExpandCase4().setUp()