Commit b0736b8b authored by Xinqi Li

more df functions


Former-commit-id: 506df293b368d79a785cc68461c946ad414b79a0
Parent 7fd121f7
@@ -7,23 +7,23 @@ namespace df {
namespace {
void AutoPlacementMemoryDemo() {
-  Tensor var(Shape({4, 4}), [](size_t index) { return index % 2 ? 0 : 1000; });
+  Tensor var(Shape({4, 4}), [](size_t index) { return index % 2 ? 0 : 100; });
Tensor row_ones(Shape({var.shape().At(0)}), 1);
Tensor col_ones(Shape({var.shape().At(1)}), 1);
Tensor epsilon(0.000000001);
-  FOR_RANGE(int, i, 0, 2000) {
+  FOR_RANGE(int, i, 0, 1000) {
double lr = 1;
if (i < 400) {
lr = 0.1;
-    } else if (i < 800) {
+    } else if (i < 600) {
lr = 0.01;
-    } else if (i < 1200) {
+    } else if (i < 800) {
lr = 0.001;
} else {
lr = 0.0001;
}
-    Tensor x = Add(Square(FixedExpectation(Update(&var, lr), 1)), epsilon);
+    Tensor x = Add(Square((FixedExpectation(Update(&var, lr), 1))), epsilon);
const auto& x_copies = Clone(x, 4);
Tensor row = MatrixRowSum(x_copies.at(0));
Tensor col = MatrixColSum(x_copies.at(1));
......
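Note: this first hunk only retunes the demo, not the operators. The variable now initializes alternating elements to 100 instead of 1000, the iteration count drops from 2000 to 1000, and the decay boundaries move from 400/800/1200 to 400/600/800, so the learning rate still steps through 0.1 → 0.01 → 0.001 → 0.0001, just over the shorter run.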
@@ -64,6 +64,20 @@ Tensor Minus(const Tensor& input) {
});
}
+Tensor Relu(const Tensor& input) {
+  std::shared_ptr<Buffer> out(new Buffer(input.buffer()));
+  FOR_RANGE(int, i, 0, out->Size()) {
+    if (input.At(i) < 0) { out->At(i) = 0; }
+  }
+  return Tensor(out, [=](const Buffer& out_diff) {
+    Buffer input_diff(out_diff);
+    FOR_RANGE(int, i, 0, input_diff.Size()) {
+      if (input.At(i) < 0) { input_diff.At(i) = 0; }
+    }
+    input.HandleDiff(input_diff);
+  });
+}
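Note: the new Relu clamps negative entries in the forward pass and captures `input` in the backward closure, so the same sign test masks the incoming gradient. A minimal usage sketch with hypothetical values (relying only on the Tensor constructors shown in the demo above):

```cpp
// Hypothetical example, not part of this commit.
Tensor x(Shape({4}), [](size_t i) { return i % 2 ? -1.0 : 2.0; });  // {2, -1, 2, -1}
Tensor y = Relu(x);  // forward buffer: {2, 0, 2, 0}
// Backward: the diff handler zeroes out_diff wherever x < 0,
// then forwards the masked gradient via x.HandleDiff(...).
```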
Tensor Abs(const Tensor& input) {
std::shared_ptr<Buffer> out(new Buffer(input.buffer()));
FOR_RANGE(int, i, 0, out->Size()) {
@@ -147,19 +161,28 @@ Tensor Add(const Tensor& a, const Tensor& b) {
}
Tensor Max(const Tensor& a, const Tensor& b) {
-  CHECK(a.Size() == 1 && b.Size() == 1);
-  bool is_b_gt_a = (b.At(0) > a.At(0));
-  std::shared_ptr<Buffer> out(new Buffer(is_b_gt_a ? b.buffer() : a.buffer()));
+  CHECK(a.shape().dim_vec().size() == b.shape().dim_vec().size());
+  FOR_RANGE(int, i, 0, a.shape().dim_vec().size()) {
+    CHECK(a.shape().dim_vec().at(i) == b.shape().dim_vec().at(i));
+  }
+  std::shared_ptr<Buffer> out(new Buffer(a.buffer()));
+  FOR_RANGE(size_t, i, 0, out->Size()) {
+    out->At(i) = std::max(a.At(i), b.At(i));
+  }
return Tensor(out, [=](const Buffer& out_diff) {
-    Buffer zero_diff(out_diff);
-    zero_diff.At(0) = 0;
-    if (is_b_gt_a) {
-      a.HandleDiff(zero_diff);
-      b.HandleDiff(out_diff);
-    } else {
-      a.HandleDiff(out_diff);
-      b.HandleDiff(zero_diff);
+    Buffer a_diff(out_diff.shape(), 0);
+    Buffer b_diff(out_diff.shape(), 0);
+    FOR_RANGE(size_t, i, 0, out_diff.Size()) {
+      if (a.At(i) > b.At(i)) {
+        a_diff.At(i) = out_diff.At(i);
+        b_diff.At(i) = 0;
+      } else {
+        b_diff.At(i) = out_diff.At(i);
+        a_diff.At(i) = 0;
+      }
     }
+    a.HandleDiff(a_diff);
+    b.HandleDiff(b_diff);
});
}
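Note: Max(a, b) is generalized here from a scalar-only max to an elementwise max over identically shaped tensors, and the backward pass routes each gradient element to whichever input was larger at that position; on ties the strict `>` sends it to `b`. (The explicit `= 0` stores are redundant since both diff buffers are zero-initialized, but harmless.) A small sketch with hypothetical values:

```cpp
// Hypothetical example, not part of this commit.
Tensor a(Shape({2}), [](size_t i) { return i ? 5.0 : 1.0; });  // {1, 5}
Tensor b(Shape({2}), [](size_t) { return 3.0; });              // {3, 3}
Tensor m = Max(a, b);  // {3, 5}
// Backward: out_diff.At(0) goes to b (3 > 1), out_diff.At(1) goes to a (5 > 3).
```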
@@ -190,6 +213,23 @@ Tensor Max(const Tensor& input) {
});
}
+Tensor Min(const Tensor& input) {
+  double min_value = std::numeric_limits<double>::max();
+  size_t min_index = 0;
+  FOR_RANGE(int, i, 0, input.Size()) {
+    if (input.At(i) < min_value) {
+      min_value = input.buffer().At(i);
+      min_index = i;
+    }
+  }
+  std::shared_ptr<Buffer> out(new Buffer(Shape({1}), min_value));
+  return Tensor(out, [=](const Buffer& out_diff) {
+    Buffer input_diff(input.shape(), 0);
+    input_diff.At(min_index) = out_diff.At(0);
+    input.HandleDiff(input_diff);
+  });
+}
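Note: Min mirrors the existing Max(input) reduction: the forward pass records the index of the minimum (the first one, on ties, thanks to the strict `<`), and the backward pass delivers the entire scalar gradient to that single element, with zeros everywhere else. A sketch with hypothetical values:

```cpp
// Hypothetical example, not part of this commit.
Tensor x(Shape({3}), [](size_t i) { return double(3 - i); });  // {3, 2, 1}
Tensor lo = Min(x);  // scalar tensor holding 1; min_index == 2
// Backward: input_diff == {0, 0, out_diff.At(0)}.
```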
Tensor Variance(const Tensor& input) {
auto copies = Clone(input, 2);
return Avg(Square(Sub(copies.at(0), Avg(copies.at(1)))));
@@ -200,6 +240,11 @@ Tensor AvgAbsDeviation(const Tensor& input) {
return Avg(Abs(Sub(copies.at(0), Avg(copies.at(1)))));
}
+Tensor MaxDeviation(const Tensor& input) {
+  auto copies = Clone(input, 2);
+  return Sub(Max(copies.at(0)), Min(copies.at(1)));
+}
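Note: MaxDeviation is the range of the tensor, Max(input) - Min(input). Clone(input, 2) gives each reduction its own handle so both backward paths can reach the same variable; the net gradient is +d at the argmax element and -d at the argmin element. For example (hypothetical values):

```cpp
// Hypothetical example, not part of this commit.
Tensor x(Shape({3}), [](size_t i) { return double(i * i); });  // {0, 1, 4}
Tensor range = MaxDeviation(x);  // 4 - 0 = 4
```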
Tensor Sum(const Tensor& input) {
double sum = 0;
FOR_RANGE(int, i, 0, input.Size()) { sum += input.At(i); }
@@ -219,6 +264,41 @@ Tensor Avg(const Tensor& input) {
return Tensor(out, [=](const Buffer& out_diff) { sum.HandleDiff(out_diff); });
}
+Tensor Mul(const Tensor& a, const Tensor& b) {
+  Tensor big = a;
+  Tensor small = b;
+  if (a.Size() < b.Size()) {
+    big = b;
+    small = a;
+  }
+  CHECK(big.Size() % small.Size() == 0);
+  std::shared_ptr<Buffer> out(new Buffer(big.buffer()));
+  size_t small_size = small.Size();
+  size_t group_size = big.Size() / small_size;
+  FOR_RANGE(int, i, 0, small_size) {
+    FOR_RANGE(int, j, 0, group_size) {
+      out->At(i * group_size + j) *= small.At(i);
+    }
+  }
+  return Tensor(out, [=](const Buffer& out_diff) {
+    Buffer big_diff(out_diff);
+    FOR_RANGE(int, i, 0, small_size) {
+      FOR_RANGE(int, j, 0, group_size) {
+        big_diff.At(i * group_size + j) *= small.At(i);
+      }
+    }
+    big.HandleDiff(big_diff);
+    Buffer small_diff(small.shape(), 0);
+    FOR_RANGE(int, i, 0, small_size) {
+      FOR_RANGE(int, j, 0, group_size) {
+        size_t index = i * group_size + j;
+        small_diff.At(i) += out_diff.At(index) * big.At(index);
+      }
+    }
+    small.HandleDiff(small_diff);
+  });
+}
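Note: Mul implements a grouped broadcast multiply: the larger operand is split into small.Size() contiguous groups of group_size elements, and small.At(i) scales group i; when the sizes match, group_size is 1 and it degenerates to an elementwise product. The backward pass applies the product rule per group: the big operand's gradient is out_diff scaled by the matching small element, while each small element accumulates the dot product of out_diff and big over its group. A worked sketch with hypothetical values:

```cpp
// Hypothetical example, not part of this commit.
Tensor big(Shape({4}), [](size_t i) { return double(i + 1); });    // {1, 2, 3, 4}
Tensor scale(Shape({2}), [](size_t i) { return i ? 10.0 : 1.0; }); // {1, 10}
Tensor out = Mul(big, scale);  // group_size == 2: {1*1, 2*1, 3*10, 4*10} = {1, 2, 30, 40}
// Backward: big_diff   = {d0*1, d1*1, d2*10, d3*10}
//           scale_diff = {d0*1 + d1*2, d2*3 + d3*4}
```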
Tensor ElemWiseMul(const Tensor& a, const Tensor& b) {
CHECK(a.Size() == b.Size());
std::shared_ptr<Buffer> out(new Buffer(a.buffer()));
......
@@ -30,18 +30,26 @@ Tensor Sub(const Tensor& a, const Tensor& b);
Tensor ElemWiseMul(const Tensor& a, const Tensor& b);
+Tensor Mul(const Tensor& a, const Tensor& b);
Tensor Reciprocal(const Tensor& input);
Tensor Max(const Tensor& a, const Tensor& b);
Tensor Max(const Tensor& a);
+Tensor Relu(const Tensor& input);
+Tensor Min(const Tensor& a);
Tensor Sum(const Tensor& a);
Tensor Avg(const Tensor& a);
Tensor Variance(const Tensor& a);
+Tensor MaxDeviation(const Tensor& a);
Tensor AvgAbsDeviation(const Tensor& a);
Tensor Square(const Tensor& input);
......