提交 984225ec 编写于 作者: D dongzhihong

"fix operator"

上级 2b3e3621
......@@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include "paddle/framework/operator.h"
#include <algorithm>
#include <iterator>
namespace paddle {
namespace framework {
......@@ -95,6 +95,16 @@ std::string OperatorBase::DebugString() const {
ss << ", ";
}
}
ss << "), ";
ss << "Attrs:(";
size_t i = 0;
for (auto& attr : attrs_) {
ss << attr.first;
if (i != attrs_.size() - 1) {
ss << ", ";
}
i++;
}
ss << ").";
return ss.str();
}
......
......@@ -13,28 +13,12 @@
limitations under the License. */
#include "paddle/operators/random_op.h"
#include "glog/logging.h"
#include "paddle/framework/op_registry.h"
namespace paddle {
namespace operators {
// using paddle::platform::CPUPlace;
// template <paddle::platform::CPUPlace, typename T, typename DeviceContext>
template <typename T>
bool Gaussian(platform::CPUDeviceContext& ctx,
framework::Tensor* output,
const int size,
const T& mean,
const T& std,
const T& seed) {
auto g = ctx.RandGenerator(seed);
std::normal_distribution<double> distribution(mean, std);
for (int i = 0; i < size; ++i) {
output[i] = distribution(g());
}
return true;
}
class RandomOp : public framework::OperatorWithKernel {
protected:
void InferShape(
......@@ -42,11 +26,10 @@ protected:
const std::vector<framework::Tensor*>& outputs) const override {
PADDLE_ENFORCE(inputs.size() == 0, "Input size of RandomOp must be zero.");
PADDLE_ENFORCE(outputs.size() == 1, "Output size of RandomOp must be one.");
PADDLE_ENFORCE(inputs[0] != nullptr && outputs[0] != nullptr,
"Inputs/Outputs of RandomOp must all be set.");
PADDLE_ENFORCE(outputs[0] != nullptr,
"Outputs of RandomOp must all be set.");
outputs[0]->Resize(
framework::make_ddim(this->GetAttr<std::vector<int>>("shape")));
// outputs[0]->set_dims(context.op_.attrs_.at("shape"));
}
};
......
#include "paddle/operators/random_op.h"
#include "paddle/framework/op_registry.h"
namespace paddle {
namespace operators {
template <typename T>
bool Gaussian(platform::CUDADeviceContext &ctx, framework::Tensor* output,
const int size, const T& mean, const T& std, const T& seed) {
auto g = RandGenerator(seed);
return curandGenerateNormal(g, output, size, mean, std);
}
} // operators
} // paddle
typedef paddle::operators::RandomOpKernel<paddle::platform::GPUPlace, float>
RandomOpKernel_GPU_float;
......
......@@ -13,7 +13,9 @@ bool Gaussian(DeviceContext& ctx,
const int size,
const T& mean,
const T& std,
const T& seed);
const T& seed) {
return false;
}
template <typename T>
bool Gaussian(platform::CPUDeviceContext& ctx,
......@@ -21,14 +23,27 @@ bool Gaussian(platform::CPUDeviceContext& ctx,
const int size,
const T& mean,
const T& std,
const T& seed);
const T& seed) {
auto g = ctx.RandGenerator(seed);
std::normal_distribution<double> distribution(mean, std);
for (int i = 0; i < size; ++i) {
output[i] = distribution(g);
}
return true;
}
#ifndef PADDLE_ONLY_CPU
template <typename T>
bool Gaussian(platform::CUDADeviceContext& ctx,
framework::Tensor* output,
const int size,
const T& mean,
const T& std,
const T& seed);
const T& seed) {
auto g = RandGenerator(seed);
return curandGenerateNormal(g, output, size, mean, std);
}
#endif
template <typename Place, typename T>
class RandomOpKernel : public framework::OpKernel {
......@@ -45,41 +60,8 @@ public:
mean,
std,
seed);
// Gaussian<T, const platform::DeviceContext>(context.device_context_,
// output,
// framework::product(output->dims()),
// mean, std, seed);
// std::default_random_engine generator(seed);
// std::normal_distribution<double> distribution(mean, std);
// framework::EigenMatrix<T>::From(*output).device(*(
// context.GetEigenDevice<Place>())) =
// framework::EigenMatrix<T>::Random();
}
};
// using paddle::platform::CPUPlace;
// template<CPUPlace, typename T>
// class RandomOpKernel : public framework::OpKernel {
// public:
// void Compute(const framework::KernelContext& context) const override {
// std::unique_ptr<default_random_engine> generator(seed);
// for(size_t i=0; i < output->size(); ++i) {
// output[i] = distribution(generator());
// }
// }
// };
// using paddle::platform::GPUPlace;
// template<GPUPlace, typename T>
// class RandomOpKernel : public framework::OpKernel {
// public:
// void Compute(const framework::KernelContext& context) const override {
// }
// }
} // namespace operators
} // namespace paddle
......@@ -12,4 +12,5 @@ add_python_test(test_framework
test_mul_op.py
test_sigmoid_op.py
test_softmax_op.py
test_rowwise_add_op.py)
test_rowwise_add_op.py
test_random_op.py)
......@@ -15,13 +15,14 @@ class TestRandomOp(unittest.TestCase):
if scope.get_var(out) is None:
scope.create_var(out).get_tensor()
tensor = scope.get_var("Y").get_tensor()
tensor = scope.get_var("Out").get_tensor()
op.infer_shape(scope)
self.assertEqual([1000, 1000], tensor.shape())
ctx = core.DeviceContext.cpu_context()
op.run(scope, ctx)
self.assertAlmostEqual(numpy.std(tensor), 1.0)
self.assertAlmostEqual(numpy.mean(tensor), 5.0)
tensor_array = numpy.array(tensor)
self.assertAlmostEqual(numpy.std(tensor_array), 1.0)
self.assertAlmostEqual(numpy.mean(tensor_array), 5.0)
if __name__ == '__main__':
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册