Commit 709a9edd authored by ktlichkid

Code clean up

Parent 64509fd9
...
@@ -21,8 +21,6 @@ limitations under the License. */
 #include "paddle/fluid/framework/lod_tensor.h"
 #include "paddle/fluid/framework/op_registry.h"
-#include <iostream>
 namespace paddle {
 namespace operators {
...
@@ -239,17 +237,14 @@ class BeamSearchOp : public framework::OperatorWithKernel {
       PADDLE_ENFORCE(ctx->HasOutput(arg),
                      "BeamSearch need output argument '%s'", arg);
     }
-    std::cout << "Done Infer Shape\n";
   }
   framework::OpKernelType GetExpectedKernelType(
       const framework::ExecutionContext &ctx) const override {
-    std::cout << "Get Expected type 1\n";
     framework::OpKernelType kt = framework::OpKernelType(
         framework::ToDataType(
             ctx.Input<framework::LoDTensor>("pre_ids")->type()),
         platform::CPUPlace());
-    std::cout << "Get Expected type 2\n";
     return kt;
   }
 };
...
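The lines removed in the hunks above were unconditional std::cout traces that fired on every call to InferShape and GetExpectedKernelType. If similar tracing is ever needed again, a guarded logging macro keeps it out of normal runs. The sketch below is a hypothetical alternative, not part of this commit: it routes the same checkpoints through glog's VLOG, which PaddlePaddle already links against; the verbosity level 3 and the message text are illustrative assumptions.

// Hypothetical alternative to the removed std::cout tracing (not in this
// commit): VLOG(3) output only appears when the process runs with a matching
// verbosity, e.g. GLOG_v=3, so routine runs stay silent.
#include <glog/logging.h>

framework::OpKernelType GetExpectedKernelType(
    const framework::ExecutionContext &ctx) const override {
  VLOG(3) << "beam_search: resolving expected kernel type";
  framework::OpKernelType kt = framework::OpKernelType(
      framework::ToDataType(
          ctx.Input<framework::LoDTensor>("pre_ids")->type()),
      platform::CPUPlace());
  VLOG(3) << "beam_search: kernel will run on CPUPlace";
  return kt;
}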
...
@@ -23,8 +23,6 @@ limitations under the License. */
 #include "paddle/fluid/framework/lod_tensor.h"
 #include "paddle/fluid/framework/operator.h"
-#include <iostream>
 namespace paddle {
 namespace operators {
...
@@ -198,79 +196,25 @@ template <typename DeviceContext, typename T>
 class BeamSearchOpKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    std::cout << "Compute 1\n";
     auto ids_var = context.Input<framework::LoDTensor>("ids");
-    std::cout << "Compute 2\n";
     auto scores_var = context.Input<framework::LoDTensor>("scores");
-    std::cout << "Compute 3\n";
     auto pre_ids_var = context.Input<framework::LoDTensor>("pre_ids");
-    std::cout << "Compute 4\n";
     PADDLE_ENFORCE_NOT_NULL(ids_var);
-    std::cout << "Compute 5\n";
     PADDLE_ENFORCE_NOT_NULL(scores_var);
-    std::cout << "Compute 6\n";
     PADDLE_ENFORCE_NOT_NULL(pre_ids_var);
-    std::cout << "Compute 7\n";
-    // auto& ids = ids_var->Get<framework::LoDTensor>();
-    // auto& scores = scores_var->Get<framework::LoDTensor>();
-    // auto& pre_ids = pre_ids_var->Get<framework::LoDTensor>();
     size_t level = context.Attr<int>("level");
-    std::cout << "Compute 8\n";
     size_t beam_size = context.Attr<int>("beam_size");
-    std::cout << "Compute 9\n";
     int end_id = context.Attr<int>("end_id");
-    std::cout << "Compute 10\n";
     BeamSearch alg(*ids_var, *scores_var, level, beam_size, end_id);
-    std::cout << "Compute 11\n";
     auto selected_ids_var =
         context.Output<framework::LoDTensor>("selected_ids");
-    std::cout << "Compute 12\n";
     auto selected_scores_var =
         context.Output<framework::LoDTensor>("selected_scores");
-    std::cout << "Compute 13\n";
     PADDLE_ENFORCE_NOT_NULL(selected_ids_var);
-    std::cout << "Compute 14\n";
     PADDLE_ENFORCE_NOT_NULL(selected_scores_var);
-    std::cout << "Compute 15\n";
-    // auto& selected_ids_tensor =
-    //     *selected_ids_var->GetMutable<framework::LoDTensor>();
-    // auto& selected_scores_tensor =
-    //     *selected_scores_var->GetMutable<framework::LoDTensor>();
     alg(*pre_ids_var, selected_ids_var, selected_scores_var);
-    std::cout << "Compute 16\n";
   }
 };
-/*
-void RunImpl(const framework::Scope& scope,
-             const platform::Place& dev_place) const override {
-  auto ids_var = scope.FindVar(Input("ids"));
-  auto scores_var = scope.FindVar(Input("scores"));
-  auto pre_ids_var = scope.FindVar(Input("pre_ids"));
-  PADDLE_ENFORCE_NOT_NULL(ids_var);
-  PADDLE_ENFORCE_NOT_NULL(scores_var);
-  PADDLE_ENFORCE_NOT_NULL(pre_ids_var);
-  auto& ids = ids_var->Get<framework::LoDTensor>();
-  auto& scores = scores_var->Get<framework::LoDTensor>();
-  auto& pre_ids = pre_ids_var->Get<framework::LoDTensor>();
-  size_t level = Attr<int>("level");
-  size_t beam_size = Attr<int>("beam_size");
-  int end_id = Attr<int>("end_id");
-  BeamSearch alg(ids, scores, level, beam_size, end_id);
-  auto selected_ids_var = scope.FindVar(Output("selected_ids"));
-  auto selected_scores_var = scope.FindVar(Output("selected_scores"));
-  PADDLE_ENFORCE_NOT_NULL(selected_ids_var);
-  PADDLE_ENFORCE_NOT_NULL(selected_scores_var);
-  auto& selected_ids_tensor =
-      *selected_ids_var->GetMutable<framework::LoDTensor>();
-  auto& selected_scores_tensor =
-      *selected_scores_var->GetMutable<framework::LoDTensor>();
-  alg(pre_ids, &selected_ids_tensor, &selected_scores_tensor);
-}
-*/
 }  // namespace operators
 }  // namespace paddle
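The comment block deleted above was an older OperatorBase-style RunImpl that fetched variables directly from the Scope; the kernel that remains reads the same tensors through ExecutionContext, which is what allows it to be registered per device and per data type. That registration is outside this diff; the sketch below only illustrates, as an assumption, how an OperatorWithKernel op and a templated CPU kernel such as BeamSearchOpKernel are typically wired up with the op_registry macros. The maker class and the dtype list here are placeholders, not the actual registration in this branch.

// Illustrative sketch only: typical registration of an OperatorWithKernel op
// and its CPU kernel. The concrete maker class and the supported dtype list
// for beam_search in this branch may differ.
namespace ops = paddle::operators;

REGISTER_OPERATOR(beam_search, ops::BeamSearchOp, ops::BeamSearchOpMaker,
                  paddle::framework::EmptyGradOpMaker);
REGISTER_OP_CPU_KERNEL(
    beam_search,
    ops::BeamSearchOpKernel<paddle::platform::CPUDeviceContext, float>,
    ops::BeamSearchOpKernel<paddle::platform::CPUDeviceContext, int64_t>);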