Unverified · Commit 95337ebc authored by daquexian, committed by GitHub

add missing JUST (#5357)

* add missing JUST
Signed-off-by: daquexian <daquexian566@gmail.com>

* remove redundant header
Signed-off-by: daquexian <daquexian566@gmail.com>

* add missing JUST in master
Signed-off-by: daquexian <daquexian566@gmail.com>

* fix compile error on gcc5
Signed-off-by: daquexian <daquexian566@gmail.com>
Co-authored-by: oneflow-ci-bot <69100618+oneflow-ci-bot@users.noreply.github.com>
Parent: 75c4c84d
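Note: every change below is an instance of the same convention. A call that returns Maybe<void> (or Maybe<T>) must be unwrapped with JUST(...) inside a function that itself returns Maybe, so the error propagates to the caller, or with CHECK_JUST(...) at a boundary that cannot return a Maybe (constructors, callbacks, kernel Compute methods, tests), where failure aborts loudly instead of being silently dropped. The sketch below illustrates only this calling convention; OneFlow's real Maybe/JUST/CHECK_JUST are considerably richer, and the Maybe<int> hardcoded into this toy JUST stands in for the enclosing return type that the real macro deduces.

// maybe_sketch.cpp -- toy model of the Maybe/JUST/CHECK_JUST pattern.
// Relies on GNU statement expressions (gcc/clang), as OneFlow's macros do.
#include <cstdlib>
#include <iostream>
#include <optional>
#include <string>
#include <utility>

// Toy Maybe<T>: holds either a value or an error message.
template<typename T>
struct Maybe {
  std::optional<T> value;
  std::string error;
  bool IsOk() const { return value.has_value(); }
};

template<typename T>
Maybe<T> Ok(T v) {
  return {std::move(v), ""};
}

template<typename T>
Maybe<T> Err(std::string msg) {
  return {std::nullopt, std::move(msg)};
}

// JUST: unwrap on success, early-return the error otherwise. In this toy
// version it is only usable inside functions returning Maybe<int>.
#define JUST(expr)                                            \
  ({                                                          \
    auto maybe_ = (expr);                                     \
    if (!maybe_.IsOk()) { return Err<int>(maybe_.error); }    \
    *maybe_.value;                                            \
  })

// CHECK_JUST: unwrap on success, abort otherwise. For call sites with no
// Maybe channel to report through (callbacks, destructors, tests).
#define CHECK_JUST(expr)                                      \
  ({                                                          \
    auto maybe_ = (expr);                                     \
    if (!maybe_.IsOk()) {                                     \
      std::cerr << "check failed: " << maybe_.error << "\n";  \
      std::abort();                                           \
    }                                                         \
    *maybe_.value;                                            \
  })

Maybe<int> ParsePositive(int x) {
  if (x <= 0) { return Err<int>("expected a positive value"); }
  return Ok(x);
}

// Inside a Maybe-returning function, failures propagate upward with JUST;
// forgetting the JUST discards the error, which is exactly the bug class
// this commit fixes.
Maybe<int> Twice(int x) {
  int v = JUST(ParsePositive(x));
  return Ok(v * 2);
}

int main() {
  // At the outermost, non-Maybe boundary, CHECK_JUST asserts success.
  std::cout << CHECK_JUST(Twice(21)) << "\n";  // prints 42
  return 0;
}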
@@ -173,10 +173,11 @@ std::tuple<std::vector<Shape>, std::vector<const DType*>> GetTensorBufferShapesA
   std::vector<const DType*> dtypes;
   std::atomic<bool> synced(false);
-  PhysicalRun([&](InstructionsBuilder* builder) {
-    builder->AccessBlobByCallback(
-        tensor, [&synced](uint64_t of_blob_ptr) { synced = true; }, "const");
-  });
+  CHECK_JUST(PhysicalRun([&](InstructionsBuilder* builder) -> Maybe<void> {
+    JUST(builder->AccessBlobByCallback(
+        tensor, [&synced](uint64_t of_blob_ptr) { synced = true; }, "const"));
+    return Maybe<void>::Ok();
+  }));
   Global<ForeignLockHelper>::Get()->WithScopedRelease([&synced]() {
     while (!synced) {}
......
@@ -204,7 +204,7 @@ Maybe<TensorTuple> StackAutogradEngine::RunBackwardAndReturnInputsTensorGrad(
   return input_now_grads;
 }
 
-std::shared_ptr<FunctionNode> StackAutogradEngine::AddBackwardFuncPtr(
+Maybe<FunctionNode> StackAutogradEngine::AddBackwardFuncPtr(
     const std::string& op_type_name,
     const std::shared_ptr<const std::function<Maybe<void>(const TensorTuple&, TensorTuple*, bool)>>&
         backward_fn,
@@ -212,7 +212,7 @@ std::shared_ptr<FunctionNode> StackAutogradEngine::AddBackwardFuncPtr(
   // Firstly push function_node of tensor in stack which is leaf and requires_grad
   for (const std::shared_ptr<Tensor>& in_tensor : inputs) {
     if (in_tensor->is_leaf() && in_tensor->requires_grad()) {
-      if (!in_tensor->grad_fn_node()) { AddAccumulateFunctionNode(in_tensor); }
+      if (!in_tensor->grad_fn_node()) { JUST(AddAccumulateFunctionNode(in_tensor)); }
       StackFunctionNode* stack_function_node =
           dynamic_cast<StackFunctionNode*>(in_tensor->mut_grad_fn_node().get());
       if (!stack_function_node->is_in_stack()) {
@@ -229,7 +229,7 @@ std::shared_ptr<FunctionNode> StackAutogradEngine::AddBackwardFuncPtr(
   }
   func_node->set_is_in_stack(true);
   node_list_.push_front(func_node);
-  return func_node;
+  return std::static_pointer_cast<FunctionNode>(func_node);
 }
 
 void GraphFunctionNode::ReleaseData() {
@@ -413,7 +413,7 @@ Maybe<TensorTuple> GraphAutogradEngine::RunBackwardAndReturnInputsTensorGrad(
   return input_now_grads;
 }
 
-std::shared_ptr<FunctionNode> GraphAutogradEngine::AddBackwardFuncPtr(
+Maybe<FunctionNode> GraphAutogradEngine::AddBackwardFuncPtr(
     const std::string& op_type_name,
     const std::shared_ptr<const std::function<Maybe<void>(const TensorTuple&, TensorTuple*, bool)>>&
         backward_fn,
@@ -421,7 +421,7 @@ std::shared_ptr<FunctionNode> GraphAutogradEngine::AddBackwardFuncPtr(
   // Firstly push function_node of tensor in stack which is leaf and requires_grad
   for (const std::shared_ptr<Tensor>& in_tensor : inputs) {
     if (in_tensor->is_leaf() && in_tensor->requires_grad()) {
-      if (!in_tensor->grad_fn_node()) { AddAccumulateFunctionNode(in_tensor); }
+      if (!in_tensor->grad_fn_node()) { JUST(AddAccumulateFunctionNode(in_tensor)); }
     }
   }
......
@@ -79,7 +79,7 @@ class AutogradEngine {
                                                      bool create_graph) = 0;
   virtual void ClearEngine() = 0;
   // Builds FunctionNode, binding to all `outputs_` tensors and saving in AutogradEngine
-  virtual std::shared_ptr<FunctionNode> AddBackwardFuncPtr(
+  virtual Maybe<FunctionNode> AddBackwardFuncPtr(
       const std::string& op_type_name,
       const std::shared_ptr<
           const std::function<Maybe<void>(const TensorTuple&, TensorTuple*, bool)>>& backward_fn,
@@ -124,7 +124,7 @@ class StackAutogradEngine final : public AutogradEngine {
                                             bool retain_graph,
                                             bool create_graph) override;
   void ClearEngine() override;
-  std::shared_ptr<FunctionNode> AddBackwardFuncPtr(
+  Maybe<FunctionNode> AddBackwardFuncPtr(
       const std::string& op_type_name,
       const std::shared_ptr<
          const std::function<Maybe<void>(const TensorTuple&, TensorTuple*, bool)>>& backward_fn,
@@ -185,7 +185,7 @@ class GraphAutogradEngine final : public AutogradEngine {
                                             bool retain_graph,
                                             bool create_graph) override;
   void ClearEngine() override{};
-  std::shared_ptr<FunctionNode> AddBackwardFuncPtr(
+  Maybe<FunctionNode> AddBackwardFuncPtr(
       const std::string& op_type_name,
       const std::shared_ptr<
          const std::function<Maybe<void>(const TensorTuple&, TensorTuple*, bool)>>& backward_fn,
......
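The header diff above changes the return type of AddBackwardFuncPtr from std::shared_ptr<FunctionNode> to Maybe<FunctionNode>, giving failures inside it (such as the AddAccumulateFunctionNode call that is now wrapped in JUST) a channel out to the caller. Every call site must unwrap the result accordingly. A rough before/after sketch of a hypothetical caller follows; the variable names are illustrative, and the real caller updated by this commit is AutogradInterpreter::Apply, shown further down:

// Before: the result was used directly, and errors inside
// AddBackwardFuncPtr could only be silently dropped.
//   std::shared_ptr<FunctionNode> node =
//       engine->AddBackwardFuncPtr(op_name, backward_fn, inputs, outputs);
//
// After: the caller sits in a Maybe-returning function and unwraps with
// JUST, so the error propagates instead of vanishing.
//   const auto& node =
//       JUST(engine->AddBackwardFuncPtr(op_name, backward_fn, inputs, outputs));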
@@ -166,7 +166,7 @@ Maybe<void> DefaultOpExprGradFunction::Init(const OpExpr& op) {
   input_size_ = fw_op_expr->indexed_ibns().size();
   OperatorConf fw_op_conf;
-  fw_op_expr->BuildOpConf(&fw_op_conf, /*attrs=*/{});
+  JUST(fw_op_expr->BuildOpConf(&fw_op_conf, /*attrs=*/{}));
   // Generate backward operator conf for each input. The `LogicalBlobId` for
   // backward output gradient is dummy due to inaccessibility.
......
@@ -129,7 +129,7 @@ class NormalizationGrad : public OpExprGradFunction<NormalizationGradInterpState
       }
     }
     MutableAttrMap shape_attr;
-    shape_attr.SetAttr<Shape>("shape", Shape(dim_vec));
+    JUST(shape_attr.SetAttr<Shape>("shape", Shape(dim_vec)));
     const auto& reshaped_gamma =
         JUST(OpInterpUtil::Dispatch<Tensor>(*reshape_gamma_op_, {gamma}, shape_attr));
     const auto& reshaped_inv_variance =
......
@@ -93,8 +93,8 @@ Maybe<void> SplitLike::Apply(const SplitLikeInterpState* ctx, const TensorTuple&
     }
   }
   MutableAttrMap concat_attrs;
-  concat_attrs.SetAttr<int>("axis", axis_);
-  concat_attrs.SetAttr<int>("max_dim_size", ctx->max_dim_size);
+  JUST(concat_attrs.SetAttr<int>("axis", axis_));
+  JUST(concat_attrs.SetAttr<int>("max_dim_size", ctx->max_dim_size));
   in_grads->at(0) = JUST(OpInterpUtil::Dispatch<Tensor>(*concat_op_, inputs, concat_attrs));
   return Maybe<void>::Ok();
 }
......
@@ -21,10 +21,10 @@ namespace test {
 
 TEST(AttrMap, basic) {
   MutableCfgAttrMap mut_attr_map{};
-  mut_attr_map.SetAttr<int32_t>("zero", 0);
-  mut_attr_map.SetAttr<int64_t>("one", 1);
-  mut_attr_map.SetAttr<std::vector<int32_t>>("zeros", std::vector<int32_t>{0});
-  mut_attr_map.SetAttr<std::vector<int64_t>>("ones", std::vector<int64_t>{1});
+  CHECK_JUST(mut_attr_map.SetAttr<int32_t>("zero", 0));
+  CHECK_JUST(mut_attr_map.SetAttr<int64_t>("one", 1));
+  CHECK_JUST(mut_attr_map.SetAttr<std::vector<int32_t>>("zeros", std::vector<int32_t>{0}));
+  CHECK_JUST(mut_attr_map.SetAttr<std::vector<int64_t>>("ones", std::vector<int64_t>{1}));
   AttrMap attr_map(mut_attr_map);
   {
     const auto& val = CHECK_JUST(attr_map.GetAttr<int32_t>("zero"));
@@ -55,10 +55,10 @@ TEST(AttrMap, basic) {
 TEST(AttrMap, hash_value) {
   HashMap<AttrMap, int32_t> attr_map2int_value;
   MutableCfgAttrMap mut_attr_map{};
-  mut_attr_map.SetAttr<int32_t>("zero", 0);
-  mut_attr_map.SetAttr<int64_t>("one", 1);
-  mut_attr_map.SetAttr<std::vector<int32_t>>("zeros", std::vector<int32_t>{0});
-  mut_attr_map.SetAttr<std::vector<int64_t>>("ones", std::vector<int64_t>{1});
+  CHECK_JUST(mut_attr_map.SetAttr<int32_t>("zero", 0));
+  CHECK_JUST(mut_attr_map.SetAttr<int64_t>("one", 1));
+  CHECK_JUST(mut_attr_map.SetAttr<std::vector<int32_t>>("zeros", std::vector<int32_t>{0}));
+  CHECK_JUST(mut_attr_map.SetAttr<std::vector<int64_t>>("ones", std::vector<int64_t>{1}));
   ASSERT_EQ(AttrMap(mut_attr_map).hash_value(), AttrMap(mut_attr_map).hash_value());
   ASSERT_TRUE(AttrMap(mut_attr_map) == AttrMap(mut_attr_map));
 }
@@ -68,16 +68,16 @@ TEST(AttrMap, hash_map) {
   MutableCfgAttrMap mut_attr_map{};
   attr_map2int_value[AttrMap(mut_attr_map)] = 0;
   ASSERT_EQ(attr_map2int_value.at(AttrMap(mut_attr_map)), 0);
-  mut_attr_map.SetAttr<int32_t>("zero", 0);
+  CHECK_JUST(mut_attr_map.SetAttr<int32_t>("zero", 0));
   attr_map2int_value[AttrMap(mut_attr_map)] = 1;
   ASSERT_EQ(attr_map2int_value.at(AttrMap(mut_attr_map)), 1);
-  mut_attr_map.SetAttr<int64_t>("one", 1);
+  CHECK_JUST(mut_attr_map.SetAttr<int64_t>("one", 1));
   attr_map2int_value[AttrMap(mut_attr_map)] = 2;
   ASSERT_EQ(attr_map2int_value.at(AttrMap(mut_attr_map)), 2);
-  mut_attr_map.SetAttr<std::vector<int32_t>>("zeros", std::vector<int32_t>{0});
+  CHECK_JUST(mut_attr_map.SetAttr<std::vector<int32_t>>("zeros", std::vector<int32_t>{0}));
   attr_map2int_value[AttrMap(mut_attr_map)] = 3;
   ASSERT_EQ(attr_map2int_value.at(AttrMap(mut_attr_map)), 3);
-  mut_attr_map.SetAttr<std::vector<int64_t>>("ones", std::vector<int64_t>{1});
+  CHECK_JUST(mut_attr_map.SetAttr<std::vector<int64_t>>("ones", std::vector<int64_t>{1}));
   attr_map2int_value[AttrMap(mut_attr_map)] = 4;
   ASSERT_EQ(attr_map2int_value.at(AttrMap(mut_attr_map)), 4);
 }
......
@@ -54,7 +54,7 @@ void ReplayInstructions() {
   for (const auto& instr_msg : *RecordedInstructionList()) {
     instr_msg_list.EmplaceBack(instr_msg->Clone());
   }
-  vm::Run(&instr_msg_list);
+  CHECK_JUST(vm::Run(&instr_msg_list));
 }
 
 }  // namespace debug
......
@@ -139,8 +139,8 @@ Maybe<compatible_py::BlobObject> MakeNewBlobObjectLike(
   std::shared_ptr<HashMap<std::string, std::shared_ptr<compatible_py::BlobObject>>>
       bn_in_op2blob_object =
           std::make_shared<HashMap<std::string, std::shared_ptr<compatible_py::BlobObject>>>();
-  builder->RawStatelessCall(std::make_shared<cfg::OpAttribute>(*op_attribute), parallel_conf,
-                            bn_in_op2blob_object);
+  JUST(builder->RawStatelessCall(std::make_shared<cfg::OpAttribute>(*op_attribute), parallel_conf,
+                                 bn_in_op2blob_object));
   return JUST(MapAt(*bn_in_op2blob_object, "out"));
 }
@@ -453,7 +453,7 @@ Maybe<compatible_py::BlobObject> InstructionsBuilder::MakeReferenceBlobObject(
   CHECK((*parallel_desc_symbol) == (*op_arg_parallel_attr->parallel_desc_symbol()));
   std::shared_ptr<compatible_py::BlobObject> ref_blob_object =
       JUST(NewBlobObject(op_arg_parallel_attr, blob_object->op_arg_blob_attr()));
-  ReplaceMirrored(parallel_desc_symbol, {ref_blob_object}, {blob_object});
+  JUST(ReplaceMirrored(parallel_desc_symbol, {ref_blob_object}, {blob_object}));
   return ref_blob_object;
 }
@@ -604,8 +604,8 @@ Maybe<void> InstructionsBuilder::Build121AssignInstruction(
   for (int64_t i = 0; i < parallel_num; ++i) { token_id_1.emplace_back(NewTokenId()); }
   std::tuple<std::vector<uint64_t>, std::vector<uint64_t>> token_ids =
       std::make_tuple(token_id_0, token_id_1);
-  BuildSendInstruction(ref_blob_object->parallel_desc_symbol(), value_blob_object, token_ids);
-  BuildRecvInstruction(value_blob_object->parallel_desc_symbol(), ref_blob_object, token_ids);
+  JUST(BuildSendInstruction(ref_blob_object->parallel_desc_symbol(), value_blob_object, token_ids));
+  JUST(BuildRecvInstruction(value_blob_object->parallel_desc_symbol(), ref_blob_object, token_ids));
   return Maybe<void>::Ok();
 }
......
@@ -79,7 +79,7 @@ Maybe<void> BuiltinOpExprImpl<UserOpConf>::BuildOpConf(OperatorConf* op_conf,
   auto* user_op_conf = op_conf->mutable_user_conf();
   for (const auto& it : attrs) {
     AttrValue attr_val;
-    user_op::AttrValueUtil::ToProtoAttrValue(*it.second, &attr_val);
+    JUST(user_op::AttrValueUtil::ToProtoAttrValue(*it.second, &attr_val));
     (*(user_op_conf->mutable_attr()))[it.first] = attr_val;
   }
   return Maybe<void>::Ok();
@@ -90,7 +90,7 @@ Maybe<StatefulLocalOpKernel> UserOpExpr::MutKernel4Device(const Device& device)
   if (it != device2kernel_.end()) { return it->second; }
 
   std::shared_ptr<OperatorConf> op_conf = std::make_shared<OperatorConf>();
-  BuildOpConf(op_conf.get(), {});
+  JUST(BuildOpConf(op_conf.get(), {}));
   op_conf->set_device_tag(JUST(device.of_type()));
   std::shared_ptr<const ParallelDesc> parallel_desc = device.parallel_desc_ptr();
   const auto& opkernel =
......
@@ -169,8 +169,8 @@ Maybe<void> AutogradInterpreter::Apply(const OpExpr& op_expr, const TensorTuple&
       JUST(grad_closure->Apply(out_grads, in_grads));
       return Maybe<void>::Ok();
     });
-    GetThreadLocalAutogradEngine()->AddBackwardFuncPtr(op_expr.op_type_name() + "_backward",
-                                                       backward_fn, inputs, outputs);
+    JUST(GetThreadLocalAutogradEngine()->AddBackwardFuncPtr(op_expr.op_type_name() + "_backward",
+                                                            backward_fn, inputs, outputs));
   }
   return Maybe<void>::Ok();
 }
......
@@ -95,7 +95,7 @@ template<>
 /*static*/ Maybe<OperatorConf> OpInterpUtil::GenBuiltinOpConf(const BuiltinOpExpr& op_expr,
                                                               const AttrMap& attrs) {
   auto op_conf = std::make_shared<OperatorConf>();
-  op_expr.BuildOpConf(op_conf.get(), attrs);
+  JUST(op_expr.BuildOpConf(op_conf.get(), attrs));
   return op_conf;
 }
......
@@ -74,9 +74,10 @@ Maybe<void> EagerMirroredTensorImpl::UpdateTensorStorage() {
   const auto& parallel_desc = this->device()->parallel_desc_ptr();
   tensor_storage_->set_releaser_hook(
       [blob_object, parallel_desc](const std::shared_ptr<vm::TensorBuffer>&) {
-        PhysicalRun([&](InstructionsBuilder* builder) {
-          builder->ReleaseTensor(blob_object, parallel_desc);
-        });
+        CHECK_JUST(PhysicalRun([&](InstructionsBuilder* builder) -> Maybe<void> {
+          JUST(builder->ReleaseTensor(blob_object, parallel_desc));
+          return Maybe<void>::Ok();
+        }));
       });
   return Maybe<void>::Ok();
 }
@@ -114,10 +115,11 @@ const std::shared_ptr<const Shape>& EagerMirroredTensorImpl::shape() const {
   std::atomic<bool> synced(false);
 
-  PhysicalRun([&](InstructionsBuilder* builder) {
-    builder->AccessBlobByCallback(
-        this, [&synced](uint64_t) { synced = true; }, "const");
-  });
+  CHECK_JUST(PhysicalRun([&](InstructionsBuilder* builder) -> Maybe<void> {
+    JUST(builder->AccessBlobByCallback(
+        this, [&synced](uint64_t) { synced = true; }, "const"));
+    return Maybe<void>::Ok();
+  }));
 
   Global<ForeignLockHelper>::Get()->WithScopedRelease([&synced]() {
     // spin wait
......
@@ -116,18 +116,18 @@ void ExecNode::InferBlobDescs(const ParallelContext* parallel_ctx) {
   if (op_node != nullptr && parallel_ctx->parallel_num() > 1
       && parallel_distribution_signature != nullptr) {
-    CheckPhysicalBlobDesc(
+    CHECK_JUST(CheckPhysicalBlobDesc(
         *op(), op()->input_bns(),
         std::bind(&Operator::GetLogicalBlobDesc4Ibn, op().get(), std::placeholders::_1),
-        parallel_distribution_signature, parallel_ctx, GetBlobDesc4BnInOp);
+        parallel_distribution_signature, parallel_ctx, GetBlobDesc4BnInOp));
   }
   CHECK_JUST(op_->InferBlobDescsIf(GetBlobDesc4BnInOp, parallel_ctx, &GlobalJobDesc()));
   if (op_node != nullptr && parallel_ctx->parallel_num() > 1
       && parallel_distribution_signature != nullptr) {
-    CheckPhysicalBlobDesc(
+    CHECK_JUST(CheckPhysicalBlobDesc(
         *op(), op()->output_bns(),
         std::bind(&Operator::GetLogicalBlobDesc4Obn, op().get(), std::placeholders::_1),
-        parallel_distribution_signature, parallel_ctx, GetBlobDesc4BnInOp);
+        parallel_distribution_signature, parallel_ctx, GetBlobDesc4BnInOp));
   }
   CHECK_JUST(op_->InferInplaceObn2IbnIf(&mut_inplace_obn2ibn_, &con_inplace_obn2ibn_,
                                         GetBlobDesc4BnInOp, parallel_ctx));
......
@@ -58,7 +58,7 @@ OpNode::OpNode(const std::shared_ptr<const ParallelDesc>& parallel_desc,
     : parallel_desc_(parallel_desc),
       op_(ConstructOp(op_conf, parallel_desc->device_type())),
       ibns_(op_->input_bns().begin(), op_->input_bns().end()) {
-  op_->FillOpParallelDesc(parallel_desc);
+  CHECK_JUST(op_->FillOpParallelDesc(parallel_desc));
 }
 
 std::string OpNode::VisualStr() const {
......
@@ -84,8 +84,8 @@ Maybe<void> Cluster::WorkerLoop() {
         wait_session_init_list.front()->WaitUntilCntEqualZero();
         wait_session_init_list.pop_front();
       }
-      Global<vm::EagerOneflow>::Get()->RunPhysicalInstruction(
-          std::const_pointer_cast<const ClusterInstructionProto>(mut_cluster_instruction));
+      JUST(Global<vm::EagerOneflow>::Get()->RunPhysicalInstruction(
+          std::const_pointer_cast<const ClusterInstructionProto>(mut_cluster_instruction)));
     } else {
       OF_UNIMPLEMENTED();
     }
......
@@ -1262,10 +1262,10 @@ Maybe<void> JobBuildAndInferCtx::Rebuild() {
   // build op graph
   OpGraph op_graph;
   if (Global<JobDesc>::Get()) {
-    op_graph.Init(*job_);
+    JUST(op_graph.Init(*job_));
   } else {
     auto scope = std::make_unique<GlobalJobDescScope>(job_->job_conf(), job_id());
-    op_graph.Init(*job_);
+    JUST(op_graph.Init(*job_));
   }
   // clear old job except job_conf
   job_->mutable_net()->Clear();
......
@@ -61,7 +61,7 @@ OperatorConf GenOutputOpConf(const std::string& op_name, const std::string& in,
   auto* output_conf = output_op_conf.mutable_output_conf();
   output_conf->set_in(in);
   output_conf->set_out(out);
-  InterfaceOpUtil::InitBlobConf(output_conf->mutable_blob_conf(), parallel_blob_conf);
+  CHECK_JUST(InterfaceOpUtil::InitBlobConf(output_conf->mutable_blob_conf(), parallel_blob_conf));
   return output_op_conf;
 }
@@ -71,7 +71,7 @@ OperatorConf GenInputOpConf(const std::string& op_name, const std::string& out,
   input_op_conf.set_name(op_name);
   auto* input_conf = input_op_conf.mutable_input_conf();
   input_conf->set_out(out);
-  InterfaceOpUtil::InitBlobConf(input_conf->mutable_blob_conf(), parallel_blob_conf);
+  CHECK_JUST(InterfaceOpUtil::InitBlobConf(input_conf->mutable_blob_conf(), parallel_blob_conf));
   return input_op_conf;
 }
......
@@ -1044,7 +1044,7 @@ void MakePullJob(const std::string& job_name, const std::string& op_name,
   auto* input_conf = input_op_conf.mutable_input_conf();
   input_conf->set_out("out");
   auto* blob_conf = input_conf->mutable_blob_conf();
-  InterfaceOpUtil::InitBlobConf(blob_conf, parallel_blob_conf);
+  CHECK_JUST(InterfaceOpUtil::InitBlobConf(blob_conf, parallel_blob_conf));
   data_type = blob_conf->data_type();
   job_builder.AddOps(parallel_blob_conf.parallel_conf(), {input_op_conf});
 }
@@ -1082,7 +1082,7 @@ void MakePushJob(const std::string& job_name, const std::string& op_name,
   foreign_input_conf->set_out("out");
   foreign_input_conf->set_ofblob_buffer_name(GetForeignInputBufferName(job_name));
   auto* blob_conf = foreign_input_conf->mutable_blob_conf();
-  InterfaceOpUtil::InitBlobConf(blob_conf, parallel_blob_conf);
+  CHECK_JUST(InterfaceOpUtil::InitBlobConf(blob_conf, parallel_blob_conf));
   data_type = blob_conf->data_type();
   ParallelConf parallel_conf;
   parallel_conf.set_device_tag("cpu");
@@ -1095,7 +1095,7 @@ void MakePushJob(const std::string& job_name, const std::string& op_name,
   auto* output_conf = output_op_conf.mutable_output_conf();
   output_conf->set_in(foreign_input_op_conf.name() + "/out");
   output_conf->set_out("out");
-  InterfaceOpUtil::InitBlobConf(output_conf->mutable_blob_conf(), parallel_blob_conf);
+  CHECK_JUST(InterfaceOpUtil::InitBlobConf(output_conf->mutable_blob_conf(), parallel_blob_conf));
   job_builder.AddOps(parallel_blob_conf.parallel_conf(), {output_op_conf});
 }
......
@@ -136,7 +136,8 @@ void GenerateOptimizerOpConf(JobPassCtx* ctx, const OpNode& var_op_node,
   const bool has_state =
       CHECK_JUST(ctx->HasState<AdamBiasCorrectionLearningRateState>(job_pass_state_key));
   if (!has_state) {
-    ctx->ResetState(job_pass_state_key, std::make_unique<AdamBiasCorrectionLearningRateState>());
+    CHECK_JUST(ctx->ResetState(job_pass_state_key,
+                               std::make_unique<AdamBiasCorrectionLearningRateState>()));
   }
   auto* state =
       CHECK_JUST(ctx->MutableState<AdamBiasCorrectionLearningRateState>(job_pass_state_key));
......
@@ -113,7 +113,8 @@ Maybe<void> DynamicLossScaleSchedulePass::Apply(Job* job, JobPassCtx* ctx) const
       {loss_scale_var_op_conf, loss_scale_val_op_conf, good_step_counter_var_conf,
        count_not_finite_stub_op.op_conf(), schedule.op_conf()});
   if (!JUST(ctx->HasState<DynamicLossScaleJobPassState>("dynamic_loss_scale_state"))) {
-    ctx->ResetState("dynamic_loss_scale_state", std::make_unique<DynamicLossScaleJobPassState>());
+    JUST(ctx->ResetState("dynamic_loss_scale_state",
+                         std::make_unique<DynamicLossScaleJobPassState>()));
   }
   auto state = JUST(ctx->MutableState<DynamicLossScaleJobPassState>("dynamic_loss_scale_state"));
   state->set_loss_scale_val_lbn(loss_scale_val_lbn);
......
@@ -95,7 +95,7 @@ void SetCtrlInOpName4VariableOp(const OpGraph& op_graph, JobBuilder* job_builder
 
 void JobCompleter::Complete(Job* job) const {
   JobPassCtx job_pass_ctx(GlobalJobDesc());
-  JobPass4Name("DumpBlobParallelConfPass")(job, &job_pass_ctx);
+  CHECK_JUST(JobPass4Name("DumpBlobParallelConfPass")(job, &job_pass_ctx));
   // NOTE(chengcheng): disable this pass for reduce boxing memory life cycle to memory cost.
   if (!Global<ResourceDesc, ForSession>::Get()->resource().disable_group_boxing_by_dst_parallel()) {
     WithOpGraphAndMutJobBuilder(job, &GroupBoxingByDstParallel);
@@ -107,7 +107,7 @@ void JobCompleter::Complete(Job* job) const {
   WithOpGraphAndMutJobBuilder(job, &AutoSourceAndSinkTick);
   WithOpGraphAndMutJobBuilder(job, &AddGlobalInputCriticalSections);
   WithOpGraphAndMutJobBuilder(job, &AddGlobalOutputCriticalSections);
-  JobPass4Name("DumpBlobParallelConfPass")(job, &job_pass_ctx);
+  CHECK_JUST(JobPass4Name("DumpBlobParallelConfPass")(job, &job_pass_ctx));
   if (XrtCompilationEnabled(GlobalJobDesc())) {
 #ifdef OF_WITH_XRT
     WithOpGraphAndMutJob(job, &RebuildXrtCompiledJob);
......
@@ -202,7 +202,7 @@ bool IsS0SignatureSupported(const OpNode* node) {
   auto LogicalBlobDesc4Ibn = [&](const std::string& bn) -> Maybe<const BlobDesc&> {
     return Maybe<const BlobDesc&>(node->LogicalBlobDesc4Lbi(node->op().BnInOp2Lbi(bn)));
   };
-  node->op().GetSbpSignaturesIf(LogicalBlobDesc4Ibn, node->parallel_desc(), &list);
+  CHECK_JUST(node->op().GetSbpSignaturesIf(LogicalBlobDesc4Ibn, node->parallel_desc(), &list));
   const auto IsInOutS0Parallel = [&](const cfg::SbpSignature& signature) {
     return IsS0Parallel(signature, node->op().SoleIbn())
            && IsS0Parallel(signature, node->op().SoleObn());
......
@@ -88,7 +88,7 @@ Maybe<void> BoxingOp::InferBlobDescs(
   }
 
   DimVector data_tmp_blob_shape_vec = BlobDesc4BnInOp(input_bns().Get(0))->shape().dim_vec();
-  InferTmpBlobDesc(BlobDesc4BnInOp, &data_tmp_blob_shape_vec, is_logical);
+  JUST(InferTmpBlobDesc(BlobDesc4BnInOp, &data_tmp_blob_shape_vec, is_logical));
   if (conf.out_box_case() == BoxingOpConf::kSplitBox) {
     const BoxSplitConf& split_conf = conf.split_box();
......
@@ -42,7 +42,7 @@ Maybe<void> InferBlobDescs(const OperatorConf& op_conf,
   Shape broadcasted_shape(x_desc->shape());
   FOR_RANGE(int64_t, i, 0, num_compatibles) {
     const BlobDesc* compatible_i = BlobDesc4BnInOp(GenRepeatedBn("compatible", i));
-    GetBroadcastShape(broadcasted_shape, compatible_i->shape(), &broadcasted_shape);
+    JUST(GetBroadcastShape(broadcasted_shape, compatible_i->shape(), &broadcasted_shape));
   }
   BlobDesc* y_desc = BlobDesc4BnInOp("y");
   y_desc->CopyFrom(*x_desc);
@@ -97,7 +97,7 @@ class BroadcastToCompatibleWithOp final : public Operator {
     Shape broadcasted_shape{1};
     for (const std::string ibn : input_bns()) {
       const Shape& input_shape = JUST(LogicalBlobDesc4Ibn(ibn)).shape();
-      GetBroadcastShape(broadcasted_shape, input_shape, &broadcasted_shape);
+      JUST(GetBroadcastShape(broadcasted_shape, input_shape, &broadcasted_shape));
     }
     const int64_t broadcast_num_axes = broadcasted_shape.NumAxes();
......
@@ -60,11 +60,11 @@ Maybe<void> DistributeAddOp::InferBlobParallelDesc() {
         std::make_shared<const ParallelDesc>(op_parallel_desc->GetParallelIdOnlyParallelConf(i));
   }
   bn2parallel_desc["out"] = op_parallel_desc;
-  FillBlobParallelDesc([&](const std::string& bn) -> Maybe<const ParallelDesc> {
+  JUST(FillBlobParallelDesc([&](const std::string& bn) -> Maybe<const ParallelDesc> {
     auto it = bn2parallel_desc.find(bn);
     CHECK_OR_RETURN(it != bn2parallel_desc.end());
     return it->second;
-  });
+  }));
   return Maybe<void>::Ok();
 }
......
@@ -89,11 +89,11 @@ Maybe<void> DistributeCloneOp::InferBlobParallelDesc() {
     bn2parallel_desc[output_bns().Get(i)] =
         std::make_shared<const ParallelDesc>(op_parallel_desc->GetParallelIdOnlyParallelConf(i));
   }
-  FillBlobParallelDesc([&](const std::string& bn) -> Maybe<const ParallelDesc> {
+  JUST(FillBlobParallelDesc([&](const std::string& bn) -> Maybe<const ParallelDesc> {
     auto it = bn2parallel_desc.find(bn);
     CHECK_OR_RETURN(it != bn2parallel_desc.end());
     return it->second;
-  });
+  }));
   return Maybe<void>::Ok();
 }
......
@@ -133,11 +133,11 @@ Maybe<void> DistributeConcatOp::InferBlobParallelDesc() {
         std::make_shared<const ParallelDesc>(op_parallel_desc->GetParallelIdOnlyParallelConf(i));
   }
   bn2parallel_desc["out"] = op_parallel_desc;
-  FillBlobParallelDesc([&](const std::string& bn) -> Maybe<const ParallelDesc> {
+  JUST(FillBlobParallelDesc([&](const std::string& bn) -> Maybe<const ParallelDesc> {
     auto it = bn2parallel_desc.find(bn);
     CHECK_OR_RETURN(it != bn2parallel_desc.end());
     return it->second;
-  });
+  }));
   return Maybe<void>::Ok();
 }
@@ -169,7 +169,7 @@ Maybe<void> DistributeConcatOp::InferSbpSignature(
     }
   }
   cfg::SbpSignatureList sbp_sig_list;
-  GetSbpSignatures(LogicalBlobDesc4Ibn, &sbp_sig_list);
+  JUST(GetSbpSignatures(LogicalBlobDesc4Ibn, &sbp_sig_list));
   *sbp_signature = sbp_sig_list.sbp_signature().Get(0);
   return Maybe<void>::Ok();
 }
......
@@ -108,11 +108,11 @@ Maybe<void> DistributeSplitOp::InferBlobParallelDesc() {
     bn2parallel_desc[output_bns().Get(i)] =
         std::make_shared<const ParallelDesc>(op_parallel_desc->GetParallelIdOnlyParallelConf(i));
   }
-  FillBlobParallelDesc([&](const std::string& bn) -> Maybe<const ParallelDesc> {
+  JUST(FillBlobParallelDesc([&](const std::string& bn) -> Maybe<const ParallelDesc> {
    auto it = bn2parallel_desc.find(bn);
    CHECK_OR_RETURN(it != bn2parallel_desc.end());
    return it->second;
-  });
+  }));
   return Maybe<void>::Ok();
 }
@@ -127,7 +127,7 @@ Maybe<void> DistributeSplitOp::InferSbpSignature(
     return Maybe<const BlobDesc&>(sbp_infer_hint->logical_blob_desc());
   };
   cfg::SbpSignatureList sbp_sig_list;
-  GetSbpSignatures(LogicalBlobDesc4Ibn, &sbp_sig_list);
+  JUST(GetSbpSignatures(LogicalBlobDesc4Ibn, &sbp_sig_list));
   *sbp_signature = sbp_sig_list.sbp_signature().Get(0);
   return Maybe<void>::Ok();
 }
......
@@ -126,11 +126,11 @@ class ImageDecoderRandomCropResizeOp final : public Operator {
     } else {
       UNIMPLEMENTED_THEN_RETURN();
     }
-    FillBlobParallelDesc([&](const std::string& bn) -> Maybe<const ParallelDesc> {
+    JUST(FillBlobParallelDesc([&](const std::string& bn) -> Maybe<const ParallelDesc> {
       auto it = bn2parallel_desc.find(bn);
       CHECK_OR_RETURN(it != bn2parallel_desc.end());
       return it->second;
-    });
+    }));
     return Maybe<void>::Ok();
   }
 };
......
@@ -85,7 +85,7 @@ Maybe<void> InterfaceOpUtil::GetInputLikeOpSbpSignature(const InterfaceBlobConf&
                                                         const PbRpf<std::string>& input_bns,
                                                         const PbRpf<std::string>& output_bns,
                                                         cfg::SbpSignature* sbp_signature) {
-  GetSbpSignature(blob_conf, input_bns, output_bns, sbp_signature, true);
+  JUST(GetSbpSignature(blob_conf, input_bns, output_bns, sbp_signature, true));
   return Maybe<void>::Ok();
 }
@@ -93,7 +93,7 @@ Maybe<void> InterfaceOpUtil::GetOutputLikeOpSbpSignature(const InterfaceBlobConf
                                                          const PbRpf<std::string>& input_bns,
                                                          const PbRpf<std::string>& output_bns,
                                                          cfg::SbpSignature* sbp_signature) {
-  GetSbpSignature(blob_conf, input_bns, output_bns, sbp_signature, false);
+  JUST(GetSbpSignature(blob_conf, input_bns, output_bns, sbp_signature, false));
   return Maybe<void>::Ok();
 }
......
@@ -1205,17 +1205,17 @@ Maybe<void> Operator::ToOpAttribute(OpAttribute* op_attribute) const {
       } else {
         const auto parallel_conf =
             std::make_shared<cfg::ParallelConf>(pair.second->parallel_conf());
-        const auto MakeParallelDescSymbol = [&parallel_conf]() -> int64_t {
+        const auto MakeParallelDescSymbol = [&parallel_conf]() -> Maybe<int64_t> {
           int64_t symbol_id;
           const auto BuildInstruction =
               [&symbol_id, &parallel_conf](InstructionsBuilder* builder) -> Maybe<void> {
             symbol_id = JUST(JUST(builder->GetParallelDescSymbol(parallel_conf))->symbol_id());
            return Maybe<void>::Ok();
          };
-          LogicalRun(BuildInstruction);
+          JUST(LogicalRun(BuildInstruction));
           return symbol_id;
         };
-        (*symbol_map)[pair.first] = MakeParallelDescSymbol();
+        (*symbol_map)[pair.first] = JUST(MakeParallelDescSymbol());
       }
     }
     for (const auto& tbn : tmp_bns()) { (*symbol_map)[tbn] = parallel_desc_symbol_id; }
......
@@ -40,7 +40,8 @@ Maybe<void> GrpcRpcManager::Bootstrap() {
   } else {
     ctrl_bootstrap.reset(new HostListCtrlBootstrap(env_desc));
   }
-  ctrl_bootstrap->InitProcessCtx(Global<CtrlServer>::Get()->port(), Global<ProcessCtx>::Get());
+  JUST(
+      ctrl_bootstrap->InitProcessCtx(Global<CtrlServer>::Get()->port(), Global<ProcessCtx>::Get()));
   return Maybe<void>::Ok();
 }
......
@@ -43,10 +43,11 @@ Maybe<void> Run(vm::InstructionMsgList* instr_msg_list) {
 
 Maybe<void> SingleClientSync() {
   BlockingCounter bc(1);
-  LogicalRun([&bc](InstructionsBuilder* builder) {
-    builder->ComputeGlobalFrontSeqBarrier();
-    builder->ComputeRankFrontSeqCallback([&bc]() { bc.Decrease(); });
-  });
+  JUST(LogicalRun([&bc](InstructionsBuilder* builder) -> Maybe<void> {
+    JUST(builder->ComputeGlobalFrontSeqBarrier());
+    JUST(builder->ComputeRankFrontSeqCallback([&bc]() { bc.Decrease(); }));
+    return Maybe<void>::Ok();
+  }));
   bc.WaitUntilCntEqualZero();
......
@@ -70,7 +70,7 @@ class CreateSummaryWriter final : public user_op::OpKernel {
  private:
  void Compute(user_op::KernelComputeContext* ctx) const override {
     const std::string& logdir = ctx->Attr<std::string>("logdir");
-    Global<EventsWriter>::Get()->Init(logdir);
+    CHECK_JUST(Global<EventsWriter>::Get()->Init(logdir));
   }
   bool AlwaysComputeWhenAllOutputsEmpty() const override { return true; }
 };
......
@@ -106,11 +106,12 @@ Maybe<void> ReshapeUserOpUtil::GetReshapeUserOpSbpSignatures(
   {
     Shape squeezed_in_shape;
     Shape squeezed_out_shape;
-    ReshapeUserOpUtil::Squeeze(in_shape, &squeezed_in_shape, &in_squeezed_axis2original_axis);
-    ReshapeUserOpUtil::Squeeze(out_shape, &squeezed_out_shape, &out_squeezed_axis2original_axis);
-    ReshapeUserOpUtil::GetGroupStartInAxis2OutAxis(squeezed_in_shape, squeezed_out_shape,
-                                                   parallel_num,
-                                                   &squeezed_group_start_in_axis2out_axis);
+    JUST(ReshapeUserOpUtil::Squeeze(in_shape, &squeezed_in_shape, &in_squeezed_axis2original_axis));
+    JUST(ReshapeUserOpUtil::Squeeze(out_shape, &squeezed_out_shape,
+                                    &out_squeezed_axis2original_axis));
+    JUST(ReshapeUserOpUtil::GetGroupStartInAxis2OutAxis(squeezed_in_shape, squeezed_out_shape,
+                                                        parallel_num,
+                                                        &squeezed_group_start_in_axis2out_axis));
   }
   for (const auto& pair : squeezed_group_start_in_axis2out_axis) {
     int64_t start_in_axis = in_squeezed_axis2original_axis.at(pair.first);
@@ -184,11 +185,12 @@ Maybe<void> ReshapeUserOpUtil::InferParallelDistribution(
           .PartialSum(user_op::OpArg("in", 0))
           .PartialSum(user_op::OpArg("out", 0))
           .Build();
-      GetReshapeUserOpSbpSignatures(in_shape, out_shape, {{"in", 0}}, {{"like", 0}, {"out", 0}},
-                                    parallel_hierarchy.At(i), &builder);
+      JUST(GetReshapeUserOpSbpSignatures(in_shape, out_shape, {{"in", 0}},
+                                         {{"like", 0}, {"out", 0}}, parallel_hierarchy.At(i),
+                                         &builder));
     } else {
-      GetReshapeUserOpSbpSignatures(in_shape, out_shape, {{"in", 0}}, {{"out", 0}},
-                                    parallel_hierarchy.At(i), &builder);
+      JUST(GetReshapeUserOpSbpSignatures(in_shape, out_shape, {{"in", 0}}, {{"out", 0}},
+                                         parallel_hierarchy.At(i), &builder));
     }
     const cfg::SbpSignature* matched_sbp_signature = nullptr;
......
@@ -54,7 +54,7 @@ using GetSbpFn = std::function<Maybe<void>(user_op::SbpContext*)>;
 GetSbpFn MakeGetSbpFn(GetSbpFn extra) {
   return [extra](user_op::SbpContext* ctx) -> Maybe<void> {
     JUST(extra(ctx));
-    GetBasicSbpSignature(ctx);
+    JUST(GetBasicSbpSignature(ctx));
     return Maybe<void>::Ok();
   };
 }
......
@@ -40,51 +40,55 @@ Maybe<void> CheckAndLabelAxesToSqueezeMinusOne(const AxisVector& axes, DimVector
 
 }  // namespace
 
+Maybe<void> SqueezeTensorDescInferFn(user_op::InferContext* ctx) {
+  const Shape& in_shape = ctx->InputShape("in", 0);
+  Shape* out_shape = ctx->OutputShape("out", 0);
+  AxisVector fixed_axes_vec;
+  JUST(TransformNegativeAxesToPositive(ctx->Attr<std::vector<int32_t>>("axes"), in_shape.NumAxes(),
+                                       &fixed_axes_vec));
+  DimVector dim_vec = in_shape.dim_vec();
+  JUST(CheckAndLabelAxesToSqueezeMinusOne(fixed_axes_vec, &dim_vec));
+  dim_vec.erase(std::remove(dim_vec.begin(), dim_vec.end(), -1), dim_vec.end());
+  if (dim_vec.empty()) {
+    *out_shape = Shape({1});
+  } else {
+    *out_shape = Shape(dim_vec);
+  }
+  return Maybe<void>::Ok();
+}
+
+Maybe<void> SqueezeGetSbpFn(user_op::SbpContext* ctx) {
+  const user_op::TensorDesc& in_tensor = ctx->LogicalTensorDesc4InputArgNameAndIndex("in", 0);
+  AxisVector fixed_axes_vec;
+  JUST(TransformNegativeAxesToPositive(ctx->Attr<std::vector<int32_t>>("axes"),
+                                       in_tensor.shape().NumAxes(), &fixed_axes_vec));
+  DimVector dim_vec = in_tensor.shape().dim_vec();
+  JUST(CheckAndLabelAxesToSqueezeMinusOne(fixed_axes_vec, &dim_vec));
+  int32_t out_axis = 0;
+  FOR_RANGE(int32_t, in_axis, 0, dim_vec.size()) {
+    if (dim_vec.at(in_axis) != -1) {
+      ctx->NewBuilder()
+          .Split(user_op::OpArg("in", 0), in_axis)
+          .Split(user_op::OpArg("out", 0), out_axis)
+          .Build();
+      ++out_axis;
+    }
+  }
+  return Maybe<void>::Ok();
+}
+
 REGISTER_USER_OP("squeeze")
     .Input("in")
     .Output("out")
     .Attr<std::vector<int32_t>>("axes")
-    .SetTensorDescInferFn([](user_op::InferContext* ctx) -> Maybe<void> {
-      const Shape& in_shape = ctx->InputShape("in", 0);
-      Shape* out_shape = ctx->OutputShape("out", 0);
-      AxisVector fixed_axes_vec;
-      TransformNegativeAxesToPositive(ctx->Attr<std::vector<int32_t>>("axes"), in_shape.NumAxes(),
-                                      &fixed_axes_vec);
-      DimVector dim_vec = in_shape.dim_vec();
-      CheckAndLabelAxesToSqueezeMinusOne(fixed_axes_vec, &dim_vec);
-      dim_vec.erase(std::remove(dim_vec.begin(), dim_vec.end(), -1), dim_vec.end());
-      if (dim_vec.empty()) {
-        *out_shape = Shape({1});
-      } else {
-        *out_shape = Shape(dim_vec);
-      }
-      return Maybe<void>::Ok();
-    })
+    .SetTensorDescInferFn(SqueezeTensorDescInferFn)
     .SetDataTypeInferFn([](user_op::InferContext* ctx) -> Maybe<void> {
       *ctx->OutputDType("out", 0) = ctx->InputDType("in", 0);
       return Maybe<void>::Ok();
     })
-    .SetGetSbpFn([](user_op::SbpContext* ctx) -> Maybe<void> {
-      const user_op::TensorDesc& in_tensor = ctx->LogicalTensorDesc4InputArgNameAndIndex("in", 0);
-      AxisVector fixed_axes_vec;
-      TransformNegativeAxesToPositive(ctx->Attr<std::vector<int32_t>>("axes"),
-                                      in_tensor.shape().NumAxes(), &fixed_axes_vec);
-      DimVector dim_vec = in_tensor.shape().dim_vec();
-      CheckAndLabelAxesToSqueezeMinusOne(fixed_axes_vec, &dim_vec);
-      int32_t out_axis = 0;
-      FOR_RANGE(int32_t, in_axis, 0, dim_vec.size()) {
-        if (dim_vec.at(in_axis) != -1) {
-          ctx->NewBuilder()
-              .Split(user_op::OpArg("in", 0), in_axis)
-              .Split(user_op::OpArg("out", 0), out_axis)
-              .Build();
-          ++out_axis;
-        }
-      }
-      return Maybe<void>::Ok();
-    });
+    .SetGetSbpFn(SqueezeGetSbpFn);
 
 REGISTER_USER_OP_GRAD("squeeze").SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
                                                            user_op::AddOpFn AddOp) {
......
@@ -25,6 +25,18 @@ Maybe<void> CheckStepShape(const Shape* step) {
   return Maybe<void>::Ok();
 }
 
+Maybe<void> CheckStepShapeInCtx(user_op::InferContext* ctx) {
+  JUST(CheckStepShape(&ctx->InputShape("step", 0)));
+  return Maybe<void>::Ok();
+}
+
+Maybe<void> CheckInAndStepScalar(user_op::InferContext* ctx) {
+  const Shape& in_shape = ctx->InputShape("in", 0);
+  const Shape& step_shape = ctx->InputShape("step", 0);
+  CHECK_OR_RETURN(in_shape.elem_cnt() == 1 && step_shape.elem_cnt() == 1);
+  return Maybe<void>::Ok();
+}
+
 REGISTER_CPU_ONLY_USER_OP("create_summary_writer")
     .Attr<std::string>("logdir")
     .SetTensorDescInferFn([](user_op::InferContext* ctx) -> Maybe<void> {
@@ -44,12 +56,7 @@ REGISTER_CPU_ONLY_USER_OP("summary_write_scalar")
     .Input("in")
     .Input("step")
     .Input("tag")
-    .SetTensorDescInferFn([](user_op::InferContext* ctx) -> Maybe<void> {
-      const Shape& in_shape = ctx->InputShape("in", 0);
-      const Shape& step_shape = ctx->InputShape("step", 0);
-      CHECK_OR_RETURN(in_shape.elem_cnt() == 1 && step_shape.elem_cnt() == 1);
-      return Maybe<void>::Ok();
-    })
+    .SetTensorDescInferFn(CheckInAndStepScalar)
     .SetDataTypeInferFn([](user_op::InferContext* ctx) -> Maybe<void> { return Maybe<void>::Ok(); })
     .SetGetSbpFn(user_op::GetSbpFnUtil::DefaultBroadcastToBroadcast);
@@ -57,20 +64,14 @@ REGISTER_CPU_ONLY_USER_OP("summary_write_histogram")
     .Input("in")
     .Input("step")
     .Input("tag")
-    .SetTensorDescInferFn([](user_op::InferContext* ctx) -> Maybe<void> {
-      CheckStepShape(&ctx->InputShape("step", 0));
-      return Maybe<void>::Ok();
-    })
+    .SetTensorDescInferFn(CheckStepShapeInCtx)
     .SetDataTypeInferFn([](user_op::InferContext* ctx) -> Maybe<void> { return Maybe<void>::Ok(); })
     .SetGetSbpFn(user_op::GetSbpFnUtil::DefaultBroadcastToBroadcast);
 
 REGISTER_CPU_ONLY_USER_OP("summary_write_pb")
     .Input("in")
     .Input("step")
-    .SetTensorDescInferFn([](user_op::InferContext* ctx) -> Maybe<void> {
-      CheckStepShape(&ctx->InputShape("step", 0));
-      return Maybe<void>::Ok();
-    })
+    .SetTensorDescInferFn(CheckStepShapeInCtx)
     .SetDataTypeInferFn([](user_op::InferContext* ctx) -> Maybe<void> { return Maybe<void>::Ok(); })
     .SetGetSbpFn(user_op::GetSbpFnUtil::DefaultBroadcastToBroadcast);
@@ -78,10 +79,7 @@ REGISTER_CPU_ONLY_USER_OP("summary_write_image")
     .Input("in")
     .Input("step")
     .Input("tag")
-    .SetTensorDescInferFn([](user_op::InferContext* ctx) -> Maybe<void> {
-      CheckStepShape(&ctx->InputShape("step", 0));
-      return Maybe<void>::Ok();
-    })
+    .SetTensorDescInferFn(CheckStepShapeInCtx)
     .SetDataTypeInferFn([](user_op::InferContext* ctx) -> Maybe<void> { return Maybe<void>::Ok(); })
     .SetGetSbpFn(user_op::GetSbpFnUtil::DefaultBroadcastToBroadcast);
 
 }  // namespace summary
......
@@ -174,7 +174,7 @@ struct EventWriterHelper<DeviceType::kCPU, T> {
     std::unique_ptr<Event> e{new Event};
     e->set_step(step);
     e->set_wall_time(GetWallTime());
-    FillScalarInSummary(value, tag, e->mutable_summary());
+    CHECK_JUST(FillScalarInSummary(value, tag, e->mutable_summary()));
     Global<EventsWriter>::Get()->AppendQueue(std::move(e));
   }
@@ -183,7 +183,7 @@ struct EventWriterHelper<DeviceType::kCPU, T> {
     std::unique_ptr<Event> e{new Event};
     e->set_step(step);
     e->set_wall_time(GetWallTime());
-    FillHistogramInSummary<T>(value, tag, e->mutable_summary());
+    CHECK_JUST(FillHistogramInSummary<T>(value, tag, e->mutable_summary()));
     Global<EventsWriter>::Get()->AppendQueue(std::move(e));
   }
@@ -192,7 +192,7 @@ struct EventWriterHelper<DeviceType::kCPU, T> {
     std::unique_ptr<Event> e{new Event};
     e->set_step(step);
     e->set_wall_time(GetWallTime());
-    FillImageInSummary(tensor, tag, e->mutable_summary());
+    CHECK_JUST(FillImageInSummary(tensor, tag, e->mutable_summary()));
     Global<EventsWriter>::Get()->AppendQueue(std::move(e));
   }
 };
......
@@ -29,7 +29,7 @@ Maybe<void> EventsWriter::Init(const std::string& logdir) {
   file_system_ = std::make_unique<fs::PosixFileSystem>();
   log_dir_ = logdir + "/event";
   file_system_->RecursivelyCreateDirIfNotExist(log_dir_);
-  TryToInit();
+  JUST(TryToInit());
   is_inited_ = true;
   last_flush_time_ = CurrentMircoTime();
   return Maybe<void>::Ok();
......