提交 f61ac0ba 编写于 作者: qq_22305325 提交者: GitHub

Merge branch 'master' into dev_compatible_Instruction_and_cfg_Instruction

Former-commit-id: 3b62d42b49d5dc2b08c1b63702004d82c76db7d0
......@@ -20,14 +20,14 @@ namespace oneflow {
REGISTER_CPU_ONLY_USER_OP("onerec_decoder")
.Input("in")
.Output("out")
.Attr("key", UserOpAttrType::kAtString)
.Attr("data_type", UserOpAttrType::kAtDataType)
.Attr("static_shape", UserOpAttrType::kAtShape)
.Attr<bool>("is_dynamic", UserOpAttrType::kAtBool, false)
.Attr<bool>("has_reshape", UserOpAttrType::kAtBool, false)
.Attr("reshape", UserOpAttrType::kAtShape)
.Attr<bool>("has_batch_padding", UserOpAttrType::kAtBool, false)
.Attr("batch_padding", UserOpAttrType::kAtShape)
.Attr<std::string>("key")
.Attr<DataType>("data_type")
.Attr<Shape>("static_shape")
.Attr<bool>("is_dynamic", false)
.Attr<bool>("has_reshape", false)
.Attr<Shape>("reshape")
.Attr<bool>("has_batch_padding", false)
.Attr<Shape>("batch_padding")
.SetTensorDescInferFn([](user_op::InferContext* ctx) -> Maybe<void> {
user_op::TensorDesc* in_tensor = ctx->TensorDesc4ArgNameAndIndex("in", 0);
user_op::TensorDesc* out_tensor = ctx->TensorDesc4ArgNameAndIndex("out", 0);
......
......@@ -19,14 +19,14 @@ namespace oneflow {
REGISTER_CPU_ONLY_USER_OP("OneRecReader")
.Output("out")
.Attr("files", UserOpAttrType::kAtListString)
.Attr("batch_size", UserOpAttrType::kAtInt32)
.Attr<bool>("random_shuffle", UserOpAttrType::kAtBool, false)
.Attr<std::string>("shuffle_mode", UserOpAttrType::kAtString, "instance")
.Attr<int64_t>("seed", UserOpAttrType::kAtInt64, -1)
.Attr<int32_t>("shuffle_buffer_size", UserOpAttrType::kAtInt32, 1024)
.Attr<bool>("shuffle_after_epoch", UserOpAttrType::kAtBool, false)
.Attr<bool>("verify_example", UserOpAttrType::kAtBool, true)
.Attr<std::vector<std::string>>("files")
.Attr<int32_t>("batch_size")
.Attr<bool>("random_shuffle", false)
.Attr<std::string>("shuffle_mode", "instance")
.Attr<int64_t>("seed", -1)
.Attr<int32_t>("shuffle_buffer_size", 1024)
.Attr<bool>("shuffle_after_epoch", false)
.Attr<bool>("verify_example", true)
.SetTensorDescInferFn([](user_op::InferContext* ctx) -> Maybe<void> {
user_op::TensorDesc* out_tensor = ctx->TensorDesc4ArgNameAndIndex("out", 0);
int32_t local_batch_size = ctx->Attr<int32_t>("batch_size");
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册