Commit 9a382bd8 authored by guo ran, committed by GitHub

fix warning (#3732)

Co-authored-by: guoran <guoran@oneflow.org>
Co-authored-by: binbinHan <han_binbin@163.com>
Former-commit-id: c32f0064
Parent 8c5bc25f
@@ -20,14 +20,14 @@ namespace oneflow {
 REGISTER_CPU_ONLY_USER_OP("onerec_decoder")
     .Input("in")
     .Output("out")
-    .Attr("key", UserOpAttrType::kAtString)
-    .Attr("data_type", UserOpAttrType::kAtDataType)
-    .Attr("static_shape", UserOpAttrType::kAtShape)
-    .Attr<bool>("is_dynamic", UserOpAttrType::kAtBool, false)
-    .Attr<bool>("has_reshape", UserOpAttrType::kAtBool, false)
-    .Attr("reshape", UserOpAttrType::kAtShape)
-    .Attr<bool>("has_batch_padding", UserOpAttrType::kAtBool, false)
-    .Attr("batch_padding", UserOpAttrType::kAtShape)
+    .Attr<std::string>("key")
+    .Attr<DataType>("data_type")
+    .Attr<Shape>("static_shape")
+    .Attr<bool>("is_dynamic", false)
+    .Attr<bool>("has_reshape", false)
+    .Attr<Shape>("reshape")
+    .Attr<bool>("has_batch_padding", false)
+    .Attr<Shape>("batch_padding")
     .SetTensorDescInferFn([](user_op::InferContext* ctx) -> Maybe<void> {
       user_op::TensorDesc* in_tensor = ctx->TensorDesc4ArgNameAndIndex("in", 0);
       user_op::TensorDesc* out_tensor = ctx->TensorDesc4ArgNameAndIndex("out", 0);
......
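The hunk above replaces the enum-tagged registration style, .Attr("key", UserOpAttrType::kAtString), with the templated form .Attr<std::string>("key"); optional attributes keep their defaults as a plain second argument. Below is a minimal, hypothetical sketch of registering an op with the new typed API and reading the attributes back in the infer function. The op name, attribute set, umbrella include, and the mut_shape()/mut_data_type() accessors are illustrative assumptions, not part of this commit.

    #include "oneflow/core/framework/framework.h"  // assumed umbrella header for user ops

    namespace oneflow {

    // Hypothetical op: required attrs give only a type, optional attrs add a default.
    REGISTER_CPU_ONLY_USER_OP("example_decoder")
        .Input("in")
        .Output("out")
        .Attr<std::string>("key")            // required: no default value
        .Attr<DataType>("data_type")         // required
        .Attr<Shape>("static_shape")         // required
        .Attr<bool>("is_dynamic", false)     // optional: default supplied inline
        .SetTensorDescInferFn([](user_op::InferContext* ctx) -> Maybe<void> {
          // Attributes are read back with the same template parameter used at registration.
          const Shape& static_shape = ctx->Attr<Shape>("static_shape");
          const DataType data_type = ctx->Attr<DataType>("data_type");
          user_op::TensorDesc* out_tensor = ctx->TensorDesc4ArgNameAndIndex("out", 0);
          *out_tensor->mut_shape() = static_shape;   // assumed accessor of this era
          *out_tensor->mut_data_type() = data_type;  // assumed accessor of this era
          return Maybe<void>::Ok();
        });

    }  // namespace oneflow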
@@ -19,14 +19,14 @@ namespace oneflow {
 REGISTER_CPU_ONLY_USER_OP("OneRecReader")
     .Output("out")
-    .Attr("files", UserOpAttrType::kAtListString)
-    .Attr("batch_size", UserOpAttrType::kAtInt32)
-    .Attr<bool>("random_shuffle", UserOpAttrType::kAtBool, false)
-    .Attr<std::string>("shuffle_mode", UserOpAttrType::kAtString, "instance")
-    .Attr<int64_t>("seed", UserOpAttrType::kAtInt64, -1)
-    .Attr<int32_t>("shuffle_buffer_size", UserOpAttrType::kAtInt32, 1024)
-    .Attr<bool>("shuffle_after_epoch", UserOpAttrType::kAtBool, false)
-    .Attr<bool>("verify_example", UserOpAttrType::kAtBool, true)
+    .Attr<std::vector<std::string>>("files")
+    .Attr<int32_t>("batch_size")
+    .Attr<bool>("random_shuffle", false)
+    .Attr<std::string>("shuffle_mode", "instance")
+    .Attr<int64_t>("seed", -1)
+    .Attr<int32_t>("shuffle_buffer_size", 1024)
+    .Attr<bool>("shuffle_after_epoch", false)
+    .Attr<bool>("verify_example", true)
     .SetTensorDescInferFn([](user_op::InferContext* ctx) -> Maybe<void> {
       user_op::TensorDesc* out_tensor = ctx->TensorDesc4ArgNameAndIndex("out", 0);
       int32_t local_batch_size = ctx->Attr<int32_t>("batch_size");
......
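The reader's infer function is cut off just after it reads "batch_size". For orientation only, here is a hedged sketch of how such a reader-style infer function typically completes in OneFlow user ops of this period; the mut_shape()/mut_data_type() accessors and the kTensorBuffer output type are assumptions, not taken from this diff.

    #include "oneflow/core/framework/framework.h"  // assumed umbrella header for user ops

    namespace oneflow {

    // Hypothetical stand-in for the truncated reader infer function above.
    Maybe<void> InferReaderTensorDesc(user_op::InferContext* ctx) {
      user_op::TensorDesc* out_tensor = ctx->TensorDesc4ArgNameAndIndex("out", 0);
      const int32_t local_batch_size = ctx->Attr<int32_t>("batch_size");
      // One variable-length record per batch element (assumed layout).
      *out_tensor->mut_shape() = Shape({local_batch_size});
      *out_tensor->mut_data_type() = DataType::kTensorBuffer;  // assumed element type
      return Maybe<void>::Ok();
    }

    }  // namespace oneflow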