Commit 92ece5c3 authored by Frank Chen, committed by TensorFlower Gardener

Adds auto_shard_policy option to control how auto-sharding should work (AUTO, FILE, DATA).

Auto-sharding can now be controlled by specifying whether it should shard by files, shard by data, or first attempt to shard by file and fall back to sharding by data (AUTO).
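
As an illustration, a minimal sketch of how a pipeline opts into the new policy (based on the Python tests in this change; the filenames are placeholders):

import tensorflow as tf

# Request DATA-based sharding: each worker walks the full dataset and keeps
# only the elements assigned to it.
options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = (
    tf.data.experimental.AutoShardPolicy.DATA)

dataset = tf.data.TFRecordDataset(["file_0.tfrecord", "file_1.tfrecord"])
dataset = dataset.with_options(options)
# Under a multi-worker strategy, strategy.experimental_distribute_dataset()
# triggers the auto-shard rewrite, which reads the policy set above.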

PiperOrigin-RevId: 275502876
Change-Id: I68f3ebc74692baa78a109a4e807075f844403216
Parent 07b5ca44
......@@ -39,6 +39,10 @@ constexpr char kShardDatasetOpName[] = "ShardDataset";
constexpr char kShuffleDatasetOpName[] = "ShuffleDataset";
constexpr char kShuffleDatasetV2OpName[] = "ShuffleDatasetV2";
constexpr char kNumWorkersAttrName[] = "num_workers";
constexpr char kIndexAttrName[] = "index";
constexpr char kAutoShardPolicyAttrName[] = "auto_shard_policy";
constexpr std::array<const char*, 4> kReaderDatasetOps = {
"FixedLengthRecordDataset",
"FixedLengthRecordDatasetV2",
......@@ -101,7 +105,7 @@ constexpr std::array<const char*, 5> kUnshardableSourceDatasetOps = {
// clang-format on
Status OptimizeGraph(const GrapplerItem& item, int64 num_workers, int64 index,
GraphDef* output);
AutoShardPolicy policy, GraphDef* output);
template <std::size_t SIZE>
bool IsDatasetNodeOfType(const NodeDef& node,
......@@ -164,11 +168,10 @@ Status AddShardNode(MutableGraphView* graph, const NodeDef& add_before,
// and we need to shard the Const.
// This is probably not a dataset, so we bail because we can't infer the
// output types and shape.
LOG(WARNING)
<< "Unable to shard this input. You may need to wrap "
"the inputs to your reader dataset in a TensorSliceDataset.";
LOG(WARNING) << "Input node is: " << add_after->DebugString();
return errors::NotFound("Cannot shard non-dataset node.");
return errors::NotFound(
"Unable to shard this input. You may need to wrap the inputs to your "
"reader dataset in a TensorSliceDataset. Input node is ",
add_after->DebugString());
}
// Add new node into graph and update edges
......@@ -367,7 +370,13 @@ Status RecursivelyHandleOp(const NodeDef& node, int64 num_workers, int64 index,
if (!IsDatasetNodeOfType(node, kPassThroughOps)) {
return errors::NotFound(
"Did not find a shardable source, walked to ",
"a node which is not a dataset: ", node.DebugString());
"a node which is not a dataset: ", node.DebugString(),
". Consider either turning off auto-sharding or switching the "
"auto_shard_policy to DATA to shard this dataset. You can do this by "
"creating a new `tf.data.Options()` object then setting "
"`options.experimental_distribute.auto_shard_policy = "
"AutoShardPolicy.DATA` before applying the options object to the "
"dataset via `dataset.with_options(options)`.");
}
const NodeDef* input_node = graph_utils::GetInputNode(node, *graph, 0);
......@@ -376,7 +385,7 @@ Status RecursivelyHandleOp(const NodeDef& node, int64 num_workers, int64 index,
}
Status OptimizeGraph(const GrapplerItem& item, int64 num_workers, int64 index,
GraphDef* output) {
AutoShardPolicy policy, GraphDef* output) {
if (num_workers == 1 && index == 0) {
return Status::OK();
}
......@@ -388,29 +397,44 @@ Status OptimizeGraph(const GrapplerItem& item, int64 num_workers, int64 index,
NodeDef target_node;
absl::flat_hash_set<string> nodes_to_delete;
NodeDef* sink_node;
TF_RETURN_IF_ERROR(graph_utils::GetFetchNode(graph, item, &sink_node));
// The basic approach here is to walk the graph from sink to source, and find
// the latest occurrence of a ReaderDataset (e.g. CSVDataset, TFRecordDataset,
// etc...). We then add a shard after that dataset to shard the outputs of
// that dataset, in effect giving a piece to each worker. Finally, we remove
// occurrences of randomness from before that point in the graph (e.g. things
// like ShuffleDataset) to ensure that `shard` returns a sensible result.
NodeDef* sink_node;
TF_RETURN_IF_ERROR(graph_utils::GetFetchNode(graph, item, &sink_node));
Status s = RecursivelyHandleOp(*sink_node, num_workers, index, &flib, &graph,
&nodes_to_delete);
if (!s.ok() && errors::IsNotFound(s)) {
LOG(WARNING) << "Cannot find shardable dataset, adding a shard node at "
<< "the end of the dataset instead. This may have performance "
<< "implications. Error: " << s.error_message();
TF_RETURN_IF_ERROR(AddShardNode(&graph, *sink_node, num_workers, index));
} else if (!s.ok()) {
return s;
switch (policy) {
case AutoShardPolicy::FILE:
TF_RETURN_IF_ERROR(RecursivelyHandleOp(*sink_node, num_workers, index,
&flib, &graph, &nodes_to_delete));
return graph.DeleteNodes(nodes_to_delete);
break;
case AutoShardPolicy::DATA:
return AddShardNode(&graph, *sink_node, num_workers, index);
break;
case AutoShardPolicy::AUTO:
default:
Status s = RecursivelyHandleOp(*sink_node, num_workers, index, &flib,
&graph, &nodes_to_delete);
if (!s.ok() && errors::IsNotFound(s)) {
LOG(WARNING) << "In AUTO-mode, and switching to DATA-based sharding, "
"instead of FILE-based sharding as we cannot find "
"appropriate reader dataset op(s) to shard. Error: "
<< s.error_message();
TF_RETURN_IF_ERROR(
AddShardNode(&graph, *sink_node, num_workers, index));
} else if (!s.ok()) {
return s;
}
return graph.DeleteNodes(nodes_to_delete);
break;
}
TF_RETURN_IF_ERROR(graph.DeleteNodes(nodes_to_delete));
return Status::OK();
}
} // anonymous namespace
......@@ -419,27 +443,35 @@ Status AutoShard::Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) {
if (!config) return errors::InvalidArgument("RewriterConfig not found.");
if ((config->parameter_map().find("num_workers") ==
if ((config->parameter_map().find(kNumWorkersAttrName) ==
config->parameter_map().end())) {
return errors::InvalidArgument("num_workers parameter missing.");
return errors::InvalidArgument(kNumWorkersAttrName, " parameter missing.");
}
if ((config->parameter_map().find("index") ==
if ((config->parameter_map().find(kIndexAttrName) ==
config->parameter_map().end())) {
return errors::InvalidArgument("index parameter missing.");
return errors::InvalidArgument(kIndexAttrName, " parameter missing.");
}
num_workers_ = config->parameter_map().at("num_workers").i();
index_ = config->parameter_map().at("index").i();
num_workers_ = config->parameter_map().at(kNumWorkersAttrName).i();
index_ = config->parameter_map().at(kIndexAttrName).i();
auto_shard_policy_ =
AutoShardPolicy(config->parameter_map().at(kAutoShardPolicyAttrName).i());
if (auto_shard_policy_ != AutoShardPolicy::AUTO &&
auto_shard_policy_ != AutoShardPolicy::DATA &&
auto_shard_policy_ != AutoShardPolicy::FILE) {
return errors::InvalidArgument(kAutoShardPolicyAttrName, " is invalid.");
}
if (num_workers_ < 1) {
return errors::InvalidArgument("num_workers should be >= 1, currently ",
num_workers_);
return errors::InvalidArgument(kNumWorkersAttrName,
" should be >= 1, currently ", num_workers_);
}
if (index_ < 0 || index_ >= num_workers_) {
return errors::InvalidArgument("index should be >= 0 and < ", num_workers_,
", currently ", index_);
return errors::InvalidArgument(kIndexAttrName, " should be >= 0 and < ",
num_workers_, ", currently ", index_);
}
return Status::OK();
......@@ -450,7 +482,8 @@ Status AutoShard::OptimizeAndCollectStats(Cluster* /* cluster */,
GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
TF_RETURN_IF_ERROR(OptimizeGraph(item, num_workers_, index_, output));
TF_RETURN_IF_ERROR(
OptimizeGraph(item, num_workers_, index_, auto_shard_policy_, output));
stats->num_changes++;
return Status::OK();
}
......
......@@ -21,6 +21,8 @@ limitations under the License.
namespace tensorflow {
namespace grappler {
enum class AutoShardPolicy { AUTO = 0, FILE = 1, DATA = 2 };
// AutoShard takes a Dataset graph and tries to insert a shard node
// automatically before a ReaderDataset (e.g. a CSVDataset or a TFRecordDataset)
// such that the dataset is sharded without any modifications to the original
......@@ -47,6 +49,7 @@ class AutoShard : public TFDataOptimizerBase {
private:
int64 num_workers_;
int64 index_;
AutoShardPolicy auto_shard_policy_;
};
} // namespace grappler
......
......@@ -31,11 +31,15 @@ namespace experimental {
constexpr char kOptimizerName[] = "tf_auto_shard";
AutoShardDatasetOp::AutoShardDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {}
: UnaryDatasetOpKernel(ctx), auto_shard_policy_(0) {
if (ctx->HasAttr("auto_shard_policy")) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("auto_shard_policy", &auto_shard_policy_));
}
}
void AutoShardDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
int64 index, num_workers;
int64 index, num_workers, auto_shard_policy;
OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, kNumWorkers, &num_workers));
OP_REQUIRES(
ctx, num_workers > 0,
......@@ -45,9 +49,10 @@ void AutoShardDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
OP_REQUIRES(
ctx, index >= 0 && index < num_workers,
errors::InvalidArgument("index must be between 0 and ", num_workers - 1));
auto_shard_policy = auto_shard_policy_;
auto config_factory = [num_workers, index]() {
return CreateConfig(num_workers, index);
auto config_factory = [num_workers, index, auto_shard_policy]() {
return CreateConfig(num_workers, index, auto_shard_policy);
};
// We only want to optimize functions for some particular datasets like
......@@ -59,8 +64,8 @@ void AutoShardDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
/*record_fingerprint=*/false, output));
}
RewriterConfig AutoShardDatasetOp::CreateConfig(int64 num_workers,
int64 index) {
RewriterConfig AutoShardDatasetOp::CreateConfig(int64 num_workers, int64 index,
int64 auto_shard_policy) {
RewriterConfig rewriter_config;
rewriter_config.set_fail_on_optimizer_errors(true);
rewriter_config.set_meta_optimizer_iterations(RewriterConfig::ONE);
......@@ -74,6 +79,10 @@ RewriterConfig AutoShardDatasetOp::CreateConfig(int64 num_workers,
AttrValue index_attr;
index_attr.set_i(index);
(*custom_optimizer->mutable_parameter_map())[kIndex] = index_attr;
AttrValue auto_shard_policy_attr;
auto_shard_policy_attr.set_i(auto_shard_policy);
(*custom_optimizer->mutable_parameter_map())[kAutoShardPolicy] =
auto_shard_policy_attr;
return rewriter_config;
}
......
......@@ -27,6 +27,7 @@ class AutoShardDatasetOp : public UnaryDatasetOpKernel {
static constexpr const char* const kInputDataset = "input_dataset";
static constexpr const char* const kNumWorkers = "num_workers";
static constexpr const char* const kIndex = "index";
static constexpr const char* const kAutoShardPolicy = "auto_shard_policy";
static constexpr const char* const kOutputTypes = "output_types";
static constexpr const char* const kOutputShapes = "output_shapes";
......@@ -37,7 +38,9 @@ class AutoShardDatasetOp : public UnaryDatasetOpKernel {
DatasetBase** output) override;
private:
static RewriterConfig CreateConfig(int64 num_workers, int64 index);
static RewriterConfig CreateConfig(int64 num_workers, int64 index,
int64 auto_shard_policy);
int64 auto_shard_policy_;
};
} // namespace experimental
......
......@@ -33,7 +33,8 @@ class AutoShardDatasetOpTest : public DatasetOpsTestBase {
kNodeName, name_utils::OpName(AutoShardDatasetOp::kDatasetType),
{AutoShardDatasetOp::kInputDataset, AutoShardDatasetOp::kNumWorkers,
AutoShardDatasetOp::kIndex},
{{AutoShardDatasetOp::kOutputTypes, output_types},
{{AutoShardDatasetOp::kAutoShardPolicy, 0}, // AutoShardPolicy == AUTO
{AutoShardDatasetOp::kOutputTypes, output_types},
{AutoShardDatasetOp::kOutputShapes, output_shapes}});
TF_RETURN_IF_ERROR(CreateOpKernel(node_def, op_kernel));
return Status::OK();
......
......@@ -48,6 +48,7 @@ REGISTER_OP("AutoShardDataset")
.Input("num_workers: int64")
.Input("index: int64")
.Output("handle: variant")
.Attr("auto_shard_policy: int = 0")
.Attr("output_types: list(type) >= 1")
.Attr("output_shapes: list(shape) >= 1")
.SetShapeFn(shape_inference::ScalarShape);
......@@ -57,6 +58,7 @@ REGISTER_OP("ExperimentalAutoShardDataset")
.Input("num_workers: int64")
.Input("index: int64")
.Output("handle: variant")
.Attr("auto_shard_policy: int = 0")
.Attr("output_types: list(type) >= 1")
.Attr("output_shapes: list(shape) >= 1")
.SetShapeFn(shape_inference::ScalarShape);
......
......@@ -22,6 +22,7 @@ removing existing functionality.
See [Importing Data](https://tensorflow.org/guide/datasets) for an overview.
@@AutoShardPolicy
@@Counter
@@CheckpointInputPipelineHook
@@CsvDataset
......@@ -96,6 +97,7 @@ from tensorflow.python.data.experimental.ops.cardinality import cardinality
from tensorflow.python.data.experimental.ops.cardinality import INFINITE as INFINITE_CARDINALITY
from tensorflow.python.data.experimental.ops.cardinality import UNKNOWN as UNKNOWN_CARDINALITY
from tensorflow.python.data.experimental.ops.counter import Counter
from tensorflow.python.data.experimental.ops.distribute_options import AutoShardPolicy
from tensorflow.python.data.experimental.ops.distribute_options import DistributeOptions
from tensorflow.python.data.experimental.ops.enumerate_ops import enumerate_dataset
from tensorflow.python.data.experimental.ops.error_ops import ignore_errors
......
......@@ -21,6 +21,7 @@ from absl.testing import parameterized
from tensorflow.python.data.experimental.kernel_tests import reader_dataset_ops_test_base
from tensorflow.python.data.experimental.ops import distribute
from tensorflow.python.data.experimental.ops import distribute_options
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.experimental.ops import readers
......@@ -233,6 +234,40 @@ class AutoShardDatasetTest(reader_dataset_ops_test_base.TFRecordDatasetTestBase,
]
self.assertDatasetProducesWithShuffle(dataset, expected, 5, 4, shuffle)
@combinations.generate(test_base.default_test_combinations())
def testWorkersGreaterThanNumFilesWithDataSharding(self):
options = dataset_ops.Options()
options.experimental_distribute.auto_shard_policy = (
distribute_options.AutoShardPolicy.DATA)
dataset = core_readers._TFRecordDataset(self.test_filenames)
dataset = dataset.with_options(options)
dataset = distribute._AutoShardDataset(dataset, 5, 0)
# Should return "Record (0,5) of file (0 --> 9)" since we are sharding by
# individual elements, we should be able to get some data from all files.
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in range(0, 10)
for r in (0, 5)
]
self.assertDatasetProduces(dataset, expected)
@combinations.generate(test_base.default_test_combinations())
def testFileShardingWithoutReaderDatasetOp(self):
options = dataset_ops.Options()
options.experimental_distribute.auto_shard_policy = (
distribute_options.AutoShardPolicy.FILE)
dataset = dataset_ops.Dataset.range(1024)
dataset = dataset.with_options(options)
# We are specifying that we want a file sharding policy, and this pipeline
# doesn't start with file reading, so we should error out.
with self.assertRaises(errors.NotFoundError):
dataset = distribute._AutoShardDataset(dataset, 10, 0)
self.evaluate(self.getNext(dataset)())
@combinations.generate(test_base.default_test_combinations())
def testWorkersGreaterThanNumFiles(self):
dataset = dataset_ops.Dataset.list_files(self.test_filenames)
......
......@@ -17,6 +17,8 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.compat import compat
from tensorflow.python.data.experimental.ops.distribute_options import AutoShardPolicy
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.framework import ops
......@@ -46,11 +48,22 @@ class _AutoShardDataset(dataset_ops.UnaryDataset):
self._input_dataset = input_dataset
self._element_spec = input_dataset.element_spec
variant_tensor = ged_ops.auto_shard_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
num_workers=num_workers,
index=index,
**self._flat_structure)
if (compat.forward_compatible(2019, 11, 25) or
(input_dataset.options().experimental_distribute.auto_shard_policy !=
AutoShardPolicy.AUTO)):
variant_tensor = ged_ops.auto_shard_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
num_workers=num_workers,
index=index,
auto_shard_policy=int(input_dataset.options().experimental_distribute
.auto_shard_policy),
**self._flat_structure)
else:
variant_tensor = ged_ops.auto_shard_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
num_workers=num_workers,
index=index,
**self._flat_structure)
super(_AutoShardDataset, self).__init__(input_dataset, variant_tensor)
@property
......
......@@ -17,10 +17,24 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
from tensorflow.python.data.util import options
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.AutoShardPolicy")
class AutoShardPolicy(enum.IntEnum):
"""Represents the type of auto-sharding we enable.
Please see the DistributeOptions.auto_shard_policy documentation for more
information on each type of autosharding.
"""
AUTO = 0
FILE = 1
DATA = 2
@tf_export("data.experimental.DistributeOptions")
class DistributeOptions(options.OptionsBase):
"""Represents options for distributed data processing.
......@@ -39,14 +53,32 @@ class DistributeOptions(options.OptionsBase):
auto_shard = options.create_option(
name="auto_shard",
ty=bool,
docstring=
"Whether the dataset should be automatically sharded when processed"
"in a distributed fashion. This is applicable when using Keras with "
"multi-worker/TPU distribution strategy, and by "
"using strategy.experimental_distribute_dataset(). In other cases, this "
"option does nothing. If None, defaults to True.",
docstring="Whether the dataset should be automatically sharded when "
"processed in a distributed fashion. This is applicable when using Keras "
"with multi-worker/TPU distribution strategy, and by "
"using strategy.experimental_distribute_dataset(). You can control the "
"behavior of the auto sharder via the `auto_shard_policy` option. In "
"other cases, this option does nothing. If None, defaults to True.",
default_factory=lambda: True)
auto_shard_policy = options.create_option(
name="auto_shard_policy",
ty=AutoShardPolicy,
docstring="The type of sharding that auto-shard should attempt. If this "
"is set to FILE, then we will attempt to shard by files (each worker "
"will get a set of files to process). If we cannot find a set of files "
"to shard for at least one file per worker, we will error out. When this "
"option is selected, make sure that you have enough files so that each "
"worker gets at least one file. There will be a runtime error thrown if "
"there are insufficient files."
"If this is set to DATA, then we will shard by elements produced by the "
"dataset, and each worker will process the whole dataset and discard the "
"portion that is not for itself. "
"This option is set to AUTO by default, AUTO will attempt to first shard "
"by FILE, and fall back to sharding by DATA if we cannot find a set of "
"files to shard.",
default_factory=lambda: AutoShardPolicy.AUTO)
_make_stateless = options.create_option(
name="_make_stateless",
ty=bool,
......
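
To make the docstring above concrete, a minimal sketch (assuming the public names exported by this change, and mirroring testFileShardingWithoutReaderDatasetOp) of the FILE policy's failure mode on a pipeline that does not start with a reader dataset:

import tensorflow as tf

options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = (
    tf.data.experimental.AutoShardPolicy.FILE)
# Dataset.range() is not a reader dataset, so when the auto-shard rewrite runs
# (e.g. under a multi-worker distribution strategy), it fails with
# NotFoundError instead of silently falling back to DATA-based sharding.
dataset = tf.data.Dataset.range(1024).with_options(options)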
path: "tensorflow.data.experimental.AutoShardPolicy"
tf_class {
is_instance: "<enum \'AutoShardPolicy\'>"
member {
name: "AUTO"
mtype: "<enum \'AutoShardPolicy\'>"
}
member {
name: "DATA"
mtype: "<enum \'AutoShardPolicy\'>"
}
member {
name: "FILE"
mtype: "<enum \'AutoShardPolicy\'>"
}
}
......@@ -7,6 +7,10 @@ tf_class {
name: "auto_shard"
mtype: "<type \'property\'>"
}
member {
name: "auto_shard_policy"
mtype: "<type \'property\'>"
}
member {
name: "num_devices"
mtype: "<type \'property\'>"
......
......@@ -4,6 +4,10 @@ tf_module {
name: "AUTOTUNE"
mtype: "<type \'int\'>"
}
member {
name: "AutoShardPolicy"
mtype: "<class \'enum.EnumMeta\'>"
}
member {
name: "CheckpointInputPipelineHook"
mtype: "<type \'type\'>"
......
......@@ -258,7 +258,7 @@ tf_module {
}
member_method {
name: "AutoShardDataset"
argspec: "args=[\'input_dataset\', \'num_workers\', \'index\', \'output_types\', \'output_shapes\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
argspec: "args=[\'input_dataset\', \'num_workers\', \'index\', \'output_types\', \'output_shapes\', \'auto_shard_policy\', \'name\'], varargs=None, keywords=None, defaults=[\'0\', \'None\'], "
}
member_method {
name: "AvgPool"
......@@ -1242,7 +1242,7 @@ tf_module {
}
member_method {
name: "ExperimentalAutoShardDataset"
argspec: "args=[\'input_dataset\', \'num_workers\', \'index\', \'output_types\', \'output_shapes\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
argspec: "args=[\'input_dataset\', \'num_workers\', \'index\', \'output_types\', \'output_shapes\', \'auto_shard_policy\', \'name\'], varargs=None, keywords=None, defaults=[\'0\', \'None\'], "
}
member_method {
name: "ExperimentalBytesProducedStatsDataset"
......
path: "tensorflow.data.experimental.AutoShardPolicy"
tf_class {
is_instance: "<enum \'AutoShardPolicy\'>"
member {
name: "AUTO"
mtype: "<enum \'AutoShardPolicy\'>"
}
member {
name: "DATA"
mtype: "<enum \'AutoShardPolicy\'>"
}
member {
name: "FILE"
mtype: "<enum \'AutoShardPolicy\'>"
}
}
......@@ -7,6 +7,10 @@ tf_class {
name: "auto_shard"
mtype: "<type \'property\'>"
}
member {
name: "auto_shard_policy"
mtype: "<type \'property\'>"
}
member {
name: "num_devices"
mtype: "<type \'property\'>"
......
......@@ -4,6 +4,10 @@ tf_module {
name: "AUTOTUNE"
mtype: "<type \'int\'>"
}
member {
name: "AutoShardPolicy"
mtype: "<class \'enum.EnumMeta\'>"
}
member {
name: "CheckpointInputPipelineHook"
mtype: "<type \'type\'>"
......
......@@ -258,7 +258,7 @@ tf_module {
}
member_method {
name: "AutoShardDataset"
argspec: "args=[\'input_dataset\', \'num_workers\', \'index\', \'output_types\', \'output_shapes\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
argspec: "args=[\'input_dataset\', \'num_workers\', \'index\', \'output_types\', \'output_shapes\', \'auto_shard_policy\', \'name\'], varargs=None, keywords=None, defaults=[\'0\', \'None\'], "
}
member_method {
name: "AvgPool"
......@@ -1242,7 +1242,7 @@ tf_module {
}
member_method {
name: "ExperimentalAutoShardDataset"
argspec: "args=[\'input_dataset\', \'num_workers\', \'index\', \'output_types\', \'output_shapes\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
argspec: "args=[\'input_dataset\', \'num_workers\', \'index\', \'output_types\', \'output_shapes\', \'auto_shard_policy\', \'name\'], varargs=None, keywords=None, defaults=[\'0\', \'None\'], "
}
member_method {
name: "ExperimentalBytesProducedStatsDataset"
......