Commit dbd23da8 authored by Andy Ly, committed by TensorFlower Gardener

Automated rollback of commit b9a6fea1

PiperOrigin-RevId: 257684824
Parent: fbd9dc4f
......@@ -594,9 +594,9 @@ cc_library(
":debug_stripper",
":dependency_optimizer",
":function_optimizer",
":generic_layout_optimizer",
":graph_optimizer",
":implementation_selector",
":layout_optimizer",
":loop_optimizer",
":memory_optimizer",
":model_pruner",
......
......@@ -32,8 +32,8 @@ limitations under the License.
#include "tensorflow/core/grappler/optimizers/debug_stripper.h"
#include "tensorflow/core/grappler/optimizers/dependency_optimizer.h"
#include "tensorflow/core/grappler/optimizers/function_optimizer.h"
#include "tensorflow/core/grappler/optimizers/generic_layout_optimizer.h"
#include "tensorflow/core/grappler/optimizers/implementation_selector.h"
#include "tensorflow/core/grappler/optimizers/layout_optimizer.h"
#include "tensorflow/core/grappler/optimizers/loop_optimizer.h"
#include "tensorflow/core/grappler/optimizers/memory_optimizer.h"
#include "tensorflow/core/grappler/optimizers/model_pruner.h"
......@@ -121,7 +121,7 @@ std::unique_ptr<GraphOptimizer> MetaOptimizer::MakeNewOptimizer(
MK_OPT("constfold", new ConstantFolding(cpu_device_));
MK_OPT("shape", new ShapeOptimizer());
MK_OPT("remap", new Remapper(cfg_.remapping()));
MK_OPT("layout", new GenericLayoutOptimizer());
MK_OPT("layout", new LayoutOptimizer());
MK_OPT("auto_mixed_precision",
new AutoMixedPrecision(cfg_.auto_mixed_precision()));
MK_OPT("memory", new MemoryOptimizer(RewriterConfig::MANUAL));
......@@ -193,7 +193,7 @@ Status MetaOptimizer::InitializeOptimizers(
MakeUnique<DependencyOptimizer>(cfg_.dependency_optimization()));
}
if (cfg_.layout_optimizer() != RewriterConfig::OFF) {
optimizers->push_back(MakeUnique<GenericLayoutOptimizer>());
optimizers->push_back(MakeUnique<LayoutOptimizer>());
}
if (AutoMixedPrecisionEnabled(cfg_.auto_mixed_precision())) {
optimizers->push_back(
......@@ -267,7 +267,7 @@ Status MetaOptimizer::InitializeCustomGraphOptimizers(
TF_RETURN_IF_ERROR(custom_optimizer->Init(&optimizer_config));
optimizers->push_back(std::move(custom_optimizer));
} else {
// If there are no custom optimizers with given name, try to initialize a
// If there are no custom optimizers with given name, try to initialize a
// default optimizer. This way, custom configurable optimizers can be
// mixed with default optimizers in any order.
auto optimizer = MakeNewOptimizer(optimizer_config.name());
......
......@@ -6455,39 +6455,37 @@ cuda_py_test(
xla_enable_strict_auto_jit = True,
)
# TODO(b/131764887) Remove once LayoutOptimizer is swapped out with GenericLayoutOptimizer.
#
# cuda_py_test(
# name = "layout_optimizer_test",
# size = "medium",
# srcs = [
# "grappler/layout_optimizer_test.py",
# ],
# additional_deps = [
# ":client_testlib",
# ":framework_for_generated_wrappers",
# ":array_ops",
# ":constant_op",
# ":dtypes",
# ":functional_ops",
# ":math_ops",
# ":nn",
# ":ops",
# ":random_ops",
# ":state_ops",
# ":tf_cluster",
# ":tf_optimizer",
# ":training",
# "//third_party/py/numpy",
# "//tensorflow/core:protos_all_py",
# ],
# shard_count = 10,
# tags = [
# "grappler",
# ],
# # This test will not run on XLA because it primarily tests the TF Classic flow.
# xla_enable_strict_auto_jit = False,
# )
cuda_py_test(
name = "layout_optimizer_test",
size = "medium",
srcs = [
"grappler/layout_optimizer_test.py",
],
additional_deps = [
":client_testlib",
":framework_for_generated_wrappers",
":array_ops",
":constant_op",
":dtypes",
":functional_ops",
":math_ops",
":nn",
":ops",
":random_ops",
":state_ops",
":tf_cluster",
":tf_optimizer",
":training",
"//third_party/py/numpy",
"//tensorflow/core:protos_all_py",
],
shard_count = 10,
tags = [
"grappler",
],
# This test will not run on XLA because it primarily tests the TF Classic flow.
xla_enable_strict_auto_jit = False,
)
py_library(
name = "cost_analyzer",
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register.