Commit ce608761 authored by Justin Lebar, committed by TensorFlower Gardener

[XLA] Add --xla_disable_all_hlo_passes flag.

Previously we only had a flag for disabling specific passes.  But being able to
disable all passes is helpful if you have some already-optimized HLO that you
just want to run.

PiperOrigin-RevId: 224928095
Parent 5478c41e
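For illustration (not part of the commit): like other XLA debug options, this flag is typically supplied through the XLA_FLAGS environment variable (e.g. XLA_FLAGS=--xla_disable_all_hlo_passes=true) or set on the DebugOptions proto directly. A minimal C++ sketch of the programmatic route, assuming the standard xla::GetDebugOptionsFromFlags() entry point:

    #include "tensorflow/compiler/xla/debug_options_flags.h"
    #include "tensorflow/compiler/xla/xla.pb.h"

    // Start from the defaults (plus anything already parsed from XLA_FLAGS)
    // and ask the compiler to skip every HLO pass.
    xla::DebugOptions debug_options = xla::GetDebugOptionsFromFlags();
    debug_options.set_xla_disable_all_hlo_passes(true);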
@@ -201,6 +201,16 @@ static void AllocateFlags() {
          "Comma-separated list of hlo passes to be disabled. These names "
          "must exactly match the passes' names; no whitespace around "
          "commas."),
      tensorflow::Flag(
          "xla_disable_all_hlo_passes",
          bool_setter_for(&DebugOptions::set_xla_disable_all_hlo_passes), false,
          "Disables all HLO passes. Note that some passes are necessary for "
          "correctness and the invariants that must be satisfied by 'fully "
          "optimized' HLO are different for different devices and may change "
          "over time. The only 'guarantee', such as it is, is that if you "
          "compile XLA and dump the optimized HLO for some graph, you should "
          "be able to run it again on the same device with the same build of "
          "XLA."),
      tensorflow::Flag(
          "xla_embed_ir_in_executable",
          bool_setter_for(&DebugOptions::set_xla_embed_ir_in_executable),
......
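To make the help text's "guarantee" concrete: the intended workflow is to dump the fully optimized HLO from one compile, then recompile that HLO on the same device, with the same build of XLA, with all passes disabled. A hedged sketch of the second step, assuming the client-side build-options plumbing of this era (mutable_debug_options() on ExecutableBuildOptions is an assumption here, not something shown in the diff):

    #include "tensorflow/compiler/xla/client/executable_build_options.h"
    #include "tensorflow/compiler/xla/debug_options_flags.h"

    // Build options for re-running already-optimized HLO: no pass may alter it.
    xla::ExecutableBuildOptions build_options;
    *build_options.mutable_debug_options() = xla::GetDebugOptionsFromFlags();
    build_options.mutable_debug_options()->set_xla_disable_all_hlo_passes(true);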
@@ -77,6 +77,11 @@ std::vector<HloPassInterface*> HloPassPipeline::GetEnabledPasses(
  auto repeated_field = debug_options.xla_disable_hlo_passes();
  absl::flat_hash_set<string> disabled_pass_names(repeated_field.begin(),
                                                  repeated_field.end());
  if (debug_options.xla_disable_all_hlo_passes()) {
    VLOG(1) << "*All* passes disabled by --xla_disable_all_hlo_passes.";
    return {};
  }
  if (!disabled_pass_names.empty()) {
    VLOG(1) << "Passes disabled by --xla_disable_hlo_passes: "
            << absl::StrJoin(disabled_pass_names, ", ");
......
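Note that the early return above fires before the per-pass --xla_disable_hlo_passes filter is consulted, so the new flag overrides any pass-name list. A hypothetical sketch of the observable effect (going through HloModuleConfig is one way to attach these DebugOptions to a module; any equivalent plumbing works):

    #include "tensorflow/compiler/xla/service/hlo_module_config.h"
    #include "tensorflow/compiler/xla/xla.pb.h"

    // A module config under which every HloPassPipeline becomes a no-op:
    // GetEnabledPasses() returns an empty vector, so Run() visits no passes
    // and reports that the module was left unchanged.
    xla::DebugOptions debug_options;
    debug_options.set_xla_disable_all_hlo_passes(true);
    xla::HloModuleConfig config;
    config.set_debug_options(debug_options);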
@@ -100,6 +100,14 @@ message DebugOptions {
  // names as specified by the HloPassInterface::name() method.
  repeated string xla_disable_hlo_passes = 30;
  // Disables all HLO passes. Note that some passes are necessary for
  // correctness and the invariants that must be satisfied by "fully optimized"
  // HLO are different for different devices and may change over time. The only
  // "guarantee", such as it is, is that if you compile XLA and dump the
  // optimized HLO for some graph, you should be able to run it again on the
  // same device with the same build of XLA.
  bool xla_disable_all_hlo_passes = 104;
  // Numerical optimization level for the XLA compiler backend; the specific
  // interpretation of this value is left to the backends.
  int32 xla_backend_optimization_level = 31;
@@ -216,6 +224,8 @@ message DebugOptions {
  // If set to true XLA:GPU invokes `ptxas` with -O0 (default is -O3).
  bool xla_gpu_disable_ptxas_optimizations = 103;
  // Next id: 105
  // Extra options to pass to the compilation backend (e.g. LLVM); specific
  // interpretation of these values is left to the backend.
  map<string, string> xla_backend_extra_options = 500;
......