From e20f8814926de16f8019205a9a96f87219aac9b8 Mon Sep 17 00:00:00 2001
From: Anlun Xu
Date: Thu, 13 Oct 2022 16:08:41 -0700
Subject: [PATCH] [xla:runtime] Add CpuCompiler::Export

PiperOrigin-RevId: 481007346
---
 .../compiler/xla/service/cpu/cpu_compiler.cc  | 19 +++++++++++
 .../compiler/xla/service/cpu/cpu_compiler.h   |  3 ++
 .../compiler/xla/service/cpu/cpu_executable.h | 33 +++++++++++++++++++
 3 files changed, 55 insertions(+)

diff --git a/tensorflow/compiler/xla/service/cpu/cpu_compiler.cc b/tensorflow/compiler/xla/service/cpu/cpu_compiler.cc
index cd1f118b08b..3e592d85041 100644
--- a/tensorflow/compiler/xla/service/cpu/cpu_compiler.cc
+++ b/tensorflow/compiler/xla/service/cpu/cpu_compiler.cc
@@ -1770,6 +1770,25 @@ HloCostAnalysis::ShapeSizeFunction CpuCompiler::ShapeSizeBytesFunction() const {
   return CpuExecutable::ShapeSizeBytes;
 }
 
+StatusOr<std::unique_ptr<AotCompilationResult>> CpuCompiler::Export(
+    Executable* executable) const {
+  auto* cpu_executable = tensorflow::down_cast<CpuExecutable*>(executable);
+  if (!cpu_executable)
+    return Internal("Could not downcast Executable to CpuExecutable");
+
+  HloModuleProto module_proto = cpu_executable->module().ToProto();
+  TF_ASSIGN_OR_RETURN(std::string obj_file, cpu_executable->GetObjFile());
+  TF_ASSIGN_OR_RETURN(std::string mlir_module, cpu_executable->GetMlirModule());
+  TF_ASSIGN_OR_RETURN(XlaFrameworkMapping xla_framework_mapping,
+                      cpu_executable->GetXlaFrameworkMapping());
+
+  std::unique_ptr<AotCompilationResult> result =
+      std::make_unique<CpuXlaRuntimeAotCompilationResult>(
+          module_proto, obj_file, mlir_module,
+          cpu_executable->buffer_assignment(), xla_framework_mapping);
+  return result;
+}
+
 }  // namespace cpu
 }  // namespace xla
 
diff --git a/tensorflow/compiler/xla/service/cpu/cpu_compiler.h b/tensorflow/compiler/xla/service/cpu/cpu_compiler.h
index e07128eb26c..f392de93053 100644
--- a/tensorflow/compiler/xla/service/cpu/cpu_compiler.h
+++ b/tensorflow/compiler/xla/service/cpu/cpu_compiler.h
@@ -191,6 +191,9 @@ class CpuCompiler : public LLVMCompiler {
 
   HloCostAnalysis::ShapeSizeFunction ShapeSizeBytesFunction() const override;
 
+  StatusOr<std::unique_ptr<AotCompilationResult>> Export(
+      Executable* executable) const override;
+
  private:
   // Initialize the LLVM target.
   static void InitializeLLVMTarget();
diff --git a/tensorflow/compiler/xla/service/cpu/cpu_executable.h b/tensorflow/compiler/xla/service/cpu/cpu_executable.h
index 9e3953da65f..a6fdaf70a7e 100644
--- a/tensorflow/compiler/xla/service/cpu/cpu_executable.h
+++ b/tensorflow/compiler/xla/service/cpu/cpu_executable.h
@@ -68,6 +68,24 @@ class XlaRuntimeCpuExecutable {
     return *default_executable_;
   }
 
+  StatusOr<std::string> GetObjFile() const {
+    std::unique_ptr<llvm::MemoryBuffer> obj_file =
+        jit_executable_->DefaultExecutable()->obj_file();
+
+    if (!obj_file)
+      return InternalError("XlaRuntimeCpuExecutable didn't save the obj file");
+
+    std::string data(obj_file->getBuffer().data(),
+                     obj_file->getBuffer().size());
+    return data;
+  }
+
+  StatusOr<std::string> GetMlirModule() const {
+    return jit_executable_->mlir_module();
+  }
+
+  XlaFrameworkMapping xla_framework_mapping() { return xla_framework_mapping_; }
+
  private:
   std::unique_ptr<xla::runtime::JitExecutable> jit_executable_;
   xla::runtime::Executable* default_executable_;  // owned by jit_executable_.
@@ -137,6 +155,21 @@ class CpuExecutable : public Executable {
 
   int64_t SizeOfGeneratedCodeInBytes() const override;
 
+  StatusOr<std::string> GetObjFile() const {
+    if (!IsXlaRuntime()) return InternalError("Not an XLA Runtime executable");
+    return xla_runtime_executable_->GetObjFile();
+  }
+
+  StatusOr<std::string> GetMlirModule() const {
+    if (!IsXlaRuntime()) return InternalError("Not an XLA Runtime executable");
+    return xla_runtime_executable_->GetMlirModule();
+  }
+
+  StatusOr<XlaFrameworkMapping> GetXlaFrameworkMapping() const {
+    if (!IsXlaRuntime()) return InternalError("Not an XLA Runtime executable");
+    return xla_runtime_executable_->xla_framework_mapping();
+  }
+
  private:
   // Creates an array suitable for passing as the "buffer_table" argument to the
   // JIT compiled function pointer.
-- 
GitLab