Unverified commit 37580838, authored by Allen Guo, committed by GitHub

sync misc changes (#42534)

Parent bb5a14dd
@@ -39,7 +39,8 @@ const bool GetBoolEnv(const std::string& str) {
int GetNumDevices() {
bool ipu_model = GetBoolEnv("POPLAR_IPUMODEL");
-  if (ipu_model) {
+  bool compile_only = GetBoolEnv("IPU_COMPILE_ONLY");
+  if (ipu_model || compile_only) {
return 1;
}
int num_devices =
@@ -52,7 +53,8 @@ int GetNumDevices() {
std::vector<int> GetDeviceIds() {
bool ipu_model = GetBoolEnv("POPLAR_IPUMODEL");
-  if (ipu_model) {
+  bool compile_only = GetBoolEnv("IPU_COMPILE_ONLY");
+  if (ipu_model || compile_only) {
return {0};
}
std::vector<int> device_ids;
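The two helpers above now short-circuit whenever POPLAR_IPUMODEL or the new IPU_COMPILE_ONLY flag is set, reporting a single device with id 0. A minimal Python sketch of toggling the new flag follows; only the environment-variable names and the single-device behaviour come from the diff, and the helper ipu_device_count is hypothetical.

import os

def ipu_device_count():
    # Hypothetical mirror of GetNumDevices(): with either flag set,
    # the runtime reports one (virtual) device and device id {0}.
    truthy = ("1", "true", "True")
    if os.environ.get("POPLAR_IPUMODEL") in truthy or \
            os.environ.get("IPU_COMPILE_ONLY") in truthy:
        return 1
    raise RuntimeError("query the real IPU device count here")

os.environ["IPU_COMPILE_ONLY"] = "true"
assert ipu_device_count() == 1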
@@ -96,6 +96,7 @@ Executor::~Executor() {
void Executor::Prepare(const std::string &proto) {
VLOG(10) << "enter Executor::Prepare";
+  compile_only_ = GetBoolEnv("IPU_COMPILE_ONLY");
AcquireDevice();
executor_resources_ = std::make_unique<ExecutorResources>();
@@ -122,9 +123,18 @@ void Executor::Prepare(const std::string &proto) {
}
VLOG(10) << "Creating session from Onnx Model...done";
VLOG(10) << "Preparing session device...";
session_->prepareDevice();
VLOG(10) << "Preparing session device...done";
if (compile_only_) {
LOG(INFO)
<< "Save the offline cache as offline_cache.popart in current path.";
VLOG(10) << "Compile only...";
session_->compileAndExport("./offline_cache.popart");
VLOG(10) << "Compile only...done";
return;
} else {
VLOG(10) << "Preparing session device...";
session_->prepareDevice();
VLOG(10) << "Preparing session device...done";
}
SetWeightsIO();
@@ -141,6 +151,11 @@ void Executor::Prepare(const std::string &proto) {
void Executor::Run(const std::vector<const Tensor *> &inputs,
const std::vector<Tensor *> &outputs,
const framework::ExecutionContext &ctx) {
+  if (compile_only_) {
+    LOG(INFO) << "If IPU_COMPILE_ONLY=True, skip exe.run";
+    return;
+  }
VLOG(10) << "enter Executor::Run";
// inputs
std::map<popart::TensorId, popart::IArray &> popart_inputs;
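Taken together, the Prepare() and Run() changes add a compile-only mode driven by the IPU_COMPILE_ONLY environment variable: the PopART session is compiled and exported to ./offline_cache.popart and the actual run is skipped. The usage sketch below is hedged; only the environment variable, the cache path, and the skipped run are stated in the diff, while the surrounding static-graph setup follows Paddle's existing IPU API and may differ in detail.

import os
os.environ["IPU_COMPILE_ONLY"] = "true"  # enable the new compile-only path

import numpy as np
import paddle

paddle.enable_static()
main_prog, startup_prog = paddle.static.Program(), paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    x = paddle.static.data(name="x", shape=[1, 3, 3, 3], dtype="float32")
    out = paddle.nn.functional.relu(x)

exe = paddle.static.Executor(paddle.IPUPlace())
exe.run(startup_prog)

ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(num_ipus=1, is_training=False)
program = paddle.static.IpuCompiledProgram(
    main_prog, ipu_strategy=ipu_strategy).compile([x.name], [out.name])

# With IPU_COMPILE_ONLY set, this call compiles against an offline device,
# writes ./offline_cache.popart, and skips the actual execution.
exe.run(program, feed={"x": np.ones([1, 3, 3, 3], "float32")},
        fetch_list=[out.name])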
@@ -222,6 +237,7 @@ void Executor::AcquireDevice() {
bool use_ipu_model = GetBoolEnv("POPLAR_IPUMODEL");
bool enable_distribution = ipu_strategy_->enable_distribution;
if (use_ipu_model) {
VLOG(10) << "Create IPU model device...";
std::map<std::string, std::string> deviceOpts{
{
"numIPUs", std::to_string(ipu_strategy_->num_ipus),
@@ -230,7 +246,21 @@
};
device_ = popart::DeviceManager::createDeviceManager().createIpuModelDevice(
deviceOpts);
VLOG(10) << "Create IPU model device...done";
+  } else if (compile_only_) {
+    VLOG(10) << "Create offline device...";
+    std::map<std::string, std::string> deviceOpts{
+        {
+            "numIPUs", std::to_string(ipu_strategy_->num_ipus),
+        },
+        {"ipuVersion", "ipu2"},
+    };
+    device_ =
+        popart::DeviceManager::createDeviceManager().createOfflineIPUDevice(
+            deviceOpts);
+    VLOG(10) << "Create offline device...done";
} else if (enable_distribution) {
VLOG(10) << "Create distribution device...";
auto ipus_per_replica = ipu_strategy_->num_ipus /
ipu_strategy_->popart_options.replicatedGraphCount;
auto device_id = popdist_get_device(ipus_per_replica);
@@ -240,13 +270,16 @@
device_,
errors::Unavailable("Can't attach IPU in distribution, ipu_num = %d.",
RequestIpus(ipu_strategy_->num_ipus)));
VLOG(10) << "Create distribution device...done";
} else {
VLOG(10) << "Create IPU device...";
device_ =
popart::DeviceManager::createDeviceManager().acquireAvailableDevice(
RequestIpus(ipu_strategy_->num_ipus));
PADDLE_ENFORCE_NOT_NULL(
device_, errors::Unavailable("Can't attach IPU, ipu_num = %d.",
RequestIpus(ipu_strategy_->num_ipus)));
VLOG(10) << "Create IPU device...done";
}
VLOG(10) << "leave Executor::AcquireDevice";
}
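With the new branch, AcquireDevice() selects a device in the order: IPU model, compile-only (offline), distributed, and finally a physically attached IPU; the offline device reuses the numIPUs option and pins ipuVersion to "ipu2". The Python sketch below only mirrors that selection order, with placeholder tuples standing in for the PopART DeviceManager calls.

def acquire_device(use_ipu_model, compile_only, enable_distribution, num_ipus):
    # Mirrors the branch order of Executor::AcquireDevice(); the returned
    # tuples are placeholders for the PopART device objects.
    opts = {"numIPUs": str(num_ipus), "ipuVersion": "ipu2"}
    if use_ipu_model:
        return ("ipu_model_device", opts)
    if compile_only:
        # Offline device: compile without attaching to physical IPUs.
        return ("offline_ipu_device", opts)
    if enable_distribution:
        return ("distributed_device", num_ipus)
    return ("acquired_device", num_ipus)

print(acquire_device(False, True, False, 2))  # -> ('offline_ipu_device', ...)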
@@ -91,6 +91,7 @@ class Executor {
const Scope *scope_ = nullptr;
const IpuStrategy *ipu_strategy_ = nullptr;
CompilerResources *compiler_resources_ = nullptr;
+  bool compile_only_ = false;
// Deviceinfo for popart session
std::shared_ptr<popart::DeviceInfo> device_;
@@ -57,14 +57,21 @@ Node *checkpointoutput_handler(Graph *graph, Node *node) {
Node *custom_nll_loss_handler(Graph *graph, Node *node) {
auto *op = node->Op();
auto reduction = BOOST_GET_CONST(int, op->GetAttr("reduction"));
-  auto ignoreIndex = BOOST_GET_CONST(int, op->GetAttr("ignoreIndex"));
+  auto ignoreIndex = BOOST_GET_CONST(std::string, op->GetAttr("ignoreIndex"));
auto inputIsLogProbability =
BOOST_GET_CONST(bool, op->GetAttr("inputIsLogProbability"));
-  return CreateBaseOp(graph, node, "popart_nllloss_v2", node->inputs,
-                      node->outputs,
-                      {{"reduction", reduction},
-                       {"ignoreIndex", ignoreIndex},
-                       {"inputIsLogProbability", inputIsLogProbability}});
+  if (ignoreIndex == "None") {
+    return CreateBaseOp(graph, node, "popart_nllloss_v2", node->inputs,
+                        node->outputs,
+                        {{"reduction", reduction},
+                         {"inputIsLogProbability", inputIsLogProbability}});
+  } else {
+    return CreateBaseOp(graph, node, "popart_nllloss_v2", node->inputs,
+                        node->outputs,
+                        {{"reduction", reduction},
+                         {"ignoreIndex", std::atoi(ignoreIndex.c_str())},
+                         {"inputIsLogProbability", inputIsLogProbability}});
+  }
}
Node *identity_handler(Graph *graph, Node *node) {
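The handler above now receives ignoreIndex as a string so the frontend can pass "None" when no target index should be ignored; any other value is converted back to an integer. Below is a hedged Python sketch of the equivalent attribute-building logic; the function name and dict layout are illustrative, and only the attribute names and the "None" sentinel come from the diff.

def build_nllloss_attrs(reduction, ignore_index, input_is_log_probability):
    # "None" means the ignoreIndex attribute is omitted entirely, so
    # popart_nllloss_v2 keeps its default behaviour.
    attrs = {
        "reduction": reduction,
        "inputIsLogProbability": input_is_log_probability,
    }
    if ignore_index != "None":
        attrs["ignoreIndex"] = int(ignore_index)
    return attrs

print(build_nllloss_attrs(1, "None", False))   # no ignoreIndex key
print(build_nllloss_attrs(1, "-100", False))   # ignoreIndex == -100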
@@ -85,23 +85,6 @@ class TestEnableFp16(TestBase):
self.attrs['dtype'] = 'float32'
-class TestDisableTransferCast(TestEnableFp16):
-    def set_data_feed(self):
-        data = np.random.uniform(size=[1, 3, 3, 3])
-        self.feed_fp32 = {'x': data.astype(np.float32)}
-        self.feed_fp16 = {'x': data.astype(np.float16)}
-
-    def set_op_attrs(self):
-        self.attrs = {}
-        self.attrs['dtype'] = 'float32'
-
-    def run_model(self, exec_mode):
-        ipu_strategy = paddle.static.IpuStrategy()
-        ipu_strategy.set_graph_config(is_training=self.is_training)
-        ipu_strategy.set_options({"transfer_cast_op": False})
-        self.run_op_test(exec_mode)
class TestCase2(TestBase):
def set_data_feed(self):
self.feed_fp32 = {