提交 270a87fb 编写于 作者: K Kexin Zhao

add load op fp16 mode test

上级 eb95417e
......@@ -12,7 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <fstream>
#include <iostream>
#include "paddle/fluid/framework/data_type_transform.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/profiler.h"
......@@ -51,14 +53,30 @@ class LoadOp : public framework::OperatorBase {
// Determine the dtype actually stored on disk and the dtype requested by
// the caller via the "load_as_fp16" attribute.
auto in_dtype = framework::ToDataType(tensor->type());
auto out_dtype = load_as_fp16 ? framework::proto::VarType::FP16 : in_dtype;

if (in_dtype != out_dtype) {
  // Convert the loaded tensor to a float16 tensor in place: transform the
  // data into a temporary, then rebind the output variable to it.
  auto in_kernel_type = framework::OpKernelType(in_dtype, place);
  auto out_kernel_type = framework::OpKernelType(out_dtype, place);
  framework::LoDTensor fp16_tensor;
  // Copy LoD info to the new tensor before the data transform so the
  // sequence structure survives the dtype conversion.
  fp16_tensor.set_lod(tensor->lod());
  framework::TransDataType(in_kernel_type, out_kernel_type, *tensor,
                           &fp16_tensor);
  // Reset the output variable and share the converted buffer with it;
  // ShareDataWith avoids a second copy of the fp16 data.
  out_var->Clear();
  tensor = out_var->GetMutable<framework::LoDTensor>();
  tensor->set_lod(fp16_tensor.lod());
  tensor->ShareDataWith(fp16_tensor);
}
}
};
......
......@@ -63,7 +63,7 @@ TEST(SaveLoadOp, CPU) {
}
}
TEST(SaveLoadFP16Op, CPU) {
TEST(SaveFP16Op, CPU) {
paddle::framework::Scope scope;
paddle::platform::CPUPlace place;
......@@ -94,3 +94,56 @@ TEST(SaveLoadFP16Op, CPU) {
EXPECT_EQ(expect[i], static_cast<float>(actual[i]));
}
}
// Round-trip test: save a float32 tensor to disk, then load it back with
// load_as_fp16=true and verify both the converted values and the LoD info.
TEST(LoadFP16Op, CPU) {
  paddle::framework::Scope scope;
  paddle::platform::CPUPlace place;

  // Build a source tensor with known values and a non-trivial LoD.
  auto var = scope.Var("test_var");
  auto tensor = var->GetMutable<paddle::framework::LoDTensor>();
  tensor->Resize({3, 10});

  paddle::framework::LoD expect_lod;
  expect_lod.resize(1);
  expect_lod[0].push_back(0);
  expect_lod[0].push_back(1);
  expect_lod[0].push_back(2);
  expect_lod[0].push_back(3);
  tensor->set_lod(expect_lod);

  // Fill with values that are exactly representable in float16 so the
  // equality comparison below is exact, not approximate.
  float* expect = tensor->mutable_data<float>(place);
  for (int64_t i = 0; i < tensor->numel(); ++i) {
    expect[i] = static_cast<float>(paddle::platform::float16(i));
  }

  // NOTE(review): the same attribute map (including "load_as_fp16") is
  // reused for the save op, which presumably ignores the extra attribute —
  // confirm against the save op's attr checker.
  paddle::framework::AttributeMap attrs;
  attrs.insert({"file_path", std::string("tensor.save")});
  attrs.insert({"load_as_fp16", true});

  auto save_op = paddle::framework::OpRegistry::CreateOp(
      "save", {{"X", {"test_var"}}}, {}, attrs);
  save_op->Run(scope, place);

  // Load into a fresh variable; load_as_fp16=true should yield fp16 data.
  auto load_var = scope.Var("out_var");
  auto target = load_var->GetMutable<paddle::framework::LoDTensor>();
  auto load_op = paddle::framework::OpRegistry::CreateOp(
      "load", {}, {{"Out", {"out_var"}}}, attrs);
  load_op->Run(scope, place);

  // Values must match exactly after the float32 -> fp16 round trip.
  paddle::platform::float16* actual =
      target->data<paddle::platform::float16>();
  for (int64_t i = 0; i < tensor->numel(); ++i) {
    EXPECT_EQ(expect[i], static_cast<float>(actual[i]));
  }

  // The LoD must be preserved through the dtype conversion.
  auto& actual_lod = target->lod();
  EXPECT_EQ(expect_lod.size(), actual_lod.size());
  for (size_t i = 0; i < expect_lod.size(); ++i) {
    for (size_t j = 0; j < expect_lod[i].size(); ++j) {
      EXPECT_EQ(expect_lod[i][j], actual_lod[i][j]);
    }
  }
}
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册