Unverified commit 6af0593c, authored by Siddharth Goyal, committed by GitHub

Add FP16 option to load_combine op (#10601)

Parent 5f6fd26f
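This commit adds a boolean load_as_fp16 attribute to the load_combine operator: tensors are deserialized in their stored data type and, when the flag is set, converted to float16 before being handed back through the output variables. The sketch below shows how the attribute might be exercised from C++, mirroring the unit test added in this commit; the variable names and the file name "params.ls" are illustrative only, not part of the change.

#include <string>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/platform/place.h"

// Illustrative sketch: load tensors previously written by save_combine and
// ask load_combine to convert them to float16 on the way in.
void LoadParamsAsFP16() {
  paddle::framework::Scope scope;
  paddle::platform::CPUPlace place;

  // Output variables that load_combine will fill.
  scope.Var("w0");
  scope.Var("w1");

  paddle::framework::AttributeMap attrs;
  attrs.insert({"file_path", std::string("params.ls")});  // hypothetical file
  attrs.insert({"load_as_fp16", true});  // attribute introduced by this commit

  auto op = paddle::framework::OpRegistry::CreateOp(
      "load_combine", {}, {{"Out", {"w0", "w1"}}}, attrs);
  op->Run(scope, place);  // w0 and w1 now hold float16 LoDTensors
}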
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 #include <fstream>
+#include "paddle/fluid/framework/data_type_transform.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/platform/device_context.h"
@@ -31,6 +31,7 @@ class LoadCombineOp : public framework::OperatorBase {
   void RunImpl(const framework::Scope &scope,
                const platform::Place &place) const override {
     auto filename = Attr<std::string>("file_path");
+    auto load_as_fp16 = Attr<bool>("load_as_fp16");
     std::ifstream fin(filename);
     PADDLE_ENFORCE(static_cast<bool>(fin),
@@ -59,17 +60,25 @@ class LoadCombineOp : public framework::OperatorBase {
       // Get data from fin to tensor
       DeserializeFromStream(fin, tensor, dev_ctx);
 
-      if (platform::is_gpu_place(place)) {
-        // copy CPU to GPU
-        framework::LoDTensor cpu_tensor;
-        cpu_tensor.ShareDataWith(*tensor);
-        cpu_tensor.set_lod(tensor->lod());
-
-        // reset tensor
+      auto in_dtype = framework::ToDataType(tensor->type());
+      auto out_dtype =
+          load_as_fp16 ? framework::proto::VarType::FP16 : in_dtype;
+
+      if (in_dtype != out_dtype) {
+        // convert to float16 tensor
+        auto in_kernel_type = framework::OpKernelType(in_dtype, place);
+        auto out_kernel_type = framework::OpKernelType(out_dtype, place);
+        framework::LoDTensor fp16_tensor;
+        // copy LoD info to the new tensor
+        fp16_tensor.set_lod(tensor->lod());
+        framework::TransDataType(in_kernel_type, out_kernel_type, *tensor,
+                                 &fp16_tensor);
+
+        // reset output tensor
         out_var->Clear();
         tensor = out_var->GetMutable<framework::LoDTensor>();
-        tensor->set_lod(cpu_tensor.lod());
-        TensorCopy(cpu_tensor, place, dev_ctx, tensor);
+        tensor->set_lod(fp16_tensor.lod());
+        tensor->ShareDataWith(fp16_tensor);
       }
     }
   }
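The hunk above replaces the old unconditional CPU-to-GPU copy with a data-type check: the tensor keeps the type it was saved with unless load_as_fp16 requests a conversion, in which case framework::TransDataType fills a temporary float16 tensor whose buffer the output tensor then shares. A stripped-down sketch of that conversion step in isolation follows; the free-standing helper name CastToFP16 and its signature are my framing of what the operator does inline, not code from this commit.

#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/framework/data_type_transform.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/op_kernel_type.h"

// Sketch of the dtype-conversion step used by the operator: build kernel
// types describing the source and target dtypes and let TransDataType
// allocate and fill the converted tensor.
void CastToFP16(const paddle::framework::LoDTensor& in,
                const paddle::platform::Place& place,
                paddle::framework::LoDTensor* out) {
  auto in_dtype = paddle::framework::ToDataType(in.type());
  auto out_dtype = paddle::framework::proto::VarType::FP16;
  if (in_dtype == out_dtype) {
    out->ShareDataWith(in);  // already float16, nothing to convert
  } else {
    paddle::framework::OpKernelType in_kernel_type(in_dtype, place);
    paddle::framework::OpKernelType out_kernel_type(out_dtype, place);
    paddle::framework::TransDataType(in_kernel_type, out_kernel_type, in, out);
  }
  out->set_lod(in.lod());  // LoD is metadata and must be copied separately
}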
@@ -82,6 +91,13 @@ class LoadCombineOpProtoMaker : public framework::OpProtoAndCheckerMaker {
         "Out",
         "(vector) The output LoDTensors that will be read from the input file.")
         .AsDuplicable();
+    AddAttr<bool>(
+        "load_as_fp16",
+        "(boolean, default false)"
+        "If true, the tensor will be first loaded and then "
+        "converted to float16 data type. Otherwise, the tensor will be "
+        "directly loaded without data type conversion.")
+        .SetDefault(false);
     AddAttr<std::string>("file_path",
                          "(string) "
                          "LoDTensors will be loaded from \"file_path\".")
...
@@ -139,8 +139,9 @@ TEST(SaveLoadCombineOp, CPU) {
   CheckValues<int, int>(expect4, actual4, expect_lod4, actual_lod4, numel4);
 }
 
-// FP16 version of SaveLoadCombineOp Test
-TEST(SaveLoadCombineFP16Op, CPU) {
+// FP16 version of SaveLoadCombineOp Test, only altering the saving aspect
+// to save as FP16.
+TEST(SaveCombineFP16Op, CPU) {
   paddle::framework::Scope scope;
   paddle::platform::CPUPlace place;
 
@@ -169,7 +170,7 @@ TEST(SaveLoadCombineFP16Op, CPU) {
       20, 50, lod4, "test_var4", place, &scope, &expect_lod4);
 
   // Set attributes
-  std::string filename = "check_tensor_fp16.ls";
+  std::string filename = "check_tensor_fp16_save.ls";
   paddle::framework::AttributeMap attrs;
   attrs.insert({"file_path", std::string(filename)});
   attrs.insert({"save_as_fp16", true});
 
@@ -216,6 +217,89 @@ TEST(SaveLoadCombineFP16Op, CPU) {
                                                 actual_lod4, numel4);
 }
+
+// FP16 version of SaveLoadCombineOp Test, only altering the loading aspect
+// to load tensors with FP16 precision.
+TEST(LoadCombineFP16Op, CPU) {
+  paddle::framework::Scope scope;
+  paddle::platform::CPUPlace place;
+
+  std::vector<int> lod1 = {0, 1, 2, 3, 10};
+  int numel1 = 100;
+  paddle::framework::LoD expect_lod1;
+  float* expect1 = CreateForSaveCombineOp<float, paddle::platform::float16>(
+      10, 10, lod1, "test_var1", place, &scope, &expect_lod1);
+
+  std::vector<int> lod2 = {0, 2, 5, 10};
+  int numel2 = 200;
+  paddle::framework::LoD expect_lod2;
+  float* expect2 = CreateForSaveCombineOp<float, paddle::platform::float16>(
+      10, 20, lod2, "test_var2", place, &scope, &expect_lod2);
+
+  std::vector<int> lod3 = {0, 20};
+  int numel3 = 4000;
+  paddle::framework::LoD expect_lod3;
+  float* expect3 = CreateForSaveCombineOp<float, paddle::platform::float16>(
+      20, 200, lod3, "test_var3", place, &scope, &expect_lod3);
+
+  std::vector<int> lod4 = {0, 1, 20};
+  int numel4 = 1000;
+  paddle::framework::LoD expect_lod4;
+  float* expect4 = CreateForSaveCombineOp<float, paddle::platform::float16>(
+      20, 50, lod4, "test_var4", place, &scope, &expect_lod4);
+
+  // Set attributes
+  std::string filename = "check_tensor_fp16_load.ls";
+  paddle::framework::AttributeMap attrs;
+  attrs.insert({"file_path", std::string(filename)});
+
+  // Run the save_combine_op
+  auto save_combine_op = paddle::framework::OpRegistry::CreateOp(
+      "save_combine",
+      {{"X", {"test_var1", "test_var2", "test_var3", "test_var4"}}}, {}, attrs);
+  save_combine_op->Run(scope, place);
+
+  // Set up output vars
+  auto load_var1 = scope.Var("out_var1");
+  auto load_var2 = scope.Var("out_var2");
+  auto load_var3 = scope.Var("out_var3");
+  auto load_var4 = scope.Var("out_var4");
+
+  attrs.insert({"load_as_fp16", true});
+  // Run the load_combine_op
+  auto load_combine_op = paddle::framework::OpRegistry::CreateOp(
+      "load_combine", {},
+      {{"Out", {"out_var1", "out_var2", "out_var3", "out_var4"}}}, attrs);
+  load_combine_op->Run(scope, place);
+
+  auto* target1 = load_var1->GetMutable<paddle::framework::LoDTensor>();
+  auto* target2 = load_var2->GetMutable<paddle::framework::LoDTensor>();
+  auto* target3 = load_var3->GetMutable<paddle::framework::LoDTensor>();
+  auto* target4 = load_var4->GetMutable<paddle::framework::LoDTensor>();
+
+  paddle::framework::LoD actual_lod1, actual_lod2, actual_lod3, actual_lod4;
+  paddle::platform::float16* actual1 =
+      GetValuesAfterLoadCombineOp<paddle::platform::float16>(target1, scope,
+                                                             &actual_lod1);
+  paddle::platform::float16* actual2 =
+      GetValuesAfterLoadCombineOp<paddle::platform::float16>(target2, scope,
+                                                             &actual_lod2);
+  paddle::platform::float16* actual3 =
+      GetValuesAfterLoadCombineOp<paddle::platform::float16>(target3, scope,
                                                             &actual_lod3);
+  paddle::platform::float16* actual4 =
+      GetValuesAfterLoadCombineOp<paddle::platform::float16>(target4, scope,
+                                                             &actual_lod4);
+
+  CheckValues<float, paddle::platform::float16>(expect1, actual1, expect_lod1,
+                                                actual_lod1, numel1);
+  CheckValues<float, paddle::platform::float16>(expect2, actual2, expect_lod2,
+                                                actual_lod2, numel2);
+  CheckValues<float, paddle::platform::float16>(expect3, actual3, expect_lod3,
+                                                actual_lod3, numel3);
+  CheckValues<float, paddle::platform::float16>(expect4, actual4, expect_lod4,
+                                                actual_lod4, numel4);
+}
+
 // Test with original SaveLoadTest
 TEST(SaveLoadTestWithCombineOp, CPU) {
   paddle::framework::Scope scope;
...